diff --git a/api/type.rb b/api/type.rb index 4e1698ff2c6a..24c29cf47854 100644 --- a/api/type.rb +++ b/api/type.rb @@ -70,6 +70,12 @@ module Fields # A list of properties that conflict with this property. attr_reader :conflicts + # A list of properties, at least one of which must be set. + attr_reader :at_least_one_of + + # A list of properties, exactly one of which must be set. + attr_reader :exactly_one_of + # Can only be overridden - we should never set this ourselves. attr_reader :new_type @@ -109,6 +115,8 @@ def validate check_default_value_property check_conflicts + check_at_least_one_of + check_exactly_one_of end def to_s @@ -135,6 +143,10 @@ def to_json(opts = nil) instance_variables.each do |v| if v == :@conflicts && instance_variable_get(v).empty? # ignore empty conflict arrays + elsif v == :@at_least_one_of && instance_variable_get(v).empty? + # ignore empty at_least_one_of arrays + elsif v == :@exactly_one_of && instance_variable_get(v).empty? + # ignore empty exactly_one_of arrays elsif instance_variable_get(v) == false || instance_variable_get(v).nil? # ignore false booleans as non-existence indicates falsey elsif !ignored_fields.include? v @@ -194,6 +206,38 @@ def conflicting @__resource.all_user_properties.select { |p| p.conflicts.include?(@api_name) }).uniq end + # Checks that all properties referenced in at_least_one_of actually exist. + # The existence check is currently skipped, because the list entries are + # full paths for nested attributes. + def check_at_least_one_of + check :at_least_one_of, type: ::Array, default: [], item_type: ::String + + return if @at_least_one_of.empty? + end + + # Returns the list of properties, at least one of which must be set. + def at_least_one_of_list + return [] unless @__resource + + @at_least_one_of + end + + # Checks that all properties referenced in exactly_one_of actually exist. + # The existence check is currently skipped, because the list entries are + # full paths for nested attributes. + def check_exactly_one_of + check :exactly_one_of, type: ::Array, default: [], item_type: ::String + + return if @exactly_one_of.empty? + end + + # Returns the list of properties, exactly one of which must be set. + def exactly_one_of_list + return [] unless @__resource + + @exactly_one_of + end + def type self.class.name.split('::').last end @@ -289,6 +333,8 @@ class FetchedExternal < Type def validate @conflicts ||= [] + @at_least_one_of ||= [] + @exactly_one_of ||= [] end def api_name diff --git a/overrides/terraform/property_override.rb b/overrides/terraform/property_override.rb index ce49ebed6244..21c365638086 100644 --- a/overrides/terraform/property_override.rb +++ b/overrides/terraform/property_override.rb @@ -69,6 +69,12 @@ def self.attributes # Names of attributes that can't be set alongside this one :conflicts_with, + # Names of attributes, at least one of which must be set + :at_least_one_of, + + # Names of attributes, exactly one of which must be set + :exactly_one_of, + # Names of fields that should be included in the updateMask.
:update_mask_fields, diff --git a/overrides/terraform/resource_override.rb b/overrides/terraform/resource_override.rb index 76e8853516d7..834cbd779d18 100644 --- a/overrides/terraform/resource_override.rb +++ b/overrides/terraform/resource_override.rb @@ -76,7 +76,7 @@ def validate @examples ||= [] check :legacy_name, type: String - check :id_format, type: String, default: '{{name}}' + check :id_format, type: String check :examples, item_type: Provider::Terraform::Examples, type: Array, default: [] check :virtual_fields, item_type: Provider::Terraform::VirtualFields, diff --git a/products/accesscontextmanager/api.yaml b/products/accesscontextmanager/api.yaml index 491657f772de..f763276b18bd 100644 --- a/products/accesscontextmanager/api.yaml +++ b/products/accesscontextmanager/api.yaml @@ -267,14 +267,15 @@ objects: Format: "major.minor.patch" such as "10.5.301", "9.2.1". - !ruby/object:Api::Type::Enum name: 'osType' + required: true description: | The operating system type of the device. values: - - :OS_UNSPECIFIED - - :DESKTOP_MAC - - :DESKTOP_WINDOWS - - :DESKTOP_LINUX - - :DESKTOP_CHROME_OS + - :OS_UNSPECIFIED + - :DESKTOP_MAC + - :DESKTOP_WINDOWS + - :DESKTOP_LINUX + - :DESKTOP_CHROME_OS - !ruby/object:Api::Resource name: 'ServicePerimeter' # This is an unusual API, so we need to use a few fields to map the methods @@ -376,6 +377,10 @@ objects: A list of GCP resources that are inside of the service perimeter. Currently only projects are allowed. Format: projects/{project_number} + at_least_one_of: + - status.0.resources + - status.0.access_levels + - status.0.restricted_services item_type: Api::Type::String - !ruby/object:Api::Type::Array name: 'accessLevels' @@ -390,6 +395,10 @@ objects: be empty. Format: accessPolicies/{policy_id}/accessLevels/{access_level_name} + at_least_one_of: + - status.0.resources + - status.0.access_levels + - status.0.restricted_services item_type: Api::Type::String - !ruby/object:Api::Type::Array name: 'restrictedServices' @@ -399,4 +408,8 @@ objects: `storage.googleapis.com` is specified, access to the storage buckets inside the perimeter must meet the perimeter's access restrictions. + at_least_one_of: + - status.0.resources + - status.0.access_levels + - status.0.restricted_services item_type: Api::Type::String diff --git a/products/accesscontextmanager/terraform.yaml b/products/accesscontextmanager/terraform.yaml index 0e0d6e646838..a11fda4e45c6 100644 --- a/products/accesscontextmanager/terraform.yaml +++ b/products/accesscontextmanager/terraform.yaml @@ -19,6 +19,7 @@ overrides: !ruby/object:Overrides::ResourceOverrides update_minutes: 6 delete_minutes: 6 autogen_async: true + id_format: "{{name}}" import_format: ["{{name}}"] examples: - !ruby/object:Provider::Terraform::Examples diff --git a/products/appengine/api.yaml b/products/appengine/api.yaml index d33bfd8087bc..4f103a4a3781 100644 --- a/products/appengine/api.yaml +++ b/products/appengine/api.yaml @@ -94,6 +94,7 @@ objects: Example: 12345. - !ruby/object:Api::Type::Enum name: 'sslManagementType' + required: true description: | SSL management type for this domain. If `AUTOMATIC`, a managed certificate is automatically provisioned. If `MANUAL`, `certificateId` must be manually specified in order to configure SSL for this domain. 
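The heart of this change is the pair of new property attributes, `at_least_one_of` and `exactly_one_of`, added in api/type.rb and the Terraform property override above; the generator forwards them into the provider schema. As a minimal standalone sketch of the semantics these lists encode (illustration only; enforcement happens in the generated provider, not in this Ruby):

# Illustration only: the constraint semantics carried by the new lists.
# Assumes `set_fields` is the collection of keys the user actually configured.
def satisfies_at_least_one_of?(set_fields, group)
  group.any? { |key| set_fields.include?(key) }
end

def satisfies_exactly_one_of?(set_fields, group)
  group.count { |key| set_fields.include?(key) } == 1
end

routing = %w[single_cluster_routing multi_cluster_routing_use_any]
satisfies_exactly_one_of?(%w[single_cluster_routing], routing) # => true
satisfies_exactly_one_of?(routing, routing)                    # => false
satisfies_at_least_one_of?([], routing)                        # => false

Unlike `conflicts`, which only forbids members of a group from being set together, `exactly_one_of` additionally requires that one member be present; that difference is why the Bigtable, Cloud Build, and Cloud Scheduler definitions below swap `conflicts` for `exactly_one_of`.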
@@ -331,16 +332,19 @@ objects: - :REDIRECT_HTTP_RESPONSE_CODE_307 - !ruby/object:Api::Type::NestedObject name: 'script' + # TODO (mbang): Exactly one of script, staticFiles, or apiEndpoint must be set description: | Executes a script to handle the requests that match this URL pattern. Only the auto value is supported for Node.js in the App Engine standard environment, for example "script:" "auto". properties: - !ruby/object:Api::Type::String name: 'scriptPath' + required: true description: | Path to the script from the application root directory. - !ruby/object:Api::Type::NestedObject name: 'staticFiles' + # TODO (mbang): Exactly one of script, staticFiles, or apiEndpoint must be set description: | Files served directly to the user for a given URL, such as images, CSS stylesheets, or JavaScript source files. Static file handlers describe which files in the application directory are static files, and which URLs serve them. properties: @@ -403,10 +407,14 @@ objects: name: 'zip' description: 'Zip File' required: false + at_least_one_of: + - deployment.0.zip + - deployment.0.files properties: - !ruby/object:Api::Type::String name: 'sourceUrl' description: 'Source URL' + required: true - !ruby/object:Api::Type::Integer name: 'filesCount' description: 'files count' @@ -417,6 +425,9 @@ objects: Manifest of the files stored in Google Cloud Storage that are included as part of this version. All files must be readable using the credentials supplied with this call. required: false + at_least_one_of: + - deployment.0.zip + - deployment.0.files key_name: 'name' key_description: | name of file @@ -428,6 +439,7 @@ objects: SHA1 checksum of the file - !ruby/object:Api::Type::String name: 'sourceUrl' + required: true description: | Source URL - !ruby/object:Api::Type::NestedObject @@ -438,6 +450,7 @@ objects: properties: - !ruby/object:Api::Type::String name: 'shell' + required: true description: | The format should be a shell command that can be fed to bash -c. 
- !ruby/object:Api::Type::String diff --git a/products/appengine/terraform.yaml b/products/appengine/terraform.yaml index 829d31ed1ae8..c842440ca40c 100644 --- a/products/appengine/terraform.yaml +++ b/products/appengine/terraform.yaml @@ -14,8 +14,7 @@ --- !ruby/object:Provider::Terraform::Config overrides: !ruby/object:Overrides::ResourceOverrides FirewallRule: !ruby/object:Overrides::Terraform::ResourceOverride - id_format: "{{project}}/{{priority}}" - import_format: ["{{project}}/{{priority}}"] + import_format: ["apps/{{project}}/firewall/ingressRules/{{priority}}"] examples: - !ruby/object:Provider::Terraform::Examples name: "app_engine_firewall_rule_basic" @@ -25,7 +24,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides test_env_vars: org_id: :ORG_ID StandardAppVersion: !ruby/object:Overrides::Terraform::ResourceOverride - id_format: "apps/{{project}}/services/{{service}}/versions/{{version_id}}" import_format: ["apps/{{project}}/services/{{service}}/versions/{{version_id}}"] mutex: "apps/{{project}}" parameters: @@ -73,8 +71,8 @@ overrides: !ruby/object:Overrides::ResourceOverrides exclude: true DomainMapping: !ruby/object:Overrides::Terraform::ResourceOverride self_link: 'apps/{{project}}/domainMappings/{{domain_name}}' - id_format: "{{domain_name}}" - import_format: ["{{domain_name}}"] + id_format: 'apps/{{project}}/domainMappings/{{domain_name}}' + import_format: ['apps/{{project}}/domainMappings/{{domain_name}}'] examples: - !ruby/object:Provider::Terraform::Examples name: "app_engine_domain_mapping_basic" diff --git a/products/bigquery/api.yaml b/products/bigquery/api.yaml index e287a628f7eb..76a0d427aac5 100644 --- a/products/bigquery/api.yaml +++ b/products/bigquery/api.yaml @@ -52,6 +52,7 @@ objects: description: An email address of a Google Group to grant access to. - !ruby/object:Api::Type::String name: 'role' + required: true description: | Describes the rights granted to the user specified by the other member of the access object. Primitive, Predefined and custom diff --git a/products/bigquery/terraform.yaml b/products/bigquery/terraform.yaml index f1ceb67f45a6..4dfb56def1b6 100644 --- a/products/bigquery/terraform.yaml +++ b/products/bigquery/terraform.yaml @@ -15,8 +15,7 @@ legacy_name: 'bigquery' overrides: !ruby/object:Overrides::ResourceOverrides Dataset: !ruby/object:Overrides::Terraform::ResourceOverride - id_format: "{{project}}:{{dataset_id}}" - import_format: ["{{project}}:{{dataset_id}}", "{{project}}/{{dataset_id}}", "{{dataset_id}}"] + import_format: ["projects/{{project}}/datasets/{{dataset_id}}"] delete_url: projects/{{project}}/datasets/{{dataset_id}}?deleteContents={{delete_contents_on_destroy}} examples: - !ruby/object:Provider::Terraform::Examples diff --git a/products/bigtable/api.yaml b/products/bigtable/api.yaml index aea5c2162cf7..2d6420b49562 100644 --- a/products/bigtable/api.yaml +++ b/products/bigtable/api.yaml @@ -64,8 +64,9 @@ objects: Long form description of the use case for this app profile. - !ruby/object:Api::Type::Boolean name: 'multiClusterRoutingUseAny' - conflicts: - - singleClusterRouting + exactly_one_of: + - single_cluster_routing + - multi_cluster_routing_use_any description: | If true, read/write requests are routed to the nearest cluster in the instance, and will fail over to the nearest cluster that is available in the event of transient errors or delays. Clusters in a region are considered equidistant. 
Choosing this option sacrifices read-your-writes @@ -73,14 +74,16 @@ objects: input: true - !ruby/object:Api::Type::NestedObject name: 'singleClusterRouting' - conflicts: - - multiClusterRoutingUseAny + exactly_one_of: + - single_cluster_routing + - multi_cluster_routing_use_any description: | Use a single-cluster routing policy. input: true properties: - !ruby/object:Api::Type::String name: 'clusterId' + required: true description: | The cluster to which read/write requests should be routed. - !ruby/object:Api::Type::Boolean diff --git a/products/bigtable/terraform.yaml b/products/bigtable/terraform.yaml index 6e8c7204446d..44a112c82ee1 100644 --- a/products/bigtable/terraform.yaml +++ b/products/bigtable/terraform.yaml @@ -14,7 +14,7 @@ --- !ruby/object:Provider::Terraform::Config overrides: !ruby/object:Overrides::ResourceOverrides AppProfile: !ruby/object:Overrides::Terraform::ResourceOverride - id_format: "{{project}}/{{instance}}/{{app_profile_id}}" + id_format: "projects/{{project}}/instances/{{instance}}/appProfiles/{{app_profile_id}}" import_format: ["projects/{{project}}/instances/{{instance}}/appProfiles/{{app_profile_id}}"] examples: - !ruby/object:Provider::Terraform::Examples diff --git a/products/binaryauthorization/api.yaml b/products/binaryauthorization/api.yaml index acef119c7e20..f36d3ec143c3 100644 --- a/products/binaryauthorization/api.yaml +++ b/products/binaryauthorization/api.yaml @@ -96,6 +96,7 @@ objects: See the documentation on publicKey cases below for details. - !ruby/object:Api::Type::String name: asciiArmoredPgpPublicKey + # TODO (mbang): Exactly one of asciiArmoredPgpPublicKey or pkixPublicKey must be set description: | ASCII-armored representation of a PGP public key, as the entire output by the command @@ -108,6 +109,7 @@ objects: be overwritten by the API-calculated ID. - !ruby/object:Api::Type::NestedObject name: pkixPublicKey + # TODO (mbang): Exactly one of asciiArmoredPgpPublicKey or pkixPublicKey must be set description: | A raw PKIX SubjectPublicKeyInfo format public key. @@ -178,6 +180,7 @@ objects: properties: - !ruby/object:Api::Type::String name: namePattern + required: true description: | An image name pattern to whitelist, in the form `registry/path/to/image`. This supports a trailing * as a @@ -202,6 +205,7 @@ objects: properties: - !ruby/object:Api::Type::Enum name: evaluationMode + required: true description: How this admission rule will be evaluated. values: - :ALWAYS_ALLOW @@ -221,6 +225,7 @@ objects: item_type: Api::Type::String - !ruby/object:Api::Type::Enum name: enforcementMode + required: true description: | The action when a pod creation is denied by the admission rule. 
values: diff --git a/products/binaryauthorization/terraform.yaml b/products/binaryauthorization/terraform.yaml index fff7c4a35222..f2f06bdeabd0 100644 --- a/products/binaryauthorization/terraform.yaml +++ b/products/binaryauthorization/terraform.yaml @@ -15,7 +15,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides Attestor: !ruby/object:Overrides::Terraform::ResourceOverride import_format: ["projects/{{project}}/attestors/{{name}}"] - id_format: "{{project}}/{{name}}" examples: - !ruby/object:Provider::Terraform::Examples name: "binary_authorization_attestor_basic" @@ -39,7 +38,7 @@ overrides: !ruby/object:Overrides::ResourceOverrides properties: name: !ruby/object:Overrides::Terraform::PropertyOverride custom_flatten: 'templates/terraform/custom_flatten/name_from_self_link.erb' - custom_expand: 'templates/terraform/custom_expand/binaryauthorization_attestor_name.erb' + custom_expand: 'templates/terraform/custom_expand/shortname_to_url.go.erb' attestationAuthorityNote.noteReference: !ruby/object:Overrides::Terraform::PropertyOverride custom_expand: 'templates/terraform/custom_expand/container_analysis_note.erb' diff_suppress_func: 'compareSelfLinkOrResourceName' @@ -54,7 +53,7 @@ overrides: !ruby/object:Overrides::ResourceOverrides attestationAuthorityNote.publicKeys.id: !ruby/object:Overrides::Terraform::PropertyOverride default_from_api: true Policy: !ruby/object:Overrides::Terraform::ResourceOverride - id_format: "{{project}}" + id_format: "projects/{{project}}" import_format: ["projects/{{project}}"] custom_code: !ruby/object:Provider::Terraform::CustomCode constants: 'templates/terraform/constants/binaryauthorization_policy.erb' diff --git a/products/cloudbuild/api.yaml b/products/cloudbuild/api.yaml index 8943af8b8e30..838242d5f0e3 100644 --- a/products/cloudbuild/api.yaml +++ b/products/cloudbuild/api.yaml @@ -73,7 +73,8 @@ objects: Substitutions data for Build resource. - !ruby/object:Api::Type::String name: 'filename' - conflicts: + exactly_one_of: + - filename - build description: | Path, from the source root, to a file whose contents is used for the template. Either a filename or build template must be provided. @@ -113,6 +114,7 @@ objects: Branch and tag names in trigger templates are interpreted as regular expressions. Any branch or tag change that matches that regular expression will trigger a build. + required: true properties: - !ruby/object:Api::Type::String name: 'projectId' @@ -137,15 +139,27 @@ objects: description: | Name of the branch to build. Exactly one a of branch name, tag, or commit SHA must be provided. This field is a regular expression. + exactly_one_of: + - trigger_template.0.branch_name + - trigger_template.0.tag_name + - trigger_template.0.commit_sha - !ruby/object:Api::Type::String name: 'tagName' description: | Name of the tag to build. Exactly one of a branch name, tag, or commit SHA must be provided. This field is a regular expression. + exactly_one_of: + - trigger_template.0.branch_name + - trigger_template.0.tag_name + - trigger_template.0.commit_sha - !ruby/object:Api::Type::String name: 'commitSha' description: | Explicit commit SHA to build. Exactly one of a branch name, tag, or commit SHA must be provided. + exactly_one_of: + - trigger_template.0.branch_name + - trigger_template.0.tag_name + - trigger_template.0.commit_sha - !ruby/object:Api::Type::NestedObject name: 'github' description: | @@ -166,9 +180,13 @@ objects: name: 'pullRequest' description: | filter to match changes in pull requests. Specify only one of pullRequest or push. 
+ exactly_one_of: + - github.0.pull_request + - github.0.push properties: - !ruby/object:Api::Type::String name: 'branch' + required: true description: | Regex of branches to match. - !ruby/object:Api::Type::Enum @@ -182,17 +200,29 @@ objects: name: 'push' description: | filter to match changes in refs, like branches or tags. Specify only one of pullRequest or push. + exactly_one_of: + - github.0.pull_request + - github.0.push properties: - !ruby/object:Api::Type::String name: 'branch' description: | Regex of branches to match. Specify only one of branch or tag. + exactly_one_of: + - github.0.push.0.branch + - github.0.push.0.tag - !ruby/object:Api::Type::String name: 'tag' description: | Regex of tags to match. Specify only one of branch or tag. + exactly_one_of: + - github.0.push.0.branch + - github.0.push.0.tag - !ruby/object:Api::Type::NestedObject name: 'build' + exactly_one_of: + - filename + - build description: | Contents of the build template. Either a filename or build template must be provided. properties: @@ -211,12 +241,14 @@ objects: If any of the images fail to be pushed, the build status is marked FAILURE. - !ruby/object:Api::Type::Array name: 'steps' + required: true description: | The operations to be performed on the workspace. item_type: !ruby/object:Api::Type::NestedObject properties: - !ruby/object:Api::Type::String name: 'name' + required: true description: | The name of the container image that will run this particular build step. @@ -313,6 +345,7 @@ objects: properties: - !ruby/object:Api::Type::String name: 'name' + required: true description: | Name of the volume to mount. @@ -320,6 +353,7 @@ objects: Docker volumes. Each named volume must be used by at least two build steps. - !ruby/object:Api::Type::String name: 'path' + required: true description: | Path at which to mount the volume. diff --git a/products/cloudbuild/terraform.yaml b/products/cloudbuild/terraform.yaml index 22cb1ad323fb..1b5f5f6b81a1 100644 --- a/products/cloudbuild/terraform.yaml +++ b/products/cloudbuild/terraform.yaml @@ -17,7 +17,7 @@ overrides: !ruby/object:Overrides::ResourceOverrides Trigger: !ruby/object:Overrides::Terraform::ResourceOverride # import by default only works with old-style self links ending in a name import_format: ["projects/{{project}}/triggers/{{trigger_id}}"] - id_format: '{{project}}/{{trigger_id}}' + id_format: 'projects/{{project}}/triggers/{{trigger_id}}' self_link: 'projects/{{project}}/triggers/{{trigger_id}}' examples: - !ruby/object:Provider::Terraform::Examples @@ -30,9 +30,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides default_from_api: true build.steps: !ruby/object:Overrides::Terraform::PropertyOverride name: 'step' - triggerTemplate: !ruby/object:Overrides::Terraform::PropertyOverride - description: | - {{description}}This field is required, and will be validated as such in 3.0.0. triggerTemplate.projectId: !ruby/object:Overrides::Terraform::PropertyOverride default_from_api: true custom_code: !ruby/object:Provider::Terraform::CustomCode diff --git a/products/cloudfunctions/api.yaml b/products/cloudfunctions/api.yaml index 5223ead029b5..30e58d0d8bb4 100644 --- a/products/cloudfunctions/api.yaml +++ b/products/cloudfunctions/api.yaml @@ -136,17 +136,30 @@ objects: description: | The Google Cloud Storage URL, starting with gs://, pointing to the zip archive which contains the function. 
+ exactly_one_of: + - source_repository + - source_archive_url + - source_upload_url - !ruby/object:Api::Type::String name: 'sourceUploadUrl' description: | The Google Cloud Storage signed URL used for source uploading. + exactly_one_of: + - source_repository + - source_archive_url + - source_upload_url - !ruby/object:Api::Type::NestedObject name: 'sourceRepository' description: | The source repository where a function is hosted. + exactly_one_of: + - source_repository + - source_archive_url + - source_upload_url properties: - !ruby/object:Api::Type::String name: 'url' + required: true description: | The URL pointing to the hosted repository where the function is defined - !ruby/object:Api::Type::String diff --git a/products/cloudfunctions/terraform.yaml b/products/cloudfunctions/terraform.yaml index 98f88077c493..ad900171f724 100644 --- a/products/cloudfunctions/terraform.yaml +++ b/products/cloudfunctions/terraform.yaml @@ -16,7 +16,7 @@ legacy_name: 'cloudfunctions' overrides: !ruby/object:Overrides::ResourceOverrides CloudFunction: !ruby/object:Overrides::Terraform::ResourceOverride legacy_name: 'google_cloudfunctions_function' - id_format: '{{project}}/{{region}}/{{cloud_function}}' + id_format: 'projects/{{project}}/locations/{{region}}/functions/{{cloud_function}}' base_url: projects/{{project}}/locations/{{region}}/functions import_format: ["projects/{{project}}/locations/{{region}}/functions/{{cloud_function}}"] exclude_resource: true diff --git a/products/cloudrun/api.yaml b/products/cloudrun/api.yaml index aca9ded3edbe..7c536d37f3fc 100644 --- a/products/cloudrun/api.yaml +++ b/products/cloudrun/api.yaml @@ -341,7 +341,7 @@ objects: - !ruby/object:Api::Type::NestedObject name: configMapRef description: |- - The ConfigMap to select from + The ConfigMap to select from. properties: - !ruby/object:Api::Type::Boolean name: optional @@ -353,6 +353,7 @@ objects: properties: - !ruby/object:Api::Type::String name: name + required: true description: |- Name of the referent. More info: @@ -360,7 +361,7 @@ objects: - !ruby/object:Api::Type::NestedObject name: secretRef description: |- - The Secret to select from + The Secret to select from. properties: - !ruby/object:Api::Type::NestedObject name: localObjectReference @@ -368,6 +369,7 @@ objects: properties: - !ruby/object:Api::Type::String name: name + required: true description: |- Name of the referent. More info: diff --git a/products/cloudscheduler/api.yaml b/products/cloudscheduler/api.yaml index 7fb0fdb19cde..422b4e22f9bb 100644 --- a/products/cloudscheduler/api.yaml +++ b/products/cloudscheduler/api.yaml @@ -90,6 +90,12 @@ objects: Values greater than 5 and negative values are not allowed. required: false input: true + at_least_one_of: + - retry_config.0.retry_count + - retry_config.0.max_retry_duration + - retry_config.0.min_backoff_duration + - retry_config.0.max_backoff_duration + - retry_config.0.max_doublings - !ruby/object:Api::Type::String name: maxRetryDuration description: | @@ -98,6 +104,12 @@ objects: A duration in seconds with up to nine fractional digits, terminated by 's'. required: false input: true + at_least_one_of: + - retry_config.0.retry_count + - retry_config.0.max_retry_duration + - retry_config.0.min_backoff_duration + - retry_config.0.max_backoff_duration + - retry_config.0.max_doublings - !ruby/object:Api::Type::String name: minBackoffDuration description: | @@ -105,6 +117,12 @@ objects: A duration in seconds with up to nine fractional digits, terminated by 's'. 
required: false input: true + at_least_one_of: + - retry_config.0.retry_count + - retry_config.0.max_retry_duration + - retry_config.0.min_backoff_duration + - retry_config.0.max_backoff_duration + - retry_config.0.max_doublings - !ruby/object:Api::Type::String name: maxBackoffDuration description: | @@ -112,6 +130,12 @@ objects: A duration in seconds with up to nine fractional digits, terminated by 's'. required: false input: true + at_least_one_of: + - retry_config.0.retry_count + - retry_config.0.max_retry_duration + - retry_config.0.min_backoff_duration + - retry_config.0.max_backoff_duration + - retry_config.0.max_doublings - !ruby/object:Api::Type::Integer name: maxDoublings description: | @@ -121,6 +145,12 @@ objects: and finally retries retries at intervals of maxBackoffDuration up to retryCount times. required: false input: true + at_least_one_of: + - retry_config.0.retry_count + - retry_config.0.max_retry_duration + - retry_config.0.min_backoff_duration + - retry_config.0.max_backoff_duration + - retry_config.0.max_doublings - !ruby/object:Api::Type::NestedObject name: pubsubTarget description: | @@ -128,9 +158,10 @@ objects: If the job providers a Pub/Sub target the cron will publish a message to the provided topic input: true - conflicts: - - httpTarget - - appEngineHttpTarget + exactly_one_of: + - pubsub_target + - http_target + - app_engine_http_target properties: - !ruby/object:Api::Type::String name: topicName @@ -161,9 +192,10 @@ objects: If the job providers a App Engine HTTP target the cron will send a request to the service instance input: true - conflicts: - - pubsubTarget - - httpTarget + exactly_one_of: + - pubsub_target + - http_target + - app_engine_http_target properties: - !ruby/object:Api::Type::String name: httpMethod @@ -183,6 +215,10 @@ objects: description: | App service. By default, the job is sent to the service which is the default service when the job is attempted. + at_least_one_of: + - app_engine_http_target.0.app_engine_routing.0.service + - app_engine_http_target.0.app_engine_routing.0.version + - app_engine_http_target.0.app_engine_routing.0.instance required: false input: true - !ruby/object:Api::Type::String @@ -190,6 +226,10 @@ objects: description: | App version. By default, the job is sent to the version which is the default version when the job is attempted. + at_least_one_of: + - app_engine_http_target.0.app_engine_routing.0.service + - app_engine_http_target.0.app_engine_routing.0.version + - app_engine_http_target.0.app_engine_routing.0.instance required: false input: true - !ruby/object:Api::Type::String @@ -197,6 +237,10 @@ objects: description: | App instance. By default, the job is sent to an instance which is available when the job is attempted. + at_least_one_of: + - app_engine_http_target.0.app_engine_routing.0.service + - app_engine_http_target.0.app_engine_routing.0.version + - app_engine_http_target.0.app_engine_routing.0.instance required: false input: true - !ruby/object:Api::Type::String @@ -231,9 +275,10 @@ objects: If the job providers a http_target the cron will send a request to the targeted url input: true - conflicts: - - pubsubTarget - - appEngineHttpTarget + exactly_one_of: + - pubsub_target + - http_target + - app_engine_http_target properties: - !ruby/object:Api::Type::String name: uri @@ -267,6 +312,7 @@ objects: properties: - !ruby/object:Api::Type::String name: serviceAccountEmail + required: true description: | Service account email to be used for generating OAuth token. 
The service account must be within the same project as the job. @@ -284,6 +330,7 @@ objects: properties: - !ruby/object:Api::Type::String name: serviceAccountEmail + required: true description: | Service account email to be used for generating OAuth token. The service account must be within the same project as the job. diff --git a/products/cloudscheduler/terraform.yaml b/products/cloudscheduler/terraform.yaml index 8730f55d3a83..334700bb06df 100644 --- a/products/cloudscheduler/terraform.yaml +++ b/products/cloudscheduler/terraform.yaml @@ -14,6 +14,7 @@ --- !ruby/object:Provider::Terraform::Config overrides: !ruby/object:Overrides::ResourceOverrides Job: !ruby/object:Overrides::Terraform::ResourceOverride + id_format: 'projects/{{project}}/locations/{{region}}/jobs/{{name}}' custom_code: !ruby/object:Provider::Terraform::CustomCode constants: templates/terraform/constants/scheduler_auth.erb resource_definition: templates/terraform/resource_definition/scheduler_auth.erb @@ -49,7 +50,7 @@ overrides: !ruby/object:Overrides::ResourceOverrides job_name: "test-job" properties: name: !ruby/object:Overrides::Terraform::PropertyOverride - custom_expand: 'templates/terraform/custom_expand/cloudscheduler_job_name.erb' + custom_expand: 'templates/terraform/custom_expand/shortname_to_url.go.erb' custom_flatten: 'templates/terraform/custom_flatten/name_from_self_link.erb' httpTarget.headers: !ruby/object:Overrides::Terraform::PropertyOverride custom_flatten: 'templates/terraform/custom_flatten/http_headers.erb' diff --git a/products/compute/api.yaml b/products/compute/api.yaml index b213d130787c..2e8fb6053046 100644 --- a/products/compute/api.yaml +++ b/products/compute/api.yaml @@ -461,10 +461,10 @@ objects: properties: - !ruby/object:Api::Type::Integer name: 'signedUrlCacheMaxAgeSec' - default_value: 3600 + required: true description: | Maximum number of seconds the response to a signed URL request will - be considered fresh. Defaults to 1hr (3600s). After this time period, + be considered fresh. After this time period, the response will be revalidated before being served. When serving responses to signed URL requests, Cloud CDN will internally behave as though @@ -665,6 +665,7 @@ objects: Note that you must specify an Instance Group or Network Endpoint Group resource using the fully-qualified URL, rather than a partial URL. + required: true - !ruby/object:Api::Type::Integer name: 'maxConnections' description: | @@ -734,6 +735,13 @@ objects: properties: - !ruby/object:Api::Type::NestedObject name: 'connectTimeout' + at_least_one_of: + - circuit_breakers.0.connect_timeout + - circuit_breakers.0.max_requests_per_connection + - circuit_breakers.0.max_connections + - circuit_breakers.0.max_pending_requests + - circuit_breakers.0.max_requests + - circuit_breakers.0.max_retries description: | The timeout for new network connections to hosts. properties: @@ -752,6 +760,13 @@ objects: be from 0 to 999,999,999 inclusive. - !ruby/object:Api::Type::Integer name: 'maxRequestsPerConnection' + at_least_one_of: + - circuit_breakers.0.connect_timeout + - circuit_breakers.0.max_requests_per_connection + - circuit_breakers.0.max_connections + - circuit_breakers.0.max_pending_requests + - circuit_breakers.0.max_requests + - circuit_breakers.0.max_retries description: | Maximum requests for a single backend connection. This parameter is respected by both the HTTP/1.1 and HTTP/2 implementations. 
If @@ -760,24 +775,52 @@ objects: - !ruby/object:Api::Type::Integer name: 'maxConnections' default_value: 1024 + at_least_one_of: + - circuit_breakers.0.connect_timeout + - circuit_breakers.0.max_requests_per_connection + - circuit_breakers.0.max_connections + - circuit_breakers.0.max_pending_requests + - circuit_breakers.0.max_requests + - circuit_breakers.0.max_retries description: | The maximum number of connections to the backend cluster. Defaults to 1024. - !ruby/object:Api::Type::Integer name: 'maxPendingRequests' default_value: 1024 + at_least_one_of: + - circuit_breakers.0.connect_timeout + - circuit_breakers.0.max_requests_per_connection + - circuit_breakers.0.max_connections + - circuit_breakers.0.max_pending_requests + - circuit_breakers.0.max_requests + - circuit_breakers.0.max_retries description: | The maximum number of pending requests to the backend cluster. Defaults to 1024. - !ruby/object:Api::Type::Integer name: 'maxRequests' default_value: 1024 + at_least_one_of: + - circuit_breakers.0.connect_timeout + - circuit_breakers.0.max_requests_per_connection + - circuit_breakers.0.max_connections + - circuit_breakers.0.max_pending_requests + - circuit_breakers.0.max_requests + - circuit_breakers.0.max_retries description: | The maximum number of parallel requests to the backend cluster. Defaults to 1024. - !ruby/object:Api::Type::Integer name: 'maxRetries' default_value: 3 + at_least_one_of: + - circuit_breakers.0.connect_timeout + - circuit_breakers.0.max_requests_per_connection + - circuit_breakers.0.max_connections + - circuit_breakers.0.max_pending_requests + - circuit_breakers.0.max_requests + - circuit_breakers.0.max_retries description: | The maximum number of parallel retries to the backend cluster. Defaults to 3. @@ -796,6 +839,10 @@ objects: properties: - !ruby/object:Api::Type::NestedObject name: 'httpCookie' + at_least_one_of: + - consistent_hash.0.http_cookie + - consistent_hash.0.http_header_name + - consistent_hash.0.minimum_ring_size description: | Hash is based on HTTP Cookie. This field describes a HTTP cookie that will be used as the hash key for the consistent hash load @@ -804,6 +851,10 @@ objects: properties: - !ruby/object:Api::Type::NestedObject name: 'ttl' + at_least_one_of: + - consistent_hash.0.http_cookie.0.ttl + - consistent_hash.0.http_cookie.0.name + - consistent_hash.0.http_cookie.0.path description: | Lifetime of the cookie. properties: @@ -822,20 +873,36 @@ objects: be from 0 to 999,999,999 inclusive. - !ruby/object:Api::Type::String name: 'name' + at_least_one_of: + - consistent_hash.0.http_cookie.0.ttl + - consistent_hash.0.http_cookie.0.name + - consistent_hash.0.http_cookie.0.path description: | Name of the cookie. - !ruby/object:Api::Type::String name: 'path' + at_least_one_of: + - consistent_hash.0.http_cookie.0.ttl + - consistent_hash.0.http_cookie.0.name + - consistent_hash.0.http_cookie.0.path description: | Path to set for the cookie. - !ruby/object:Api::Type::String name: 'httpHeaderName' + at_least_one_of: + - consistent_hash.0.http_cookie + - consistent_hash.0.http_header_name + - consistent_hash.0.minimum_ring_size description: | The hash based on the value of the specified header field. This field is applicable if the sessionAffinity is set to HEADER_FIELD. 
- !ruby/object:Api::Type::Integer name: 'minimumRingSize' default_value: 1024 + at_least_one_of: + - consistent_hash.0.http_cookie + - consistent_hash.0.http_header_name + - consistent_hash.0.minimum_ring_size description: | The minimum number of virtual nodes to use for the hash ring. Larger ring sizes result in more granular load @@ -850,17 +917,38 @@ objects: - !ruby/object:Api::Type::NestedObject name: 'cacheKeyPolicy' description: 'The CacheKeyPolicy for this CdnPolicy.' + at_least_one_of: + - cdn_policy.0.cache_key_policy + - cdn_policy.0.signed_url_cache_max_age_sec properties: - !ruby/object:Api::Type::Boolean name: 'includeHost' + at_least_one_of: + - cdn_policy.0.cache_key_policy.0.include_host + - cdn_policy.0.cache_key_policy.0.include_protocol + - cdn_policy.0.cache_key_policy.0.include_query_string + - cdn_policy.0.cache_key_policy.0.query_string_blacklist + - cdn_policy.0.cache_key_policy.0.query_string_whitelist description: | If true requests to different hosts will be cached separately. - !ruby/object:Api::Type::Boolean name: 'includeProtocol' + at_least_one_of: + - cdn_policy.0.cache_key_policy.0.include_host + - cdn_policy.0.cache_key_policy.0.include_protocol + - cdn_policy.0.cache_key_policy.0.include_query_string + - cdn_policy.0.cache_key_policy.0.query_string_blacklist + - cdn_policy.0.cache_key_policy.0.query_string_whitelist description: | If true, http and https requests will be cached separately. - !ruby/object:Api::Type::Boolean name: 'includeQueryString' + at_least_one_of: + - cdn_policy.0.cache_key_policy.0.include_host + - cdn_policy.0.cache_key_policy.0.include_protocol + - cdn_policy.0.cache_key_policy.0.include_query_string + - cdn_policy.0.cache_key_policy.0.query_string_blacklist + - cdn_policy.0.cache_key_policy.0.query_string_whitelist description: | If true, include query string parameters in the cache key according to query_string_whitelist and @@ -871,6 +959,12 @@ objects: key entirely. - !ruby/object:Api::Type::Array name: 'queryStringBlacklist' + at_least_one_of: + - cdn_policy.0.cache_key_policy.0.include_host + - cdn_policy.0.cache_key_policy.0.include_protocol + - cdn_policy.0.cache_key_policy.0.include_query_string + - cdn_policy.0.cache_key_policy.0.query_string_blacklist + - cdn_policy.0.cache_key_policy.0.query_string_whitelist description: | Names of query string parameters to exclude in cache keys. @@ -881,6 +975,12 @@ objects: item_type: Api::Type::String - !ruby/object:Api::Type::Array name: 'queryStringWhitelist' + at_least_one_of: + - cdn_policy.0.cache_key_policy.0.include_host + - cdn_policy.0.cache_key_policy.0.include_protocol + - cdn_policy.0.cache_key_policy.0.include_query_string + - cdn_policy.0.cache_key_policy.0.query_string_blacklist + - cdn_policy.0.cache_key_policy.0.query_string_whitelist description: | Names of query string parameters to include in cache keys. @@ -892,6 +992,9 @@ objects: - !ruby/object:Api::Type::Integer name: 'signedUrlCacheMaxAgeSec' default_value: 3600 + at_least_one_of: + - cdn_policy.0.cache_key_policy + - cdn_policy.0.signed_url_cache_max_age_sec description: | Maximum number of seconds the response to a signed URL request will be considered fresh, defaults to 1hr (3600s). 
After this @@ -1056,6 +1159,18 @@ objects: properties: - !ruby/object:Api::Type::NestedObject name: 'baseEjectionTime' + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor description: | The base time that a host is ejected for. The real time is equal to the base time multiplied by the number of times the host has been ejected. Defaults to @@ -1075,6 +1190,18 @@ objects: `nanos` field. Must be from 0 to 999,999,999 inclusive. - !ruby/object:Api::Type::Integer name: 'consecutiveErrors' + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor default_value: 5 description: | Number of errors before a host is ejected from the connection pool. When the @@ -1082,6 +1209,18 @@ objects: Defaults to 5. - !ruby/object:Api::Type::Integer name: 'consecutiveGatewayFailure' + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor default_value: 5 description: | The number of consecutive gateway failures (502, 503, 504 status or connection @@ -1089,6 +1228,18 @@ objects: gateway failure ejection occurs. Defaults to 5. - !ruby/object:Api::Type::Integer name: 'enforcingConsecutiveErrors' + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor default_value: 100 description: | The percentage chance that a host will be actually ejected when an outlier @@ -1096,6 +1247,18 @@ objects: ejection or to ramp it up slowly. Defaults to 100. 
- !ruby/object:Api::Type::Integer name: 'enforcingConsecutiveGatewayFailure' + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor default_value: 0 description: | The percentage chance that a host will be actually ejected when an outlier @@ -1103,6 +1266,18 @@ objects: used to disable ejection or to ramp it up slowly. Defaults to 0. - !ruby/object:Api::Type::Integer name: 'enforcingSuccessRate' + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor default_value: 100 description: | The percentage chance that a host will be actually ejected when an outlier @@ -1110,6 +1285,18 @@ objects: disable ejection or to ramp it up slowly. Defaults to 100. - !ruby/object:Api::Type::NestedObject name: 'interval' + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor description: | Time interval between ejection sweep analysis. This can result in both new ejections as well as hosts being returned to service. Defaults to 10 seconds. @@ -1128,12 +1315,36 @@ objects: `nanos` field. Must be from 0 to 999,999,999 inclusive. - !ruby/object:Api::Type::Integer name: 'maxEjectionPercent' + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor default_value: 10 description: | Maximum percentage of hosts in the load balancing pool for the backend service that can be ejected. Defaults to 10%. 
- !ruby/object:Api::Type::Integer name: 'successRateMinimumHosts' + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor default_value: 5 description: | The number of hosts in a cluster that must have enough request volume to detect @@ -1142,6 +1353,18 @@ objects: cluster. Defaults to 5. - !ruby/object:Api::Type::Integer name: 'successRateRequestVolume' + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor default_value: 100 description: | The minimum number of total requests that must be collected in one interval (as @@ -1151,6 +1374,18 @@ objects: to 100. - !ruby/object:Api::Type::Integer name: 'successRateStdevFactor' + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor default_value: 1900 description: | This factor is used to determine the ejection threshold for success rate outlier @@ -1214,10 +1449,16 @@ objects: properties: - !ruby/object:Api::Type::Boolean name: 'enable' + at_least_one_of: + - log_config.0.enable + - log_config.0.sample_rate description: | Whether to enable logging for the load balancer traffic served by this backend service. - !ruby/object:Api::Type::Double name: 'sampleRate' + at_least_one_of: + - log_config.0.enable + - log_config.0.sample_rate description: | This field can only be specified if logging is enabled for this backend service. The value of the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer @@ -1398,6 +1639,13 @@ objects: properties: - !ruby/object:Api::Type::NestedObject name: 'connectTimeout' + at_least_one_of: + - circuit_breakers.0.connect_timeout + - circuit_breakers.0.max_requests_per_connection + - circuit_breakers.0.max_connections + - circuit_breakers.0.max_pending_requests + - circuit_breakers.0.max_requests + - circuit_breakers.0.max_retries description: | The timeout for new network connections to hosts. properties: @@ -1416,6 +1664,13 @@ objects: be from 0 to 999,999,999 inclusive. 
- !ruby/object:Api::Type::Integer name: 'maxRequestsPerConnection' + at_least_one_of: + - circuit_breakers.0.connect_timeout + - circuit_breakers.0.max_requests_per_connection + - circuit_breakers.0.max_connections + - circuit_breakers.0.max_pending_requests + - circuit_breakers.0.max_requests + - circuit_breakers.0.max_retries description: | Maximum requests for a single backend connection. This parameter is respected by both the HTTP/1.1 and HTTP/2 implementations. If @@ -1424,24 +1679,52 @@ objects: - !ruby/object:Api::Type::Integer name: 'maxConnections' default_value: 1024 + at_least_one_of: + - circuit_breakers.0.connect_timeout + - circuit_breakers.0.max_requests_per_connection + - circuit_breakers.0.max_connections + - circuit_breakers.0.max_pending_requests + - circuit_breakers.0.max_requests + - circuit_breakers.0.max_retries description: | The maximum number of connections to the backend cluster. Defaults to 1024. - !ruby/object:Api::Type::Integer name: 'maxPendingRequests' default_value: 1024 + at_least_one_of: + - circuit_breakers.0.connect_timeout + - circuit_breakers.0.max_requests_per_connection + - circuit_breakers.0.max_connections + - circuit_breakers.0.max_pending_requests + - circuit_breakers.0.max_requests + - circuit_breakers.0.max_retries description: | The maximum number of pending requests to the backend cluster. Defaults to 1024. - !ruby/object:Api::Type::Integer name: 'maxRequests' default_value: 1024 + at_least_one_of: + - circuit_breakers.0.connect_timeout + - circuit_breakers.0.max_requests_per_connection + - circuit_breakers.0.max_connections + - circuit_breakers.0.max_pending_requests + - circuit_breakers.0.max_requests + - circuit_breakers.0.max_retries description: | The maximum number of parallel requests to the backend cluster. Defaults to 1024. - !ruby/object:Api::Type::Integer name: 'maxRetries' default_value: 3 + at_least_one_of: + - circuit_breakers.0.connect_timeout + - circuit_breakers.0.max_requests_per_connection + - circuit_breakers.0.max_connections + - circuit_breakers.0.max_pending_requests + - circuit_breakers.0.max_requests + - circuit_breakers.0.max_retries description: | The maximum number of parallel retries to the backend cluster. Defaults to 3. @@ -1462,6 +1745,10 @@ objects: properties: - !ruby/object:Api::Type::NestedObject name: 'httpCookie' + at_least_one_of: + - consistent_hash.0.http_cookie + - consistent_hash.0.http_header_name + - consistent_hash.0.minimum_ring_size description: | Hash is based on HTTP Cookie. This field describes a HTTP cookie that will be used as the hash key for the consistent hash load @@ -1470,6 +1757,10 @@ objects: properties: - !ruby/object:Api::Type::NestedObject name: 'ttl' + at_least_one_of: + - consistent_hash.0.http_cookie.0.ttl + - consistent_hash.0.http_cookie.0.name + - consistent_hash.0.http_cookie.0.path description: | Lifetime of the cookie. properties: @@ -1488,20 +1779,36 @@ objects: be from 0 to 999,999,999 inclusive. - !ruby/object:Api::Type::String name: 'name' + at_least_one_of: + - consistent_hash.0.http_cookie.0.ttl + - consistent_hash.0.http_cookie.0.name + - consistent_hash.0.http_cookie.0.path description: | Name of the cookie. - !ruby/object:Api::Type::String name: 'path' + at_least_one_of: + - consistent_hash.0.http_cookie.0.ttl + - consistent_hash.0.http_cookie.0.name + - consistent_hash.0.http_cookie.0.path description: | Path to set for the cookie. 
- !ruby/object:Api::Type::String name: 'httpHeaderName' + at_least_one_of: + - consistent_hash.0.http_cookie + - consistent_hash.0.http_header_name + - consistent_hash.0.minimum_ring_size description: | The hash based on the value of the specified header field. This field is applicable if the sessionAffinity is set to HEADER_FIELD. - !ruby/object:Api::Type::Integer name: 'minimumRingSize' default_value: 1024 + at_least_one_of: + - consistent_hash.0.http_cookie + - consistent_hash.0.http_header_name + - consistent_hash.0.minimum_ring_size description: | The minimum number of virtual nodes to use for the hash ring. Larger ring sizes result in more granular load @@ -1538,6 +1845,10 @@ objects: properties: - !ruby/object:Api::Type::Boolean name: 'disableConnectionDrainOnFailover' + at_least_one_of: + - failover_policy.0.disable_connection_drain_on_failover + - failover_policy.0.drop_traffic_if_unhealthy + - failover_policy.0.failover_ratio description: | On failover or failback, this field indicates whether connection drain will be honored. Setting this to true has the following effect: connections @@ -1549,6 +1860,10 @@ objects: The default is false. - !ruby/object:Api::Type::Boolean name: 'dropTrafficIfUnhealthy' + at_least_one_of: + - failover_policy.0.disable_connection_drain_on_failover + - failover_policy.0.drop_traffic_if_unhealthy + - failover_policy.0.failover_ratio description: | This option is used only when no healthy VMs are detected in the primary and backup instance groups. When set to true, traffic is dropped. When @@ -1556,6 +1871,10 @@ objects: The default is false. - !ruby/object:Api::Type::Double name: 'failoverRatio' + at_least_one_of: + - failover_policy.0.disable_connection_drain_on_failover + - failover_policy.0.drop_traffic_if_unhealthy + - failover_policy.0.failover_ratio description: | The value of the field must be in [0, 1]. If the ratio of the healthy VMs in the primary backend is at or below this number, traffic arriving @@ -1660,6 +1979,18 @@ objects: properties: - !ruby/object:Api::Type::NestedObject name: 'baseEjectionTime' + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor description: | The base time that a host is ejected for. The real time is equal to the base time multiplied by the number of times the host has been ejected. Defaults to @@ -1680,6 +2011,18 @@ objects: - !ruby/object:Api::Type::Integer name: 'consecutiveErrors' default_value: 5 + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor description: | Number of errors before a host is ejected from the connection pool. 
When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. @@ -1687,6 +2030,18 @@ objects: - !ruby/object:Api::Type::Integer name: 'consecutiveGatewayFailure' default_value: 5 + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor description: | The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive @@ -1694,6 +2049,18 @@ objects: - !ruby/object:Api::Type::Integer name: 'enforcingConsecutiveErrors' default_value: 100 + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor description: | The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable @@ -1701,6 +2068,18 @@ objects: - !ruby/object:Api::Type::Integer name: 'enforcingConsecutiveGatewayFailure' default_value: 0 + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor description: | The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be @@ -1708,12 +2087,36 @@ objects: - !ruby/object:Api::Type::Integer name: 'enforcingSuccessRate' default_value: 100 + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor description: | The percentage chance that a host will be actually ejected when an outlier status is detected through success rate statistics. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. 
- !ruby/object:Api::Type::NestedObject name: 'interval' + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor description: | Time interval between ejection sweep analysis. This can result in both new ejections as well as hosts being returned to service. Defaults to 10 seconds. @@ -1733,12 +2136,36 @@ objects: - !ruby/object:Api::Type::Integer name: 'maxEjectionPercent' default_value: 10 + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor description: | Maximum percentage of hosts in the load balancing pool for the backend service that can be ejected. Defaults to 10%. - !ruby/object:Api::Type::Integer name: 'successRateMinimumHosts' default_value: 5 + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor description: | The number of hosts in a cluster that must have enough request volume to detect success rate outliers. 
If the number of hosts is less than this setting, outlier @@ -1747,6 +2174,18 @@ objects: - !ruby/object:Api::Type::Integer name: 'successRateRequestVolume' default_value: 100 + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor description: | The minimum number of total requests that must be collected in one interval (as defined by the interval duration above) to include this host in success rate @@ -1756,6 +2195,18 @@ objects: - !ruby/object:Api::Type::Integer name: 'successRateStdevFactor' default_value: 1900 + at_least_one_of: + - outlier_detection.0.base_ejection_time + - outlier_detection.0.consecutive_errors + - outlier_detection.0.consecutive_gateway_failure + - outlier_detection.0.enforcing_consecutive_errors + - outlier_detection.0.enforcing_consecutive_gateway_failure + - outlier_detection.0.enforcing_success_rate + - outlier_detection.0.interval + - outlier_detection.0.max_ejection_percent + - outlier_detection.0.success_rate_minimum_hosts + - outlier_detection.0.success_rate_request_volume + - outlier_detection.0.success_rate_stdev_factor description: | This factor is used to determine the ejection threshold for success rate outlier ejection. The ejection threshold is the difference between the mean success @@ -1807,10 +2258,16 @@ objects: properties: - !ruby/object:Api::Type::Boolean name: 'enable' + at_least_one_of: + - log_config.0.enable + - log_config.0.sample_rate description: | Whether to enable logging for the load balancer traffic served by this backend service. - !ruby/object:Api::Type::Double name: 'sampleRate' + at_least_one_of: + - log_config.0.enable + - log_config.0.sample_rate description: | This field can only be specified if logging is enabled for this backend service. The value of the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer @@ -2391,8 +2848,9 @@ objects: The list of ALLOW rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection. - conflicts: - - denied + exactly_one_of: + - allow + - deny item_type: !ruby/object:Api::Type::NestedObject properties: # IPProtocol has to be string, instead of Enum because user can @@ -2423,6 +2881,9 @@ objects: output: true - !ruby/object:Api::Type::Array name: 'denied' + exactly_one_of: + - allow + - deny description: | The list of DENY rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a denied connection. @@ -2717,16 +3178,6 @@ objects: description: | A BackendService to receive the matched traffic. This is used only for INTERNAL load balancing. - - !ruby/object:Api::Type::Enum - name: 'ipVersion' - deprecation_message: >- - ipVersion is not used for regional forwarding rules. Please remove - this field if you are using it. - description: | - ipVersion is not a valid field for regional forwarding rules. 
- values: - - :IPV4 - - :IPV6 - !ruby/object:Api::Type::Enum name: 'loadBalancingScheme' description: | @@ -3565,42 +4016,91 @@ objects: - :HTTP2 - !ruby/object:Api::Type::NestedObject name: 'httpHealthCheck' - conflicts: - - httpsHealthCheck - - http2HealthCheck - - tcpHealthCheck - - sslHealthCheck + exactly_one_of: + - http_health_check + - https_health_check + - http2_health_check + - tcp_health_check + - ssl_health_check properties: - !ruby/object:Api::Type::String name: 'host' + at_least_one_of: + - http_health_check.0.host + - http_health_check.0.request_path + - http_health_check.0.response + - http_health_check.0.port + - http_health_check.0.port_name + - http_health_check.0.proxy_header + - http_health_check.0.port_specification description: | The value of the host header in the HTTP health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used. - !ruby/object:Api::Type::String name: 'requestPath' + at_least_one_of: + - http_health_check.0.host + - http_health_check.0.request_path + - http_health_check.0.response + - http_health_check.0.port + - http_health_check.0.port_name + - http_health_check.0.proxy_header + - http_health_check.0.port_specification description: | The request path of the HTTP health check request. The default value is /. default_value: "/" - !ruby/object:Api::Type::String name: 'response' + at_least_one_of: + - http_health_check.0.host + - http_health_check.0.request_path + - http_health_check.0.response + - http_health_check.0.port + - http_health_check.0.port_name + - http_health_check.0.proxy_header + - http_health_check.0.port_specification description: | The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII. - !ruby/object:Api::Type::Integer name: 'port' + at_least_one_of: + - http_health_check.0.host + - http_health_check.0.request_path + - http_health_check.0.response + - http_health_check.0.port + - http_health_check.0.port_name + - http_health_check.0.proxy_header + - http_health_check.0.port_specification description: | The TCP port number for the HTTP health check request. The default value is 80. - !ruby/object:Api::Type::String name: 'portName' + at_least_one_of: + - http_health_check.0.host + - http_health_check.0.request_path + - http_health_check.0.response + - http_health_check.0.port + - http_health_check.0.port_name + - http_health_check.0.proxy_header + - http_health_check.0.port_specification description: | Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. - !ruby/object:Api::Type::Enum name: 'proxyHeader' + at_least_one_of: + - http_health_check.0.host + - http_health_check.0.request_path + - http_health_check.0.response + - http_health_check.0.port + - http_health_check.0.port_name + - http_health_check.0.proxy_header + - http_health_check.0.port_specification description: | Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. 
@@ -3610,6 +4110,14 @@ objects: default_value: :NONE - !ruby/object:Api::Type::Enum name: 'portSpecification' + at_least_one_of: + - http_health_check.0.host + - http_health_check.0.request_path + - http_health_check.0.response + - http_health_check.0.port + - http_health_check.0.port_name + - http_health_check.0.proxy_header + - http_health_check.0.port_specification description: | Specifies how port is selected for health checking, can be one of the following values: @@ -3631,42 +4139,91 @@ objects: - :USE_SERVING_PORT - !ruby/object:Api::Type::NestedObject name: 'httpsHealthCheck' - conflicts: - - httpHealthCheck - - http2HealthCheck - - tcpHealthCheck - - sslHealthCheck + exactly_one_of: + - http_health_check + - https_health_check + - http2_health_check + - tcp_health_check + - ssl_health_check properties: - !ruby/object:Api::Type::String name: 'host' + at_least_one_of: + - https_health_check.0.host + - https_health_check.0.request_path + - https_health_check.0.response + - https_health_check.0.port + - https_health_check.0.port_name + - https_health_check.0.proxy_header + - https_health_check.0.port_specification description: | The value of the host header in the HTTPS health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used. - !ruby/object:Api::Type::String name: 'requestPath' + at_least_one_of: + - https_health_check.0.host + - https_health_check.0.request_path + - https_health_check.0.response + - https_health_check.0.port + - https_health_check.0.port_name + - https_health_check.0.proxy_header + - https_health_check.0.port_specification description: | The request path of the HTTPS health check request. The default value is /. default_value: "/" - !ruby/object:Api::Type::String name: 'response' + at_least_one_of: + - https_health_check.0.host + - https_health_check.0.request_path + - https_health_check.0.response + - https_health_check.0.port + - https_health_check.0.port_name + - https_health_check.0.proxy_header + - https_health_check.0.port_specification description: | The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII. - !ruby/object:Api::Type::Integer name: 'port' + at_least_one_of: + - https_health_check.0.host + - https_health_check.0.request_path + - https_health_check.0.response + - https_health_check.0.port + - https_health_check.0.port_name + - https_health_check.0.proxy_header + - https_health_check.0.port_specification description: | The TCP port number for the HTTPS health check request. The default value is 443. - !ruby/object:Api::Type::String name: 'portName' + at_least_one_of: + - https_health_check.0.host + - https_health_check.0.request_path + - https_health_check.0.response + - https_health_check.0.port + - https_health_check.0.port_name + - https_health_check.0.proxy_header + - https_health_check.0.port_specification description: | Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. 
- !ruby/object:Api::Type::Enum name: 'proxyHeader' + at_least_one_of: + - https_health_check.0.host + - https_health_check.0.request_path + - https_health_check.0.response + - https_health_check.0.port + - https_health_check.0.port_name + - https_health_check.0.proxy_header + - https_health_check.0.port_specification description: | Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. @@ -3676,6 +4233,14 @@ objects: default_value: :NONE - !ruby/object:Api::Type::Enum name: 'portSpecification' + at_least_one_of: + - https_health_check.0.host + - https_health_check.0.request_path + - https_health_check.0.response + - https_health_check.0.port + - https_health_check.0.port_name + - https_health_check.0.proxy_header + - https_health_check.0.port_specification description: | Specifies how port is selected for health checking, can be one of the following values: @@ -3697,14 +4262,22 @@ objects: - :USE_SERVING_PORT - !ruby/object:Api::Type::NestedObject name: 'tcpHealthCheck' - conflicts: - - httpHealthCheck - - http2HealthCheck - - httpsHealthCheck - - sslHealthCheck + exactly_one_of: + - http_health_check + - https_health_check + - http2_health_check + - tcp_health_check + - ssl_health_check properties: - !ruby/object:Api::Type::String name: 'request' + at_least_one_of: + - tcp_health_check.0.request + - tcp_health_check.0.response + - tcp_health_check.0.port + - tcp_health_check.0.port_name + - tcp_health_check.0.proxy_header + - tcp_health_check.0.port_specification description: | The application data to send once the TCP connection has been established (default value is empty). If both request and response are @@ -3712,22 +4285,50 @@ objects: data can only be ASCII. - !ruby/object:Api::Type::String name: 'response' + at_least_one_of: + - tcp_health_check.0.request + - tcp_health_check.0.response + - tcp_health_check.0.port + - tcp_health_check.0.port_name + - tcp_health_check.0.proxy_header + - tcp_health_check.0.port_specification description: | The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII. - !ruby/object:Api::Type::Integer name: 'port' + at_least_one_of: + - tcp_health_check.0.request + - tcp_health_check.0.response + - tcp_health_check.0.port + - tcp_health_check.0.port_name + - tcp_health_check.0.proxy_header + - tcp_health_check.0.port_specification description: | The TCP port number for the TCP health check request. The default value is 443. - !ruby/object:Api::Type::String name: 'portName' + at_least_one_of: + - tcp_health_check.0.request + - tcp_health_check.0.response + - tcp_health_check.0.port + - tcp_health_check.0.port_name + - tcp_health_check.0.proxy_header + - tcp_health_check.0.port_specification description: | Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. - !ruby/object:Api::Type::Enum name: 'proxyHeader' + at_least_one_of: + - tcp_health_check.0.request + - tcp_health_check.0.response + - tcp_health_check.0.port + - tcp_health_check.0.port_name + - tcp_health_check.0.proxy_header + - tcp_health_check.0.port_specification description: | Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. 
@@ -3737,6 +4338,13 @@ objects: default_value: :NONE - !ruby/object:Api::Type::Enum name: 'portSpecification' + at_least_one_of: + - tcp_health_check.0.request + - tcp_health_check.0.response + - tcp_health_check.0.port + - tcp_health_check.0.port_name + - tcp_health_check.0.proxy_header + - tcp_health_check.0.port_specification description: | Specifies how port is selected for health checking, can be one of the following values: @@ -3758,14 +4366,22 @@ objects: - :USE_SERVING_PORT - !ruby/object:Api::Type::NestedObject name: 'sslHealthCheck' - conflicts: - - httpHealthCheck - - http2HealthCheck - - httpsHealthCheck - - tcpHealthCheck + exactly_one_of: + - http_health_check + - https_health_check + - http2_health_check + - tcp_health_check + - ssl_health_check properties: - !ruby/object:Api::Type::String name: 'request' + at_least_one_of: + - ssl_health_check.0.request + - ssl_health_check.0.response + - ssl_health_check.0.port + - ssl_health_check.0.port_name + - ssl_health_check.0.proxy_header + - ssl_health_check.0.port_specification description: | The application data to send once the SSL connection has been established (default value is empty). If both request and response are @@ -3773,22 +4389,50 @@ objects: data can only be ASCII. - !ruby/object:Api::Type::String name: 'response' + at_least_one_of: + - ssl_health_check.0.request + - ssl_health_check.0.response + - ssl_health_check.0.port + - ssl_health_check.0.port_name + - ssl_health_check.0.proxy_header + - ssl_health_check.0.port_specification description: | The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII. - !ruby/object:Api::Type::Integer name: 'port' + at_least_one_of: + - ssl_health_check.0.request + - ssl_health_check.0.response + - ssl_health_check.0.port + - ssl_health_check.0.port_name + - ssl_health_check.0.proxy_header + - ssl_health_check.0.port_specification description: | The TCP port number for the SSL health check request. The default value is 443. - !ruby/object:Api::Type::String name: 'portName' + at_least_one_of: + - ssl_health_check.0.request + - ssl_health_check.0.response + - ssl_health_check.0.port + - ssl_health_check.0.port_name + - ssl_health_check.0.proxy_header + - ssl_health_check.0.port_specification description: | Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. - !ruby/object:Api::Type::Enum name: 'proxyHeader' + at_least_one_of: + - ssl_health_check.0.request + - ssl_health_check.0.response + - ssl_health_check.0.port + - ssl_health_check.0.port_name + - ssl_health_check.0.proxy_header + - ssl_health_check.0.port_specification description: | Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. 
@@ -3798,6 +4442,13 @@ objects: default_value: :NONE - !ruby/object:Api::Type::Enum name: 'portSpecification' + at_least_one_of: + - ssl_health_check.0.request + - ssl_health_check.0.response + - ssl_health_check.0.port + - ssl_health_check.0.port_name + - ssl_health_check.0.proxy_header + - ssl_health_check.0.port_specification description: | Specifies how port is selected for health checking, can be one of the following values: @@ -3819,42 +4470,91 @@ objects: - :USE_SERVING_PORT - !ruby/object:Api::Type::NestedObject name: 'http2HealthCheck' - conflicts: - - httpHealthCheck - - sslHealthCheck - - httpsHealthCheck - - tcpHealthCheck + exactly_one_of: + - http_health_check + - https_health_check + - http2_health_check + - tcp_health_check + - ssl_health_check properties: - !ruby/object:Api::Type::String name: 'host' + at_least_one_of: + - http2_health_check.0.host + - http2_health_check.0.request_path + - http2_health_check.0.response + - http2_health_check.0.port + - http2_health_check.0.port_name + - http2_health_check.0.proxy_header + - http2_health_check.0.port_specification description: | The value of the host header in the HTTP2 health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used. - !ruby/object:Api::Type::String name: 'requestPath' + at_least_one_of: + - http2_health_check.0.host + - http2_health_check.0.request_path + - http2_health_check.0.response + - http2_health_check.0.port + - http2_health_check.0.port_name + - http2_health_check.0.proxy_header + - http2_health_check.0.port_specification description: | The request path of the HTTP2 health check request. The default value is /. default_value: "/" - !ruby/object:Api::Type::String name: 'response' + at_least_one_of: + - http2_health_check.0.host + - http2_health_check.0.request_path + - http2_health_check.0.response + - http2_health_check.0.port + - http2_health_check.0.port_name + - http2_health_check.0.proxy_header + - http2_health_check.0.port_specification description: | The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII. - !ruby/object:Api::Type::Integer name: 'port' + at_least_one_of: + - http2_health_check.0.host + - http2_health_check.0.request_path + - http2_health_check.0.response + - http2_health_check.0.port + - http2_health_check.0.port_name + - http2_health_check.0.proxy_header + - http2_health_check.0.port_specification description: | The TCP port number for the HTTP2 health check request. The default value is 443. - !ruby/object:Api::Type::String name: 'portName' + at_least_one_of: + - http2_health_check.0.host + - http2_health_check.0.request_path + - http2_health_check.0.response + - http2_health_check.0.port + - http2_health_check.0.port_name + - http2_health_check.0.proxy_header + - http2_health_check.0.port_specification description: | Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. - !ruby/object:Api::Type::Enum name: 'proxyHeader' + at_least_one_of: + - http2_health_check.0.host + - http2_health_check.0.request_path + - http2_health_check.0.response + - http2_health_check.0.port + - http2_health_check.0.port_name + - http2_health_check.0.proxy_header + - http2_health_check.0.port_specification description: | Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. 
@@ -3864,6 +4564,14 @@ objects: default_value: :NONE - !ruby/object:Api::Type::Enum name: 'portSpecification' + at_least_one_of: + - http2_health_check.0.host + - http2_health_check.0.request_path + - http2_health_check.0.response + - http2_health_check.0.port + - http2_health_check.0.port_name + - http2_health_check.0.proxy_header + - http2_health_check.0.port_specification description: | Specifies how port is selected for health checking, can be one of the following values: @@ -4546,6 +5254,7 @@ objects: properties: - !ruby/object:Api::Type::Enum name: 'type' + required: true description: | The type of supported feature. Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options. values: @@ -5933,25 +6642,6 @@ objects: name: 'id' description: 'The unique identifier for the resource.' output: true - - !ruby/object:Api::Type::String - name: 'ipv4_range' - # We override this in api.yaml so that the name is more aesthetic - api_name: 'IPv4Range' - deprecation_message: >- - Legacy Networks are deprecated and you will no longer be able to - create them using this field from Feb 1, 2020 onwards. - input: true - conflicts: - - autoCreateSubnetworks - description: | - If this field is specified, a deprecated legacy network is created. - You will no longer be able to create a legacy network on Feb 1, 2020. - See the [legacy network docs](https://cloud.google.com/vpc/docs/legacy) - for more details. - - The range of internal addresses that are legal on this legacy network. - This range is a CIDR specification, for example: `192.168.0.0/16`. - The resource must be recreated to modify this field. - !ruby/object:Api::Type::String name: 'name' description: | @@ -6341,10 +7031,16 @@ objects: properties: - !ruby/object:Api::Type::String name: cpus + at_least_one_of: + - node_type_flexibility.0.cpus + - node_type_flexibility.0.memory description: | Number of virtual CPUs to use. - !ruby/object:Api::Type::String name: memory + at_least_one_of: + - node_type_flexibility.0.cpus + - node_type_flexibility.0.memory description: | Physical memory available to the node, defined in MB. - !ruby/object:Api::Type::String @@ -7340,42 +8036,91 @@ objects: - :HTTP2 - !ruby/object:Api::Type::NestedObject name: 'httpHealthCheck' - conflicts: - - httpsHealthCheck - - http2HealthCheck - - tcpHealthCheck - - sslHealthCheck + exactly_one_of: + - http_health_check + - https_health_check + - http2_health_check + - tcp_health_check + - ssl_health_check properties: - !ruby/object:Api::Type::String name: 'host' + at_least_one_of: + - http_health_check.0.host + - http_health_check.0.request_path + - http_health_check.0.response + - http_health_check.0.port + - http_health_check.0.port_name + - http_health_check.0.proxy_header + - http_health_check.0.port_specification description: | The value of the host header in the HTTP health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used. - !ruby/object:Api::Type::String name: 'requestPath' + at_least_one_of: + - http_health_check.0.host + - http_health_check.0.request_path + - http_health_check.0.response + - http_health_check.0.port + - http_health_check.0.port_name + - http_health_check.0.proxy_header + - http_health_check.0.port_specification description: | The request path of the HTTP health check request. The default value is /. 
default_value: "/" - !ruby/object:Api::Type::String name: 'response' + at_least_one_of: + - http_health_check.0.host + - http_health_check.0.request_path + - http_health_check.0.response + - http_health_check.0.port + - http_health_check.0.port_name + - http_health_check.0.proxy_header + - http_health_check.0.port_specification description: | The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII. - !ruby/object:Api::Type::Integer name: 'port' + at_least_one_of: + - http_health_check.0.host + - http_health_check.0.request_path + - http_health_check.0.response + - http_health_check.0.port + - http_health_check.0.port_name + - http_health_check.0.proxy_header + - http_health_check.0.port_specification description: | The TCP port number for the HTTP health check request. The default value is 80. - !ruby/object:Api::Type::String name: 'portName' + at_least_one_of: + - http_health_check.0.host + - http_health_check.0.request_path + - http_health_check.0.response + - http_health_check.0.port + - http_health_check.0.port_name + - http_health_check.0.proxy_header + - http_health_check.0.port_specification description: | Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. - !ruby/object:Api::Type::Enum name: 'proxyHeader' + at_least_one_of: + - http_health_check.0.host + - http_health_check.0.request_path + - http_health_check.0.response + - http_health_check.0.port + - http_health_check.0.port_name + - http_health_check.0.proxy_header + - http_health_check.0.port_specification description: | Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. @@ -7385,6 +8130,14 @@ objects: default_value: :NONE - !ruby/object:Api::Type::Enum name: 'portSpecification' + at_least_one_of: + - http_health_check.0.host + - http_health_check.0.request_path + - http_health_check.0.response + - http_health_check.0.port + - http_health_check.0.port_name + - http_health_check.0.proxy_header + - http_health_check.0.port_specification description: | Specifies how port is selected for health checking, can be one of the following values: @@ -7406,42 +8159,91 @@ objects: - :USE_SERVING_PORT - !ruby/object:Api::Type::NestedObject name: 'httpsHealthCheck' - conflicts: - - httpHealthCheck - - http2HealthCheck - - tcpHealthCheck - - sslHealthCheck + exactly_one_of: + - http_health_check + - https_health_check + - http2_health_check + - tcp_health_check + - ssl_health_check properties: - !ruby/object:Api::Type::String name: 'host' + at_least_one_of: + - https_health_check.0.host + - https_health_check.0.request_path + - https_health_check.0.response + - https_health_check.0.port + - https_health_check.0.port_name + - https_health_check.0.proxy_header + - https_health_check.0.port_specification description: | The value of the host header in the HTTPS health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used. - !ruby/object:Api::Type::String name: 'requestPath' + at_least_one_of: + - https_health_check.0.host + - https_health_check.0.request_path + - https_health_check.0.response + - https_health_check.0.port + - https_health_check.0.port_name + - https_health_check.0.proxy_header + - https_health_check.0.port_specification description: | The request path of the HTTPS health check request. The default value is /. 
default_value: "/" - !ruby/object:Api::Type::String name: 'response' + at_least_one_of: + - https_health_check.0.host + - https_health_check.0.request_path + - https_health_check.0.response + - https_health_check.0.port + - https_health_check.0.port_name + - https_health_check.0.proxy_header + - https_health_check.0.port_specification description: | The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII. - !ruby/object:Api::Type::Integer name: 'port' + at_least_one_of: + - https_health_check.0.host + - https_health_check.0.request_path + - https_health_check.0.response + - https_health_check.0.port + - https_health_check.0.port_name + - https_health_check.0.proxy_header + - https_health_check.0.port_specification description: | The TCP port number for the HTTPS health check request. The default value is 443. - !ruby/object:Api::Type::String name: 'portName' + at_least_one_of: + - https_health_check.0.host + - https_health_check.0.request_path + - https_health_check.0.response + - https_health_check.0.port + - https_health_check.0.port_name + - https_health_check.0.proxy_header + - https_health_check.0.port_specification description: | Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. - !ruby/object:Api::Type::Enum name: 'proxyHeader' + at_least_one_of: + - https_health_check.0.host + - https_health_check.0.request_path + - https_health_check.0.response + - https_health_check.0.port + - https_health_check.0.port_name + - https_health_check.0.proxy_header + - https_health_check.0.port_specification description: | Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. @@ -7451,6 +8253,14 @@ objects: default_value: :NONE - !ruby/object:Api::Type::Enum name: 'portSpecification' + at_least_one_of: + - https_health_check.0.host + - https_health_check.0.request_path + - https_health_check.0.response + - https_health_check.0.port + - https_health_check.0.port_name + - https_health_check.0.proxy_header + - https_health_check.0.port_specification description: | Specifies how port is selected for health checking, can be one of the following values: @@ -7472,14 +8282,22 @@ objects: - :USE_SERVING_PORT - !ruby/object:Api::Type::NestedObject name: 'tcpHealthCheck' - conflicts: - - httpHealthCheck - - http2HealthCheck - - httpsHealthCheck - - sslHealthCheck + exactly_one_of: + - http_health_check + - https_health_check + - http2_health_check + - tcp_health_check + - ssl_health_check properties: - !ruby/object:Api::Type::String name: 'request' + at_least_one_of: + - tcp_health_check.0.request + - tcp_health_check.0.response + - tcp_health_check.0.port + - tcp_health_check.0.port_name + - tcp_health_check.0.proxy_header + - tcp_health_check.0.port_specification description: | The application data to send once the TCP connection has been established (default value is empty). If both request and response are @@ -7487,22 +8305,50 @@ objects: data can only be ASCII. - !ruby/object:Api::Type::String name: 'response' + at_least_one_of: + - tcp_health_check.0.request + - tcp_health_check.0.response + - tcp_health_check.0.port + - tcp_health_check.0.port_name + - tcp_health_check.0.proxy_header + - tcp_health_check.0.port_specification description: | The bytes to match against the beginning of the response data. 
If left empty (the default value), any response will indicate health. The response data can only be ASCII. - !ruby/object:Api::Type::Integer name: 'port' + at_least_one_of: + - tcp_health_check.0.request + - tcp_health_check.0.response + - tcp_health_check.0.port + - tcp_health_check.0.port_name + - tcp_health_check.0.proxy_header + - tcp_health_check.0.port_specification description: | The TCP port number for the TCP health check request. The default value is 80. - !ruby/object:Api::Type::String name: 'portName' + at_least_one_of: + - tcp_health_check.0.request + - tcp_health_check.0.response + - tcp_health_check.0.port + - tcp_health_check.0.port_name + - tcp_health_check.0.proxy_header + - tcp_health_check.0.port_specification description: | Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. - !ruby/object:Api::Type::Enum name: 'proxyHeader' + at_least_one_of: + - tcp_health_check.0.request + - tcp_health_check.0.response + - tcp_health_check.0.port + - tcp_health_check.0.port_name + - tcp_health_check.0.proxy_header + - tcp_health_check.0.port_specification description: | Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. @@ -7512,6 +8358,13 @@ objects: default_value: :NONE - !ruby/object:Api::Type::Enum name: 'portSpecification' + at_least_one_of: + - tcp_health_check.0.request + - tcp_health_check.0.response + - tcp_health_check.0.port + - tcp_health_check.0.port_name + - tcp_health_check.0.proxy_header + - tcp_health_check.0.port_specification description: | Specifies how port is selected for health checking, can be one of the following values: @@ -7533,14 +8386,22 @@ objects: - :USE_SERVING_PORT - !ruby/object:Api::Type::NestedObject name: 'sslHealthCheck' - conflicts: - - httpHealthCheck - - http2HealthCheck - - httpsHealthCheck - - tcpHealthCheck + exactly_one_of: + - http_health_check + - https_health_check + - http2_health_check + - tcp_health_check + - ssl_health_check properties: - !ruby/object:Api::Type::String name: 'request' + at_least_one_of: + - ssl_health_check.0.request + - ssl_health_check.0.response + - ssl_health_check.0.port + - ssl_health_check.0.port_name + - ssl_health_check.0.proxy_header + - ssl_health_check.0.port_specification description: | The application data to send once the SSL connection has been established (default value is empty). If both request and response are @@ -7548,22 +8409,50 @@ objects: data can only be ASCII. - !ruby/object:Api::Type::String name: 'response' + at_least_one_of: + - ssl_health_check.0.request + - ssl_health_check.0.response + - ssl_health_check.0.port + - ssl_health_check.0.port_name + - ssl_health_check.0.proxy_header + - ssl_health_check.0.port_specification description: | The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII. - !ruby/object:Api::Type::Integer name: 'port' + at_least_one_of: + - ssl_health_check.0.request + - ssl_health_check.0.response + - ssl_health_check.0.port + - ssl_health_check.0.port_name + - ssl_health_check.0.proxy_header + - ssl_health_check.0.port_specification description: | The TCP port number for the SSL health check request. The default value is 443. 
- !ruby/object:Api::Type::String name: 'portName' + at_least_one_of: + - ssl_health_check.0.request + - ssl_health_check.0.response + - ssl_health_check.0.port + - ssl_health_check.0.port_name + - ssl_health_check.0.proxy_header + - ssl_health_check.0.port_specification description: | Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. - !ruby/object:Api::Type::Enum name: 'proxyHeader' + at_least_one_of: + - ssl_health_check.0.request + - ssl_health_check.0.response + - ssl_health_check.0.port + - ssl_health_check.0.port_name + - ssl_health_check.0.proxy_header + - ssl_health_check.0.port_specification description: | Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. @@ -7573,6 +8462,13 @@ objects: default_value: :NONE - !ruby/object:Api::Type::Enum name: 'portSpecification' + at_least_one_of: + - ssl_health_check.0.request + - ssl_health_check.0.response + - ssl_health_check.0.port + - ssl_health_check.0.port_name + - ssl_health_check.0.proxy_header + - ssl_health_check.0.port_specification description: | Specifies how port is selected for health checking, can be one of the following values: @@ -7594,42 +8490,91 @@ objects: - :USE_SERVING_PORT - !ruby/object:Api::Type::NestedObject name: 'http2HealthCheck' - conflicts: - - httpHealthCheck - - sslHealthCheck - - httpsHealthCheck - - tcpHealthCheck + exactly_one_of: + - http_health_check + - https_health_check + - http2_health_check + - tcp_health_check + - ssl_health_check properties: - !ruby/object:Api::Type::String name: 'host' + at_least_one_of: + - http2_health_check.0.host + - http2_health_check.0.request_path + - http2_health_check.0.response + - http2_health_check.0.port + - http2_health_check.0.port_name + - http2_health_check.0.proxy_header + - http2_health_check.0.port_specification description: | The value of the host header in the HTTP2 health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used. - !ruby/object:Api::Type::String name: 'requestPath' + at_least_one_of: + - http2_health_check.0.host + - http2_health_check.0.request_path + - http2_health_check.0.response + - http2_health_check.0.port + - http2_health_check.0.port_name + - http2_health_check.0.proxy_header + - http2_health_check.0.port_specification description: | The request path of the HTTP2 health check request. The default value is /. default_value: "/" - !ruby/object:Api::Type::String name: 'response' + at_least_one_of: + - http2_health_check.0.host + - http2_health_check.0.request_path + - http2_health_check.0.response + - http2_health_check.0.port + - http2_health_check.0.port_name + - http2_health_check.0.proxy_header + - http2_health_check.0.port_specification description: | The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII. - !ruby/object:Api::Type::Integer name: 'port' + at_least_one_of: + - http2_health_check.0.host + - http2_health_check.0.request_path + - http2_health_check.0.response + - http2_health_check.0.port + - http2_health_check.0.port_name + - http2_health_check.0.proxy_header + - http2_health_check.0.port_specification description: | The TCP port number for the HTTP2 health check request. The default value is 443. 
- !ruby/object:Api::Type::String name: 'portName' + at_least_one_of: + - http2_health_check.0.host + - http2_health_check.0.request_path + - http2_health_check.0.response + - http2_health_check.0.port + - http2_health_check.0.port_name + - http2_health_check.0.proxy_header + - http2_health_check.0.port_specification description: | Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. - !ruby/object:Api::Type::Enum name: 'proxyHeader' + at_least_one_of: + - http2_health_check.0.host + - http2_health_check.0.request_path + - http2_health_check.0.response + - http2_health_check.0.port + - http2_health_check.0.port_name + - http2_health_check.0.proxy_header + - http2_health_check.0.port_specification description: | Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. @@ -7639,6 +8584,14 @@ objects: default_value: :NONE - !ruby/object:Api::Type::Enum name: 'portSpecification' + at_least_one_of: + - http2_health_check.0.host + - http2_health_check.0.request_path + - http2_health_check.0.response + - http2_health_check.0.port + - http2_health_check.0.port_name + - http2_health_check.0.proxy_header + - http2_health_check.0.port_specification description: | Specifies how port is selected for health checking, can be one of the following values: @@ -7719,6 +8672,10 @@ objects: name: 'hourlySchedule' description: | The policy will execute every nth hour starting at the specified time. + exactly_one_of: + - snapshot_schedule_policy.0.schedule.0.hourly_schedule + - snapshot_schedule_policy.0.schedule.0.daily_schedule + - snapshot_schedule_policy.0.schedule.0.weekly_schedule properties: - !ruby/object:Api::Type::Integer name: 'hoursInCycle' @@ -7736,6 +8693,10 @@ objects: name: 'dailySchedule' description: | The policy will execute every nth day at the specified time. + exactly_one_of: + - snapshot_schedule_policy.0.schedule.0.hourly_schedule + - snapshot_schedule_policy.0.schedule.0.daily_schedule + - snapshot_schedule_policy.0.schedule.0.weekly_schedule properties: - !ruby/object:Api::Type::Integer name: 'daysInCycle' @@ -7753,6 +8714,10 @@ objects: name: 'weeklySchedule' description: | Allows specifying a snapshot time for each day of the week. + exactly_one_of: + - snapshot_schedule_policy.0.schedule.0.hourly_schedule + - snapshot_schedule_policy.0.schedule.0.daily_schedule + - snapshot_schedule_policy.0.schedule.0.weekly_schedule properties: - !ruby/object:Api::Type::Array name: 'dayOfWeeks' @@ -7809,16 +8774,28 @@ objects: properties: - !ruby/object:Api::Type::KeyValuePairs name: 'labels' + at_least_one_of: + - snapshot_schedule_policy.0.snapshot_properties.0.labels + - snapshot_schedule_policy.0.snapshot_properties.0.storage_locations + - snapshot_schedule_policy.0.snapshot_properties.0.guest_flush description: | A set of key-value pairs. - !ruby/object:Api::Type::Array name: 'storageLocations' + at_least_one_of: + - snapshot_schedule_policy.0.snapshot_properties.0.labels + - snapshot_schedule_policy.0.snapshot_properties.0.storage_locations + - snapshot_schedule_policy.0.snapshot_properties.0.guest_flush max_size: 1 description: | GCS bucket location in which to store the snapshot (regional or multi-regional). 
item_type: Api::Type::String - !ruby/object:Api::Type::Boolean name: 'guestFlush' + at_least_one_of: + - snapshot_schedule_policy.0.snapshot_properties.0.labels + - snapshot_schedule_policy.0.snapshot_properties.0.storage_locations + - snapshot_schedule_policy.0.snapshot_properties.0.guest_flush description: | Whether to perform a 'guest aware' snapshot. - !ruby/object:Api::Resource @@ -7849,7 +8826,8 @@ objects: sending virtual machine's routing table will be dropped. A Route resource must have exactly one specification of either - nextHopGateway, nextHopInstance, nextHopIp, or nextHopVpnTunnel. + nextHopGateway, nextHopInstance, nextHopIp, nextHopVpnTunnel, or + nextHopIlb. references: !ruby/object:Api::Resource::ReferenceLinks guides: @@ -7922,6 +8900,12 @@ objects: - !ruby/object:Api::Type::String name: 'nextHopGateway' input: true + exactly_one_of: + - next_hop_gateway + - next_hop_instance + - next_hop_ip + - next_hop_vpn_tunnel + - next_hop_ilb description: | URL to a gateway that should handle matching packets. @@ -7937,6 +8921,12 @@ objects: resource: 'Instance' imports: 'selfLink' input: true + exactly_one_of: + - next_hop_gateway + - next_hop_instance + - next_hop_ip + - next_hop_vpn_tunnel + - next_hop_ilb description: | URL to an instance that should handle matching packets. You can specify this as a full or partial URL. For example: @@ -7950,11 +8940,23 @@ objects: description: | Network IP address of an instance that should handle matching packets. input: true + exactly_one_of: + - next_hop_gateway + - next_hop_instance + - next_hop_ip + - next_hop_vpn_tunnel + - next_hop_ilb - !ruby/object:Api::Type::ResourceRef name: 'nextHopVpnTunnel' resource: 'VpnTunnel' imports: 'selfLink' input: true + exactly_one_of: + - next_hop_gateway + - next_hop_instance + - next_hop_ip + - next_hop_vpn_tunnel + - next_hop_ilb description: | URL to a VpnTunnel that should handle matching packets. - !ruby/object:Api::Type::String @@ -7973,6 +8975,12 @@ objects: regions/region/forwardingRules/forwardingRule Note that this can only be used when the destinationRange is a public (non-RFC 1918) IP CIDR range. input: true + exactly_one_of: + - next_hop_gateway + - next_hop_instance + - next_hop_ip + - next_hop_vpn_tunnel + - next_hop_ilb min_version: beta - !ruby/object:Api::Resource name: 'Router' @@ -8105,6 +9113,7 @@ objects: properties: - !ruby/object:Api::Type::String name: range + required: true description: | The IP range to advertise. The value must be a CIDR-formatted string. @@ -9123,18 +10132,6 @@ objects: Only networks that are in the distributed mode can have subnetworks. input: true required: true - - !ruby/object:Api::Type::Boolean - name: 'enableFlowLogs' - description: | - Whether to enable flow logging for this subnetwork. - update_verb: :PATCH - update_url: projects/{{project}}/regions/{{region}}/subnetworks/{{name}} - update_id: 'enableFlowLogs' - fingerprint_name: 'fingerprint' - send_empty_value: true - deprecation_message: >- - This field is being removed in favor of log_config. If log_config is present, - flow logs are enabled. # TODO(rileykarson): Work with rambleraptor to remove this field from downstreams. - !ruby/object:Api::Type::Fingerprint name: 'fingerprint' @@ -9224,10 +10221,15 @@ objects: update_id: 'logConfig' description: | Denotes the logging options for the subnetwork flow logs. If logging is enabled - logs will be exported to Stackdriver. + logs will be exported to Stackdriver. 
This field cannot be set if the `purpose` of this + subnetwork is `INTERNAL_HTTPS_LOAD_BALANCER` properties: - !ruby/object:Api::Type::Enum name: 'aggregationInterval' + at_least_one_of: + - log_config.0.aggregation_interval + - log_config.0.flow_sampling + - log_config.0.metadata description: | Can only be specified if VPC flow logging for this subnetwork is enabled. Toggles the aggregation interval for collecting flow logs. Increasing the @@ -9245,6 +10247,10 @@ objects: default_value: :INTERVAL_5_SEC - !ruby/object:Api::Type::Double name: 'flowSampling' + at_least_one_of: + - log_config.0.aggregation_interval + - log_config.0.flow_sampling + - log_config.0.metadata description: | Can only be specified if VPC flow logging for this subnetwork is enabled. The value of the field must be in [0, 1]. Set the sampling rate of VPC @@ -9254,6 +10260,10 @@ objects: default_value: 0.5 - !ruby/object:Api::Type::Enum name: 'metadata' + at_least_one_of: + - log_config.0.aggregation_interval + - log_config.0.flow_sampling + - log_config.0.metadata description: | Can only be specified if VPC flow logging for this subnetwork is enabled. Configures whether metadata fields should be added to the reported VPC diff --git a/products/compute/terraform.yaml b/products/compute/terraform.yaml index df5de9c48bac..c4603219581d 100644 --- a/products/compute/terraform.yaml +++ b/products/compute/terraform.yaml @@ -14,7 +14,6 @@ --- !ruby/object:Provider::Terraform::Config overrides: !ruby/object:Overrides::ResourceOverrides Address: !ruby/object:Overrides::Terraform::ResourceOverride - id_format: "{{project}}/{{region}}/{{name}}" examples: - !ruby/object:Provider::Terraform::Examples name: "address_basic" @@ -71,7 +70,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides custom_code: !ruby/object:Provider::Terraform::CustomCode post_create: templates/terraform/post_create/labels.erb Autoscaler: !ruby/object:Overrides::Terraform::ResourceOverride - id_format: "{{zone}}/{{name}}" examples: - !ruby/object:Provider::Terraform::Examples name: "autoscaler_single_instance" @@ -435,7 +433,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides pre_delete: templates/terraform/pre_delete/detach_disk.erb constants: templates/terraform/constants/disk.erb encoder: templates/terraform/encoders/disk.erb - extra_schema_entry: templates/terraform/extra_schema_entry/disk.erb decoder: templates/terraform/decoders/disk.erb resource_definition: templates/terraform/resource_definition/disk.erb docs: !ruby/object:Provider::Terraform::Docs @@ -526,6 +523,7 @@ overrides: !ruby/object:Overrides::ResourceOverrides network_name: "website-net" custom_code: !ruby/object:Provider::Terraform::CustomCode post_create: templates/terraform/post_create/labels.erb + extra_schema_entry: templates/terraform/extra_schema_entry/forwarding_rule.erb properties: id: !ruby/object:Overrides::Terraform::PropertyOverride exclude: true @@ -536,6 +534,8 @@ overrides: !ruby/object:Overrides::ResourceOverrides default_from_api: true IPAddress: !ruby/object:Overrides::Terraform::PropertyOverride default_from_api: true + validation: !ruby/object:Provider::Terraform::Validation + function: 'validateIpAddress' description: | The IP address that this forwarding rule is serving on behalf of. @@ -556,15 +556,11 @@ overrides: !ruby/object:Overrides::ResourceOverrides internal IP address will be automatically allocated from the IP range of the subnet or network configured for this forwarding rule. 
- ~> **NOTE** The address should be specified as a literal IP address, - e.g. `100.1.2.3` to avoid a permanent diff, as the server returns the - IP address regardless of the input value. - - The server accepts a literal IP address or a URL reference to an existing - Address resource. The following examples are all valid but only the first - will prevent a permadiff. If you are using `google_compute_address` or - similar, interpolate using `.address` instead of `.self_link` or similar - to prevent a diff on re-apply. + The address must be specified as a literal IP address. ~> **NOTE**: While + the API also accepts various resource paths to an Address resource here, + Terraform requires a literal IP address so that it does not have to + resolve the address from a resource path on refresh, which would cause + unnecessary diffs. IPProtocol: !ruby/object:Overrides::Terraform::PropertyOverride diff_suppress_func: 'caseDiffSuppress' default_from_api: true @@ -639,6 +635,8 @@ overrides: !ruby/object:Overrides::ResourceOverrides exclude: true IPAddress: !ruby/object:Overrides::Terraform::PropertyOverride default_from_api: true + validation: !ruby/object:Provider::Terraform::Validation + function: 'validateIpAddress' description: | The IP address that this forwarding rule is serving on behalf of. @@ -659,15 +657,11 @@ overrides: !ruby/object:Overrides::ResourceOverrides internal IP address will be automatically allocated from the IP range of the subnet or network configured for this forwarding rule. - ~> **NOTE** The address should be specified as a literal IP address, - e.g. `100.1.2.3` to avoid a permanent diff, as the server returns the - IP address regardless of the input value. - - The server accepts a literal IP address or a URL reference to an existing - Address resource. The following examples are all valid but only the first - will prevent a permadiff. If you are using `google_compute_address` or - similar, interpolate using `.address` instead of `.self_link` or similar - to prevent a diff on re-apply. + The address must be specified as a literal IP address. ~> **NOTE**: While + the API also accepts various resource paths to an Address resource here, + Terraform requires a literal IP address so that it does not have to + resolve the address from a resource path on refresh, which would cause + unnecessary diffs.
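The override above wires the forwarding rule's `IPAddress` field to a `validateIpAddress` validation function, and the reworded note explains why only a literal IP address is accepted. As a rough illustration of that constraint only (the real function lives in the provider and is not shown in this diff), a validator in the Terraform SDK's `SchemaValidateFunc` shape could look like the sketch below; the package and function names are placeholders.

```go
package validation

import (
	"fmt"
	"net"
)

// validateIPLiteral is an illustrative stand-in for the validateIpAddress
// function referenced above: it rejects anything that is not a literal
// IPv4/IPv6 address (for example a self_link or other resource path), so the
// provider never has to resolve an Address resource during refresh.
func validateIPLiteral(val interface{}, key string) (warns []string, errs []error) {
	v, ok := val.(string)
	if !ok {
		errs = append(errs, fmt.Errorf("%q: expected a string", key))
		return
	}
	if net.ParseIP(v) == nil {
		errs = append(errs, fmt.Errorf("%q: %q is not a literal IP address", key, v))
	}
	return
}
```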
IPProtocol: !ruby/object:Overrides::Terraform::PropertyOverride diff_suppress_func: 'caseDiffSuppress' default_from_api: true @@ -933,9 +927,7 @@ overrides: !ruby/object:Overrides::ResourceOverrides exclude: true autoCreateSubnetworks: !ruby/object:Overrides::Terraform::PropertyOverride default_value: True - # autoCreateSubnetworks defaults to true, so we need to disable it explicitly - conflicts: [] - ipv4_range: !ruby/object:Overrides::Terraform::PropertyOverride + send_empty_value: true # autoCreateSubnetworks defaults to true, so we need to disable it explicitly conflicts: [] routingConfig: !ruby/object:Overrides::Terraform::PropertyOverride @@ -951,7 +943,7 @@ overrides: !ruby/object:Overrides::ResourceOverrides default_from_api: true custom_code: !ruby/object:Provider::Terraform::CustomCode post_create: templates/terraform/post_create/compute_network_delete_default_route.erb - encoder: templates/terraform/encoders/network.erb + extra_schema_entry: templates/terraform/extra_schema_entry/network.erb NetworkEndpoint: !ruby/object:Overrides::Terraform::ResourceOverride id_format: "{{project}}/{{zone}}/{{network_endpoint_group}}/{{instance}}/{{ip_address}}/{{port}}" mutex: networkEndpoint/{{project}}/{{zone}}/{{network_endpoint_group}} @@ -1050,7 +1042,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides Region: !ruby/object:Overrides::Terraform::ResourceOverride exclude: true RegionAutoscaler: !ruby/object:Overrides::Terraform::ResourceOverride - id_format: "{{region}}/{{name}}" examples: - !ruby/object:Provider::Terraform::Examples name: "region_autoscaler_beta" @@ -1363,7 +1354,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides `next_hop_instance`. Omit if `next_hop_instance` is specified as a URL. Router: !ruby/object:Overrides::Terraform::ResourceOverride - id_format: "{{region}}/{{name}}" mutex: router/{{region}}/{{name}} examples: - !ruby/object:Provider::Terraform::Examples @@ -1458,6 +1448,7 @@ overrides: !ruby/object:Overrides::ResourceOverrides # https://github.com/GoogleCloudPlatform/magic-modules/issues/1019 ignore_read: true sensitive: true + required: true custom_flatten: templates/terraform/custom_flatten/compute_snapshot_snapshot_encryption_raw_key.go.erb snapshotEncryptionKey.kmsKeyName: !ruby/object:Overrides::Terraform::PropertyOverride # This is a beta field that showed up in GA. Removed from both. 
@@ -1701,10 +1692,8 @@ overrides: !ruby/object:Overrides::ResourceOverrides function: 'validateIpCidrRange' fingerprint: !ruby/object:Overrides::Terraform::PropertyOverride exclude: false - enableFlowLogs: !ruby/object:Overrides::Terraform::PropertyOverride - default_from_api: true logConfig: !ruby/object:Overrides::Terraform::PropertyOverride - default_from_api: true + send_empty_value: true custom_expand: 'templates/terraform/custom_expand/subnetwork_log_config.go.erb' custom_flatten: 'templates/terraform/custom_flatten/subnetwork_log_config.go.erb' ipCidrRange: !ruby/object:Overrides::Terraform::PropertyOverride @@ -1720,6 +1709,7 @@ overrides: !ruby/object:Overrides::ResourceOverrides custom_code: !ruby/object:Provider::Terraform::CustomCode constants: templates/terraform/constants/subnetwork.erb resource_definition: templates/terraform/resource_definition/subnetwork.erb + extra_schema_entry: templates/terraform/extra_schema_entry/subnetwork.erb examples: - !ruby/object:Provider::Terraform::Examples name: "subnetwork_basic" @@ -1961,7 +1951,9 @@ overrides: !ruby/object:Overrides::ResourceOverrides id: !ruby/object:Overrides::Terraform::PropertyOverride name: "map_id" defaultService: !ruby/object:Overrides::Terraform::PropertyOverride - custom_expand: 'templates/terraform/custom_expand/url_map_only_set_string.go.erb' + # ResourceRef only supports 1 type and UrlMap has references to a BackendBucket or BackendService. + # Just read the self_link string instead of extracting the name and making a self_link out of it. + custom_expand: 'templates/terraform/custom_expand/resourceref_as_string.go.erb' description: The backend service or backend bucket to use when none of the given rules match. hostRules: !ruby/object:Overrides::Terraform::PropertyOverride name: "host_rule" @@ -1971,17 +1963,23 @@ overrides: !ruby/object:Overrides::ResourceOverrides pathMatchers: !ruby/object:Overrides::Terraform::PropertyOverride name: "path_matcher" pathMatchers.defaultService: !ruby/object:Overrides::Terraform::PropertyOverride - custom_expand: 'templates/terraform/custom_expand/url_map_only_set_string.go.erb' + # ResourceRef only supports 1 type and UrlMap has references to a BackendBucket or BackendService. + # Just read the self_link string instead of extracting the name and making a self_link out of it. + custom_expand: 'templates/terraform/custom_expand/resourceref_as_string.go.erb' description: The backend service or backend bucket to use when none of the given paths match. pathMatchers.pathRules: !ruby/object:Overrides::Terraform::PropertyOverride name: "path_rule" pathMatchers.pathRules.paths: !ruby/object:Overrides::Terraform::PropertyOverride is_set: true pathMatchers.pathRules.service: !ruby/object:Overrides::Terraform::PropertyOverride - custom_expand: 'templates/terraform/custom_expand/url_map_only_set_string.go.erb' + # ResourceRef only supports 1 type and UrlMap has references to a BackendBucket or BackendService. + # Just read the self_link string instead of extracting the name and making a self_link out of it. + custom_expand: 'templates/terraform/custom_expand/resourceref_as_string.go.erb' description: The backend service or backend bucket to use if any of the given paths match. tests.service: !ruby/object:Overrides::Terraform::PropertyOverride - custom_expand: 'templates/terraform/custom_expand/url_map_only_set_string.go.erb' + # ResourceRef only supports 1 type and UrlMap has references to a BackendBucket or BackendService. 
+ # Just read the self_link string instead of extracting the name and making a self_link out of it. + custom_expand: 'templates/terraform/custom_expand/resourceref_as_string.go.erb' description: The backend service or backend bucket link that should be matched by this test. tests: !ruby/object:Overrides::Terraform::PropertyOverride name: "test" diff --git a/products/containeranalysis/terraform.yaml b/products/containeranalysis/terraform.yaml index 6fcf384ae447..99a3579f59f6 100644 --- a/products/containeranalysis/terraform.yaml +++ b/products/containeranalysis/terraform.yaml @@ -14,6 +14,7 @@ --- !ruby/object:Provider::Terraform::Config overrides: !ruby/object:Overrides::ResourceOverrides Note: !ruby/object:Overrides::Terraform::ResourceOverride + id_format: "projects/{{project}}/notes/{{name}}" import_format: ["projects/{{project}}/notes/{{name}}"] custom_code: !ruby/object:Provider::Terraform::CustomCode pre_update: 'templates/terraform/pre_update/containeranalysis_note.erb' diff --git a/products/datafusion/terraform.yaml b/products/datafusion/terraform.yaml index 8737b896e5c0..fbb8b1e504ff 100644 --- a/products/datafusion/terraform.yaml +++ b/products/datafusion/terraform.yaml @@ -19,8 +19,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides update_minutes: 10 delete_minutes: 25 autogen_async: true - id_format: "{{project}}/{{region}}/{{name}}" - import_format: ["projects/{{project}}/locations/{{region}}/instances/{{name}}"] examples: - !ruby/object:Provider::Terraform::Examples name: "data_fusion_instance_basic" @@ -40,7 +38,7 @@ overrides: !ruby/object:Overrides::ResourceOverrides required: false default_from_api: true name: !ruby/object:Overrides::Terraform::PropertyOverride - custom_expand: 'templates/terraform/custom_expand/redis_instance_name.erb' + custom_expand: 'templates/terraform/custom_expand/shortname_to_url.go.erb' custom_flatten: 'templates/terraform/custom_flatten/name_from_self_link.erb' # This is for copying files over files: !ruby/object:Provider::Config::Files diff --git a/products/dataproc/api.yaml b/products/dataproc/api.yaml index cddf927b880d..355510688afd 100644 --- a/products/dataproc/api.yaml +++ b/products/dataproc/api.yaml @@ -97,11 +97,19 @@ objects: properties: - !ruby/object:Api::Type::Integer name: 'minInstances' + at_least_one_of: + - secondary_worker_config.0.min_instances + - secondary_worker_config.0.max_instances + - secondary_worker_config.0.weight default_value: 2 description: | Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0. - !ruby/object:Api::Type::Integer name: 'maxInstances' + at_least_one_of: + - secondary_worker_config.0.min_instances + - secondary_worker_config.0.max_instances + - secondary_worker_config.0.weight default_value: 0 description: | Maximum number of instances for this group. Note that by default, clusters will not use @@ -109,6 +117,10 @@ objects: Bounds: [minInstances, ). Defaults to 0. 
- !ruby/object:Api::Type::Integer name: 'weight' + at_least_one_of: + - secondary_worker_config.0.min_instances + - secondary_worker_config.0.max_instances + - secondary_worker_config.0.weight default_value: 1 description: | Weight for the instance group, which is used to determine the fraction of total workers diff --git a/products/dns/api.yaml b/products/dns/api.yaml index 76213104a213..9635fd2e4ea3 100644 --- a/products/dns/api.yaml +++ b/products/dns/api.yaml @@ -61,10 +61,20 @@ objects: properties: - !ruby/object:Api::Type::String name: 'kind' + at_least_one_of: + - dnssec_config.0.kind + - dnssec_config.0.non_existence + - dnssec_config.0.state + - dnssec_config.0.default_key_specs description: Identifies what kind of resource this is default_value: 'dns#managedZoneDnsSecConfig' - !ruby/object:Api::Type::Enum name: 'nonExistence' + at_least_one_of: + - dnssec_config.0.kind + - dnssec_config.0.non_existence + - dnssec_config.0.state + - dnssec_config.0.default_key_specs description: | Specifies the mechanism used to provide authenticated denial-of-existence responses. values: @@ -72,6 +82,11 @@ objects: - "nsec3" - !ruby/object:Api::Type::Enum name: 'state' + at_least_one_of: + - dnssec_config.0.kind + - dnssec_config.0.non_existence + - dnssec_config.0.state + - dnssec_config.0.default_key_specs description: Specifies whether DNSSEC is enabled, and what mode it is in values: - "off" @@ -79,6 +94,11 @@ objects: - "transfer" - !ruby/object:Api::Type::Array name: 'defaultKeySpecs' + at_least_one_of: + - dnssec_config.0.kind + - dnssec_config.0.non_existence + - dnssec_config.0.state + - dnssec_config.0.default_key_specs description: | Specifies parameters that will be used for generating initial DnsKeys for this ManagedZone. If you provide a spec for keySigning or zoneSigning, @@ -170,6 +190,7 @@ objects: - !ruby/object:Api::Type::Array name: 'networks' description: 'The list of VPC networks that can see this zone.' + required: true item_type: !ruby/object:Api::Type::NestedObject properties: # TODO(drebes): Make 'networkUrl' a ResourceRef once cross-module references @@ -180,6 +201,7 @@ objects: The fully qualified URL of the VPC network to bind to. This should be formatted like `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}` + required: true - !ruby/object:Api::Type::NestedObject name: 'forwardingConfig' description: | @@ -191,6 +213,7 @@ objects: properties: - !ruby/object:Api::Type::Array name: 'targetNameServers' + required: true description: | List of target name servers to forward to. Cloud DNS will select the best available name server if more than @@ -199,6 +222,7 @@ objects: properties: - !ruby/object:Api::Type::String name: 'ipv4Address' + required: true description: 'IPv4 address of a target name server.' min_version: beta - !ruby/object:Api::Type::NestedObject @@ -212,12 +236,14 @@ objects: properties: - !ruby/object:Api::Type::NestedObject name: 'targetNetwork' + required: true description: 'The network with which to peer.' properties: # TODO(drebes): Make 'networkUrl' a ResourceRef once cross-module references # are possible. - !ruby/object:Api::Type::String name: 'networkUrl' + required: true description: | The fully qualified URL of the VPC network to forward queries to. This should be formatted like @@ -247,6 +273,7 @@ objects: properties: - !ruby/object:Api::Type::Array name: 'targetNameServers' + required: true description: | Sets an alternative name server for the associated networks. 
When specified, all DNS queries are forwarded to a name server that you choose. Names such as .internal @@ -255,6 +282,7 @@ objects: properties: - !ruby/object:Api::Type::String name: 'ipv4Address' + required: true description: 'IPv4 address to forward to.' - !ruby/object:Api::Type::String name: 'description' @@ -301,6 +329,7 @@ objects: # are possible. - !ruby/object:Api::Type::String name: 'networkUrl' + required: true description: | The fully qualified URL of the VPC network to bind to. This should be formatted like diff --git a/products/dns/terraform.yaml b/products/dns/terraform.yaml index 574d36a18baf..9074542696a3 100644 --- a/products/dns/terraform.yaml +++ b/products/dns/terraform.yaml @@ -14,6 +14,7 @@ --- !ruby/object:Provider::Terraform::Config overrides: !ruby/object:Overrides::ResourceOverrides ManagedZone: !ruby/object:Overrides::Terraform::ResourceOverride + id_format: 'projects/{{project}}/managedZones/{{name}}' examples: - !ruby/object:Provider::Terraform::Examples name: "dns_managed_zone_basic" @@ -97,6 +98,7 @@ overrides: !ruby/object:Overrides::ResourceOverrides diff_suppress_func: 'caseDiffSuppress' custom_flatten: templates/terraform/custom_flatten/default_if_empty.erb Policy: !ruby/object:Overrides::Terraform::ResourceOverride + id_format: 'projects/{{project}}/policies/{{name}}' examples: - !ruby/object:Provider::Terraform::Examples name: "dns_policy_basic" diff --git a/products/filestore/terraform.yaml b/products/filestore/terraform.yaml index 6a4a9ca8b9df..f20a9567914f 100644 --- a/products/filestore/terraform.yaml +++ b/products/filestore/terraform.yaml @@ -19,8 +19,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides update_minutes: 6 delete_minutes: 6 autogen_async: true - id_format: "{{project}}/{{zone}}/{{name}}" - import_format: ["projects/{{project}}/locations/{{zone}}/instances/{{name}}"] examples: - !ruby/object:Provider::Terraform::Examples name: "filestore_instance_basic" diff --git a/products/firestore/api.yaml b/products/firestore/api.yaml index 17580d09dd90..960636bf52cd 100644 --- a/products/firestore/api.yaml +++ b/products/firestore/api.yaml @@ -95,6 +95,7 @@ objects: Name of the field. - !ruby/object:Api::Type::Enum name: 'order' + # TODO (mbang): Exactly one of order or arrayConfig must be set description: | Indicates that this field supports ordering by the specified order or comparing using =, <, <=, >, >=. Only one of `order` and `arrayConfig` can be specified. @@ -103,6 +104,7 @@ objects: - :DESCENDING - !ruby/object:Api::Type::Enum name: 'arrayConfig' + # TODO (mbang): Exactly one of order or arrayConfig must be set description: | Indicates that this field supports operations on arrayValues. Only one of `order` and `arrayConfig` can be specified. diff --git a/products/healthcare/api.yaml b/products/healthcare/api.yaml index a289e288c3b3..c76cdc4c9580 100644 --- a/products/healthcare/api.yaml +++ b/products/healthcare/api.yaml @@ -319,10 +319,16 @@ objects: properties: - !ruby/object:Api::Type::Boolean name: allowNullHeader + at_least_one_of: + - parser_config.0.allow_null_header + - parser_config.0.segment_terminator description: | Determines whether messages with no header are allowed. - !ruby/object:Api::Type::String name: segmentTerminator + at_least_one_of: + - parser_config.0.allow_null_header + - parser_config.0.segment_terminator description: | Byte(s) to be used as the segment terminator. If this is unset, '\r' will be used as segment terminator. 
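The `parser_config.0.allow_null_header` style keys used above are the fully indexed addresses the Terraform plugin SDK expects in its `AtLeastOneOf` constraint. Below is a minimal sketch, assuming the plugin SDK's `helper/schema` package, of roughly the schema shape the generator could emit for this parserConfig block; it is illustrative only, not the actual generated google provider code.

```go
package sketch

import "github.com/hashicorp/terraform-plugin-sdk/helper/schema"

// parserConfigSchema mirrors the healthcare parserConfig block above: both
// optional fields carry the same AtLeastOneOf group, so setting the block
// without either field can be rejected at validation time.
func parserConfigSchema() *schema.Schema {
	atLeastOneOf := []string{
		"parser_config.0.allow_null_header",
		"parser_config.0.segment_terminator",
	}
	return &schema.Schema{
		Type:     schema.TypeList,
		Optional: true,
		MaxItems: 1,
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"allow_null_header": {
					Type:         schema.TypeBool,
					Optional:     true,
					AtLeastOneOf: atLeastOneOf,
				},
				"segment_terminator": {
					Type:         schema.TypeString,
					Optional:     true,
					AtLeastOneOf: atLeastOneOf,
				},
			},
		},
	}
}
```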
diff --git a/products/logging/api.yaml b/products/logging/api.yaml index fbd101c5736c..0c1a119da06e 100644 --- a/products/logging/api.yaml +++ b/products/logging/api.yaml @@ -160,47 +160,84 @@ objects: properties: - !ruby/object:Api::Type::NestedObject name: linearBuckets + at_least_one_of: + - bucket_options.0.linear_buckets + - bucket_options.0.exponential_buckets + - bucket_options.0.explicit_buckets description: | Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket. properties: - !ruby/object:Api::Type::Integer name: numFiniteBuckets + at_least_one_of: + - bucket_options.0.linear_buckets.0.num_finite_buckets + - bucket_options.0.linear_buckets.0.width + - bucket_options.0.linear_buckets.0.offset description: | Must be greater than 0. - !ruby/object:Api::Type::Integer name: width + at_least_one_of: + - bucket_options.0.linear_buckets.0.num_finite_buckets + - bucket_options.0.linear_buckets.0.width + - bucket_options.0.linear_buckets.0.offset description: | Must be greater than 0. - !ruby/object:Api::Type::Double name: offset + at_least_one_of: + - bucket_options.0.linear_buckets.0.num_finite_buckets + - bucket_options.0.linear_buckets.0.width + - bucket_options.0.linear_buckets.0.offset description: | Lower bound of the first bucket. - !ruby/object:Api::Type::NestedObject name: exponentialBuckets + at_least_one_of: + - bucket_options.0.linear_buckets + - bucket_options.0.exponential_buckets + - bucket_options.0.explicit_buckets description: | Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket. properties: - !ruby/object:Api::Type::Integer name: numFiniteBuckets + at_least_one_of: + - bucket_options.0.exponential_buckets.0.num_finite_buckets + - bucket_options.0.exponential_buckets.0.growth_factor + - bucket_options.0.exponential_buckets.0.scale description: | Must be greater than 0. - !ruby/object:Api::Type::Integer name: growthFactor + at_least_one_of: + - bucket_options.0.exponential_buckets.0.num_finite_buckets + - bucket_options.0.exponential_buckets.0.growth_factor + - bucket_options.0.exponential_buckets.0.scale description: | Must be greater than 1. - !ruby/object:Api::Type::Double name: scale + at_least_one_of: + - bucket_options.0.exponential_buckets.0.num_finite_buckets + - bucket_options.0.exponential_buckets.0.growth_factor + - bucket_options.0.exponential_buckets.0.scale description: | Must be greater than 0. - !ruby/object:Api::Type::NestedObject name: explicitBuckets + at_least_one_of: + - bucket_options.0.linear_buckets + - bucket_options.0.exponential_buckets + - bucket_options.0.explicit_buckets description: | Specifies a set of buckets with arbitrary widths. properties: - !ruby/object:Api::Type::Array name: bounds + required: true item_type: Api::Type::Double description: | The values must be monotonically increasing. diff --git a/products/mlengine/api.yaml b/products/mlengine/api.yaml index 1311173b42d2..485205eeb1a8 100644 --- a/products/mlengine/api.yaml +++ b/products/mlengine/api.yaml @@ -73,6 +73,7 @@ objects: properties: - !ruby/object:Api::Type::String name: 'name' + required: true description: The name specified for the version when it was created. # Even though only one region is supported, keeping this as an array # to future-proof it. 
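The logging hunk above applies the same convention two levels deep: the sibling blocks under bucket_options name each other with singly indexed keys, while the fields inside one block use the doubly indexed path. A sketch of one such block, under the same plugin-SDK assumption as above (exact generated output may differ):

```go
package sketch

import "github.com/hashicorp/terraform-plugin-sdk/helper/schema"

// Sibling blocks under bucket_options reference each other with ".0." paths
// (the single element of a MaxItems: 1 list)...
var bucketOptionsGroup = []string{
	"bucket_options.0.linear_buckets",
	"bucket_options.0.exponential_buckets",
	"bucket_options.0.explicit_buckets",
}

// ...while fields inside one block use the doubly indexed path.
var linearBucketsGroup = []string{
	"bucket_options.0.linear_buckets.0.num_finite_buckets",
	"bucket_options.0.linear_buckets.0.width",
	"bucket_options.0.linear_buckets.0.offset",
}

func linearBucketsSchema() *schema.Schema {
	return &schema.Schema{
		Type:         schema.TypeList,
		Optional:     true,
		MaxItems:     1,
		AtLeastOneOf: bucketOptionsGroup,
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"num_finite_buckets": {Type: schema.TypeInt, Optional: true, AtLeastOneOf: linearBucketsGroup},
				"width":              {Type: schema.TypeInt, Optional: true, AtLeastOneOf: linearBucketsGroup},
				"offset":             {Type: schema.TypeFloat, Optional: true, AtLeastOneOf: linearBucketsGroup},
			},
		},
	}
}
```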
diff --git a/products/monitoring/api.yaml b/products/monitoring/api.yaml index 452d1fefa4bb..596a3a9eddb2 100644 --- a/products/monitoring/api.yaml +++ b/products/monitoring/api.yaml @@ -721,6 +721,9 @@ objects: properties: - !ruby/object:Api::Type::String name: content + at_least_one_of: + - documentation.0.content + - documentation.0.mime_type description: | The text of the documentation, interpreted according to mimeType. The content may not exceed 8,192 Unicode characters and may not @@ -728,6 +731,9 @@ objects: whichever is smaller. - !ruby/object:Api::Type::String name: mimeType + at_least_one_of: + - documentation.0.content + - documentation.0.mime_type default_value: text/markdown description: | The format of the content field. Presently, only the value @@ -933,6 +939,7 @@ objects: - !ruby/object:Api::Type::String name: content description: String or regex content to match (max 1024 bytes) + required: true - !ruby/object:Api::Type::Array name: selectedRegions description: The list of regions from which the check will be run. Some regions @@ -944,27 +951,51 @@ objects: - !ruby/object:Api::Type::NestedObject name: httpCheck description: Contains information needed to make an HTTP or HTTPS check. - conflicts: - - tcpCheck + exactly_one_of: + - http_check + - tcp_check properties: - !ruby/object:Api::Type::NestedObject name: authInfo + at_least_one_of: + - http_check.0.auth_info + - http_check.0.port + - http_check.0.headers + - http_check.0.path + - http_check.0.use_ssl + - http_check.0.mask_headers description: The authentication information. Optional when creating an HTTP check; defaults to empty. properties: - !ruby/object:Api::Type::String name: password + required: true description: The password to authenticate. - !ruby/object:Api::Type::String name: username + required: true description: The username to authenticate. - !ruby/object:Api::Type::Integer name: port + at_least_one_of: + - http_check.0.auth_info + - http_check.0.port + - http_check.0.headers + - http_check.0.path + - http_check.0.use_ssl + - http_check.0.mask_headers description: The port to the page to run the check against. Will be combined with host (specified within the MonitoredResource) and path to construct the full URL. Optional (defaults to 80 without SSL, or 443 with SSL). - !ruby/object:Api::Type::KeyValuePairs name: headers + at_least_one_of: + - http_check.0.auth_info + - http_check.0.port + - http_check.0.headers + - http_check.0.path + - http_check.0.use_ssl + - http_check.0.mask_headers description: The list of headers to send as part of the uptime check request. If two headers have the same key and different values, they should be entered as a single header, with the value being a comma-separated list of all the @@ -974,12 +1005,26 @@ objects: headers allowed is 100. - !ruby/object:Api::Type::String name: path + at_least_one_of: + - http_check.0.auth_info + - http_check.0.port + - http_check.0.headers + - http_check.0.path + - http_check.0.use_ssl + - http_check.0.mask_headers default_value: "/" description: The path to the page to run the check against. Will be combined with the host (specified within the MonitoredResource) and port to construct the full URL. Optional (defaults to "/"). - !ruby/object:Api::Type::Boolean name: useSsl + at_least_one_of: + - http_check.0.auth_info + - http_check.0.port + - http_check.0.headers + - http_check.0.path + - http_check.0.use_ssl + - http_check.0.mask_headers description: If true, use HTTPS instead of HTTP to run the check. 
- !ruby/object:Api::Type::Boolean name: validateSsl @@ -988,6 +1033,13 @@ objects: is set to uptime_url. If useSsl is false, setting validateSsl to true has no effect. - !ruby/object:Api::Type::Boolean name: maskHeaders + at_least_one_of: + - http_check.0.auth_info + - http_check.0.port + - http_check.0.headers + - http_check.0.path + - http_check.0.use_ssl + - http_check.0.mask_headers description: Boolean specifying whether to encrypt the header information. Encryption should be specified for any headers related to authentication that you do not wish to be seen when retrieving the configuration. The server will @@ -996,8 +1048,9 @@ objects: - !ruby/object:Api::Type::NestedObject name: tcpCheck description: Contains information needed to make a TCP check. - conflicts: - - httpCheck + exactly_one_of: + - http_check + - tcp_check properties: - !ruby/object:Api::Type::Integer name: port @@ -1008,12 +1061,16 @@ objects: name: resourceGroup input: true description: The group resource associated with the configuration. - conflicts: - - monitoredResource + exactly_one_of: + - monitored_resource + - resource_group properties: - !ruby/object:Api::Type::Enum name: resourceType input: true + at_least_one_of: + - resource_group.0.resource_type + - resource_group.0.group_id description: The resource type of the group members. values: - :RESOURCE_TYPE_UNSPECIFIED @@ -1022,6 +1079,9 @@ objects: - !ruby/object:Api::Type::ResourceRef name: groupId input: true + at_least_one_of: + - resource_group.0.resource_type + - resource_group.0.group_id resource: Group imports: name description: The group of resources being monitored. Should be the `name` of a group @@ -1031,8 +1091,9 @@ objects: description: 'The monitored resource (https://cloud.google.com/monitoring/api/resources) associated with the configuration. The following monitored resource types are supported for uptime checks: uptime_url gce_instance gae_app aws_ec2_instance aws_elb_load_balancer' - conflicts: - - resourceGroup + exactly_one_of: + - monitored_resource + - resource_group properties: - !ruby/object:Api::Type::String name: type diff --git a/products/monitoring/terraform.yaml b/products/monitoring/terraform.yaml index 9e34f05386be..b47a5200bd98 100644 --- a/products/monitoring/terraform.yaml +++ b/products/monitoring/terraform.yaml @@ -108,7 +108,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides group_display_name: "uptime-check-group" custom_code: !ruby/object:Provider::Terraform::CustomCode custom_import: templates/terraform/custom_import/self_link_as_name.erb - decoder: 'templates/terraform/decoders/monitoring_uptime_check_internal.go.erb' extra_schema_entry: 'templates/terraform/extra_schema_entry/monitoring_uptime_check_config_internal.go.erb' post_create: templates/terraform/post_create/set_computed_name.erb properties: diff --git a/products/pubsub/api.yaml b/products/pubsub/api.yaml index 2af625676fc1..9965f0d640a0 100644 --- a/products/pubsub/api.yaml +++ b/products/pubsub/api.yaml @@ -232,12 +232,13 @@ objects: A subscription is considered active as long as any connected subscriber is successfully consuming messages from the subscription or is issuing operations on the subscription. If expirationPolicy is not set, a default - policy with ttl of 31 days will be used. If it is set but left empty, the + policy with ttl of 31 days will be used. If it is set but ttl is "", the resource never expires. The minimum allowed value for expirationPolicy.ttl is 1 day. 
properties: - !ruby/object:Api::Type::String name: 'ttl' + required: true description: | Specifies the "time-to-live" duration for an associated resource. The resource expires if it is not active for a period of ttl. diff --git a/products/pubsub/terraform.yaml b/products/pubsub/terraform.yaml index 57dec762fbe9..b4dcbffb8646 100644 --- a/products/pubsub/terraform.yaml +++ b/products/pubsub/terraform.yaml @@ -14,7 +14,6 @@ --- !ruby/object:Provider::Terraform::Config overrides: !ruby/object:Overrides::ResourceOverrides Topic: !ruby/object:Overrides::Terraform::ResourceOverride - id_format: "projects/{{project}}/topics/{{name}}" error_retry_predicates: ["pubsubTopicProjectNotReady"] iam_policy: !ruby/object:Api::Resource::IamPolicy parent_resource_attribute: 'topic' @@ -50,7 +49,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides encoder: templates/terraform/encoders/no_send_name.go.erb update_encoder: templates/terraform/update_encoder/pubsub_topic.erb Subscription: !ruby/object:Overrides::Terraform::ResourceOverride - id_format: "projects/{{project}}/subscriptions/{{name}}" examples: - !ruby/object:Provider::Terraform::Examples name: "pubsub_subscription_push" @@ -79,8 +77,7 @@ overrides: !ruby/object:Overrides::ResourceOverrides * `path`: Path of the subscription in the format `projects/{project}/subscriptions/{name}` properties: name: !ruby/object:Overrides::Terraform::PropertyOverride - diff_suppress_func: 'comparePubsubSubscriptionBasename' - custom_expand: templates/terraform/custom_expand/computed_subscription_name.erb + custom_expand: templates/terraform/custom_expand/shortname_to_url.go.erb custom_flatten: templates/terraform/custom_flatten/name_from_self_link.erb topic: !ruby/object:Overrides::Terraform::PropertyOverride diff_suppress_func: 'compareSelfLinkOrResourceName' diff --git a/products/redis/terraform.yaml b/products/redis/terraform.yaml index 59eacce05ad2..5c0d7b428c75 100644 --- a/products/redis/terraform.yaml +++ b/products/redis/terraform.yaml @@ -19,8 +19,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides update_minutes: 10 delete_minutes: 10 autogen_async: true - id_format: "{{project}}/{{region}}/{{name}}" - import_format: ["projects/{{project}}/locations/{{region}}/instances/{{name}}"] custom_code: !ruby/object:Provider::Terraform::CustomCode encoder: templates/terraform/encoders/redis_location_id_for_fallback_zone.go.erb examples: @@ -45,7 +43,7 @@ overrides: !ruby/object:Overrides::ResourceOverrides locationId: !ruby/object:Overrides::Terraform::PropertyOverride default_from_api: true name: !ruby/object:Overrides::Terraform::PropertyOverride - custom_expand: 'templates/terraform/custom_expand/redis_instance_name.erb' + custom_expand: 'templates/terraform/custom_expand/shortname_to_url.go.erb' custom_flatten: 'templates/terraform/custom_flatten/name_from_self_link.erb' redisVersion: !ruby/object:Overrides::Terraform::PropertyOverride default_from_api: true diff --git a/products/resourcemanager/terraform.yaml b/products/resourcemanager/terraform.yaml index c4970ca439f4..7e8fe24942d7 100644 --- a/products/resourcemanager/terraform.yaml +++ b/products/resourcemanager/terraform.yaml @@ -18,6 +18,7 @@ overrides: !ruby/object:Overrides::ResourceOverrides autogen_async: true exclude: true Lien: !ruby/object:Overrides::Terraform::ResourceOverride + id_format: "{{name}}" import_format: ["{{parent}}/{{name}}"] examples: - !ruby/object:Provider::Terraform::Examples diff --git a/products/runtimeconfig/terraform.yaml b/products/runtimeconfig/terraform.yaml index 
3f6a3b8ce15d..6aca227268e2 100644 --- a/products/runtimeconfig/terraform.yaml +++ b/products/runtimeconfig/terraform.yaml @@ -16,7 +16,6 @@ legacy_name: runtimeconfig overrides: !ruby/object:Overrides::ResourceOverrides Config: !ruby/object:Overrides::Terraform::ResourceOverride exclude_resource: true - id_format: 'projects/{{project}}/configs/{{config}}' import_format: ["projects/{{project}}/configs/{{config}}"] examples: - !ruby/object:Provider::Terraform::Examples diff --git a/products/securityscanner/api.yaml b/products/securityscanner/api.yaml index 8fdb9350de81..a4cb1a634b9d 100644 --- a/products/securityscanner/api.yaml +++ b/products/securityscanner/api.yaml @@ -68,6 +68,9 @@ objects: properties: - !ruby/object:Api::Type::NestedObject name: googleAccount + at_least_one_of: + - authentication.0.google_account + - authentication.0.custom_account description: | Describes authentication configuration that uses a Google account. properties: @@ -85,6 +88,9 @@ objects: in GCP. - !ruby/object:Api::Type::NestedObject name: customAccount + at_least_one_of: + - authentication.0.google_account + - authentication.0.custom_account description: | Describes authentication configuration that uses a custom account. properties: diff --git a/products/securityscanner/terraform.yaml b/products/securityscanner/terraform.yaml index 66e7a5436f98..dae06d1a6e1b 100644 --- a/products/securityscanner/terraform.yaml +++ b/products/securityscanner/terraform.yaml @@ -13,7 +13,6 @@ --- !ruby/object:Provider::Terraform::Config overrides: !ruby/object:Overrides::ResourceOverrides ScanConfig: !ruby/object:Overrides::Terraform::ResourceOverride - import_format: ["{{name}}"] examples: - !ruby/object:Provider::Terraform::Examples name: "scan_config_basic" diff --git a/products/sourcerepo/terraform.yaml b/products/sourcerepo/terraform.yaml index 2c18421b498d..c280265ce574 100644 --- a/products/sourcerepo/terraform.yaml +++ b/products/sourcerepo/terraform.yaml @@ -19,7 +19,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides exclude: false method_name_separator: ':' parent_resource_attribute: 'repository' - id_format: '{{project}}/{{name}}' examples: - !ruby/object:Provider::Terraform::Examples name: "sourcerepo_repository_basic" @@ -29,7 +28,7 @@ overrides: !ruby/object:Overrides::ResourceOverrides repository_name: "my-repository" properties: name: !ruby/object:Overrides::Terraform::PropertyOverride - custom_expand: templates/terraform/custom_expand/repository_name_from_short_name.go.erb + custom_expand: templates/terraform/custom_expand/shortname_to_url.go.erb custom_flatten: templates/terraform/custom_flatten/repository_short_name_from_name.go.erb description: | Resource name of the repository, of the form `{{repo}}`. 
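Several resources above (pubsub Subscription, datafusion and redis Instance, sourcerepo Repository) now share templates/terraform/custom_expand/shortname_to_url.go.erb in place of per-product name expanders. That template, added later in this diff, expands the configured short name through the resource's id format (see the id_format helper in provider/terraform.rb just below). A hedged sketch of roughly what it renders to for a repository-style resource follows; the function name and the projects/{{project}}/repos/{{name}} path are illustrative, and TerraformResourceData, Config and replaceVars are existing provider helpers rather than definitions introduced here.

```go
// Illustrative rendering of shortname_to_url.go.erb for a hypothetical resource
// whose id format is "projects/{{project}}/repos/{{name}}".
func expandSourceRepoRepositoryName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {
	// replaceVars substitutes {{project}}, {{name}}, etc. from resource data and
	// provider config, so the short name stored in state is sent to the API as
	// the full resource path.
	return replaceVars(d, config, "projects/{{project}}/repos/{{name}}")
}
```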
diff --git a/products/spanner/terraform.yaml b/products/spanner/terraform.yaml index fa7d29732465..16765e710866 100644 --- a/products/spanner/terraform.yaml +++ b/products/spanner/terraform.yaml @@ -15,7 +15,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides Database: !ruby/object:Overrides::Terraform::ResourceOverride autogen_async: true - id_format: "{{project}}/{{instance}}/{{name}}" import_format: - "projects/{{project}}/instances/{{instance}}/databases/{{name}}" - "instances/{{instance}}/databases/{{name}}" diff --git a/products/sql/terraform.yaml b/products/sql/terraform.yaml index 0488eec48868..6b217793c43b 100644 --- a/products/sql/terraform.yaml +++ b/products/sql/terraform.yaml @@ -16,12 +16,9 @@ client_name: 'SqlAdmin' overrides: !ruby/object:Overrides::ResourceOverrides Database: !ruby/object:Overrides::Terraform::ResourceOverride mutex: "google-sql-database-instance-{{project}}-{{instance}}" - id_format: "{{instance}}:{{name}}" import_format: ["projects/{{project}}/instances/{{instance}}/databases/{{name}}", "{{project}}/{{instance}}/{{name}}", "instances/{{instance}}/databases/{{name}}", - # support for a legacy import format - "{{instance}}:{{name}}", "{{instance}}/{{name}}", "{{name}}"] examples: diff --git a/products/tpu/api.yaml b/products/tpu/api.yaml index 39855c837987..825724edaa78 100644 --- a/products/tpu/api.yaml +++ b/products/tpu/api.yaml @@ -124,7 +124,7 @@ objects: name: 'preemptible' description: | Defines whether the TPU instance is preemptible. - default_value: false + required: true - !ruby/object:Api::Type::Array name: 'networkEndpoints' output: true diff --git a/products/tpu/terraform.yaml b/products/tpu/terraform.yaml index 56f9b85a58cc..857085513288 100644 --- a/products/tpu/terraform.yaml +++ b/products/tpu/terraform.yaml @@ -18,8 +18,6 @@ overrides: !ruby/object:Overrides::ResourceOverrides insert_minutes: 15 update_minutes: 15 delete_minutes: 15 - id_format: "{{project}}/{{zone}}/{{name}}" - import_format: ["projects/{{project}}/locations/{{zone}}/nodes/{{name}}"] autogen_async: true examples: - !ruby/object:Provider::Terraform::Examples diff --git a/provider/terraform.rb b/provider/terraform.rb index d890ddc9cfea..4adbe277dd35 100644 --- a/provider/terraform.rb +++ b/provider/terraform.rb @@ -246,5 +246,12 @@ def build_object_data(object, output_folder, version) def extract_identifiers(url) url.scan(/\{\{(\w+)\}\}/).flatten end + + # Returns the id format of an object, or self_link_uri if none is explicitly defined + # We prefer the long name of a resource as the id so that users can reference + # resources in a standard way, and most APIs accept short name, long name or self_link + def id_format(object) + object.id_format || object.self_link_uri + end end end diff --git a/provider/terraform/sub_template.rb b/provider/terraform/sub_template.rb index acff1d4f3466..a4e38113d3d6 100644 --- a/provider/terraform/sub_template.rb +++ b/provider/terraform/sub_template.rb @@ -40,10 +40,11 @@ def build_flatten_method(prefix, property, object) # Transforms a Terraform schema representation of a property into a # representation used by the Cloud API. 
- def build_expand_method(prefix, property) + def build_expand_method(prefix, property, object) compile_template 'templates/terraform/expand_property_method.erb', prefix: prefix, - property: property + property: property, + object: object end def build_expand_resource_ref(var_name, property) diff --git a/templates/inspec/tests/integration/build/gcp-mm.tf b/templates/inspec/tests/integration/build/gcp-mm.tf index 94b6f721962e..c150ab5884c7 100644 --- a/templates/inspec/tests/integration/build/gcp-mm.tf +++ b/templates/inspec/tests/integration/build/gcp-mm.tf @@ -612,7 +612,7 @@ resource "google_cloudfunctions_function" "function" { trigger_http = "${var.cloudfunction["trigger_http"]}" timeout = "${var.cloudfunction["timeout"]}" entry_point = "${var.cloudfunction["entry_point"]}" - runtime = "nodejs6" + runtime = "nodejs8" environment_variables = { MY_ENV_VAR = "${var.cloudfunction["env_var_value"]}" diff --git a/templates/terraform/constants/subnetwork.erb b/templates/terraform/constants/subnetwork.erb index a54a182cae65..e698124da16c 100644 --- a/templates/terraform/constants/subnetwork.erb +++ b/templates/terraform/constants/subnetwork.erb @@ -17,11 +17,3 @@ func isShrinkageIpCidr(old, new, _ interface{}) bool { return true } - - -func splitSubnetID(id string) (region string, name string) { - parts := strings.Split(id, "/") - region = parts[0] - name = parts[1] - return -} diff --git a/templates/terraform/custom_expand/cloudscheduler_job_name.erb b/templates/terraform/custom_expand/cloudscheduler_job_name.erb deleted file mode 100644 index 09e969d101e0..000000000000 --- a/templates/terraform/custom_expand/cloudscheduler_job_name.erb +++ /dev/null @@ -1,35 +0,0 @@ -<%# # the license inside this if block pertains to this file - # Copyright 2018 Google Inc. - # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. - # You may obtain a copy of the License at - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - # See the License for the specific language governing permissions and - # limitations under the License. -#%> -func expand<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - var jobName string - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - region, err := getRegion(d, config) - if err != nil { - return nil, err - } - - if v, ok := d.GetOk("name"); ok { - jobName = fmt.Sprintf("projects/%s/locations/%s/jobs/%s", project, region, v.(string)) - } else { - err := fmt.Errorf("The name is missing for the job cannot be empty") - return nil, err - } - - return jobName, nil -} diff --git a/templates/terraform/custom_expand/computed_subscription_name.erb b/templates/terraform/custom_expand/computed_subscription_name.erb deleted file mode 100644 index 569e688b0fc8..000000000000 --- a/templates/terraform/custom_expand/computed_subscription_name.erb +++ /dev/null @@ -1,36 +0,0 @@ -<%# # the license inside this if block pertains to this file - # Copyright 2018 Google Inc. - # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. 
- # You may obtain a copy of the License at - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - # See the License for the specific language governing permissions and - # limitations under the License. -#%> -func expand<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - project, err := getProject(d, config) - if err != nil { - return "", err - } - - subscription := d.Get("name").(string) - - re := regexp.MustCompile("projects\\/(.*)\\/subscriptions\\/(.*)") - match := re.FindStringSubmatch(subscription) - if len(match) == 3 { - // We need to preserve the behavior where the user passes the subscription name already in the long form, - // however we need it to be stored as the short form since it's used for the replaceVars in the URL. - // The unintuitive behavior is that if the user provides the long form, we use the project from there, not the one - // specified on the resource or provider. - // TODO(drebes): consider deprecating the long form behavior for 3.0 - d.Set("project", match[1]) - d.Set("name", match[2]) - return subscription, nil - } - return fmt.Sprintf("projects/%s/subscriptions/%s", project, subscription), nil -} diff --git a/templates/terraform/custom_expand/monitoring_group_name.erb b/templates/terraform/custom_expand/monitoring_group_name.erb deleted file mode 100644 index a08dd8f81523..000000000000 --- a/templates/terraform/custom_expand/monitoring_group_name.erb +++ /dev/null @@ -1,26 +0,0 @@ -<%# The license inside this block applies to this file. - # Copyright 2018 Google Inc. - # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. - # You may obtain a copy of the License at - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - # See the License for the specific language governing permissions and - # limitations under the License. --%> -func expand<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - if v.(string) == "" { - return "", nil - } - - return fmt.Sprintf("projects/%s/groups/%s", project, v.(string)), nil -} \ No newline at end of file diff --git a/templates/terraform/custom_expand/redis_instance_name.erb b/templates/terraform/custom_expand/redis_instance_name.erb deleted file mode 100644 index 20c3b5d983cb..000000000000 --- a/templates/terraform/custom_expand/redis_instance_name.erb +++ /dev/null @@ -1,27 +0,0 @@ -<%# The license inside this block applies to this file. - # Copyright 2017 Google Inc. - # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. 
- # You may obtain a copy of the License at - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - # See the License for the specific language governing permissions and - # limitations under the License. --%> -func expand<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - region, err := getRegion(d, config) - if err != nil { - return nil, err - } - - return fmt.Sprintf("projects/%s/locations/%s/instances/%s", project, region, v.(string)), nil -} diff --git a/templates/terraform/custom_expand/route_gateway.erb b/templates/terraform/custom_expand/route_gateway.erb index d4165dfdbd4d..d0d349dfb472 100644 --- a/templates/terraform/custom_expand/route_gateway.erb +++ b/templates/terraform/custom_expand/route_gateway.erb @@ -1,10 +1,6 @@ func expand<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { if v == "default-internet-gateway" { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - return fmt.Sprintf("projects/%s/global/gateways/default-internet-gateway", project), nil + return replaceVars(d, config, "projects/{{project}}/global/gateways/default-internet-gateway") } else { return v, nil } diff --git a/templates/terraform/custom_expand/shortname_to_url.go.erb b/templates/terraform/custom_expand/shortname_to_url.go.erb new file mode 100644 index 000000000000..2ee215313bd9 --- /dev/null +++ b/templates/terraform/custom_expand/shortname_to_url.go.erb @@ -0,0 +1,17 @@ +<%# # the license inside this if block pertains to this file + # Copyright 2019 Google Inc. + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. +#%> +func expand<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return replaceVars(d, config, "<%= id_format(object) -%>") +} diff --git a/templates/terraform/custom_expand/spanner_instance_config.go b/templates/terraform/custom_expand/spanner_instance_config.go deleted file mode 100644 index 26707d2b3ca1..000000000000 --- a/templates/terraform/custom_expand/spanner_instance_config.go +++ /dev/null @@ -1,27 +0,0 @@ -<%- # the license inside this block applies to this file - # Copyright 2018 Google Inc. - # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. 
- # You may obtain a copy of the License at - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - # See the License for the specific language governing permissions and - # limitations under the License. --%> -func expand<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - r := regexp.MustCompile("projects/(.+)/notes/(.+)") - if r.MatchString(v.(string)) { - return v.(string), nil - } - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - return fmt.Sprintf("projects/%s/notes/%s", project, v.(string)), nil -} diff --git a/templates/terraform/custom_expand/subnetwork_log_config.go.erb b/templates/terraform/custom_expand/subnetwork_log_config.go.erb index e1b4e3a63bc9..a293f1b97562 100644 --- a/templates/terraform/custom_expand/subnetwork_log_config.go.erb +++ b/templates/terraform/custom_expand/subnetwork_log_config.go.erb @@ -14,21 +14,27 @@ -%> func expand<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) + transformed := make(map[string]interface{}) if len(l) == 0 || l[0] == nil { - return nil, nil + purpose, ok := d.GetOkExists("purpose") + + if ok && purpose.(string) == "INTERNAL_HTTPS_LOAD_BALANCER" { + // Subnetworks for L7ILB do not accept any values for logConfig + return nil, nil + } + // send enable = false to ensure logging is disabled if there is no config + transformed["enable"] = false + return transformed, nil } + raw := l[0] original := raw.(map[string]interface{}) - v, ok := d.GetOkExists("enable_flow_logs") - - transformed := make(map[string]interface{}) - if !ok || v.(bool) { - transformed["enable"] = true - transformed["aggregationInterval"] = original["aggregation_interval"] - transformed["flowSampling"] = original["flow_sampling"] - transformed["metadata"] = original["metadata"] - } + // The log_config block is specified, so logging should be enabled + transformed["enable"] = true + transformed["aggregationInterval"] = original["aggregation_interval"] + transformed["flowSampling"] = original["flow_sampling"] + transformed["metadata"] = original["metadata"] return transformed, nil } diff --git a/templates/terraform/custom_expand/url_map_only_set_string.go.erb b/templates/terraform/custom_expand/url_map_only_set_string.go.erb deleted file mode 100644 index 3f021c9cf97b..000000000000 --- a/templates/terraform/custom_expand/url_map_only_set_string.go.erb +++ /dev/null @@ -1,19 +0,0 @@ -<%# The license inside this block applies to this file. - # Copyright 2017 Google Inc. - # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. - # You may obtain a copy of the License at - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - # See the License for the specific language governing permissions and - # limitations under the License. --%> -// ResourceRef only supports 1 type and UrlMap has references to a BackendBucket or BackendService. 
Just read the self_link string -// instead of extracting the name and making a self_link out of it. -func expand<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/templates/terraform/decoders/monitoring_uptime_check_internal.go.erb b/templates/terraform/decoders/monitoring_uptime_check_internal.go.erb deleted file mode 100644 index b09250fa6843..000000000000 --- a/templates/terraform/decoders/monitoring_uptime_check_internal.go.erb +++ /dev/null @@ -1,2 +0,0 @@ -d.Set("internal_checkers", nil) -return res, nil diff --git a/templates/terraform/encoders/network.erb b/templates/terraform/encoders/network.erb deleted file mode 100644 index e5e725c1ed67..000000000000 --- a/templates/terraform/encoders/network.erb +++ /dev/null @@ -1,5 +0,0 @@ -if _, ok := d.GetOk("ipv4_range"); !ok { - obj["autoCreateSubnetworks"] = d.Get("auto_create_subnetworks") -} - -return obj, nil diff --git a/templates/terraform/examples/access_context_manager_access_level_basic.tf.erb b/templates/terraform/examples/access_context_manager_access_level_basic.tf.erb index a02eab096c1e..6a1f8feb3867 100644 --- a/templates/terraform/examples/access_context_manager_access_level_basic.tf.erb +++ b/templates/terraform/examples/access_context_manager_access_level_basic.tf.erb @@ -1,7 +1,7 @@ resource "google_access_context_manager_access_level" "<%= ctx[:primary_resource_id] %>" { - parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" - name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/accessLevels/<%= ctx[:vars]['access_level_name'] %>" - title = "<%= ctx[:vars]['access_level_name'] %>" + parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" + name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/accessLevels/<%= ctx[:vars]['access_level_name'] %>" + title = "<%= ctx[:vars]['access_level_name'] %>" basic { conditions { device_policy { diff --git a/templates/terraform/examples/access_context_manager_service_perimeter_basic.tf.erb b/templates/terraform/examples/access_context_manager_service_perimeter_basic.tf.erb index 812c54063d6f..825de2a04c6a 100644 --- a/templates/terraform/examples/access_context_manager_service_perimeter_basic.tf.erb +++ b/templates/terraform/examples/access_context_manager_service_perimeter_basic.tf.erb @@ -1,16 +1,16 @@ resource "google_access_context_manager_service_perimeter" "<%= ctx[:primary_resource_id] %>" { - parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" - name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/servicePerimeters/<%= ctx[:vars]['service_perimeter_name'] %>" - title = "<%= ctx[:vars]['service_perimeter_name'] %>" + parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" + name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/servicePerimeters/<%= ctx[:vars]['service_perimeter_name'] %>" + title = "<%= ctx[:vars]['service_perimeter_name'] %>" status { restricted_services = ["storage.googleapis.com"] } } resource "google_access_context_manager_access_level" "access-level" { - parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" - name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/accessLevels/<%= 
ctx[:vars]['access_level_name'] %>" - title = "<%= ctx[:vars]['access_level_name'] %>" + parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" + name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/accessLevels/<%= ctx[:vars]['access_level_name'] %>" + title = "<%= ctx[:vars]['access_level_name'] %>" basic { conditions { device_policy { diff --git a/templates/terraform/examples/address_with_subnetwork.tf.erb b/templates/terraform/examples/address_with_subnetwork.tf.erb index 236654234b78..95b5202af389 100644 --- a/templates/terraform/examples/address_with_subnetwork.tf.erb +++ b/templates/terraform/examples/address_with_subnetwork.tf.erb @@ -6,12 +6,12 @@ resource "google_compute_subnetwork" "default" { name = "<%= ctx[:vars]['subnetwork_name'] %>" ip_cidr_range = "10.0.0.0/16" region = "us-central1" - network = "${google_compute_network.default.self_link}" + network = google_compute_network.default.self_link } resource "google_compute_address" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['address_name'] %>" - subnetwork = "${google_compute_subnetwork.default.self_link}" + subnetwork = google_compute_subnetwork.default.self_link address_type = "INTERNAL" address = "10.0.42.42" region = "us-central1" diff --git a/templates/terraform/examples/app_engine_application_url_dispatch_rules_basic.tf.erb b/templates/terraform/examples/app_engine_application_url_dispatch_rules_basic.tf.erb index 056bb4804083..2516b058f8ad 100644 --- a/templates/terraform/examples/app_engine_application_url_dispatch_rules_basic.tf.erb +++ b/templates/terraform/examples/app_engine_application_url_dispatch_rules_basic.tf.erb @@ -1,21 +1,21 @@ resource "google_app_engine_application_url_dispatch_rules" "<%= ctx[:primary_resource_id] %>" { dispatch_rules { - domain = "*" - path = "/*" + domain = "*" + path = "/*" service = "default" } dispatch_rules { - domain = "*" - path = "/admin/*" - service = "${google_app_engine_standard_app_version.admin_v3.service}" + domain = "*" + path = "/admin/*" + service = google_app_engine_standard_app_version.admin_v3.service } } resource "google_app_engine_standard_app_version" "admin_v3" { version_id = "v3" - service = "admin" - runtime = "nodejs10" + service = "admin" + runtime = "nodejs10" entrypoint { shell = "node ./app.js" @@ -35,11 +35,11 @@ resource "google_app_engine_standard_app_version" "admin_v3" { } resource "google_storage_bucket" "bucket" { - name = "<%= ctx[:vars]['bucket_name'] %>" + name = "<%= ctx[:vars]['bucket_name'] %>" } resource "google_storage_bucket_object" "object" { - name = "hello-world.zip" - bucket = "${google_storage_bucket.bucket.name}" - source = "./test-fixtures/appengine/hello-world.zip" + name = "hello-world.zip" + bucket = google_storage_bucket.bucket.name + source = "./test-fixtures/appengine/hello-world.zip" } diff --git a/templates/terraform/examples/app_engine_domain_mapping_basic.tf.erb b/templates/terraform/examples/app_engine_domain_mapping_basic.tf.erb index d54f726c8ea6..b3806ca9df2b 100644 --- a/templates/terraform/examples/app_engine_domain_mapping_basic.tf.erb +++ b/templates/terraform/examples/app_engine_domain_mapping_basic.tf.erb @@ -1,6 +1,6 @@ resource "google_app_engine_domain_mapping" "<%= ctx[:primary_resource_id] %>" { domain_name = "<%= ctx[:vars]["domain"] %>.gcp.tfacc.hashicorptest.com" - + ssl_settings { ssl_management_type = "AUTOMATIC" } diff --git a/templates/terraform/examples/app_engine_firewall_rule_basic.tf.erb 
b/templates/terraform/examples/app_engine_firewall_rule_basic.tf.erb index 73ba20571d21..d21956cd3191 100644 --- a/templates/terraform/examples/app_engine_firewall_rule_basic.tf.erb +++ b/templates/terraform/examples/app_engine_firewall_rule_basic.tf.erb @@ -5,13 +5,13 @@ resource "google_project" "my_project" { } resource "google_app_engine_application" "app" { - project = "${google_project.my_project.project_id}" + project = google_project.my_project.project_id location_id = "us-central" } resource "google_app_engine_firewall_rule" "rule" { - project = "${google_app_engine_application.app.project}" - priority = 1000 - action = "ALLOW" + project = google_app_engine_application.app.project + priority = 1000 + action = "ALLOW" source_range = "*" } diff --git a/templates/terraform/examples/app_engine_standard_app_version.tf.erb b/templates/terraform/examples/app_engine_standard_app_version.tf.erb index f9bf973843a5..cdf15ef6acbb 100644 --- a/templates/terraform/examples/app_engine_standard_app_version.tf.erb +++ b/templates/terraform/examples/app_engine_standard_app_version.tf.erb @@ -43,11 +43,11 @@ resource "google_app_engine_standard_app_version" "myapp_v2" { } resource "google_storage_bucket" "bucket" { - name = "<%= ctx[:vars]['bucket_name'] %>" + name = "<%= ctx[:vars]['bucket_name'] %>" } resource "google_storage_bucket_object" "object" { - name = "hello-world.zip" - bucket = "${google_storage_bucket.bucket.name}" - source = "./test-fixtures/appengine/hello-world.zip" + name = "hello-world.zip" + bucket = google_storage_bucket.bucket.name + source = "./test-fixtures/appengine/hello-world.zip" } diff --git a/templates/terraform/examples/autoscaler_basic.tf.erb b/templates/terraform/examples/autoscaler_basic.tf.erb index a2ad5f349f05..e5e714925b1c 100644 --- a/templates/terraform/examples/autoscaler_basic.tf.erb +++ b/templates/terraform/examples/autoscaler_basic.tf.erb @@ -1,7 +1,7 @@ resource "google_compute_autoscaler" "foobar" { name = "<%= ctx[:vars]['autoscaler_name'] %>" zone = "us-central1-f" - target = "${google_compute_instance_group_manager.foobar.self_link}" + target = google_compute_instance_group_manager.foobar.self_link autoscaling_policy { max_replicas = 5 @@ -22,7 +22,7 @@ resource "google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "${data.google_compute_image.debian_9.self_link}" + source_image = data.google_compute_image.debian_9.self_link } network_interface { @@ -47,15 +47,15 @@ resource "google_compute_instance_group_manager" "foobar" { zone = "us-central1-f" version { - instance_template = "${google_compute_instance_template.foobar.self_link}" + instance_template = google_compute_instance_template.foobar.self_link name = "primary" } - target_pools = ["${google_compute_target_pool.foobar.self_link}"] + target_pools = [google_compute_target_pool.foobar.self_link] base_instance_name = "foobar" } data "google_compute_image" "debian_9" { - family = "debian-9" - project = "debian-cloud" + family = "debian-9" + project = "debian-cloud" } diff --git a/templates/terraform/examples/autoscaler_single_instance.tf.erb b/templates/terraform/examples/autoscaler_single_instance.tf.erb index 66d7741642af..5963289d49e2 100644 --- a/templates/terraform/examples/autoscaler_single_instance.tf.erb +++ b/templates/terraform/examples/autoscaler_single_instance.tf.erb @@ -1,9 +1,9 @@ resource "google_compute_autoscaler" "<%= ctx[:primary_resource_id] %>" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['autoscaler_name'] 
%>" zone = "us-central1-f" - target = "${google_compute_instance_group_manager.default.self_link}" + target = google_compute_instance_group_manager.default.self_link autoscaling_policy { max_replicas = 5 @@ -19,7 +19,7 @@ resource "google_compute_autoscaler" "<%= ctx[:primary_resource_id] %>" { } resource "google_compute_instance_template" "default" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['instance_template_name'] %>" machine_type = "n1-standard-1" @@ -28,7 +28,7 @@ resource "google_compute_instance_template" "default" { tags = ["foo", "bar"] disk { - source_image = "${data.google_compute_image.debian_9.self_link}" + source_image = data.google_compute_image.debian_9.self_link } network_interface { @@ -45,34 +45,34 @@ resource "google_compute_instance_template" "default" { } resource "google_compute_target_pool" "default" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['target_pool_name'] %>" } resource "google_compute_instance_group_manager" "default" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['igm_name'] %>" zone = "us-central1-f" version { - instance_template = "${google_compute_instance_template.default.self_link}" - name = "primary" + instance_template = google_compute_instance_template.default.self_link + name = "primary" } - target_pools = ["${google_compute_target_pool.default.self_link}"] + target_pools = [google_compute_target_pool.default.self_link] base_instance_name = "autoscaler-sample" } data "google_compute_image" "debian_9" { - provider = "google-beta" + provider = google-beta family = "debian-9" project = "debian-cloud" } -provider "google-beta"{ +provider "google-beta" { region = "us-central1" zone = "us-central1-a" } diff --git a/templates/terraform/examples/backend_bucket_basic.tf.erb b/templates/terraform/examples/backend_bucket_basic.tf.erb index 89aa019daa8a..06496f64bbb3 100644 --- a/templates/terraform/examples/backend_bucket_basic.tf.erb +++ b/templates/terraform/examples/backend_bucket_basic.tf.erb @@ -1,7 +1,7 @@ resource "google_compute_backend_bucket" "image_backend" { name = "<%= ctx[:vars]['backend_bucket_name'] %>" description = "Contains beautiful images" - bucket_name = "${google_storage_bucket.image_bucket.name}" + bucket_name = google_storage_bucket.image_bucket.name enable_cdn = true } diff --git a/templates/terraform/examples/backend_bucket_signed_url_key.tf.erb b/templates/terraform/examples/backend_bucket_signed_url_key.tf.erb index a9341ce2167f..da56c1c8dda5 100644 --- a/templates/terraform/examples/backend_bucket_signed_url_key.tf.erb +++ b/templates/terraform/examples/backend_bucket_signed_url_key.tf.erb @@ -1,13 +1,13 @@ resource "google_compute_backend_bucket_signed_url_key" "backend_key" { name = "<%= ctx[:vars]['key_name'] %>" key_value = "pPsVemX8GM46QVeezid6Rw==" - backend_bucket = "${google_compute_backend_bucket.test_backend.name}" + backend_bucket = google_compute_backend_bucket.test_backend.name } resource "google_compute_backend_bucket" "test_backend" { name = "<%= ctx[:vars]['backend_name'] %>" description = "Contains beautiful images" - bucket_name = "${google_storage_bucket.bucket.name}" + bucket_name = google_storage_bucket.bucket.name enable_cdn = true } diff --git a/templates/terraform/examples/backend_service_basic.tf.erb b/templates/terraform/examples/backend_service_basic.tf.erb index a8a9d151087b..c3779e7cb9d2 100644 --- a/templates/terraform/examples/backend_service_basic.tf.erb +++ 
b/templates/terraform/examples/backend_service_basic.tf.erb @@ -1,6 +1,6 @@ resource "google_compute_backend_service" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['backend_service_name'] %>" - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_http_health_check" "default" { diff --git a/templates/terraform/examples/backend_service_signed_url_key.tf.erb b/templates/terraform/examples/backend_service_signed_url_key.tf.erb index 89f6a73b05c8..b9570befa984 100644 --- a/templates/terraform/examples/backend_service_signed_url_key.tf.erb +++ b/templates/terraform/examples/backend_service_signed_url_key.tf.erb @@ -1,7 +1,7 @@ resource "google_compute_backend_service_signed_url_key" "backend_key" { - name = "<%= ctx[:vars]['key_name'] %>" - key_value = "pPsVemX8GM46QVeezid6Rw==" - backend_service = "${google_compute_backend_service.example_backend.name}" + name = "<%= ctx[:vars]['key_name'] %>" + key_value = "pPsVemX8GM46QVeezid6Rw==" + backend_service = google_compute_backend_service.example_backend.name } resource "google_compute_backend_service" "example_backend" { @@ -13,17 +13,17 @@ resource "google_compute_backend_service" "example_backend" { enable_cdn = true backend { - group = "${google_compute_instance_group_manager.webservers.instance_group}" + group = google_compute_instance_group_manager.webservers.instance_group } - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_instance_group_manager" "webservers" { name = "my-webservers" version { - instance_template = "${google_compute_instance_template.webserver.self_link}" + instance_template = google_compute_instance_template.webserver.self_link name = "primary" } @@ -52,4 +52,4 @@ resource "google_compute_http_health_check" "default" { request_path = "/" check_interval_sec = 1 timeout_sec = 1 -} \ No newline at end of file +} diff --git a/templates/terraform/examples/backend_service_traffic_director_ring_hash.tf.erb b/templates/terraform/examples/backend_service_traffic_director_ring_hash.tf.erb index 29476e438df7..080fb2aee9de 100644 --- a/templates/terraform/examples/backend_service_traffic_director_ring_hash.tf.erb +++ b/templates/terraform/examples/backend_service_traffic_director_ring_hash.tf.erb @@ -1,11 +1,11 @@ resource "google_compute_backend_service" "<%= ctx[:primary_resource_id] %>" { - provider = "google-beta" + provider = google-beta - name = "<%= ctx[:vars]['backend_service_name'] %>" - health_checks = ["${google_compute_health_check.health_check.self_link}"] + name = "<%= ctx[:vars]['backend_service_name'] %>" + health_checks = [google_compute_health_check.health_check.self_link] load_balancing_scheme = "INTERNAL_SELF_MANAGED" - locality_lb_policy = "RING_HASH" - session_affinity = "HTTP_COOKIE" + locality_lb_policy = "RING_HASH" + session_affinity = "HTTP_COOKIE" circuit_breakers { max_connections = 10 } @@ -13,7 +13,7 @@ resource "google_compute_backend_service" "<%= ctx[:primary_resource_id] %>" { http_cookie { ttl { seconds = 11 - nanos = 1111 + nanos = 1111 } name = "mycookie" } @@ -24,10 +24,10 @@ resource "google_compute_backend_service" "<%= ctx[:primary_resource_id] %>" { } resource "google_compute_health_check" "health_check" { - provider = "google-beta" + provider = google-beta - name = "<%= ctx[:vars]['health_check_name'] %>" + name = "<%= 
ctx[:vars]['health_check_name'] %>" http_health_check { - + port = 80 } } diff --git a/templates/terraform/examples/backend_service_traffic_director_round_robin.tf.erb b/templates/terraform/examples/backend_service_traffic_director_round_robin.tf.erb index 7109a237a511..068013aee910 100644 --- a/templates/terraform/examples/backend_service_traffic_director_round_robin.tf.erb +++ b/templates/terraform/examples/backend_service_traffic_director_round_robin.tf.erb @@ -1,17 +1,17 @@ resource "google_compute_backend_service" "<%= ctx[:primary_resource_id] %>" { - provider = "google-beta" + provider = google-beta - name = "<%= ctx[:vars]['backend_service_name'] %>" - health_checks = ["${google_compute_health_check.health_check.self_link}"] + name = "<%= ctx[:vars]['backend_service_name'] %>" + health_checks = [google_compute_health_check.health_check.self_link] load_balancing_scheme = "INTERNAL_SELF_MANAGED" - locality_lb_policy = "ROUND_ROBIN" + locality_lb_policy = "ROUND_ROBIN" } resource "google_compute_health_check" "health_check" { - provider = "google-beta" + provider = google-beta - name = "<%= ctx[:vars]['health_check_name'] %>" + name = "<%= ctx[:vars]['health_check_name'] %>" http_health_check { - + port = 80 } } diff --git a/templates/terraform/examples/bigquery_dataset_cmek.tf.erb b/templates/terraform/examples/bigquery_dataset_cmek.tf.erb index a39106dabd3f..5bec43c07e05 100644 --- a/templates/terraform/examples/bigquery_dataset_cmek.tf.erb +++ b/templates/terraform/examples/bigquery_dataset_cmek.tf.erb @@ -6,13 +6,13 @@ resource "google_bigquery_dataset" "<%= ctx[:primary_resource_id] %>" { default_table_expiration_ms = 3600000 default_encryption_configuration { - kms_key_name = "${google_kms_crypto_key.crypto_key.self_link}" + kms_key_name = google_kms_crypto_key.crypto_key.self_link } } resource "google_kms_crypto_key" "crypto_key" { name = "<%= ctx[:vars]['key_name'] %>" - key_ring = "${google_kms_key_ring.key_ring.self_link}" + key_ring = google_kms_key_ring.key_ring.self_link } resource "google_kms_key_ring" "key_ring" { diff --git a/templates/terraform/examples/bigtable_app_profile_multicluster.tf.erb b/templates/terraform/examples/bigtable_app_profile_multicluster.tf.erb index 8a5bb3004daa..52748eeb377b 100644 --- a/templates/terraform/examples/bigtable_app_profile_multicluster.tf.erb +++ b/templates/terraform/examples/bigtable_app_profile_multicluster.tf.erb @@ -1,17 +1,17 @@ resource "google_bigtable_instance" "instance" { - name = "<%= ctx[:vars]['instance_name'] %>" - cluster { - cluster_id = "<%= ctx[:vars]['instance_name'] %>" - zone = "us-central1-b" - num_nodes = 3 - storage_type = "HDD" - } + name = "<%= ctx[:vars]['instance_name'] %>" + cluster { + cluster_id = "<%= ctx[:vars]['instance_name'] %>" + zone = "us-central1-b" + num_nodes = 3 + storage_type = "HDD" + } } resource "google_bigtable_app_profile" "ap" { - instance = google_bigtable_instance.instance.name - app_profile_id = "<%= ctx[:vars]['app_profile_name'] %>" + instance = google_bigtable_instance.instance.name + app_profile_id = "<%= ctx[:vars]['app_profile_name'] %>" - multi_cluster_routing_use_any = true - ignore_warnings = true + multi_cluster_routing_use_any = true + ignore_warnings = true } diff --git a/templates/terraform/examples/bigtable_app_profile_singlecluster.tf.erb b/templates/terraform/examples/bigtable_app_profile_singlecluster.tf.erb index d2b0d8c6aca2..3aee678a14fe 100644 --- a/templates/terraform/examples/bigtable_app_profile_singlecluster.tf.erb +++ 
b/templates/terraform/examples/bigtable_app_profile_singlecluster.tf.erb @@ -1,21 +1,21 @@ resource "google_bigtable_instance" "instance" { - name = "<%= ctx[:vars]['instance_name'] %>" - cluster { - cluster_id = "<%= ctx[:vars]['instance_name'] %>" - zone = "us-central1-b" - num_nodes = 3 - storage_type = "HDD" - } + name = "<%= ctx[:vars]['instance_name'] %>" + cluster { + cluster_id = "<%= ctx[:vars]['instance_name'] %>" + zone = "us-central1-b" + num_nodes = 3 + storage_type = "HDD" + } } resource "google_bigtable_app_profile" "ap" { - instance = google_bigtable_instance.instance.name - app_profile_id = "<%= ctx[:vars]['app_profile_name'] %>" + instance = google_bigtable_instance.instance.name + app_profile_id = "<%= ctx[:vars]['app_profile_name'] %>" - single_cluster_routing { - cluster_id = "<%= ctx[:vars]['instance_name'] %>" - allow_transactional_writes = true - } + single_cluster_routing { + cluster_id = "<%= ctx[:vars]['instance_name'] %>" + allow_transactional_writes = true + } - ignore_warnings = true + ignore_warnings = true } diff --git a/templates/terraform/examples/binary_authorization_attestor_basic.tf.erb b/templates/terraform/examples/binary_authorization_attestor_basic.tf.erb index 20580a9a3b9d..8d0fceec2b8d 100644 --- a/templates/terraform/examples/binary_authorization_attestor_basic.tf.erb +++ b/templates/terraform/examples/binary_authorization_attestor_basic.tf.erb @@ -1,7 +1,7 @@ resource "google_binary_authorization_attestor" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]["attestor_name"] %>" attestation_authority_note { - note_reference = "${google_container_analysis_note.note.name}" + note_reference = google_container_analysis_note.note.name public_keys { ascii_armored_pgp_public_key = <" { name = "<%= ctx[:vars]["attestor_name"] %>" attestation_authority_note { - note_reference = "${google_container_analysis_note.note.name}" + note_reference = google_container_analysis_note.note.name public_keys { - id = "${data.google_kms_crypto_key_version.version.id}" + id = data.google_kms_crypto_key_version.version.id pkix_public_key { - public_key_pem = "${data.google_kms_crypto_key_version.version.public_key[0].pem}" - signature_algorithm = "${data.google_kms_crypto_key_version.version.public_key[0].algorithm}" + public_key_pem = data.google_kms_crypto_key_version.version.public_key[0].pem + signature_algorithm = data.google_kms_crypto_key_version.version.public_key[0].algorithm } } } } data "google_kms_crypto_key_version" "version" { - crypto_key = "${google_kms_crypto_key.crypto-key.self_link}" + crypto_key = google_kms_crypto_key.crypto-key.self_link } resource "google_container_analysis_note" "note" { @@ -27,7 +27,7 @@ resource "google_container_analysis_note" "note" { resource "google_kms_crypto_key" "crypto-key" { name = "<%= ctx[:vars]["key_name"] %>" - key_ring = "${google_kms_key_ring.keyring.self_link}" + key_ring = google_kms_key_ring.keyring.self_link purpose = "ASYMMETRIC_SIGN" version_template { diff --git a/templates/terraform/examples/binary_authorization_policy_basic.tf.erb b/templates/terraform/examples/binary_authorization_policy_basic.tf.erb index 159478ba0f47..eb7d0904a068 100644 --- a/templates/terraform/examples/binary_authorization_policy_basic.tf.erb +++ b/templates/terraform/examples/binary_authorization_policy_basic.tf.erb @@ -1,18 +1,18 @@ resource "google_binary_authorization_policy" "<%= ctx[:primary_resource_id] %>" { admission_whitelist_patterns { - name_pattern= "gcr.io/google_containers/*" + name_pattern = 
"gcr.io/google_containers/*" } default_admission_rule { - evaluation_mode = "ALWAYS_ALLOW" + evaluation_mode = "ALWAYS_ALLOW" enforcement_mode = "ENFORCED_BLOCK_AND_AUDIT_LOG" } cluster_admission_rules { - cluster = "us-central1-a.prod-cluster" - evaluation_mode = "REQUIRE_ATTESTATION" - enforcement_mode = "ENFORCED_BLOCK_AND_AUDIT_LOG" - require_attestations_by = ["${google_binary_authorization_attestor.attestor.name}"] + cluster = "us-central1-a.prod-cluster" + evaluation_mode = "REQUIRE_ATTESTATION" + enforcement_mode = "ENFORCED_BLOCK_AND_AUDIT_LOG" + require_attestations_by = [google_binary_authorization_attestor.attestor.name] } } @@ -28,6 +28,6 @@ resource "google_container_analysis_note" "note" { resource "google_binary_authorization_attestor" "attestor" { name = "<%= ctx[:vars]["attestor_name"] %>" attestation_authority_note { - note_reference = "${google_container_analysis_note.note.name}" + note_reference = google_container_analysis_note.note.name } } diff --git a/templates/terraform/examples/binary_authorization_policy_global_evaluation.tf.erb b/templates/terraform/examples/binary_authorization_policy_global_evaluation.tf.erb index 0f83f97aba09..e96652d0fde8 100644 --- a/templates/terraform/examples/binary_authorization_policy_global_evaluation.tf.erb +++ b/templates/terraform/examples/binary_authorization_policy_global_evaluation.tf.erb @@ -1,13 +1,11 @@ resource "google_binary_authorization_policy" "<%= ctx[:primary_resource_id] %>" { - default_admission_rule { - evaluation_mode = "REQUIRE_ATTESTATION" - enforcement_mode = "ENFORCED_BLOCK_AND_AUDIT_LOG" - require_attestations_by = ["${google_binary_authorization_attestor.attestor.name}"] + evaluation_mode = "REQUIRE_ATTESTATION" + enforcement_mode = "ENFORCED_BLOCK_AND_AUDIT_LOG" + require_attestations_by = [google_binary_authorization_attestor.attestor.name] } global_policy_evaluation_mode = "ENABLE" - } resource "google_container_analysis_note" "note" { @@ -22,6 +20,6 @@ resource "google_container_analysis_note" "note" { resource "google_binary_authorization_attestor" "attestor" { name = "<%= ctx[:vars]["attestor_name"] %>" attestation_authority_note { - note_reference = "${google_container_analysis_note.note.name}" + note_reference = google_container_analysis_note.note.name } } diff --git a/templates/terraform/examples/cloud_run_domain_mapping_basic.tf.erb b/templates/terraform/examples/cloud_run_domain_mapping_basic.tf.erb index 9475fb5db089..33190b628706 100644 --- a/templates/terraform/examples/cloud_run_domain_mapping_basic.tf.erb +++ b/templates/terraform/examples/cloud_run_domain_mapping_basic.tf.erb @@ -1,7 +1,7 @@ resource "google_cloud_run_domain_mapping" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" - provider = "google-beta" - name = "<%= ctx[:vars]['cloud_run_domain_name'] %>" + provider = google-beta + name = "<%= ctx[:vars]['cloud_run_domain_name'] %>" metadata { namespace = "<%= ctx[:test_env_vars]['namespace'] %>" diff --git a/templates/terraform/examples/cloudbuild_trigger_github.tf.erb b/templates/terraform/examples/cloudbuild_trigger_github.tf.erb index 699594b93b71..7acebda38bec 100644 --- a/templates/terraform/examples/cloudbuild_trigger_github.tf.erb +++ b/templates/terraform/examples/cloudbuild_trigger_github.tf.erb @@ -1,7 +1,7 @@ resource "google_cloudbuild_trigger" "<%= ctx[:primary_resource_id] %>" { github { owner = "terraform-providers" - name = "terraform-provider-google-beta" + name = "terraform-provider-google-beta" push { branch = "feature-.*" } diff --git 
a/templates/terraform/examples/cloudfunctions_cloud_function.tf.erb b/templates/terraform/examples/cloudfunctions_cloud_function.tf.erb index f96e9464d15a..c1b34fdd18b1 100644 --- a/templates/terraform/examples/cloudfunctions_cloud_function.tf.erb +++ b/templates/terraform/examples/cloudfunctions_cloud_function.tf.erb @@ -4,19 +4,19 @@ resource "google_storage_bucket" "bucket" { resource "google_storage_bucket_object" "archive" { name = "index.zip" - bucket = "${google_storage_bucket.bucket.name}" + bucket = google_storage_bucket.bucket.name source = "path/to/index.zip" } resource "google_cloudfunctions_function" "<%= ctx[:primary_resource_id] %>" { - name = "<%= ctx[:vars]['function_name'] %>" - description = "My function" - runtime = "nodejs10" + name = "<%= ctx[:vars]['function_name'] %>" + description = "My function" + runtime = "nodejs10" available_memory_mb = 128 - source_archive_bucket = "${google_storage_bucket.bucket.name}" - source_archive_object = "${google_storage_bucket_object.archive.name}" + source_archive_bucket = google_storage_bucket.bucket.name + source_archive_object = google_storage_bucket_object.archive.name trigger_http = true timeout = 60 entry_point = "helloGET" -} \ No newline at end of file +} diff --git a/templates/terraform/examples/dataproc_autoscaling_policy.tf.erb b/templates/terraform/examples/dataproc_autoscaling_policy.tf.erb index b0fbdb757213..1987a428977a 100644 --- a/templates/terraform/examples/dataproc_autoscaling_policy.tf.erb +++ b/templates/terraform/examples/dataproc_autoscaling_policy.tf.erb @@ -1,9 +1,10 @@ -provider "google-beta" {} +provider "google-beta" { +} resource "google_dataproc_cluster" "basic" { - provider = "google-beta" - name = "<%= ctx[:vars]['name'] %>" - region = "us-central1" + provider = google-beta + name = "<%= ctx[:vars]['name'] %>" + region = "us-central1" cluster_config { autoscaling_config { @@ -13,9 +14,9 @@ resource "google_dataproc_cluster" "basic" { } resource "google_dataproc_autoscaling_policy" "<%= ctx[:primary_resource_id] %>" { - provider = "google-beta" + provider = google-beta policy_id = "<%= ctx[:vars]['name'] %>" - location = "us-central1" + location = "us-central1" worker_config { max_instances = 3 diff --git a/templates/terraform/examples/dns_managed_zone_basic.tf.erb b/templates/terraform/examples/dns_managed_zone_basic.tf.erb index 8ecbb4ae821a..db5721b24113 100644 --- a/templates/terraform/examples/dns_managed_zone_basic.tf.erb +++ b/templates/terraform/examples/dns_managed_zone_basic.tf.erb @@ -1,6 +1,6 @@ resource "google_dns_managed_zone" "<%= ctx[:primary_resource_id] %>" { - name = "example-zone" - dns_name = "example-${random_id.rnd.hex}.com." + name = "example-zone" + dns_name = "example-${random_id.rnd.hex}.com." description = "Example DNS zone" labels = { foo = "bar" diff --git a/templates/terraform/examples/dns_managed_zone_private.tf.erb b/templates/terraform/examples/dns_managed_zone_private.tf.erb index 449a82f4f65f..b3afb2178df1 100644 --- a/templates/terraform/examples/dns_managed_zone_private.tf.erb +++ b/templates/terraform/examples/dns_managed_zone_private.tf.erb @@ -1,6 +1,6 @@ resource "google_dns_managed_zone" "<%= ctx[:primary_resource_id] %>" { - name = "<%= ctx[:vars]['zone_name'] %>" - dns_name = "private.example.com." + name = "<%= ctx[:vars]['zone_name'] %>" + dns_name = "private.example.com." 
description = "Example private DNS zone" labels = { foo = "bar" @@ -10,20 +10,20 @@ resource "google_dns_managed_zone" "<%= ctx[:primary_resource_id] %>" { private_visibility_config { networks { - network_url = "${google_compute_network.network-1.self_link}" + network_url = google_compute_network.network-1.self_link } networks { - network_url = "${google_compute_network.network-2.self_link}" + network_url = google_compute_network.network-2.self_link } } } resource "google_compute_network" "network-1" { - name = "<%= ctx[:vars]['network_1_name'] %>" + name = "<%= ctx[:vars]['network_1_name'] %>" auto_create_subnetworks = false } resource "google_compute_network" "network-2" { - name = "<%= ctx[:vars]['network_2_name'] %>" + name = "<%= ctx[:vars]['network_2_name'] %>" auto_create_subnetworks = false } diff --git a/templates/terraform/examples/dns_managed_zone_private_forwarding.tf.erb b/templates/terraform/examples/dns_managed_zone_private_forwarding.tf.erb index bd7a2c3ae3ee..6b38db0716d3 100644 --- a/templates/terraform/examples/dns_managed_zone_private_forwarding.tf.erb +++ b/templates/terraform/examples/dns_managed_zone_private_forwarding.tf.erb @@ -1,7 +1,7 @@ resource "google_dns_managed_zone" "<%= ctx[:primary_resource_id] %>" { - provider = "google-beta" - name = "<%= ctx[:vars]['zone_name'] %>" - dns_name = "private.example.com." + provider = google-beta + name = "<%= ctx[:vars]['zone_name'] %>" + dns_name = "private.example.com." description = "Example private DNS zone" labels = { foo = "bar" @@ -11,10 +11,10 @@ resource "google_dns_managed_zone" "<%= ctx[:primary_resource_id] %>" { private_visibility_config { networks { - network_url = "${google_compute_network.network-1.self_link}" + network_url = google_compute_network.network-1.self_link } networks { - network_url = "${google_compute_network.network-2.self_link}" + network_url = google_compute_network.network-2.self_link } } @@ -26,15 +26,14 @@ resource "google_dns_managed_zone" "<%= ctx[:primary_resource_id] %>" { ipv4_address = "172.16.1.20" } } - } resource "google_compute_network" "network-1" { - name = "<%= ctx[:vars]['network_1_name'] %>" + name = "<%= ctx[:vars]['network_1_name'] %>" auto_create_subnetworks = false } resource "google_compute_network" "network-2" { - name = "<%= ctx[:vars]['network_2_name'] %>" + name = "<%= ctx[:vars]['network_2_name'] %>" auto_create_subnetworks = false } diff --git a/templates/terraform/examples/dns_managed_zone_private_peering.tf.erb b/templates/terraform/examples/dns_managed_zone_private_peering.tf.erb index 4f9a71ffbb73..129b1aa8e25d 100644 --- a/templates/terraform/examples/dns_managed_zone_private_peering.tf.erb +++ b/templates/terraform/examples/dns_managed_zone_private_peering.tf.erb @@ -1,36 +1,36 @@ resource "google_dns_managed_zone" "<%= ctx[:primary_resource_id] %>" { - provider = "google-beta" + provider = google-beta - name = "<%= ctx[:vars]['zone_name'] %>" - dns_name = "peering.example.com." + name = "<%= ctx[:vars]['zone_name'] %>" + dns_name = "peering.example.com." 
description = "Example private DNS peering zone" visibility = "private" private_visibility_config { networks { - network_url = "${google_compute_network.network-source.self_link}" + network_url = google_compute_network.network-source.self_link } } peering_config { target_network { - network_url = "${google_compute_network.network-target.self_link}" + network_url = google_compute_network.network-target.self_link } } } resource "google_compute_network" "network-source" { - provider = "google-beta" + provider = google-beta - name = "<%= ctx[:vars]['network_source_name'] %>" + name = "<%= ctx[:vars]['network_source_name'] %>" auto_create_subnetworks = false } resource "google_compute_network" "network-target" { - provider = "google-beta" + provider = google-beta - name = "<%= ctx[:vars]['network_target_name'] %>" + name = "<%= ctx[:vars]['network_target_name'] %>" auto_create_subnetworks = false } diff --git a/templates/terraform/examples/dns_policy_basic.tf.erb b/templates/terraform/examples/dns_policy_basic.tf.erb index 1eeb9e08b57a..89b0f3f3b599 100644 --- a/templates/terraform/examples/dns_policy_basic.tf.erb +++ b/templates/terraform/examples/dns_policy_basic.tf.erb @@ -1,7 +1,7 @@ resource "google_dns_policy" "<%= ctx[:primary_resource_id] %>" { - provider = "google-beta" + provider = google-beta - name = "<%= ctx[:vars]['policy_name'] %>" + name = "<%= ctx[:vars]['policy_name'] %>" enable_inbound_forwarding = true enable_logging = true @@ -16,28 +16,28 @@ resource "google_dns_policy" "<%= ctx[:primary_resource_id] %>" { } networks { - network_url = "${google_compute_network.network-1.self_link}" + network_url = google_compute_network.network-1.self_link } networks { - network_url = "${google_compute_network.network-2.self_link}" + network_url = google_compute_network.network-2.self_link } } resource "google_compute_network" "network-1" { - provider = "google-beta" + provider = google-beta - name = "<%= ctx[:vars]['network_1_name'] %>" + name = "<%= ctx[:vars]['network_1_name'] %>" auto_create_subnetworks = false } resource "google_compute_network" "network-2" { - provider = "google-beta" + provider = google-beta - name = "<%= ctx[:vars]['network_2_name'] %>" + name = "<%= ctx[:vars]['network_2_name'] %>" auto_create_subnetworks = false } -provider "google-beta"{ +provider "google-beta" { region = "us-central1" zone = "us-central1-a" } diff --git a/templates/terraform/examples/external_vpn_gateway.tf.erb b/templates/terraform/examples/external_vpn_gateway.tf.erb index c8bf17228a19..2b45778d9edd 100644 --- a/templates/terraform/examples/external_vpn_gateway.tf.erb +++ b/templates/terraform/examples/external_vpn_gateway.tf.erb @@ -1,113 +1,113 @@ resource "google_compute_ha_vpn_gateway" "ha_gateway" { - provider = "google-beta" + provider = google-beta region = "us-central1" name = "<%= ctx[:vars]['ha_vpn_gateway_name'] %>" - network = "${google_compute_network.network.self_link}" + network = google_compute_network.network.self_link } resource "google_compute_external_vpn_gateway" "external_gateway" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['external_gateway_name'] %>" redundancy_type = "SINGLE_IP_INTERNALLY_REDUNDANT" description = "An externally managed VPN gateway" interface { - id = 0 + id = 0 ip_address = "8.8.8.8" } } resource "google_compute_network" "network" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['network_name'] %>" routing_mode = "GLOBAL" auto_create_subnetworks = false } resource "google_compute_subnetwork" 
"network_subnet1" { - provider = "google-beta" + provider = google-beta name = "ha-vpn-subnet-1" ip_cidr_range = "10.0.1.0/24" region = "us-central1" - network = "${google_compute_network.network.self_link}" + network = google_compute_network.network.self_link } resource "google_compute_subnetwork" "network_subnet2" { - provider = "google-beta" + provider = google-beta name = "ha-vpn-subnet-2" ip_cidr_range = "10.0.2.0/24" region = "us-west1" - network = "${google_compute_network.network.self_link}" + network = google_compute_network.network.self_link } resource "google_compute_router" "router1" { - provider = "google-beta" - name = "ha-vpn-router1" - network = "${google_compute_network.network.name}" + provider = google-beta + name = "ha-vpn-router1" + network = google_compute_network.network.name bgp { asn = 64514 } } resource "google_compute_vpn_tunnel" "tunnel1" { - provider = "google-beta" - name = "ha-vpn-tunnel1" - region = "us-central1" - vpn_gateway = "${google_compute_ha_vpn_gateway.ha_gateway.self_link}" - peer_external_gateway = "${google_compute_external_vpn_gateway.external_gateway.self_link}" + provider = google-beta + name = "ha-vpn-tunnel1" + region = "us-central1" + vpn_gateway = google_compute_ha_vpn_gateway.ha_gateway.self_link + peer_external_gateway = google_compute_external_vpn_gateway.external_gateway.self_link peer_external_gateway_interface = 0 - shared_secret = "a secret message" - router = "${google_compute_router.router1.self_link}" - vpn_gateway_interface = 0 + shared_secret = "a secret message" + router = google_compute_router.router1.self_link + vpn_gateway_interface = 0 } resource "google_compute_vpn_tunnel" "tunnel2" { - provider = "google-beta" - name = "ha-vpn-tunnel2" - region = "us-central1" - vpn_gateway = "${google_compute_ha_vpn_gateway.ha_gateway.self_link}" - peer_external_gateway = "${google_compute_external_vpn_gateway.external_gateway.self_link}" + provider = google-beta + name = "ha-vpn-tunnel2" + region = "us-central1" + vpn_gateway = google_compute_ha_vpn_gateway.ha_gateway.self_link + peer_external_gateway = google_compute_external_vpn_gateway.external_gateway.self_link peer_external_gateway_interface = 0 - shared_secret = "a secret message" - router = " ${google_compute_router.router1.self_link}" - vpn_gateway_interface = 1 + shared_secret = "a secret message" + router = " ${google_compute_router.router1.self_link}" + vpn_gateway_interface = 1 } resource "google_compute_router_interface" "router1_interface1" { - provider = "google-beta" + provider = google-beta name = "router1-interface1" - router = "${google_compute_router.router1.name}" + router = google_compute_router.router1.name region = "us-central1" ip_range = "169.254.0.1/30" - vpn_tunnel = "${google_compute_vpn_tunnel.tunnel1.name}" + vpn_tunnel = google_compute_vpn_tunnel.tunnel1.name } resource "google_compute_router_peer" "router1_peer1" { - provider = "google-beta" + provider = google-beta name = "router1-peer1" - router = "${google_compute_router.router1.name}" + router = google_compute_router.router1.name region = "us-central1" peer_ip_address = "169.254.0.2" peer_asn = 64515 advertised_route_priority = 100 - interface = "${google_compute_router_interface.router1_interface1.name}" + interface = google_compute_router_interface.router1_interface1.name } resource "google_compute_router_interface" "router1_interface2" { - provider = "google-beta" + provider = google-beta name = "router1-interface2" - router = "${google_compute_router.router1.name}" + router = 
google_compute_router.router1.name region = "us-central1" ip_range = "169.254.1.1/30" - vpn_tunnel = "${google_compute_vpn_tunnel.tunnel2.name}" + vpn_tunnel = google_compute_vpn_tunnel.tunnel2.name } resource "google_compute_router_peer" "router1_peer2" { - provider = "google-beta" + provider = google-beta name = "router1-peer2" - router = "${google_compute_router.router1.name}" + router = google_compute_router.router1.name region = "us-central1" peer_ip_address = "169.254.1.2" peer_asn = 64515 advertised_route_priority = 100 - interface = "${google_compute_router_interface.router1_interface2.name}" -} \ No newline at end of file + interface = google_compute_router_interface.router1_interface2.name +} diff --git a/templates/terraform/examples/firestore_index_basic.tf.erb b/templates/terraform/examples/firestore_index_basic.tf.erb index b0e79dbc0f20..90fbf4963bb1 100644 --- a/templates/terraform/examples/firestore_index_basic.tf.erb +++ b/templates/terraform/examples/firestore_index_basic.tf.erb @@ -1,5 +1,5 @@ resource "google_firestore_index" "<%= ctx[:primary_resource_id] %>" { - project = "<%= ctx[:test_env_vars]['project_id'] %>" + project = "<%= ctx[:test_env_vars]['project_id'] %>" collection = "chatrooms" diff --git a/templates/terraform/examples/firewall_basic.tf.erb b/templates/terraform/examples/firewall_basic.tf.erb index af0e2f4dc627..7b868126159f 100644 --- a/templates/terraform/examples/firewall_basic.tf.erb +++ b/templates/terraform/examples/firewall_basic.tf.erb @@ -1,6 +1,6 @@ resource "google_compute_firewall" "default" { name = "<%= ctx[:vars]['firewall_name'] %>" - network = "${google_compute_network.default.name}" + network = google_compute_network.default.name allow { protocol = "icmp" diff --git a/templates/terraform/examples/forwarding_rule_basic.tf.erb b/templates/terraform/examples/forwarding_rule_basic.tf.erb index aaa4d9c73d79..47fa4a17245c 100644 --- a/templates/terraform/examples/forwarding_rule_basic.tf.erb +++ b/templates/terraform/examples/forwarding_rule_basic.tf.erb @@ -1,6 +1,6 @@ resource "google_compute_forwarding_rule" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['forwarding_rule_name'] %>" - target = "${google_compute_target_pool.default.self_link}" + target = google_compute_target_pool.default.self_link port_range = "80" } diff --git a/templates/terraform/examples/forwarding_rule_internallb.tf.erb b/templates/terraform/examples/forwarding_rule_internallb.tf.erb index 2867dbf6d0e4..d78072f6392e 100644 --- a/templates/terraform/examples/forwarding_rule_internallb.tf.erb +++ b/templates/terraform/examples/forwarding_rule_internallb.tf.erb @@ -1,19 +1,19 @@ // Forwarding rule for Internal Load Balancing resource "google_compute_forwarding_rule" "<%= ctx[:primary_resource_id] %>" { - name = "<%= ctx[:vars]['forwarding_rule_name'] %>" - region = "us-central1" + name = "<%= ctx[:vars]['forwarding_rule_name'] %>" + region = "us-central1" load_balancing_scheme = "INTERNAL" - backend_service = "${google_compute_region_backend_service.backend.self_link}" + backend_service = google_compute_region_backend_service.backend.self_link all_ports = true - network = "${google_compute_network.default.name}" - subnetwork = "${google_compute_subnetwork.default.name}" + network = google_compute_network.default.name + subnetwork = google_compute_subnetwork.default.name } resource "google_compute_region_backend_service" "backend" { - name = "<%= ctx[:vars]['backend_name'] %>" - region = "us-central1" - health_checks = 
["${google_compute_health_check.hc.self_link}"] + name = "<%= ctx[:vars]['backend_name'] %>" + region = "us-central1" + health_checks = [google_compute_health_check.hc.self_link] } resource "google_compute_health_check" "hc" { @@ -27,7 +27,7 @@ resource "google_compute_health_check" "hc" { } resource "google_compute_network" "default" { - name = "<%= ctx[:vars]['network_name'] %>" + name = "<%= ctx[:vars]['network_name'] %>" auto_create_subnetworks = false } @@ -35,5 +35,5 @@ resource "google_compute_subnetwork" "default" { name = "<%= ctx[:vars]['network_name'] %>" ip_cidr_range = "10.0.0.0/16" region = "us-central1" - network = "${google_compute_network.default.self_link}" + network = google_compute_network.default.self_link } diff --git a/templates/terraform/examples/global_forwarding_rule_http.tf.erb b/templates/terraform/examples/global_forwarding_rule_http.tf.erb index fef12245b099..71225b15e162 100644 --- a/templates/terraform/examples/global_forwarding_rule_http.tf.erb +++ b/templates/terraform/examples/global_forwarding_rule_http.tf.erb @@ -1,19 +1,19 @@ resource "google_compute_global_forwarding_rule" "default" { name = "<%= ctx[:vars]['forwarding_rule_name'] %>" - target = "${google_compute_target_http_proxy.default.self_link}" + target = google_compute_target_http_proxy.default.self_link port_range = "80" } resource "google_compute_target_http_proxy" "default" { name = "<%= ctx[:vars]['http_proxy_name'] %>" description = "a description" - url_map = "${google_compute_url_map.default.self_link}" + url_map = google_compute_url_map.default.self_link } resource "google_compute_url_map" "default" { name = "url-map-<%= ctx[:vars]['http_proxy_name'] %>" description = "a description" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link host_rule { hosts = ["mysite.com"] @@ -22,11 +22,11 @@ resource "google_compute_url_map" "default" { path_matcher { name = "allpaths" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link path_rule { paths = ["/*"] - service = "${google_compute_backend_service.default.self_link}" + service = google_compute_backend_service.default.self_link } } } @@ -37,7 +37,7 @@ resource "google_compute_backend_service" "default" { protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_http_health_check" "default" { diff --git a/templates/terraform/examples/global_forwarding_rule_internal.tf.erb b/templates/terraform/examples/global_forwarding_rule_internal.tf.erb index ef991db33733..aa01994cb81e 100644 --- a/templates/terraform/examples/global_forwarding_rule_internal.tf.erb +++ b/templates/terraform/examples/global_forwarding_rule_internal.tf.erb @@ -1,31 +1,31 @@ resource "google_compute_global_forwarding_rule" "default" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['forwarding_rule_name'] %>" - target = "${google_compute_target_http_proxy.default.self_link}" + target = google_compute_target_http_proxy.default.self_link port_range = "80" load_balancing_scheme = "INTERNAL_SELF_MANAGED" ip_address = "0.0.0.0" metadata_filters { filter_match_criteria = "MATCH_ANY" filter_labels { - name = "PLANET" + name = "PLANET" value = "MARS" } } } resource "google_compute_target_http_proxy" "default" { - provider = 
"google-beta" + provider = google-beta name = "<%= ctx[:vars]['http_proxy_name'] %>" description = "a description" - url_map = "${google_compute_url_map.default.self_link}" + url_map = google_compute_url_map.default.self_link } resource "google_compute_url_map" "default" { - provider = "google-beta" + provider = google-beta name = "url-map-<%= ctx[:vars]['http_proxy_name'] %>" description = "a description" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link host_rule { hosts = ["mysite.com"] @@ -34,17 +34,17 @@ resource "google_compute_url_map" "default" { path_matcher { name = "allpaths" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link path_rule { paths = ["/*"] - service = "${google_compute_backend_service.default.self_link}" + service = google_compute_backend_service.default.self_link } } } resource "google_compute_backend_service" "default" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['backend_service_name'] %>" port_name = "http" protocol = "HTTP" @@ -52,27 +52,27 @@ resource "google_compute_backend_service" "default" { load_balancing_scheme = "INTERNAL_SELF_MANAGED" backend { - group = "${google_compute_instance_group_manager.igm.instance_group}" - balancing_mode = "RATE" - capacity_scaler = 0.4 + group = google_compute_instance_group_manager.igm.instance_group + balancing_mode = "RATE" + capacity_scaler = 0.4 max_rate_per_instance = 50 } - health_checks = ["${google_compute_health_check.default.self_link}"] + health_checks = [google_compute_health_check.default.self_link] } data "google_compute_image" "debian_image" { - provider = "google-beta" + provider = google-beta family = "debian-9" project = "debian-cloud" } resource "google_compute_instance_group_manager" "igm" { - provider = "google-beta" - name = "igm-internal" + provider = google-beta + name = "igm-internal" version { - instance_template = "${google_compute_instance_template.instance_template.self_link}" - name = "primary" + instance_template = google_compute_instance_template.instance_template.self_link + name = "primary" } base_instance_name = "internal-glb" zone = "us-central1-f" @@ -80,7 +80,7 @@ resource "google_compute_instance_group_manager" "igm" { } resource "google_compute_instance_template" "instance_template" { - provider = "google-beta" + provider = google-beta name = "template-<%= ctx[:vars]['backend_service_name'] %>" machine_type = "n1-standard-1" @@ -89,14 +89,14 @@ resource "google_compute_instance_template" "instance_template" { } disk { - source_image = "${data.google_compute_image.debian_image.self_link}" + source_image = data.google_compute_image.debian_image.self_link auto_delete = true boot = true } } resource "google_compute_health_check" "default" { - provider = "google-beta" + provider = google-beta name = "check-<%= ctx[:vars]['backend_service_name'] %>" check_interval_sec = 1 timeout_sec = 1 @@ -105,3 +105,4 @@ resource "google_compute_health_check" "default" { port = "80" } } + diff --git a/templates/terraform/examples/ha_vpn_gateway_basic.tf.erb b/templates/terraform/examples/ha_vpn_gateway_basic.tf.erb index 1624c0316f46..98c135c1621d 100644 --- a/templates/terraform/examples/ha_vpn_gateway_basic.tf.erb +++ b/templates/terraform/examples/ha_vpn_gateway_basic.tf.erb @@ -1,12 +1,12 @@ resource "google_compute_ha_vpn_gateway" "ha_gateway1" { - provider = "google-beta" + provider = 
google-beta region = "us-central1" name = "<%= ctx[:vars]['ha_vpn_gateway1_name'] %>" - network = "${google_compute_network.network1.self_link}" + network = google_compute_network.network1.self_link } resource "google_compute_network" "network1" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['network1_name'] %>" auto_create_subnetworks = false -} \ No newline at end of file +} diff --git a/templates/terraform/examples/ha_vpn_gateway_gcp_to_gcp.tf.erb b/templates/terraform/examples/ha_vpn_gateway_gcp_to_gcp.tf.erb index 71930b4f57ad..abc47c260485 100644 --- a/templates/terraform/examples/ha_vpn_gateway_gcp_to_gcp.tf.erb +++ b/templates/terraform/examples/ha_vpn_gateway_gcp_to_gcp.tf.erb @@ -1,201 +1,201 @@ resource "google_compute_ha_vpn_gateway" "ha_gateway1" { - provider = "google-beta" + provider = google-beta region = "us-central1" name = "<%= ctx[:vars]['ha_vpn_gateway1_name'] %>" - network = "${google_compute_network.network1.self_link}" + network = google_compute_network.network1.self_link } resource "google_compute_ha_vpn_gateway" "ha_gateway2" { - provider = "google-beta" + provider = google-beta region = "us-central1" name = "<%= ctx[:vars]['ha_vpn_gateway2_name'] %>" - network = "${google_compute_network.network2.self_link}" + network = google_compute_network.network2.self_link } resource "google_compute_network" "network1" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['network1_name'] %>" routing_mode = "GLOBAL" auto_create_subnetworks = false } resource "google_compute_network" "network2" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['network2_name'] %>" routing_mode = "GLOBAL" auto_create_subnetworks = false } resource "google_compute_subnetwork" "network1_subnet1" { - provider = "google-beta" + provider = google-beta name = "ha-vpn-subnet-1" ip_cidr_range = "10.0.1.0/24" region = "us-central1" - network = "${google_compute_network.network1.self_link}" + network = google_compute_network.network1.self_link } resource "google_compute_subnetwork" "network1_subnet2" { - provider = "google-beta" + provider = google-beta name = "ha-vpn-subnet-2" ip_cidr_range = "10.0.2.0/24" region = "us-west1" - network = "${google_compute_network.network1.self_link}" + network = google_compute_network.network1.self_link } resource "google_compute_subnetwork" "network2_subnet1" { - provider = "google-beta" + provider = google-beta name = "ha-vpn-subnet-3" ip_cidr_range = "192.168.1.0/24" region = "us-central1" - network = "${google_compute_network.network2.self_link}" + network = google_compute_network.network2.self_link } resource "google_compute_subnetwork" "network2_subnet2" { - provider = "google-beta" + provider = google-beta name = "ha-vpn-subnet-4" ip_cidr_range = "192.168.2.0/24" region = "us-east1" - network = "${google_compute_network.network2.self_link}" + network = google_compute_network.network2.self_link } resource "google_compute_router" "router1" { - provider = "google-beta" - name = "ha-vpn-router1" - network = "${google_compute_network.network1.name}" + provider = google-beta + name = "ha-vpn-router1" + network = google_compute_network.network1.name bgp { asn = 64514 } } resource "google_compute_router" "router2" { - provider = "google-beta" + provider = google-beta name = "ha-vpn-router2" - network = "${google_compute_network.network2.name}" + network = google_compute_network.network2.name bgp { asn = 64515 } } resource "google_compute_vpn_tunnel" "tunnel1" { - provider = "google-beta" - 
name = "ha-vpn-tunnel1" - region = "us-central1" - vpn_gateway = "${google_compute_ha_vpn_gateway.ha_gateway1.self_link}" - peer_gcp_gateway = "${google_compute_ha_vpn_gateway.ha_gateway2.self_link}" - shared_secret = "a secret message" - router = "${google_compute_router.router1.self_link}" + provider = google-beta + name = "ha-vpn-tunnel1" + region = "us-central1" + vpn_gateway = google_compute_ha_vpn_gateway.ha_gateway1.self_link + peer_gcp_gateway = google_compute_ha_vpn_gateway.ha_gateway2.self_link + shared_secret = "a secret message" + router = google_compute_router.router1.self_link vpn_gateway_interface = 0 } resource "google_compute_vpn_tunnel" "tunnel2" { - provider = "google-beta" - name = "ha-vpn-tunnel2" - region = "us-central1" - vpn_gateway = "${google_compute_ha_vpn_gateway.ha_gateway1.self_link}" - peer_gcp_gateway = "${google_compute_ha_vpn_gateway.ha_gateway2.self_link}" - shared_secret = "a secret message" - router = " ${google_compute_router.router1.self_link}" + provider = google-beta + name = "ha-vpn-tunnel2" + region = "us-central1" + vpn_gateway = google_compute_ha_vpn_gateway.ha_gateway1.self_link + peer_gcp_gateway = google_compute_ha_vpn_gateway.ha_gateway2.self_link + shared_secret = "a secret message" + router = google_compute_router.router1.self_link vpn_gateway_interface = 1 } resource "google_compute_vpn_tunnel" "tunnel3" { - provider = "google-beta" - name = "ha-vpn-tunnel3" - region = "us-central1" - vpn_gateway = "${google_compute_ha_vpn_gateway.ha_gateway2.self_link}" - peer_gcp_gateway = "${google_compute_ha_vpn_gateway.ha_gateway1.self_link}" - shared_secret = "a secret message" - router = "${google_compute_router.router2.self_link}" + provider = google-beta + name = "ha-vpn-tunnel3" + region = "us-central1" + vpn_gateway = google_compute_ha_vpn_gateway.ha_gateway2.self_link + peer_gcp_gateway = google_compute_ha_vpn_gateway.ha_gateway1.self_link + shared_secret = "a secret message" + router = google_compute_router.router2.self_link vpn_gateway_interface = 0 } resource "google_compute_vpn_tunnel" "tunnel4" { - provider = "google-beta" - name = "ha-vpn-tunnel4" - region = "us-central1" - vpn_gateway = "${google_compute_ha_vpn_gateway.ha_gateway2.self_link}" - peer_gcp_gateway = "${google_compute_ha_vpn_gateway.ha_gateway1.self_link}" - shared_secret = "a secret message" - router = "${google_compute_router.router2.self_link}" + provider = google-beta + name = "ha-vpn-tunnel4" + region = "us-central1" + vpn_gateway = google_compute_ha_vpn_gateway.ha_gateway2.self_link + peer_gcp_gateway = google_compute_ha_vpn_gateway.ha_gateway1.self_link + shared_secret = "a secret message" + router = google_compute_router.router2.self_link vpn_gateway_interface = 1 } resource "google_compute_router_interface" "router1_interface1" { - provider = "google-beta" + provider = google-beta name = "router1-interface1" - router = "${google_compute_router.router1.name}" + router = google_compute_router.router1.name region = "us-central1" ip_range = "169.254.0.1/30" - vpn_tunnel = "${google_compute_vpn_tunnel.tunnel1.name}" + vpn_tunnel = google_compute_vpn_tunnel.tunnel1.name } resource "google_compute_router_peer" "router1_peer1" { - provider = "google-beta" + provider = google-beta name = "router1-peer1" - router = "${google_compute_router.router1.name}" + router = google_compute_router.router1.name region = "us-central1" peer_ip_address = "169.254.0.2" peer_asn = 64515 advertised_route_priority = 100 - interface = "${google_compute_router_interface.router1_interface1.name}" 
+ interface = google_compute_router_interface.router1_interface1.name } resource "google_compute_router_interface" "router1_interface2" { - provider = "google-beta" + provider = google-beta name = "router1-interface2" - router = "${google_compute_router.router1.name}" + router = google_compute_router.router1.name region = "us-central1" ip_range = "169.254.1.1/30" - vpn_tunnel = "${google_compute_vpn_tunnel.tunnel2.name}" + vpn_tunnel = google_compute_vpn_tunnel.tunnel2.name } resource "google_compute_router_peer" "router1_peer2" { - provider = "google-beta" + provider = google-beta name = "router1-peer2" - router = "${google_compute_router.router1.name}" + router = google_compute_router.router1.name region = "us-central1" peer_ip_address = "169.254.1.2" peer_asn = 64515 advertised_route_priority = 100 - interface = "${google_compute_router_interface.router1_interface2.name}" + interface = google_compute_router_interface.router1_interface2.name } resource "google_compute_router_interface" "router2_interface1" { - provider = "google-beta" + provider = google-beta name = "router2-interface1" - router = "${google_compute_router.router2.name}" + router = google_compute_router.router2.name region = "us-central1" ip_range = "169.254.0.1/30" - vpn_tunnel = "${google_compute_vpn_tunnel.tunnel3.name}" + vpn_tunnel = google_compute_vpn_tunnel.tunnel3.name } resource "google_compute_router_peer" "router2_peer1" { - provider = "google-beta" + provider = google-beta name = "router2-peer1" - router = "${google_compute_router.router2.name}" + router = google_compute_router.router2.name region = "us-central1" peer_ip_address = "169.254.0.2" peer_asn = 64514 advertised_route_priority = 100 - interface = "${google_compute_router_interface.router2_interface1.name}" + interface = google_compute_router_interface.router2_interface1.name } resource "google_compute_router_interface" "router2_interface2" { - provider = "google-beta" + provider = google-beta name = "router2-interface2" - router = "${google_compute_router.router2.name}" + router = google_compute_router.router2.name region = "us-central1" ip_range = "169.254.1.1/30" - vpn_tunnel = "${google_compute_vpn_tunnel.tunnel4.name}" + vpn_tunnel = google_compute_vpn_tunnel.tunnel4.name } resource "google_compute_router_peer" "router2_peer2" { - provider = "google-beta" + provider = google-beta name = "router2-peer2" - router = "${google_compute_router.router2.name}" + router = google_compute_router.router2.name region = "us-central1" peer_ip_address = "169.254.1.2" peer_asn = 64514 advertised_route_priority = 100 - interface = "${google_compute_router_interface.router2_interface2.name}" -} \ No newline at end of file + interface = google_compute_router_interface.router2_interface2.name +} diff --git a/templates/terraform/examples/health_check_http.tf.erb b/templates/terraform/examples/health_check_http.tf.erb index 36ed60ee5395..bb9c776bbb48 100644 --- a/templates/terraform/examples/health_check_http.tf.erb +++ b/templates/terraform/examples/health_check_http.tf.erb @@ -1,10 +1,10 @@ resource "google_compute_health_check" "http-health-check" { - name = "<%= ctx[:vars]['health_check_name'] %>" + name = "<%= ctx[:vars]['health_check_name'] %>" - timeout_sec = 1 - check_interval_sec = 1 + timeout_sec = 1 + check_interval_sec = 1 - http_health_check { - port = 80 - } + http_health_check { + port = 80 + } } diff --git a/templates/terraform/examples/health_check_http2.tf.erb b/templates/terraform/examples/health_check_http2.tf.erb index 757d9bcca504..95c57512b903 
100644 --- a/templates/terraform/examples/health_check_http2.tf.erb +++ b/templates/terraform/examples/health_check_http2.tf.erb @@ -1,10 +1,10 @@ resource "google_compute_health_check" "http2-health-check" { - name = "<%= ctx[:vars]['health_check_name'] %>" + name = "<%= ctx[:vars]['health_check_name'] %>" - timeout_sec = 1 - check_interval_sec = 1 + timeout_sec = 1 + check_interval_sec = 1 - http2_health_check { - port = "443" - } + http2_health_check { + port = "443" + } } diff --git a/templates/terraform/examples/health_check_http2_full.tf.erb b/templates/terraform/examples/health_check_http2_full.tf.erb index b19b9fb23a3f..737c96d8ae8a 100644 --- a/templates/terraform/examples/health_check_http2_full.tf.erb +++ b/templates/terraform/examples/health_check_http2_full.tf.erb @@ -1,5 +1,5 @@ resource "google_compute_health_check" "http2-health-check" { - name = "<%= ctx[:vars]['health_check_name'] %>" + name = "<%= ctx[:vars]['health_check_name'] %>" description = "Health check via http2" timeout_sec = 1 @@ -8,11 +8,11 @@ resource "google_compute_health_check" "http2-health-check" { unhealthy_threshold = 5 http2_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - host = "1.2.3.4" - request_path = "/mypath" - proxy_header = "NONE" - response = "I AM HEALTHY" + host = "1.2.3.4" + request_path = "/mypath" + proxy_header = "NONE" + response = "I AM HEALTHY" } } diff --git a/templates/terraform/examples/health_check_http_full.tf.erb b/templates/terraform/examples/health_check_http_full.tf.erb index 1cb552c0c813..4d8a430c3343 100644 --- a/templates/terraform/examples/health_check_http_full.tf.erb +++ b/templates/terraform/examples/health_check_http_full.tf.erb @@ -1,5 +1,5 @@ resource "google_compute_health_check" "http-health-check" { - name = "<%= ctx[:vars]['health_check_name'] %>" + name = "<%= ctx[:vars]['health_check_name'] %>" description = "Health check via http" timeout_sec = 1 @@ -8,11 +8,11 @@ resource "google_compute_health_check" "http-health-check" { unhealthy_threshold = 5 http_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - host = "1.2.3.4" - request_path = "/mypath" - proxy_header = "NONE" - response = "I AM HEALTHY" + host = "1.2.3.4" + request_path = "/mypath" + proxy_header = "NONE" + response = "I AM HEALTHY" } } diff --git a/templates/terraform/examples/health_check_https.tf.erb b/templates/terraform/examples/health_check_https.tf.erb index 0c41868b17ac..2d6e932cb369 100644 --- a/templates/terraform/examples/health_check_https.tf.erb +++ b/templates/terraform/examples/health_check_https.tf.erb @@ -1,10 +1,10 @@ resource "google_compute_health_check" "https-health-check" { - name = "<%= ctx[:vars]['health_check_name'] %>" + name = "<%= ctx[:vars]['health_check_name'] %>" - timeout_sec = 1 - check_interval_sec = 1 + timeout_sec = 1 + check_interval_sec = 1 - https_health_check { - port = "443" - } + https_health_check { + port = "443" + } } diff --git a/templates/terraform/examples/health_check_https_full.tf.erb b/templates/terraform/examples/health_check_https_full.tf.erb index 9369239fd687..0816179dd71d 100644 --- a/templates/terraform/examples/health_check_https_full.tf.erb +++ b/templates/terraform/examples/health_check_https_full.tf.erb @@ -1,5 +1,5 @@ resource "google_compute_health_check" "https-health-check" { - name = "<%= ctx[:vars]['health_check_name'] %>" + name = "<%= ctx[:vars]['health_check_name'] %>" description = 
"Health check via https" timeout_sec = 1 @@ -8,11 +8,11 @@ resource "google_compute_health_check" "https-health-check" { unhealthy_threshold = 5 https_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - host = "1.2.3.4" - request_path = "/mypath" - proxy_header = "NONE" - response = "I AM HEALTHY" + host = "1.2.3.4" + request_path = "/mypath" + proxy_header = "NONE" + response = "I AM HEALTHY" } } diff --git a/templates/terraform/examples/health_check_ssl.tf.erb b/templates/terraform/examples/health_check_ssl.tf.erb index 0521af93a3bd..5e340b3b429c 100644 --- a/templates/terraform/examples/health_check_ssl.tf.erb +++ b/templates/terraform/examples/health_check_ssl.tf.erb @@ -1,12 +1,10 @@ resource "google_compute_health_check" "ssl-health-check" { - name = "<%= ctx[:vars]['health_check_name'] %>" + name = "<%= ctx[:vars]['health_check_name'] %>" - timeout_sec = 1 - check_interval_sec = 1 + timeout_sec = 1 + check_interval_sec = 1 - ssl_health_check { - port = "443" - } + ssl_health_check { + port = "443" + } } - - diff --git a/templates/terraform/examples/health_check_ssl_full.tf.erb b/templates/terraform/examples/health_check_ssl_full.tf.erb index 30e78578a421..9ea722c5d49a 100644 --- a/templates/terraform/examples/health_check_ssl_full.tf.erb +++ b/templates/terraform/examples/health_check_ssl_full.tf.erb @@ -1,5 +1,5 @@ resource "google_compute_health_check" "ssl-health-check" { - name = "<%= ctx[:vars]['health_check_name'] %>" + name = "<%= ctx[:vars]['health_check_name'] %>" description = "Health check via ssl" timeout_sec = 1 @@ -8,10 +8,10 @@ resource "google_compute_health_check" "ssl-health-check" { unhealthy_threshold = 5 ssl_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - request = "ARE YOU HEALTHY?" - proxy_header = "NONE" - response = "I AM HEALTHY" + request = "ARE YOU HEALTHY?" + proxy_header = "NONE" + response = "I AM HEALTHY" } } diff --git a/templates/terraform/examples/health_check_tcp.tf.erb b/templates/terraform/examples/health_check_tcp.tf.erb index a0bcb7def6bd..c4ca9033189d 100644 --- a/templates/terraform/examples/health_check_tcp.tf.erb +++ b/templates/terraform/examples/health_check_tcp.tf.erb @@ -1,10 +1,10 @@ resource "google_compute_health_check" "tcp-health-check" { - name = "<%= ctx[:vars]['health_check_name'] %>" + name = "<%= ctx[:vars]['health_check_name'] %>" - timeout_sec = 1 - check_interval_sec = 1 + timeout_sec = 1 + check_interval_sec = 1 - tcp_health_check { - port = "80" - } + tcp_health_check { + port = "80" + } } diff --git a/templates/terraform/examples/health_check_tcp_full.tf.erb b/templates/terraform/examples/health_check_tcp_full.tf.erb index e452ebc54f60..2baf567275a8 100644 --- a/templates/terraform/examples/health_check_tcp_full.tf.erb +++ b/templates/terraform/examples/health_check_tcp_full.tf.erb @@ -1,5 +1,5 @@ resource "google_compute_health_check" "tcp-health-check" { - name = "<%= ctx[:vars]['health_check_name'] %>" + name = "<%= ctx[:vars]['health_check_name'] %>" description = "Health check via tcp" timeout_sec = 1 @@ -8,11 +8,10 @@ resource "google_compute_health_check" "tcp-health-check" { unhealthy_threshold = 5 tcp_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - request = "ARE YOU HEALTHY?" - proxy_header = "NONE" - response = "I AM HEALTHY" + request = "ARE YOU HEALTHY?" 
+ proxy_header = "NONE" + response = "I AM HEALTHY" } } - diff --git a/templates/terraform/examples/healthcare_dataset_basic.tf.erb b/templates/terraform/examples/healthcare_dataset_basic.tf.erb index 184b6ff103cb..a8c8c6b98615 100644 --- a/templates/terraform/examples/healthcare_dataset_basic.tf.erb +++ b/templates/terraform/examples/healthcare_dataset_basic.tf.erb @@ -1,6 +1,6 @@ resource "google_healthcare_dataset" "default" { - name = "<%= ctx[:vars]['dataset_name'] %>" - location = "us-central1" - time_zone = "UTC" - provider = "google-beta" + name = "<%= ctx[:vars]['dataset_name'] %>" + location = "us-central1" + time_zone = "UTC" + provider = google-beta } diff --git a/templates/terraform/examples/healthcare_dicom_store_basic.tf.erb b/templates/terraform/examples/healthcare_dicom_store_basic.tf.erb index f8bef37a4444..8828bb2294bb 100644 --- a/templates/terraform/examples/healthcare_dicom_store_basic.tf.erb +++ b/templates/terraform/examples/healthcare_dicom_store_basic.tf.erb @@ -1,25 +1,24 @@ resource "google_healthcare_dicom_store" "default" { - name = "<%= ctx[:vars]['dicom_store_name'] %>" - dataset = "${google_healthcare_dataset.dataset.id}" + name = "<%= ctx[:vars]['dicom_store_name'] %>" + dataset = google_healthcare_dataset.dataset.id notification_config { - pubsub_topic = "${google_pubsub_topic.topic.id}" + pubsub_topic = google_pubsub_topic.topic.id } labels = { label1 = "labelvalue1" } - provider = "google-beta" + provider = google-beta } resource "google_pubsub_topic" "topic" { - name = "<%= ctx[:vars]['pubsub_topic']%>" - provider = "google-beta" + name = "<%= ctx[:vars]['pubsub_topic']%>" + provider = google-beta } resource "google_healthcare_dataset" "dataset" { - name = "<%= ctx[:vars]['dataset_name'] %>" - location = "us-central1" - provider = "google-beta" + name = "<%= ctx[:vars]['dataset_name'] %>" + location = "us-central1" + provider = google-beta } - diff --git a/templates/terraform/examples/healthcare_fhir_store_basic.tf.erb b/templates/terraform/examples/healthcare_fhir_store_basic.tf.erb index 965b57f26db6..6c500b9c872b 100644 --- a/templates/terraform/examples/healthcare_fhir_store_basic.tf.erb +++ b/templates/terraform/examples/healthcare_fhir_store_basic.tf.erb @@ -1,6 +1,6 @@ resource "google_healthcare_fhir_store" "default" { - name = "<%= ctx[:vars]['fhir_store_name'] %>" - dataset = "${google_healthcare_dataset.dataset.id}" + name = "<%= ctx[:vars]['fhir_store_name'] %>" + dataset = google_healthcare_dataset.dataset.id enable_update_create = false disable_referential_integrity = false @@ -8,23 +8,22 @@ resource "google_healthcare_fhir_store" "default" { enable_history_import = false notification_config { - pubsub_topic = "${google_pubsub_topic.topic.id}" + pubsub_topic = google_pubsub_topic.topic.id } labels = { label1 = "labelvalue1" } - provider = "google-beta" + provider = google-beta } resource "google_pubsub_topic" "topic" { - name = "<%= ctx[:vars]['pubsub_topic']%>" - provider = "google-beta" + name = "<%= ctx[:vars]['pubsub_topic']%>" + provider = google-beta } resource "google_healthcare_dataset" "dataset" { - name = "<%= ctx[:vars]['dataset_name'] %>" - location = "us-central1" - provider = "google-beta" + name = "<%= ctx[:vars]['dataset_name'] %>" + location = "us-central1" + provider = google-beta } - diff --git a/templates/terraform/examples/healthcare_hl7_v2_store_basic.tf.erb b/templates/terraform/examples/healthcare_hl7_v2_store_basic.tf.erb index ec3e35057c95..c49aba55018e 100644 --- 
a/templates/terraform/examples/healthcare_hl7_v2_store_basic.tf.erb +++ b/templates/terraform/examples/healthcare_hl7_v2_store_basic.tf.erb @@ -1,30 +1,29 @@ resource "google_healthcare_hl7_v2_store" "default" { - name = "<%= ctx[:vars]['hl7_v2_store_name'] %>" - dataset = "${google_healthcare_dataset.dataset.id}" + name = "<%= ctx[:vars]['hl7_v2_store_name'] %>" + dataset = google_healthcare_dataset.dataset.id parser_config { - allow_null_header = false + allow_null_header = false segment_terminator = "Jw==" } notification_config { - pubsub_topic = "${google_pubsub_topic.topic.id}" + pubsub_topic = google_pubsub_topic.topic.id } labels = { label1 = "labelvalue1" } - provider = "google-beta" + provider = google-beta } resource "google_pubsub_topic" "topic" { - name = "<%= ctx[:vars]['pubsub_topic']%>" - provider = "google-beta" + name = "<%= ctx[:vars]['pubsub_topic']%>" + provider = google-beta } resource "google_healthcare_dataset" "dataset" { - name = "<%= ctx[:vars]['dataset_name'] %>" - location = "us-central1" - provider = "google-beta" + name = "<%= ctx[:vars]['dataset_name'] %>" + location = "us-central1" + provider = google-beta } - diff --git a/templates/terraform/examples/iap_app_engine_service.tf.erb b/templates/terraform/examples/iap_app_engine_service.tf.erb index 48ce29f3d49d..c1ec0be21d29 100644 --- a/templates/terraform/examples/iap_app_engine_service.tf.erb +++ b/templates/terraform/examples/iap_app_engine_service.tf.erb @@ -6,36 +6,36 @@ resource "google_project" "my_project" { } resource "google_project_service" "project_service" { - project = "${google_project.my_project.project_id}" + project = google_project.my_project.project_id service = "iap.googleapis.com" } resource "google_project_service" "cloudbuild_service" { - project = "${google_project_service.project_service.project}" + project = google_project_service.project_service.project service = "cloudbuild.googleapis.com" } resource "google_app_engine_application" "app" { - project = "${google_project_service.cloudbuild_service.project}" + project = google_project_service.cloudbuild_service.project location_id = "us-central" } resource "google_storage_bucket" "bucket" { - project = "${google_app_engine_application.app.project}" + project = google_app_engine_application.app.project name = "appengine-static-content-%{random_suffix}" } resource "google_storage_bucket_object" "object" { - name = "hello-world.zip" - bucket = "${google_storage_bucket.bucket.name}" - source = "./test-fixtures/appengine/hello-world.zip" + name = "hello-world.zip" + bucket = google_storage_bucket.bucket.name + source = "./test-fixtures/appengine/hello-world.zip" } resource "google_app_engine_standard_app_version" "version" { - project = "${google_app_engine_application.app.project}" - version_id = "v2" - service = "default" - runtime = "nodejs10" + project = google_app_engine_application.app.project + version_id = "v2" + service = "default" + runtime = "nodejs10" noop_on_destroy = true entrypoint { shell = "node ./app.js" @@ -48,4 +48,4 @@ resource "google_app_engine_standard_app_version" "version" { env_variables = { port = "8080" } -} \ No newline at end of file +} diff --git a/templates/terraform/examples/iap_app_engine_version.tf.erb b/templates/terraform/examples/iap_app_engine_version.tf.erb index 70f741cfa5ea..bcadc9012d03 100644 --- a/templates/terraform/examples/iap_app_engine_version.tf.erb +++ b/templates/terraform/examples/iap_app_engine_version.tf.erb @@ -1,17 +1,17 @@ resource "google_storage_bucket" "bucket" { - name = 
"appengine-static-content-%{random_suffix}" + name = "appengine-static-content-%{random_suffix}" } resource "google_storage_bucket_object" "object" { - name = "hello-world.zip" - bucket = "${google_storage_bucket.bucket.name}" - source = "./test-fixtures/appengine/hello-world.zip" + name = "hello-world.zip" + bucket = google_storage_bucket.bucket.name + source = "./test-fixtures/appengine/hello-world.zip" } resource "google_app_engine_standard_app_version" "version" { - version_id = "%{random_suffix}" - service = "default" - runtime = "nodejs10" + version_id = "%{random_suffix}" + service = "default" + runtime = "nodejs10" noop_on_destroy = false entrypoint { shell = "node ./app.js" @@ -24,4 +24,4 @@ resource "google_app_engine_standard_app_version" "version" { env_variables = { port = "8080" } -} \ No newline at end of file +} diff --git a/templates/terraform/examples/iap_appengine.tf.erb b/templates/terraform/examples/iap_appengine.tf.erb index 931226248fd6..a1af1e206dc2 100644 --- a/templates/terraform/examples/iap_appengine.tf.erb +++ b/templates/terraform/examples/iap_appengine.tf.erb @@ -1,15 +1,15 @@ resource "google_project" "my_project" { - name = "%{project_id}" - project_id = "%{project_id}" - org_id = "%{org_id}" + name = "%{project_id}" + project_id = "%{project_id}" + org_id = "%{org_id}" } resource "google_project_service" "project_service" { - project = "${google_project.my_project.project_id}" - service = "iap.googleapis.com" + project = google_project.my_project.project_id + service = "iap.googleapis.com" } resource "google_app_engine_application" "app" { - project = "${google_project_service.project_service.project}" - location_id = "us-central" + project = google_project_service.project_service.project + location_id = "us-central" } diff --git a/templates/terraform/examples/iap_project.tf.erb b/templates/terraform/examples/iap_project.tf.erb index 8d93a6af5a0e..53737074febb 100644 --- a/templates/terraform/examples/iap_project.tf.erb +++ b/templates/terraform/examples/iap_project.tf.erb @@ -1,10 +1,10 @@ resource "google_project" "project" { - project_id = "tf-test%{random_suffix}" - name = "tf-test%{random_suffix}" - org_id = "%{org_id}" + project_id = "tf-test%{random_suffix}" + name = "tf-test%{random_suffix}" + org_id = "%{org_id}" } resource "google_project_service" "project_service" { - project = "${google_project.project.project_id}" - service = "iap.googleapis.com" + project = google_project.project.project_id + service = "iap.googleapis.com" } diff --git a/templates/terraform/examples/instance_with_ip.tf.erb b/templates/terraform/examples/instance_with_ip.tf.erb index bd852bb598d1..eb5e63acf2f0 100644 --- a/templates/terraform/examples/instance_with_ip.tf.erb +++ b/templates/terraform/examples/instance_with_ip.tf.erb @@ -3,25 +3,25 @@ resource "google_compute_address" "<%= ctx[:primary_resource_id] %>" { } data "google_compute_image" "debian_image" { - family = "debian-9" - project = "debian-cloud" + family = "debian-9" + project = "debian-cloud" } resource "google_compute_instance" "instance_with_ip" { - name = "<%= ctx[:vars]['instance_name'] %>" - machine_type = "f1-micro" - zone = "us-central1-a" + name = "<%= ctx[:vars]['instance_name'] %>" + machine_type = "f1-micro" + zone = "us-central1-a" - boot_disk { - initialize_params{ - image = "${data.google_compute_image.debian_image.self_link}" - } - } + boot_disk { + initialize_params { + image = data.google_compute_image.debian_image.self_link + } + } - network_interface { - network = "default" - 
access_config { - nat_ip = "${google_compute_address.static.address}" - } - } + network_interface { + network = "default" + access_config { + nat_ip = google_compute_address.static.address + } + } } diff --git a/templates/terraform/examples/interconnect_attachment_basic.tf.erb b/templates/terraform/examples/interconnect_attachment_basic.tf.erb index d720bf118c68..fbba255e8d09 100644 --- a/templates/terraform/examples/interconnect_attachment_basic.tf.erb +++ b/templates/terraform/examples/interconnect_attachment_basic.tf.erb @@ -1,10 +1,10 @@ resource "google_compute_interconnect_attachment" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['interconnect_attachment_name'] %>" interconnect = "my-interconnect-id" - router = "${google_compute_router.foobar.self_link}" + router = google_compute_router.foobar.self_link } resource "google_compute_router" "foobar" { name = "<%= ctx[:vars]['router_name'] %>" - network = "${google_compute_network.foobar.name}" + network = google_compute_network.foobar.name } diff --git a/templates/terraform/examples/kms_crypto_key_asymmetric_sign.tf.erb b/templates/terraform/examples/kms_crypto_key_asymmetric_sign.tf.erb index 9c2533190adc..2d3152d67351 100644 --- a/templates/terraform/examples/kms_crypto_key_asymmetric_sign.tf.erb +++ b/templates/terraform/examples/kms_crypto_key_asymmetric_sign.tf.erb @@ -5,7 +5,7 @@ resource "google_kms_key_ring" "keyring" { resource "google_kms_crypto_key" "<%= ctx[:primary_resource_id] %>" { name = "crypto-key-example" - key_ring = "${google_kms_key_ring.keyring.self_link}" + key_ring = google_kms_key_ring.keyring.self_link purpose = "ASYMMETRIC_SIGN" version_template { diff --git a/templates/terraform/examples/kms_crypto_key_basic.tf.erb b/templates/terraform/examples/kms_crypto_key_basic.tf.erb index 2444cfc0ded4..9e95e976cce0 100644 --- a/templates/terraform/examples/kms_crypto_key_basic.tf.erb +++ b/templates/terraform/examples/kms_crypto_key_basic.tf.erb @@ -1,11 +1,11 @@ resource "google_kms_key_ring" "keyring" { - name = "keyring-example" + name = "keyring-example" location = "global" } resource "google_kms_crypto_key" "<%= ctx[:primary_resource_id] %>" { name = "crypto-key-example" - key_ring = "${google_kms_key_ring.keyring.self_link}" + key_ring = google_kms_key_ring.keyring.self_link rotation_period = "100000s" lifecycle { diff --git a/templates/terraform/examples/kms_key_ring_basic.tf.erb b/templates/terraform/examples/kms_key_ring_basic.tf.erb index 70ccead587de..998e29d1f832 100644 --- a/templates/terraform/examples/kms_key_ring_basic.tf.erb +++ b/templates/terraform/examples/kms_key_ring_basic.tf.erb @@ -1,4 +1,4 @@ resource "google_kms_key_ring" "<%= ctx[:primary_resource_id] %>" { - name = "keyring-example" + name = "keyring-example" location = "global" } diff --git a/templates/terraform/examples/logging_metric_basic.tf.erb b/templates/terraform/examples/logging_metric_basic.tf.erb index aa2398c81b24..01efe37906eb 100644 --- a/templates/terraform/examples/logging_metric_basic.tf.erb +++ b/templates/terraform/examples/logging_metric_basic.tf.erb @@ -1,24 +1,26 @@ resource "google_logging_metric" "<%= ctx[:primary_resource_id] %>" { - name = "<%= ctx[:vars]["logging_metric_name"] %>" + name = "<%= ctx[:vars]["logging_metric_name"] %>" filter = "resource.type=gae_app AND severity>=ERROR" metric_descriptor { metric_kind = "DELTA" - value_type = "DISTRIBUTION" - unit = "1" + value_type = "DISTRIBUTION" + unit = "1" labels { - key = "mass" - value_type = "STRING" - description = "amount of matter" + key = 
"mass" + value_type = "STRING" + description = "amount of matter" } display_name = "My metric" } value_extractor = "EXTRACT(jsonPayload.request)" - label_extractors = { "mass": "EXTRACT(jsonPayload.request)" } + label_extractors = { + "mass" = "EXTRACT(jsonPayload.request)" + } bucket_options { linear_buckets { num_finite_buckets = 3 - width = 1 - offset = 1 + width = 1 + offset = 1 } } } diff --git a/templates/terraform/examples/logging_metric_counter_basic.tf.erb b/templates/terraform/examples/logging_metric_counter_basic.tf.erb index 018bed142a4d..f6af100da121 100644 --- a/templates/terraform/examples/logging_metric_counter_basic.tf.erb +++ b/templates/terraform/examples/logging_metric_counter_basic.tf.erb @@ -1,8 +1,8 @@ resource "google_logging_metric" "<%= ctx[:primary_resource_id] %>" { - name = "<%= ctx[:vars]["logging_metric_name"] %>" + name = "<%= ctx[:vars]["logging_metric_name"] %>" filter = "resource.type=gae_app AND severity>=ERROR" metric_descriptor { metric_kind = "DELTA" - value_type = "INT64" + value_type = "INT64" } } diff --git a/templates/terraform/examples/logging_metric_counter_labels.tf.erb b/templates/terraform/examples/logging_metric_counter_labels.tf.erb index f313747ab6cc..b3a92374c915 100644 --- a/templates/terraform/examples/logging_metric_counter_labels.tf.erb +++ b/templates/terraform/examples/logging_metric_counter_labels.tf.erb @@ -1,14 +1,16 @@ resource "google_logging_metric" "<%= ctx[:primary_resource_id] %>" { - name = "<%= ctx[:vars]["logging_metric_name"] %>" + name = "<%= ctx[:vars]["logging_metric_name"] %>" filter = "resource.type=gae_app AND severity>=ERROR" metric_descriptor { metric_kind = "DELTA" - value_type = "INT64" + value_type = "INT64" labels { - key = "mass" - value_type = "STRING" - description = "amount of matter" + key = "mass" + value_type = "STRING" + description = "amount of matter" } } - label_extractors = { "mass": "EXTRACT(jsonPayload.request)" } + label_extractors = { + "mass" = "EXTRACT(jsonPayload.request)" + } } diff --git a/templates/terraform/examples/managed_ssl_certificate_basic.tf.erb b/templates/terraform/examples/managed_ssl_certificate_basic.tf.erb index b9659404dea7..fce3f3fe2403 100644 --- a/templates/terraform/examples/managed_ssl_certificate_basic.tf.erb +++ b/templates/terraform/examples/managed_ssl_certificate_basic.tf.erb @@ -1,5 +1,5 @@ resource "google_compute_managed_ssl_certificate" "default" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['cert_name'] %>" @@ -9,20 +9,20 @@ resource "google_compute_managed_ssl_certificate" "default" { } resource "google_compute_target_https_proxy" "default" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['proxy_name'] %>" - url_map = "${google_compute_url_map.default.self_link}" - ssl_certificates = ["${google_compute_managed_ssl_certificate.default.self_link}"] + url_map = google_compute_url_map.default.self_link + ssl_certificates = [google_compute_managed_ssl_certificate.default.self_link] } resource "google_compute_url_map" "default" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['url_map_name'] %>" description = "a description" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link host_rule { hosts = ["sslcert.tf-test.club"] @@ -31,28 +31,28 @@ resource "google_compute_url_map" "default" { path_matcher { name = "allpaths" - default_service = 
"${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link path_rule { paths = ["/*"] - service = "${google_compute_backend_service.default.self_link}" + service = google_compute_backend_service.default.self_link } } } resource "google_compute_backend_service" "default" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['backend_service_name'] %>" port_name = "http" protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_http_health_check" "default" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['http_health_check_name'] %>" request_path = "/" @@ -61,31 +61,31 @@ resource "google_compute_http_health_check" "default" { } resource "google_dns_managed_zone" "zone" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['dns_zone_name'] %>" dns_name = "sslcert.tf-test.club." } resource "google_compute_global_forwarding_rule" "default" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['forwarding_rule_name'] %>" - target = "${google_compute_target_https_proxy.default.self_link}" + target = google_compute_target_https_proxy.default.self_link port_range = 443 } resource "google_dns_record_set" "set" { - provider = "google-beta" + provider = google-beta name = "sslcert.tf-test.club." type = "A" ttl = 3600 - managed_zone = "${google_dns_managed_zone.zone.name}" - rrdatas = ["${google_compute_global_forwarding_rule.default.ip_address}"] + managed_zone = google_dns_managed_zone.zone.name + rrdatas = [google_compute_global_forwarding_rule.default.ip_address] } -provider "google-beta"{ +provider "google-beta" { region = "us-central1" zone = "us-central1-a" } diff --git a/templates/terraform/examples/ml_model_basic.tf.erb b/templates/terraform/examples/ml_model_basic.tf.erb index 879090fae990..b99930e9f974 100644 --- a/templates/terraform/examples/ml_model_basic.tf.erb +++ b/templates/terraform/examples/ml_model_basic.tf.erb @@ -1,5 +1,5 @@ resource "google_ml_engine_model" "<%= ctx[:primary_resource_id] -%>" { - name = "<%= ctx[:vars]['model_name'] -%>" + name = "<%= ctx[:vars]['model_name'] -%>" description = "My model" - regions = ["us-central1"] + regions = ["us-central1"] } diff --git a/templates/terraform/examples/ml_model_full.tf.erb b/templates/terraform/examples/ml_model_full.tf.erb index 7590028bce17..5bd76110e7e9 100644 --- a/templates/terraform/examples/ml_model_full.tf.erb +++ b/templates/terraform/examples/ml_model_full.tf.erb @@ -1,10 +1,10 @@ resource "google_ml_engine_model" "<%= ctx[:primary_resource_id] -%>" { - name = "<%= ctx[:vars]['model_name'] -%>" + name = "<%= ctx[:vars]['model_name'] -%>" description = "My model" - regions = ["us-central1"] - labels = { + regions = ["us-central1"] + labels = { my_model = "foo" } - online_prediction_logging = true + online_prediction_logging = true online_prediction_console_logging = true } diff --git a/templates/terraform/examples/monitoring_alert_policy_basic.tf.erb b/templates/terraform/examples/monitoring_alert_policy_basic.tf.erb index b87a791c9158..0a3b2f487b40 100644 --- a/templates/terraform/examples/monitoring_alert_policy_basic.tf.erb +++ b/templates/terraform/examples/monitoring_alert_policy_basic.tf.erb @@ -1,14 +1,14 @@ resource "google_monitoring_alert_policy" "<%= ctx[:primary_resource_id] %>" { display_name = "<%= 
ctx[:vars]["alert_policy_display_name"] %>" - combiner = "OR" + combiner = "OR" conditions { display_name = "test condition" condition_threshold { - filter = "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"" - duration = "60s" + filter = "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"" + duration = "60s" comparison = "COMPARISON_GT" aggregations { - alignment_period = "60s" + alignment_period = "60s" per_series_aligner = "ALIGN_RATE" } } diff --git a/templates/terraform/examples/monitoring_group_subgroup.tf.erb b/templates/terraform/examples/monitoring_group_subgroup.tf.erb index 6d4a4b70536f..c433583ef14b 100644 --- a/templates/terraform/examples/monitoring_group_subgroup.tf.erb +++ b/templates/terraform/examples/monitoring_group_subgroup.tf.erb @@ -1,10 +1,10 @@ resource "google_monitoring_group" "parent" { display_name = "tf-test <%= ctx[:vars]["display_name"] %>" - filter = "resource.metadata.region=\"europe-west2\"" + filter = "resource.metadata.region=\"europe-west2\"" } resource "google_monitoring_group" "<%= ctx[:primary_resource_id] %>" { display_name = "tf-test <%= ctx[:vars]["display_name"] %>" - filter = "resource.metadata.region=\"europe-west2\"" - parent_name = "${google_monitoring_group.parent.name}" + filter = "resource.metadata.region=\"europe-west2\"" + parent_name = google_monitoring_group.parent.name } diff --git a/templates/terraform/examples/network_endpoint.tf.erb b/templates/terraform/examples/network_endpoint.tf.erb index 4e51553c5714..52ad5025ebb5 100644 --- a/templates/terraform/examples/network_endpoint.tf.erb +++ b/templates/terraform/examples/network_endpoint.tf.erb @@ -1,9 +1,9 @@ resource "google_compute_network_endpoint" "<%= ctx[:primary_resource_id] %>" { - network_endpoint_group = "${google_compute_network_endpoint_group.neg.name}" + network_endpoint_group = google_compute_network_endpoint_group.neg.name - instance = "${google_compute_instance.endpoint-instance.name}" - port = "${google_compute_network_endpoint_group.neg.default_port}" - ip_address = "${google_compute_instance.endpoint-instance.network_interface.0.network_ip}" + instance = google_compute_instance.endpoint-instance.name + port = google_compute_network_endpoint_group.neg.default_port + ip_address = google_compute_instance.endpoint-instance.network_interface[0].network_ip } data "google_compute_image" "my_image" { @@ -12,31 +12,32 @@ data "google_compute_image" "my_image" { } resource "google_compute_instance" "endpoint-instance" { - name = "<%= ctx[:vars]['instance_name'] %>" + name = "<%= ctx[:vars]['instance_name'] %>" machine_type = "n1-standard-1" boot_disk { - initialize_params{ - image = "${data.google_compute_image.my_image.self_link}" + initialize_params { + image = data.google_compute_image.my_image.self_link } } network_interface { - subnetwork = "${google_compute_subnetwork.default.self_link}" - access_config { } + subnetwork = google_compute_subnetwork.default.self_link + access_config { + } } } resource "google_compute_network_endpoint_group" "group" { name = "<%= ctx[:vars]['neg_name'] %>" - network = "${google_compute_network.default.self_link}" - subnetwork = "${google_compute_subnetwork.default.self_link}" + network = google_compute_network.default.self_link + subnetwork = google_compute_subnetwork.default.self_link default_port = "90" zone = "us-central1-a" } resource "google_compute_network" "default" { - name = "<%= ctx[:vars]['network_name'] %>" + name = 
"<%= ctx[:vars]['network_name'] %>" auto_create_subnetworks = false } @@ -44,5 +45,5 @@ resource "google_compute_subnetwork" "default" { name = "<%= ctx[:vars]['subnetwork_name'] %>" ip_cidr_range = "10.0.0.1/16" region = "us-central1" - network = "${google_compute_network.default.self_link}" + network = google_compute_network.default.self_link } diff --git a/templates/terraform/examples/network_endpoint_group.tf.erb b/templates/terraform/examples/network_endpoint_group.tf.erb index db3200ee6ccf..693d60782028 100644 --- a/templates/terraform/examples/network_endpoint_group.tf.erb +++ b/templates/terraform/examples/network_endpoint_group.tf.erb @@ -1,13 +1,13 @@ resource "google_compute_network_endpoint_group" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['neg_name'] %>" - network = "${google_compute_network.default.self_link}" - subnetwork = "${google_compute_subnetwork.default.self_link}" + network = google_compute_network.default.self_link + subnetwork = google_compute_subnetwork.default.self_link default_port = "90" zone = "us-central1-a" } resource "google_compute_network" "default" { - name = "<%= ctx[:vars]['network_name'] %>" + name = "<%= ctx[:vars]['network_name'] %>" auto_create_subnetworks = false } @@ -15,5 +15,5 @@ resource "google_compute_subnetwork" "default" { name = "<%= ctx[:vars]['subnetwork_name'] %>" ip_cidr_range = "10.0.0.0/16" region = "us-central1" - network = "${google_compute_network.default.self_link}" + network = google_compute_network.default.self_link } diff --git a/templates/terraform/examples/node_group_basic.tf.erb b/templates/terraform/examples/node_group_basic.tf.erb index fc303a227944..821d7e16cb40 100644 --- a/templates/terraform/examples/node_group_basic.tf.erb +++ b/templates/terraform/examples/node_group_basic.tf.erb @@ -3,16 +3,16 @@ data "google_compute_node_types" "central1a" { } resource "google_compute_node_template" "soletenant-tmpl" { - name = "<%= ctx[:vars]['template_name'] %>" - region = "us-central1" - node_type = "${data.google_compute_node_types.central1a.names[0]}" + name = "<%= ctx[:vars]['template_name'] %>" + region = "us-central1" + node_type = data.google_compute_node_types.central1a.names[0] } resource "google_compute_node_group" "<%= ctx[:primary_resource_id] %>" { - name = "<%= ctx[:vars]['group_name'] %>" - zone = "us-central1-a" + name = "<%= ctx[:vars]['group_name'] %>" + zone = "us-central1-a" description = "example google_compute_node_group for Terraform Google Provider" - size = 1 - node_template = "${google_compute_node_template.soletenant-tmpl.self_link}" + size = 1 + node_template = google_compute_node_template.soletenant-tmpl.self_link } diff --git a/templates/terraform/examples/node_template_basic.tf.erb b/templates/terraform/examples/node_template_basic.tf.erb index b67e465e6148..7dc0ce41c03d 100644 --- a/templates/terraform/examples/node_template_basic.tf.erb +++ b/templates/terraform/examples/node_template_basic.tf.erb @@ -3,7 +3,7 @@ data "google_compute_node_types" "central1a" { } resource "google_compute_node_template" "<%= ctx[:primary_resource_id] %>" { - name = "<%= ctx[:vars]['template_name'] %>" - region = "us-central1" - node_type = "${data.google_compute_node_types.central1a.names[0]}" + name = "<%= ctx[:vars]['template_name'] %>" + region = "us-central1" + node_type = data.google_compute_node_types.central1a.names[0] } diff --git a/templates/terraform/examples/node_template_server_binding.tf.erb b/templates/terraform/examples/node_template_server_binding.tf.erb index 
2aac44490164..f300437aa4dc 100644 --- a/templates/terraform/examples/node_template_server_binding.tf.erb +++ b/templates/terraform/examples/node_template_server_binding.tf.erb @@ -4,16 +4,16 @@ provider "google-beta" { } data "google_compute_node_types" "central1a" { - provider = "google-beta" - zone = "us-central1-a" + provider = google-beta + zone = "us-central1-a" } resource "google_compute_node_template" "<%= ctx[:primary_resource_id] %>" { - provider = "google-beta" + provider = google-beta - name = "<%= ctx[:vars]['template_name'] %>" - region = "us-central1" - node_type = "${data.google_compute_node_types.central1a.names[0]}" + name = "<%= ctx[:vars]['template_name'] %>" + region = "us-central1" + node_type = data.google_compute_node_types.central1a.names[0] node_affinity_labels = { foo = "baz" @@ -23,4 +23,3 @@ resource "google_compute_node_template" "<%= ctx[:primary_resource_id] %>" { type = "RESTART_NODE_ON_MINIMAL_SERVERS" } } - diff --git a/templates/terraform/examples/notification_channel_basic.tf.erb b/templates/terraform/examples/notification_channel_basic.tf.erb index 421e3acef35d..b5b4a5a8d7ad 100644 --- a/templates/terraform/examples/notification_channel_basic.tf.erb +++ b/templates/terraform/examples/notification_channel_basic.tf.erb @@ -1,7 +1,7 @@ resource "google_monitoring_notification_channel" "<%= ctx[:primary_resource_id] %>" { display_name = "<%= ctx[:vars]["display_name"] %>" - type = "email" + type = "email" labels = { email_address = "fake_email@blahblah.com" } -} \ No newline at end of file +} diff --git a/templates/terraform/examples/pubsub_subscription_different_project.tf.erb b/templates/terraform/examples/pubsub_subscription_different_project.tf.erb index eeebc111ace3..4bf08dfe2460 100644 --- a/templates/terraform/examples/pubsub_subscription_different_project.tf.erb +++ b/templates/terraform/examples/pubsub_subscription_different_project.tf.erb @@ -6,5 +6,5 @@ resource "google_pubsub_topic" "<%= ctx[:primary_resource_id] %>" { resource "google_pubsub_subscription" "<%= ctx[:primary_resource_id] %>" { project = "<%= ctx[:vars]['subscription_project'] %>" name = "<%= ctx[:vars]['subscription_name'] %>" - topic = "${google_pubsub_topic.<%= ctx[:primary_resource_id] %>.name}" + topic = google_pubsub_topic.<%= ctx[:primary_resource_id] %>.name } diff --git a/templates/terraform/examples/pubsub_subscription_pull.tf.erb b/templates/terraform/examples/pubsub_subscription_pull.tf.erb index b9e6c6590418..ed42cee630c2 100644 --- a/templates/terraform/examples/pubsub_subscription_pull.tf.erb +++ b/templates/terraform/examples/pubsub_subscription_pull.tf.erb @@ -4,7 +4,7 @@ resource "google_pubsub_topic" "<%= ctx[:primary_resource_id] %>" { resource "google_pubsub_subscription" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['subscription_name'] %>" - topic = "${google_pubsub_topic.<%= ctx[:primary_resource_id] %>.name}" + topic = google_pubsub_topic.<%= ctx[:primary_resource_id] %>.name labels = { foo = "bar" @@ -12,7 +12,7 @@ resource "google_pubsub_subscription" "<%= ctx[:primary_resource_id] %>" { # 20 minutes message_retention_duration = "1200s" - retain_acked_messages = true + retain_acked_messages = true ack_deadline_seconds = 20 diff --git a/templates/terraform/examples/pubsub_subscription_push.tf.erb b/templates/terraform/examples/pubsub_subscription_push.tf.erb index b631dfd30976..690cbca6aa6e 100644 --- a/templates/terraform/examples/pubsub_subscription_push.tf.erb +++ b/templates/terraform/examples/pubsub_subscription_push.tf.erb @@ -4,7 +4,7 
@@ resource "google_pubsub_topic" "<%= ctx[:primary_resource_id] %>" { resource "google_pubsub_subscription" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['subscription_name'] %>" - topic = "${google_pubsub_topic.<%= ctx[:primary_resource_id] %>.name}" + topic = google_pubsub_topic.<%= ctx[:primary_resource_id] %>.name ack_deadline_seconds = 20 @@ -15,7 +15,7 @@ resource "google_pubsub_subscription" "<%= ctx[:primary_resource_id] %>" { push_config { push_endpoint = "https://example.com/push" - attributes { + attributes = { x-goog-version = "v1" } } diff --git a/templates/terraform/examples/pubsub_topic_cmek.tf.erb b/templates/terraform/examples/pubsub_topic_cmek.tf.erb index 9acefaef0d42..974de2457c3a 100644 --- a/templates/terraform/examples/pubsub_topic_cmek.tf.erb +++ b/templates/terraform/examples/pubsub_topic_cmek.tf.erb @@ -1,11 +1,11 @@ resource "google_pubsub_topic" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['topic_name'] %>" - kms_key_name = "${google_kms_crypto_key.crypto_key.self_link}" + kms_key_name = google_kms_crypto_key.crypto_key.self_link } resource "google_kms_crypto_key" "crypto_key" { name = "<%= ctx[:vars]['key_name'] %>" - key_ring = "${google_kms_key_ring.key_ring.self_link}" + key_ring = google_kms_key_ring.key_ring.self_link } resource "google_kms_key_ring" "key_ring" { diff --git a/templates/terraform/examples/pubsub_topic_geo_restricted.tf.erb b/templates/terraform/examples/pubsub_topic_geo_restricted.tf.erb index 75e066391426..c082d009b841 100644 --- a/templates/terraform/examples/pubsub_topic_geo_restricted.tf.erb +++ b/templates/terraform/examples/pubsub_topic_geo_restricted.tf.erb @@ -6,5 +6,4 @@ resource "google_pubsub_topic" "<%= ctx[:primary_resource_id] %>" { "europe-west3", ] } - } diff --git a/templates/terraform/examples/redis_instance_full.tf.erb b/templates/terraform/examples/redis_instance_full.tf.erb index d15e44de0b5a..e48484fbf1b6 100644 --- a/templates/terraform/examples/redis_instance_full.tf.erb +++ b/templates/terraform/examples/redis_instance_full.tf.erb @@ -6,7 +6,7 @@ resource "google_redis_instance" "<%= ctx[:primary_resource_id] %>" { location_id = "us-central1-a" alternative_location_id = "us-central1-f" - authorized_network = "${google_compute_network.auto-network.self_link}" + authorized_network = google_compute_network.auto-network.self_link redis_version = "REDIS_3_2" display_name = "Terraform Test Instance" diff --git a/templates/terraform/examples/region_autoscaler_basic.tf.erb b/templates/terraform/examples/region_autoscaler_basic.tf.erb index 09e358019663..353bb24e2a88 100644 --- a/templates/terraform/examples/region_autoscaler_basic.tf.erb +++ b/templates/terraform/examples/region_autoscaler_basic.tf.erb @@ -1,7 +1,7 @@ resource "google_compute_region_autoscaler" "foobar" { name = "<%= ctx[:vars]['region_autoscaler_name'] %>" region = "us-central1" - target = "${google_compute_region_instance_group_manager.foobar.self_link}" + target = google_compute_region_instance_group_manager.foobar.self_link autoscaling_policy { max_replicas = 5 @@ -22,7 +22,7 @@ resource "google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "${data.google_compute_image.debian_9.self_link}" + source_image = data.google_compute_image.debian_9.self_link } network_interface { @@ -47,15 +47,15 @@ resource "google_compute_region_instance_group_manager" "foobar" { region = "us-central1" version { - instance_template = "${google_compute_instance_template.foobar.self_link}" + instance_template = 
google_compute_instance_template.foobar.self_link name = "primary" } - target_pools = ["${google_compute_target_pool.foobar.self_link}"] + target_pools = [google_compute_target_pool.foobar.self_link] base_instance_name = "foobar" } data "google_compute_image" "debian_9" { - family = "debian-9" - project = "debian-cloud" + family = "debian-9" + project = "debian-cloud" } diff --git a/templates/terraform/examples/region_autoscaler_beta.tf.erb b/templates/terraform/examples/region_autoscaler_beta.tf.erb index 2aa87b54ec44..1049f226965e 100644 --- a/templates/terraform/examples/region_autoscaler_beta.tf.erb +++ b/templates/terraform/examples/region_autoscaler_beta.tf.erb @@ -1,9 +1,9 @@ resource "google_compute_region_autoscaler" "foobar" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['region_autoscaler_name'] %>" region = "us-central1" - target = "${google_compute_region_instance_group_manager.foobar.self_link}" + target = google_compute_region_instance_group_manager.foobar.self_link autoscaling_policy { max_replicas = 5 @@ -17,7 +17,7 @@ resource "google_compute_region_autoscaler" "foobar" { } resource "google_compute_instance_template" "foobar" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['instance_template_name'] %>" machine_type = "n1-standard-1" @@ -26,7 +26,7 @@ resource "google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "${data.google_compute_image.debian_9.self_link}" + source_image = data.google_compute_image.debian_9.self_link } network_interface { @@ -43,34 +43,34 @@ resource "google_compute_instance_template" "foobar" { } resource "google_compute_target_pool" "foobar" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['target_pool_name'] %>" } resource "google_compute_region_instance_group_manager" "foobar" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['rigm_name'] %>" region = "us-central1" version { - instance_template = "${google_compute_instance_template.foobar.self_link}" - name = "primary" + instance_template = google_compute_instance_template.foobar.self_link + name = "primary" } - target_pools = ["${google_compute_target_pool.foobar.self_link}"] + target_pools = [google_compute_target_pool.foobar.self_link] base_instance_name = "foobar" } data "google_compute_image" "debian_9" { - provider = "google-beta" + provider = google-beta - family = "debian-9" - project = "debian-cloud" + family = "debian-9" + project = "debian-cloud" } -provider "google-beta"{ +provider "google-beta" { region = "us-central1" zone = "us-central1-a" } diff --git a/templates/terraform/examples/region_backend_service_basic.tf.erb b/templates/terraform/examples/region_backend_service_basic.tf.erb index 0f49480b1b75..b7a04720e926 100644 --- a/templates/terraform/examples/region_backend_service_basic.tf.erb +++ b/templates/terraform/examples/region_backend_service_basic.tf.erb @@ -1,7 +1,7 @@ resource "google_compute_region_backend_service" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['region_backend_service_name'] %>" region = "us-central1" - health_checks = ["${google_compute_health_check.default.self_link}"] + health_checks = [google_compute_health_check.default.self_link] connection_draining_timeout_sec = 10 session_affinity = "CLIENT_IP" } diff --git a/templates/terraform/examples/region_backend_service_ilb_ring_hash.tf.erb b/templates/terraform/examples/region_backend_service_ilb_ring_hash.tf.erb index 
b255aeee763b..4c754df00b24 100644 --- a/templates/terraform/examples/region_backend_service_ilb_ring_hash.tf.erb +++ b/templates/terraform/examples/region_backend_service_ilb_ring_hash.tf.erb @@ -30,6 +30,6 @@ resource "google_compute_health_check" "health_check" { name = "<%= ctx[:vars]['health_check_name'] %>" http_health_check { - + port = 80 } } diff --git a/templates/terraform/examples/region_backend_service_ilb_round_robin.tf.erb b/templates/terraform/examples/region_backend_service_ilb_round_robin.tf.erb index 00c4a38d7342..488de257e029 100644 --- a/templates/terraform/examples/region_backend_service_ilb_round_robin.tf.erb +++ b/templates/terraform/examples/region_backend_service_ilb_round_robin.tf.erb @@ -14,6 +14,6 @@ resource "google_compute_health_check" "health_check" { name = "<%= ctx[:vars]['health_check_name'] %>" http_health_check { - + port = 80 } } diff --git a/templates/terraform/examples/region_disk_basic.tf.erb b/templates/terraform/examples/region_disk_basic.tf.erb index dc00113d21f2..52726d6d9277 100644 --- a/templates/terraform/examples/region_disk_basic.tf.erb +++ b/templates/terraform/examples/region_disk_basic.tf.erb @@ -1,23 +1,23 @@ resource "google_compute_region_disk" "regiondisk" { - name = "<%= ctx[:vars]['region_disk_name'] %>" - snapshot = "${google_compute_snapshot.snapdisk.self_link}" - type = "pd-ssd" - region = "us-central1" + name = "<%= ctx[:vars]['region_disk_name'] %>" + snapshot = google_compute_snapshot.snapdisk.self_link + type = "pd-ssd" + region = "us-central1" physical_block_size_bytes = 4096 replica_zones = ["us-central1-a", "us-central1-f"] } resource "google_compute_disk" "disk" { - name = "<%= ctx[:vars]['disk_name'] %>" + name = "<%= ctx[:vars]['disk_name'] %>" image = "debian-cloud/debian-9" - size = 50 - type = "pd-ssd" - zone = "us-central1-a" + size = 50 + type = "pd-ssd" + zone = "us-central1-a" } resource "google_compute_snapshot" "snapdisk" { - name = "<%= ctx[:vars]['snapshot_name'] %>" - source_disk = "${google_compute_disk.disk.name}" - zone = "us-central1-a" + name = "<%= ctx[:vars]['snapshot_name'] %>" + source_disk = google_compute_disk.disk.name + zone = "us-central1-a" } diff --git a/templates/terraform/examples/region_health_check_http.tf.erb b/templates/terraform/examples/region_health_check_http.tf.erb index 99c8adab1956..eab516fcb12a 100644 --- a/templates/terraform/examples/region_health_check_http.tf.erb +++ b/templates/terraform/examples/region_health_check_http.tf.erb @@ -1,6 +1,6 @@ resource "google_compute_region_health_check" "http-region-health-check" { - provider = "google-beta" - name = "<%= ctx[:vars]['health_check_name'] %>" + provider = google-beta + name = "<%= ctx[:vars]['health_check_name'] %>" timeout_sec = 1 check_interval_sec = 1 diff --git a/templates/terraform/examples/region_health_check_http2.tf.erb b/templates/terraform/examples/region_health_check_http2.tf.erb index fe560e571066..f82a15ed7ef3 100644 --- a/templates/terraform/examples/region_health_check_http2.tf.erb +++ b/templates/terraform/examples/region_health_check_http2.tf.erb @@ -1,6 +1,6 @@ resource "google_compute_region_health_check" "http2-region-health-check" { - provider = "google-beta" - name = "<%= ctx[:vars]['health_check_name'] %>" + provider = google-beta + name = "<%= ctx[:vars]['health_check_name'] %>" timeout_sec = 1 check_interval_sec = 1 diff --git a/templates/terraform/examples/region_health_check_http2_full.tf.erb b/templates/terraform/examples/region_health_check_http2_full.tf.erb index 5bd4738a27b5..4d68b92ff56b 
100644 --- a/templates/terraform/examples/region_health_check_http2_full.tf.erb +++ b/templates/terraform/examples/region_health_check_http2_full.tf.erb @@ -1,6 +1,6 @@ resource "google_compute_region_health_check" "http2-region-health-check" { - provider = "google-beta" - name = "<%= ctx[:vars]['health_check_name'] %>" + provider = google-beta + name = "<%= ctx[:vars]['health_check_name'] %>" description = "Health check via http2" timeout_sec = 1 @@ -9,11 +9,11 @@ resource "google_compute_region_health_check" "http2-region-health-check" { unhealthy_threshold = 5 http2_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - host = "1.2.3.4" - request_path = "/mypath" - proxy_header = "NONE" - response = "I AM HEALTHY" + host = "1.2.3.4" + request_path = "/mypath" + proxy_header = "NONE" + response = "I AM HEALTHY" } } diff --git a/templates/terraform/examples/region_health_check_http_full.tf.erb b/templates/terraform/examples/region_health_check_http_full.tf.erb index 2adf32d5de7b..3be92c4fef74 100644 --- a/templates/terraform/examples/region_health_check_http_full.tf.erb +++ b/templates/terraform/examples/region_health_check_http_full.tf.erb @@ -1,6 +1,6 @@ resource "google_compute_region_health_check" "http-region-health-check" { - provider = "google-beta" - name = "<%= ctx[:vars]['health_check_name'] %>" + provider = google-beta + name = "<%= ctx[:vars]['health_check_name'] %>" description = "Health check via http" timeout_sec = 1 @@ -9,11 +9,11 @@ resource "google_compute_region_health_check" "http-region-health-check" { unhealthy_threshold = 5 http_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - host = "1.2.3.4" - request_path = "/mypath" - proxy_header = "NONE" - response = "I AM HEALTHY" + host = "1.2.3.4" + request_path = "/mypath" + proxy_header = "NONE" + response = "I AM HEALTHY" } } diff --git a/templates/terraform/examples/region_health_check_https.tf.erb b/templates/terraform/examples/region_health_check_https.tf.erb index 693664f29df6..a52fc7661daf 100644 --- a/templates/terraform/examples/region_health_check_https.tf.erb +++ b/templates/terraform/examples/region_health_check_https.tf.erb @@ -1,6 +1,6 @@ resource "google_compute_region_health_check" "https-region-health-check" { - provider = "google-beta" - name = "<%= ctx[:vars]['health_check_name'] %>" + provider = google-beta + name = "<%= ctx[:vars]['health_check_name'] %>" timeout_sec = 1 check_interval_sec = 1 diff --git a/templates/terraform/examples/region_health_check_https_full.tf.erb b/templates/terraform/examples/region_health_check_https_full.tf.erb index 7547b6305686..3035bfa69eef 100644 --- a/templates/terraform/examples/region_health_check_https_full.tf.erb +++ b/templates/terraform/examples/region_health_check_https_full.tf.erb @@ -1,6 +1,6 @@ resource "google_compute_region_health_check" "https-region-health-check" { - provider = "google-beta" - name = "<%= ctx[:vars]['health_check_name'] %>" + provider = google-beta + name = "<%= ctx[:vars]['health_check_name'] %>" description = "Health check via https" timeout_sec = 1 @@ -9,11 +9,11 @@ resource "google_compute_region_health_check" "https-region-health-check" { unhealthy_threshold = 5 https_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - host = "1.2.3.4" - request_path = "/mypath" - proxy_header = "NONE" - response = "I AM HEALTHY" + host = 
"1.2.3.4" + request_path = "/mypath" + proxy_header = "NONE" + response = "I AM HEALTHY" } } diff --git a/templates/terraform/examples/region_health_check_ssl.tf.erb b/templates/terraform/examples/region_health_check_ssl.tf.erb index a41e28000a61..48cb47f4cd61 100644 --- a/templates/terraform/examples/region_health_check_ssl.tf.erb +++ b/templates/terraform/examples/region_health_check_ssl.tf.erb @@ -1,6 +1,6 @@ resource "google_compute_region_health_check" "ssl-region-health-check" { - provider = "google-beta" - name = "<%= ctx[:vars]['health_check_name'] %>" + provider = google-beta + name = "<%= ctx[:vars]['health_check_name'] %>" timeout_sec = 1 check_interval_sec = 1 diff --git a/templates/terraform/examples/region_health_check_ssl_full.tf.erb b/templates/terraform/examples/region_health_check_ssl_full.tf.erb index 9d4946d6c9ea..ce1d96551788 100644 --- a/templates/terraform/examples/region_health_check_ssl_full.tf.erb +++ b/templates/terraform/examples/region_health_check_ssl_full.tf.erb @@ -1,6 +1,6 @@ resource "google_compute_region_health_check" "ssl-region-health-check" { - provider = "google-beta" - name = "<%= ctx[:vars]['health_check_name'] %>" + provider = google-beta + name = "<%= ctx[:vars]['health_check_name'] %>" description = "Health check via ssl" timeout_sec = 1 @@ -9,10 +9,10 @@ resource "google_compute_region_health_check" "ssl-region-health-check" { unhealthy_threshold = 5 ssl_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - request = "ARE YOU HEALTHY?" - proxy_header = "NONE" - response = "I AM HEALTHY" + request = "ARE YOU HEALTHY?" + proxy_header = "NONE" + response = "I AM HEALTHY" } } diff --git a/templates/terraform/examples/region_health_check_tcp.tf.erb b/templates/terraform/examples/region_health_check_tcp.tf.erb index f21b46dfcf49..5f45bda63d24 100644 --- a/templates/terraform/examples/region_health_check_tcp.tf.erb +++ b/templates/terraform/examples/region_health_check_tcp.tf.erb @@ -1,6 +1,6 @@ resource "google_compute_region_health_check" "tcp-region-health-check" { - provider = "google-beta" - name = "<%= ctx[:vars]['health_check_name'] %>" + provider = google-beta + name = "<%= ctx[:vars]['health_check_name'] %>" timeout_sec = 1 check_interval_sec = 1 diff --git a/templates/terraform/examples/region_health_check_tcp_full.tf.erb b/templates/terraform/examples/region_health_check_tcp_full.tf.erb index f426609e9659..a620715d1fc6 100644 --- a/templates/terraform/examples/region_health_check_tcp_full.tf.erb +++ b/templates/terraform/examples/region_health_check_tcp_full.tf.erb @@ -1,6 +1,6 @@ resource "google_compute_region_health_check" "tcp-region-health-check" { - provider = "google-beta" - name = "<%= ctx[:vars]['health_check_name'] %>" + provider = google-beta + name = "<%= ctx[:vars]['health_check_name'] %>" description = "Health check via tcp" timeout_sec = 1 @@ -9,10 +9,10 @@ resource "google_compute_region_health_check" "tcp-region-health-check" { unhealthy_threshold = 5 tcp_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - request = "ARE YOU HEALTHY?" - proxy_header = "NONE" - response = "I AM HEALTHY" + request = "ARE YOU HEALTHY?" 
+ proxy_header = "NONE" + response = "I AM HEALTHY" } } diff --git a/templates/terraform/examples/region_ssl_certificate_basic.tf.erb b/templates/terraform/examples/region_ssl_certificate_basic.tf.erb index ef6900463c32..1864dec3441d 100644 --- a/templates/terraform/examples/region_ssl_certificate_basic.tf.erb +++ b/templates/terraform/examples/region_ssl_certificate_basic.tf.erb @@ -1,13 +1,12 @@ resource "google_compute_region_ssl_certificate" "default" { - provider = "google-beta" - region = "us-central1" + provider = google-beta + region = "us-central1" name_prefix = "my-certificate-" description = "a description" - private_key = "${file("path/to/private.key")}" - certificate = "${file("path/to/certificate.crt")}" + private_key = file("path/to/private.key") + certificate = file("path/to/certificate.crt") lifecycle { create_before_destroy = true } } - diff --git a/templates/terraform/examples/region_ssl_certificate_random_provider.tf.erb b/templates/terraform/examples/region_ssl_certificate_random_provider.tf.erb index 9ba60068f168..f731253b8c38 100644 --- a/templates/terraform/examples/region_ssl_certificate_random_provider.tf.erb +++ b/templates/terraform/examples/region_ssl_certificate_random_provider.tf.erb @@ -1,12 +1,13 @@ # You may also want to control name generation explicitly: resource "google_compute_region_ssl_certificate" "default" { - provider = "google-beta" - region = "us-central1" + provider = google-beta + region = "us-central1" + # The name will contain 8 random hex digits, # e.g. "my-certificate-48ab27cd2a" - name = "${random_id.certificate.hex}" - private_key = "${file("path/to/private.key")}" - certificate = "${file("path/to/certificate.crt")}" + name = random_id.certificate.hex + private_key = file("path/to/private.key") + certificate = file("path/to/certificate.crt") lifecycle { create_before_destroy = true @@ -19,8 +20,7 @@ resource "random_id" "certificate" { # For security, do not expose raw certificate values in the output keepers = { - private_key = "${base64sha256(file("path/to/private.key"))}" - certificate = "${base64sha256(file("path/to/certificate.crt"))}" + private_key = filebase64sha256("path/to/private.key") + certificate = filebase64sha256("path/to/certificate.crt") } } - diff --git a/templates/terraform/examples/region_ssl_certificate_target_https_proxies.tf.erb b/templates/terraform/examples/region_ssl_certificate_target_https_proxies.tf.erb index 42bd2cb01560..50a6bf311b36 100644 --- a/templates/terraform/examples/region_ssl_certificate_target_https_proxies.tf.erb +++ b/templates/terraform/examples/region_ssl_certificate_target_https_proxies.tf.erb @@ -9,11 +9,11 @@ // name with name_prefix, or use random_id resource. 
Example: resource "google_compute_region_ssl_certificate" "default" { - provider = "google-beta" - region = "us-central1" + provider = google-beta + region = "us-central1" name_prefix = "my-certificate-" - private_key = "${file("path/to/private.key")}" - certificate = "${file("path/to/certificate.crt")}" + private_key = file("path/to/private.key") + certificate = file("path/to/certificate.crt") lifecycle { create_before_destroy = true @@ -21,20 +21,20 @@ resource "google_compute_region_ssl_certificate" "default" { } resource "google_compute_region_target_https_proxy" "default" { - provider = "google-beta" - region = "us-central1" + provider = google-beta + region = "us-central1" name = "<%= ctx[:vars]['region_target_https_proxy_name'] %>" - url_map = "${google_compute_region_url_map.default.self_link}" - ssl_certificates = ["${google_compute_region_ssl_certificate.default.self_link}"] + url_map = google_compute_region_url_map.default.self_link + ssl_certificates = [google_compute_region_ssl_certificate.default.self_link] } resource "google_compute_region_url_map" "default" { - provider = "google-beta" - region = "us-central1" + provider = google-beta + region = "us-central1" name = "<%= ctx[:vars]['region_url_map_name'] %>" description = "a description" - default_service = "${google_compute_region_backend_service.default.self_link}" + default_service = google_compute_region_backend_service.default.self_link host_rule { hosts = ["mysite.com"] @@ -43,29 +43,30 @@ resource "google_compute_region_url_map" "default" { path_matcher { name = "allpaths" - default_service = "${google_compute_region_backend_service.default.self_link}" + default_service = google_compute_region_backend_service.default.self_link path_rule { paths = ["/*"] - service = "${google_compute_region_backend_service.default.self_link}" + service = google_compute_region_backend_service.default.self_link } } } resource "google_compute_region_backend_service" "default" { - provider = "google-beta" - region = "us-central1" + provider = google-beta + region = "us-central1" name = "<%= ctx[:vars]['region_backend_service_name'] %>" protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_region_health_check.default.self_link}"] + health_checks = [google_compute_region_health_check.default.self_link] } resource "google_compute_region_health_check" "default" { - provider = "google-beta" - region = "us-central1" - name = "<%= ctx[:vars]['region_health_check_name'] %>" + provider = google-beta + region = "us-central1" + name = "<%= ctx[:vars]['region_health_check_name'] %>" http_health_check { + port = 80 } } diff --git a/templates/terraform/examples/region_target_http_proxy_basic.tf.erb b/templates/terraform/examples/region_target_http_proxy_basic.tf.erb index a7aff5dbca27..dff5528e3958 100644 --- a/templates/terraform/examples/region_target_http_proxy_basic.tf.erb +++ b/templates/terraform/examples/region_target_http_proxy_basic.tf.erb @@ -1,17 +1,17 @@ resource "google_compute_region_target_http_proxy" "default" { - provider = "google-beta" + provider = google-beta - region = "us-central1" - name = "<%= ctx[:vars]['region_target_http_proxy_name'] %>" - url_map = "${google_compute_region_url_map.default.self_link}" + region = "us-central1" + name = "<%= ctx[:vars]['region_target_http_proxy_name'] %>" + url_map = google_compute_region_url_map.default.self_link } resource "google_compute_region_url_map" "default" { - provider = "google-beta" + provider = google-beta region = "us-central1" name = "<%= 
ctx[:vars]['region_url_map_name'] %>" - default_service = "${google_compute_region_backend_service.default.self_link}" + default_service = google_compute_region_backend_service.default.self_link host_rule { hosts = ["mysite.com"] @@ -20,31 +20,32 @@ resource "google_compute_region_url_map" "default" { path_matcher { name = "allpaths" - default_service = "${google_compute_region_backend_service.default.self_link}" + default_service = google_compute_region_backend_service.default.self_link path_rule { paths = ["/*"] - service = "${google_compute_region_backend_service.default.self_link}" + service = google_compute_region_backend_service.default.self_link } } } resource "google_compute_region_backend_service" "default" { - provider = "google-beta" + provider = google-beta region = "us-central1" name = "<%= ctx[:vars]['region_backend_service_name'] %>" protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_region_health_check.default.self_link}"] + health_checks = [google_compute_region_health_check.default.self_link] } resource "google_compute_region_health_check" "default" { - provider = "google-beta" + provider = google-beta region = "us-central1" name = "<%= ctx[:vars]['region_health_check_name'] %>" http_health_check { + port = 80 } } diff --git a/templates/terraform/examples/region_target_https_proxy_basic.tf.erb b/templates/terraform/examples/region_target_https_proxy_basic.tf.erb index f7564f49c4fd..bde6dde8671c 100644 --- a/templates/terraform/examples/region_target_https_proxy_basic.tf.erb +++ b/templates/terraform/examples/region_target_https_proxy_basic.tf.erb @@ -1,29 +1,29 @@ resource "google_compute_region_target_https_proxy" "default" { - provider = "google-beta" + provider = google-beta region = "us-central1" name = "<%= ctx[:vars]['region_target_https_proxy_name'] %>" - url_map = "${google_compute_region_url_map.default.self_link}" - ssl_certificates = ["${google_compute_region_ssl_certificate.default.self_link}"] + url_map = google_compute_region_url_map.default.self_link + ssl_certificates = [google_compute_region_ssl_certificate.default.self_link] } resource "google_compute_region_ssl_certificate" "default" { - provider = "google-beta" + provider = google-beta region = "us-central1" name = "<%= ctx[:vars]['region_ssl_certificate_name'] %>" - private_key = "${file("path/to/private.key")}" - certificate = "${file("path/to/certificate.crt")}" + private_key = file("path/to/private.key") + certificate = file("path/to/certificate.crt") } resource "google_compute_region_url_map" "default" { - provider = "google-beta" + provider = google-beta region = "us-central1" name = "<%= ctx[:vars]['region_url_map_name'] %>" description = "a description" - default_service = "${google_compute_region_backend_service.default.self_link}" + default_service = google_compute_region_backend_service.default.self_link host_rule { hosts = ["mysite.com"] @@ -32,31 +32,32 @@ resource "google_compute_region_url_map" "default" { path_matcher { name = "allpaths" - default_service = "${google_compute_region_backend_service.default.self_link}" + default_service = google_compute_region_backend_service.default.self_link path_rule { paths = ["/*"] - service = "${google_compute_region_backend_service.default.self_link}" + service = google_compute_region_backend_service.default.self_link } } } resource "google_compute_region_backend_service" "default" { - provider = "google-beta" + provider = google-beta region = "us-central1" name = "<%= ctx[:vars]['region_backend_service_name'] %>" protocol = 
"HTTP" timeout_sec = 10 - health_checks = ["${google_compute_region_health_check.default.self_link}"] + health_checks = [google_compute_region_health_check.default.self_link] } resource "google_compute_region_health_check" "default" { - provider = "google-beta" + provider = google-beta - region = "us-central1" - name = "<%= ctx[:vars]['region_health_check_name'] %>" + region = "us-central1" + name = "<%= ctx[:vars]['region_health_check_name'] %>" http_health_check { + port = 80 } } diff --git a/templates/terraform/examples/region_url_map_basic.tf.erb b/templates/terraform/examples/region_url_map_basic.tf.erb index 09f4a16f9070..f819aaaa8dd3 100644 --- a/templates/terraform/examples/region_url_map_basic.tf.erb +++ b/templates/terraform/examples/region_url_map_basic.tf.erb @@ -1,12 +1,12 @@ resource "google_compute_region_url_map" "<%= ctx[:primary_resource_id] %>" { - provider = "google-beta" + provider = google-beta - region = "us-central1" + region = "us-central1" name = "<%= ctx[:vars]['region_url_map_name'] %>" description = "a description" - default_service = "${google_compute_region_backend_service.home.self_link}" + default_service = google_compute_region_backend_service.home.self_link host_rule { hosts = ["mysite.com"] @@ -15,59 +15,59 @@ resource "google_compute_region_url_map" "<%= ctx[:primary_resource_id] %>" { path_matcher { name = "allpaths" - default_service = "${google_compute_region_backend_service.home.self_link}" + default_service = google_compute_region_backend_service.home.self_link path_rule { paths = ["/home"] - service = "${google_compute_region_backend_service.home.self_link}" + service = google_compute_region_backend_service.home.self_link } path_rule { paths = ["/login"] - service = "${google_compute_region_backend_service.login.self_link}" + service = google_compute_region_backend_service.login.self_link } } test { - service = "${google_compute_region_backend_service.home.self_link}" + service = google_compute_region_backend_service.home.self_link host = "hi.com" path = "/home" } } resource "google_compute_region_backend_service" "login" { - provider = "google-beta" + provider = google-beta - region = "us-central1" + region = "us-central1" name = "<%= ctx[:vars]['login_region_backend_service_name'] %>" protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_region_health_check.default.self_link}"] + health_checks = [google_compute_region_health_check.default.self_link] } resource "google_compute_region_backend_service" "home" { - provider = "google-beta" + provider = google-beta - region = "us-central1" + region = "us-central1" name = "<%= ctx[:vars]['home_region_backend_service_name'] %>" protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_region_health_check.default.self_link}"] + health_checks = [google_compute_region_health_check.default.self_link] } resource "google_compute_region_health_check" "default" { - provider = "google-beta" + provider = google-beta - region = "us-central1" + region = "us-central1" name = "<%= ctx[:vars]['region_health_check_name'] %>" check_interval_sec = 1 timeout_sec = 1 - http_health_check { + http_health_check { port = 80 request_path = "/" } diff --git a/templates/terraform/examples/reservation_basic.tf.erb b/templates/terraform/examples/reservation_basic.tf.erb index 60fe86a4cefe..7163c95458cf 100644 --- a/templates/terraform/examples/reservation_basic.tf.erb +++ b/templates/terraform/examples/reservation_basic.tf.erb @@ -6,7 +6,7 @@ resource "google_compute_reservation" "<%= 
ctx[:primary_resource_id] %>" { count = 1 instance_properties { min_cpu_platform = "Intel Cascade Lake" - machine_type = "n2-standard-2" + machine_type = "n2-standard-2" } } } diff --git a/templates/terraform/examples/resource_manager_lien.tf.erb b/templates/terraform/examples/resource_manager_lien.tf.erb index ee1c1f6ef483..fedab4764e14 100644 --- a/templates/terraform/examples/resource_manager_lien.tf.erb +++ b/templates/terraform/examples/resource_manager_lien.tf.erb @@ -1,12 +1,11 @@ resource "google_resource_manager_lien" "<%= ctx[:primary_resource_id] %>" { - parent = "projects/${google_project.project.number}" + parent = "projects/${google_project.project.number}" restrictions = ["resourcemanager.projects.delete"] - origin = "machine-readable-explanation" - reason = "This project is an important environment" + origin = "machine-readable-explanation" + reason = "This project is an important environment" } resource "google_project" "project" { project_id = "<%= ctx[:vars]["project_id"] %>" - name = "A very important project!" + name = "A very important project!" } - diff --git a/templates/terraform/examples/resource_policy_basic.tf.erb b/templates/terraform/examples/resource_policy_basic.tf.erb index e3b2fa0b5038..1aadc57370b3 100644 --- a/templates/terraform/examples/resource_policy_basic.tf.erb +++ b/templates/terraform/examples/resource_policy_basic.tf.erb @@ -1,11 +1,11 @@ resource "google_compute_resource_policy" "foo" { - name = "<%= ctx[:vars]['name'] %>" + name = "<%= ctx[:vars]['name'] %>" region = "us-central1" snapshot_schedule_policy { schedule { daily_schedule { days_in_cycle = 1 - start_time = "04:00" + start_time = "04:00" } } } diff --git a/templates/terraform/examples/resource_policy_full.tf.erb b/templates/terraform/examples/resource_policy_full.tf.erb index b7de3477b841..d699543ed8c0 100644 --- a/templates/terraform/examples/resource_policy_full.tf.erb +++ b/templates/terraform/examples/resource_policy_full.tf.erb @@ -1,15 +1,15 @@ resource "google_compute_resource_policy" "bar" { - name = "<%= ctx[:vars]['name'] %>" + name = "<%= ctx[:vars]['name'] %>" region = "us-central1" snapshot_schedule_policy { schedule { hourly_schedule { hours_in_cycle = 20 - start_time = "23:00" + start_time = "23:00" } } retention_policy { - max_retention_days = 10 + max_retention_days = 10 on_source_disk_delete = "KEEP_AUTO_SNAPSHOTS" } snapshot_properties { @@ -17,7 +17,7 @@ resource "google_compute_resource_policy" "bar" { my_label = "value" } storage_locations = ["us"] - guest_flush = true + guest_flush = true } } } diff --git a/templates/terraform/examples/route_basic.tf.erb b/templates/terraform/examples/route_basic.tf.erb index 57cbc39776ca..feb6b11d206c 100644 --- a/templates/terraform/examples/route_basic.tf.erb +++ b/templates/terraform/examples/route_basic.tf.erb @@ -1,7 +1,7 @@ resource "google_compute_route" "default" { name = "<%= ctx[:vars]['route_name'] %>" dest_range = "15.0.0.0/24" - network = "${google_compute_network.default.name}" + network = google_compute_network.default.name next_hop_ip = "10.132.1.5" priority = 100 } diff --git a/templates/terraform/examples/route_ilb_beta.tf.erb b/templates/terraform/examples/route_ilb_beta.tf.erb index ffd8fb5dfb25..20e43d700386 100644 --- a/templates/terraform/examples/route_ilb_beta.tf.erb +++ b/templates/terraform/examples/route_ilb_beta.tf.erb @@ -1,19 +1,19 @@ resource "google_compute_network" "default" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['network_name'] %>" auto_create_subnetworks 
= false } resource "google_compute_subnetwork" "default" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['subnet_name'] %>" ip_cidr_range = "10.0.1.0/24" region = "us-central1" - network = "${google_compute_network.default.self_link}" + network = google_compute_network.default.self_link } resource "google_compute_health_check" "hc" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['health_check_name'] %>" check_interval_sec = 1 timeout_sec = 1 @@ -24,29 +24,29 @@ resource "google_compute_health_check" "hc" { } resource "google_compute_region_backend_service" "backend" { - provider = "google-beta" - name = "<%= ctx[:vars]['backend_name'] %>" - region = "us-central1" - health_checks = ["${google_compute_health_check.hc.self_link}"] + provider = google-beta + name = "<%= ctx[:vars]['backend_name'] %>" + region = "us-central1" + health_checks = [google_compute_health_check.hc.self_link] } resource "google_compute_forwarding_rule" "default" { - provider = "google-beta" - name = "<%= ctx[:vars]['forwarding_rule_name'] %>" - region = "us-central1" + provider = google-beta + name = "<%= ctx[:vars]['forwarding_rule_name'] %>" + region = "us-central1" load_balancing_scheme = "INTERNAL" - backend_service = "${google_compute_region_backend_service.backend.self_link}" + backend_service = google_compute_region_backend_service.backend.self_link all_ports = true - network = "${google_compute_network.default.name}" - subnetwork = "${google_compute_subnetwork.default.name}" + network = google_compute_network.default.name + subnetwork = google_compute_subnetwork.default.name } resource "google_compute_route" "<%= ctx[:primary_resource_id] %>" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['route_name'] %>" dest_range = "0.0.0.0/0" - network = "${google_compute_network.default.name}" - next_hop_ilb = "${google_compute_forwarding_rule.default.self_link}" + network = google_compute_network.default.name + next_hop_ilb = google_compute_forwarding_rule.default.self_link priority = 2000 } diff --git a/templates/terraform/examples/router_basic.tf.erb b/templates/terraform/examples/router_basic.tf.erb index 790ca316b981..efdb7182a370 100644 --- a/templates/terraform/examples/router_basic.tf.erb +++ b/templates/terraform/examples/router_basic.tf.erb @@ -1,6 +1,6 @@ resource "google_compute_router" "foobar" { name = "<%= ctx[:vars]['router_name'] %>" - network = "${google_compute_network.foobar.name}" + network = google_compute_network.foobar.name bgp { asn = 64514 advertise_mode = "CUSTOM" @@ -15,7 +15,6 @@ resource "google_compute_router" "foobar" { } resource "google_compute_network" "foobar" { - name = "<%= ctx[:vars]['network_name'] %>" + name = "<%= ctx[:vars]['network_name'] %>" auto_create_subnetworks = false } - diff --git a/templates/terraform/examples/router_nat_basic.tf.erb b/templates/terraform/examples/router_nat_basic.tf.erb index 9944170c5b67..8dfb43925135 100644 --- a/templates/terraform/examples/router_nat_basic.tf.erb +++ b/templates/terraform/examples/router_nat_basic.tf.erb @@ -1,33 +1,33 @@ resource "google_compute_network" "net" { - name = "<%= ctx[:vars]['network_name'] %>" + name = "<%= ctx[:vars]['network_name'] %>" } resource "google_compute_subnetwork" "subnet" { - name = "<%= ctx[:vars]['subnet_name'] %>" - network = google_compute_network.net.self_link - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" + name = "<%= ctx[:vars]['subnet_name'] %>" + network = google_compute_network.net.self_link + 
ip_cidr_range = "10.0.0.0/16" + region = "us-central1" } -resource "google_compute_router" "router"{ - name = "<%= ctx[:vars]['router_name'] %>" - region = google_compute_subnetwork.subnet.region - network = google_compute_network.net.self_link +resource "google_compute_router" "router" { + name = "<%= ctx[:vars]['router_name'] %>" + region = google_compute_subnetwork.subnet.region + network = google_compute_network.net.self_link - bgp { - asn = 64514 - } + bgp { + asn = 64514 + } } resource "google_compute_router_nat" "<%= ctx[:primary_resource_id] %>" { - name = "<%= ctx[:vars]['nat_name'] %>" - router = google_compute_router.router.name - region = google_compute_router.router.region - nat_ip_allocate_option = "AUTO_ONLY" - source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES" - - log_config { - enable = true - filter = "ERRORS_ONLY" - } -} \ No newline at end of file + name = "<%= ctx[:vars]['nat_name'] %>" + router = google_compute_router.router.name + region = google_compute_router.router.region + nat_ip_allocate_option = "AUTO_ONLY" + source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES" + + log_config { + enable = true + filter = "ERRORS_ONLY" + } +} diff --git a/templates/terraform/examples/router_nat_manual_ips.tf.erb b/templates/terraform/examples/router_nat_manual_ips.tf.erb index bbe0c38b4d60..231ee13a102c 100644 --- a/templates/terraform/examples/router_nat_manual_ips.tf.erb +++ b/templates/terraform/examples/router_nat_manual_ips.tf.erb @@ -1,37 +1,37 @@ resource "google_compute_network" "net" { - name = "<%= ctx[:vars]['network_name'] %>" + name = "<%= ctx[:vars]['network_name'] %>" } resource "google_compute_subnetwork" "subnet" { - name = "<%= ctx[:vars]['subnet_name'] %>" - network = google_compute_network.net.self_link - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" + name = "<%= ctx[:vars]['subnet_name'] %>" + network = google_compute_network.net.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" } -resource "google_compute_router" "router"{ - name = "<%= ctx[:vars]['router_name'] %>" - region = google_compute_subnetwork.subnet.region - network = google_compute_network.net.self_link +resource "google_compute_router" "router" { + name = "<%= ctx[:vars]['router_name'] %>" + region = google_compute_subnetwork.subnet.region + network = google_compute_network.net.self_link } resource "google_compute_address" "address" { - count = 2 - name = "<%= ctx[:vars]['address_name'] %>-${count.index}" - region = google_compute_subnetwork.subnet.region + count = 2 + name = "<%= ctx[:vars]['address_name'] %>-${count.index}" + region = google_compute_subnetwork.subnet.region } resource "google_compute_router_nat" "<%= ctx[:primary_resource_id] %>" { - name = "<%= ctx[:vars]['nat_name'] %>" - router = google_compute_router.router.name - region = google_compute_router.router.region - - nat_ip_allocate_option = "MANUAL_ONLY" - nat_ips = google_compute_address.address[*].self_link - - source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" - subnetwork { - name = google_compute_subnetwork.default.self_link - source_ip_ranges_to_nat = ["ALL_IP_RANGES"] - } + name = "<%= ctx[:vars]['nat_name'] %>" + router = google_compute_router.router.name + region = google_compute_router.router.region + + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = google_compute_address.address.*.self_link + + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + subnetwork { + name = google_compute_subnetwork.subnet.self_link + source_ip_ranges_to_nat
= ["ALL_IP_RANGES"] + } } diff --git a/templates/terraform/examples/runtimeconfig_config_basic.tf.erb b/templates/terraform/examples/runtimeconfig_config_basic.tf.erb index 19562f1e6008..9d79e77dc4cc 100644 --- a/templates/terraform/examples/runtimeconfig_config_basic.tf.erb +++ b/templates/terraform/examples/runtimeconfig_config_basic.tf.erb @@ -1,4 +1,4 @@ resource "google_runtimeconfig_config" "<%= ctx[:primary_resource_id] %>" { - name = "<%= ctx[:vars]['config_name'] %>" - description = "Runtime configuration values for my service" + name = "<%= ctx[:vars]['config_name'] %>" + description = "Runtime configuration values for my service" } diff --git a/templates/terraform/examples/scan_config_basic.tf.erb b/templates/terraform/examples/scan_config_basic.tf.erb index 9512e67e314e..f74505bec26d 100644 --- a/templates/terraform/examples/scan_config_basic.tf.erb +++ b/templates/terraform/examples/scan_config_basic.tf.erb @@ -1,10 +1,10 @@ resource "google_compute_address" "scanner_static_ip" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['address_name'] %>" } resource "google_security_scanner_scan_config" "<%= ctx[:primary_resource_id] %>" { - provider = "google-beta" + provider = google-beta display_name = "<%= ctx[:vars]['scan_config_name'] %>" starting_urls = ["http://${google_compute_address.scanner_static_ip.address}"] target_platforms = ["COMPUTE"] @@ -13,4 +13,4 @@ resource "google_security_scanner_scan_config" "<%= ctx[:primary_resource_id] %> provider "google-beta" { region = "us-central1" zone = "us-central1-a" -} \ No newline at end of file +} diff --git a/templates/terraform/examples/scc_source_basic.tf.erb b/templates/terraform/examples/scc_source_basic.tf.erb index 19ff19afad89..d8bcd1092509 100644 --- a/templates/terraform/examples/scc_source_basic.tf.erb +++ b/templates/terraform/examples/scc_source_basic.tf.erb @@ -1,5 +1,5 @@ resource "google_scc_source" "<%= ctx[:primary_resource_id] %>" { display_name = "<%= ctx[:vars]['source_display_name'] %>" organization = "<%= ctx[:test_env_vars]['org_id'] %>" - description = "My custom Cloud Security Command Center Finding Source" + description = "My custom Cloud Security Command Center Finding Source" } diff --git a/templates/terraform/examples/scheduled_query.tf.erb b/templates/terraform/examples/scheduled_query.tf.erb index eeb76cd4f97b..2c5ae0b1c57d 100644 --- a/templates/terraform/examples/scheduled_query.tf.erb +++ b/templates/terraform/examples/scheduled_query.tf.erb @@ -1,32 +1,31 @@ -data "google_project" "project" {} +data "google_project" "project" { +} resource "google_project_iam_member" "permissions" { - role = "roles/iam.serviceAccountShortTermTokenMinter" + role = "roles/iam.serviceAccountShortTermTokenMinter" member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com" } resource "google_bigquery_data_transfer_config" "<%= ctx[:primary_resource_id] %>" { - depends_on = [google_project_iam_member.permissions] - display_name = "<%= ctx[:vars]['display_name'] %>" - location = "asia-northeast1" - data_source_id = "scheduled_query" - schedule = "first sunday of quarter 00:00" - destination_dataset_id = "${google_bigquery_dataset.my_dataset.dataset_id}" + display_name = "<%= ctx[:vars]['display_name'] %>" + location = "asia-northeast1" + data_source_id = "scheduled_query" + schedule = "first sunday of quarter 00:00" + destination_dataset_id = google_bigquery_dataset.my_dataset.dataset_id params = { destination_table_name_template 
= "my-table" - write_disposition = "WRITE_APPEND" - query = "SELECT name FROM tabl WHERE x = 'y'" + write_disposition = "WRITE_APPEND" + query = "SELECT name FROM tabl WHERE x = 'y'" } } resource "google_bigquery_dataset" "my_dataset" { - depends_on = [google_project_iam_member.permissions] - dataset_id = "<%= ctx[:vars]['dataset_id'].delete("-") %>" + dataset_id = "<%= ctx[:vars]['dataset_id'].delete("-") %>" friendly_name = "foo" - description = "bar" - location = "asia-northeast1" + description = "bar" + location = "asia-northeast1" } diff --git a/templates/terraform/examples/scheduler_job_app_engine.tf.erb b/templates/terraform/examples/scheduler_job_app_engine.tf.erb index a0a2b2fa7020..3d598b99b451 100644 --- a/templates/terraform/examples/scheduler_job_app_engine.tf.erb +++ b/templates/terraform/examples/scheduler_job_app_engine.tf.erb @@ -1,15 +1,15 @@ resource "google_cloud_scheduler_job" "job" { - name = "<%= ctx[:vars]['job_name'] %>" - schedule = "*/4 * * * *" + name = "<%= ctx[:vars]['job_name'] %>" + schedule = "*/4 * * * *" description = "test app engine job" - time_zone = "Europe/London" + time_zone = "Europe/London" app_engine_http_target { http_method = "POST" app_engine_routing { - service = "web" - version = "prod" + service = "web" + version = "prod" instance = "my-instance-001" } diff --git a/templates/terraform/examples/scheduler_job_http.tf.erb b/templates/terraform/examples/scheduler_job_http.tf.erb index 88a31a4a3beb..22ee9fdc0b75 100644 --- a/templates/terraform/examples/scheduler_job_http.tf.erb +++ b/templates/terraform/examples/scheduler_job_http.tf.erb @@ -1,12 +1,11 @@ resource "google_cloud_scheduler_job" "job" { - name = "<%= ctx[:vars]['job_name'] %>" + name = "<%= ctx[:vars]['job_name'] %>" description = "test http job" - schedule = "*/8 * * * *" - time_zone = "America/New_York" + schedule = "*/8 * * * *" + time_zone = "America/New_York" http_target { http_method = "POST" - uri = "https://example.com/ping" + uri = "https://example.com/ping" } } - diff --git a/templates/terraform/examples/scheduler_job_oauth.tf.erb b/templates/terraform/examples/scheduler_job_oauth.tf.erb index db557042860e..0f6b9d5b881d 100644 --- a/templates/terraform/examples/scheduler_job_oauth.tf.erb +++ b/templates/terraform/examples/scheduler_job_oauth.tf.erb @@ -1,18 +1,18 @@ -data "google_compute_default_service_account" "default" { } +data "google_compute_default_service_account" "default" { +} resource "google_cloud_scheduler_job" "job" { - name = "<%= ctx[:vars]['job_name'] %>" + name = "<%= ctx[:vars]['job_name'] %>" description = "test http job" - schedule = "*/8 * * * *" - time_zone = "America/New_York" + schedule = "*/8 * * * *" + time_zone = "America/New_York" http_target { http_method = "GET" - uri = "https://cloudscheduler.googleapis.com/v1/projects/<%= ctx[:test_env_vars]['project_name'] %>/locations/<%= ctx[:test_env_vars]['region'] %>/jobs" + uri = "https://cloudscheduler.googleapis.com/v1/projects/<%= ctx[:test_env_vars]['project_name'] %>/locations/<%= ctx[:test_env_vars]['region'] %>/jobs" oauth_token { - service_account_email = "${data.google_compute_default_service_account.default.email}" + service_account_email = data.google_compute_default_service_account.default.email } } } - diff --git a/templates/terraform/examples/scheduler_job_oidc.tf.erb b/templates/terraform/examples/scheduler_job_oidc.tf.erb index 5169b7de4251..bb252a585da6 100644 --- a/templates/terraform/examples/scheduler_job_oidc.tf.erb +++ b/templates/terraform/examples/scheduler_job_oidc.tf.erb 
@@ -1,18 +1,18 @@ -data "google_compute_default_service_account" "default" { } +data "google_compute_default_service_account" "default" { +} resource "google_cloud_scheduler_job" "job" { - name = "<%= ctx[:vars]['job_name'] %>" + name = "<%= ctx[:vars]['job_name'] %>" description = "test http job" - schedule = "*/8 * * * *" - time_zone = "America/New_York" + schedule = "*/8 * * * *" + time_zone = "America/New_York" http_target { http_method = "GET" - uri = "https://example.com/ping" + uri = "https://example.com/ping" oidc_token { - service_account_email = "${data.google_compute_default_service_account.default.email}" + service_account_email = data.google_compute_default_service_account.default.email } } } - diff --git a/templates/terraform/examples/scheduler_job_pubsub.tf.erb b/templates/terraform/examples/scheduler_job_pubsub.tf.erb index b0602e4f9e1f..1d6d100cf98c 100644 --- a/templates/terraform/examples/scheduler_job_pubsub.tf.erb +++ b/templates/terraform/examples/scheduler_job_pubsub.tf.erb @@ -3,13 +3,12 @@ resource "google_pubsub_topic" "topic" { } resource "google_cloud_scheduler_job" "job" { - name = "<%= ctx[:vars]['job_name'] %>" + name = "<%= ctx[:vars]['job_name'] %>" description = "test job" - schedule = "*/2 * * * *" + schedule = "*/2 * * * *" pubsub_target { - topic_name = "${google_pubsub_topic.topic.id}" - data = "${base64encode("test")}" + topic_name = google_pubsub_topic.topic.id + data = base64encode("test") } } - diff --git a/templates/terraform/examples/snapshot_basic.tf.erb b/templates/terraform/examples/snapshot_basic.tf.erb index ab3e6804f1c5..f0e931afe4d3 100644 --- a/templates/terraform/examples/snapshot_basic.tf.erb +++ b/templates/terraform/examples/snapshot_basic.tf.erb @@ -1,22 +1,21 @@ resource "google_compute_snapshot" "<%= ctx[:primary_resource_id] %>" { - name = "<%= ctx[:vars]['snapshot_name'] %>" - source_disk = "${google_compute_disk.persistent.name}" - zone = "us-central1-a" - labels = { - my_label = "value" - } + name = "<%= ctx[:vars]['snapshot_name'] %>" + source_disk = google_compute_disk.persistent.name + zone = "us-central1-a" + labels = { + my_label = "value" + } } data "google_compute_image" "debian" { - family = "debian-9" - project = "debian-cloud" + family = "debian-9" + project = "debian-cloud" } resource "google_compute_disk" "persistent" { - name = "<%= ctx[:vars]['disk_name'] %>" - image = "${data.google_compute_image.debian.self_link}" - size = 10 - type = "pd-ssd" - zone = "us-central1-a" + name = "<%= ctx[:vars]['disk_name'] %>" + image = data.google_compute_image.debian.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" } - diff --git a/templates/terraform/examples/spanner_database_basic.tf.erb b/templates/terraform/examples/spanner_database_basic.tf.erb index 73d90caf801a..a68752f8ef2b 100644 --- a/templates/terraform/examples/spanner_database_basic.tf.erb +++ b/templates/terraform/examples/spanner_database_basic.tf.erb @@ -4,10 +4,10 @@ resource "google_spanner_instance" "main" { } resource "google_spanner_database" "database" { - instance = "${google_spanner_instance.main.name}" - name = "<%= ctx[:vars]['database_name'] %>" - ddl = [ + instance = google_spanner_instance.main.name + name = "<%= ctx[:vars]['database_name'] %>" + ddl = [ "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", - "CREATE TABLE t2 (t2 INT64 NOT NULL,) PRIMARY KEY(t2)" + "CREATE TABLE t2 (t2 INT64 NOT NULL,) PRIMARY KEY(t2)", ] } diff --git a/templates/terraform/examples/spanner_instance_basic.tf.erb 
b/templates/terraform/examples/spanner_instance_basic.tf.erb index de2d41635c2c..02bd2715d9a9 100644 --- a/templates/terraform/examples/spanner_instance_basic.tf.erb +++ b/templates/terraform/examples/spanner_instance_basic.tf.erb @@ -1,7 +1,7 @@ resource "google_spanner_instance" "example" { - config = "regional-us-central1" - display_name = "Test Spanner Instance" - num_nodes = 2 + config = "regional-us-central1" + display_name = "Test Spanner Instance" + num_nodes = 2 labels = { "foo" = "bar" } diff --git a/templates/terraform/examples/sql_database_basic.tf.erb b/templates/terraform/examples/sql_database_basic.tf.erb index fad36f7266a1..05ecb7333839 100644 --- a/templates/terraform/examples/sql_database_basic.tf.erb +++ b/templates/terraform/examples/sql_database_basic.tf.erb @@ -1,12 +1,12 @@ resource "google_sql_database" "<%= ctx[:primary_resource_id] %>" { - name = "<%= ctx[:vars]['database_name'] %>" - instance = "${google_sql_database_instance.instance.name}" + name = "<%= ctx[:vars]['database_name'] %>" + instance = google_sql_database_instance.instance.name } resource "google_sql_database_instance" "instance" { - name = "<%= ctx[:vars]['database_instance_name'] %>" - region = "us-central" - settings { - tier = "D0" - } + name = "<%= ctx[:vars]['database_instance_name'] %>" + region = "us-central" + settings { + tier = "D0" + } } diff --git a/templates/terraform/examples/ssl_certificate_basic.tf.erb b/templates/terraform/examples/ssl_certificate_basic.tf.erb index 8c0a68bd6156..f8adfac700ef 100644 --- a/templates/terraform/examples/ssl_certificate_basic.tf.erb +++ b/templates/terraform/examples/ssl_certificate_basic.tf.erb @@ -1,8 +1,8 @@ resource "google_compute_ssl_certificate" "default" { name_prefix = "my-certificate-" description = "a description" - private_key = "${file("path/to/private.key")}" - certificate = "${file("path/to/certificate.crt")}" + private_key = file("path/to/private.key") + certificate = file("path/to/certificate.crt") lifecycle { create_before_destroy = true diff --git a/templates/terraform/examples/ssl_certificate_random_provider.tf.erb b/templates/terraform/examples/ssl_certificate_random_provider.tf.erb index fb80966b3fb1..22447105f033 100644 --- a/templates/terraform/examples/ssl_certificate_random_provider.tf.erb +++ b/templates/terraform/examples/ssl_certificate_random_provider.tf.erb @@ -2,9 +2,9 @@ resource "google_compute_ssl_certificate" "default" { # The name will contain 8 random hex digits, # e.g. 
"my-certificate-48ab27cd2a" - name = "${random_id.certificate.hex}" - private_key = "${file("path/to/private.key")}" - certificate = "${file("path/to/certificate.crt")}" + name = random_id.certificate.hex + private_key = file("path/to/private.key") + certificate = file("path/to/certificate.crt") lifecycle { create_before_destroy = true @@ -17,7 +17,7 @@ resource "random_id" "certificate" { # For security, do not expose raw certificate values in the output keepers = { - private_key = "${base64sha256(file("path/to/private.key"))}" - certificate = "${base64sha256(file("path/to/certificate.crt"))}" + private_key = filebase64sha256("path/to/private.key") + certificate = filebase64sha256("path/to/certificate.crt") } } diff --git a/templates/terraform/examples/ssl_certificate_target_https_proxies.tf.erb b/templates/terraform/examples/ssl_certificate_target_https_proxies.tf.erb index b96947008bb2..85f0644444dd 100644 --- a/templates/terraform/examples/ssl_certificate_target_https_proxies.tf.erb +++ b/templates/terraform/examples/ssl_certificate_target_https_proxies.tf.erb @@ -10,8 +10,8 @@ resource "google_compute_ssl_certificate" "default" { name_prefix = "my-certificate-" - private_key = "${file("path/to/private.key")}" - certificate = "${file("path/to/certificate.crt")}" + private_key = file("path/to/private.key") + certificate = file("path/to/certificate.crt") lifecycle { create_before_destroy = true @@ -20,15 +20,15 @@ resource "google_compute_ssl_certificate" "default" { resource "google_compute_target_https_proxy" "default" { name = "<%= ctx[:vars]['target_https_proxy_name'] %>" - url_map = "${google_compute_url_map.default.self_link}" - ssl_certificates = ["${google_compute_ssl_certificate.default.self_link}"] + url_map = google_compute_url_map.default.self_link + ssl_certificates = [google_compute_ssl_certificate.default.self_link] } resource "google_compute_url_map" "default" { name = "<%= ctx[:vars]['url_map_name'] %>" description = "a description" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link host_rule { hosts = ["mysite.com"] @@ -37,11 +37,11 @@ resource "google_compute_url_map" "default" { path_matcher { name = "allpaths" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link path_rule { paths = ["/*"] - service = "${google_compute_backend_service.default.self_link}" + service = google_compute_backend_service.default.self_link } } } @@ -52,7 +52,7 @@ resource "google_compute_backend_service" "default" { protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_http_health_check" "default" { diff --git a/templates/terraform/examples/storage_bucket_access_control_public_bucket.tf.erb b/templates/terraform/examples/storage_bucket_access_control_public_bucket.tf.erb index c8034958a9a2..6d6892f7b0af 100644 --- a/templates/terraform/examples/storage_bucket_access_control_public_bucket.tf.erb +++ b/templates/terraform/examples/storage_bucket_access_control_public_bucket.tf.erb @@ -5,5 +5,5 @@ resource "google_storage_bucket_access_control" "<%= ctx[:primary_resource_id] % } resource "google_storage_bucket" "bucket" { - name = "<%= ctx[:vars]['bucket_name'] %>" + name = "<%= ctx[:vars]['bucket_name'] %>" } diff --git 
a/templates/terraform/examples/storage_default_object_access_control_public.tf.erb b/templates/terraform/examples/storage_default_object_access_control_public.tf.erb index bd83d319ec5c..289cd496dd56 100644 --- a/templates/terraform/examples/storage_default_object_access_control_public.tf.erb +++ b/templates/terraform/examples/storage_default_object_access_control_public.tf.erb @@ -1,9 +1,9 @@ resource "google_storage_default_object_access_control" "<%= ctx[:primary_resource_id] %>" { - bucket = "${google_storage_bucket.bucket.name}" + bucket = google_storage_bucket.bucket.name role = "READER" entity = "allUsers" } resource "google_storage_bucket" "bucket" { - name = "<%= ctx[:vars]['bucket_name'] %>" + name = "<%= ctx[:vars]['bucket_name'] %>" } diff --git a/templates/terraform/examples/storage_object_access_control_public_object.tf.erb b/templates/terraform/examples/storage_object_access_control_public_object.tf.erb index c7474ef47447..e5aa84360af1 100644 --- a/templates/terraform/examples/storage_object_access_control_public_object.tf.erb +++ b/templates/terraform/examples/storage_object_access_control_public_object.tf.erb @@ -1,16 +1,16 @@ resource "google_storage_object_access_control" "<%= ctx[:primary_resource_id] %>" { - object = "${google_storage_bucket_object.object.output_name}" - bucket = "${google_storage_bucket.bucket.name}" + object = google_storage_bucket_object.object.output_name + bucket = google_storage_bucket.bucket.name role = "READER" entity = "allUsers" } resource "google_storage_bucket" "bucket" { - name = "<%= ctx[:vars]['bucket_name'] %>" + name = "<%= ctx[:vars]['bucket_name'] %>" } - resource "google_storage_bucket_object" "object" { - name = "<%= ctx[:vars]['object_name'] %>" - bucket = "${google_storage_bucket.bucket.name}" - source = "../static/img/header-logo.png" +resource "google_storage_bucket_object" "object" { + name = "<%= ctx[:vars]['object_name'] %>" + bucket = google_storage_bucket.bucket.name + source = "../static/img/header-logo.png" } diff --git a/templates/terraform/examples/subnetwork_basic.tf.erb b/templates/terraform/examples/subnetwork_basic.tf.erb index 949fb47d6d56..4cd85ab1e126 100644 --- a/templates/terraform/examples/subnetwork_basic.tf.erb +++ b/templates/terraform/examples/subnetwork_basic.tf.erb @@ -2,7 +2,7 @@ resource "google_compute_subnetwork" "network-with-private-secondary-ip-ranges" name = "<%= ctx[:vars]['subnetwork_name'] %>" ip_cidr_range = "10.2.0.0/16" region = "us-central1" - network = "${google_compute_network.custom-test.self_link}" + network = google_compute_network.custom-test.self_link secondary_ip_range { range_name = "tf-test-secondary-range-update1" ip_cidr_range = "192.168.10.0/24" @@ -13,4 +13,3 @@ resource "google_compute_network" "custom-test" { name = "<%= ctx[:vars]['network_name'] %>" auto_create_subnetworks = false } - diff --git a/templates/terraform/examples/subnetwork_internal_l7lb.tf.erb b/templates/terraform/examples/subnetwork_internal_l7lb.tf.erb index 1a78571a5b5c..12bf8b878ecb 100644 --- a/templates/terraform/examples/subnetwork_internal_l7lb.tf.erb +++ b/templates/terraform/examples/subnetwork_internal_l7lb.tf.erb @@ -1,16 +1,16 @@ resource "google_compute_subnetwork" "network-for-l7lb" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['subnetwork_name'] %>" ip_cidr_range = "10.0.0.0/22" region = "us-central1" purpose = "INTERNAL_HTTPS_LOAD_BALANCER" role = "ACTIVE" - network = "${google_compute_network.custom-test.self_link}" + network = 
google_compute_network.custom-test.self_link } resource "google_compute_network" "custom-test" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['network_name'] %>" auto_create_subnetworks = false diff --git a/templates/terraform/examples/subnetwork_logging_config.tf.erb b/templates/terraform/examples/subnetwork_logging_config.tf.erb index 9cefebcc99eb..048a98af49a5 100644 --- a/templates/terraform/examples/subnetwork_logging_config.tf.erb +++ b/templates/terraform/examples/subnetwork_logging_config.tf.erb @@ -2,7 +2,7 @@ resource "google_compute_subnetwork" "subnet-with-logging" { name = "<%= ctx[:vars]['subnetwork_name'] %>" ip_cidr_range = "10.2.0.0/16" region = "us-central1" - network = "${google_compute_network.custom-test.self_link}" + network = google_compute_network.custom-test.self_link log_config { aggregation_interval = "INTERVAL_10_MIN" diff --git a/templates/terraform/examples/target_http_proxy_basic.tf.erb b/templates/terraform/examples/target_http_proxy_basic.tf.erb index 6a74902b38bc..a3e099b98fca 100644 --- a/templates/terraform/examples/target_http_proxy_basic.tf.erb +++ b/templates/terraform/examples/target_http_proxy_basic.tf.erb @@ -1,11 +1,11 @@ resource "google_compute_target_http_proxy" "default" { - name = "<%= ctx[:vars]['target_http_proxy_name'] %>" - url_map = "${google_compute_url_map.default.self_link}" + name = "<%= ctx[:vars]['target_http_proxy_name'] %>" + url_map = google_compute_url_map.default.self_link } resource "google_compute_url_map" "default" { - name = "<%= ctx[:vars]['url_map_name'] %>" - default_service = "${google_compute_backend_service.default.self_link}" + name = "<%= ctx[:vars]['url_map_name'] %>" + default_service = google_compute_backend_service.default.self_link host_rule { hosts = ["mysite.com"] @@ -14,11 +14,11 @@ resource "google_compute_url_map" "default" { path_matcher { name = "allpaths" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link path_rule { paths = ["/*"] - service = "${google_compute_backend_service.default.self_link}" + service = google_compute_backend_service.default.self_link } } } @@ -29,7 +29,7 @@ resource "google_compute_backend_service" "default" { protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_http_health_check" "default" { diff --git a/templates/terraform/examples/target_https_proxy_basic.tf.erb b/templates/terraform/examples/target_https_proxy_basic.tf.erb index c572d72b0d06..d8de469b10c1 100644 --- a/templates/terraform/examples/target_https_proxy_basic.tf.erb +++ b/templates/terraform/examples/target_https_proxy_basic.tf.erb @@ -1,20 +1,20 @@ resource "google_compute_target_https_proxy" "default" { name = "<%= ctx[:vars]['target_https_proxy_name'] %>" - url_map = "${google_compute_url_map.default.self_link}" - ssl_certificates = ["${google_compute_ssl_certificate.default.self_link}"] + url_map = google_compute_url_map.default.self_link + ssl_certificates = [google_compute_ssl_certificate.default.self_link] } resource "google_compute_ssl_certificate" "default" { name = "<%= ctx[:vars]['ssl_certificate_name'] %>" - private_key = "${file("path/to/private.key")}" - certificate = "${file("path/to/certificate.crt")}" + private_key = file("path/to/private.key") + certificate = file("path/to/certificate.crt") } resource 
"google_compute_url_map" "default" { name = "<%= ctx[:vars]['url_map_name'] %>" description = "a description" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link host_rule { hosts = ["mysite.com"] @@ -23,11 +23,11 @@ resource "google_compute_url_map" "default" { path_matcher { name = "allpaths" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link path_rule { paths = ["/*"] - service = "${google_compute_backend_service.default.self_link}" + service = google_compute_backend_service.default.self_link } } } @@ -38,7 +38,7 @@ resource "google_compute_backend_service" "default" { protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_http_health_check" "default" { diff --git a/templates/terraform/examples/target_instance_basic.tf.erb b/templates/terraform/examples/target_instance_basic.tf.erb index 748ca8b1b3f6..c1ea4e2daacb 100644 --- a/templates/terraform/examples/target_instance_basic.tf.erb +++ b/templates/terraform/examples/target_instance_basic.tf.erb @@ -1,6 +1,6 @@ resource "google_compute_target_instance" "<%= ctx[:primary_resource_id] %>" { - name = "<%= ctx[:vars]['target_name'] %>" - instance = "${google_compute_instance.target-vm.self_link}" + name = "<%= ctx[:vars]['target_name'] %>" + instance = google_compute_instance.target-vm.self_link } data "google_compute_image" "vmimage" { @@ -14,8 +14,8 @@ resource "google_compute_instance" "target-vm" { zone = "us-central1-a" boot_disk { - initialize_params{ - image = "${data.google_compute_image.vmimage.self_link}" + initialize_params { + image = data.google_compute_image.vmimage.self_link } } diff --git a/templates/terraform/examples/target_ssl_proxy_basic.tf.erb b/templates/terraform/examples/target_ssl_proxy_basic.tf.erb index b1f0ee912b48..cd1207a9cc69 100644 --- a/templates/terraform/examples/target_ssl_proxy_basic.tf.erb +++ b/templates/terraform/examples/target_ssl_proxy_basic.tf.erb @@ -1,19 +1,19 @@ resource "google_compute_target_ssl_proxy" "default" { name = "<%= ctx[:vars]['target_ssl_proxy_name'] %>" - backend_service = "${google_compute_backend_service.default.self_link}" - ssl_certificates = ["${google_compute_ssl_certificate.default.self_link}"] + backend_service = google_compute_backend_service.default.self_link + ssl_certificates = [google_compute_ssl_certificate.default.self_link] } resource "google_compute_ssl_certificate" "default" { name = "<%= ctx[:vars]['ssl_certificate_name'] %>" - private_key = "${file("path/to/private.key")}" - certificate = "${file("path/to/certificate.crt")}" + private_key = file("path/to/private.key") + certificate = file("path/to/certificate.crt") } resource "google_compute_backend_service" "default" { name = "<%= ctx[:vars]['backend_service_name'] %>" protocol = "SSL" - health_checks = ["${google_compute_health_check.default.self_link}"] + health_checks = [google_compute_health_check.default.self_link] } resource "google_compute_health_check" "default" { diff --git a/templates/terraform/examples/target_tcp_proxy_basic.tf.erb b/templates/terraform/examples/target_tcp_proxy_basic.tf.erb index 25976e9a269b..fd9c04fc5346 100644 --- a/templates/terraform/examples/target_tcp_proxy_basic.tf.erb +++ b/templates/terraform/examples/target_tcp_proxy_basic.tf.erb @@ -1,14 
+1,14 @@ resource "google_compute_target_tcp_proxy" "default" { name = "<%= ctx[:vars]['target_tcp_proxy_name'] %>" - backend_service = "${google_compute_backend_service.default.self_link}" + backend_service = google_compute_backend_service.default.self_link } resource "google_compute_backend_service" "default" { - name = "<%= ctx[:vars]['backend_service_name'] %>" - protocol = "TCP" - timeout_sec = 10 + name = "<%= ctx[:vars]['backend_service_name'] %>" + protocol = "TCP" + timeout_sec = 10 - health_checks = ["${google_compute_health_check.default.self_link}"] + health_checks = [google_compute_health_check.default.self_link] } resource "google_compute_health_check" "default" { diff --git a/templates/terraform/examples/target_vpn_gateway_basic.tf.erb b/templates/terraform/examples/target_vpn_gateway_basic.tf.erb index e8ef348e184b..824ce65f7772 100644 --- a/templates/terraform/examples/target_vpn_gateway_basic.tf.erb +++ b/templates/terraform/examples/target_vpn_gateway_basic.tf.erb @@ -1,37 +1,37 @@ resource "google_compute_vpn_gateway" "target_gateway" { name = "<%= ctx[:vars]['target_vpn_gateway_name'] %>" - network = "${google_compute_network.network1.self_link}" + network = google_compute_network.network1.self_link } resource "google_compute_network" "network1" { - name = "<%= ctx[:vars]['network_name'] %>" + name = "<%= ctx[:vars]['network_name'] %>" } resource "google_compute_address" "vpn_static_ip" { - name = "<%= ctx[:vars]['address_name'] %>" + name = "<%= ctx[:vars]['address_name'] %>" } resource "google_compute_forwarding_rule" "fr_esp" { name = "<%= ctx[:vars]['esp_forwarding_rule_name'] %>" ip_protocol = "ESP" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_forwarding_rule" "fr_udp500" { name = "<%= ctx[:vars]['udp500_forwarding_rule_name'] %>" ip_protocol = "UDP" port_range = "500" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_forwarding_rule" "fr_udp4500" { name = "<%= ctx[:vars]['udp4500_forwarding_rule_name'] %>" ip_protocol = "UDP" port_range = "4500" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_vpn_tunnel" "tunnel1" { @@ -39,20 +39,20 @@ resource "google_compute_vpn_tunnel" "tunnel1" { peer_ip = "15.0.0.120" shared_secret = "a secret message" - target_vpn_gateway = "${google_compute_vpn_gateway.target_gateway.self_link}" + target_vpn_gateway = google_compute_vpn_gateway.target_gateway.self_link depends_on = [ - "google_compute_forwarding_rule.fr_esp", - "google_compute_forwarding_rule.fr_udp500", - "google_compute_forwarding_rule.fr_udp4500", + google_compute_forwarding_rule.fr_esp, + google_compute_forwarding_rule.fr_udp500, + google_compute_forwarding_rule.fr_udp4500, ] } resource "google_compute_route" "route1" { name = "<%= ctx[:vars]['route_name'] %>" - network = "${google_compute_network.network1.name}" + network = google_compute_network.network1.name dest_range = 
"15.0.0.0/24" priority = 1000 - next_hop_vpn_tunnel = "${google_compute_vpn_tunnel.tunnel1.self_link}" + next_hop_vpn_tunnel = google_compute_vpn_tunnel.tunnel1.self_link } diff --git a/templates/terraform/examples/tpu_node_basic.tf.erb b/templates/terraform/examples/tpu_node_basic.tf.erb index 32fc6509e97a..90701ef38ee5 100644 --- a/templates/terraform/examples/tpu_node_basic.tf.erb +++ b/templates/terraform/examples/tpu_node_basic.tf.erb @@ -1,15 +1,17 @@ <%#- - WARNING: cidr_block must not overlap with other existing TPU blocks - Make sure if you change this value that it does not overlap with the - autogenerated examples. + WARNING: cidr_block must not overlap with other existing TPU blocks + Make sure if you change this value that it does not overlap with the + autogenerated examples. -%> -data "google_tpu_tensorflow_versions" "available" { } + +data "google_tpu_tensorflow_versions" "available" { +} resource "google_tpu_node" "<%= ctx[:primary_resource_id] %>" { - name = "<%= ctx[:vars]["node_name"] %>" - zone = "us-central1-b" + name = "<%= ctx[:vars]["node_name"] %>" + zone = "us-central1-b" - accelerator_type = "v3-8" - tensorflow_version = "${data.google_tpu_tensorflow_versions.available.versions[0]}" - cidr_block = "10.2.0.0/29" + accelerator_type = "v3-8" + tensorflow_version = data.google_tpu_tensorflow_versions.available.versions[0] + cidr_block = "10.2.0.0/29" } diff --git a/templates/terraform/examples/tpu_node_full.tf.erb b/templates/terraform/examples/tpu_node_full.tf.erb index 1780ee2436d7..818161d7fb4a 100644 --- a/templates/terraform/examples/tpu_node_full.tf.erb +++ b/templates/terraform/examples/tpu_node_full.tf.erb @@ -1,33 +1,35 @@ -data "google_tpu_tensorflow_versions" "available" { } +data "google_tpu_tensorflow_versions" "available" { +} <%#- - WARNING: cidr_block must not overlap with other existing TPU blocks - Make sure if you change this value that it does not overlap with the - autogenerated examples. + WARNING: cidr_block must not overlap with other existing TPU blocks + Make sure if you change this value that it does not overlap with the + autogenerated examples. -%> + resource "google_tpu_node" "<%= ctx[:primary_resource_id] %>" { - name = "<%= ctx[:vars]["node_name"] %>" - zone = "us-central1-b" + name = "<%= ctx[:vars]["node_name"] %>" + zone = "us-central1-b" - accelerator_type = "v3-8" + accelerator_type = "v3-8" - cidr_block = "10.3.0.0/29" - tensorflow_version = "${data.google_tpu_tensorflow_versions.available.versions[0]}" + cidr_block = "10.3.0.0/29" + tensorflow_version = data.google_tpu_tensorflow_versions.available.versions[0] - description = "Terraform Google Provider test TPU" + description = "Terraform Google Provider test TPU" <%#- - We previously used a separate network resource here, but TPUs only allow using 50 - different network names, ever. This caused our tests to start failing, so just - use the default network in order to still demonstrate using as many fields as - possible on the resource. + We previously used a separate network resource here, but TPUs only allow using 50 + different network names, ever. This caused our tests to start failing, so just + use the default network in order to still demonstrate using as many fields as + possible on the resource. 
-%> - network = "default" + network = "default" - labels = { - foo = "bar" - } + labels = { + foo = "bar" + } - scheduling_config { - preemptible = true - } + scheduling_config { + preemptible = true + } } diff --git a/templates/terraform/examples/uptime_check_config_http.tf.erb b/templates/terraform/examples/uptime_check_config_http.tf.erb index 90210378f047..4c10c16a974b 100644 --- a/templates/terraform/examples/uptime_check_config_http.tf.erb +++ b/templates/terraform/examples/uptime_check_config_http.tf.erb @@ -1,6 +1,6 @@ resource "google_monitoring_uptime_check_config" "<%= ctx[:primary_resource_id] %>" { display_name = "<%= ctx[:vars]["display_name"] %>" - timeout = "60s" + timeout = "60s" http_check { path = "/some-path" @@ -11,11 +11,12 @@ resource "google_monitoring_uptime_check_config" "<%= ctx[:primary_resource_id] type = "uptime_url" labels = { project_id = "<%= ctx[:test_env_vars]["project_id"] %>" - host = "192.168.1.1" + host = "192.168.1.1" } } content_matchers { content = "example" } -} \ No newline at end of file +} + diff --git a/templates/terraform/examples/uptime_check_tcp.tf.erb b/templates/terraform/examples/uptime_check_tcp.tf.erb index 46e3ebf7097b..c86059090301 100644 --- a/templates/terraform/examples/uptime_check_tcp.tf.erb +++ b/templates/terraform/examples/uptime_check_tcp.tf.erb @@ -1,6 +1,6 @@ resource "google_monitoring_uptime_check_config" "<%= ctx[:primary_resource_id] %>" { display_name = "<%= ctx[:vars]["display_name"] %>" - timeout = "60s" + timeout = "60s" tcp_check { port = 888 @@ -8,12 +8,11 @@ resource "google_monitoring_uptime_check_config" "<%= ctx[:primary_resource_id] resource_group { resource_type = "INSTANCE" - group_id = "${google_monitoring_group.check.name}" + group_id = google_monitoring_group.check.name } } - resource "google_monitoring_group" "check" { display_name = "<%= ctx[:vars]["group_display_name"] %>" - filter = "resource.metadata.name=has_substring(\"foo\")" -} \ No newline at end of file + filter = "resource.metadata.name=has_substring(\"foo\")" +} diff --git a/templates/terraform/examples/url_map_basic.tf.erb b/templates/terraform/examples/url_map_basic.tf.erb index 973aaed32755..80146f35e2e7 100644 --- a/templates/terraform/examples/url_map_basic.tf.erb +++ b/templates/terraform/examples/url_map_basic.tf.erb @@ -2,7 +2,7 @@ resource "google_compute_url_map" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['url_map_name'] %>" description = "a description" - default_service = "${google_compute_backend_service.home.self_link}" + default_service = google_compute_backend_service.home.self_link host_rule { hosts = ["mysite.com"] @@ -11,26 +11,26 @@ resource "google_compute_url_map" "<%= ctx[:primary_resource_id] %>" { path_matcher { name = "allpaths" - default_service = "${google_compute_backend_service.home.self_link}" + default_service = google_compute_backend_service.home.self_link path_rule { paths = ["/home"] - service = "${google_compute_backend_service.home.self_link}" + service = google_compute_backend_service.home.self_link } path_rule { paths = ["/login"] - service = "${google_compute_backend_service.login.self_link}" + service = google_compute_backend_service.login.self_link } path_rule { paths = ["/static"] - service = "${google_compute_backend_bucket.static.self_link}" + service = google_compute_backend_bucket.static.self_link } } test { - service = "${google_compute_backend_service.home.self_link}" + service = google_compute_backend_service.home.self_link host = "hi.com" path = "/home" } @@ -42,7 +42,7 @@ 
resource "google_compute_backend_service" "login" { protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_backend_service" "home" { @@ -51,7 +51,7 @@ resource "google_compute_backend_service" "home" { protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_http_health_check" "default" { @@ -63,7 +63,7 @@ resource "google_compute_http_health_check" "default" { resource "google_compute_backend_bucket" "static" { name = "<%= ctx[:vars]['backend_bucket_name'] %>" - bucket_name = "${google_storage_bucket.static.name}" + bucket_name = google_storage_bucket.static.name enable_cdn = true } diff --git a/templates/terraform/examples/vpc_access_connector.tf.erb b/templates/terraform/examples/vpc_access_connector.tf.erb index 9ef9215dfef5..7b380abfe3d0 100644 --- a/templates/terraform/examples/vpc_access_connector.tf.erb +++ b/templates/terraform/examples/vpc_access_connector.tf.erb @@ -1,8 +1,9 @@ -provider "google-beta" {} +provider "google-beta" { +} resource "google_vpc_access_connector" "connector" { name = "<%= ctx[:vars]['name'] %>" - provider = "google-beta" + provider = google-beta region = "us-central1" ip_cidr_range = "10.8.0.0/28" network = "default" diff --git a/templates/terraform/examples/vpn_tunnel_basic.tf.erb b/templates/terraform/examples/vpn_tunnel_basic.tf.erb index 25e23ebb8c3a..4991d0d0234b 100644 --- a/templates/terraform/examples/vpn_tunnel_basic.tf.erb +++ b/templates/terraform/examples/vpn_tunnel_basic.tf.erb @@ -3,56 +3,56 @@ resource "google_compute_vpn_tunnel" "tunnel1" { peer_ip = "15.0.0.120" shared_secret = "a secret message" - target_vpn_gateway = "${google_compute_vpn_gateway.target_gateway.self_link}" + target_vpn_gateway = google_compute_vpn_gateway.target_gateway.self_link depends_on = [ - "google_compute_forwarding_rule.fr_esp", - "google_compute_forwarding_rule.fr_udp500", - "google_compute_forwarding_rule.fr_udp4500", + google_compute_forwarding_rule.fr_esp, + google_compute_forwarding_rule.fr_udp500, + google_compute_forwarding_rule.fr_udp4500, ] } resource "google_compute_vpn_gateway" "target_gateway" { name = "<%= ctx[:vars]['target_vpn_gateway_name'] %>" - network = "${google_compute_network.network1.self_link}" + network = google_compute_network.network1.self_link } resource "google_compute_network" "network1" { - name = "<%= ctx[:vars]['network_name'] %>" + name = "<%= ctx[:vars]['network_name'] %>" } resource "google_compute_address" "vpn_static_ip" { - name = "<%= ctx[:vars]['address_name'] %>" + name = "<%= ctx[:vars]['address_name'] %>" } resource "google_compute_forwarding_rule" "fr_esp" { name = "<%= ctx[:vars]['esp_forwarding_rule_name'] %>" ip_protocol = "ESP" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_forwarding_rule" "fr_udp500" { name = "<%= ctx[:vars]['udp500_forwarding_rule_name'] %>" ip_protocol = "UDP" port_range = "500" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = 
google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_forwarding_rule" "fr_udp4500" { name = "<%= ctx[:vars]['udp4500_forwarding_rule_name'] %>" ip_protocol = "UDP" port_range = "4500" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_route" "route1" { name = "<%= ctx[:vars]['route_name'] %>" - network = "${google_compute_network.network1.name}" + network = google_compute_network.network1.name dest_range = "15.0.0.0/24" priority = 1000 - next_hop_vpn_tunnel = "${google_compute_vpn_tunnel.tunnel1.self_link}" + next_hop_vpn_tunnel = google_compute_vpn_tunnel.tunnel1.self_link } diff --git a/templates/terraform/examples/vpn_tunnel_beta.tf.erb b/templates/terraform/examples/vpn_tunnel_beta.tf.erb index 4553262e209d..a363eb6b5da0 100644 --- a/templates/terraform/examples/vpn_tunnel_beta.tf.erb +++ b/templates/terraform/examples/vpn_tunnel_beta.tf.erb @@ -1,15 +1,15 @@ resource "google_compute_vpn_tunnel" "tunnel1" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['vpn_tunnel_name'] %>" peer_ip = "15.0.0.120" shared_secret = "a secret message" - target_vpn_gateway = "${google_compute_vpn_gateway.target_gateway.self_link}" + target_vpn_gateway = google_compute_vpn_gateway.target_gateway.self_link depends_on = [ - "google_compute_forwarding_rule.fr_esp", - "google_compute_forwarding_rule.fr_udp500", - "google_compute_forwarding_rule.fr_udp4500", + google_compute_forwarding_rule.fr_esp, + google_compute_forwarding_rule.fr_udp500, + google_compute_forwarding_rule.fr_udp4500, ] labels = { @@ -18,58 +18,58 @@ resource "google_compute_vpn_tunnel" "tunnel1" { } resource "google_compute_vpn_gateway" "target_gateway" { - provider = "google-beta" - name = "<%= ctx[:vars]['target_vpn_gateway_name'] %>" - network = "${google_compute_network.network1.self_link}" + provider = google-beta + name = "<%= ctx[:vars]['target_vpn_gateway_name'] %>" + network = google_compute_network.network1.self_link } resource "google_compute_network" "network1" { - provider = "google-beta" - name = "<%= ctx[:vars]['network_name'] %>" + provider = google-beta + name = "<%= ctx[:vars]['network_name'] %>" } resource "google_compute_address" "vpn_static_ip" { - provider = "google-beta" - name = "<%= ctx[:vars]['address_name'] %>" + provider = google-beta + name = "<%= ctx[:vars]['address_name'] %>" } resource "google_compute_forwarding_rule" "fr_esp" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['esp_forwarding_rule_name'] %>" ip_protocol = "ESP" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_forwarding_rule" "fr_udp500" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['udp500_forwarding_rule_name'] %>" ip_protocol = "UDP" port_range = "500" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource 
"google_compute_forwarding_rule" "fr_udp4500" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['udp4500_forwarding_rule_name'] %>" ip_protocol = "UDP" port_range = "4500" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_route" "route1" { - provider = "google-beta" + provider = google-beta name = "<%= ctx[:vars]['route_name'] %>" - network = "${google_compute_network.network1.name}" + network = google_compute_network.network1.name dest_range = "15.0.0.0/24" priority = 1000 - next_hop_vpn_tunnel = "${google_compute_vpn_tunnel.tunnel1.self_link}" + next_hop_vpn_tunnel = google_compute_vpn_tunnel.tunnel1.self_link } -provider "google-beta"{ +provider "google-beta" { region = "us-central1" zone = "us-central1-a" } diff --git a/templates/terraform/expand_property_method.erb b/templates/terraform/expand_property_method.erb index 4dc6b09518b5..63cbee02bd7c 100644 --- a/templates/terraform/expand_property_method.erb +++ b/templates/terraform/expand_property_method.erb @@ -15,7 +15,8 @@ <% if property.custom_expand -%> <%= lines(compile_template(property.custom_expand, prefix: prefix, - property: property)) -%> + property: property, + object: object)) -%> <% else -%> <%# Generate expanders for Maps %> @@ -176,7 +177,7 @@ func expand<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d T <%# Map is a map from {key -> object} in the API, but Terraform can't represent that so we treat the key as a property of the object in Terraform schema. %> <% next if property.is_a?(Api::Type::Map) && prop.name == property.key_name -%> -<%= lines(build_expand_method(prefix + titlelize_property(property), prop), 1) -%> +<%= lines(build_expand_method(prefix + titlelize_property(property), prop, object), 1) -%> <% end -%> <% end -%> diff --git a/templates/terraform/extra_schema_entry/alert_policy.erb b/templates/terraform/extra_schema_entry/alert_policy.erb index 241f6e1f319e..d1577a79b0e0 100644 --- a/templates/terraform/extra_schema_entry/alert_policy.erb +++ b/templates/terraform/extra_schema_entry/alert_policy.erb @@ -18,5 +18,5 @@ Elem: &schema.Schema{ Type: schema.TypeString, }, - Deprecated: "labels is removed as it was never used. See user_labels for the correct field", + Removed: "labels is removed as it was never used. 
See user_labels for the correct field", }, \ No newline at end of file diff --git a/templates/terraform/extra_schema_entry/disk.erb b/templates/terraform/extra_schema_entry/disk.erb deleted file mode 100644 index 70b790775337..000000000000 --- a/templates/terraform/extra_schema_entry/disk.erb +++ /dev/null @@ -1,13 +0,0 @@ -"disk_encryption_key_raw": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, - Removed: "Use disk_encryption_key.raw_key instead.", -}, - -"disk_encryption_key_sha256": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Removed: "Use disk_encryption_key.sha256 instead.", -}, diff --git a/templates/terraform/custom_expand/binaryauthorization_attestor_name.erb b/templates/terraform/extra_schema_entry/forwarding_rule.erb similarity index 74% rename from templates/terraform/custom_expand/binaryauthorization_attestor_name.erb rename to templates/terraform/extra_schema_entry/forwarding_rule.erb index 490c24471c7e..41a7a0dbe9ef 100644 --- a/templates/terraform/custom_expand/binaryauthorization_attestor_name.erb +++ b/templates/terraform/extra_schema_entry/forwarding_rule.erb @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -%> -func expand<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return replaceVars(d, config, "projects/{{project}}/attestors/{{name}}") -} +"ip_version": { + Type: schema.TypeString, + Optional: true, + Removed: "ipVersion is not used for regional forwarding rules. Please remove this field if you are using it.", +}, \ No newline at end of file diff --git a/templates/terraform/extra_schema_entry/monitoring_uptime_check_config_internal.go.erb b/templates/terraform/extra_schema_entry/monitoring_uptime_check_config_internal.go.erb index bf9b2f82a26d..3132ebe20e70 100644 --- a/templates/terraform/extra_schema_entry/monitoring_uptime_check_config_internal.go.erb +++ b/templates/terraform/extra_schema_entry/monitoring_uptime_check_config_internal.go.erb @@ -15,40 +15,38 @@ "is_internal": { Type: schema.TypeBool, Optional: true, - Computed: true, - Deprecated: "This field never worked, and will be removed in 3.0.0.", + Removed: "This field never worked, and will be removed in 3.0.0.", }, "internal_checkers": { Type: schema.TypeList, Optional: true, - Computed: true, - Deprecated: "This field never worked, and will be removed in 3.0.0.", + Removed: "This field never worked, and will be removed in 3.0.0.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "display_name": { Type: schema.TypeString, Optional: true, - Deprecated: "This field never worked, and will be removed in 3.0.0.", + Removed: "This field never worked, and will be removed in 3.0.0.", }, "gcp_zone": { Type: schema.TypeString, Optional: true, - Deprecated: "This field never worked, and will be removed in 3.0.0.", + Removed: "This field never worked, and will be removed in 3.0.0.", }, "name": { Type: schema.TypeString, Optional: true, - Deprecated: "This field never worked, and will be removed in 3.0.0.", + Removed: "This field never worked, and will be removed in 3.0.0.", }, "network": { Type: schema.TypeString, Optional: true, - Deprecated: "This field never worked, and will be removed in 3.0.0.", + Removed: "This field never worked, and will be removed in 3.0.0.", }, "peer_project_id": { Type: schema.TypeString, Optional: true, - Deprecated: "This field never worked, and will be 
removed in 3.0.0.", + Removed: "This field never worked, and will be removed in 3.0.0.", }, }, }, diff --git a/templates/terraform/extra_schema_entry/backend_service.erb b/templates/terraform/extra_schema_entry/network.erb similarity index 71% rename from templates/terraform/extra_schema_entry/backend_service.erb rename to templates/terraform/extra_schema_entry/network.erb index db1638f05310..a47523c01cca 100644 --- a/templates/terraform/extra_schema_entry/backend_service.erb +++ b/templates/terraform/extra_schema_entry/network.erb @@ -1,5 +1,5 @@ <%# The license inside this block applies to this file. - # Copyright 2018 Google Inc. + # Copyright 2019 Google Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -12,9 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -%> -"region": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Removed: "region has been removed as it was never used. For internal load balancing, use google_compute_region_backend_service", -}, +"ipv4_range": { + Type: schema.TypeString, + Computed: true, + Removed: "Legacy Networks are deprecated and you will no longer be able to create them using this field from Feb 1, 2020 onwards.", +}, \ No newline at end of file diff --git a/templates/terraform/extra_schema_entry/snapshot.erb b/templates/terraform/extra_schema_entry/snapshot.erb index 2b4a08020615..1d0b3ae6b0f9 100644 --- a/templates/terraform/extra_schema_entry/snapshot.erb +++ b/templates/terraform/extra_schema_entry/snapshot.erb @@ -16,29 +16,3 @@ Type: schema.TypeString, Computed: true, }, - -"snapshot_encryption_key_raw": { - Type: schema.TypeString, - Optional: true, - Sensitive: true, - Removed: "Use snapshot_encryption_key.raw_key instead.", -}, - -"snapshot_encryption_key_sha256": { - Type: schema.TypeString, - Computed: true, - Removed: "Use snapshot_encryption_key.sha256 instead.", -}, - -"source_disk_encryption_key_raw": { - Type: schema.TypeString, - Optional: true, - Sensitive: true, - Removed: "Use source_disk_encryption_key.raw_key instead.", -}, - -"source_disk_encryption_key_sha256": { - Type: schema.TypeString, - Computed: true, - Removed: "Use source_disk_encryption_key.sha256 instead.", -}, diff --git a/templates/terraform/custom_expand/repository_name_from_short_name.go.erb b/templates/terraform/extra_schema_entry/subnetwork.erb similarity index 69% rename from templates/terraform/custom_expand/repository_name_from_short_name.go.erb rename to templates/terraform/extra_schema_entry/subnetwork.erb index 8cb062a9c936..87ad70ed4de9 100644 --- a/templates/terraform/custom_expand/repository_name_from_short_name.go.erb +++ b/templates/terraform/extra_schema_entry/subnetwork.erb @@ -1,5 +1,5 @@ <%# The license inside this block applies to this file. - # Copyright 2017 Google Inc. + # Copyright 2019 Google Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -12,6 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
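The extra_schema_entry templates in this part of the change all follow the same pattern: an attribute that previously carried a Deprecated notice is kept in the schema solely so the SDK can report a Removed message if someone still sets it (the enable_flow_logs entry that follows is one example). Below is a minimal, self-contained sketch of how such an entry behaves once rendered into a provider resource; the package scaffolding and the surrounding resource are invented for illustration, and only the Optional/Computed/Removed shape is taken from these templates.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// exampleSubnetworkSchema is an illustrative stand-in for a generated resource
// schema that carries one of the hand-written "removed field" entries.
func exampleSubnetworkSchema() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		// Ordinary generated field.
		"name": {
			Type:     schema.TypeString,
			Required: true,
		},
		// Removed field: kept only so users who still reference it get this
		// message instead of an "unsupported argument" error. Mirrors the
		// enable_flow_logs / ipv4_range / ip_version entries nearby.
		"enable_flow_logs": {
			Type:     schema.TypeBool,
			Optional: true,
			Computed: true,
			Removed:  "This field is being removed in favor of log_config.",
		},
	}
}

func main() {
	// Print the removal notices, the same text the SDK surfaces to users.
	for name, s := range exampleSubnetworkSchema() {
		if s.Removed != "" {
			fmt.Printf("%s has been removed: %s\n", name, s.Removed)
		}
	}
}
```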
-%> -func expand<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return replaceVars(d, config, "projects/{{project}}/repos/{{name}}") -} +"enable_flow_logs": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Removed: "This field is being removed in favor of log_config. If log_config is present, flow logs are enabled. Please remove this field", +}, \ No newline at end of file diff --git a/templates/terraform/objectlib/base.go.erb b/templates/terraform/objectlib/base.go.erb index 1f097d68ecf3..14018e5b4020 100644 --- a/templates/terraform/objectlib/base.go.erb +++ b/templates/terraform/objectlib/base.go.erb @@ -67,6 +67,6 @@ func resource<%= resource_name -%>Encoder(d TerraformResourceData, meta interfac <% end -%> <% object.settable_properties.each do |prop| -%> -<%= lines(build_expand_method(resource_name, prop), 1) -%> +<%= lines(build_expand_method(resource_name, prop, object), 1) -%> <% end -%> diff --git a/templates/terraform/post_create/cloudbuild_trigger_id.go.erb b/templates/terraform/post_create/cloudbuild_trigger_id.go.erb index d57e2c82073e..27f881281051 100644 --- a/templates/terraform/post_create/cloudbuild_trigger_id.go.erb +++ b/templates/terraform/post_create/cloudbuild_trigger_id.go.erb @@ -7,7 +7,7 @@ d.Set("trigger_id", triggerId.(string)) // Store the ID now. We tried to set it before and it failed because // trigger_id didn't exist yet. -id, err = replaceVars(d, config, "{{project}}/{{trigger_id}}") +id, err = replaceVars(d, config, "projects/{{project}}/triggers/{{trigger_id}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } diff --git a/templates/terraform/resource.erb b/templates/terraform/resource.erb index 0eaf2f530220..8e2c54184cad 100644 --- a/templates/terraform/resource.erb +++ b/templates/terraform/resource.erb @@ -191,7 +191,7 @@ func resource<%= resource_name -%>Create(d *schema.ResourceData, meta interface{ } // Store the ID now - id, err := replaceVars(d, config, "<%= object.id_format -%>") + id, err := replaceVars(d, config, "<%= id_format(object) -%>") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -630,7 +630,7 @@ func resource<%= resource_name -%>Import(d *schema.ResourceData, meta interface{ } // Replace import id for the resource id - id, err := replaceVars(d, config, "<%= object.id_format -%>") + id, err := replaceVars(d, config, "<%= id_format(object) -%>") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -654,7 +654,7 @@ func resource<%= resource_name -%>Import(d *schema.ResourceData, meta interface{ <% end -%> <% object.settable_properties.each do |prop| -%> -<%= lines(build_expand_method(resource_name, prop), 1) -%> +<%= lines(build_expand_method(resource_name, prop, object), 1) -%> <% end -%> <% if object.custom_code.encoder -%> diff --git a/templates/terraform/resource_iam.html.markdown.erb b/templates/terraform/resource_iam.html.markdown.erb index bd0fa53bba26..925a2c53ca19 100644 --- a/templates/terraform/resource_iam.html.markdown.erb +++ b/templates/terraform/resource_iam.html.markdown.erb @@ -259,17 +259,17 @@ Any variables not passed in the import command will be taken from the provider c IAM member imports use space-delimited identifiers: the resource in question, the role, and the member identity, e.g. 
``` -$ terraform import <% if object.min_version.name == 'beta' %>-provider=google-beta <% end -%><%= resource_ns_iam -%>_member.editor "<%= object.id_format.gsub('{{name}}', "{{#{object.name.underscore}}}") -%> <%= object.iam_policy.allowed_iam_role -%> jane@example.com" +$ terraform import <% if object.min_version.name == 'beta' %>-provider=google-beta <% end -%><%= resource_ns_iam -%>_member.editor "<%= id_format(object).gsub('{{name}}', "{{#{object.name.underscore}}}") -%> <%= object.iam_policy.allowed_iam_role -%> jane@example.com" ``` IAM binding imports use space-delimited identifiers: the resource in question and the role, e.g. ``` -$ terraform import <%= resource_ns_iam -%>_binding.editor "<%= object.id_format.gsub('{{name}}', "{{#{object.name.underscore}}}") -%> <%= object.iam_policy.allowed_iam_role -%>" +$ terraform import <%= resource_ns_iam -%>_binding.editor "<%= id_format(object).gsub('{{name}}', "{{#{object.name.underscore}}}") -%> <%= object.iam_policy.allowed_iam_role -%>" ``` IAM policy imports use the identifier of the resource in question, e.g. ``` -$ terraform import <% if object.min_version.name == 'beta' %>-provider=google-beta <% end -%><%= resource_ns_iam -%>_policy.editor <%= object.id_format.gsub('{{name}}', "{{#{object.name.underscore}}}") %> +$ terraform import <% if object.min_version.name == 'beta' %>-provider=google-beta <% end -%><%= resource_ns_iam -%>_policy.editor <%= id_format(object).gsub('{{name}}', "{{#{object.name.underscore}}}") %> ``` -> If you're importing a resource with beta features, make sure to include `-provider=google-beta` diff --git a/templates/terraform/schema_property.erb b/templates/terraform/schema_property.erb index cee3277b822e..312778ba974a 100644 --- a/templates/terraform/schema_property.erb +++ b/templates/terraform/schema_property.erb @@ -145,6 +145,12 @@ <% conflicting_props = property.conflicting().map(&:name).map(&:underscore) -%> ConflictsWith: <%= go_literal(conflicting_props) -%>, <% end -%> +<% unless property.at_least_one_of_list().empty? -%> + AtLeastOneOf: <%= go_literal(property.at_least_one_of_list) -%>, +<% end -%> +<% unless property.exactly_one_of_list().empty? 
-%> + ExactlyOneOf: <%= go_literal(property.exactly_one_of_list) -%>, +<% end -%> }, <% else -%> // TODO: Property '<%= property.name -%>' of type <%= property.class -%> is not supported diff --git a/third_party/terraform/data_sources/data_source_compute_network_endpoint_group.go b/third_party/terraform/data_sources/data_source_compute_network_endpoint_group.go index 9945d578fb4f..bf0b16f6e1e4 100644 --- a/third_party/terraform/data_sources/data_source_compute_network_endpoint_group.go +++ b/third_party/terraform/data_sources/data_source_compute_network_endpoint_group.go @@ -25,11 +25,15 @@ func dataSourceGoogleComputeNetworkEndpointGroup() *schema.Resource { func dataSourceComputeNetworkEndpointGroupRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) if name, ok := d.GetOk("name"); ok { + project, err := getProject(d, config) + if err != nil { + return err + } zone, err := getZone(d, config) if err != nil { return err } - d.SetId(fmt.Sprintf("%s/%s", zone, name.(string))) + d.SetId(fmt.Sprintf("projects/%s/zones/%s/networkEndpointGroups/%s", project, zone, name.(string))) } else if selfLink, ok := d.GetOk("self_link"); ok { parsed, err := ParseNetworkEndpointGroupFieldValue(selfLink.(string), d, config) if err != nil { @@ -38,7 +42,7 @@ func dataSourceComputeNetworkEndpointGroupRead(d *schema.ResourceData, meta inte d.Set("name", parsed.Name) d.Set("zone", parsed.Zone) d.Set("project", parsed.Project) - d.SetId(fmt.Sprintf("%s/%s", parsed.Zone, parsed.Name)) + d.SetId(fmt.Sprintf("projects/%s/zones/%s/networkEndpointGroups/%s", parsed.Project, parsed.Zone, parsed.Name)) } else { return errors.New("Must provide either `self_link` or `zone/name`") } diff --git a/third_party/terraform/data_sources/data_source_dns_managed_zone.go b/third_party/terraform/data_sources/data_source_dns_managed_zone.go index f86699203a50..e5a281b8598e 100644 --- a/third_party/terraform/data_sources/data_source_dns_managed_zone.go +++ b/third_party/terraform/data_sources/data_source_dns_managed_zone.go @@ -1,6 +1,10 @@ package google -import "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) func dataSourceDnsManagedZone() *schema.Resource { return &schema.Resource{ @@ -43,13 +47,13 @@ func dataSourceDnsManagedZone() *schema.Resource { func dataSourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - d.SetId(d.Get("name").(string)) - project, err := getProject(d, config) if err != nil { return err } + d.SetId(fmt.Sprintf("projects/%s/managedZones/%s", project, d.Get("name").(string))) + zone, err := config.clientDns.ManagedZones.Get( project, d.Id()).Do() if err != nil { diff --git a/third_party/terraform/data_sources/data_source_google_client_openid_userinfo.go b/third_party/terraform/data_sources/data_source_google_client_openid_userinfo.go index eb524a0e2f9e..6d8cc2a44fe0 100644 --- a/third_party/terraform/data_sources/data_source_google_client_openid_userinfo.go +++ b/third_party/terraform/data_sources/data_source_google_client_openid_userinfo.go @@ -26,7 +26,7 @@ func dataSourceGoogleClientOpenIDUserinfoRead(d *schema.ResourceData, meta inter // URL retrieved from https://accounts.google.com/.well-known/openid-configuration res, err := sendRequest(config, "GET", "", "https://openidconnect.googleapis.com/v1/userinfo", nil) if err != nil { - return fmt.Errorf("error retrieving userinfo for your provider credentials; have you enabled the 
'https://www.googleapis.com/auth/userinfo.email' scope? error: %s", err) + return fmt.Errorf("error retrieving userinfo for your provider credentials. have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope? error: %s", err) } d.SetId(time.Now().UTC().String()) diff --git a/third_party/terraform/data_sources/data_source_google_cloudfunctions_function.go b/third_party/terraform/data_sources/data_source_google_cloudfunctions_function.go index 6722215b9cc9..303e1b0a3368 100644 --- a/third_party/terraform/data_sources/data_source_google_cloudfunctions_function.go +++ b/third_party/terraform/data_sources/data_source_google_cloudfunctions_function.go @@ -39,7 +39,7 @@ func dataSourceGoogleCloudFunctionsFunctionRead(d *schema.ResourceData, meta int Name: d.Get("name").(string), } - d.SetId(cloudFuncId.terraformId()) + d.SetId(cloudFuncId.cloudFunctionId()) err = resourceCloudFunctionsRead(d, meta) if err != nil { diff --git a/third_party/terraform/data_sources/data_source_google_compute_address.go b/third_party/terraform/data_sources/data_source_google_compute_address.go index c1286e3f9d5b..28bee45f3539 100644 --- a/third_party/terraform/data_sources/data_source_google_compute_address.go +++ b/third_party/terraform/data_sources/data_source_google_compute_address.go @@ -3,7 +3,6 @@ package google import ( "fmt" "regexp" - "strconv" "strings" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" @@ -78,7 +77,7 @@ func dataSourceGoogleComputeAddressRead(d *schema.ResourceData, meta interface{} d.Set("project", project) d.Set("region", region) - d.SetId(strconv.FormatUint(address.Id, 10)) + d.SetId(fmt.Sprintf("projects/%s/regions/%s/addresses/%s", project, region, name)) return nil } diff --git a/third_party/terraform/data_sources/data_source_google_compute_backend_service.go b/third_party/terraform/data_sources/data_source_google_compute_backend_service.go index 55078c5d5be2..a8ac7db3b9a3 100644 --- a/third_party/terraform/data_sources/data_source_google_compute_backend_service.go +++ b/third_party/terraform/data_sources/data_source_google_compute_backend_service.go @@ -1,6 +1,8 @@ package google import ( + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) @@ -20,9 +22,16 @@ func dataSourceGoogleComputeBackendService() *schema.Resource { } func dataSourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + serviceName := d.Get("name").(string) - d.SetId(serviceName) + project, err := getProject(d, config) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("projects/%s/global/backendServices/%s", project, serviceName)) return resourceComputeBackendServiceRead(d, meta) } diff --git a/third_party/terraform/data_sources/data_source_google_compute_forwarding_rule.go b/third_party/terraform/data_sources/data_source_google_compute_forwarding_rule.go index 6acbc98c9f31..bb6613fc2ef3 100644 --- a/third_party/terraform/data_sources/data_source_google_compute_forwarding_rule.go +++ b/third_party/terraform/data_sources/data_source_google_compute_forwarding_rule.go @@ -107,7 +107,7 @@ func dataSourceGoogleComputeForwardingRuleRead(d *schema.ResourceData, meta inte if err != nil { return handleNotFoundError(err, d, fmt.Sprintf("Forwarding Rule Not Found : %s", name)) } - d.SetId(frule.Name) + d.SetId(fmt.Sprintf("projects/%s/regions/%s/forwardingRules/%s", project, region, name)) d.Set("self_link", frule.SelfLink) d.Set("description", frule.Description) diff --git 
a/third_party/terraform/data_sources/data_source_google_compute_global_address.go b/third_party/terraform/data_sources/data_source_google_compute_global_address.go index 5825e74da10a..3662a2d3666a 100644 --- a/third_party/terraform/data_sources/data_source_google_compute_global_address.go +++ b/third_party/terraform/data_sources/data_source_google_compute_global_address.go @@ -2,7 +2,6 @@ package google import ( "fmt" - "strconv" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) @@ -58,7 +57,6 @@ func dataSourceGoogleComputeGlobalAddressRead(d *schema.ResourceData, meta inter d.Set("status", address.Status) d.Set("self_link", address.SelfLink) d.Set("project", project) - - d.SetId(strconv.FormatUint(address.Id, 10)) + d.SetId(fmt.Sprintf("projects/%s/global/addresses/%s", project, name)) return nil } diff --git a/third_party/terraform/data_sources/data_source_google_compute_instance.go b/third_party/terraform/data_sources/data_source_google_compute_instance.go index 286d0ae4e875..3868f405c45c 100644 --- a/third_party/terraform/data_sources/data_source_google_compute_instance.go +++ b/third_party/terraform/data_sources/data_source_google_compute_instance.go @@ -154,6 +154,6 @@ func dataSourceGoogleComputeInstanceRead(d *schema.ResourceData, meta interface{ d.Set("project", project) d.Set("zone", GetResourceNameFromSelfLink(instance.Zone)) d.Set("name", instance.Name) - d.SetId(ConvertSelfLinkToV1(instance.SelfLink)) + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, instance.Zone, instance.Name)) return nil } diff --git a/third_party/terraform/data_sources/data_source_google_compute_instance_group.go b/third_party/terraform/data_sources/data_source_google_compute_instance_group.go index 42945a59eba6..9809f7a656a3 100644 --- a/third_party/terraform/data_sources/data_source_google_compute_instance_group.go +++ b/third_party/terraform/data_sources/data_source_google_compute_instance_group.go @@ -87,7 +87,11 @@ func dataSourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{} if err != nil { return err } - d.SetId(fmt.Sprintf("%s/%s", zone, name.(string))) + project, err := getProject(d, config) + if err != nil { + return err + } + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instanceGroups/%s", project, zone, name.(string))) } else if selfLink, ok := d.GetOk("self_link"); ok { parsed, err := ParseInstanceGroupFieldValue(selfLink.(string), d, config) if err != nil { @@ -96,7 +100,7 @@ func dataSourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{} d.Set("name", parsed.Name) d.Set("zone", parsed.Zone) d.Set("project", parsed.Project) - d.SetId(fmt.Sprintf("%s/%s", parsed.Zone, parsed.Name)) + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instanceGroups/%s", parsed.Project, parsed.Zone, parsed.Name)) } else { return errors.New("Must provide either `self_link` or `zone/name`") } diff --git a/third_party/terraform/data_sources/data_source_google_compute_network.go b/third_party/terraform/data_sources/data_source_google_compute_network.go index 9c4d2cba5617..31e9655497ae 100644 --- a/third_party/terraform/data_sources/data_source_google_compute_network.go +++ b/third_party/terraform/data_sources/data_source_google_compute_network.go @@ -61,6 +61,6 @@ func dataSourceGoogleComputeNetworkRead(d *schema.ResourceData, meta interface{} d.Set("self_link", network.SelfLink) d.Set("description", network.Description) d.Set("subnetworks_self_links", network.Subnetworks) - d.SetId(network.Name) + d.SetId(fmt.Sprintf("projects/%s/global/networks/%s", 
project, network.Name)) return nil } diff --git a/third_party/terraform/data_sources/data_source_google_compute_region_instance_group.go b/third_party/terraform/data_sources/data_source_google_compute_region_instance_group.go index cd01ae00100b..749797b46615 100644 --- a/third_party/terraform/data_sources/data_source_google_compute_region_instance_group.go +++ b/third_party/terraform/data_sources/data_source_google_compute_region_instance_group.go @@ -3,7 +3,6 @@ package google import ( "fmt" "log" - "strconv" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" @@ -113,7 +112,7 @@ func dataSourceComputeRegionInstanceGroupRead(d *schema.ResourceData, meta inter } else { d.Set("instances", flattenInstancesWithNamedPorts(members.Items)) } - d.SetId(strconv.FormatUint(instanceGroup.Id, 16)) + d.SetId(fmt.Sprintf("projects/%s/regions/%s/instanceGroups/%s", project, region, name)) d.Set("self_link", instanceGroup.SelfLink) d.Set("name", name) d.Set("project", project) diff --git a/third_party/terraform/data_sources/data_source_google_compute_resource_policy.go.erb b/third_party/terraform/data_sources/data_source_google_compute_resource_policy.go.erb index 40dce7229e73..09b3081efb0e 100644 --- a/third_party/terraform/data_sources/data_source_google_compute_resource_policy.go.erb +++ b/third_party/terraform/data_sources/data_source_google_compute_resource_policy.go.erb @@ -59,7 +59,7 @@ func dataSourceGoogleComputeResourcePolicyRead(d *schema.ResourceData, meta inte } d.Set("self_link", resourcePolicy.SelfLink) d.Set("description", resourcePolicy.Description) - d.SetId(resourcePolicy.Name) + d.SetId(fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, region, name)) return nil } <% end -%> diff --git a/third_party/terraform/data_sources/data_source_google_compute_ssl_certificate.go b/third_party/terraform/data_sources/data_source_google_compute_ssl_certificate.go index 3f49e66aae9f..04369989bf51 100644 --- a/third_party/terraform/data_sources/data_source_google_compute_ssl_certificate.go +++ b/third_party/terraform/data_sources/data_source_google_compute_ssl_certificate.go @@ -1,6 +1,8 @@ package google import ( + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) @@ -21,9 +23,15 @@ func dataSourceGoogleComputeSslCertificate() *schema.Resource { } func dataSourceComputeSslCertificateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } certificateName := d.Get("name").(string) - d.SetId(certificateName) + d.SetId(fmt.Sprintf("projects/%s/global/sslCertificates/%s", project, certificateName)) return resourceComputeSslCertificateRead(d, meta) } diff --git a/third_party/terraform/data_sources/data_source_google_compute_ssl_policy.go b/third_party/terraform/data_sources/data_source_google_compute_ssl_policy.go index cec4d28acd6a..6e994847122e 100644 --- a/third_party/terraform/data_sources/data_source_google_compute_ssl_policy.go +++ b/third_party/terraform/data_sources/data_source_google_compute_ssl_policy.go @@ -1,6 +1,8 @@ package google import ( + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) @@ -21,9 +23,15 @@ func dataSourceGoogleComputeSslPolicy() *schema.Resource { } func datasourceComputeSslPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } policyName := d.Get("name").(string) - 
d.SetId(policyName) + d.SetId(fmt.Sprintf("projects/%s/global/sslPolicies/%s", project, policyName)) return resourceComputeSslPolicyRead(d, meta) } diff --git a/third_party/terraform/data_sources/data_source_google_compute_subnetwork.go b/third_party/terraform/data_sources/data_source_google_compute_subnetwork.go index 4ba2649cda1e..51270d513e3d 100644 --- a/third_party/terraform/data_sources/data_source_google_compute_subnetwork.go +++ b/third_party/terraform/data_sources/data_source_google_compute_subnetwork.go @@ -95,7 +95,7 @@ func dataSourceGoogleComputeSubnetworkRead(d *schema.ResourceData, meta interfac d.Set("region", region) d.Set("secondary_ip_range", flattenSecondaryRanges(subnetwork.SecondaryIpRanges)) - d.SetId(fmt.Sprintf("%s/%s", region, name)) + d.SetId(fmt.Sprintf("projects/%s/regions/%s/subnetworks/%s", project, region, name)) return nil } diff --git a/third_party/terraform/data_sources/data_source_google_compute_vpn_gateway.go b/third_party/terraform/data_sources/data_source_google_compute_vpn_gateway.go index b8b39ba915e9..dfab9a5c484b 100644 --- a/third_party/terraform/data_sources/data_source_google_compute_vpn_gateway.go +++ b/third_party/terraform/data_sources/data_source_google_compute_vpn_gateway.go @@ -73,6 +73,6 @@ func dataSourceGoogleComputeVpnGatewayRead(d *schema.ResourceData, meta interfac d.Set("self_link", gateway.SelfLink) d.Set("description", gateway.Description) d.Set("project", project) - d.SetId(gateway.Name) + d.SetId(fmt.Sprintf("projects/%s/regions/%s/targetVpnGateways/%s", project, region, name)) return nil } diff --git a/third_party/terraform/data_sources/data_source_google_container_cluster.go b/third_party/terraform/data_sources/data_source_google_container_cluster.go index 7e884cc61e99..069a17d281d8 100644 --- a/third_party/terraform/data_sources/data_source_google_container_cluster.go +++ b/third_party/terraform/data_sources/data_source_google_container_cluster.go @@ -21,9 +21,21 @@ func dataSourceGoogleContainerCluster() *schema.Resource { } func datasourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + clusterName := d.Get("name").(string) - d.SetId(clusterName) + location, err := getLocation(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + d.SetId(containerClusterFullName(project, location, clusterName)) return resourceContainerClusterRead(d, meta) } diff --git a/third_party/terraform/data_sources/data_source_google_container_engine_versions.go.erb b/third_party/terraform/data_sources/data_source_google_container_engine_versions.go.erb index e2ff4f9376e0..58fcb5652975 100644 --- a/third_party/terraform/data_sources/data_source_google_container_engine_versions.go.erb +++ b/third_party/terraform/data_sources/data_source_google_container_engine_versions.go.erb @@ -23,19 +23,16 @@ func dataSourceGoogleContainerEngineVersions() *schema.Resource { "location": { Type: schema.TypeString, Optional: true, - ConflictsWith: []string{"zone", "region"}, }, "zone": { Type: schema.TypeString, Optional: true, - Deprecated: "Use location instead", - ConflictsWith: []string{"region", "location"}, + Removed: "Use location instead", }, "region": { Type: schema.TypeString, Optional: true, - Deprecated: "Use location instead", - ConflictsWith: []string{"zone", "location"}, + Removed: "Use location instead", }, "default_cluster_version": { Type: schema.TypeString, @@ -76,7 +73,7 @@ func dataSourceGoogleContainerEngineVersionsRead(d 
*schema.ResourceData, meta in return err } if len(location) == 0 { - return fmt.Errorf("Cannot determine location: set location, zone, or region in this data source or at provider-level") + return fmt.Errorf("Cannot determine location: set location in this data source or at provider-level") } location = fmt.Sprintf("projects/%s/locations/%s", project, location) diff --git a/third_party/terraform/data_sources/data_source_google_folder_organization_policy.go b/third_party/terraform/data_sources/data_source_google_folder_organization_policy.go index 5777ea75d268..b31f18c7687b 100644 --- a/third_party/terraform/data_sources/data_source_google_folder_organization_policy.go +++ b/third_party/terraform/data_sources/data_source_google_folder_organization_policy.go @@ -21,7 +21,7 @@ func dataSourceGoogleFolderOrganizationPolicy() *schema.Resource { func datasourceGoogleFolderOrganizationPolicyRead(d *schema.ResourceData, meta interface{}) error { - d.SetId(fmt.Sprintf("%s:%s", d.Get("folder"), d.Get("constraint"))) + d.SetId(fmt.Sprintf("%s/%s", d.Get("folder"), d.Get("constraint"))) return resourceGoogleFolderOrganizationPolicyRead(d, meta) } diff --git a/third_party/terraform/data_sources/data_source_google_organization.go b/third_party/terraform/data_sources/data_source_google_organization.go index 0bf0a723ee7c..eed599e1e54c 100644 --- a/third_party/terraform/data_sources/data_source_google_organization.go +++ b/third_party/terraform/data_sources/data_source_google_organization.go @@ -76,7 +76,7 @@ func dataSourceOrganizationRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("one of domain or organization must be set") } - d.SetId(GetResourceNameFromSelfLink(organization.Name)) + d.SetId(organization.Name) d.Set("name", organization.Name) d.Set("domain", organization.DisplayName) d.Set("create_time", organization.CreationTime) diff --git a/third_party/terraform/data_sources/data_source_google_project.go b/third_party/terraform/data_sources/data_source_google_project.go index 618c752d5573..3b23b74cdc9d 100644 --- a/third_party/terraform/data_sources/data_source_google_project.go +++ b/third_party/terraform/data_sources/data_source_google_project.go @@ -1,6 +1,7 @@ package google import ( + "fmt" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) @@ -21,13 +22,13 @@ func datasourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error if v, ok := d.GetOk("project_id"); ok { project := v.(string) - d.SetId(project) + d.SetId(fmt.Sprintf("projects/%s", project)) } else { project, err := getProject(d, config) if err != nil { return err } - d.SetId(project) + d.SetId(fmt.Sprintf("projects/%s", project)) } return resourceGoogleProjectRead(d, meta) diff --git a/third_party/terraform/data_sources/data_source_google_project_services.go b/third_party/terraform/data_sources/data_source_google_project_services.go deleted file mode 100644 index 5c50559ee893..000000000000 --- a/third_party/terraform/data_sources/data_source_google_project_services.go +++ /dev/null @@ -1,30 +0,0 @@ -package google - -import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" -) - -func dataSourceGoogleProjectServices() *schema.Resource { - // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(resourceGoogleProjectServices().Schema) - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceGoogleProjectServicesRead, - Schema: dsSchema, - } -} - -func 
dataSourceGoogleProjectServicesRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - d.SetId(project) - - return resourceGoogleProjectServicesRead(d, meta) -} diff --git a/third_party/terraform/data_sources/data_source_google_service_account_key.go b/third_party/terraform/data_sources/data_source_google_service_account_key.go index 3f6206204557..227d69a7637b 100644 --- a/third_party/terraform/data_sources/data_source_google_service_account_key.go +++ b/third_party/terraform/data_sources/data_source_google_service_account_key.go @@ -36,12 +36,6 @@ func dataSourceGoogleServiceAccountKey() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "service_account_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "Please use name to specify full service account key path projects/{project}/serviceAccounts/{serviceAccount}/keys/{keyId}", - }, }, } } diff --git a/third_party/terraform/resources/resource_app_engine_application.go b/third_party/terraform/resources/resource_app_engine_application.go index e691feb2ecdd..c2748aa05b62 100644 --- a/third_party/terraform/resources/resource_app_engine_application.go +++ b/third_party/terraform/resources/resource_app_engine_application.go @@ -117,7 +117,7 @@ func appEngineApplicationFeatureSettingsResource() *schema.Resource { Schema: map[string]*schema.Schema{ "split_health_checks": { Type: schema.TypeBool, - Optional: true, + Required: true, }, }, } diff --git a/third_party/terraform/resources/resource_bigquery_table.go.erb b/third_party/terraform/resources/resource_bigquery_table.go.erb index ba85b62d3c39..4ee13c90664a 100644 --- a/third_party/terraform/resources/resource_bigquery_table.go.erb +++ b/third_party/terraform/resources/resource_bigquery_table.go.erb @@ -164,17 +164,16 @@ func resourceBigQueryTable() *schema.Resource { // Range: [Optional] Range of a sheet to query from. Only used when non-empty. // Typical format: !: "range": { -<% if version.nil? || version == 'ga' -%> - Removed: "This field is in beta. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/guides/provider_versions.html for more details.", -<% end -%> - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"external_data_configuration.0.google_sheets_options.0.range"}, }, // SkipLeadingRows: [Optional] The number of rows at the top // of the sheet that BigQuery will skip when reading the data. "skip_leading_rows": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + AtLeastOneOf: []string{"external_data_configuration.0.google_sheets_options.0.skip_leading_rows"}, }, }, }, @@ -479,7 +478,7 @@ func resourceBigQueryTableCreate(d *schema.ResourceData, meta interface{}) error log.Printf("[INFO] BigQuery table %s has been created", res.Id) - d.SetId(fmt.Sprintf("%s:%s.%s", res.TableReference.ProjectId, res.TableReference.DatasetId, res.TableReference.TableId)) + d.SetId(fmt.Sprintf("projects/%s/datasets/%s/tables/%s", res.TableReference.ProjectId, res.TableReference.DatasetId, res.TableReference.TableId)) return resourceBigQueryTableRead(d, meta) } @@ -847,12 +846,11 @@ type bigQueryTableId struct { } func parseBigQueryTableId(id string) (*bigQueryTableId, error) { - // Expected format is "PROJECT:DATASET.TABLE", but the project can itself have . and : in it. 
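The parseBigQueryTableId change just below swaps the old `PROJECT:DATASET.TABLE` identifier for the full `projects/{{project}}/datasets/{{dataset}}/tables/{{table}}` path, the same ID normalisation applied across the data sources above. A tiny standalone sketch of how the new regex splits such an ID follows; the sample ID and variable names are made up for illustration and are not taken from the provider.

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern the resource now compiles for its IDs.
var bigQueryTableIDPattern = regexp.MustCompile(`^projects/(.+)/datasets/(.+)/tables/(.+)$`)

func main() {
	// Hypothetical ID in the new format.
	id := "projects/my-project/datasets/my_dataset/tables/my_table"

	parts := bigQueryTableIDPattern.FindStringSubmatch(id)
	if parts == nil {
		fmt.Printf("invalid BigQuery table id: %s\n", id)
		return
	}
	// parts[1], parts[2] and parts[3] carry the project, dataset and table
	// components that the resource feeds into its bigQueryTableId struct.
	fmt.Printf("project=%s dataset=%s table=%s\n", parts[1], parts[2], parts[3])
}
```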
- // Those characters are not valid dataset or table components, so just split on the last two. - matchRegex := regexp.MustCompile("^(.+):([^:.]+)\\.([^:.]+)$") + // Expected format is "projects/{{project}}/datasets/{{dataset}}/tables/{{table}}" + matchRegex := regexp.MustCompile("^projects/(.+)/datasets/(.+)/tables/(.+)$") subMatches := matchRegex.FindStringSubmatch(id) if subMatches == nil { - return nil, fmt.Errorf("Invalid BigQuery table specifier. Expecting {project}:{dataset-id}.{table-id}, got %s", id) + return nil, fmt.Errorf("Invalid BigQuery table specifier. Expecting projects/{{project}}/datasets/{{dataset}}/tables/{{table}}, got %s", id) } return &bigQueryTableId{ Project: subMatches[1], diff --git a/third_party/terraform/resources/resource_bigtable_instance.go b/third_party/terraform/resources/resource_bigtable_instance.go index 56d8f79d2dd7..5b22f30c832e 100644 --- a/third_party/terraform/resources/resource_bigtable_instance.go +++ b/third_party/terraform/resources/resource_bigtable_instance.go @@ -87,34 +87,6 @@ func resourceBigtableInstance() *schema.Resource { Computed: true, ForceNew: true, }, - - "cluster_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "Use cluster instead.", - }, - - "zone": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "Use cluster instead.", - }, - - "num_nodes": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Removed: "Use cluster instead.", - }, - - "storage_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "Use cluster instead.", - }, }, } } @@ -159,7 +131,11 @@ func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error creating instance. %s", err) } - d.SetId(conf.InstanceID) + id, err := replaceVars(d, config, "projects/{{project}}/instances/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) return resourceBigtableInstanceRead(d, meta) } @@ -180,11 +156,13 @@ func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) erro defer c.Close() - instance, err := c.InstanceInfo(ctx, d.Id()) + instanceName := d.Get("name").(string) + + instance, err := c.InstanceInfo(ctx, instanceName) if err != nil { - log.Printf("[WARN] Removing %s because it's gone", d.Id()) + log.Printf("[WARN] Removing %s because it's gone", instanceName) d.SetId("") - return fmt.Errorf("Error retrieving instance. Could not find %s. %s", d.Id(), err) + return fmt.Errorf("Error retrieving instance. Could not find %s. %s", instanceName, err) } d.Set("project", project) @@ -276,7 +254,7 @@ func resourceBigtableInstanceDestroy(d *schema.ResourceData, meta interface{}) e defer c.Close() - name := d.Id() + name := d.Get("name").(string) err = c.DeleteInstance(ctx, name) if err != nil { return fmt.Errorf("Error deleting instance. 
%s", err) @@ -394,7 +372,7 @@ func resourceBigtableInstanceImport(d *schema.ResourceData, meta interface{}) ([ } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/instances/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/third_party/terraform/resources/resource_bigtable_table.go b/third_party/terraform/resources/resource_bigtable_table.go index 0c7678cd5cac..5a24e522402f 100644 --- a/third_party/terraform/resources/resource_bigtable_table.go +++ b/third_party/terraform/resources/resource_bigtable_table.go @@ -111,7 +111,11 @@ func resourceBigtableTableCreate(d *schema.ResourceData, meta interface{}) error } } - d.SetId(name) + id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance_name}}/tables/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) return resourceBigtableTableRead(d, meta) } @@ -133,7 +137,7 @@ func resourceBigtableTableRead(d *schema.ResourceData, meta interface{}) error { defer c.Close() - name := d.Id() + name := d.Get("name").(string) table, err := c.TableInfo(ctx, name) if err != nil { log.Printf("[WARN] Removing %s because it's gone", name) @@ -199,7 +203,7 @@ func resourceBigtableTableImport(d *schema.ResourceData, meta interface{}) ([]*s } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance_name}}/tables/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/third_party/terraform/resources/resource_cloudfunctions_function.go b/third_party/terraform/resources/resource_cloudfunctions_function.go index 54f0d4301744..fe693c051e1b 100644 --- a/third_party/terraform/resources/resource_cloudfunctions_function.go +++ b/third_party/terraform/resources/resource_cloudfunctions_function.go @@ -43,21 +43,19 @@ func (s *cloudFunctionId) locationId() string { return fmt.Sprintf("projects/%s/locations/%s", s.Project, s.Region) } -func (s *cloudFunctionId) terraformId() string { - return fmt.Sprintf("%s/%s/%s", s.Project, s.Region, s.Name) -} - -func parseCloudFunctionId(id string, config *Config) (*cloudFunctionId, error) { - if parts := strings.Split(id, "/"); len(parts) == 3 { - return &cloudFunctionId{ - Project: parts[0], - Region: parts[1], - Name: parts[2], - }, nil - } - - return nil, fmt.Errorf("Invalid CloudFunction id format, expecting " + - "`{projectId}/{regionId}/{cloudFunctionName}`") +func parseCloudFunctionId(d *schema.ResourceData, config *Config) (*cloudFunctionId, error) { + if err := parseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/functions/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + return &cloudFunctionId{ + Project: d.Get("project").(string), + Region: d.Get("region").(string), + Name: d.Get("name").(string), + }, nil } func joinMapKeys(mapToJoin *map[int]bool) string { @@ -190,8 +188,7 @@ func resourceCloudFunctionsFunction() *schema.Resource { "runtime": { Type: schema.TypeString, - Optional: true, - Default: "nodejs6", + Required: true, }, "service_account_email": { @@ -212,27 +209,10 @@ func resourceCloudFunctionsFunction() *schema.Resource { Optional: true, }, - "trigger_bucket": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "This field is removed. 
Use `event_trigger` instead.", - ConflictsWith: []string{"trigger_http", "trigger_topic"}, - }, - "trigger_http": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"trigger_bucket", "trigger_topic"}, - }, - - "trigger_topic": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "This field is removed. Use `event_trigger` instead.", - ConflictsWith: []string{"trigger_http", "trigger_bucket"}, + Type: schema.TypeBool, + Optional: true, + ForceNew: true, }, "event_trigger": { @@ -389,7 +369,7 @@ func resourceCloudFunctionsCreate(d *schema.ResourceData, meta interface{}) erro } // Name of function should be unique - d.SetId(cloudFuncId.terraformId()) + d.SetId(cloudFuncId.cloudFunctionId()) err = cloudFunctionsOperationWait(config.clientCloudFunctions, op, "Creating CloudFunctions Function", int(d.Timeout(schema.TimeoutCreate).Minutes())) @@ -403,7 +383,7 @@ func resourceCloudFunctionsCreate(d *schema.ResourceData, meta interface{}) erro func resourceCloudFunctionsRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - cloudFuncId, err := parseCloudFunctionId(d.Id(), config) + cloudFuncId, err := parseCloudFunctionId(d, config) if err != nil { return err } @@ -464,7 +444,7 @@ func resourceCloudFunctionsUpdate(d *schema.ResourceData, meta interface{}) erro return err } - cloudFuncId, err := parseCloudFunctionId(d.Id(), config) + cloudFuncId, err := parseCloudFunctionId(d, config) if err != nil { return err } @@ -553,7 +533,7 @@ func resourceCloudFunctionsUpdate(d *schema.ResourceData, meta interface{}) erro func resourceCloudFunctionsDestroy(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - cloudFuncId, err := parseCloudFunctionId(d.Id(), config) + cloudFuncId, err := parseCloudFunctionId(d, config) if err != nil { return err } diff --git a/third_party/terraform/resources/resource_cloudiot_registry.go b/third_party/terraform/resources/resource_cloudiot_registry.go index 69b784437a64..21916631c45b 100644 --- a/third_party/terraform/resources/resource_cloudiot_registry.go +++ b/third_party/terraform/resources/resource_cloudiot_registry.go @@ -56,27 +56,16 @@ func resourceCloudIoTRegistry() *schema.Resource { []string{"", "NONE", "ERROR", "INFO", "DEBUG"}, false), }, "event_notification_config": { - Type: schema.TypeMap, - Optional: true, - Computed: true, - Deprecated: "eventNotificationConfig has been deprecated in favor of eventNotificationConfigs (plural). 
Please switch to using the plural field.", - ConflictsWith: []string{"event_notification_configs"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pubsub_topic_name": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - }, + Type: schema.TypeMap, + Optional: true, + Computed: true, + Removed: "Please use event_notification_configs instead", }, "event_notification_configs": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 10, - ConflictsWith: []string{"event_notification_config"}, + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 10, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "pubsub_topic_name": { @@ -143,7 +132,7 @@ func resourceCloudIoTRegistry() *schema.Resource { Schema: map[string]*schema.Schema{ "public_key_certificate": { Type: schema.TypeMap, - Optional: true, + Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "format": { @@ -241,9 +230,6 @@ func createDeviceRegistry(d *schema.ResourceData) *cloudiot.DeviceRegistry { deviceRegistry := &cloudiot.DeviceRegistry{} if v, ok := d.GetOk("event_notification_configs"); ok { deviceRegistry.EventNotificationConfigs = buildEventNotificationConfigs(v.([]interface{})) - } else if v, ok := d.GetOk("event_notification_config"); ok { - deviceRegistry.EventNotificationConfigs = []*cloudiot.EventNotificationConfig{ - buildEventNotificationConfig(v.(map[string]interface{}))} } if v, ok := d.GetOk("state_notification_config"); ok { @@ -314,15 +300,6 @@ func resourceCloudIoTRegistryUpdate(d *schema.ResourceData, meta interface{}) er } } - if d.HasChange("event_notification_config") { - hasChanged = true - updateMask = append(updateMask, "event_notification_configs") - if v, ok := d.GetOk("event_notification_config"); ok { - deviceRegistry.EventNotificationConfigs = []*cloudiot.EventNotificationConfig{ - buildEventNotificationConfig(v.(map[string]interface{}))} - } - } - if d.HasChange("state_notification_config") { hasChanged = true updateMask = append(updateMask, "state_notification_config.pubsub_topic_name") @@ -402,14 +379,8 @@ func resourceCloudIoTRegistryRead(d *schema.ResourceData, meta interface{}) erro if err := d.Set("event_notification_configs", cfgs); err != nil { return fmt.Errorf("Error reading Registry: %s", err) } - if err := d.Set("event_notification_config", map[string]string{ - "pubsub_topic_name": res.EventNotificationConfigs[0].PubsubTopicName, - }); err != nil { - return fmt.Errorf("Error reading Registry: %s", err) - } } else { d.Set("event_notification_configs", nil) - d.Set("event_notification_config", nil) } pubsubTopicName := res.StateNotificationConfig.PubsubTopicName @@ -433,6 +404,8 @@ func resourceCloudIoTRegistryRead(d *schema.ResourceData, meta interface{}) erro } d.Set("credentials", credentials) d.Set("log_level", res.LogLevel) + // Removed Computed field must be set to nil to prevent spurious diffs + d.Set("event_notification_config", nil) return nil } diff --git a/third_party/terraform/resources/resource_composer_environment.go.erb b/third_party/terraform/resources/resource_composer_environment.go.erb index 9870db88ec1e..820ef21f2108 100644 --- a/third_party/terraform/resources/resource_composer_environment.go.erb +++ b/third_party/terraform/resources/resource_composer_environment.go.erb @@ -36,6 +36,14 @@ var composerEnvironmentReservedEnvVar = map[string]struct{}{ "SQL_USER": {}, } +var composerSoftwareConfigKeys = []string{ + 
"config.0.software_config.0.airflow_config_overrides", + "config.0.software_config.0.pypi_packages", + "config.0.software_config.0.env_variables", + "config.0.software_config.0.image_version", + "config.0.software_config.0.python_version", +} + func resourceComposerEnvironment() *schema.Resource { return &schema.Resource{ Create: resourceComposerEnvironmentCreate, @@ -162,8 +170,7 @@ func resourceComposerEnvironment() *schema.Resource { Schema: map[string]*schema.Schema{ "use_ip_aliases": { Type: schema.TypeBool, - Optional: true, - Default: true, + Required: true, ForceNew: true, }, "cluster_secondary_range_name": { @@ -208,17 +215,20 @@ func resourceComposerEnvironment() *schema.Resource { "airflow_config_overrides": { Type: schema.TypeMap, Optional: true, + AtLeastOneOf: composerSoftwareConfigKeys, Elem: &schema.Schema{Type: schema.TypeString}, }, "pypi_packages": { Type: schema.TypeMap, Optional: true, + AtLeastOneOf: composerSoftwareConfigKeys, Elem: &schema.Schema{Type: schema.TypeString}, ValidateFunc: validateComposerEnvironmentPypiPackages, }, "env_variables": { Type: schema.TypeMap, Optional: true, + AtLeastOneOf: composerSoftwareConfigKeys, Elem: &schema.Schema{Type: schema.TypeString}, ValidateFunc: validateComposerEnvironmentEnvVariables, }, @@ -226,12 +236,14 @@ func resourceComposerEnvironment() *schema.Resource { Type: schema.TypeString, Computed: true, Optional: true, + AtLeastOneOf: composerSoftwareConfigKeys, ValidateFunc: validateRegexp(composerEnvironmentVersionRegexp), DiffSuppressFunc: composerImageVersionDiffSuppress, }, "python_version": { Type: schema.TypeString, Optional: true, + AtLeastOneOf: composerSoftwareConfigKeys, Computed: true, ForceNew: true, }, @@ -248,9 +260,8 @@ func resourceComposerEnvironment() *schema.Resource { Schema: map[string]*schema.Schema{ "enable_private_endpoint": { Type: schema.TypeBool, - Optional: true, + Required: true, ForceNew: true, - Default: true, }, "master_ipv4_cidr_block": { Type: schema.TypeString, @@ -314,7 +325,7 @@ func resourceComposerEnvironmentCreate(d *schema.ResourceData, meta interface{}) } // Store the ID now - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/environments/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -575,7 +586,7 @@ func resourceComposerEnvironmentImport(d *schema.ResourceData, meta interface{}) } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/environments/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/third_party/terraform/resources/resource_compute_attached_disk.go b/third_party/terraform/resources/resource_compute_attached_disk.go index bbc45e6e213a..aa80d24f15e1 100644 --- a/third_party/terraform/resources/resource_compute_attached_disk.go +++ b/third_party/terraform/resources/resource_compute_attached_disk.go @@ -100,7 +100,7 @@ func resourceAttachedDiskCreate(d *schema.ResourceData, meta interface{}) error return err } - d.SetId(fmt.Sprintf("%s:%s", zv.Name, diskName)) + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s/%s", zv.Project, zv.Zone, zv.Name, diskName)) waitErr := computeSharedOperationWaitTime(config.clientCompute, op, zv.Project, int(d.Timeout(schema.TimeoutCreate).Minutes()), "disk to attach") @@ -196,22 +196,17 @@ func 
resourceAttachedDiskImport(d *schema.ResourceData, meta interface{}) ([]*sc config := meta.(*Config) err := parseImportId( - []string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instances/[^/]+", - "(?P[^/]+)/(?P[^/]+)/[^/]+"}, d, config) + []string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)"}, d, config) if err != nil { return nil, err } - // In all acceptable id formats the actual id will be the last in the path - id := GetResourceNameFromSelfLink(d.Id()) - d.SetId(id) - - IDParts := strings.Split(d.Id(), ":") - if len(IDParts) != 2 { - return nil, fmt.Errorf("unable to determine attached disk id - id should be '{google_compute_instance.name}:{google_compute_disk.name}'") + id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instances/{{instance}}/{{disk}}") + if err != nil { + return nil, err } - d.Set("instance", IDParts[0]) - d.Set("disk", IDParts[1]) + d.SetId(id) return []*schema.ResourceData{d}, nil } diff --git a/third_party/terraform/resources/resource_compute_instance.go b/third_party/terraform/resources/resource_compute_instance.go index 0eb67fba1196..a2354137ed4b 100644 --- a/third_party/terraform/resources/resource_compute_instance.go +++ b/third_party/terraform/resources/resource_compute_instance.go @@ -7,7 +7,6 @@ import ( "fmt" "log" "strings" - "time" "github.com/hashicorp/errwrap" @@ -19,6 +18,44 @@ import ( "google.golang.org/api/compute/v1" ) +var ( + bootDiskKeys = []string{ + "boot_disk.0.auto_delete", + "boot_disk.0.device_name", + "boot_disk.0.disk_encryption_key_raw", + "boot_disk.0.kms_key_self_link", + "boot_disk.0.initialize_params", + "boot_disk.0.mode", + "boot_disk.0.source", + } + + initializeParamsKeys = []string{ + "boot_disk.0.initialize_params.0.size", + "boot_disk.0.initialize_params.0.type", + "boot_disk.0.initialize_params.0.image", + "boot_disk.0.initialize_params.0.labels", + } + + accessConfigKeys = []string{ + "network_interface.%d.access_config.%d.nat_ip", + "network_interface.%d.access_config.%d.network_tier", + "network_interface.%d.access_config.%d.public_ptr_domain_name", + } + + schedulingKeys = []string{ + "scheduling.0.on_host_maintenance", + "scheduling.0.automatic_restart", + "scheduling.0.preemptible", + "scheduling.0.node_affinities", + } + + shieldedInstanceConfigKeys = []string{ + "shielded_instance_config.0.enable_secure_boot", + "shielded_instance_config.0.enable_vtpm", + "shielded_instance_config.0.enable_integrity_monitoring", + } +) + func resourceComputeInstance() *schema.Resource { return &schema.Resource{ Create: resourceComputeInstanceCreate, @@ -50,24 +87,28 @@ func resourceComputeInstance() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "auto_delete": { - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: bootDiskKeys, + Default: true, + ForceNew: true, }, "device_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: bootDiskKeys, + Computed: true, + ForceNew: true, }, "disk_encryption_key_raw": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: bootDiskKeys, + ForceNew: true, + ConflictsWith: []string{"boot_disk.0.kms_key_self_link"}, + Sensitive: true, }, "disk_encryption_key_sha256": { @@ -78,6 +119,7 @@ func 
resourceComputeInstance() *schema.Resource { "kms_key_self_link": { Type: schema.TypeString, Optional: true, + AtLeastOneOf: bootDiskKeys, ForceNew: true, ConflictsWith: []string{"boot_disk.0.disk_encryption_key_raw"}, DiffSuppressFunc: compareSelfLinkRelativePaths, @@ -85,16 +127,18 @@ func resourceComputeInstance() *schema.Resource { }, "initialize_params": { - Type: schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: bootDiskKeys, + Computed: true, + ForceNew: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "size": { Type: schema.TypeInt, Optional: true, + AtLeastOneOf: initializeParamsKeys, Computed: true, ForceNew: true, ValidateFunc: validation.IntAtLeast(1), @@ -103,6 +147,7 @@ func resourceComputeInstance() *schema.Resource { "type": { Type: schema.TypeString, Optional: true, + AtLeastOneOf: initializeParamsKeys, Computed: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{"pd-standard", "pd-ssd"}, false), @@ -111,16 +156,18 @@ func resourceComputeInstance() *schema.Resource { "image": { Type: schema.TypeString, Optional: true, + AtLeastOneOf: initializeParamsKeys, Computed: true, ForceNew: true, DiffSuppressFunc: diskImageDiffSuppress, }, "labels": { - Type: schema.TypeMap, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeMap, + Optional: true, + AtLeastOneOf: initializeParamsKeys, + Computed: true, + ForceNew: true, }, }, }, @@ -129,6 +176,7 @@ func resourceComputeInstance() *schema.Resource { "mode": { Type: schema.TypeString, Optional: true, + AtLeastOneOf: bootDiskKeys, ForceNew: true, Default: "READ_WRITE", ValidateFunc: validation.StringInSlice([]string{"READ_WRITE", "READ_ONLY"}, false), @@ -137,6 +185,7 @@ func resourceComputeInstance() *schema.Resource { "source": { Type: schema.TypeString, Optional: true, + AtLeastOneOf: bootDiskKeys, Computed: true, ForceNew: true, ConflictsWith: []string{"boot_disk.initialize_params"}, @@ -216,12 +265,6 @@ func resourceComputeInstance() *schema.Resource { ValidateFunc: validation.StringInSlice([]string{"PREMIUM", "STANDARD"}, false), }, - "assigned_nat_ip": { - Type: schema.TypeString, - Computed: true, - Removed: "Use network_interface.access_config.nat_ip instead.", - }, - "public_ptr_domain_name": { Type: schema.TypeString, Optional: true, @@ -247,14 +290,6 @@ func resourceComputeInstance() *schema.Resource { }, }, }, - - "address": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Removed: "Please use network_ip", - }, }, }, }, @@ -328,72 +363,6 @@ func resourceComputeInstance() *schema.Resource { Default: false, }, - "disk": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Removed: "Use boot_disk, scratch_disk, and attached_disk instead", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // TODO(mitchellh): one of image or disk is required - - "disk": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "image": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "scratch": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - - "auto_delete": { - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, - }, - - "size": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "device_name": { - Type: schema.TypeString, - Optional: 
true, - }, - - "disk_encryption_key_raw": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, - }, - - "disk_encryption_key_sha256": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "enable_display": { Type: schema.TypeBool, Optional: true, @@ -460,27 +429,31 @@ func resourceComputeInstance() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "on_host_maintenance": { - Type: schema.TypeString, - Optional: true, - Computed: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: schedulingKeys, }, "automatic_restart": { - Type: schema.TypeBool, - Optional: true, - Default: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: schedulingKeys, + Default: true, }, "preemptible": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + Default: false, + AtLeastOneOf: schedulingKeys, + ForceNew: true, }, "node_affinities": { Type: schema.TypeSet, Optional: true, + AtLeastOneOf: schedulingKeys, ForceNew: true, Elem: instanceSchedulingNodeAffinitiesElemSchema(), DiffSuppressFunc: emptyOrDefaultStringSuppress(""), @@ -497,8 +470,7 @@ func resourceComputeInstance() *schema.Resource { Schema: map[string]*schema.Schema{ "interface": { Type: schema.TypeString, - Optional: true, - Default: "SCSI", + Required: true, ValidateFunc: validation.StringInSlice([]string{"SCSI", "NVME"}, false), }, }, @@ -543,21 +515,24 @@ func resourceComputeInstance() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enable_secure_boot": { - Type: schema.TypeBool, - Optional: true, - Default: false, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceConfigKeys, + Default: false, }, "enable_vtpm": { - Type: schema.TypeBool, - Optional: true, - Default: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceConfigKeys, + Default: true, }, "enable_integrity_monitoring": { - Type: schema.TypeBool, - Optional: true, - Default: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceConfigKeys, + Default: true, }, }, }, @@ -633,7 +608,7 @@ func getInstance(config *Config, d *schema.ResourceData) (*computeBeta.Instance, if err != nil { return nil, err } - instance, err := config.clientComputeBeta.Instances.Get(project, zone, d.Id()).Do() + instance, err := config.clientComputeBeta.Instances.Get(project, zone, d.Get("name").(string)).Do() if err != nil { return nil, handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string))) } @@ -776,7 +751,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err } // Store the ID now - d.SetId(instance.Name) + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, z, instance.Name)) // Wait for the operation to complete waitErr := computeSharedOperationWaitTime(config.clientCompute, op, project, createTimeout, "instance to create") @@ -952,6 +927,8 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error } } + zone := GetResourceNameFromSelfLink(instance.Zone) + d.Set("service_account", flattenServiceAccounts(instance.ServiceAccounts)) d.Set("attached_disk", ads) d.Set("scratch_disk", scratchDisks) @@ -965,11 +942,11 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error d.Set("self_link", ConvertSelfLinkToV1(instance.SelfLink)) d.Set("instance_id", fmt.Sprintf("%d", instance.Id)) d.Set("project", 
project) - d.Set("zone", GetResourceNameFromSelfLink(instance.Zone)) + d.Set("zone", zone) d.Set("name", instance.Name) d.Set("description", instance.Description) d.Set("hostname", instance.Hostname) - d.SetId(instance.Name) + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, zone, instance.Name)) return nil } @@ -989,9 +966,9 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err // Use beta api directly in order to read network_interface.fingerprint without having to put it in the schema. // Change back to getInstance(config, d) once updating alias ips is GA. - instance, err := config.clientComputeBeta.Instances.Get(project, zone, d.Id()).Do() + instance, err := config.clientComputeBeta.Instances.Get(project, zone, d.Get("name").(string)).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string))) + return handleNotFoundError(err, d, fmt.Sprintf("Instance %s", instance.Name)) } // Enable partial mode for the resource since it is possible @@ -1013,14 +990,14 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err func() error { // retrieve up-to-date metadata from the API in case several updates hit simultaneously. instances // sometimes but not always share metadata fingerprints. - instance, err := config.clientComputeBeta.Instances.Get(project, zone, d.Id()).Do() + instance, err := config.clientComputeBeta.Instances.Get(project, zone, instance.Name).Do() if err != nil { return fmt.Errorf("Error retrieving metadata: %s", err) } metadataV1.Fingerprint = instance.Metadata.Fingerprint - op, err := config.clientCompute.Instances.SetMetadata(project, zone, d.Id(), metadataV1).Do() + op, err := config.clientCompute.Instances.SetMetadata(project, zone, instance.Name, metadataV1).Do() if err != nil { return fmt.Errorf("Error updating metadata: %s", err) } @@ -1048,7 +1025,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err return err } op, err := config.clientCompute.Instances.SetTags( - project, zone, d.Id(), tagsV1).Do() + project, zone, d.Get("name").(string), tagsV1).Do() if err != nil { return fmt.Errorf("Error updating tags: %s", err) } @@ -1066,7 +1043,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err labelFingerprint := d.Get("label_fingerprint").(string) req := compute.InstancesSetLabelsRequest{Labels: labels, LabelFingerprint: labelFingerprint} - op, err := config.clientCompute.Instances.SetLabels(project, zone, d.Id(), &req).Do() + op, err := config.clientCompute.Instances.SetLabels(project, zone, instance.Name, &req).Do() if err != nil { return fmt.Errorf("Error updating labels: %s", err) } @@ -1086,7 +1063,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err } op, err := config.clientComputeBeta.Instances.SetScheduling( - project, zone, d.Id(), scheduling).Do() + project, zone, instance.Name, scheduling).Do() if err != nil { return fmt.Errorf("Error updating scheduling policy: %s", err) } @@ -1127,7 +1104,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err // Delete any accessConfig that currently exists in instNetworkInterface for _, ac := range instNetworkInterface.AccessConfigs { op, err := config.clientCompute.Instances.DeleteAccessConfig( - project, zone, d.Id(), ac.Name, networkName).Do() + project, zone, instance.Name, ac.Name, networkName).Do() if err != nil { return fmt.Errorf("Error deleting old access_config: %s", 
err) } @@ -1152,7 +1129,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err } op, err := config.clientComputeBeta.Instances.AddAccessConfig( - project, zone, d.Id(), networkName, ac).Do() + project, zone, instance.Name, networkName, ac).Do() if err != nil { return fmt.Errorf("Error adding new access_config: %s", err) } @@ -1172,7 +1149,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err Fingerprint: instNetworkInterface.Fingerprint, ForceSendFields: []string{"AliasIpRanges"}, } - op, err := config.clientComputeBeta.Instances.UpdateNetworkInterface(project, zone, d.Id(), networkName, ni).Do() + op, err := config.clientComputeBeta.Instances.UpdateNetworkInterface(project, zone, instance.Name, networkName, ni).Do() if err != nil { return errwrap.Wrapf("Error removing alias_ip_range: {{err}}", err) } @@ -1186,7 +1163,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err ranges := d.Get(prefix + ".alias_ip_range").([]interface{}) if len(ranges) > 0 { if rereadFingerprint { - instance, err = config.clientComputeBeta.Instances.Get(project, zone, d.Id()).Do() + instance, err = config.clientComputeBeta.Instances.Get(project, zone, instance.Name).Do() if err != nil { return err } @@ -1196,7 +1173,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err AliasIpRanges: expandAliasIpRanges(ranges), Fingerprint: instNetworkInterface.Fingerprint, } - op, err := config.clientComputeBeta.Instances.UpdateNetworkInterface(project, zone, d.Id(), networkName, ni).Do() + op, err := config.clientComputeBeta.Instances.UpdateNetworkInterface(project, zone, instance.Name, networkName, ni).Do() if err != nil { return errwrap.Wrapf("Error adding alias_ip_range: {{err}}", err) } @@ -1322,7 +1299,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("deletion_protection") { nDeletionProtection := d.Get("deletion_protection").(bool) - op, err := config.clientCompute.Instances.SetDeletionProtection(project, zone, d.Id()).DeletionProtection(nDeletionProtection).Do() + op, err := config.clientCompute.Instances.SetDeletionProtection(project, zone, d.Get("name").(string)).DeletionProtection(nDeletionProtection).Do() if err != nil { return fmt.Errorf("Error updating deletion protection flag: %s", err) } @@ -1441,7 +1418,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("shielded_instance_config") { shieldedVmConfig := expandShieldedVmConfigs(d) - op, err := config.clientComputeBeta.Instances.UpdateShieldedVmConfig(project, zone, d.Id(), shieldedVmConfig).Do() + op, err := config.clientComputeBeta.Instances.UpdateShieldedVmConfig(project, zone, instance.Name, shieldedVmConfig).Do() if err != nil { return fmt.Errorf("Error updating shielded vm config: %s", err) } @@ -1595,12 +1572,12 @@ func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) err if err != nil { return err } - log.Printf("[INFO] Requesting instance deletion: %s", d.Id()) + log.Printf("[INFO] Requesting instance deletion: %s", d.Get("name").(string)) if d.Get("deletion_protection").(bool) { - return fmt.Errorf("Cannot delete instance %s: instance Deletion Protection is enabled. Set deletion_protection to false for this resource and run \"terraform apply\" before attempting to delete it.", d.Id()) + return fmt.Errorf("Cannot delete instance %s: instance Deletion Protection is enabled. 
Set deletion_protection to false for this resource and run \"terraform apply\" before attempting to delete it.", d.Get("name").(string)) } else { - op, err := config.clientCompute.Instances.Delete(project, zone, d.Id()).Do() + op, err := config.clientCompute.Instances.Delete(project, zone, d.Get("name").(string)).Do() if err != nil { return fmt.Errorf("Error deleting instance: %s", err) } @@ -1625,7 +1602,8 @@ func resourceComputeInstanceImportState(d *schema.ResourceData, meta interface{} d.Set("project", parts[0]) d.Set("zone", parts[1]) - d.SetId(parts[2]) + d.Set("name", parts[2]) + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", parts[0], parts[1], parts[2])) return []*schema.ResourceData{d}, nil } diff --git a/third_party/terraform/resources/resource_compute_instance_from_template.go b/third_party/terraform/resources/resource_compute_instance_from_template.go index 4150af0a69de..423a73a3cca2 100644 --- a/third_party/terraform/resources/resource_compute_instance_from_template.go +++ b/third_party/terraform/resources/resource_compute_instance_from_template.go @@ -153,7 +153,7 @@ func resourceComputeInstanceFromTemplateCreate(d *schema.ResourceData, meta inte } // Store the ID now - d.SetId(instance.Name) + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, z, instance.Name)) // Wait for the operation to complete waitErr := computeSharedOperationWaitTime(config.clientCompute, op, project, int(d.Timeout(schema.TimeoutCreate).Minutes()), "instance to create") diff --git a/third_party/terraform/resources/resource_compute_instance_group.go b/third_party/terraform/resources/resource_compute_instance_group.go index 4ff3a889e2ef..0a4748fff62d 100644 --- a/third_party/terraform/resources/resource_compute_instance_group.go +++ b/third_party/terraform/resources/resource_compute_instance_group.go @@ -156,7 +156,7 @@ func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{} } // It probably maybe worked, so store the ID now - d.SetId(fmt.Sprintf("%s/%s", zone, name)) + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instanceGroups/%s", project, zone, name)) // Wait for the operation to complete err = computeOperationWait(config.clientCompute, op, project, "Creating InstanceGroup") @@ -379,18 +379,19 @@ func resourceComputeInstanceGroupDelete(d *schema.ResourceData, meta interface{} } func resourceComputeInstanceGroupImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - parts := strings.Split(d.Id(), "/") - if len(parts) == 2 { - d.Set("zone", parts[0]) - d.Set("name", parts[1]) - } else if len(parts) == 3 { - d.Set("project", parts[0]) - d.Set("zone", parts[1]) - d.Set("name", parts[2]) - d.SetId(parts[1] + "/" + parts[2]) - } else { - return nil, fmt.Errorf("Invalid compute instance group specifier. 
Expecting {zone}/{name} or {project}/{zone}/{name}") + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P[^/]+)/zones/(?P[^/]+)/instanceGroups/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroups/{{name}}") + if err != nil { + return nil, err } + d.SetId(id) return []*schema.ResourceData{d}, nil } diff --git a/third_party/terraform/resources/resource_compute_instance_group_manager.go.erb b/third_party/terraform/resources/resource_compute_instance_group_manager.go similarity index 73% rename from third_party/terraform/resources/resource_compute_instance_group_manager.go.erb rename to third_party/terraform/resources/resource_compute_instance_group_manager.go index 1623e77cf710..f5068640bbd3 100644 --- a/third_party/terraform/resources/resource_compute_instance_group_manager.go.erb +++ b/third_party/terraform/resources/resource_compute_instance_group_manager.go @@ -1,10 +1,8 @@ -<% autogen_exception -%> package google import ( "fmt" "log" - "regexp" "strings" "time" @@ -32,57 +30,48 @@ func resourceComputeInstanceGroupManager() *schema.Resource { }, Schema: map[string]*schema.Schema{ - "base_instance_name": &schema.Schema{ + "base_instance_name": { Type: schema.TypeString, Required: true, ForceNew: true, }, - <% if version == 'ga' -%> - "instance_template": &schema.Schema{ + "instance_template": { Type: schema.TypeString, Optional: true, Computed: true, - Deprecated: "This field will be replaced by `version.instance_template` in 3.0.0", - ConflictsWith: []string{"version"}, + Removed: "This field has been replaced by `version.instance_template`", DiffSuppressFunc: compareSelfLinkRelativePaths, }, - <% end -%> - "version": &schema.Schema{ - Type: schema.TypeList, - <%# TODO 3.0.0 - mark as required -%> - <% if version == 'ga' -%> - Optional: true, - Computed: true, - <% else -%> + "version": { + Type: schema.TypeList, Required: true, - <% end -%> Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Optional: true, }, - "instance_template": &schema.Schema{ + "instance_template": { Type: schema.TypeString, Required: true, DiffSuppressFunc: compareSelfLinkRelativePaths, }, - "target_size": &schema.Schema{ + "target_size": { Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "fixed": &schema.Schema{ + "fixed": { Type: schema.TypeInt, Optional: true, }, - "percent": &schema.Schema{ + "percent": { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(0, 100), @@ -94,46 +83,46 @@ func resourceComputeInstanceGroupManager() *schema.Resource { }, }, - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "zone": &schema.Schema{ + "zone": { Type: schema.TypeString, Optional: true, Computed: true, ForceNew: true, }, - "description": &schema.Schema{ + "description": { Type: schema.TypeString, Optional: true, ForceNew: true, }, - "fingerprint": &schema.Schema{ + "fingerprint": { Type: schema.TypeString, Computed: true, }, - "instance_group": &schema.Schema{ + "instance_group": { Type: schema.TypeString, Computed: true, }, - "named_port": &schema.Schema{ + "named_port": { Type: schema.TypeSet, Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, 
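The instance group importer above, like the other importers reworked in this change, replaces ad-hoc ID splitting with the provider's parseImportId/replaceVars helpers: a list of regexes (whose named capture groups populate project, zone and name on the ResourceData) accepts both the canonical and the legacy import ID shapes, and the ID is then normalized to the same projects/{{project}}/zones/{{zone}}/... form that the create and read functions now store with d.SetId. A minimal sketch under that assumption, written as if it lived in the provider's google package next to those helpers; resourceExampleThingImportState and exampleThings are hypothetical.

func resourceExampleThingImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
	config := meta.(*Config)
	// Accept the canonical form plus shorter legacy forms; the named groups
	// set project, zone and name on the ResourceData.
	if err := parseImportId([]string{
		"projects/(?P<project>[^/]+)/zones/(?P<zone>[^/]+)/exampleThings/(?P<name>[^/]+)",
		"(?P<project>[^/]+)/(?P<zone>[^/]+)/(?P<name>[^/]+)",
		"(?P<zone>[^/]+)/(?P<name>[^/]+)",
	}, d, config); err != nil {
		return nil, err
	}
	// Rebuild the canonical ID from whatever fields the regexes populated.
	id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/exampleThings/{{name}}")
	if err != nil {
		return nil, err
	}
	d.SetId(id)
	return []*schema.ResourceData{d}, nil
}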
Required: true, }, - "port": &schema.Schema{ + "port": { Type: schema.TypeInt, Required: true, }, @@ -141,39 +130,26 @@ func resourceComputeInstanceGroupManager() *schema.Resource { }, }, - "project": &schema.Schema{ + "project": { Type: schema.TypeString, Optional: true, ForceNew: true, Computed: true, }, - "self_link": &schema.Schema{ + "self_link": { Type: schema.TypeString, Computed: true, }, -<% if version == 'ga' -%> - "update_strategy": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "REPLACE", - Deprecated: "This field will be replaced by `update_policy` in 3.0.0", - ConflictsWith: []string{"update_policy"}, - ValidateFunc: validation.StringInSlice([]string{"RESTART", "NONE", "ROLLING_UPDATE", "REPLACE"}, false), - DiffSuppressFunc: func(key, old, new string, d *schema.ResourceData) bool { - if old == "REPLACE" && new == "RESTART" { - return true - } - if old == "RESTART" && new == "REPLACE" { - return true - } - return false - }, + "update_strategy": { + Type: schema.TypeString, + Optional: true, + Default: "REPLACE", + Removed: "This field has been replaced by `update_policy`", }, -<% end -%> - "target_pools": &schema.Schema{ + "target_pools": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{ @@ -182,25 +158,25 @@ func resourceComputeInstanceGroupManager() *schema.Resource { Set: selfLinkRelativePathHash, }, - "target_size": &schema.Schema{ + "target_size": { Type: schema.TypeInt, Computed: true, Optional: true, }, - "auto_healing_policies": &schema.Schema{ + "auto_healing_policies": { Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "health_check": &schema.Schema{ + "health_check": { Type: schema.TypeString, Required: true, DiffSuppressFunc: compareSelfLinkRelativePaths, }, - "initial_delay_sec": &schema.Schema{ + "initial_delay_sec": { Type: schema.TypeInt, Required: true, ValidateFunc: validation.IntBetween(0, 3600), @@ -209,108 +185,54 @@ func resourceComputeInstanceGroupManager() *schema.Resource { }, }, -<% if version == 'ga' -%> - "rolling_update_policy": &schema.Schema{ - Computed: true, - Type: schema.TypeList, - Removed: "This field has been replaced by update_policy.", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "minimal_action": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"RESTART", "REPLACE"}, false), - }, - - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"OPPORTUNISTIC", "PROACTIVE"}, false), - }, - - "max_surge_fixed": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "max_surge_percent": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 100), - }, - - "max_unavailable_fixed": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "max_unavailable_percent": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 100), - }, - - "min_ready_sec": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 3600), - }, - }, - }, - }, -<% end -%> - "update_policy": &schema.Schema{ - Computed: true, - Type: schema.TypeList, - Optional: true, - MaxItems: 1, + "update_policy": { + Computed: true, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: 
map[string]*schema.Schema{ - "minimal_action": &schema.Schema{ + "minimal_action": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"RESTART", "REPLACE"}, false), }, - "type": &schema.Schema{ + "type": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"OPPORTUNISTIC", "PROACTIVE"}, false), }, - "max_surge_fixed": &schema.Schema{ + "max_surge_fixed": { Type: schema.TypeInt, Optional: true, Computed: true, ConflictsWith: []string{"update_policy.0.max_surge_percent"}, }, - "max_surge_percent": &schema.Schema{ + "max_surge_percent": { Type: schema.TypeInt, Optional: true, ConflictsWith: []string{"update_policy.0.max_surge_fixed"}, ValidateFunc: validation.IntBetween(0, 100), }, - "max_unavailable_fixed": &schema.Schema{ + "max_unavailable_fixed": { Type: schema.TypeInt, Optional: true, Computed: true, ConflictsWith: []string{"update_policy.0.max_unavailable_percent"}, }, - "max_unavailable_percent": &schema.Schema{ + "max_unavailable_percent": { Type: schema.TypeInt, Optional: true, ConflictsWith: []string{"update_policy.0.max_unavailable_fixed"}, ValidateFunc: validation.IntBetween(0, 100), }, - "min_ready_sec": &schema.Schema{ + "min_ready_sec": { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(0, 3600), @@ -319,7 +241,7 @@ func resourceComputeInstanceGroupManager() *schema.Resource { }, }, - "wait_for_instances": &schema.Schema{ + "wait_for_instances": { Type: schema.TypeBool, Optional: true, Default: false, @@ -372,9 +294,6 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte Name: d.Get("name").(string), Description: d.Get("description").(string), BaseInstanceName: d.Get("base_instance_name").(string), - <% if version == 'ga' -%> - InstanceTemplate: d.Get("instance_template").(string), - <% end -%> TargetSize: int64(d.Get("target_size").(int)), NamedPorts: getNamedPortsBeta(d.Get("named_port").(*schema.Set).List()), TargetPools: convertStringSet(d.Get("target_pools").(*schema.Set)), @@ -394,7 +313,7 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte } // It probably maybe worked, so store the ID now - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{name}}") if err != nil { return err } @@ -449,9 +368,6 @@ func flattenFixedOrPercent(fixedOrPercent *computeBeta.FixedOrPercent) []map[str func getManager(d *schema.ResourceData, meta interface{}) (*computeBeta.InstanceGroupManager, error) { config := meta.(*Config) - if err := parseImportId([]string{"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { - return nil, err - } project, err := getProject(d, config) if err != nil { @@ -495,9 +411,6 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf } d.Set("base_instance_name", manager.BaseInstanceName) - <% if version == 'ga' -%> - d.Set("instance_template", ConvertSelfLinkToV1(manager.InstanceTemplate)) - <% end -%> d.Set("name", manager.Name) d.Set("zone", GetResourceNameFromSelfLink(manager.Zone)) d.Set("description", manager.Description) @@ -513,16 +426,6 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf d.Set("instance_group", ConvertSelfLinkToV1(manager.InstanceGroup)) d.Set("self_link", ConvertSelfLinkToV1(manager.SelfLink)) - <% if version == 'ga' -%> - update_strategy, ok := 
d.GetOk("update_strategy") - if !ok { - update_strategy = "REPLACE" - } - d.Set("update_strategy", update_strategy.(string)) - - d.Set("rolling_update_policy" , nil) - <% end -%> - if err = d.Set("auto_healing_policies", flattenAutoHealingPolicies(manager.AutoHealingPolicies)); err != nil { return fmt.Errorf("Error setting auto_healing_policies in state: %s", err.Error()) } @@ -533,7 +436,6 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf return fmt.Errorf("Error setting update_policy in state: %s", err.Error()) } - if d.Get("wait_for_instances").(bool) { conf := resource.StateChangeConf{ Pending: []string{"creating", "error"}, @@ -549,51 +451,10 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf return nil } -<% if version == 'ga' -%> -// Updates an instance group manager by applying the update strategy (REPLACE, RESTART) -// and rolling update policy (PROACTIVE, OPPORTUNISTIC). Updates performed by API -// are OPPORTUNISTIC by default. -func performZoneUpdate(d *schema.ResourceData, config *Config, id string, updateStrategy string, project string, zone string) error { - if updateStrategy == "RESTART" || updateStrategy == "REPLACE" { - managedInstances, err := config.clientComputeBeta.InstanceGroupManagers.ListManagedInstances(project, zone, id).Do() - if err != nil { - return fmt.Errorf("Error getting instance group managers instances: %s", err) - } - - managedInstanceCount := len(managedInstances.ManagedInstances) - instances := make([]string, managedInstanceCount) - for i, v := range managedInstances.ManagedInstances { - instances[i] = v.Instance - } - - recreateInstances := &computeBeta.InstanceGroupManagersRecreateInstancesRequest{ - Instances: instances, - } - - op, err := config.clientComputeBeta.InstanceGroupManagers.RecreateInstances(project, zone, id, recreateInstances).Do() - if err != nil { - return fmt.Errorf("Error restarting instance group managers instances: %s", err) - } - - // Wait for the operation to complete - timeoutInMinutes := int(d.Timeout(schema.TimeoutUpdate).Minutes()) - err = computeSharedOperationWaitTime(config.clientCompute, op, project, timeoutInMinutes, "Restarting InstanceGroupManagers instances") - if err != nil { - return err - } - } - - return nil -} -<% end -%> func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - if err := parseImportId([]string{"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { - return err - } - project, err := getProject(d, config) if err != nil { return err @@ -692,39 +553,6 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte d.SetPartial("target_size") } - <% if version == 'ga' -%> - // If instance_template changes then update - if d.HasChange("instance_template") { - d.Partial(true) - - name := d.Get("name").(string) - // Build the parameter - setInstanceTemplate := &computeBeta.InstanceGroupManagersSetInstanceTemplateRequest{ - InstanceTemplate: d.Get("instance_template").(string), - } - - op, err := config.clientComputeBeta.InstanceGroupManagers.SetInstanceTemplate(project, zone, name, setInstanceTemplate).Do() - - if err != nil { - return fmt.Errorf("Error updating InstanceGroupManager: %s", err) - } - - // Wait for the operation to complete - timeoutInMinutes := int(d.Timeout(schema.TimeoutUpdate).Minutes()) - err = computeSharedOperationWaitTime(config.clientCompute, op, project, timeoutInMinutes, "Updating 
InstanceGroupManager") - if err != nil { - return err - } - - updateStrategy := d.Get("update_strategy").(string) - err = performZoneUpdate(d, config, name, updateStrategy, project, zone) - if err != nil { - return err - } - d.SetPartial("instance_template") - } - <% end -%> - d.Partial(false) return resourceComputeInstanceGroupManagerRead(d, meta) @@ -733,9 +561,6 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - if err := parseImportId([]string{"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { - return err - } project, err := getProject(d, config) if err != nil { return err @@ -834,7 +659,6 @@ func expandFixedOrPercent(configured []interface{}) *computeBeta.FixedOrPercent return fixedOrPercent } - func expandUpdatePolicy(configured []interface{}) *computeBeta.InstanceGroupManagerUpdatePolicy { updatePolicy := &computeBeta.InstanceGroupManagerUpdatePolicy{} @@ -848,7 +672,7 @@ func expandUpdatePolicy(configured []interface{}) *computeBeta.InstanceGroupMana // when the percent values are set, the fixed values will be ignored if v := data["max_surge_percent"]; v.(int) > 0 { updatePolicy.MaxSurge = &computeBeta.FixedOrPercent{ - Percent: int64(v.(int)), + Percent: int64(v.(int)), NullFields: []string{"Fixed"}, } } else { @@ -862,7 +686,7 @@ func expandUpdatePolicy(configured []interface{}) *computeBeta.InstanceGroupMana if v := data["max_unavailable_percent"]; v.(int) > 0 { updatePolicy.MaxUnavailable = &computeBeta.FixedOrPercent{ - Percent: int64(v.(int)), + Percent: int64(v.(int)), NullFields: []string{"Fixed"}, } } else { @@ -923,12 +747,12 @@ func flattenUpdatePolicy(updatePolicy *computeBeta.InstanceGroupManagerUpdatePol func resourceInstanceGroupManagerStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { d.Set("wait_for_instances", false) config := meta.(*Config) - if err := parseImportId([]string{"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { + if err := parseImportId([]string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instanceGroupManagers/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { return nil, err } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/third_party/terraform/resources/resource_compute_instance_template.go b/third_party/terraform/resources/resource_compute_instance_template.go index 3143f056ac35..5a86f9497045 100644 --- a/third_party/terraform/resources/resource_compute_instance_template.go +++ b/third_party/terraform/resources/resource_compute_instance_template.go @@ -3,25 +3,46 @@ package google import ( "fmt" "reflect" + "strings" "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + computeBeta "google.golang.org/api/compute/v0.beta" ) +var ( + schedulingInstTemplateKeys = []string{ + "scheduling.0.on_host_maintenance", + 
"scheduling.0.automatic_restart", + "scheduling.0.preemptible", + "scheduling.0.node_affinities", + } + + shieldedInstanceTemplateConfigKeys = []string{ + "shielded_instance_config.0.enable_secure_boot", + "shielded_instance_config.0.enable_vtpm", + "shielded_instance_config.0.enable_integrity_monitoring", + } +) + func resourceComputeInstanceTemplate() *schema.Resource { return &schema.Resource{ Create: resourceComputeInstanceTemplateCreate, Read: resourceComputeInstanceTemplateRead, Delete: resourceComputeInstanceTemplateDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + State: resourceComputeInstanceTemplateImportState, }, SchemaVersion: 1, - CustomizeDiff: resourceComputeInstanceTemplateSourceImageCustomizeDiff, - MigrateState: resourceComputeInstanceTemplateMigrateState, + CustomizeDiff: customdiff.All( + resourceComputeInstanceTemplateSourceImageCustomizeDiff, + resourceComputeInstanceTemplateScratchDiskCustomizeDiff, + ), + MigrateState: resourceComputeInstanceTemplateMigrateState, // A compute instance template is more or less a subset of a compute // instance. Please attempt to maintain consistency with the @@ -151,7 +172,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { Schema: map[string]*schema.Schema{ "kms_key_self_link": { Type: schema.TypeString, - Optional: true, + Required: true, ForceNew: true, DiffSuppressFunc: compareSelfLinkRelativePaths, }, @@ -168,13 +189,6 @@ func resourceComputeInstanceTemplate() *schema.Resource { ForceNew: true, }, - "automatic_restart": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Removed: "Use 'scheduling.automatic_restart' instead.", - }, - "can_ip_forward": { Type: schema.TypeBool, Optional: true, @@ -270,11 +284,6 @@ func resourceComputeInstanceTemplate() *schema.Resource { Computed: true, ValidateFunc: validation.StringInSlice([]string{"PREMIUM", "STANDARD"}, false), }, - "assigned_nat_ip": { - Type: schema.TypeString, - Computed: true, - Removed: "Use network_interface.access_config.nat_ip instead.", - }, }, }, }, @@ -299,24 +308,10 @@ func resourceComputeInstanceTemplate() *schema.Resource { }, }, }, - - "address": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Removed: "Please use network_ip", - }, }, }, }, - "on_host_maintenance": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Removed: "Use 'scheduling.on_host_maintenance' instead.", - }, - "project": { Type: schema.TypeString, Optional: true, @@ -340,29 +335,33 @@ func resourceComputeInstanceTemplate() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "preemptible": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: schedulingInstTemplateKeys, + Default: false, + ForceNew: true, }, "automatic_restart": { - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: schedulingInstTemplateKeys, + Default: true, + ForceNew: true, }, "on_host_maintenance": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: schedulingInstTemplateKeys, + ForceNew: true, }, "node_affinities": { Type: schema.TypeSet, Optional: true, + AtLeastOneOf: schedulingInstTemplateKeys, ForceNew: true, Elem: instanceSchedulingNodeAffinitiesElemSchema(), DiffSuppressFunc: emptyOrDefaultStringSuppress(""), @@ -418,24 
+417,27 @@ func resourceComputeInstanceTemplate() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enable_secure_boot": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceTemplateConfigKeys, + Default: false, + ForceNew: true, }, "enable_vtpm": { - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceTemplateConfigKeys, + Default: true, + ForceNew: true, }, "enable_integrity_monitoring": { - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceTemplateConfigKeys, + Default: true, + ForceNew: true, }, }, }, @@ -548,6 +550,34 @@ func resourceComputeInstanceTemplateSourceImageCustomizeDiff(diff *schema.Resour return nil } +func resourceComputeInstanceTemplateScratchDiskCustomizeDiff(diff *schema.ResourceDiff, meta interface{}) error { + // separate func to allow unit testing + return resourceComputeInstanceTemplateScratchDiskCustomizeDiffFunc(diff) +} + +func resourceComputeInstanceTemplateScratchDiskCustomizeDiffFunc(diff TerraformResourceDiff) error { + numDisks := diff.Get("disk.#").(int) + for i := 0; i < numDisks; i++ { + // misspelled on purpose, type is a special symbol + typee := diff.Get(fmt.Sprintf("disk.%d.type", i)).(string) + diskType := diff.Get(fmt.Sprintf("disk.%d.disk_type", i)).(string) + if typee == "SCRATCH" && diskType != "local-ssd" { + return fmt.Errorf("SCRATCH disks must have a disk_type of local-ssd. disk %d has disk_type %s", i, diskType) + } + + if diskType == "local-ssd" && typee != "SCRATCH" { + return fmt.Errorf("disks with a disk_type of local-ssd must be SCRATCH disks. 
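The scratch-disk CustomizeDiff above is split into a thin wrapper plus a function that takes the provider's TerraformResourceDiff interface so the rules can be unit-tested without building a real plan. The checks reduce to three per-disk invariants; the helper below is only an illustrative restatement of them (not part of the diff), where diskKind corresponds to the schema's "type" and diskType to "disk_type".

package example

import "fmt"

// validateTemplateDisk restates the per-disk checks made by the CustomizeDiff:
// SCRATCH disks and the local-ssd disk_type imply each other, and SCRATCH
// disks must be exactly 375 GB.
func validateTemplateDisk(diskType, diskKind string, sizeGb int) error {
	if diskKind == "SCRATCH" && diskType != "local-ssd" {
		return fmt.Errorf("SCRATCH disks must have a disk_type of local-ssd, got %q", diskType)
	}
	if diskType == "local-ssd" && diskKind != "SCRATCH" {
		return fmt.Errorf("disks with a disk_type of local-ssd must be SCRATCH disks, got %q", diskKind)
	}
	if diskKind == "SCRATCH" && sizeGb != 375 {
		return fmt.Errorf("SCRATCH disks must be exactly 375GB, got %dGB", sizeGb)
	}
	return nil
}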
disk %d is a %s disk", i, typee) + } + + diskSize := diff.Get(fmt.Sprintf("disk.%d.disk_size_gb", i)).(int) + if typee == "SCRATCH" && diskSize != 375 { + return fmt.Errorf("SCRATCH disks must be exactly 375GB, disk %d is %d", i, diskSize) + } + } + + return nil +} + func buildDisks(d *schema.ResourceData, config *Config) ([]*computeBeta.AttachedDisk, error) { project, err := getProject(d, config) if err != nil { @@ -737,7 +767,7 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac } // Store the ID now - d.SetId(instanceTemplate.Name) + d.SetId(fmt.Sprintf("projects/%s/global/instanceTemplates/%s", project, instanceTemplate.Name)) err = computeSharedOperationWait(config.clientCompute, op, project, "Creating Instance Template") if err != nil { @@ -959,7 +989,8 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{ return err } - instanceTemplate, err := config.clientComputeBeta.InstanceTemplates.Get(project, d.Id()).Do() + splits := strings.Split(d.Id(), "/") + instanceTemplate, err := config.clientComputeBeta.InstanceTemplates.Get(project, splits[len(splits)-1]).Do() if err != nil { return handleNotFoundError(err, d, fmt.Sprintf("Instance Template %q", d.Get("name").(string))) } @@ -1092,8 +1123,9 @@ func resourceComputeInstanceTemplateDelete(d *schema.ResourceData, meta interfac return err } + splits := strings.Split(d.Id(), "/") op, err := config.clientCompute.InstanceTemplates.Delete( - project, d.Id()).Do() + project, splits[len(splits)-1]).Do() if err != nil { return fmt.Errorf("Error deleting instance template: %s", err) } @@ -1130,3 +1162,19 @@ func expandResourceComputeInstanceTemplateScheduling(d *schema.ResourceData, met } return expanded, nil } + +func resourceComputeInstanceTemplateImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{"projects/(?P[^/]+)/global/instanceTemplates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/global/instanceTemplates/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/third_party/terraform/resources/resource_compute_network_peering.go.erb b/third_party/terraform/resources/resource_compute_network_peering.go.erb index 0273e77e125f..a575e0a06971 100644 --- a/third_party/terraform/resources/resource_compute_network_peering.go.erb +++ b/third_party/terraform/resources/resource_compute_network_peering.go.erb @@ -42,13 +42,12 @@ func resourceComputeNetworkPeering() *schema.Resource { DiffSuppressFunc: compareSelfLinkRelativePaths, }, // The API only accepts true as a value for exchange_subnet_routes or auto_create_routes (of which only one can be set in a valid request). - // Also, you can't set auto_create_routes if you use the networkPeering object. auto_create_routes is also deprecated + // Also, you can't set auto_create_routes if you use the networkPeering object. auto_create_routes is also removed "auto_create_routes": { Type: schema.TypeBool, Optional: true, - Deprecated: "auto_create_routes has been deprecated because it's redundant and not user-configurable. It can safely be removed from your config", + Removed: "auto_create_routes has been removed because it's redundant and not user-configurable. 
It can safely be removed from your config", ForceNew: true, - Default: true, }, "state": { Type: schema.TypeString, diff --git a/third_party/terraform/resources/resource_compute_region_instance_group_manager.go.erb b/third_party/terraform/resources/resource_compute_region_instance_group_manager.go similarity index 69% rename from third_party/terraform/resources/resource_compute_region_instance_group_manager.go.erb rename to third_party/terraform/resources/resource_compute_region_instance_group_manager.go index cb6b374ac478..2f4cc6d72323 100644 --- a/third_party/terraform/resources/resource_compute_region_instance_group_manager.go.erb +++ b/third_party/terraform/resources/resource_compute_region_instance_group_manager.go @@ -1,10 +1,8 @@ -<% autogen_exception -%> package google import ( "fmt" "log" - "regexp" "strings" "time" @@ -16,11 +14,6 @@ import ( computeBeta "google.golang.org/api/compute/v0.beta" ) -var ( - regionInstanceGroupManagerIdRegex = regexp.MustCompile("^" + ProjectRegex + "/[a-z0-9-]+/[a-z0-9-]+$") - regionInstanceGroupManagerIdNameRegex = regexp.MustCompile("^[a-z0-9-]+$") -) - func resourceComputeRegionInstanceGroupManager() *schema.Resource { return &schema.Resource{ Create: resourceComputeRegionInstanceGroupManagerCreate, @@ -37,57 +30,46 @@ func resourceComputeRegionInstanceGroupManager() *schema.Resource { }, Schema: map[string]*schema.Schema{ - "base_instance_name": &schema.Schema{ + "base_instance_name": { Type: schema.TypeString, Required: true, ForceNew: true, }, - <% if version == 'ga' -%> - "instance_template": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - Deprecated: "This field will be replaced by `version.instance_template` in 3.0.0", - ConflictsWith: []string{"version"}, - DiffSuppressFunc: compareSelfLinkRelativePaths, + "instance_template": { + Type: schema.TypeString, + Optional: true, + Removed: "This field has been replaced by `version.instance_template` in 3.0.0", }, - <% end -%> - "version": &schema.Schema{ - <%# TODO 3.0.0 - mark as required -%> - Type: schema.TypeList, - <% if version == 'ga' -%> - Optional: true, - Computed: true, - <% else -%> + "version": { + Type: schema.TypeList, Required: true, - <% end -%> Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Optional: true, }, - "instance_template": &schema.Schema{ + "instance_template": { Type: schema.TypeString, Required: true, DiffSuppressFunc: compareSelfLinkRelativePaths, }, - "target_size": &schema.Schema{ + "target_size": { Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "fixed": &schema.Schema{ + "fixed": { Type: schema.TypeInt, Optional: true, }, - "percent": &schema.Schema{ + "percent": { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(0, 100), @@ -99,45 +81,45 @@ func resourceComputeRegionInstanceGroupManager() *schema.Resource { }, }, - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "region": &schema.Schema{ + "region": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "description": &schema.Schema{ + "description": { Type: schema.TypeString, Optional: true, ForceNew: true, }, - "fingerprint": &schema.Schema{ + "fingerprint": { Type: schema.TypeString, Computed: true, }, - "instance_group": &schema.Schema{ + "instance_group": { Type: schema.TypeString, Computed: true, }, - "named_port": &schema.Schema{ + "named_port": { 
Type: schema.TypeSet, Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, }, - "port": &schema.Schema{ + "port": { Type: schema.TypeInt, Required: true, }, @@ -145,28 +127,25 @@ func resourceComputeRegionInstanceGroupManager() *schema.Resource { }, }, - "project": &schema.Schema{ + "project": { Type: schema.TypeString, Optional: true, ForceNew: true, Computed: true, }, - "self_link": &schema.Schema{ + "self_link": { Type: schema.TypeString, Computed: true, }, -<% if version == 'ga' -%> - "update_strategy": &schema.Schema{ - Type: schema.TypeString, - Deprecated: "This field will be replaced by `update_policy` in 3.0.0", - Optional: true, - ConflictsWith: []string{"update_policy"}, + "update_strategy": { + Type: schema.TypeString, + Removed: "This field is removed.", + Optional: true, }, -<% end -%> - "target_pools": &schema.Schema{ + "target_pools": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{ @@ -174,7 +153,7 @@ func resourceComputeRegionInstanceGroupManager() *schema.Resource { }, Set: selfLinkRelativePathHash, }, - "target_size": &schema.Schema{ + "target_size": { Type: schema.TypeInt, Computed: true, Optional: true, @@ -183,25 +162,25 @@ func resourceComputeRegionInstanceGroupManager() *schema.Resource { // If true, the resource will report ready only after no instances are being created. // This will not block future reads if instances are being recreated, and it respects // the "createNoRetry" parameter that's available for this resource. - "wait_for_instances": &schema.Schema{ + "wait_for_instances": { Type: schema.TypeBool, Optional: true, Default: false, }, - "auto_healing_policies": &schema.Schema{ + "auto_healing_policies": { Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "health_check": &schema.Schema{ + "health_check": { Type: schema.TypeString, Required: true, DiffSuppressFunc: compareSelfLinkRelativePaths, }, - "initial_delay_sec": &schema.Schema{ + "initial_delay_sec": { Type: schema.TypeInt, Required: true, ValidateFunc: validation.IntBetween(0, 3600), @@ -210,7 +189,7 @@ func resourceComputeRegionInstanceGroupManager() *schema.Resource { }, }, - "distribution_policy_zones": &schema.Schema{ + "distribution_policy_zones": { Type: schema.TypeSet, Optional: true, ForceNew: true, @@ -222,123 +201,62 @@ func resourceComputeRegionInstanceGroupManager() *schema.Resource { }, }, - <% if version == 'ga' -%> - "rolling_update_policy": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Removed: "This field has been replaced by update_policy.", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "minimal_action": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"RESTART", "REPLACE"}, false), - }, - - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"OPPORTUNISTIC", "PROACTIVE"}, false), - }, - - "max_surge_fixed": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "max_surge_percent": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 100), - }, - - "max_unavailable_fixed": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "max_unavailable_percent": &schema.Schema{ - Type: schema.TypeInt, - Optional: 
true, - ValidateFunc: validation.IntBetween(0, 100), - }, - - "min_ready_sec": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 3600), - }, - "instance_redistribution_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"PROACTIVE", "NONE", ""}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("PROACTIVE"), - }, - }, - }, - }, - <% end -%> - - "update_policy": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Optional: true, - MaxItems: 1, + "update_policy": { + Type: schema.TypeList, + Computed: true, + Optional: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "minimal_action": &schema.Schema{ + "minimal_action": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"RESTART", "REPLACE"}, false), }, - "type": &schema.Schema{ + "type": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"OPPORTUNISTIC", "PROACTIVE"}, false), }, - "max_surge_fixed": &schema.Schema{ + "max_surge_fixed": { Type: schema.TypeInt, Optional: true, Computed: true, ConflictsWith: []string{"update_policy.0.max_surge_percent"}, }, - "max_surge_percent": &schema.Schema{ + "max_surge_percent": { Type: schema.TypeInt, Optional: true, ConflictsWith: []string{"update_policy.0.max_surge_fixed"}, ValidateFunc: validation.IntBetween(0, 100), }, - "max_unavailable_fixed": &schema.Schema{ + "max_unavailable_fixed": { Type: schema.TypeInt, Optional: true, Computed: true, ConflictsWith: []string{"update_policy.0.max_unavailable_percent"}, }, - "max_unavailable_percent": &schema.Schema{ + "max_unavailable_percent": { Type: schema.TypeInt, Optional: true, ConflictsWith: []string{"update_policy.0.max_unavailable_fixed"}, ValidateFunc: validation.IntBetween(0, 100), }, - "min_ready_sec": &schema.Schema{ + "min_ready_sec": { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(0, 3600), }, "instance_redistribution_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"PROACTIVE", "NONE", ""}, false), + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"PROACTIVE", "NONE", ""}, false), DiffSuppressFunc: emptyOrDefaultStringSuppress("PROACTIVE"), }, }, @@ -365,9 +283,6 @@ func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, met Name: d.Get("name").(string), Description: d.Get("description").(string), BaseInstanceName: d.Get("base_instance_name").(string), - <% if version == 'ga' -%> - InstanceTemplate: d.Get("instance_template").(string), - <% end -%> TargetSize: int64(d.Get("target_size").(int)), NamedPorts: getNamedPortsBeta(d.Get("named_port").(*schema.Set).List()), TargetPools: convertStringSet(d.Get("target_pools").(*schema.Set)), @@ -385,7 +300,11 @@ func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, met return fmt.Errorf("Error creating RegionInstanceGroupManager: %s", err) } - d.SetId(regionInstanceGroupManagerId{Project: project, Region: region, Name: manager.Name}.terraformId()) + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) // Wait for the operation to complete timeoutInMinutes := int(d.Timeout(schema.TimeoutCreate).Minutes()) @@ -401,28 +320,20 @@ type getInstanceManagerFunc 
func(*schema.ResourceData, interface{}) (*computeBet func getRegionalManager(d *schema.ResourceData, meta interface{}) (*computeBeta.InstanceGroupManager, error) { config := meta.(*Config) - regionalID, err := parseRegionInstanceGroupManagerId(d.Id()) + project, err := getProject(d, config) if err != nil { return nil, err } - if regionalID.Project == "" { - regionalID.Project, err = getProject(d, config) - if err != nil { - return nil, err - } - } - - if regionalID.Region == "" { - regionalID.Region, err = getRegion(d, config) - if err != nil { - return nil, err - } + region, err := getRegion(d, config) + if err != nil { + return nil, err } - manager, err := config.clientComputeBeta.RegionInstanceGroupManagers.Get(regionalID.Project, regionalID.Region, regionalID.Name).Do() + name := d.Get("name").(string) + manager, err := config.clientComputeBeta.RegionInstanceGroupManagers.Get(project, region, name).Do() if err != nil { - return nil, handleNotFoundError(err, d, fmt.Sprintf("Region Instance Manager %q", regionalID.Name)) + return nil, handleNotFoundError(err, d, fmt.Sprintf("Region Instance Manager %q", name)) } return manager, nil @@ -455,25 +366,16 @@ func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta return nil } - regionalID, err := parseRegionInstanceGroupManagerId(d.Id()) + project, err := getProject(d, config) if err != nil { return err } - if regionalID.Project == "" { - regionalID.Project, err = getProject(d, config) - if err != nil { - return err - } - } d.Set("base_instance_name", manager.BaseInstanceName) - <% if version == 'ga' -%> - d.Set("instance_template", ConvertSelfLinkToV1(manager.InstanceTemplate)) - <% end -%> d.Set("name", manager.Name) d.Set("region", GetResourceNameFromSelfLink(manager.Region)) d.Set("description", manager.Description) - d.Set("project", regionalID.Project) + d.Set("project", project) d.Set("target_size", manager.TargetSize) if err := d.Set("target_pools", mapStringArr(manager.TargetPools, ConvertSelfLinkToV1)); err != nil { return fmt.Errorf("Error setting target_pools in state: %s", err.Error()) @@ -488,10 +390,6 @@ func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta } d.Set("self_link", ConvertSelfLinkToV1(manager.SelfLink)) - <% if version == 'ga' -%> - d.Set("rolling_update_policy" , nil) - <% end -%> - if err := d.Set("auto_healing_policies", flattenAutoHealingPolicies(manager.AutoHealingPolicies)); err != nil { return fmt.Errorf("Error setting auto_healing_policies in state: %s", err.Error()) } @@ -613,32 +511,6 @@ func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, met d.SetPartial("target_size") } - <% if version == 'ga' -%> - if d.HasChange("instance_template") { - d.Partial(true) - // Build the parameter - setInstanceTemplate := &computeBeta.RegionInstanceGroupManagersSetTemplateRequest{ - InstanceTemplate: d.Get("instance_template").(string), - } - - op, err := config.clientComputeBeta.RegionInstanceGroupManagers.SetInstanceTemplate( - project, region, d.Get("name").(string), setInstanceTemplate).Do() - - if err != nil { - return fmt.Errorf("Error updating RegionInstanceGroupManager: %s", err) - } - - // Wait for the operation to complete - timeoutInMinutes := int(d.Timeout(schema.TimeoutUpdate).Minutes()) - err = computeSharedOperationWaitTime(config.clientCompute, op, project, timeoutInMinutes, "Updating InstanceGroupManager") - if err != nil { - return err - } - - d.SetPartial("instance_template") - } - <% end -%> - d.Partial(false) return 
resourceComputeRegionInstanceGroupManagerRead(d, meta) @@ -647,26 +519,19 @@ func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, met func resourceComputeRegionInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - regionalID, err := parseRegionInstanceGroupManagerId(d.Id()) + project, err := getProject(d, config) if err != nil { return err } - if regionalID.Project == "" { - regionalID.Project, err = getProject(d, config) - if err != nil { - return err - } + region, err := getRegion(d, config) + if err != nil { + return err } - if regionalID.Region == "" { - regionalID.Region, err = getRegion(d, config) - if err != nil { - return err - } - } + name := d.Get("name").(string) - op, err := config.clientComputeBeta.RegionInstanceGroupManagers.Delete(regionalID.Project, regionalID.Region, regionalID.Name).Do() + op, err := config.clientComputeBeta.RegionInstanceGroupManagers.Delete(project, region, name).Do() if err != nil { return fmt.Errorf("Error deleting region instance group manager: %s", err) @@ -674,7 +539,7 @@ func resourceComputeRegionInstanceGroupManagerDelete(d *schema.ResourceData, met // Wait for the operation to complete timeoutInMinutes := int(d.Timeout(schema.TimeoutDelete).Minutes()) - err = computeSharedOperationWaitTime(config.clientCompute, op, regionalID.Project, timeoutInMinutes, "Deleting RegionInstanceGroupManager") + err = computeSharedOperationWaitTime(config.clientCompute, op, project, timeoutInMinutes, "Deleting RegionInstanceGroupManager") if err != nil { return fmt.Errorf("Error waiting for delete to complete: %s", err) } @@ -697,7 +562,7 @@ func expandRegionUpdatePolicy(configured []interface{}) *computeBeta.InstanceGro // when the percent values are set, the fixed values will be ignored if v := data["max_surge_percent"]; v.(int) > 0 { updatePolicy.MaxSurge = &computeBeta.FixedOrPercent{ - Percent: int64(v.(int)), + Percent: int64(v.(int)), NullFields: []string{"Fixed"}, } } else { @@ -711,7 +576,7 @@ func expandRegionUpdatePolicy(configured []interface{}) *computeBeta.InstanceGro if v := data["max_unavailable_percent"]; v.(int) > 0 { updatePolicy.MaxUnavailable = &computeBeta.FixedOrPercent{ - Percent: int64(v.(int)), + Percent: int64(v.(int)), NullFields: []string{"Fixed"}, } } else { @@ -796,40 +661,17 @@ func hashZoneFromSelfLinkOrResourceName(value interface{}) int { func resourceRegionInstanceGroupManagerStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { d.Set("wait_for_instances", false) - regionalID, err := parseRegionInstanceGroupManagerId(d.Id()) - if err != nil { + config := meta.(*Config) + if err := parseImportId([]string{"projects/(?P[^/]+)/regions/(?P[^/]+)/instanceGroupManagers/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { return nil, err } - d.Set("project", regionalID.Project) - d.Set("region", regionalID.Region) - d.Set("name", regionalID.Name) - return []*schema.ResourceData{d}, nil -} -type regionInstanceGroupManagerId struct { - Project string - Region string - Name string -} - -func (r regionInstanceGroupManagerId) terraformId() string { - return fmt.Sprintf("%s/%s/%s", r.Project, r.Region, r.Name) -} - -func parseRegionInstanceGroupManagerId(id string) (*regionInstanceGroupManagerId, error) { - switch { - case regionInstanceGroupManagerIdRegex.MatchString(id): - parts := strings.Split(id, "/") - return ®ionInstanceGroupManagerId{ - Project: parts[0], - Region: parts[1], 
- Name: parts[2], - }, nil - case regionInstanceGroupManagerIdNameRegex.MatchString(id): - return &regionInstanceGroupManagerId{ - Name: id, - }, nil - default: - return nil, fmt.Errorf("Invalid region instance group manager specifier. Expecting either {projectId}/{region}/{name} or {name}, where {projectId} and {region} will be derived from the provider.") + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) } + d.SetId(id) + + return []*schema.ResourceData{d}, nil } diff --git a/third_party/terraform/resources/resource_compute_router_peer.go b/third_party/terraform/resources/resource_compute_router_peer.go index 88ec144407c0..0201ca16d50c 100644 --- a/third_party/terraform/resources/resource_compute_router_peer.go +++ b/third_party/terraform/resources/resource_compute_router_peer.go @@ -85,7 +85,7 @@ func resourceComputeRouterPeer() *schema.Resource { }, "range": { Type: schema.TypeString, - Optional: true, + Required: true, }, }, }, diff --git a/third_party/terraform/resources/resource_compute_security_policy.go b/third_party/terraform/resources/resource_compute_security_policy.go index 566c05bd0849..6f560a7f8df8 100644 --- a/third_party/terraform/resources/resource_compute_security_policy.go +++ b/third_party/terraform/resources/resource_compute_security_policy.go @@ -19,7 +19,7 @@ func resourceComputeSecurityPolicy() *schema.Resource { Update: resourceComputeSecurityPolicyUpdate, Delete: resourceComputeSecurityPolicyDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + State: resourceSecurityPolicyStateImporter, }, Timeouts: &schema.ResourceTimeout{ @@ -148,7 +148,11 @@ func resourceComputeSecurityPolicyCreate(d *schema.ResourceData, meta interface{ return errwrap.Wrapf("Error creating SecurityPolicy: {{err}}", err) } - d.SetId(securityPolicy.Name) + id, err := replaceVars(d, config, "projects/{{project}}/global/securityPolicies/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) err = computeSharedOperationWaitTime(config.clientCompute, op, project, int(d.Timeout(schema.TimeoutCreate).Minutes()), fmt.Sprintf("Creating SecurityPolicy %q", sp)) if err != nil { @@ -166,7 +170,8 @@ func resourceComputeSecurityPolicyRead(d *schema.ResourceData, meta interface{}) return err } - securityPolicy, err := config.clientComputeBeta.SecurityPolicies.Get(project, d.Id()).Do() + sp := d.Get("name").(string) + securityPolicy, err := config.clientComputeBeta.SecurityPolicies.Get(project, sp).Do() if err != nil { return handleNotFoundError(err, d, fmt.Sprintf("SecurityPolicy %q", d.Id())) } @@ -191,7 +196,7 @@ func resourceComputeSecurityPolicyUpdate(d *schema.ResourceData, meta interface{ return err } - sp := d.Id() + sp := d.Get("name").(string) if d.HasChange("description") { securityPolicy := &compute.SecurityPolicy{ @@ -282,7 +287,7 @@ func resourceComputeSecurityPolicyDelete(d *schema.ResourceData, meta interface{ } // Delete the SecurityPolicy - op, err := config.clientComputeBeta.SecurityPolicies.Delete(project, d.Id()).Do() + op, err := config.clientComputeBeta.SecurityPolicies.Delete(project, d.Get("name").(string)).Do() if err != nil { return errwrap.Wrapf("Error deleting SecurityPolicy: {{err}}", err) } @@ -363,3 +368,19 @@ func flattenSecurityPolicyRules(rules []*compute.SecurityPolicyRule) []map[strin } return rulesSchema } + +func
resourceSecurityPolicyStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{"projects/(?P<project>[^/]+)/global/securityPolicies/(?P<name>[^/]+)", "(?P<project>[^/]+)/(?P<name>[^/]+)", "(?P<name>[^/]+)"}, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/global/securityPolicies/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/third_party/terraform/resources/resource_compute_target_pool.go b/third_party/terraform/resources/resource_compute_target_pool.go index 9596bd944c16..a51a05dbf6bc 100644 --- a/third_party/terraform/resources/resource_compute_target_pool.go +++ b/third_party/terraform/resources/resource_compute_target_pool.go @@ -20,7 +20,7 @@ func resourceComputeTargetPool() *schema.Resource { Delete: resourceComputeTargetPoolDelete, Update: resourceComputeTargetPoolUpdate, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + State: resourceTargetPoolStateImporter, }, Schema: map[string]*schema.Schema{ @@ -207,7 +207,11 @@ func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) e } // It probably maybe worked, so store the ID now - d.SetId(tpool.Name) + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetPools/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) err = computeOperationWait(config.clientCompute, op, project, "Creating Target Pool") if err != nil { @@ -229,6 +233,8 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e return err } + name := d.Get("name").(string) + d.Partial(true) if d.HasChange("health_checks") { @@ -251,7 +257,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e removeReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} } op, err := config.clientCompute.TargetPools.RemoveHealthCheck( - project, region, d.Id(), removeReq).Do() + project, region, name, removeReq).Do() if err != nil { return fmt.Errorf("Error updating health_check: %s", err) } @@ -267,7 +273,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e addReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} } op, err = config.clientCompute.TargetPools.AddHealthCheck( - project, region, d.Id(), addReq).Do() + project, region, name, addReq).Do() if err != nil { return fmt.Errorf("Error updating health_check: %s", err) } @@ -301,7 +307,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e addReq.Instances[i] = &compute.InstanceReference{Instance: v} } op, err := config.clientCompute.TargetPools.AddInstance( - project, region, d.Id(), addReq).Do() + project, region, name, addReq).Do() if err != nil { return fmt.Errorf("Error updating instances: %s", err) } @@ -317,7 +323,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e removeReq.Instances[i] = &compute.InstanceReference{Instance: v} } op, err = config.clientCompute.TargetPools.RemoveInstance( - project, region, d.Id(), removeReq).Do() + project, region, name, removeReq).Do() if err != nil { return fmt.Errorf("Error updating instances: %s", err) } @@ -334,7 +340,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e Target: bpool_name,
} op, err := config.clientCompute.TargetPools.SetBackup( - project, region, d.Id(), tref).Do() + project, region, name, tref).Do() if err != nil { return fmt.Errorf("Error updating backup_pool: %s", err) } @@ -375,7 +381,7 @@ func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) err } tpool, err := config.clientCompute.TargetPools.Get( - project, region, d.Id()).Do() + project, region, d.Get("name").(string)).Do() if err != nil { return handleNotFoundError(err, d, fmt.Sprintf("Target Pool %q", d.Get("name").(string))) } @@ -412,7 +418,7 @@ func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta interface{}) e // Delete the TargetPool op, err := config.clientCompute.TargetPools.Delete( - project, region, d.Id()).Do() + project, region, d.Get("name").(string)).Do() if err != nil { return fmt.Errorf("Error deleting TargetPool: %s", err) } @@ -424,3 +430,24 @@ func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta interface{}) e d.SetId("") return nil } + +func resourceTargetPoolStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P<project>[^/]+)/regions/(?P<region>[^/]+)/targetPools/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<region>[^/]+)/(?P<name>[^/]+)", + "(?P<region>[^/]+)/(?P<name>[^/]+)", + "(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetPools/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/third_party/terraform/resources/resource_container_cluster.go.erb b/third_party/terraform/resources/resource_container_cluster.go.erb index abe17bb3b6a3..24bd0f385659 100644 --- a/third_party/terraform/resources/resource_container_cluster.go.erb +++ b/third_party/terraform/resources/resource_container_cluster.go.erb @@ -24,6 +24,9 @@ var ( Schema: map[string]*schema.Schema{ "cidr_blocks": { Type: schema.TypeSet, + // Despite being the only entry in a nested block, this should be kept + // Optional. Expressing the parent with no entries and omitting the + // parent entirely are semantically different.
Optional: true, Elem: cidrBlockConfig, }, @@ -43,9 +46,18 @@ var ( }, } - ipAllocationSubnetFields = []string{"ip_allocation_policy.0.create_subnetwork", "ip_allocation_policy.0.subnetwork_name"} - ipAllocationCidrBlockFields = []string{"ip_allocation_policy.0.cluster_ipv4_cidr_block", "ip_allocation_policy.0.services_ipv4_cidr_block", "ip_allocation_policy.0.node_ipv4_cidr_block"} + ipAllocationCidrBlockFields = []string{"ip_allocation_policy.0.cluster_ipv4_cidr_block", "ip_allocation_policy.0.services_ipv4_cidr_block"} ipAllocationRangeFields = []string{"ip_allocation_policy.0.cluster_secondary_range_name", "ip_allocation_policy.0.services_secondary_range_name"} + + addonsConfigKeys = []string{ + "addons_config.0.http_load_balancing", + "addons_config.0.horizontal_pod_autoscaling", + "addons_config.0.network_policy_config", + <% unless version == 'ga' -%> + "addons_config.0.istio_config", + "addons_config.0.cloudrun_config", + <% end -%> + } ) <% unless version == 'ga' -%> @@ -80,7 +92,6 @@ func resourceContainerCluster() *schema.Resource { Delete: resourceContainerClusterDelete, CustomizeDiff: customdiff.All( - resourceContainerClusterIpAllocationCustomizeDiff, resourceNodeConfigEmptyGuestAccelerator, containerClusterPrivateClusterConfigCustomDiff, ), @@ -131,25 +142,18 @@ func resourceContainerCluster() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - ConflictsWith: []string{"zone", "region"}, }, "region": { Type: schema.TypeString, Optional: true, - Computed: true, - ForceNew: true, - Deprecated: "Use location instead", - ConflictsWith: []string{"zone", "location"}, + Removed: "Use location instead", }, "zone": { Type: schema.TypeString, Optional: true, - Computed: true, - ForceNew: true, - Deprecated: "Use location instead", - ConflictsWith: []string{"region", "location"}, + Removed: "Use location instead", }, "node_locations": { @@ -162,8 +166,7 @@ func resourceContainerCluster() *schema.Resource { "additional_zones": { Type: schema.TypeSet, Optional: true, - Computed: true, - Deprecated: "Use node_locations instead", + Removed: "Use node_locations instead", Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -175,75 +178,76 @@ func resourceContainerCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "http_load_balancing": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "disabled": { Type: schema.TypeBool, - Optional: true, + Required: true, }, }, }, }, "horizontal_pod_autoscaling": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "disabled": { Type: schema.TypeBool, - Optional: true, + Required: true, }, }, }, }, "kubernetes_dashboard": { Type: schema.TypeList, - Optional: true, - Computed: true, - Deprecated: "The Kubernetes Dashboard addon is deprecated for clusters on GKE.", + Optional: true, + Removed: "The Kubernetes Dashboard addon is removed for clusters on GKE.", MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "disabled": { Type: schema.TypeBool, Optional: true, - Default: true, }, }, }, }, "network_policy_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: 
schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "disabled": { Type: schema.TypeBool, - Optional: true, + Required: true, }, }, }, }, <% unless version == 'ga' -%> "istio_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "disabled": { Type: schema.TypeBool, - Default: false, - Optional: true, + Required: true, }, "auth": { Type: schema.TypeString, @@ -256,17 +260,17 @@ func resourceContainerCluster() *schema.Resource { }, }, "cloudrun_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + ForceNew: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "disabled": { Type: schema.TypeBool, - Default: false, - Optional: true, + Required: true, }, }, }, @@ -323,6 +327,7 @@ func resourceContainerCluster() *schema.Resource { Computed: true, ForceNew: true, ValidateFunc: orEmpty(validateRFC1918Network(8, 32)), + ConflictsWith: []string{"ip_allocation_policy"}, }, "description": { @@ -402,7 +407,7 @@ func resourceContainerCluster() *schema.Resource { "logging_service": { Type: schema.TypeString, Optional: true, - Computed: true, + Default: "logging.googleapis.com/kubernetes", ValidateFunc: validation.StringInSlice([]string{"logging.googleapis.com", "logging.googleapis.com/kubernetes", "none"}, false), }, @@ -476,12 +481,14 @@ func resourceContainerCluster() *schema.Resource { "password": { Type: schema.TypeString, Optional: true, + AtLeastOneOf: []string{"master_auth.0.password", "master_auth.0.username", "master_auth.0.client_certificate_config"}, Sensitive: true, }, "username": { Type: schema.TypeString, Optional: true, + AtLeastOneOf: []string{"master_auth.0.password", "master_auth.0.username", "master_auth.0.client_certificate_config"}, }, // Ideally, this would be Optional (and not Computed). @@ -493,6 +500,7 @@ func resourceContainerCluster() *schema.Resource { MaxItems: 1, Optional: true, Computed: true, + AtLeastOneOf: []string{"master_auth.0.password", "master_auth.0.username", "master_auth.0.client_certificate_config"}, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -539,7 +547,7 @@ func resourceContainerCluster() *schema.Resource { "monitoring_service": { Type: schema.TypeString, Optional: true, - Computed: true, + Default: "monitoring.googleapis.com/kubernetes", ValidateFunc: validation.StringInSlice([]string{"monitoring.googleapis.com", "monitoring.googleapis.com/kubernetes", "none"}, false), }, @@ -560,8 +568,7 @@ func resourceContainerCluster() *schema.Resource { Schema: map[string]*schema.Schema{ "enabled": { Type: schema.TypeBool, - Optional: true, - Default: false, + Required: true, }, "provider": { Type: schema.TypeString, @@ -653,37 +660,9 @@ func resourceContainerCluster() *schema.Resource { MaxItems: 1, ForceNew: true, Optional: true, - Computed: true, - ConfigMode: schema.SchemaConfigModeAttr, + ConflictsWith: []string{"cluster_ipv4_cidr"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "use_ip_aliases": { - Type: schema.TypeBool, - Deprecated: "This field is being removed in 3.0.0. If set to true, remove it from your config. 
If false, remove i.", - Optional: true, - Default: true, - ForceNew: true, - }, - - // GKE creates subnetwork automatically - "create_subnetwork": { - Type: schema.TypeBool, - Deprecated: "This field is being removed in 3.0.0. Define an explicit google_compute_subnetwork and use subnetwork instead.", - Computed: true, - Optional: true, - ForceNew: true, - ConflictsWith: ipAllocationRangeFields, - }, - - "subnetwork_name": { - Type: schema.TypeString, - Deprecated: "This field is being removed in 3.0.0. Define an explicit google_compute_subnetwork and use subnetwork instead.", - Computed: true, - Optional: true, - ForceNew: true, - ConflictsWith: ipAllocationRangeFields, - }, - // GKE creates/deletes secondary ranges in VPC "cluster_ipv4_cidr_block": { Type: schema.TypeString, @@ -693,6 +672,7 @@ func resourceContainerCluster() *schema.Resource { ConflictsWith: ipAllocationRangeFields, DiffSuppressFunc: cidrOrSizeDiffSuppress, }, + "services_ipv4_cidr_block": { Type: schema.TypeString, Optional: true, @@ -701,15 +681,6 @@ func resourceContainerCluster() *schema.Resource { ConflictsWith: ipAllocationRangeFields, DiffSuppressFunc: cidrOrSizeDiffSuppress, }, - "node_ipv4_cidr_block": { - Type: schema.TypeString, - Deprecated: "This field is being removed in 3.0.0. Define an explicit google_compute_subnetwork and use subnetwork instead.", - Computed: true, - Optional: true, - ForceNew: true, - ConflictsWith: ipAllocationRangeFields, - DiffSuppressFunc: cidrOrSizeDiffSuppress, - }, // User manages secondary ranges manually "cluster_secondary_range_name": { @@ -717,14 +688,44 @@ func resourceContainerCluster() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - ConflictsWith: append(ipAllocationSubnetFields, ipAllocationCidrBlockFields...), + ConflictsWith: ipAllocationCidrBlockFields, }, + "services_secondary_range_name": { Type: schema.TypeString, Optional: true, Computed: true, ForceNew: true, - ConflictsWith: append(ipAllocationSubnetFields, ipAllocationCidrBlockFields...), + ConflictsWith: ipAllocationCidrBlockFields, + }, + + "use_ip_aliases": { + Type: schema.TypeBool, + Removed: "This field is removed as of 3.0.0. If previously set to true, remove it from your config. If false, remove it.", + Computed: true, + Optional: true, + }, + + // GKE creates subnetwork automatically + "create_subnetwork": { + Type: schema.TypeBool, + Removed: "This field is removed as of 3.0.0. Define an explicit google_compute_subnetwork and use subnetwork instead.", + Computed: true, + Optional: true, + }, + + "subnetwork_name": { + Type: schema.TypeString, + Removed: "This field is removed as of 3.0.0. Define an explicit google_compute_subnetwork and use subnetwork instead.", + Computed: true, + Optional: true, + }, + + "node_ipv4_cidr_block": { + Type: schema.TypeString, + Removed: "This field is removed as of 3.0.0. 
Define an explicit google_compute_subnetwork and use subnetwork instead.", + Computed: true, + Optional: true, }, }, }, @@ -745,7 +746,7 @@ func resourceContainerCluster() *schema.Resource { Schema: map[string]*schema.Schema{ "enable_private_endpoint": { Type: schema.TypeBool, - Optional: true, + Required: true, ForceNew: true, DiffSuppressFunc: containerClusterPrivateClusterConfigSuppress, }, @@ -797,8 +798,7 @@ func resourceContainerCluster() *schema.Resource { Schema: map[string]*schema.Schema{ "channel": { Type: schema.TypeString, - Default: "UNSPECIFIED", - Optional: true, + Required: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{"UNSPECIFIED", "RAPID", "REGULAR", "STABLE"}, false), DiffSuppressFunc: emptyOrDefaultStringSuppress("UNSPECIFIED"), @@ -815,7 +815,7 @@ func resourceContainerCluster() *schema.Resource { Schema: map[string]*schema.Schema{ "enabled": { Type: schema.TypeBool, - Optional: true, + Required: true, }, }, }, @@ -962,36 +962,6 @@ func resourceNodeConfigEmptyGuestAccelerator(diff *schema.ResourceDiff, meta int return nil } -func resourceContainerClusterIpAllocationCustomizeDiff(diff *schema.ResourceDiff, meta interface{}) error { - // separate func to allow unit testing - return resourceContainerClusterIpAllocationCustomizeDiffFunc(diff) -} - -func resourceContainerClusterIpAllocationCustomizeDiffFunc(diff TerraformResourceDiff) error { - o, n := diff.GetChange("ip_allocation_policy") - - oList := o.([]interface{}) - nList := n.([]interface{}) - if len(oList) > 0 || len(nList) == 0 { - // we only care about going from unset to set, so return early if the field was set before - // or is unset now - return nil - } - - // Unset is equivalent to a block where all the values are zero - // This might change if use_ip_aliases ends up defaulting to true server-side. - // The console says it will eventually, but it's unclear whether that's in the API - // too or just client code. - polMap := nList[0].(map[string]interface{}) - for _, v := range polMap { - if !isEmptyValue(reflect.ValueOf(v)) { - // found a non-empty value, so continue with the diff as it was - return nil - } - } - return diff.Clear("ip_allocation_policy") -} - func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -1005,14 +975,6 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er return err } - // When parsing a subnetwork by name, we expect region or zone to be set. - // Users may have set location to either value, so set that value. 
- if isZone(location) { - d.Set("zone", location) - } else { - d.Set("region", location) - } - clusterName := d.Get("name").(string) cluster := &containerBeta.Cluster{ @@ -1072,20 +1034,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er if v, ok := d.GetOk("node_locations"); ok { locationsSet := v.(*schema.Set) if locationsSet.Contains(location) { - return fmt.Errorf("when using a multi-zonal cluster, additional_zones should not contain the original 'zone'") - } - - // GKE requires a full list of node locations - // but when using a multi-zonal cluster our schema only asks for the - // additional zones, so append the cluster location if it's a zone - if isZone(location) { - locationsSet.Add(location) - } - cluster.Locations = convertStringSet(locationsSet) - } else if v, ok := d.GetOk("additional_zones"); ok { - locationsSet := v.(*schema.Set) - if locationsSet.Contains(location) { - return fmt.Errorf("when using a multi-zonal cluster, additional_zones should not contain the original 'zone'") + return fmt.Errorf("when using a multi-zonal cluster, node_locations should not contain the original 'zone'") } // GKE requires a full list of node locations @@ -1179,7 +1128,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er return err } - d.SetId(clusterName) + d.SetId(containerClusterFullName(project, location, clusterName)) // Wait until it's created timeoutInMinutes := int(d.Timeout(schema.TimeoutCreate).Minutes()) @@ -1259,16 +1208,10 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro } d.Set("location", cluster.Location) - if isZone(cluster.Location) { - d.Set("zone", cluster.Location) - } else { - d.Set("region", cluster.Location) - } locations := schema.NewSet(schema.HashString, convertStringArrToInterface(cluster.Locations)) locations.Remove(cluster.Zone) // Remove the original zone since we only store additional zones d.Set("node_locations", locations) - d.Set("additional_zones", locations) d.Set("endpoint", cluster.Endpoint) if err := d.Set("maintenance_policy", flattenMaintenancePolicy(cluster.MaintenancePolicy)); err != nil { @@ -1575,57 +1518,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er d.SetPartial("maintenance_policy") } - // we can only ever see a change to one of additional_zones and node_locations; because - // thy conflict with each other and are each computed, Terraform will suppress the diff - // on one of them even when migrating from one to the other. - if d.HasChange("additional_zones") { - azSetOldI, azSetNewI := d.GetChange("additional_zones") - azSetNew := azSetNewI.(*schema.Set) - azSetOld := azSetOldI.(*schema.Set) - if azSetNew.Contains(location) { - return fmt.Errorf("additional_zones should not contain the original 'zone'") - } - // Since we can't add & remove zones in the same request, first add all the - // zones, then remove the ones we aren't using anymore. - azSet := azSetOld.Union(azSetNew) - - if isZone(location) { - azSet.Add(location) - } - - req := &containerBeta.UpdateClusterRequest{ - Update: &containerBeta.ClusterUpdate{ - DesiredLocations: convertStringSet(azSet), - }, - } - - updateF := updateFunc(req, "updating GKE cluster node locations") - // Call update serially. 
- if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - if isZone(location) { - azSetNew.Add(location) - } - if !azSet.Equal(azSetNew) { - req = &containerBeta.UpdateClusterRequest{ - Update: &containerBeta.ClusterUpdate{ - DesiredLocations: convertStringSet(azSetNew), - }, - } - - updateF := updateFunc(req, "updating GKE cluster node locations") - // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - } - - log.Printf("[INFO] GKE cluster %s node locations have been updated to %v", d.Id(), azSet.List()) - - d.SetPartial("additional_zones") - } else if d.HasChange("node_locations") { + if d.HasChange("node_locations") { azSetOldI, azSetNewI := d.GetChange("node_locations") azSetNew := azSetNewI.(*schema.Set) azSetOld := azSetOldI.(*schema.Set) @@ -2220,14 +2113,6 @@ func expandClusterAddonsConfig(configured interface{}) *containerBeta.AddonsConf } } - if v, ok := config["kubernetes_dashboard"]; ok && len(v.([]interface{})) > 0 { - addon := v.([]interface{})[0].(map[string]interface{}) - ac.KubernetesDashboard = &containerBeta.KubernetesDashboard{ - Disabled: addon["disabled"].(bool), - ForceSendFields: []string{"Disabled"}, - } - } - if v, ok := config["network_policy_config"]; ok && len(v.([]interface{})) > 0 { addon := v.([]interface{})[0].(map[string]interface{}) ac.NetworkPolicyConfig = &containerBeta.NetworkPolicyConfig{ @@ -2261,20 +2146,17 @@ func expandClusterAddonsConfig(configured interface{}) *containerBeta.AddonsConf func expandIPAllocationPolicy(configured interface{}) *containerBeta.IPAllocationPolicy { l := configured.([]interface{}) if len(l) == 0 || l[0] == nil { - return nil + return &containerBeta.IPAllocationPolicy{ + UseIpAliases: false, + ForceSendFields: []string{"UseIpAliases"}, + } } config := l[0].(map[string]interface{}) - return &containerBeta.IPAllocationPolicy{ - UseIpAliases: config["use_ip_aliases"].(bool), - - CreateSubnetwork: config["create_subnetwork"].(bool), - SubnetworkName: config["subnetwork_name"].(string), - + UseIpAliases: true, ClusterIpv4CidrBlock: config["cluster_ipv4_cidr_block"].(string), ServicesIpv4CidrBlock: config["services_ipv4_cidr_block"].(string), - NodeIpv4CidrBlock: config["node_ipv4_cidr_block"].(string), ClusterSecondaryRangeName: config["cluster_secondary_range_name"].(string), ServicesSecondaryRangeName: config["services_secondary_range_name"].(string), @@ -2628,13 +2510,6 @@ func flattenClusterAddonsConfig(c *containerBeta.AddonsConfig) []map[string]inte }, } } - if c.KubernetesDashboard != nil { - result["kubernetes_dashboard"] = []map[string]interface{}{ - { - "disabled": c.KubernetesDashboard.Disabled, - }, - } - } if c.NetworkPolicyConfig != nil { result["network_policy_config"] = []map[string]interface{}{ { @@ -2745,33 +2620,16 @@ func flattenWorkloadIdentityConfig(c *containerBeta.WorkloadIdentityConfig) []ma <% end -%> func flattenIPAllocationPolicy(c *containerBeta.Cluster, d *schema.ResourceData, config *Config) []map[string]interface{} { - if c == nil || c.IpAllocationPolicy == nil { + // If IP aliasing isn't enabled, none of the values in this block can be set. 
+ if c == nil || c.IpAllocationPolicy == nil || c.IpAllocationPolicy.UseIpAliases == false { return nil } - nodeCidrBlock := "" - if c.Subnetwork != "" { - subnetwork, err := ParseSubnetworkFieldValue(c.Subnetwork, d, config) - if err == nil { - sn, err := config.clientCompute.Subnetworks.Get(subnetwork.Project, subnetwork.Region, subnetwork.Name).Do() - if err == nil { - nodeCidrBlock = sn.IpCidrRange - } - } else { - log.Printf("[WARN] Unable to parse subnetwork name, got error while trying to get new subnetwork: %s", err) - } - } + p := c.IpAllocationPolicy return []map[string]interface{}{ { - "use_ip_aliases": p.UseIpAliases, - - "create_subnetwork": p.CreateSubnetwork, - "subnetwork_name": p.SubnetworkName, - "cluster_ipv4_cidr_block": p.ClusterIpv4CidrBlock, "services_ipv4_cidr_block": p.ServicesIpv4CidrBlock, - "node_ipv4_cidr_block": nodeCidrBlock, - "cluster_secondary_range_name": p.ClusterSecondaryRangeName, "services_secondary_range_name": p.ServicesSecondaryRangeName, }, @@ -2925,42 +2783,28 @@ func flattenDatabaseEncryption(c *containerBeta.DatabaseEncryption) []map[string func resourceContainerClusterStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*Config) - parts := strings.Split(d.Id(), "/") - var project, location, clusterName string - switch len(parts) { - case 2: - location = parts[0] - clusterName = parts[1] - case 3: - project = parts[0] - location = parts[1] - clusterName = parts[2] - default: - return nil, fmt.Errorf("Invalid container cluster specifier. Expecting {location}/{name} or {project}/{location}/{name}") - } - - if len(project) == 0 { - var err error - project, err = getProject(d, config) - if err != nil { - return nil, err - } + if err := parseImportId([]string{"projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/clusters/(?P<name>[^/]+)", "(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)", "(?P<location>[^/]+)/(?P<name>[^/]+)"}, d, config); err != nil { + return nil, err + } + project, err := getProject(d, config) + if err != nil { + return nil, err } - d.Set("project", project) - d.Set("location", location) - if isZone(location) { - d.Set("zone", location) - } else { - d.Set("region", location) + location, err := getLocation(d, config) + if err != nil { + return nil, err } - d.Set("name", clusterName) - d.SetId(clusterName) + clusterName := d.Get("name").(string) + + d.Set("location", location) if err := waitForContainerClusterReady(config, project, location, clusterName, d.Timeout(schema.TimeoutCreate)); err != nil { return nil, err } + d.SetId(containerClusterFullName(project, location, clusterName)) + return []*schema.ResourceData{d}, nil } diff --git a/third_party/terraform/resources/resource_container_node_pool.go.erb b/third_party/terraform/resources/resource_container_node_pool.go.erb index 5f5630ca446f..e11d6eefddd2 100644 --- a/third_party/terraform/resources/resource_container_node_pool.go.erb +++ b/third_party/terraform/resources/resource_container_node_pool.go.erb @@ -55,16 +55,12 @@ func resourceContainerNodePool() *schema.Resource { "zone": { Type: schema.TypeString, Optional: true, - Computed: true, - Deprecated: "use location instead", - ForceNew: true, + Removed: "use location instead", }, "region": { Type: schema.TypeString, Optional: true, - Computed: true, - Deprecated: "use location instead", - ForceNew: true, + Removed: "use location instead", }, "location": { Type: schema.TypeString, @@ -269,7 +265,7 @@ func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) e } timeout -= time.Since(startTime) -
d.SetId(fmt.Sprintf("%s/%s/%s", nodePoolInfo.location, nodePoolInfo.cluster, nodePool.Name)) + d.SetId(fmt.Sprintf("projects/%s/locations/%s/clusters/%s/nodePools/%s", nodePoolInfo.project, nodePoolInfo.location, nodePoolInfo.cluster, nodePool.Name)) waitErr := containerOperationWait(config, operation, nodePoolInfo.project, @@ -323,12 +319,6 @@ func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) err d.Set(k, v) } - if isZone(nodePoolInfo.location) { - d.Set("zone", nodePoolInfo.location) - } else { - d.Set("region", nodePoolInfo.location) - } - d.Set("location", nodePoolInfo.location) d.Set("project", nodePoolInfo.project) @@ -424,40 +414,17 @@ func resourceContainerNodePoolExists(d *schema.ResourceData, meta interface{}) ( } func resourceContainerNodePoolStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - parts := strings.Split(d.Id(), "/") - - switch len(parts) { - case 3: - location := parts[0] - if isZone(location) { - d.Set("zone", location) - } else { - d.Set("region", location) - } - - d.Set("location", location) - d.Set("cluster", parts[1]) - d.Set("name", parts[2]) - case 4: - d.Set("project", parts[0]) - - location := parts[1] - if isZone(location) { - d.Set("zone", location) - } else { - d.Set("region", location) - } - - d.Set("location", location) - d.Set("cluster", parts[2]) - d.Set("name", parts[3]) - - // override the inputted ID with the // format - d.SetId(strings.Join(parts[1:], "/")) - default: - return nil, fmt.Errorf("Invalid container cluster specifier. Expecting {location}/{cluster}/{name} or {project}/{location}/{cluster}/{name}") + config := meta.(*Config) + if err := parseImportId([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/clusters/(?P[^/]+)/nodePools/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)"}, d, config); err != nil { + return nil, err } + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/nodePools/{{name}}") + if err != nil { + return nil, err + } + d.SetId(id) + return []*schema.ResourceData{d}, nil } @@ -815,5 +782,6 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node func getNodePoolName(id string) string { // name can be specified with name, name_prefix, or neither, so read it from the id. 
- return strings.Split(id, "/")[2] + splits := strings.Split(id, "/") + return splits[len(splits)-1] } diff --git a/third_party/terraform/resources/resource_dataproc_cluster.go.erb b/third_party/terraform/resources/resource_dataproc_cluster.go.erb index 556ae53c7b4e..7037e5343955 100644 --- a/third_party/terraform/resources/resource_dataproc_cluster.go.erb +++ b/third_party/terraform/resources/resource_dataproc_cluster.go.erb @@ -16,7 +16,46 @@ import ( "google.golang.org/api/dataproc/v1beta2" ) -var resolveDataprocImageVersion = regexp.MustCompile(`(?P[^\s.-]+)\.(?P[^\s.-]+)(?:\.(?P[^\s.-]+))?(?:\-(?P[^\s.-]+))?`) +var ( + resolveDataprocImageVersion = regexp.MustCompile(`(?P[^\s.-]+)\.(?P[^\s.-]+)(?:\.(?P[^\s.-]+))?(?:\-(?P[^\s.-]+))?`) + + gceClusterConfigKeys = []string{ + "cluster_config.0.gce_cluster_config.0.zone", + "cluster_config.0.gce_cluster_config.0.network", + "cluster_config.0.gce_cluster_config.0.subnetwork", + "cluster_config.0.gce_cluster_config.0.tags", + "cluster_config.0.gce_cluster_config.0.service_account", + "cluster_config.0.gce_cluster_config.0.service_account_scopes", + "cluster_config.0.gce_cluster_config.0.internal_ip_only", + "cluster_config.0.gce_cluster_config.0.metadata", + } + + preemptibleWorkerDiskConfigKeys = []string{ + "cluster_config.0.preemptible_worker_config.0.disk_config.0.num_local_ssds", + "cluster_config.0.preemptible_worker_config.0.disk_config.0.boot_disk_size_gb", + "cluster_config.0.preemptible_worker_config.0.disk_config.0.boot_disk_type", + } + + clusterSoftwareConfigKeys = []string{ + "cluster_config.0.software_config.0.image_version", + "cluster_config.0.software_config.0.override_properties", + "cluster_config.0.software_config.0.optional_components", + } + + clusterConfigKeys = []string{ + "cluster_config.0.staging_bucket", + "cluster_config.0.gce_cluster_config", + "cluster_config.0.master_config", + "cluster_config.0.worker_config", + "cluster_config.0.preemptible_worker_config", + "cluster_config.0.software_config", + "cluster_config.0.initialization_action", + "cluster_config.0.encryption_config", +<% unless version == 'ga' -%> + "cluster_config.0.autoscaling_config", +<% end -%> + } +) func resourceDataprocCluster() *schema.Resource { return &schema.Resource{ @@ -91,18 +130,11 @@ func resourceDataprocCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "delete_autogen_bucket": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Removed: "If you need a bucket that can be deleted, please create" + - "a new one and set the `staging_bucket` field", - }, - "staging_bucket": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: clusterConfigKeys, + ForceNew: true, }, // If the user does not specify a staging bucket, GCP will allocate one automatically. 
// The staging_bucket field provides a way for the user to supply their own @@ -115,24 +147,27 @@ func resourceDataprocCluster() *schema.Resource { }, "gce_cluster_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: clusterConfigKeys, + Computed: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "zone": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: gceClusterConfigKeys, + ForceNew: true, }, "network": { Type: schema.TypeString, Optional: true, Computed: true, + AtLeastOneOf: gceClusterConfigKeys, ForceNew: true, ConflictsWith: []string{"cluster_config.0.gce_cluster_config.0.subnetwork"}, DiffSuppressFunc: compareSelfLinkOrResourceName, @@ -141,29 +176,33 @@ func resourceDataprocCluster() *schema.Resource { "subnetwork": { Type: schema.TypeString, Optional: true, + AtLeastOneOf: gceClusterConfigKeys, ForceNew: true, ConflictsWith: []string{"cluster_config.0.gce_cluster_config.0.network"}, DiffSuppressFunc: compareSelfLinkOrResourceName, }, "tags": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeSet, + Optional: true, + AtLeastOneOf: gceClusterConfigKeys, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, "service_account": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: gceClusterConfigKeys, + ForceNew: true, }, "service_account_scopes": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeSet, + Optional: true, + Computed: true, + AtLeastOneOf: gceClusterConfigKeys, + ForceNew: true, Elem: &schema.Schema{ Type: schema.TypeString, StateFunc: func(v interface{}) string { @@ -174,36 +213,43 @@ func resourceDataprocCluster() *schema.Resource { }, "internal_ip_only": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: gceClusterConfigKeys, + ForceNew: true, + Default: false, }, "metadata": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ForceNew: true, + Type: schema.TypeMap, + Optional: true, + AtLeastOneOf: gceClusterConfigKeys, + Elem: &schema.Schema{Type: schema.TypeString}, + ForceNew: true, }, }, }, }, - "master_config": instanceConfigSchema(), - "worker_config": instanceConfigSchema(), + "master_config": instanceConfigSchema("master_config"), + "worker_config": instanceConfigSchema("worker_config"), // preemptible_worker_config has a slightly different config "preemptible_worker_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: clusterConfigKeys, + Computed: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "num_instances": { Type: schema.TypeInt, Optional: true, Computed: true, + AtLeastOneOf: []string{ + "cluster_config.0.preemptible_worker_config.0.num_instances", + "cluster_config.0.preemptible_worker_config.0.disk_config", + }, }, // API does not honour this if set ... 
@@ -216,21 +262,27 @@ func resourceDataprocCluster() *schema.Resource { Type: schema.TypeList, Optional: true, Computed: true, + AtLeastOneOf: []string{ + "cluster_config.0.preemptible_worker_config.0.num_instances", + "cluster_config.0.preemptible_worker_config.0.disk_config", + }, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "num_local_ssds": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + AtLeastOneOf: preemptibleWorkerDiskConfigKeys, + ForceNew: true, }, "boot_disk_size_gb": { Type: schema.TypeInt, Optional: true, Computed: true, + AtLeastOneOf: preemptibleWorkerDiskConfigKeys, ForceNew: true, ValidateFunc: validation.IntAtLeast(10), }, @@ -238,6 +290,7 @@ func resourceDataprocCluster() *schema.Resource { "boot_disk_type": { Type: schema.TypeString, Optional: true, + AtLeastOneOf: preemptibleWorkerDiskConfigKeys, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{"pd-standard", "pd-ssd", ""}, false), Default: "pd-standard", @@ -256,10 +309,11 @@ func resourceDataprocCluster() *schema.Resource { }, "software_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: clusterConfigKeys, + Computed: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -267,15 +321,17 @@ func resourceDataprocCluster() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, + AtLeastOneOf: clusterSoftwareConfigKeys, ForceNew: true, DiffSuppressFunc: dataprocImageVersionDiffSuppress, }, "override_properties": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeMap, + Optional: true, + AtLeastOneOf: clusterSoftwareConfigKeys, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, "properties": { @@ -293,8 +349,9 @@ func resourceDataprocCluster() *schema.Resource { // is overridden, this will be empty. 
"optional_components": { - Type: schema.TypeSet, - Optional: true, + Type: schema.TypeSet, + Optional: true, + AtLeastOneOf: clusterSoftwareConfigKeys, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"COMPONENT_UNSPECIFIED", "ANACONDA", "DRUID", "HIVE_WEBHCAT", @@ -306,9 +363,10 @@ func resourceDataprocCluster() *schema.Resource { }, "initialization_action": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: clusterConfigKeys, + ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "script": { @@ -327,9 +385,10 @@ func resourceDataprocCluster() *schema.Resource { }, }, "encryption_config": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: clusterConfigKeys, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "kms_key_name": { @@ -341,14 +400,15 @@ func resourceDataprocCluster() *schema.Resource { }, <% unless version == 'ga' -%> "autoscaling_config": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: clusterConfigKeys, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "policy_uri": { Type: schema.TypeString, - Optional: true, + Required: true, }, }, }, @@ -361,61 +421,88 @@ func resourceDataprocCluster() *schema.Resource { } } -func instanceConfigSchema() *schema.Schema { +func instanceConfigSchema(parent string) *schema.Schema { + var instanceConfigKeys = []string{ + "cluster_config.0."+parent+".0.num_instances", + "cluster_config.0."+parent+".0.image_uri", + "cluster_config.0."+parent+".0.machine_type", +<% unless version == 'ga' -%> + "cluster_config.0."+parent+".0.min_cpu_platform", +<% end -%> + "cluster_config.0."+parent+".0.disk_config", + "cluster_config.0."+parent+".0.accelerators", + } + return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: clusterConfigKeys, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "num_instances": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + AtLeastOneOf: instanceConfigKeys, }, "image_uri": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: instanceConfigKeys, + ForceNew: true, }, "machine_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: instanceConfigKeys, + ForceNew: true, }, <% unless version == 'ga' -%> "min_cpu_platform": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: instanceConfigKeys, + ForceNew: true, }, <% end -%> "disk_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: instanceConfigKeys, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "num_local_ssds": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + AtLeastOneOf: []string{ + 
"cluster_config.0."+parent+".0.disk_config.0.num_local_ssds", + "cluster_config.0."+parent+".0.disk_config.0.boot_disk_size_gb", + "cluster_config.0."+parent+".0.disk_config.0.boot_disk_type", + }, + ForceNew: true, }, "boot_disk_size_gb": { Type: schema.TypeInt, Optional: true, Computed: true, + AtLeastOneOf: []string{ + "cluster_config.0."+parent+".0.disk_config.0.num_local_ssds", + "cluster_config.0."+parent+".0.disk_config.0.boot_disk_size_gb", + "cluster_config.0."+parent+".0.disk_config.0.boot_disk_type", + }, ForceNew: true, ValidateFunc: validation.IntAtLeast(10), }, @@ -423,6 +510,11 @@ func instanceConfigSchema() *schema.Schema { "boot_disk_type": { Type: schema.TypeString, Optional: true, + AtLeastOneOf: []string{ + "cluster_config.0."+parent+".0.disk_config.0.num_local_ssds", + "cluster_config.0."+parent+".0.disk_config.0.boot_disk_size_gb", + "cluster_config.0."+parent+".0.disk_config.0.boot_disk_type", + }, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{"pd-standard", "pd-ssd", ""}, false), Default: "pd-standard", @@ -433,10 +525,11 @@ func instanceConfigSchema() *schema.Schema { // Note: preemptible workers don't support accelerators "accelerators": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: acceleratorsSchema(), + Type: schema.TypeSet, + Optional: true, + AtLeastOneOf: instanceConfigKeys, + ForceNew: true, + Elem: acceleratorsSchema(), }, "instance_names": { @@ -504,7 +597,7 @@ func resourceDataprocClusterCreate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error creating Dataproc cluster: %s", err) } - d.SetId(cluster.ClusterName) + d.SetId(fmt.Sprintf("projects/%s/regions/%s/clusters/%s", project, region, cluster.ClusterName)) // Wait until it's created timeoutInMinutes := int(d.Timeout(schema.TimeoutCreate).Minutes()) diff --git a/third_party/terraform/resources/resource_dataproc_job.go b/third_party/terraform/resources/resource_dataproc_job.go index ffc9658738f6..e4ced4e41b63 100644 --- a/third_party/terraform/resources/resource_dataproc_job.go +++ b/third_party/terraform/resources/resource_dataproc_job.go @@ -3,6 +3,7 @@ package google import ( "fmt" "log" + "strings" "time" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" @@ -148,7 +149,7 @@ func resourceDataprocJob() *schema.Resource { "max_failures_per_hour": { Type: schema.TypeInt, Description: "Maximum number of times per hour a driver may be restarted as a result of driver terminating with non-zero code before job is reported failed.", - Optional: true, + Required: true, ForceNew: true, ValidateFunc: validation.IntAtMost(10), }, @@ -181,7 +182,6 @@ func resourceDataprocJobCreate(d *schema.ResourceData, meta interface{}) error { return err } - jobConfCount := 0 clusterName := d.Get("placement.0.cluster_name").(string) region := d.Get("region").(string) @@ -204,52 +204,42 @@ func resourceDataprocJobCreate(d *schema.ResourceData, meta interface{}) error { } if v, ok := d.GetOk("pyspark_config"); ok { - jobConfCount++ config := extractFirstMapConfig(v.([]interface{})) submitReq.Job.PysparkJob = expandPySparkJob(config) } if v, ok := d.GetOk("spark_config"); ok { - jobConfCount++ config := extractFirstMapConfig(v.([]interface{})) submitReq.Job.SparkJob = expandSparkJob(config) } if v, ok := d.GetOk("hadoop_config"); ok { - jobConfCount++ config := extractFirstMapConfig(v.([]interface{})) submitReq.Job.HadoopJob = expandHadoopJob(config) } if v, ok := d.GetOk("hive_config"); ok { - jobConfCount++ config := 
extractFirstMapConfig(v.([]interface{})) submitReq.Job.HiveJob = expandHiveJob(config) } if v, ok := d.GetOk("pig_config"); ok { - jobConfCount++ config := extractFirstMapConfig(v.([]interface{})) submitReq.Job.PigJob = expandPigJob(config) } if v, ok := d.GetOk("sparksql_config"); ok { - jobConfCount++ config := extractFirstMapConfig(v.([]interface{})) submitReq.Job.SparkSqlJob = expandSparkSqlJob(config) } - if jobConfCount != 1 { - return fmt.Errorf("You must define and configure exactly one xxx_config block") - } - // Submit the job job, err := config.clientDataproc.Projects.Regions.Jobs.Submit( project, region, submitReq).Do() if err != nil { return err } - d.SetId(job.Reference.JobId) + d.SetId(fmt.Sprintf("projects/%s/regions/%s/jobs/%s", project, region, job.Reference.JobId)) timeoutInMinutes := int(d.Timeout(schema.TimeoutCreate).Minutes()) waitErr := dataprocJobOperationWait(config, region, project, job.Reference.JobId, @@ -271,10 +261,12 @@ func resourceDataprocJobRead(d *schema.ResourceData, meta interface{}) error { return err } + parts := strings.Split(d.Id(), "/") + jobId := parts[len(parts)-1] job, err := config.clientDataproc.Projects.Regions.Jobs.Get( - project, region, d.Id()).Do() + project, region, jobId).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Dataproc Job %q", d.Id())) + return handleNotFoundError(err, d, fmt.Sprintf("Dataproc Job %q", jobId)) } d.Set("force_delete", d.Get("force_delete")) @@ -320,15 +312,17 @@ func resourceDataprocJobDelete(d *schema.ResourceData, meta interface{}) error { forceDelete := d.Get("force_delete").(bool) timeoutInMinutes := int(d.Timeout(schema.TimeoutDelete).Minutes()) + parts := strings.Split(d.Id(), "/") + jobId := parts[len(parts)-1] if forceDelete { log.Printf("[DEBUG] Attempting to first cancel Dataproc job %s if it's still running ...", d.Id()) // ignore error if we get one - job may be finished already and not need to // be cancelled. We do however wait for the state to be one that is // at least not active - _, _ = config.clientDataproc.Projects.Regions.Jobs.Cancel(project, region, d.Id(), &dataproc.CancelJobRequest{}).Do() + _, _ = config.clientDataproc.Projects.Regions.Jobs.Cancel(project, region, jobId, &dataproc.CancelJobRequest{}).Do() - waitErr := dataprocJobOperationWait(config, region, project, d.Id(), + waitErr := dataprocJobOperationWait(config, region, project, jobId, "Cancelling Dataproc job", timeoutInMinutes, 1) if waitErr != nil { return waitErr @@ -338,12 +332,12 @@ func resourceDataprocJobDelete(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG] Deleting Dataproc job %s", d.Id()) _, err = config.clientDataproc.Projects.Regions.Jobs.Delete( - project, region, d.Id()).Do() + project, region, jobId).Do() if err != nil { return err } - waitErr := dataprocDeleteOperationWait(config, region, project, d.Id(), + waitErr := dataprocDeleteOperationWait(config, region, project, jobId, "Deleting Dataproc job", timeoutInMinutes, 1) if waitErr != nil { return waitErr @@ -368,7 +362,7 @@ var loggingConfig = &schema.Schema{ "driver_log_levels": { Type: schema.TypeMap, Description: "Optional. The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. 
Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'.", - Optional: true, + Required: true, ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -377,11 +371,11 @@ var loggingConfig = &schema.Schema{ } var pySparkSchema = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - ConflictsWith: []string{"spark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + ExactlyOneOf: []string{"pyspark_config", "spark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "main_python_file_uri": { @@ -494,26 +488,26 @@ func expandPySparkJob(config map[string]interface{}) *dataproc.PySparkJob { // ---- Spark Job ---- var sparkSchema = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - ConflictsWith: []string{"pyspark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + ExactlyOneOf: []string{"pyspark_config", "spark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ // main driver: can be only one of the class | jar_file "main_class": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"spark_config.0.main_jar_file_uri"}, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ExactlyOneOf: []string{"spark_config.0.main_class", "spark_config.0.main_jar_file_uri"}, }, "main_jar_file_uri": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"spark_config.0.main_class"}, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ExactlyOneOf: []string{"spark_config.0.main_jar_file_uri", "spark_config.0.main_class"}, }, "args": { @@ -607,26 +601,26 @@ func expandSparkJob(config map[string]interface{}) *dataproc.SparkJob { // ---- Hadoop Job ---- var hadoopSchema = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - ConflictsWith: []string{"spark_config", "pyspark_config", "hive_config", "pig_config", "sparksql_config"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + ExactlyOneOf: []string{"spark_config", "pyspark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ // main driver: can be only one of the main_class | main_jar_file_uri "main_class": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"hadoop_config.0.main_jar_file_uri"}, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ExactlyOneOf: []string{"hadoop_config.0.main_jar_file_uri", "hadoop_config.0.main_class"}, }, "main_jar_file_uri": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"hadoop_config.0.main_class"}, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ExactlyOneOf: []string{"hadoop_config.0.main_jar_file_uri", "hadoop_config.0.main_class"}, }, "args": { @@ -720,27 +714,27 @@ func expandHadoopJob(config map[string]interface{}) *dataproc.HadoopJob { // ---- Hive Job ---- var hiveSchema = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - ConflictsWith: []string{"spark_config", 
"pyspark_config", "hadoop_config", "pig_config", "sparksql_config"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + ExactlyOneOf: []string{"spark_config", "pyspark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ // main query: can be only one of query_list | query_file_uri "query_list": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ConflictsWith: []string{"hive_config.0.query_file_uri"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ExactlyOneOf: []string{"hive_config.0.query_file_uri", "hive_config.0.query_list"}, }, "query_file_uri": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"hive_config.0.query_list"}, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ExactlyOneOf: []string{"hive_config.0.query_file_uri", "hive_config.0.query_list"}, }, "continue_on_failure": { @@ -819,27 +813,27 @@ func expandHiveJob(config map[string]interface{}) *dataproc.HiveJob { // ---- Pig Job ---- var pigSchema = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - ConflictsWith: []string{"spark_config", "pyspark_config", "hadoop_config", "hive_config", "sparksql_config"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + ExactlyOneOf: []string{"spark_config", "pyspark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ // main query: can be only one of query_list | query_file_uri "query_list": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ConflictsWith: []string{"pig_config.0.query_file_uri"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ExactlyOneOf: []string{"pig_config.0.query_file_uri", "pig_config.0.query_list"}, }, "query_file_uri": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"pig_config.0.query_list"}, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ExactlyOneOf: []string{"pig_config.0.query_file_uri", "pig_config.0.query_list"}, }, "continue_on_failure": { @@ -921,27 +915,27 @@ func expandPigJob(config map[string]interface{}) *dataproc.PigJob { // ---- Spark SQL Job ---- var sparkSqlSchema = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - ConflictsWith: []string{"spark_config", "pyspark_config", "hadoop_config", "hive_config", "pig_config"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + ExactlyOneOf: []string{"spark_config", "pyspark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ // main query: can be only one of query_list | query_file_uri "query_list": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ConflictsWith: []string{"pig_config.0.query_file_uri"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ExactlyOneOf: []string{"sparksql_config.0.query_file_uri", "sparksql_config.0.query_list"}, }, "query_file_uri": { - Type: schema.TypeString, - Optional: true, - 
ForceNew: true, - ConflictsWith: []string{"pig_config.0.query_list"}, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ExactlyOneOf: []string{"sparksql_config.0.query_file_uri", "sparksql_config.0.query_list"}, }, "script_variables": { diff --git a/third_party/terraform/resources/resource_endpoints_service.go b/third_party/terraform/resources/resource_endpoints_service.go index 0411a050c704..b1fd24584154 100644 --- a/third_party/terraform/resources/resource_endpoints_service.go +++ b/third_party/terraform/resources/resource_endpoints_service.go @@ -35,12 +35,6 @@ func resourceEndpointsService() *schema.Resource { Type: schema.TypeString, Optional: true, }, - "protoc_output": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "Please use protoc_output_base64 instead.", - }, "protoc_output_base64": { Type: schema.TypeString, Optional: true, diff --git a/third_party/terraform/resources/resource_google_folder_organization_policy.go b/third_party/terraform/resources/resource_google_folder_organization_policy.go index 275c6dc12731..6bed8a291b24 100644 --- a/third_party/terraform/resources/resource_google_folder_organization_policy.go +++ b/third_party/terraform/resources/resource_google_folder_organization_policy.go @@ -43,8 +43,8 @@ func resourceFolderOrgPolicyImporter(d *schema.ResourceData, meta interface{}) ( config := meta.(*Config) if err := parseImportId([]string{ - "folders/(?P[^/]+):constraints/(?P[^/]+)", - "(?P[^/]+):(?P[^/]+)"}, + "folders/(?P[^/]+)/constraints/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)"}, d, config); err != nil { return nil, err } @@ -59,7 +59,7 @@ func resourceFolderOrgPolicyImporter(d *schema.ResourceData, meta interface{}) ( } func resourceGoogleFolderOrganizationPolicyCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId(fmt.Sprintf("%s:%s", d.Get("folder"), d.Get("constraint"))) + d.SetId(fmt.Sprintf("%s/%s", d.Get("folder"), d.Get("constraint"))) if isOrganizationPolicyUnset(d) { return resourceGoogleFolderOrganizationPolicyDelete(d, meta) diff --git a/third_party/terraform/resources/resource_google_organization_policy.go b/third_party/terraform/resources/resource_google_organization_policy.go index b322d9e9425d..66a4c72ec324 100644 --- a/third_party/terraform/resources/resource_google_organization_policy.go +++ b/third_party/terraform/resources/resource_google_organization_policy.go @@ -17,10 +17,10 @@ var schemaOrganizationPolicy = map[string]*schema.Schema{ DiffSuppressFunc: compareSelfLinkOrResourceName, }, "boolean_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{"list_policy", "restore_policy"}, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ExactlyOneOf: []string{"list_policy", "boolean_policy", "restore_policy"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enforced": { @@ -31,10 +31,10 @@ var schemaOrganizationPolicy = map[string]*schema.Schema{ }, }, "list_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{"boolean_policy", "restore_policy"}, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ExactlyOneOf: []string{"list_policy", "boolean_policy", "restore_policy"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "allow": { @@ -45,37 +45,40 @@ var schemaOrganizationPolicy = map[string]*schema.Schema{ Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "all": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ConflictsWith: 
[]string{"list_policy.0.allow.0.values"}, + Type: schema.TypeBool, + Optional: true, + Default: false, + ExactlyOneOf: []string{"list_policy.0.allow.0.all", "list_policy.0.allow.0.values"}, }, "values": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Type: schema.TypeSet, + Optional: true, + ExactlyOneOf: []string{"list_policy.0.allow.0.all", "list_policy.0.allow.0.values"}, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, }, }, }, }, "deny": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"list_policy.0.allow"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "all": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ConflictsWith: []string{"list_policy.0.deny.0.values"}, + Type: schema.TypeBool, + Optional: true, + Default: false, + ExactlyOneOf: []string{"list_policy.0.deny.0.all", "list_policy.0.deny.0.values"}, }, "values": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Type: schema.TypeSet, + Optional: true, + ExactlyOneOf: []string{"list_policy.0.deny.0.all", "list_policy.0.deny.0.values"}, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, }, }, }, @@ -106,10 +109,10 @@ var schemaOrganizationPolicy = map[string]*schema.Schema{ Computed: true, }, "restore_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{"boolean_policy", "list_policy"}, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ExactlyOneOf: []string{"restore_policy", "boolean_policy", "list_policy"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "default": { @@ -152,7 +155,7 @@ func resourceGoogleOrganizationPolicy() *schema.Resource { } func resourceGoogleOrganizationPolicyCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId(fmt.Sprintf("%s:%s", d.Get("org_id"), d.Get("constraint").(string))) + d.SetId(fmt.Sprintf("%s/%s", d.Get("org_id"), d.Get("constraint").(string))) if isOrganizationPolicyUnset(d) { return resourceGoogleOrganizationPolicyDelete(d, meta) @@ -221,9 +224,9 @@ func resourceGoogleOrganizationPolicyDelete(d *schema.ResourceData, meta interfa } func resourceGoogleOrganizationPolicyImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - parts := strings.Split(d.Id(), ":") + parts := strings.SplitN(d.Id(), "/", 2) if len(parts) != 2 { - return nil, fmt.Errorf("Invalid id format. Expecting {org_id}:{constraint}, got '%s' instead.", d.Id()) + return nil, fmt.Errorf("Invalid id format. 
Expecting {org_id}/{constraint}, got '%s' instead.", d.Id()) } d.Set("org_id", parts[0]) diff --git a/third_party/terraform/resources/resource_google_project.go b/third_party/terraform/resources/resource_google_project.go index 5e03f413f2f3..d6655836d178 100644 --- a/third_party/terraform/resources/resource_google_project.go +++ b/third_party/terraform/resources/resource_google_project.go @@ -1,6 +1,7 @@ package google import ( + "context" "fmt" "log" "regexp" @@ -13,6 +14,7 @@ import ( "google.golang.org/api/cloudbilling/v1" "google.golang.org/api/cloudresourcemanager/v1" "google.golang.org/api/googleapi" + "google.golang.org/api/serviceusage/v1" ) // resourceGoogleProject returns a *schema.Resource that allows a customer @@ -72,17 +74,6 @@ func resourceGoogleProject() *schema.Resource { Computed: true, StateFunc: parseFolderId, }, - "policy_data": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "Use the 'google_project_iam_policy' resource to define policies for a Google Project", - }, - "policy_etag": { - Type: schema.TypeString, - Computed: true, - Removed: "Use the the 'google_project_iam_policy' resource to define policies for a Google Project", - }, "number": { Type: schema.TypeString, Computed: true, @@ -96,109 +87,6 @@ func resourceGoogleProject() *schema.Resource { Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "app_engine": { - Type: schema.TypeList, - Elem: appEngineResource(), - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - }, - } -} - -func appEngineResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "auth_domain": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - "location_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - "serving_status": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - "feature_settings": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - Elem: appEngineFeatureSettingsResource(), - }, - "name": { - Type: schema.TypeString, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - "url_dispatch_rule": { - Type: schema.TypeList, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - Elem: appEngineURLDispatchRuleResource(), - }, - "code_bucket": { - Type: schema.TypeString, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - "default_hostname": { - Type: schema.TypeString, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - "default_bucket": { - Type: schema.TypeString, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - "gcr_domain": { - Type: schema.TypeString, - Computed: true, - Removed: "This field has been removed. 
Use the google_app_engine_application resource instead.", - }, - }, - } -} - -func appEngineURLDispatchRuleResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "domain": { - Type: schema.TypeString, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - "path": { - Type: schema.TypeString, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - "service": { - Type: schema.TypeString, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - }, - } -} - -func appEngineFeatureSettingsResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "split_health_checks": { - Type: schema.TypeBool, - Optional: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, }, } } @@ -236,7 +124,7 @@ func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error project.ProjectId, project.Name, err) } - d.SetId(pid) + d.SetId(fmt.Sprintf("projects/%s", pid)) // Wait for the operation to complete opAsMap, err := ConvertToMap(op) @@ -287,7 +175,8 @@ func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error func resourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - pid := d.Id() + parts := strings.Split(d.Id(), "/") + pid := parts[len(parts)-1] p, err := readGoogleProject(d, config) if err != nil { @@ -306,10 +195,6 @@ func resourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { d.Set("name", p.Name) d.Set("labels", p.Labels) - // We get app_engine.#: "" => "" without this set - // Remove when app_engine field is removed from schema completely - d.Set("app_engine", nil) - if p.Parent != nil { switch p.Parent.Type { case "organization": @@ -387,7 +272,8 @@ func parseFolderId(v interface{}) string { func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - pid := d.Id() + parts := strings.Split(d.Id(), "/") + pid := parts[len(parts)-1] project_name := d.Get("name").(string) // Read the project @@ -464,7 +350,8 @@ func resourceGoogleProjectDelete(d *schema.ResourceData, meta interface{}) error config := meta.(*Config) // Only delete projects if skip_delete isn't set if !d.Get("skip_delete").(bool) { - pid := d.Id() + parts := strings.Split(d.Id(), "/") + pid := parts[len(parts)-1] if err := retryTimeDuration(func() error { _, delErr := config.clientResourceManager.Projects.Delete(pid).Do() return delErr @@ -477,7 +364,8 @@ func resourceGoogleProjectDelete(d *schema.ResourceData, meta interface{}) error } func resourceProjectImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - pid := d.Id() + parts := strings.Split(d.Id(), "/") + pid := parts[len(parts)-1] // Prevent importing via project number, this will cause issues later matched, err := regexp.MatchString("^\\d+$", pid) if err != nil { @@ -488,6 +376,9 @@ func resourceProjectImportState(d *schema.ResourceData, meta interface{}) ([]*sc return nil, fmt.Errorf("Error importing project %q, please use project_id", pid) } + // Ensure the id format includes projects/ + d.SetId(fmt.Sprintf("projects/%s", pid)) + // Explicitly set to default as a workaround for `ImportStateVerify` tests, and so that users // don't see a diff immediately after 
import. d.Set("auto_create_network", true) @@ -530,7 +421,8 @@ func forceDeleteComputeNetwork(d *schema.ResourceData, config *Config, projectId } func updateProjectBillingAccount(d *schema.ResourceData, config *Config) error { - pid := d.Id() + parts := strings.Split(d.Id(), "/") + pid := parts[len(parts)-1] name := d.Get("billing_account").(string) ba := &cloudbilling.ProjectBillingInfo{} // If we're unlinking an existing billing account, an empty request does that, not an empty-string billing account. @@ -577,8 +469,10 @@ func deleteComputeNetwork(project, network string, config *Config) error { func readGoogleProject(d *schema.ResourceData, config *Config) (*cloudresourcemanager.Project, error) { var p *cloudresourcemanager.Project // Read the project + parts := strings.Split(d.Id(), "/") + pid := parts[len(parts)-1] err := retryTimeDuration(func() (reqErr error) { - p, reqErr = config.clientResourceManager.Projects.Get(d.Id()).Do() + p, reqErr = config.clientResourceManager.Projects.Get(pid).Do() return reqErr }, d.Timeout(schema.TimeoutRead)) return p, err @@ -610,6 +504,76 @@ func enableServiceUsageProjectServices(services []string, project string, config return waitForServiceUsageEnabledServices(services, project, config, timeout) } +func doEnableServicesRequest(services []string, project string, config *Config, timeout time.Duration) error { + var op *serviceusage.Operation + + err := retryTimeDuration(func() error { + var rerr error + if len(services) == 1 { + // BatchEnable returns an error for a single item, so just enable + // using service endpoint. + name := fmt.Sprintf("projects/%s/services/%s", project, services[0]) + req := &serviceusage.EnableServiceRequest{} + op, rerr = config.clientServiceUsage.Services.Enable(name, req).Do() + } else { + // Batch enable for multiple services. + name := fmt.Sprintf("projects/%s", project) + req := &serviceusage.BatchEnableServicesRequest{ServiceIds: services} + op, rerr = config.clientServiceUsage.Services.BatchEnable(name, req).Do() + } + return handleServiceUsageRetryableError(rerr) + }, timeout) + if err != nil { + return errwrap.Wrapf("failed to send enable services request: {{err}}", err) + } + // Poll for the API to return + waitErr := serviceUsageOperationWait(config, op, fmt.Sprintf("Enable Project %q Services: %+v", project, services)) + if waitErr != nil { + return waitErr + } + return nil +} + +// Retrieve a project's services from the API +func listCurrentlyEnabledServices(project string, config *Config, timeout time.Duration) (map[string]struct{}, error) { + // Verify project for services still exists + p, err := config.clientResourceManager.Projects.Get(project).Do() + if err != nil { + return nil, err + } + if p.LifecycleState == "DELETE_REQUESTED" { + // Construct a 404 error for handleNotFoundError + return nil, &googleapi.Error{ + Code: 404, + Message: "Project deletion was requested", + } + } + + log.Printf("[DEBUG] Listing enabled services for project %s", project) + apiServices := make(map[string]struct{}) + err = retryTimeDuration(func() error { + ctx := context.Background() + return config.clientServiceUsage.Services. + List(fmt.Sprintf("projects/%s", project)). + Fields("services/name,nextPageToken"). + Filter("state:ENABLED"). 
+ Pages(ctx, func(r *serviceusage.ListServicesResponse) error { + for _, v := range r.Services { + // services are returned as "projects/PROJECT/services/NAME" + name := GetResourceNameFromSelfLink(v.Name) + if _, ok := ignoredProjectServicesSet[name]; !ok { + apiServices[name] = struct{}{} + } + } + return nil + }) + }, timeout) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Failed to list enabled services for project %s: {{err}}", project), err) + } + return apiServices, nil +} + // waitForServiceUsageEnabledServices doesn't resend enable requests - it just // waits for service enablement status to propagate. Essentially, it waits until // all services show up as enabled when listing services on the project. diff --git a/third_party/terraform/resources/resource_google_project_iam_policy.go.erb b/third_party/terraform/resources/resource_google_project_iam_policy.go.erb index c2c48191adca..4ece61ad2f11 100644 --- a/third_party/terraform/resources/resource_google_project_iam_policy.go.erb +++ b/third_party/terraform/resources/resource_google_project_iam_policy.go.erb @@ -35,21 +35,6 @@ func resourceGoogleProjectIamPolicy() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "authoritative": { - Removed: "The authoritative field was removed. To ignore changes not managed by Terraform, use google_project_iam_binding and google_project_iam_member instead. See https://www.terraform.io/docs/providers/google/r/google_project_iam.html for more information.", - Type: schema.TypeBool, - Optional: true, - }, - "restore_policy": { - Removed: "This field was removed alongside the authoritative field. To ignore changes not managed by Terraform, use google_project_iam_binding and google_project_iam_member instead. See https://www.terraform.io/docs/providers/google/r/google_project_iam.html for more information.", - Type: schema.TypeString, - Computed: true, - }, - "disable_project": { - Removed: "This field was removed alongside the authoritative field. Use lifecycle.prevent_destroy instead.", - Type: schema.TypeBool, - Optional: true, - }, }, } } diff --git a/third_party/terraform/resources/resource_google_project_service.go b/third_party/terraform/resources/resource_google_project_service.go index 86c06e6f07f9..fe1a79ccf6bd 100644 --- a/third_party/terraform/resources/resource_google_project_service.go +++ b/third_party/terraform/resources/resource_google_project_service.go @@ -10,12 +10,15 @@ import ( "google.golang.org/api/serviceusage/v1" ) -var ignoredProjectServices = []string{"dataproc-control.googleapis.com", "source.googleapis.com", "stackdriverprovisioning.googleapis.com"} - // These services can only be enabled as a side-effect of enabling other services, // so don't bother storing them in the config or using them for diffing. +var ignoredProjectServices = []string{"dataproc-control.googleapis.com", "source.googleapis.com", "stackdriverprovisioning.googleapis.com"} var ignoredProjectServicesSet = golangSetFromStringSlice(ignoredProjectServices) +// Services that can't be user-specified but are otherwise valid. Renamed +// services should be added to this set during major releases. 
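Because "bigquery-json.googleapis.com" joins the banned list below while its replacement stays valid, the tightened ValidateFunc further down in this file rejects the old name at plan time. A minimal, hypothetical config pair illustrating the user-facing effect (the project id and resource labels are placeholders, not part of this patch):

# Fails validation once this change lands: the renamed service can no longer be named directly.
resource "google_project_service" "bigquery_old" {
  project = "my-project-id" # placeholder
  service = "bigquery-json.googleapis.com"
}

# Passes: reference the service by its new name instead.
resource "google_project_service" "bigquery" {
  project = "my-project-id" # placeholder
  service = "bigquery.googleapis.com"
}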
+var bannedProjectServices = []string{"bigquery-json.googleapis.com"} + // Service Renames // we expect when a service is renamed: // - both service names will continue to be able to be set @@ -41,7 +44,7 @@ var ignoredProjectServicesSet = golangSetFromStringSlice(ignoredProjectServices) // upon removal, we should disallow the old name from being used even if it's // not gone from the underlying API yet var renamedServices = map[string]string{ - "bigquery-json.googleapis.com": "bigquery.googleapis.com", // DEPRECATED FOR 3.0.0 + "bigquery-json.googleapis.com": "bigquery.googleapis.com", // DEPRECATED FOR 4.0.0. Originally for 3.0.0, but the migration did not happen server-side yet. } // renamedServices in reverse (new -> old) @@ -50,6 +53,8 @@ var renamedServicesByNewServiceNames = reverseStringMap(renamedServices) // renamedServices expressed as both old -> new and new -> old var renamedServicesByOldAndNewServiceNames = mergeStringMaps(renamedServices, renamedServicesByNewServiceNames) +const maxServiceUsageBatchSize = 20 + func resourceGoogleProjectService() *schema.Resource { return &schema.Resource{ Create: resourceGoogleProjectServiceCreate, @@ -73,7 +78,7 @@ func resourceGoogleProjectService() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: StringNotInSlice(ignoredProjectServices, false), + ValidateFunc: StringNotInSlice(append(ignoredProjectServices, bannedProjectServices...), false), }, "project": { Type: schema.TypeString, diff --git a/third_party/terraform/resources/resource_google_project_services.go b/third_party/terraform/resources/resource_google_project_services.go deleted file mode 100644 index 30c8a0eb11d5..000000000000 --- a/third_party/terraform/resources/resource_google_project_services.go +++ /dev/null @@ -1,335 +0,0 @@ -package google - -import ( - "context" - "fmt" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "google.golang.org/api/googleapi" - "google.golang.org/api/serviceusage/v1" - "log" - "strings" - "time" -) - -const maxServiceUsageBatchSize = 20 - -func resourceGoogleProjectServices() *schema.Resource { - return &schema.Resource{ - Create: resourceGoogleProjectServicesCreateUpdate, - Read: resourceGoogleProjectServicesRead, - Update: resourceGoogleProjectServicesCreateUpdate, - Delete: resourceGoogleProjectServicesDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - DeprecationMessage: "google_project_services is deprecated - many users reported " + - "issues with dependent services that were not resolvable. Please use google_project_service or the " + - "https://github.com/terraform-google-modules/terraform-google-project-factory/tree/master/modules/project_services" + - " module. It's recommended that you use a provider version of 2.13.0 or higher when you migrate so that requests are" + - " batched to the API, reducing the request rate. 
This resource will be removed in version 3.0.0.", - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Read: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "services": { - Type: schema.TypeSet, - Required: true, - Set: schema.HashString, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: StringNotInSlice(ignoredProjectServices, false), - }, - }, - "disable_on_destroy": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - }, - } -} - -func resourceGoogleProjectServicesCreateUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Get services from config - services, err := expandServiceUsageProjectServicesServices(d.Get("services"), d, config) - if err != nil { - return err - } - - log.Printf("[DEBUG]: Enabling Project Services for %s: %+v", d.Id(), services) - if err := setServiceUsageProjectEnabledServices(services, project, d, config); err != nil { - return fmt.Errorf("Error authoritatively enabling Project %s Services: %v", project, err) - } - log.Printf("[DEBUG]: Finished enabling Project Services for %s: %+v", d.Id(), services) - - d.SetId(project) - return resourceGoogleProjectServicesRead(d, meta) -} - -func resourceGoogleProjectServicesRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - enabledSet, err := listCurrentlyEnabledServices(d.Id(), config, d.Timeout(schema.TimeoutRead)) - if err != nil { - return err - } - - // use old services to set the correct renamed service names in state - s, _ := expandServiceUsageProjectServicesServices(d.Get("services"), d, config) - log.Printf("[DEBUG] Saw services in state on Read: %s ", s) - sset := golangSetFromStringSlice(s) - for ov, nv := range renamedServices { - _, ook := sset[ov] - _, nok := sset[nv] - - // preserve the values set in prior state if they're identical. If none - // were set, we delete the new value if it exists. By doing that that - // we only store the old value if the service is enabled, and no value - // if it isn't. 
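For configurations still using the resource removed here, a rough migration sketch in the direction the deprecation message points: one google_project_service per service. This assumes Terraform 0.12.6 or later for resource for_each; the service list, project id, and resource label are placeholders, not part of this patch.

locals {
  services = [
    "compute.googleapis.com",
    "iam.googleapis.com",
  ]
}

resource "google_project_service" "enabled" {
  for_each = toset(local.services)

  project = "my-project-id" # placeholder
  service = each.value
}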
- if ook && nok { - continue - } else if ook { - delete(enabledSet, nv) - } else if nok { - delete(enabledSet, ov) - } else { - delete(enabledSet, nv) - } - } - - services := stringSliceFromGolangSet(enabledSet) - - d.Set("project", d.Id()) - d.Set("services", flattenServiceUsageProjectServicesServices(services, d)) - - return nil -} - -func resourceGoogleProjectServicesDelete(d *schema.ResourceData, meta interface{}) error { - if disable := d.Get("disable_on_destroy"); !(disable.(bool)) { - log.Printf("[WARN] Project Services disable_on_destroy set to false, skip disabling services for %s.", d.Id()) - d.SetId("") - return nil - } - - config := meta.(*Config) - - // Get services from config - services, err := expandServiceUsageProjectServicesServices(d.Get("services"), d, config) - if err != nil { - return err - } - project := d.Id() - - log.Printf("[DEBUG]: Disabling Project Services %s: %+v", project, services) - for _, s := range services { - if err := disableServiceUsageProjectService(s, project, d, config, true); err != nil { - return fmt.Errorf("Unable to destroy google_project_services for %s: %s", d.Id(), err) - } - } - log.Printf("[DEBUG] Finished disabling Project Services %s: %+v", project, services) - - d.SetId("") - return nil -} - -// *Authoritatively* sets enabled services. -func setServiceUsageProjectEnabledServices(services []string, project string, d *schema.ResourceData, config *Config) error { - currentlyEnabled, err := listCurrentlyEnabledServices(project, config, d.Timeout(schema.TimeoutRead)) - if err != nil { - return err - } - - toEnable := map[string]struct{}{} - for _, srv := range services { - // We don't have to enable a service if it's already enabled. - if _, ok := currentlyEnabled[srv]; !ok { - toEnable[srv] = struct{}{} - } - } - - if len(toEnable) > 0 { - log.Printf("[DEBUG] Enabling services: %s", toEnable) - if err := BatchRequestEnableServices(toEnable, project, d, config); err != nil { - return fmt.Errorf("unable to enable Project Services %s (%+v): %s", project, services, err) - } - } else { - log.Printf("[DEBUG] No services to enable.") - } - - srvSet := golangSetFromStringSlice(services) - - srvSetWithRenames := map[string]struct{}{} - - // we'll always list both names for renamed services, so allow both forms if - // we see both. - for k := range srvSet { - srvSetWithRenames[k] = struct{}{} - if v, ok := renamedServicesByOldAndNewServiceNames[k]; ok { - srvSetWithRenames[v] = struct{}{} - } - } - - for srv := range currentlyEnabled { - // Disable any services that are currently enabled for project but are not - // in our list of acceptable services. - if _, ok := srvSetWithRenames[srv]; !ok { - // skip deleting services by their new names and prefer the old name. - if _, ok := renamedServicesByNewServiceNames[srv]; ok { - continue - } - - log.Printf("[DEBUG] Disabling project %s service %s", project, srv) - err := disableServiceUsageProjectService(srv, project, d, config, true) - if err != nil { - log.Printf("[DEBUG] Saw error %s deleting service %s", err, srv) - - // if we got the right error and the service is renamed, delete by the new name - if n, ok := renamedServices[srv]; ok && strings.Contains(err.Error(), "not found or permission denied.") { - log.Printf("[DEBUG] Failed to delete service %s, it doesn't exist. 
Trying %s", srv, n) - err = disableServiceUsageProjectService(n, project, d, config, true) - if err == nil { - return nil - } - } - - return fmt.Errorf("unable to disable unwanted Project Service %s %s): %s", project, srv, err) - } - } - } - return nil -} - -func doEnableServicesRequest(services []string, project string, config *Config, timeout time.Duration) error { - var op *serviceusage.Operation - - err := retryTimeDuration(func() error { - var rerr error - if len(services) == 1 { - // BatchEnable returns an error for a single item, so just enable - // using service endpoint. - name := fmt.Sprintf("projects/%s/services/%s", project, services[0]) - req := &serviceusage.EnableServiceRequest{} - op, rerr = config.clientServiceUsage.Services.Enable(name, req).Do() - } else { - // Batch enable for multiple services. - name := fmt.Sprintf("projects/%s", project) - req := &serviceusage.BatchEnableServicesRequest{ServiceIds: services} - op, rerr = config.clientServiceUsage.Services.BatchEnable(name, req).Do() - } - return handleServiceUsageRetryableError(rerr) - }, timeout) - if err != nil { - return errwrap.Wrapf("failed to send enable services request: {{err}}", err) - } - - // Poll for the API to return - waitErr := serviceUsageOperationWait(config, op, fmt.Sprintf("Enable Project %q Services: %+v", project, services)) - if waitErr != nil { - return waitErr - } - return nil -} - -func handleServiceUsageRetryableError(err error) error { - if err == nil { - return nil - } - if gerr, ok := err.(*googleapi.Error); ok { - if (gerr.Code == 400 || gerr.Code == 412) && gerr.Message == "Precondition check failed." { - return &googleapi.Error{ - Code: 503, - Message: "api returned \"precondition failed\" while enabling service", - } - } - } - return err -} - -func flattenServiceUsageProjectServicesServices(v interface{}, d *schema.ResourceData) interface{} { - if v == nil { - return v - } - if strV, ok := v.([]string); ok { - v = convertStringArrToInterface(strV) - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func expandServiceUsageProjectServicesServices(v interface{}, d TerraformResourceData, config *Config) ([]string, error) { - if v == nil { - return nil, nil - } - return convertStringArr(v.(*schema.Set).List()), nil -} - -// Retrieve a project's services from the API -// if a service has been renamed, this function will list both the old and new -// forms of the service. LIST responses are expected to return only the old or -// new form, but we'll always return both. -func listCurrentlyEnabledServices(project string, config *Config, timeout time.Duration) (map[string]struct{}, error) { - // Verify project for services still exists - p, err := config.clientResourceManager.Projects.Get(project).Do() - if err != nil { - return nil, err - } - if p.LifecycleState == "DELETE_REQUESTED" { - // Construct a 404 error for handleNotFoundError - return nil, &googleapi.Error{ - Code: 404, - Message: "Project deletion was requested", - } - } - - log.Printf("[DEBUG] Listing enabled services for project %s", project) - apiServices := make(map[string]struct{}) - err = retryTimeDuration(func() error { - ctx := context.Background() - return config.clientServiceUsage.Services. - List(fmt.Sprintf("projects/%s", project)). - Fields("services/name,nextPageToken"). - Filter("state:ENABLED"). 
- Pages(ctx, func(r *serviceusage.ListServicesResponse) error { - for _, v := range r.Services { - // services are returned as "projects/{{project}}/services/{{name}}" - name := GetResourceNameFromSelfLink(v.Name) - - // if name not in ignoredProjectServicesSet - if _, ok := ignoredProjectServicesSet[name]; !ok { - apiServices[name] = struct{}{} - - // if a service has been renamed, set both. We'll deal - // with setting the right values later. - if v, ok := renamedServicesByOldAndNewServiceNames[name]; ok { - log.Printf("[DEBUG] Adding service alias for %s to enabled services: %s", name, v) - apiServices[v] = struct{}{} - } - } - } - return nil - }) - }, timeout) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Failed to list enabled services for project %s: {{err}}", project), err) - } - return apiServices, nil -} diff --git a/third_party/terraform/resources/resource_google_service_account.go b/third_party/terraform/resources/resource_google_service_account.go index 60f57f69b981..e1ab14fad2b4 100644 --- a/third_party/terraform/resources/resource_google_service_account.go +++ b/third_party/terraform/resources/resource_google_service_account.go @@ -51,11 +51,6 @@ func resourceGoogleServiceAccount() *schema.Resource { Optional: true, ForceNew: true, }, - "policy_data": { - Type: schema.TypeString, - Optional: true, - Removed: "Use the 'google_service_account_iam_policy' resource to define policies for a service account", - }, }, } } diff --git a/third_party/terraform/resources/resource_iam_audit_config.go b/third_party/terraform/resources/resource_iam_audit_config.go index 3dbbd32979cd..bec5b1634d9d 100644 --- a/third_party/terraform/resources/resource_iam_audit_config.go +++ b/third_party/terraform/resources/resource_iam_audit_config.go @@ -44,9 +44,9 @@ func ResourceIamAuditConfig(parentSpecificSchema map[string]*schema.Schema, newU func ResourceIamAuditConfigWithBatching(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc, enableBatching bool) *schema.Resource { return &schema.Resource{ - Create: resourceIamAuditConfigCreate(newUpdaterFunc, enableBatching), + Create: resourceIamAuditConfigCreateUpdate(newUpdaterFunc, enableBatching), Read: resourceIamAuditConfigRead(newUpdaterFunc), - Update: resourceIamAuditConfigUpdate(newUpdaterFunc, enableBatching), + Update: resourceIamAuditConfigCreateUpdate(newUpdaterFunc, enableBatching), Delete: resourceIamAuditConfigDelete(newUpdaterFunc, enableBatching), Schema: mergeSchemas(iamAuditConfigSchema, parentSpecificSchema), Importer: &schema.ResourceImporter{ @@ -55,34 +55,6 @@ func ResourceIamAuditConfigWithBatching(parentSpecificSchema map[string]*schema. 
} } -func resourceIamAuditConfigCreate(newUpdaterFunc newResourceIamUpdaterFunc, enableBatching bool) schema.CreateFunc { - return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - ac := getResourceIamAuditConfig(d) - modifyF := func(ep *cloudresourcemanager.Policy) error { - ep.AuditConfigs = mergeAuditConfigs(append(ep.AuditConfigs, ac)) - return nil - } - - if enableBatching { - err = BatchRequestModifyIamPolicy(updater, modifyF, config, fmt.Sprintf( - "Add audit config for service %s on resource %q", ac.Service, updater.DescribeResource())) - } else { - err = iamPolicyReadModifyWrite(updater, modifyF) - } - if err != nil { - return err - } - d.SetId(updater.GetResourceId() + "/audit_config/" + ac.Service) - return resourceIamAuditConfigRead(newUpdaterFunc)(d, meta) - } -} - func resourceIamAuditConfigRead(newUpdaterFunc newResourceIamUpdaterFunc) schema.ReadFunc { return func(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -150,7 +122,7 @@ func iamAuditConfigImport(resourceIdParser resourceIdParserFunc) schema.StateFun } } -func resourceIamAuditConfigUpdate(newUpdaterFunc newResourceIamUpdaterFunc, enableBatching bool) schema.UpdateFunc { +func resourceIamAuditConfigCreateUpdate(newUpdaterFunc newResourceIamUpdaterFunc, enableBatching bool) func(*schema.ResourceData, interface{}) error { return func(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) updater, err := newUpdaterFunc(d, config) @@ -173,7 +145,7 @@ func resourceIamAuditConfigUpdate(newUpdaterFunc newResourceIamUpdaterFunc, enab if err != nil { return err } - + d.SetId(updater.GetResourceId() + "/audit_config/" + ac.Service) return resourceIamAuditConfigRead(newUpdaterFunc)(d, meta) } } diff --git a/third_party/terraform/resources/resource_sql_database_instance.go b/third_party/terraform/resources/resource_sql_database_instance.go index dbf55492eecd..27217f7fae22 100644 --- a/third_party/terraform/resources/resource_sql_database_instance.go +++ b/third_party/terraform/resources/resource_sql_database_instance.go @@ -30,11 +30,55 @@ var sqlDatabaseAuthorizedNetWorkSchemaElem *schema.Resource = &schema.Resource{ }, "value": { Type: schema.TypeString, - Optional: true, + Required: true, }, }, } +var ( + backupConfigurationKeys = []string{ + "settings.0.backup_configuration.0.binary_log_enabled", + "settings.0.backup_configuration.0.enabled", + "settings.0.backup_configuration.0.start_time", + "settings.0.backup_configuration.0.location", + } + + ipConfigurationKeys = []string{ + "settings.0.ip_configuration.0.authorized_networks", + "settings.0.ip_configuration.0.ipv4_enabled", + "settings.0.ip_configuration.0.require_ssl", + "settings.0.ip_configuration.0.private_network", + } + + maintenanceWindowKeys = []string{ + "settings.0.maintenance_window.0.day", + "settings.0.maintenance_window.0.hour", + "settings.0.maintenance_window.0.update_track", + } + + serverCertsKeys = []string{ + "server_ca_cert.0.cert", + "server_ca_cert.0.common_name", + "server_ca_cert.0.create_time", + "server_ca_cert.0.expiration_time", + "server_ca_cert.0.sha1_fingerprint", + } + + replicaConfigurationKeys = []string{ + "replica_configuration.0.ca_certificate", + "replica_configuration.0.client_certificate", + "replica_configuration.0.client_key", + "replica_configuration.0.connect_retry_interval", + "replica_configuration.0.dump_file_path", + "replica_configuration.0.failover_target", + 
"replica_configuration.0.master_heartbeat_period", + "replica_configuration.0.password", + "replica_configuration.0.ssl_cipher", + "replica_configuration.0.username", + "replica_configuration.0.verify_server_certificate", + } +) + func resourceSqlDatabaseInstance() *schema.Resource { return &schema.Resource{ Create: resourceSqlDatabaseInstanceCreate, @@ -105,22 +149,26 @@ func resourceSqlDatabaseInstance() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "binary_log_enabled": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, }, "enabled": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, }, "start_time": { Type: schema.TypeString, Optional: true, // start_time is randomly assigned if not set - Computed: true, + Computed: true, + AtLeastOneOf: backupConfigurationKeys, }, "location": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, }, }, }, @@ -137,11 +185,11 @@ func resourceSqlDatabaseInstance() *schema.Resource { Schema: map[string]*schema.Schema{ "value": { Type: schema.TypeString, - Optional: true, + Required: true, }, "name": { Type: schema.TypeString, - Optional: true, + Required: true, }, }, }, @@ -172,26 +220,30 @@ func resourceSqlDatabaseInstance() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "authorized_networks": { - Type: schema.TypeSet, - Optional: true, - Set: schema.HashResource(sqlDatabaseAuthorizedNetWorkSchemaElem), - Elem: sqlDatabaseAuthorizedNetWorkSchemaElem, + Type: schema.TypeSet, + Optional: true, + Set: schema.HashResource(sqlDatabaseAuthorizedNetWorkSchemaElem), + Elem: sqlDatabaseAuthorizedNetWorkSchemaElem, + AtLeastOneOf: ipConfigurationKeys, }, "ipv4_enabled": { Type: schema.TypeBool, Optional: true, // Defaults differ between first and second gen instances - Computed: true, + Computed: true, + AtLeastOneOf: ipConfigurationKeys, }, "require_ssl": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: ipConfigurationKeys, }, "private_network": { Type: schema.TypeString, Optional: true, ValidateFunc: orEmpty(validateRegexp(privateNetworkLinkRegex)), DiffSuppressFunc: compareSelfLinkRelativePaths, + AtLeastOneOf: ipConfigurationKeys, }, }, }, @@ -204,12 +256,14 @@ func resourceSqlDatabaseInstance() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "follow_gae_application": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"settings.0.location_preference.0.follow_gae_application", "settings.0.location_preference.0.zone"}, }, "zone": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"settings.0.location_preference.0.follow_gae_application", "settings.0.location_preference.0.zone"}, }, }, }, @@ -224,15 +278,18 @@ func resourceSqlDatabaseInstance() *schema.Resource { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(1, 7), + AtLeastOneOf: maintenanceWindowKeys, }, "hour": { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(0, 23), + AtLeastOneOf: maintenanceWindowKeys, }, "update_track": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: maintenanceWindowKeys, 
}, }, }, @@ -334,60 +391,71 @@ func resourceSqlDatabaseInstance() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "ca_certificate": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, "client_certificate": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, "client_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, "connect_retry_interval": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, "dump_file_path": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, "failover_target": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, "master_heartbeat_period": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, "password": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + AtLeastOneOf: replicaConfigurationKeys, }, "ssl_cipher": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, "username": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, "verify_server_certificate": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, }, }, @@ -399,24 +467,29 @@ func resourceSqlDatabaseInstance() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "cert": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + AtLeastOneOf: serverCertsKeys, }, "common_name": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + AtLeastOneOf: serverCertsKeys, }, "create_time": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + AtLeastOneOf: serverCertsKeys, }, "expiration_time": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + AtLeastOneOf: serverCertsKeys, }, "sha1_fingerprint": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + AtLeastOneOf: serverCertsKeys, }, }, }, @@ -503,7 +576,11 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error, failed to create instance %s: %s", instance.Name, err) } - d.SetId(instance.Name) + id, err := replaceVars(d, config, "projects/{{project}}/instances/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) err = 
sqlAdminOperationWaitTime(config.clientSqlAdmin, op, project, "Create Instance", int(d.Timeout(schema.TimeoutCreate).Minutes())) if err != nil { @@ -708,7 +785,7 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e var instance *sqladmin.DatabaseInstance err = retryTimeDuration(func() (rerr error) { - instance, rerr = config.clientSqlAdmin.Instances.Get(project, d.Id()).Do() + instance, rerr = config.clientSqlAdmin.Instances.Get(project, d.Get("name").(string)).Do() return rerr }, d.Timeout(schema.TimeoutRead), isSqlOperationInProgressError) if err != nil { @@ -843,7 +920,7 @@ func resourceSqlDatabaseInstanceImport(d *schema.ResourceData, meta interface{}) } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/instances/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/third_party/terraform/resources/resource_sql_ssl_cert.go b/third_party/terraform/resources/resource_sql_ssl_cert.go index edfbaba623ff..5afce1f0a18f 100644 --- a/third_party/terraform/resources/resource_sql_ssl_cert.go +++ b/third_party/terraform/resources/resource_sql_ssl_cert.go @@ -105,7 +105,7 @@ func resourceSqlSslCertCreate(d *schema.ResourceData, meta interface{}) error { } fingerprint := resp.ClientCert.CertInfo.Sha1Fingerprint - d.SetId(fmt.Sprintf("%s/%s", instance, fingerprint)) + d.SetId(fmt.Sprintf("projects/%s/instances/%s/sslCerts/%s", project, instance, fingerprint)) d.Set("sha1_fingerprint", fingerprint) // The private key is only returned on the initial insert so set it here. @@ -148,7 +148,7 @@ func resourceSqlSslCertRead(d *schema.ResourceData, meta interface{}) error { d.Set("create_time", sslCerts.CreateTime) d.Set("expiration_time", sslCerts.ExpirationTime) - d.SetId(fmt.Sprintf("%s/%s", instance, fingerprint)) + d.SetId(fmt.Sprintf("projects/%s/instances/%s/sslCerts/%s", project, instance, fingerprint)) return nil } diff --git a/third_party/terraform/resources/resource_storage_bucket.go b/third_party/terraform/resources/resource_storage_bucket.go index 683cd690a246..c924fef91609 100644 --- a/third_party/terraform/resources/resource_storage_bucket.go +++ b/third_party/terraform/resources/resource_storage_bucket.go @@ -32,7 +32,8 @@ func resourceStorageBucket() *schema.Resource { State: resourceStorageBucketStateImporter, }, CustomizeDiff: customdiff.All( - customdiff.ForceNewIfChange("retention_policy.0.is_locked", isPolicyLocked)), + customdiff.ForceNewIfChange("retention_policy.0.is_locked", isPolicyLocked), + ), Schema: map[string]*schema.Schema{ "name": { @@ -82,13 +83,6 @@ func resourceStorageBucket() *schema.Resource { }, }, - "predefined_acl": { - Type: schema.TypeString, - Removed: "Please use resource \"storage_bucket_acl.predefined_acl\" instead.", - Optional: true, - ForceNew: true, - }, - "project": { Type: schema.TypeString, Optional: true, @@ -154,10 +148,9 @@ func resourceStorageBucket() *schema.Resource { Optional: true, }, "is_live": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Deprecated: "Please use `with_state` instead", + Type: schema.TypeBool, + Optional: true, + Removed: "Please use `with_state` instead", }, "with_state": { Type: schema.TypeString, @@ -190,8 +183,7 @@ func resourceStorageBucket() *schema.Resource { Schema: map[string]*schema.Schema{ "enabled": { Type: schema.TypeBool, - Optional: true, - Default: false, + Required: true, }, }, }, @@ -204,12 +196,14 @@ func 
resourceStorageBucket() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "main_page_suffix": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"website.0.not_found_page", "website.0.main_page_suffix"}, }, "not_found_page": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"website.0.main_page_suffix", "website.0.not_found_page"}, }, }, }, @@ -886,10 +880,8 @@ func flattenBucketLifecycleRuleCondition(condition *storage.BucketLifecycleRuleC } else { if *condition.IsLive { ruleCondition["with_state"] = "LIVE" - ruleCondition["is_live"] = true } else { ruleCondition["with_state"] = "ARCHIVED" - ruleCondition["is_live"] = false } } return ruleCondition @@ -1052,18 +1044,9 @@ func expandStorageBucketLifecycleRuleCondition(v interface{}) (*storage.BucketLi transformed.IsLive = googleapi.Bool(true) case "ARCHIVED": transformed.IsLive = googleapi.Bool(false) - case "ANY": + case "ANY", "": // This is unnecessary, but set explicitly to nil for readability. transformed.IsLive = nil - case "": - // Support deprecated `is_live` behavior - // is_live was always read (ok always true) - // so it can only support LIVE/ARCHIVED. - // TODO: When removing is_live, combine this case with case "ANY" - if v, ok := condition["is_live"]; ok { - log.Printf("[WARN] using deprecated field `is_live` because with_state is empty") - transformed.IsLive = googleapi.Bool(v.(bool)) - } default: return nil, fmt.Errorf("unexpected value %q for condition.with_state", withStateV.(string)) } @@ -1119,32 +1102,8 @@ func resourceGCSBucketLifecycleRuleConditionHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%s-", v.(string))) } - // Note that we are keeping the boolean notation from when is_live was - // the only field (i.e. not deprecated) in order to prevent a diff from - // hash key. - // There are three possible states for the actual condition - // and correspond to the following hash codes: - // - // 1. LIVE only: "true-" - // Applies for one of: - // with_state = "" && is_live = true - // with_state = "LIVE" - // - // 2. ARCHIVED only: "false-" - // Applies for one of: - // with_state = "" && is_live = false - // with_state = "ARCHIVED" - // - // 3. ANY (i.e. 
LIVE and ARCHIVED): "" - // Applies for one of: - // with_state = "ANY" - withStateV, withStateOk := m["with_state"] - if !withStateOk || withStateV.(string) == "" { - if isLiveV, ok := m["is_live"]; ok { - buf.WriteString(fmt.Sprintf("%t-", isLiveV.(bool))) - } - } else if withStateOk { + if withStateOk { switch withStateV.(string) { case "LIVE": buf.WriteString(fmt.Sprintf("%t-", true)) diff --git a/third_party/terraform/resources/resource_storage_bucket_object.go b/third_party/terraform/resources/resource_storage_bucket_object.go index 31a000ecdbb6..96c0a6b7bfec 100644 --- a/third_party/terraform/resources/resource_storage_bucket_object.go +++ b/third_party/terraform/resources/resource_storage_bucket_object.go @@ -85,13 +85,6 @@ func resourceStorageBucketObject() *schema.Resource { Computed: true, }, - "predefined_acl": { - Type: schema.TypeString, - Removed: "Please use resource \"storage_object_acl.predefined_acl\" instead.", - Optional: true, - ForceNew: true, - }, - "source": { Type: schema.TypeString, Optional: true, diff --git a/third_party/terraform/resources/resource_storage_transfer_job.go b/third_party/terraform/resources/resource_storage_transfer_job.go index cc8c18cea94d..494e330b77da 100644 --- a/third_party/terraform/resources/resource_storage_transfer_job.go +++ b/third_party/terraform/resources/resource_storage_transfer_job.go @@ -11,6 +11,27 @@ import ( "time" ) +var ( + objectConditionsKeys = []string{ + "transfer_spec.0.object_conditions.0.min_time_elapsed_since_last_modification", + "transfer_spec.0.object_conditions.0.max_time_elapsed_since_last_modification", + "transfer_spec.0.object_conditions.0.include_prefixes", + "transfer_spec.0.object_conditions.0.exclude_prefixes", + } + + transferOptionsKeys = []string{ + "transfer_spec.0.transfer_options.0.overwrite_objects_already_existing_in_sink", + "transfer_spec.0.transfer_options.0.delete_objects_unique_in_sink", + "transfer_spec.0.transfer_options.0.delete_objects_from_source_after_transfer", + } + + transferSpecDataSourceKeys = []string{ + "transfer_spec.0.gcs_data_source", + "transfer_spec.0.aws_s3_data_source", + "transfer_spec.0.http_data_source", + } +) + func resourceStorageTransferJob() *schema.Resource { return &schema.Resource{ Create: resourceStorageTransferJobCreate, @@ -52,25 +73,25 @@ func resourceStorageTransferJob() *schema.Resource { Elem: gcsDataSchema(), }, "gcs_data_source": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: gcsDataSchema(), - ConflictsWith: []string{"transfer_spec.aws_s3_data_source", "transfer_spec.http_data_source"}, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: gcsDataSchema(), + ExactlyOneOf: transferSpecDataSourceKeys, }, "aws_s3_data_source": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: awsS3DataSchema(), - ConflictsWith: []string{"transfer_spec.gcs_data_source", "transfer_spec.http_data_source"}, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: awsS3DataSchema(), + ExactlyOneOf: transferSpecDataSourceKeys, }, "http_data_source": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: httpDataSchema(), - ConflictsWith: []string{"transfer_spec.aws_s3_data_source", "transfer_spec.gcs_data_source"}, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: httpDataSchema(), + ExactlyOneOf: transferSpecDataSourceKeys, }, }, }, @@ -139,23 +160,27 @@ func objectConditionsSchema() *schema.Schema { Type: schema.TypeString, ValidateFunc: validateDuration(), Optional: true, + 
AtLeastOneOf: objectConditionsKeys, }, "max_time_elapsed_since_last_modification": { Type: schema.TypeString, ValidateFunc: validateDuration(), Optional: true, + AtLeastOneOf: objectConditionsKeys, }, "include_prefixes": { - Type: schema.TypeList, - Optional: true, + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: objectConditionsKeys, Elem: &schema.Schema{ MaxItems: 1000, Type: schema.TypeString, }, }, "exclude_prefixes": { - Type: schema.TypeList, - Optional: true, + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: objectConditionsKeys, Elem: &schema.Schema{ MaxItems: 1000, Type: schema.TypeString, @@ -174,17 +199,20 @@ func transferOptionsSchema() *schema.Schema { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "overwrite_objects_already_existing_in_sink": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: transferOptionsKeys, }, "delete_objects_unique_in_sink": { Type: schema.TypeBool, Optional: true, + AtLeastOneOf: transferOptionsKeys, ConflictsWith: []string{"transfer_spec.transfer_options.delete_objects_from_source_after_transfer"}, }, "delete_objects_from_source_after_transfer": { Type: schema.TypeBool, Optional: true, + AtLeastOneOf: transferOptionsKeys, ConflictsWith: []string{"transfer_spec.transfer_options.delete_objects_unique_in_sink"}, }, }, diff --git a/third_party/terraform/tests/data_source_compute_lb_ip_ranges_test.go b/third_party/terraform/tests/data_source_compute_lb_ip_ranges_test.go index 2bba95ed2988..8950d39b3874 100644 --- a/third_party/terraform/tests/data_source_compute_lb_ip_ranges_test.go +++ b/third_party/terraform/tests/data_source_compute_lb_ip_ranges_test.go @@ -30,5 +30,6 @@ func TestAccDataSourceComputeLbIpRanges_basic(t *testing.T) { } const testAccComputeLbIpRangesConfig = ` -data "google_compute_lb_ip_ranges" "some" {} +data "google_compute_lb_ip_ranges" "some" { +} ` diff --git a/third_party/terraform/tests/data_source_container_registry_test.go b/third_party/terraform/tests/data_source_container_registry_test.go index c0d1577770a3..d10a8caa79fe 100644 --- a/third_party/terraform/tests/data_source_container_registry_test.go +++ b/third_party/terraform/tests/data_source_container_registry_test.go @@ -67,26 +67,29 @@ func TestDataSourceGoogleContainerRegistryImage(t *testing.T) { const testAccCheckGoogleContainerRegistryImage_basic = ` data "google_container_registry_image" "test" { - project = "foo" - region = "bar" - name = "baz" + project = "foo" + region = "bar" + name = "baz" } + data "google_container_registry_image" "test2" { - project = "foo" - region = "bar" - name = "baz" - tag = "qux" + project = "foo" + region = "bar" + name = "baz" + tag = "qux" } + data "google_container_registry_image" "test3" { - project = "foo" - region = "bar" - name = "baz" - digest = "1234" + project = "foo" + region = "bar" + name = "baz" + digest = "1234" } + data "google_container_registry_image" "testScoped" { - project = "example.com:foo" - region = "bar" - name = "baz" - tag = "qux" + project = "example.com:foo" + region = "bar" + name = "baz" + tag = "qux" } ` diff --git a/third_party/terraform/tests/data_source_dns_managed_zone_test.go b/third_party/terraform/tests/data_source_dns_managed_zone_test.go index b2b48ef2c760..34d014f504d6 100644 --- a/third_party/terraform/tests/data_source_dns_managed_zone_test.go +++ b/third_party/terraform/tests/data_source_dns_managed_zone_test.go @@ -27,13 +27,13 @@ func TestAccDataSourceDnsManagedZone_basic(t *testing.T) { func 
testAccDataSourceDnsManagedZone_basic() string { return fmt.Sprintf(` resource "google_dns_managed_zone" "foo" { - name = "qa-zone-%s" - dns_name = "qa.tf-test.club." - description = "QA DNS zone" + name = "qa-zone-%s" + dns_name = "qa.tf-test.club." + description = "QA DNS zone" } data "google_dns_managed_zone" "qa" { - name = "${google_dns_managed_zone.foo.name}" + name = google_dns_managed_zone.foo.name } `, acctest.RandString(10)) } diff --git a/third_party/terraform/tests/data_source_google_active_folder_test.go b/third_party/terraform/tests/data_source_google_active_folder_test.go index 8e18b1f2451e..b887ccbde833 100644 --- a/third_party/terraform/tests/data_source_google_active_folder_test.go +++ b/third_party/terraform/tests/data_source_google_active_folder_test.go @@ -82,13 +82,14 @@ func testAccDataSourceGoogleActiveFolderCheck(data_source_name string, resource_ func testAccDataSourceGoogleActiveFolderConfig(parent string, displayName string) string { return fmt.Sprintf(` resource "google_folder" "foobar" { - parent = "%s" + parent = "%s" display_name = "%s" } data "google_active_folder" "my_folder" { - parent = "${google_folder.foobar.parent}" - display_name = "${google_folder.foobar.display_name}" + parent = google_folder.foobar.parent + display_name = google_folder.foobar.display_name } + `, parent, displayName) } diff --git a/third_party/terraform/tests/data_source_google_billing_account_test.go b/third_party/terraform/tests/data_source_google_billing_account_test.go index 5cd9ade7ff4c..749d6d3b3790 100644 --- a/third_party/terraform/tests/data_source_google_billing_account_test.go +++ b/third_party/terraform/tests/data_source_google_billing_account_test.go @@ -85,20 +85,23 @@ func testAccCheckGoogleBillingAccount_byName(name string) string { return fmt.Sprintf(` data "google_billing_account" "acct" { billing_account = "%s" -}`, name) +} +`, name) } func testAccCheckGoogleBillingAccount_byNameClosed(name string) string { return fmt.Sprintf(` data "google_billing_account" "acct" { billing_account = "%s" - open = false -}`, name) + open = false +} +`, name) } func testAccCheckGoogleBillingAccount_byDisplayName(name string) string { return fmt.Sprintf(` data "google_billing_account" "acct" { display_name = "%s" -}`, name) +} +`, name) } diff --git a/third_party/terraform/tests/data_source_google_client_openid_userinfo_test.go b/third_party/terraform/tests/data_source_google_client_openid_userinfo_test.go index 0f7fc42578ad..f293f8b17fda 100644 --- a/third_party/terraform/tests/data_source_google_client_openid_userinfo_test.go +++ b/third_party/terraform/tests/data_source_google_client_openid_userinfo_test.go @@ -24,23 +24,5 @@ func TestAccDataSourceGoogleClientOpenIDUserinfo_basic(t *testing.T) { } const testAccCheckGoogleClientOpenIDUserinfo_basic = ` -provider "google" { - alias = "google-scoped" - - # We need to add an additional scope to test this; because our tests rely on - # every env var being set, we can just add an alias with the appropriate - # scopes. This will fail if someone uses an access token instead of creds - # unless they've configured the userinfo.email scope. 
- scopes = [ - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/userinfo.email", - ] -} - -data "google_client_openid_userinfo" "me" { - provider = "google.google-scoped" -} +data "google_client_openid_userinfo" "me" {} ` diff --git a/third_party/terraform/tests/data_source_google_cloudfunctions_function_test.go b/third_party/terraform/tests/data_source_google_cloudfunctions_function_test.go index b569742c48b5..9c473fa8c9b3 100644 --- a/third_party/terraform/tests/data_source_google_cloudfunctions_function_test.go +++ b/third_party/terraform/tests/data_source_google_cloudfunctions_function_test.go @@ -43,23 +43,24 @@ resource "google_storage_bucket" "bucket" { resource "google_storage_bucket_object" "archive" { name = "index.zip" - bucket = "${google_storage_bucket.bucket.name}" + bucket = google_storage_bucket.bucket.name source = "%s" } resource "google_cloudfunctions_function" "function_http" { name = "%s-http" + runtime = "nodejs8" description = "test function" available_memory_mb = 128 - source_archive_bucket = "${google_storage_bucket.bucket.name}" - source_archive_object = "${google_storage_bucket_object.archive.name}" + source_archive_bucket = google_storage_bucket.bucket.name + source_archive_object = google_storage_bucket_object.archive.name trigger_http = true timeout = 61 entry_point = "helloGET" } data "google_cloudfunctions_function" "function_http" { - name = "${google_cloudfunctions_function.function_http.name}" + name = google_cloudfunctions_function.function_http.name } `, bucketName, zipFilePath, functionName) } diff --git a/third_party/terraform/tests/data_source_google_compute_address_test.go b/third_party/terraform/tests/data_source_google_compute_address_test.go index b8631cf25921..7e618003c7c6 100644 --- a/third_party/terraform/tests/data_source_google_compute_address_test.go +++ b/third_party/terraform/tests/data_source_google_compute_address_test.go @@ -161,11 +161,11 @@ func testAccCheckDataSourceComputeAddressDestroy(resource_name string) resource. 
func testAccDataSourceComputeAddressConfig(rsName, dsName string) string { return fmt.Sprintf(` resource "google_compute_address" "%s" { - name = "address-test" + name = "address-test" } data "google_compute_address" "%s" { - name = "${google_compute_address.%s.name}" + name = google_compute_address.%s.name } `, rsName, dsName, rsName) } diff --git a/third_party/terraform/tests/data_source_google_compute_backend_service_test.go b/third_party/terraform/tests/data_source_google_compute_backend_service_test.go index 50ba234348d0..6009f3b85368 100644 --- a/third_party/terraform/tests/data_source_google_compute_backend_service_test.go +++ b/third_party/terraform/tests/data_source_google_compute_backend_service_test.go @@ -32,7 +32,7 @@ func testAccDataSourceComputeBackendService_basic(serviceName, checkName string) resource "google_compute_backend_service" "foobar" { name = "%s" description = "foobar backend service" - health_checks = ["${google_compute_http_health_check.zero.self_link}"] + health_checks = [google_compute_http_health_check.zero.self_link] } resource "google_compute_http_health_check" "zero" { @@ -43,7 +43,7 @@ resource "google_compute_http_health_check" "zero" { } data "google_compute_backend_service" "baz" { - name = "${google_compute_backend_service.foobar.name}" + name = google_compute_backend_service.foobar.name } `, serviceName, checkName) } diff --git a/third_party/terraform/tests/data_source_google_compute_forwarding_rule_test.go b/third_party/terraform/tests/data_source_google_compute_forwarding_rule_test.go index 110aab1fae1b..8205113a01b1 100644 --- a/third_party/terraform/tests/data_source_google_compute_forwarding_rule_test.go +++ b/third_party/terraform/tests/data_source_google_compute_forwarding_rule_test.go @@ -83,20 +83,22 @@ func testAccDataSourceGoogleForwardingRuleCheck(data_source_name string, resourc func testAccDataSourceGoogleForwardingRuleConfig(poolName, ruleName string) string { return fmt.Sprintf(` - resource "google_compute_target_pool" "foobar-tp" { - description = "Resource created for Terraform acceptance testing" - instances = ["us-central1-a/foo", "us-central1-b/bar"] - name = "%s" - } - resource "google_compute_forwarding_rule" "foobar-fr" { - description = "Resource created for Terraform acceptance testing" - ip_protocol = "UDP" - name = "%s" - port_range = "80-81" - target = "${google_compute_target_pool.foobar-tp.self_link}" - } - data "google_compute_forwarding_rule" "my_forwarding_rule" { - name = "${google_compute_forwarding_rule.foobar-fr.name}" - } +resource "google_compute_target_pool" "foobar-tp" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "%s" +} + +resource "google_compute_forwarding_rule" "foobar-fr" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "UDP" + name = "%s" + port_range = "80-81" + target = google_compute_target_pool.foobar-tp.self_link +} + +data "google_compute_forwarding_rule" "my_forwarding_rule" { + name = google_compute_forwarding_rule.foobar-fr.name +} `, poolName, ruleName) } diff --git a/third_party/terraform/tests/data_source_google_compute_global_address_test.go b/third_party/terraform/tests/data_source_google_compute_global_address_test.go index 3e93935d271b..3f9ff0bc5420 100644 --- a/third_party/terraform/tests/data_source_google_compute_global_address_test.go +++ b/third_party/terraform/tests/data_source_google_compute_global_address_test.go @@ -77,11 +77,11 @@ func 
testAccDataSourceComputeGlobalAddressCheck(data_source_name string, resourc func testAccDataSourceComputeGlobalAddressConfig(rsName, dsName string) string { return fmt.Sprintf(` resource "google_compute_global_address" "%s" { - name = "address-test" + name = "address-test" } data "google_compute_global_address" "%s" { - name = "${google_compute_global_address.%s.name}" + name = google_compute_global_address.%s.name } `, rsName, dsName, rsName) } diff --git a/third_party/terraform/tests/data_source_google_compute_image_test.go b/third_party/terraform/tests/data_source_google_compute_image_test.go index acb00158fd75..24c08ad753e7 100644 --- a/third_party/terraform/tests/data_source_google_compute_image_test.go +++ b/third_party/terraform/tests/data_source_google_compute_image_test.go @@ -87,19 +87,22 @@ func testAccDataSourceCustomImageConfig(family, name string) string { resource "google_compute_image" "image" { family = "%s" name = "%s" - source_disk = "${google_compute_disk.disk.self_link}" + source_disk = google_compute_disk.disk.self_link } + resource "google_compute_disk" "disk" { name = "%s-disk" zone = "us-central1-b" } + data "google_compute_image" "from_name" { - project = "${google_compute_image.image.project}" - name = "${google_compute_image.image.name}" + project = google_compute_image.image.project + name = google_compute_image.image.name } + data "google_compute_image" "from_family" { - project = "${google_compute_image.image.project}" - family = "${google_compute_image.image.family}" + project = google_compute_image.image.project + family = google_compute_image.image.family } `, family, name, name) } diff --git a/third_party/terraform/tests/data_source_google_compute_instance_group_test.go.erb b/third_party/terraform/tests/data_source_google_compute_instance_group_test.go.erb index 6c258c5ad496..0be8d2ff8970 100644 --- a/third_party/terraform/tests/data_source_google_compute_instance_group_test.go.erb +++ b/third_party/terraform/tests/data_source_google_compute_instance_group_test.go.erb @@ -212,7 +212,7 @@ resource "google_compute_instance" "test" { boot_disk { initialize_params { - image = "${data.google_compute_image.my_image.self_link}" + image = data.google_compute_image.my_image.self_link } } @@ -227,16 +227,16 @@ resource "google_compute_instance" "test" { resource "google_compute_instance_group" "test" { name = "tf-test-%s" - zone = "${google_compute_instance.test.zone}" + zone = google_compute_instance.test.zone instances = [ - "${google_compute_instance.test.self_link}", + google_compute_instance.test.self_link, ] } data "google_compute_instance_group" "test" { - name = "${google_compute_instance_group.test.name}" - zone = "${google_compute_instance_group.test.zone}" + name = google_compute_instance_group.test.name + zone = google_compute_instance_group.test.zone } `, acctest.RandString(10), acctest.RandString(10)) } @@ -255,7 +255,7 @@ resource "google_compute_instance" "test" { boot_disk { initialize_params { - image = "${data.google_compute_image.my_image.self_link}" + image = data.google_compute_image.my_image.self_link } } @@ -270,7 +270,7 @@ resource "google_compute_instance" "test" { resource "google_compute_instance_group" "test" { name = "tf-test-%s" - zone = "${google_compute_instance.test.zone}" + zone = google_compute_instance.test.zone named_port { name = "http" @@ -283,13 +283,13 @@ resource "google_compute_instance_group" "test" { } instances = [ - "${google_compute_instance.test.self_link}", + google_compute_instance.test.self_link, ] } data 
"google_compute_instance_group" "test" { - name = "${google_compute_instance_group.test.name}" - zone = "${google_compute_instance_group.test.zone}" + name = google_compute_instance_group.test.name + zone = google_compute_instance_group.test.zone } `, acctest.RandString(10), acctest.RandString(10)) } @@ -302,13 +302,13 @@ data "google_compute_image" "my_image" { } resource "google_compute_instance_template" "igm-basic" { - name = "%s" + name = "%s" machine_type = "n1-standard-1" disk { - source_image = "${data.google_compute_image.my_image.self_link}" - auto_delete = true - boot = true + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true } network_interface { @@ -317,24 +317,24 @@ resource "google_compute_instance_template" "igm-basic" { } resource "google_compute_instance_group_manager" "igm" { - name = "%s" + name = "%s" <% if version.nil? || version == 'ga' -%> - instance_template = "${google_compute_instance_template.igm-basic.self_link}" + instance_template = google_compute_instance_template.igm-basic.self_link <% else -%> version { - instance_template = "${google_compute_instance_template.igm-basic.self_link}" - name = "primary" + instance_template = google_compute_instance_template.igm-basic.self_link + name = "primary" } <% end -%> base_instance_name = "igm" - zone = "us-central1-a" - target_size = 10 + zone = "us-central1-a" + target_size = 10 wait_for_instances = true } data "google_compute_instance_group" "test" { - self_link = "${google_compute_instance_group_manager.igm.instance_group}" + self_link = google_compute_instance_group_manager.igm.instance_group } `, acctest.RandomWithPrefix("test-igm"), acctest.RandomWithPrefix("test-igm")) } diff --git a/third_party/terraform/tests/data_source_google_compute_instance_test.go b/third_party/terraform/tests/data_source_google_compute_instance_test.go index 08aab937fcee..108b0dc4e1ee 100644 --- a/third_party/terraform/tests/data_source_google_compute_instance_test.go +++ b/third_party/terraform/tests/data_source_google_compute_instance_test.go @@ -88,50 +88,50 @@ func testAccDataSourceComputeInstanceCheck(datasourceName string, resourceName s func testAccDataSourceComputeInstanceConfig(instanceName string) string { return fmt.Sprintf(` resource "google_compute_instance" "foo" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - can_ip_forward = false - tags = ["foo", "bar"] - - boot_disk { - initialize_params{ - image = "debian-8-jessie-v20160803" - } - } - - scratch_disk { - } - - network_interface { - network = "default" - - access_config { - // Ephemeral IP - } - } - - metadata = { - foo = "bar" - baz = "qux" - startup-script = "echo Hello" - } - - labels = { - my_key = "my_value" - my_other_key = "my_other_value" - } - - enable_display = true + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-8-jessie-v20160803" + } + } + + scratch_disk { + } + + network_interface { + network = "default" + + access_config { + // Ephemeral IP + } + } + + metadata = { + foo = "bar" + baz = "qux" + startup-script = "echo Hello" + } + + labels = { + my_key = "my_value" + my_other_key = "my_other_value" + } + + enable_display = true } data "google_compute_instance" "bar" { - name = "${google_compute_instance.foo.name}" - zone = "us-central1-a" + name = google_compute_instance.foo.name + zone = "us-central1-a" } data "google_compute_instance" "baz" { - self_link = 
"${google_compute_instance.foo.self_link}" + self_link = google_compute_instance.foo.self_link } `, instanceName) } diff --git a/third_party/terraform/tests/data_source_google_compute_network_test.go b/third_party/terraform/tests/data_source_google_compute_network_test.go index 16cec24c47d2..b4bd6edcb1f8 100644 --- a/third_party/terraform/tests/data_source_google_compute_network_test.go +++ b/third_party/terraform/tests/data_source_google_compute_network_test.go @@ -69,11 +69,12 @@ func testAccDataSourceGoogleNetworkCheck(data_source_name string, resource_name func testAccDataSourceGoogleNetworkConfig(name string) string { return fmt.Sprintf(` resource "google_compute_network" "foobar" { - name = "%s" - description = "my-description" + name = "%s" + description = "my-description" } data "google_compute_network" "my_network" { - name = "${google_compute_network.foobar.name}" -}`, name) + name = google_compute_network.foobar.name +} +`, name) } diff --git a/third_party/terraform/tests/data_source_google_compute_region_instance_group_test.go.erb b/third_party/terraform/tests/data_source_google_compute_region_instance_group_test.go.erb index 26dadb563deb..66cfa0e93320 100644 --- a/third_party/terraform/tests/data_source_google_compute_region_instance_group_test.go.erb +++ b/third_party/terraform/tests/data_source_google_compute_region_instance_group_test.go.erb @@ -30,50 +30,50 @@ func TestAccDataSourceRegionInstanceGroup(t *testing.T) { func testAccDataSourceRegionInstanceGroup_basic(instanceManagerName string) string { return fmt.Sprintf(` resource "google_compute_target_pool" "foo" { - name = "%s" + name = "%s" } data "google_compute_image" "debian" { - project = "debian-cloud" - name = "debian-9-stretch-v20171129" + project = "debian-cloud" + name = "debian-9-stretch-v20171129" } resource "google_compute_instance_template" "foo" { - machine_type = "n1-standard-1" - disk { - source_image = "${data.google_compute_image.debian.self_link}" - } - network_interface { - access_config { - } - network = "default" - } + machine_type = "n1-standard-1" + disk { + source_image = data.google_compute_image.debian.self_link + } + network_interface { + access_config { + } + network = "default" + } } resource "google_compute_region_instance_group_manager" "foo" { - name = "%s" - base_instance_name = "foo" + name = "%s" + base_instance_name = "foo" <% if version.nil? 
|| version == 'ga' -%> - instance_template = "${google_compute_instance_template.foo.self_link}" + instance_template = google_compute_instance_template.foo.self_link <% else -%> - version { - instance_template = "${google_compute_instance_template.foo.self_link}" - name = "primary" - } + version { + instance_template = google_compute_instance_template.foo.self_link + name = "primary" + } <% end -%> - region = "us-central1" - target_pools = ["${google_compute_target_pool.foo.self_link}"] - target_size = 1 + region = "us-central1" + target_pools = [google_compute_target_pool.foo.self_link] + target_size = 1 - named_port { - name = "web" - port = 80 - } - wait_for_instances = true + named_port { + name = "web" + port = 80 + } + wait_for_instances = true } data "google_compute_region_instance_group" "data_source" { - self_link = "${google_compute_region_instance_group_manager.foo.instance_group}" + self_link = google_compute_region_instance_group_manager.foo.instance_group } `, acctest.RandomWithPrefix("test-rigm-"), instanceManagerName) } diff --git a/third_party/terraform/tests/data_source_google_compute_ssl_certificate_test.go b/third_party/terraform/tests/data_source_google_compute_ssl_certificate_test.go index 93a51a619907..1e0c4a502e09 100644 --- a/third_party/terraform/tests/data_source_google_compute_ssl_certificate_test.go +++ b/third_party/terraform/tests/data_source_google_compute_ssl_certificate_test.go @@ -34,14 +34,14 @@ func TestAccDataSourceComputeSslCertificate(t *testing.T) { func testAccDataSourceComputeSslCertificateConfig() string { return fmt.Sprintf(` resource "google_compute_ssl_certificate" "foobar" { - name = "cert-test-%s" - description = "really descriptive" - private_key = "${file("test-fixtures/ssl_cert/test.key")}" - certificate = "${file("test-fixtures/ssl_cert/test.crt")}" + name = "cert-test-%s" + description = "really descriptive" + private_key = file("test-fixtures/ssl_cert/test.key") + certificate = file("test-fixtures/ssl_cert/test.crt") } data "google_compute_ssl_certificate" "cert" { - name = "${google_compute_ssl_certificate.foobar.name}" + name = google_compute_ssl_certificate.foobar.name } `, acctest.RandString(10)) } diff --git a/third_party/terraform/tests/data_source_google_compute_ssl_policy_test.go b/third_party/terraform/tests/data_source_google_compute_ssl_policy_test.go index 52c2b261a7e5..7d8ce20a1fa9 100644 --- a/third_party/terraform/tests/data_source_google_compute_ssl_policy_test.go +++ b/third_party/terraform/tests/data_source_google_compute_ssl_policy_test.go @@ -68,16 +68,15 @@ func testAccDataSourceGoogleSslPolicyCheck(data_source_name string, resource_nam func testAccDataSourceGoogleSslPolicy() string { return fmt.Sprintf(` - resource "google_compute_ssl_policy" "foobar" { - name = "%s" - description = "my-description" - min_tls_version = "TLS_1_2" - profile = "MODERN" + name = "%s" + description = "my-description" + min_tls_version = "TLS_1_2" + profile = "MODERN" } data "google_compute_ssl_policy" "ssl_policy" { - name = "${google_compute_ssl_policy.foobar.name}" + name = google_compute_ssl_policy.foobar.name } `, acctest.RandomWithPrefix("test-ssl-policy")) } diff --git a/third_party/terraform/tests/data_source_google_compute_subnetwork_test.go b/third_party/terraform/tests/data_source_google_compute_subnetwork_test.go index 36a34f90c023..3da51de52cc7 100644 --- a/third_party/terraform/tests/data_source_google_compute_subnetwork_test.go +++ b/third_party/terraform/tests/data_source_google_compute_subnetwork_test.go @@ -76,28 
+76,28 @@ func testAccDataSourceGoogleSubnetworkCheck(data_source_name string, resource_na func testAccDataSourceGoogleSubnetwork() string { return fmt.Sprintf(` resource "google_compute_network" "foobar" { - name = "%s" - description = "my-description" + name = "%s" + description = "my-description" } resource "google_compute_subnetwork" "foobar" { - name = "subnetwork-test" - description = "my-description" - ip_cidr_range = "10.0.0.0/24" - network = "${google_compute_network.foobar.self_link}" - private_ip_google_access = true - secondary_ip_range { - range_name = "tf-test-secondary-range" - ip_cidr_range = "192.168.1.0/24" - } + name = "subnetwork-test" + description = "my-description" + ip_cidr_range = "10.0.0.0/24" + network = google_compute_network.foobar.self_link + private_ip_google_access = true + secondary_ip_range { + range_name = "tf-test-secondary-range" + ip_cidr_range = "192.168.1.0/24" + } } data "google_compute_subnetwork" "my_subnetwork" { - name = "${google_compute_subnetwork.foobar.name}" + name = google_compute_subnetwork.foobar.name } data "google_compute_subnetwork" "my_subnetwork_self_link" { - self_link = "${google_compute_subnetwork.foobar.self_link}" + self_link = google_compute_subnetwork.foobar.self_link } `, acctest.RandomWithPrefix("network-test")) } diff --git a/third_party/terraform/tests/data_source_google_compute_vpn_gateway_test.go b/third_party/terraform/tests/data_source_google_compute_vpn_gateway_test.go index 09542f19ce82..8202cf514e00 100644 --- a/third_party/terraform/tests/data_source_google_compute_vpn_gateway_test.go +++ b/third_party/terraform/tests/data_source_google_compute_vpn_gateway_test.go @@ -70,12 +70,13 @@ func testAccDataSourceGoogleVpnGatewayCheck(data_source_name string, resource_na func testAccDataSourceGoogleVpnGatewayConfig(name string) string { return fmt.Sprintf(` resource "google_compute_vpn_gateway" "foobar" { - name = "%s" - description = "my-description" - network = "default" + name = "%s" + description = "my-description" + network = "default" } data "google_compute_vpn_gateway" "my_vpn_gateway" { - name = "${google_compute_vpn_gateway.foobar.name}" -}`, name) + name = google_compute_vpn_gateway.foobar.name +} +`, name) } diff --git a/third_party/terraform/tests/data_source_google_container_cluster_test.go b/third_party/terraform/tests/data_source_google_container_cluster_test.go index 662f39591aa7..0e536bbec901 100644 --- a/third_party/terraform/tests/data_source_google_container_cluster_test.go +++ b/third_party/terraform/tests/data_source_google_container_cluster_test.go @@ -63,19 +63,19 @@ func TestAccContainerClusterDatasource_regional(t *testing.T) { func testAccContainerClusterDatasource_zonal() string { return fmt.Sprintf(` resource "google_container_cluster" "kubes" { - name = "cluster-test-%s" - location = "us-central1-a" - initial_node_count = 1 + name = "cluster-test-%s" + location = "us-central1-a" + initial_node_count = 1 - master_auth { - username = "mr.yoda" - password = "adoy.rm.123456789" - } + master_auth { + username = "mr.yoda" + password = "adoy.rm.123456789" + } } data "google_container_cluster" "kubes" { - name = "${google_container_cluster.kubes.name}" - location = "${google_container_cluster.kubes.zone}" + name = google_container_cluster.kubes.name + location = google_container_cluster.kubes.location } `, acctest.RandString(10)) } @@ -83,14 +83,14 @@ data "google_container_cluster" "kubes" { func testAccContainerClusterDatasource_regional() string { return fmt.Sprintf(` resource 
"google_container_cluster" "kubes" { - name = "cluster-test-%s" - location = "us-central1" - initial_node_count = 1 + name = "cluster-test-%s" + location = "us-central1" + initial_node_count = 1 } data "google_container_cluster" "kubes" { - name = "${google_container_cluster.kubes.name}" - location = "${google_container_cluster.kubes.region}" + name = google_container_cluster.kubes.name + location = google_container_cluster.kubes.location } `, acctest.RandString(10)) } diff --git a/third_party/terraform/tests/data_source_google_container_engine_versions_test.go b/third_party/terraform/tests/data_source_google_container_engine_versions_test.go index 84e13eae07db..eef37286ab6d 100644 --- a/third_party/terraform/tests/data_source_google_container_engine_versions_test.go +++ b/third_party/terraform/tests/data_source_google_container_engine_versions_test.go @@ -21,7 +21,6 @@ func TestAccContainerEngineVersions_basic(t *testing.T) { Config: testAccCheckGoogleContainerEngineVersionsConfig, Check: resource.ComposeTestCheckFunc( testAccCheckGoogleContainerEngineVersionsMeta("data.google_container_engine_versions.location"), - testAccCheckGoogleContainerEngineVersionsMeta("data.google_container_engine_versions.versions"), ), }, }, @@ -46,24 +45,6 @@ func TestAccContainerEngineVersions_filtered(t *testing.T) { }) } -func TestAccContainerEngineVersions_regional(t *testing.T) { - t.Parallel() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckGoogleContainerEngineVersionsRegionalConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleContainerEngineVersionsMeta("data.google_container_engine_versions.location"), - testAccCheckGoogleContainerEngineVersionsMeta("data.google_container_engine_versions.versions"), - ), - }, - }, - }) -} - func testAccCheckGoogleContainerEngineVersionsMeta(n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -138,25 +119,11 @@ var testAccCheckGoogleContainerEngineVersionsConfig = ` data "google_container_engine_versions" "location" { location = "us-central1-b" } - -data "google_container_engine_versions" "versions" { - zone = "us-central1-b" -} ` var testAccCheckGoogleContainerEngineVersions_filtered = ` data "google_container_engine_versions" "versions" { - zone = "us-central1-b" + location = "us-central1-b" version_prefix = "1.1." 
} ` - -var testAccCheckGoogleContainerEngineVersionsRegionalConfig = ` -data "google_container_engine_versions" "location" { - location = "us-central1" -} - -data "google_container_engine_versions" "versions" { - region = "us-central1" -} -` diff --git a/third_party/terraform/tests/data_source_google_folder_organization_policy_test.go b/third_party/terraform/tests/data_source_google_folder_organization_policy_test.go index e4afe828e229..63935f80134e 100644 --- a/third_party/terraform/tests/data_source_google_folder_organization_policy_test.go +++ b/third_party/terraform/tests/data_source_google_folder_organization_policy_test.go @@ -35,17 +35,17 @@ resource "google_folder" "orgpolicy" { } resource "google_folder_organization_policy" "resource" { - folder = "${google_folder.orgpolicy.name}" - constraint = "serviceuser.services" + folder = google_folder.orgpolicy.name + constraint = "serviceuser.services" - restore_policy { - default = true - } + restore_policy { + default = true + } } data "google_folder_organization_policy" "data" { - folder = "${google_folder_organization_policy.resource.folder}" + folder = google_folder_organization_policy.resource.folder constraint = "serviceuser.services" } - `, folder, "organizations/"+org) +`, folder, "organizations/"+org) } diff --git a/third_party/terraform/tests/data_source_google_folder_test.go b/third_party/terraform/tests/data_source_google_folder_test.go index 3b3b5334b5c9..91e4be72bddd 100644 --- a/third_party/terraform/tests/data_source_google_folder_test.go +++ b/third_party/terraform/tests/data_source_google_folder_test.go @@ -119,43 +119,47 @@ func testAccDataSourceGoogleFolderCheck(data_source_name string, resource_name s func testAccCheckGoogleFolder_byFullNameConfig(parent string, displayName string) string { return fmt.Sprintf(` resource "google_folder" "foobar" { - parent = "%s" + parent = "%s" display_name = "%s" } data "google_folder" "folder" { - folder = "${google_folder.foobar.name}" -}`, parent, displayName) + folder = google_folder.foobar.name +} +`, parent, displayName) } func testAccCheckGoogleFolder_byShortNameConfig(parent string, displayName string) string { return fmt.Sprintf(` resource "google_folder" "foobar" { - parent = "%s" + parent = "%s" display_name = "%s" } data "google_folder" "folder" { - folder = "${replace(google_folder.foobar.name, "folders/", "")}" -}`, parent, displayName) + folder = replace(google_folder.foobar.name, "folders/", "") +} +`, parent, displayName) } func testAccCheckGoogleFolder_lookupOrganizationConfig(parent string, displayName string) string { return fmt.Sprintf(` resource "google_folder" "foobar" { - parent = "%s" + parent = "%s" display_name = "%s" } data "google_folder" "folder" { - folder = "${google_folder.foobar.name}" + folder = google_folder.foobar.name lookup_organization = true -}`, parent, displayName) +} +`, parent, displayName) } func testAccCheckGoogleFolder_byFullNameNotFoundConfig(name string) string { return fmt.Sprintf(` data "google_folder" "folder" { folder = "%s" -}`, name) +} +`, name) } diff --git a/third_party/terraform/tests/data_source_google_iam_role_test.go b/third_party/terraform/tests/data_source_google_iam_role_test.go index 7b16ea439384..049da7e0bb6b 100644 --- a/third_party/terraform/tests/data_source_google_iam_role_test.go +++ b/third_party/terraform/tests/data_source_google_iam_role_test.go @@ -45,7 +45,7 @@ func testAccCheckGoogleIAMRoleCheck(n string) resource.TestCheckFunc { func testAccCheckGoogleIamRoleConfig(name string) string { return 
fmt.Sprintf(` data "google_iam_role" "role" { - name = "%s" + name = "%s" } `, name) } diff --git a/third_party/terraform/tests/data_source_google_kms_crypto_key_test.go b/third_party/terraform/tests/data_source_google_kms_crypto_key_test.go index 887bfcbc4958..b9d7ec3147be 100644 --- a/third_party/terraform/tests/data_source_google_kms_crypto_key_test.go +++ b/third_party/terraform/tests/data_source_google_kms_crypto_key_test.go @@ -31,8 +31,8 @@ func TestAccDataSourceGoogleKmsCryptoKey_basic(t *testing.T) { func testAccDataSourceGoogleKmsCryptoKey_basic(keyRingName, cryptoKeyName string) string { return fmt.Sprintf(` data "google_kms_crypto_key" "kms_crypto_key" { - key_ring = "%s" - name = "%s" + key_ring = "%s" + name = "%s" } - `, keyRingName, cryptoKeyName) +`, keyRingName, cryptoKeyName) } diff --git a/third_party/terraform/tests/data_source_google_kms_crypto_key_version_test.go b/third_party/terraform/tests/data_source_google_kms_crypto_key_version_test.go index 48d62707ded0..a009f06a075c 100644 --- a/third_party/terraform/tests/data_source_google_kms_crypto_key_version_test.go +++ b/third_party/terraform/tests/data_source_google_kms_crypto_key_version_test.go @@ -41,7 +41,7 @@ func TestAccDataSourceGoogleKmsCryptoKeyVersion_basic(t *testing.T) { func testAccDataSourceGoogleKmsCryptoKeyVersion_basic(kmsKey string) string { return fmt.Sprintf(` data "google_kms_crypto_key_version" "version" { - crypto_key = "%s" - } + crypto_key = "%s" +} `, kmsKey) } diff --git a/third_party/terraform/tests/data_source_google_kms_key_ring_test.go b/third_party/terraform/tests/data_source_google_kms_key_ring_test.go index 084da2f9e08e..21239d508c65 100644 --- a/third_party/terraform/tests/data_source_google_kms_key_ring_test.go +++ b/third_party/terraform/tests/data_source_google_kms_key_ring_test.go @@ -29,10 +29,9 @@ func TestAccDataSourceGoogleKmsKeyRing_basic(t *testing.T) { func testAccDataSourceGoogleKmsKeyRing_basic(keyRingName string) string { return fmt.Sprintf(` - data "google_kms_key_ring" "kms_key_ring" { - name = "%s" - location = "global" + name = "%s" + location = "global" } - `, keyRingName) +`, keyRingName) } diff --git a/third_party/terraform/tests/data_source_google_kms_secret_ciphertext_test.go b/third_party/terraform/tests/data_source_google_kms_secret_ciphertext_test.go index 60589a220265..16675d09066c 100644 --- a/third_party/terraform/tests/data_source_google_kms_secret_ciphertext_test.go +++ b/third_party/terraform/tests/data_source_google_kms_secret_ciphertext_test.go @@ -111,8 +111,8 @@ func testAccDecryptSecretDataWithCryptoKey(s *terraform.State, cryptoKeyId *kmsC func testGoogleKmsSecretCiphertext_datasource(cryptoKeyTerraformId, plaintext string) string { return fmt.Sprintf(` data "google_kms_secret_ciphertext" "acceptance" { - crypto_key = "%s" - plaintext = "%s" + crypto_key = "%s" + plaintext = "%s" } - `, cryptoKeyTerraformId, plaintext) +`, cryptoKeyTerraformId, plaintext) } diff --git a/third_party/terraform/tests/data_source_google_kms_secret_test.go b/third_party/terraform/tests/data_source_google_kms_secret_test.go index 31267359a840..1b3d49a8f2f3 100644 --- a/third_party/terraform/tests/data_source_google_kms_secret_test.go +++ b/third_party/terraform/tests/data_source_google_kms_secret_test.go @@ -89,8 +89,8 @@ func testAccEncryptSecretDataWithCryptoKey(s *terraform.State, cryptoKeyResource func testGoogleKmsSecret_datasource(cryptoKeyTerraformId, ciphertext string) string { return fmt.Sprintf(` data "google_kms_secret" "acceptance" { - crypto_key = "%s" - 
ciphertext = "%s" + crypto_key = "%s" + ciphertext = "%s" } - `, cryptoKeyTerraformId, ciphertext) +`, cryptoKeyTerraformId, ciphertext) } diff --git a/third_party/terraform/tests/data_source_google_organization_test.go b/third_party/terraform/tests/data_source_google_organization_test.go index 4f055dd5720f..ffb4dc4aabeb 100644 --- a/third_party/terraform/tests/data_source_google_organization_test.go +++ b/third_party/terraform/tests/data_source_google_organization_test.go @@ -20,7 +20,7 @@ func TestAccDataSourceGoogleOrganization_byFullName(t *testing.T) { { Config: testAccCheckGoogleOrganization_byName(name), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.google_organization.org", "id", orgId), + resource.TestCheckResourceAttr("data.google_organization.org", "id", name), resource.TestCheckResourceAttr("data.google_organization.org", "name", name), ), }, @@ -39,7 +39,7 @@ func TestAccDataSourceGoogleOrganization_byShortName(t *testing.T) { { Config: testAccCheckGoogleOrganization_byName(orgId), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.google_organization.org", "id", orgId), + resource.TestCheckResourceAttr("data.google_organization.org", "id", name), resource.TestCheckResourceAttr("data.google_organization.org", "name", name), ), }, @@ -66,12 +66,14 @@ func testAccCheckGoogleOrganization_byName(name string) string { return fmt.Sprintf(` data "google_organization" "org" { organization = "%s" -}`, name) +} +`, name) } func testAccCheckGoogleOrganization_byDomain(name string) string { return fmt.Sprintf(` data "google_organization" "org" { domain = "%s" -}`, name) +} +`, name) } diff --git a/third_party/terraform/tests/data_source_google_project_organization_policy_test.go b/third_party/terraform/tests/data_source_google_project_organization_policy_test.go index f9bf52a96139..14f672f7d969 100644 --- a/third_party/terraform/tests/data_source_google_project_organization_policy_test.go +++ b/third_party/terraform/tests/data_source_google_project_organization_policy_test.go @@ -26,8 +26,6 @@ func TestAccDataSourceGoogleProjectOrganizationPolicy_basic(t *testing.T) { func testAccDataSourceGoogleProjectOrganizationPolicy_basic(project string) string { return fmt.Sprintf(` - - resource "google_project_organization_policy" "resource" { project = "%s" constraint = "constraints/compute.trustedImageProjects" @@ -40,8 +38,8 @@ resource "google_project_organization_policy" "resource" { } data "google_project_organization_policy" "data" { - project = "${google_project_organization_policy.resource.project}" + project = google_project_organization_policy.resource.project constraint = "constraints/compute.trustedImageProjects" } - `, project) +`, project) } diff --git a/third_party/terraform/tests/data_source_google_project_services_test.go b/third_party/terraform/tests/data_source_google_project_services_test.go deleted file mode 100644 index 30ff56a849f5..000000000000 --- a/third_party/terraform/tests/data_source_google_project_services_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" -) - -func TestAccDataSourceGoogleProjectServices_basic(t *testing.T) { - t.Parallel() - org := getTestOrgFromEnv(t) - project := "terraform-" + acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - 
Steps: []resource.TestStep{ - { - Config: testAccCheckGoogleProjectServicesConfig(project, org), - Check: resource.ComposeTestCheckFunc( - checkDataSourceStateMatchesResourceStateWithIgnores( - "data.google_project_services.project_services", - "google_project_services.project_services", - map[string]struct{}{ - // Virtual fields - "disable_on_destroy": {}, - }, - ), - ), - }, - }, - }) -} - -func testAccCheckGoogleProjectServicesConfig(project, org string) string { - return fmt.Sprintf(` -resource "google_project" "project" { - project_id = "%s" - name = "%s" - org_id = "%s" -} - -resource "google_project_services" "project_services" { - project = "${google_project.project.project_id}" - services = ["admin.googleapis.com"] -} - -data "google_project_services" "project_services" { - project = "${google_project_services.project_services.project}" -}`, project, project, org) -} diff --git a/third_party/terraform/tests/data_source_google_project_test.go b/third_party/terraform/tests/data_source_google_project_test.go index 9fe586f3cd29..3b20afa22662 100644 --- a/third_party/terraform/tests/data_source_google_project_test.go +++ b/third_party/terraform/tests/data_source_google_project_test.go @@ -37,12 +37,13 @@ func TestAccDataSourceGoogleProject_basic(t *testing.T) { func testAccCheckGoogleProjectConfig(project, org string) string { return fmt.Sprintf(` resource "google_project" "project" { - project_id = "%s" - name = "%s" - org_id = "%s" + project_id = "%s" + name = "%s" + org_id = "%s" } - + data "google_project" "project" { - project_id = "${google_project.project.project_id}" -}`, project, project, org) + project_id = google_project.project.project_id +} +`, project, project, org) } diff --git a/third_party/terraform/tests/data_source_google_service_account_access_token_test.go b/third_party/terraform/tests/data_source_google_service_account_access_token_test.go index 2709649c2e54..3c8f45c49142 100644 --- a/third_party/terraform/tests/data_source_google_service_account_access_token_test.go +++ b/third_party/terraform/tests/data_source_google_service_account_access_token_test.go @@ -52,15 +52,14 @@ func TestAccDataSourceGoogleServiceAccountAccessToken_basic(t *testing.T) { func testAccCheckGoogleServiceAccountAccessToken_datasource(targetServiceAccountID string) string { return fmt.Sprintf(` +data "google_service_account_access_token" "default" { + target_service_account = "%s" + scopes = ["userinfo-email", "https://www.googleapis.com/auth/cloud-platform"] + lifetime = "30s" +} - data "google_service_account_access_token" "default" { - target_service_account = "%s" - scopes = ["userinfo-email", "https://www.googleapis.com/auth/cloud-platform"] - lifetime = "30s" - } - - output "access_token" { - value = "${data.google_service_account_access_token.default.access_token}" - } - `, targetServiceAccountID) +output "access_token" { + value = data.google_service_account_access_token.default.access_token +} +`, targetServiceAccountID) } diff --git a/third_party/terraform/tests/data_source_google_service_account_key_test.go b/third_party/terraform/tests/data_source_google_service_account_key_test.go index e0a1d4aa0f43..6ab5d8982669 100644 --- a/third_party/terraform/tests/data_source_google_service_account_key_test.go +++ b/third_party/terraform/tests/data_source_google_service_account_key_test.go @@ -42,15 +42,16 @@ func TestAccDatasourceGoogleServiceAccountKey_basic(t *testing.T) { func testAccDatasourceGoogleServiceAccountKey(account string) string { return fmt.Sprintf(` resource 
"google_service_account" "acceptance" { - account_id = "%s" + account_id = "%s" } resource "google_service_account_key" "acceptance" { - service_account_id = "${google_service_account.acceptance.name}" - public_key_type = "TYPE_X509_PEM_FILE" + service_account_id = google_service_account.acceptance.name + public_key_type = "TYPE_X509_PEM_FILE" } data "google_service_account_key" "acceptance" { - name = "${google_service_account_key.acceptance.name}" -}`, account) + name = google_service_account_key.acceptance.name +} +`, account) } diff --git a/third_party/terraform/tests/data_source_google_service_account_test.go b/third_party/terraform/tests/data_source_google_service_account_test.go index 672cc5789260..7d52ab956406 100644 --- a/third_party/terraform/tests/data_source_google_service_account_test.go +++ b/third_party/terraform/tests/data_source_google_service_account_test.go @@ -41,7 +41,7 @@ resource "google_service_account" "acceptance" { } data "google_service_account" "acceptance" { - account_id = "${google_service_account.acceptance.account_id}" + account_id = google_service_account.acceptance.account_id } `, account) } diff --git a/third_party/terraform/tests/data_source_storage_object_signed_url_test.go b/third_party/terraform/tests/data_source_storage_object_signed_url_test.go index 94ad4752845b..3b4ececefc8e 100644 --- a/third_party/terraform/tests/data_source_storage_object_signed_url_test.go +++ b/third_party/terraform/tests/data_source_storage_object_signed_url_test.go @@ -226,42 +226,42 @@ data "google_storage_object_signed_url" "blerg" { func testAccTestGoogleStorageObjectSignedURL(bucketName string) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { - name = "%s" + name = "%s" } resource "google_storage_bucket_object" "story" { name = "path/to/file" - bucket = "${google_storage_bucket.bucket.name}" + bucket = google_storage_bucket.bucket.name content = "once upon a time..." 
} data "google_storage_object_signed_url" "story_url" { - bucket = "${google_storage_bucket.bucket.name}" - path = "${google_storage_bucket_object.story.name}" - + bucket = google_storage_bucket.bucket.name + path = google_storage_bucket_object.story.name } data "google_storage_object_signed_url" "story_url_w_headers" { - bucket = "${google_storage_bucket.bucket.name}" - path = "${google_storage_bucket_object.story.name}" + bucket = google_storage_bucket.bucket.name + path = google_storage_bucket_object.story.name extension_headers = { - x-goog-test = "foo" - x-goog-if-generation-match = 1 + x-goog-test = "foo" + x-goog-if-generation-match = 1 } } data "google_storage_object_signed_url" "story_url_w_content_type" { - bucket = "${google_storage_bucket.bucket.name}" - path = "${google_storage_bucket_object.story.name}" + bucket = google_storage_bucket.bucket.name + path = google_storage_bucket_object.story.name content_type = "text/plain" } data "google_storage_object_signed_url" "story_url_w_md5" { - bucket = "${google_storage_bucket.bucket.name}" - path = "${google_storage_bucket_object.story.name}" + bucket = google_storage_bucket.bucket.name + path = google_storage_bucket_object.story.name - content_md5 = "${google_storage_bucket_object.story.md5hash}" -}`, bucketName) + content_md5 = google_storage_bucket_object.story.md5hash +} +`, bucketName) } diff --git a/third_party/terraform/tests/resource_bigtable_table_test.go b/third_party/terraform/tests/resource_bigtable_table_test.go index 470b56c7f716..f8b7176fabe8 100644 --- a/third_party/terraform/tests/resource_bigtable_table_test.go +++ b/third_party/terraform/tests/resource_bigtable_table_test.go @@ -28,8 +28,6 @@ func TestAccBigtableTable_basic(t *testing.T) { ResourceName: "google_bigtable_table.table", ImportState: true, ImportStateVerify: true, - //TODO(rileykarson): Remove ImportStateId when id format is fixed in 3.0.0 - ImportStateId: fmt.Sprintf("%s/%s", instanceName, tableName), }, }, }) @@ -54,7 +52,6 @@ func TestAccBigtableTable_splitKeys(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"split_keys"}, - ImportStateId: fmt.Sprintf("%s/%s", instanceName, tableName), }, }, }) @@ -79,7 +76,6 @@ func TestAccBigtableTable_family(t *testing.T) { ResourceName: "google_bigtable_table.table", ImportState: true, ImportStateVerify: true, - ImportStateId: fmt.Sprintf("%s/%s", instanceName, tableName), }, }, }) @@ -104,7 +100,6 @@ func TestAccBigtableTable_familyMany(t *testing.T) { ResourceName: "google_bigtable_table.table", ImportState: true, ImportStateVerify: true, - ImportStateId: fmt.Sprintf("%s/%s", instanceName, tableName), }, }, }) diff --git a/third_party/terraform/tests/resource_cloudfunctions_function_test.go.erb b/third_party/terraform/tests/resource_cloudfunctions_function_test.go.erb index d0f56257e49e..ec9cf4dbcd04 100644 --- a/third_party/terraform/tests/resource_cloudfunctions_function_test.go.erb +++ b/third_party/terraform/tests/resource_cloudfunctions_function_test.go.erb @@ -524,6 +524,7 @@ resource "google_storage_bucket_object" "archive" { resource "google_cloudfunctions_function" "function" { name = "%s" + runtime = "nodejs8" description = "test function" available_memory_mb = 128 source_archive_bucket = "${google_storage_bucket.bucket.name}" @@ -628,7 +629,7 @@ resource "google_storage_bucket_object" "archive" { resource "google_cloudfunctions_function" "function" { name = "%s" - runtime = "nodejs6" + runtime = "nodejs8" available_memory_mb = 128 source_archive_bucket 
= "${google_storage_bucket.bucket.name}" source_archive_object = "${google_storage_bucket_object.archive.name}" @@ -659,6 +660,7 @@ resource "google_storage_bucket_object" "archive" { resource "google_cloudfunctions_function" "function" { name = "%s" + runtime = "nodejs8" available_memory_mb = 128 source_archive_bucket = "${google_storage_bucket.bucket.name}" source_archive_object = "${google_storage_bucket_object.archive.name}" @@ -686,6 +688,7 @@ resource "google_storage_bucket_object" "archive" { resource "google_cloudfunctions_function" "function" { name = "%s" + runtime = "nodejs8" available_memory_mb = 128 source_archive_bucket = "${google_storage_bucket.bucket.name}" source_archive_object = "${google_storage_bucket_object.archive.name}" @@ -702,6 +705,7 @@ func testAccCloudFunctionsFunction_sourceRepo(functionName, project string) stri return fmt.Sprintf(` resource "google_cloudfunctions_function" "function" { name = "%s" + runtime = "nodejs8" source_repository { // There isn't yet an API that'll allow us to create a source repository and @@ -733,6 +737,7 @@ data "google_compute_default_service_account" "default" { } resource "google_cloudfunctions_function" "function" { name = "%s" + runtime = "nodejs8" source_archive_bucket = "${google_storage_bucket.bucket.name}" source_archive_object = "${google_storage_bucket_object.archive.name}" @@ -778,6 +783,7 @@ resource "google_storage_bucket_object" "archive" { resource "google_cloudfunctions_function" "function" { name = "%s" + runtime = "nodejs8" provider = "google-beta" description = "test function" diff --git a/third_party/terraform/tests/resource_cloudiot_registry_test.go b/third_party/terraform/tests/resource_cloudiot_registry_test.go index 2468de141262..a002e8216670 100644 --- a/third_party/terraform/tests/resource_cloudiot_registry_test.go +++ b/third_party/terraform/tests/resource_cloudiot_registry_test.go @@ -116,7 +116,7 @@ func TestAccCloudIoTRegistry_update(t *testing.T) { }) } -func TestAccCloudIoTRegistry_eventNotificationConfigDeprecatedSingleToPlural(t *testing.T) { +func TestAccCloudIoTRegistry_eventNotificationConfigsSingle(t *testing.T) { t.Parallel() registryName := fmt.Sprintf("tf-registry-test-%s", acctest.RandString(10)) @@ -128,29 +128,18 @@ func TestAccCloudIoTRegistry_eventNotificationConfigDeprecatedSingleToPlural(t * CheckDestroy: testAccCheckCloudIoTRegistryDestroy, Steps: []resource.TestStep{ { - // Use deprecated field (event_notification_config) to create - Config: testAccCloudIoTRegistry_singleEventNotificationConfig(topic, registryName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "google_cloudiot_registry.foobar", "event_notification_configs.#", "1"), - ), + Config: testAccCloudIoTRegistry_singleEventNotificationConfigs(topic, registryName), }, { ResourceName: "google_cloudiot_registry.foobar", ImportState: true, ImportStateVerify: true, }, - { - // Use new field (event_notification_configs) to see if plan changed - Config: testAccCloudIoTRegistry_pluralEventNotificationConfigs(topic, registryName), - PlanOnly: true, - ExpectNonEmptyPlan: false, - }, }, }) } -func TestAccCloudIoTRegistry_eventNotificationConfigMultiple(t *testing.T) { +func TestAccCloudIoTRegistry_eventNotificationConfigsMultiple(t *testing.T) { t.Parallel() registryName := fmt.Sprintf("tf-registry-test-%s", acctest.RandString(10)) @@ -173,40 +162,6 @@ func TestAccCloudIoTRegistry_eventNotificationConfigMultiple(t *testing.T) { }) } -func 
TestAccCloudIoTRegistry_eventNotificationConfigPluralToDeprecatedSingle(t *testing.T) { - t.Parallel() - - registryName := fmt.Sprintf("tf-registry-test-%s", acctest.RandString(10)) - topic := fmt.Sprintf("tf-registry-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudIoTRegistryDestroy, - Steps: []resource.TestStep{ - { - // Use new field (event_notification_configs) to create - Config: testAccCloudIoTRegistry_pluralEventNotificationConfigs(topic, registryName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "google_cloudiot_registry.foobar", "event_notification_configs.#", "1"), - ), - }, - { - ResourceName: "google_cloudiot_registry.foobar", - ImportState: true, - ImportStateVerify: true, - }, - { - // Use old field (event_notification_config) to see if plan changed - Config: testAccCloudIoTRegistry_singleEventNotificationConfig(topic, registryName), - PlanOnly: true, - ExpectNonEmptyPlan: false, - }, - }, - }) -} - func testAccCheckCloudIoTRegistryDestroy(s *terraform.State) error { for _, rs := range s.RootModule().Resources { if rs.Type != "google_cloudiot_registry" { @@ -276,37 +231,14 @@ resource "google_cloudiot_registry" "foobar" { `, acctest.RandString(10), acctest.RandString(10), registryName) } -func testAccCloudIoTRegistry_singleEventNotificationConfig(topic, registryName string) string { - return fmt.Sprintf(` -resource "google_project_iam_binding" "cloud-iot-iam-binding" { - members = ["serviceAccount:cloud-iot@system.gserviceaccount.com"] - role = "roles/pubsub.publisher" -} - -resource "google_pubsub_topic" "event-topic" { - name = "%s" -} - -resource "google_cloudiot_registry" "foobar" { - depends_on = ["google_project_iam_binding.cloud-iot-iam-binding"] - - name = "%s" - - event_notification_config = { - pubsub_topic_name = "${google_pubsub_topic.event-topic.id}" - } -} -`, topic, registryName) -} - -func testAccCloudIoTRegistry_pluralEventNotificationConfigs(topic, registryName string) string { +func testAccCloudIoTRegistry_singleEventNotificationConfigs(topic, registryName string) string { return fmt.Sprintf(` resource "google_project_iam_binding" "cloud-iot-iam-binding" { members = ["serviceAccount:cloud-iot@system.gserviceaccount.com"] role = "roles/pubsub.publisher" } -resource "google_pubsub_topic" "event-topic" { +resource "google_pubsub_topic" "event-topic-1" { name = "%s" } @@ -315,8 +247,9 @@ resource "google_cloudiot_registry" "foobar" { name = "%s" - event_notification_config = { - pubsub_topic_name = "${google_pubsub_topic.event-topic.id}" + event_notification_configs { + pubsub_topic_name = "${google_pubsub_topic.event-topic-1.id}" + subfolder_matches = "" } } `, topic, registryName) diff --git a/third_party/terraform/tests/resource_composer_environment_test.go.erb b/third_party/terraform/tests/resource_composer_environment_test.go.erb index b5e4375329b6..9d4e2e8cee54 100644 --- a/third_party/terraform/tests/resource_composer_environment_test.go.erb +++ b/third_party/terraform/tests/resource_composer_environment_test.go.erb @@ -285,13 +285,13 @@ func testAccComposerEnvironmentDestroy(s *terraform.State) error { } idTokens := strings.Split(rs.Primary.ID, "/") - if len(idTokens) != 3 { - return fmt.Errorf("Invalid ID %q, expected format {project}/{region}/{environment}", rs.Primary.ID) + if len(idTokens) != 6 { + return fmt.Errorf("Invalid ID %q, expected format 
projects/{project}/regions/{region}/environments/{environment}", rs.Primary.ID) } envName := &composerEnvironmentName{ - Project: idTokens[0], - Region: idTokens[1], - Environment: idTokens[2], + Project: idTokens[1], + Region: idTokens[3], + Environment: idTokens[5], } _, err := config.clientComposer.Projects.Locations.Environments.Get(envName.resourceName()).Do() diff --git a/third_party/terraform/tests/resource_compute_attached_disk_test.go b/third_party/terraform/tests/resource_compute_attached_disk_test.go index e412157a14d2..717d22190a07 100644 --- a/third_party/terraform/tests/resource_compute_attached_disk_test.go +++ b/third_party/terraform/tests/resource_compute_attached_disk_test.go @@ -14,7 +14,7 @@ func TestAccComputeAttachedDisk_basic(t *testing.T) { diskName := acctest.RandomWithPrefix("tf-test-disk") instanceName := acctest.RandomWithPrefix("tf-test-inst") - importID := fmt.Sprintf("%s/us-central1-a/%s:%s", getTestProjectFromEnv(), instanceName, diskName) + importID := fmt.Sprintf("%s/us-central1-a/%s/%s", getTestProjectFromEnv(), instanceName, diskName) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -46,7 +46,7 @@ func TestAccComputeAttachedDisk_full(t *testing.T) { diskName := acctest.RandomWithPrefix("tf-test") instanceName := acctest.RandomWithPrefix("tf-test") - importID := fmt.Sprintf("%s/us-central1-a/%s:%s", getTestProjectFromEnv(), instanceName, diskName) + importID := fmt.Sprintf("%s/us-central1-a/%s/%s", getTestProjectFromEnv(), instanceName, diskName) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -73,7 +73,7 @@ func TestAccComputeAttachedDisk_region(t *testing.T) { diskName := acctest.RandomWithPrefix("tf-test") instanceName := acctest.RandomWithPrefix("tf-test") - importID := fmt.Sprintf("%s/us-central1-a/%s:%s", getTestProjectFromEnv(), instanceName, diskName) + importID := fmt.Sprintf("%s/us-central1-a/%s/%s", getTestProjectFromEnv(), instanceName, diskName) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, diff --git a/third_party/terraform/tests/resource_compute_backend_bucket_signed_url_key_test.go b/third_party/terraform/tests/resource_compute_backend_bucket_signed_url_key_test.go index 5d0f1db0b980..4c5944ed5089 100644 --- a/third_party/terraform/tests/resource_compute_backend_bucket_signed_url_key_test.go +++ b/third_party/terraform/tests/resource_compute_backend_bucket_signed_url_key_test.go @@ -85,7 +85,7 @@ func checkComputeBackendBucketSignedUrlKeyExists(s *terraform.State) (bool, erro } config := testAccProvider.Meta().(*Config) - keyName := rs.Primary.ID + keyName := rs.Primary.Attributes["name"] url, err := replaceVarsForTest(config, rs, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{backend_bucket}}") if err != nil { diff --git a/third_party/terraform/tests/resource_compute_backend_service_signed_url_key_test.go b/third_party/terraform/tests/resource_compute_backend_service_signed_url_key_test.go index baad22edcd6e..ba546e6b036c 100644 --- a/third_party/terraform/tests/resource_compute_backend_service_signed_url_key_test.go +++ b/third_party/terraform/tests/resource_compute_backend_service_signed_url_key_test.go @@ -85,7 +85,7 @@ func checkComputeBackendServiceSignedUrlKeyExists(s *terraform.State) (bool, err } config := testAccProvider.Meta().(*Config) - keyName := rs.Primary.ID + keyName := rs.Primary.Attributes["name"] url, err := replaceVarsForTest(config, rs, 
"{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{backend_service}}") if err != nil { diff --git a/third_party/terraform/tests/resource_compute_backend_service_test.go.erb b/third_party/terraform/tests/resource_compute_backend_service_test.go.erb index 2dcf00869839..ca472b30302d 100644 --- a/third_party/terraform/tests/resource_compute_backend_service_test.go.erb +++ b/third_party/terraform/tests/resource_compute_backend_service_test.go.erb @@ -692,7 +692,7 @@ resource "google_compute_health_check" "health_check" { name = "%s" http_health_check { - + port = 80 } } `, serviceName, checkName) @@ -720,7 +720,7 @@ resource "google_compute_health_check" "health_check" { name = "%s" http_health_check { - + port = 80 } } `, serviceName, checkName) @@ -757,7 +757,7 @@ resource "google_compute_health_check" "health_check" { name = "%s" http_health_check { - + port = 80 } } `, serviceName, checkName) @@ -818,7 +818,7 @@ resource "google_compute_health_check" "health_check" { name = "%s" http_health_check { - + port = 80 } } `, serviceName, checkName) @@ -1283,7 +1283,9 @@ resource "google_compute_instance" "endpoint-instance" { network_interface { subnetwork = "${google_compute_subnetwork.default.self_link}" - access_config { } + access_config { + network_tier = "PREMIUM" + } } } @@ -1359,7 +1361,9 @@ resource "google_compute_instance" "endpoint-instance" { network_interface { subnetwork = "${google_compute_subnetwork.default.self_link}" - access_config { } + access_config { + network_tier = "PREMIUM" + } } } diff --git a/third_party/terraform/tests/resource_compute_disk_test.go.erb b/third_party/terraform/tests/resource_compute_disk_test.go.erb index 3d54d6d653be..2c9f446f8be2 100644 --- a/third_party/terraform/tests/resource_compute_disk_test.go.erb +++ b/third_party/terraform/tests/resource_compute_disk_test.go.erb @@ -468,12 +468,12 @@ func testAccCheckComputeDiskExists(n, p string, disk *compute.Disk) resource.Tes config := testAccProvider.Meta().(*Config) found, err := config.clientCompute.Disks.Get( - p, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + p, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != rs.Primary.Attributes["name"] { return fmt.Errorf("Disk not found") } diff --git a/third_party/terraform/tests/resource_compute_health_check_test.go b/third_party/terraform/tests/resource_compute_health_check_test.go index f91419cb9604..6fa1f5f2cf03 100644 --- a/third_party/terraform/tests/resource_compute_health_check_test.go +++ b/third_party/terraform/tests/resource_compute_health_check_test.go @@ -164,6 +164,7 @@ resource "google_compute_health_check" "foobar" { timeout_sec = 2 unhealthy_threshold = 3 tcp_health_check { + port = 443 } } `, hckName) @@ -326,8 +327,10 @@ resource "google_compute_health_check" "foobar" { unhealthy_threshold = 3 tcp_health_check { + port = 443 } ssl_health_check { + port = 443 } } `, hckName) diff --git a/third_party/terraform/tests/resource_compute_http_health_check_test.go b/third_party/terraform/tests/resource_compute_http_health_check_test.go index d1af7d14c7c7..cd18b78846c4 100644 --- a/third_party/terraform/tests/resource_compute_http_health_check_test.go +++ b/third_party/terraform/tests/resource_compute_http_health_check_test.go @@ -55,19 +55,19 @@ func testAccCheckComputeHttpHealthCheckExists(n string, healthCheck *compute.Htt return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") + if 
rs.Primary.Attributes["name"] == "" { + return fmt.Errorf("No name is set") } config := testAccProvider.Meta().(*Config) found, err := config.clientCompute.HttpHealthChecks.Get( - config.Project, rs.Primary.ID).Do() + config.Project, rs.Primary.Attributes["name"]).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != rs.Primary.Attributes["name"] { return fmt.Errorf("HttpHealthCheck not found") } diff --git a/third_party/terraform/tests/resource_compute_image_test.go b/third_party/terraform/tests/resource_compute_image_test.go index 59a956375e19..336a6eb931b9 100644 --- a/third_party/terraform/tests/resource_compute_image_test.go +++ b/third_party/terraform/tests/resource_compute_image_test.go @@ -107,19 +107,19 @@ func testAccCheckComputeImageExists(n string, image *compute.Image) resource.Tes return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") + if rs.Primary.Attributes["name"] == "" { + return fmt.Errorf("No name is set") } config := testAccProvider.Meta().(*Config) found, err := config.clientCompute.Images.Get( - config.Project, rs.Primary.ID).Do() + config.Project, rs.Primary.Attributes["name"]).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != rs.Primary.Attributes["name"] { return fmt.Errorf("Image not found") } diff --git a/third_party/terraform/tests/resource_compute_instance_from_template_test.go b/third_party/terraform/tests/resource_compute_instance_from_template_test.go index 84d5d77d064b..283fa38e367c 100644 --- a/third_party/terraform/tests/resource_compute_instance_from_template_test.go +++ b/third_party/terraform/tests/resource_compute_instance_from_template_test.go @@ -246,6 +246,7 @@ resource "google_compute_instance_template" "foobar" { disk_type = "local-ssd" type = "SCRATCH" interface = "NVME" + disk_size_gb = 375 } network_interface { @@ -447,6 +448,8 @@ resource "google_compute_instance_template" "template" { disk { type = "SCRATCH" + disk_type = "local-ssd" + disk_size_gb = 375 interface = "SCSI" auto_delete = true boot = false diff --git a/third_party/terraform/tests/resource_compute_instance_group_manager_test.go.erb b/third_party/terraform/tests/resource_compute_instance_group_manager_test.go similarity index 84% rename from third_party/terraform/tests/resource_compute_instance_group_manager_test.go.erb rename to third_party/terraform/tests/resource_compute_instance_group_manager_test.go index 39a887f77021..f4c29d6b4ac5 100644 --- a/third_party/terraform/tests/resource_compute_instance_group_manager_test.go.erb +++ b/third_party/terraform/tests/resource_compute_instance_group_manager_test.go @@ -1,4 +1,3 @@ -<% autogen_exception -%> package google import ( @@ -129,30 +128,6 @@ func TestAccInstanceGroupManager_updateLifecycle(t *testing.T) { }) } -<% if version == 'ga' -%> -func TestAccInstanceGroupManager_updateStrategy(t *testing.T) { - t.Parallel() - - igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceGroupManagerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceGroupManager_updateStrategy(igm), - }, - { - ResourceName: "google_compute_instance_group_manager.igm-update-strategy", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} -<% end -%> - func TestAccInstanceGroupManager_updatePolicy(t *testing.T) { t.Parallel() @@ -285,35 +260,6 @@ func 
TestAccInstanceGroupManager_autoHealingPolicies(t *testing.T) { }) } -<% if version == 'ga' -%> -func TestAccInstanceGroupManager_upgradeInstanceTemplate(t *testing.T) { - t.Parallel() - - igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceGroupManagerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceGroupManager_upgradeInstanceTemplate1(igm), - }, - { - ResourceName: "google_compute_instance_group_manager.igm-instance-template-upgrade", - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccInstanceGroupManager_upgradeInstanceTemplate2(igm), - PlanOnly: true, - ExpectNonEmptyPlan: false, - }, - }, - }) -} -<% end -%> - func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -632,54 +578,6 @@ func testAccInstanceGroupManager_updateLifecycle(tag, igm string) string { }`, tag, igm) } -<% if version == 'ga' -%> -func testAccInstanceGroupManager_updateStrategy(igm string) string { - return fmt.Sprintf(` - data "google_compute_image" "my_image" { - family = "debian-9" - project = "debian-cloud" - } - - resource "google_compute_instance_template" "igm-update-strategy" { - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["terraform-testing"] - - disk { - source_image = "${data.google_compute_image.my_image.self_link}" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - service_account { - scopes = ["userinfo-email", "compute-ro", "storage-ro"] - } - - lifecycle { - create_before_destroy = true - } - } - - resource "google_compute_instance_group_manager" "igm-update-strategy" { - description = "Terraform test instance group manager" - name = "%s" - instance_template = "${google_compute_instance_template.igm-update-strategy.self_link}" - base_instance_name = "igm-update-strategy" - zone = "us-central1-c" - target_size = 2 - update_strategy = "REPLACE" - named_port { - name = "customhttp" - port = 8080 - } - }`, igm) -} -<% end -%> - func testAccInstanceGroupManager_rollingUpdatePolicy(igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -1110,92 +1008,3 @@ resource "google_compute_instance_group_manager" "igm-basic" { } `, primaryTemplate, canaryTemplate, igm) } -<% if version == 'ga' -%> -func testAccInstanceGroupManager_upgradeInstanceTemplate1(igm string) string { - return fmt.Sprintf(` -data "google_compute_image" "my_image" { - family = "debian-9" - project = "debian-cloud" -} - -resource "google_compute_instance_template" "igm-instance-template-upgrade" { - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["terraform-testing"] - - disk { - source_image = "${data.google_compute_image.my_image.self_link}" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - lifecycle { - create_before_destroy = true - } -} - -resource "google_compute_instance_group_manager" "igm-instance-template-upgrade" { - description = "Terraform test instance group manager" - name = "%s" - - instance_template = "${google_compute_instance_template.igm-instance-template-upgrade.self_link}" - - target_size = 3 - base_instance_name = "igm-instance-template-upgrade" - - named_port { - name = "customhttp" - port = 8080 - } -}`, igm) -} - -func testAccInstanceGroupManager_upgradeInstanceTemplate2(igm string) string { - return 
fmt.Sprintf(` -data "google_compute_image" "my_image" { - family = "debian-9" - project = "debian-cloud" -} - -resource "google_compute_instance_template" "igm-instance-template-upgrade" { - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["terraform-testing"] - - disk { - source_image = "${data.google_compute_image.my_image.self_link}" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - lifecycle { - create_before_destroy = true - } -} - -resource "google_compute_instance_group_manager" "igm-instance-template-upgrade" { - description = "Terraform test instance group manager" - name = "%s" - - version { - instance_template = "${google_compute_instance_template.igm-instance-template-upgrade.self_link}" - } - - target_size = 3 - base_instance_name = "igm-instance-template-upgrade" - - named_port { - name = "customhttp" - port = 8080 - } -}`, igm) -} -<% end -%> diff --git a/third_party/terraform/tests/resource_compute_instance_group_test.go b/third_party/terraform/tests/resource_compute_instance_group_test.go index 928ecc50a32a..a7f6c2edf413 100644 --- a/third_party/terraform/tests/resource_compute_instance_group_test.go +++ b/third_party/terraform/tests/resource_compute_instance_group_test.go @@ -302,7 +302,7 @@ func testAccComputeInstanceGroup_hasCorrectNetwork(nInstanceGroup string, nNetwo return fmt.Errorf("No ID is set") } network, err := config.clientCompute.Networks.Get( - config.Project, rsNetwork.Primary.ID).Do() + config.Project, rsNetwork.Primary.Attributes["name"]).Do() if err != nil { return err } diff --git a/third_party/terraform/tests/resource_compute_instance_template_test.go b/third_party/terraform/tests/resource_compute_instance_template_test.go index ca453fbcb8f0..43695f5cf11b 100644 --- a/third_party/terraform/tests/resource_compute_instance_template_test.go +++ b/third_party/terraform/tests/resource_compute_instance_template_test.go @@ -161,6 +161,54 @@ func TestComputeInstanceTemplate_reorderDisks(t *testing.T) { } } +func TestComputeInstanceTemplate_scratchDiskSizeCustomizeDiff(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + Typee string // misspelled on purpose, type is a special symbol + DiskType string + DiskSize int + ExpectError bool + }{ + "scratch disk correct size": { + Typee: "SCRATCH", + DiskType: "local-ssd", + DiskSize: 375, + ExpectError: false, + }, + "scratch disk incorrect size": { + Typee: "SCRATCH", + DiskType: "local-ssd", + DiskSize: 300, + ExpectError: true, + }, + "non-scratch disk": { + Typee: "PERSISTENT", + DiskType: "", + DiskSize: 300, + ExpectError: false, + }, + } + + for tn, tc := range cases { + d := &ResourceDiffMock{ + After: map[string]interface{}{ + "disk.#": 1, + "disk.0.type": tc.Typee, + "disk.0.disk_type": tc.DiskType, + "disk.0.disk_size_gb": tc.DiskSize, + }, + } + err := resourceComputeInstanceTemplateScratchDiskCustomizeDiffFunc(d) + if tc.ExpectError && err == nil { + t.Errorf("%s failed, expected error but was none", tn) + } + if !tc.ExpectError && err != nil { + t.Errorf("%s failed, found unexpected error: %s", tn, err) + } + } +} + func TestAccComputeInstanceTemplate_basic(t *testing.T) { t.Parallel() @@ -756,6 +804,21 @@ func TestAccComputeInstanceTemplate_enableDisplay(t *testing.T) { }) } +func TestAccComputeInstanceTemplate_invalidDiskType(t *testing.T) { + t.Parallel() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: 
testAccComputeInstanceTemplate_invalidDiskType(), + ExpectError: regexp.MustCompile("SCRATCH disks must have a disk_type of local-ssd"), + }, + }, + }) +} + func testAccCheckComputeInstanceTemplateDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -764,8 +827,9 @@ func testAccCheckComputeInstanceTemplateDestroy(s *terraform.State) error { continue } + splits := strings.Split(rs.Primary.ID, "/") _, err := config.clientCompute.InstanceTemplates.Get( - config.Project, rs.Primary.ID).Do() + config.Project, splits[len(splits)-1]).Do() if err == nil { return fmt.Errorf("Instance template still exists") } @@ -802,13 +866,15 @@ func testAccCheckComputeInstanceTemplateExistsInProject(n, p string, instanceTem config := testAccProvider.Meta().(*Config) + splits := strings.Split(rs.Primary.ID, "/") + templateName := splits[len(splits)-1] found, err := config.clientCompute.InstanceTemplates.Get( - p, rs.Primary.ID).Do() + p, templateName).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != templateName { return fmt.Errorf("Instance template not found") } @@ -831,13 +897,15 @@ func testAccCheckComputeBetaInstanceTemplateExistsInProject(n, p string, instanc config := testAccProvider.Meta().(*Config) + splits := strings.Split(rs.Primary.ID, "/") + templateName := splits[len(splits)-1] found, err := config.clientComputeBeta.InstanceTemplates.Get( - p, rs.Primary.ID).Do() + p, templateName).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != templateName { return fmt.Errorf("Instance template not found") } @@ -1895,22 +1963,52 @@ data "google_compute_image" "my_image" { family = "centos-7" project = "gce-uefi-images" } - resource "google_compute_instance_template" "foobar" { name = "instancet-test-%s" machine_type = "n1-standard-1" can_ip_forward = false - disk { source_image = "${data.google_compute_image.my_image.self_link}" auto_delete = true boot = true } - network_interface { network = "default" } - enable_display = true }`, acctest.RandString(10)) } + +func testAccComputeInstanceTemplate_invalidDiskType() string { + return fmt.Sprintf(` +# Use this datasource insead of hardcoded values when https://github.com/hashicorp/terraform/issues/22679 +# is resolved. 
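// The two tests above (the CustomizeDiff unit test and the invalid-disk-type
// acceptance test) exercise the SCRATCH-disk rule for instance templates: a
// SCRATCH disk must use disk_type "local-ssd" and the fixed 375 GB size. A
// minimal sketch of that rule, written against a plain slice of disk maps
// rather than the provider's real *schema.ResourceDiff plumbing (the actual
// resourceComputeInstanceTemplateScratchDiskCustomizeDiffFunc is not part of
// this diff), and assuming the file's existing "fmt" import:
func checkScratchDisksSketch(disks []map[string]interface{}) error {
	for _, d := range disks {
		if d["type"] != "SCRATCH" {
			continue
		}
		if d["disk_type"] != "local-ssd" {
			// This is the message the acceptance test's ExpectError regexp matches.
			return fmt.Errorf("SCRATCH disks must have a disk_type of local-ssd")
		}
		if size, ok := d["disk_size_gb"].(int); !ok || size != 375 {
			// The unit test accepts 375 and rejects 300; the exact wording here is illustrative.
			return fmt.Errorf("SCRATCH disks must have a disk_size_gb of 375")
		}
	}
	return nil
}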
+# data "google_compute_image" "my_image" { +# family = "centos-7" +# project = "gce-uefi-images" +# } +resource "google_compute_instance_template" "foobar" { + name = "instancet-test-%s" + machine_type = "n1-standard-1" + can_ip_forward = false + disk { + source_image = "https://www.googleapis.com/compute/v1/projects/gce-uefi-images/global/images/centos-7-v20190729" + auto_delete = true + boot = true + } + disk { + auto_delete = true + disk_size_gb = 375 + type = "SCRATCH" + disk_type = "local-ssd" + } + disk { + source_image = "https://www.googleapis.com/compute/v1/projects/gce-uefi-images/global/images/centos-7-v20190729" + auto_delete = true + type = "SCRATCH" + } + network_interface { + network = "default" + } +}`, acctest.RandString(10)) +} diff --git a/third_party/terraform/tests/resource_compute_instance_test.go b/third_party/terraform/tests/resource_compute_instance_test.go index 9972b8a50e5d..1811593048c9 100644 --- a/third_party/terraform/tests/resource_compute_instance_test.go +++ b/third_party/terraform/tests/resource_compute_instance_test.go @@ -1236,7 +1236,7 @@ func testAccCheckComputeInstanceUpdateMachineType(n string) resource.TestCheckFu config := testAccProvider.Meta().(*Config) - op, err := config.clientCompute.Instances.Stop(config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + op, err := config.clientCompute.Instances.Stop(config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() if err != nil { return fmt.Errorf("Could not stop instance: %s", err) } @@ -1250,7 +1250,7 @@ func testAccCheckComputeInstanceUpdateMachineType(n string) resource.TestCheckFu } op, err = config.clientCompute.Instances.SetMachineType( - config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID, &machineType).Do() + config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"], &machineType).Do() if err != nil { return fmt.Errorf("Could not change machine type: %s", err) } @@ -1271,7 +1271,7 @@ func testAccCheckComputeInstanceDestroy(s *terraform.State) error { } _, err := config.clientCompute.Instances.Get( - config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() if err == nil { return fmt.Errorf("Instance still exists") } @@ -1309,12 +1309,12 @@ func testAccCheckComputeInstanceExistsInProject(n, p string, instance *compute.I config := testAccProvider.Meta().(*Config) found, err := config.clientCompute.Instances.Get( - p, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + p, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != rs.Primary.Attributes["name"] { return fmt.Errorf("Instance not found") } @@ -1338,12 +1338,12 @@ func testAccCheckComputeBetaInstanceExistsInProject(n, p string, instance *compu config := testAccProvider.Meta().(*Config) found, err := config.clientComputeBeta.Instances.Get( - p, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + p, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != rs.Primary.Attributes["name"] { return fmt.Errorf("Instance not found") } diff --git a/third_party/terraform/tests/resource_compute_network_peering_test.go.erb b/third_party/terraform/tests/resource_compute_network_peering_test.go.erb index 0fe3a3c29ec1..d53ee1080e87 100644 --- 
a/third_party/terraform/tests/resource_compute_network_peering_test.go.erb +++ b/third_party/terraform/tests/resource_compute_network_peering_test.go.erb @@ -151,7 +151,6 @@ func testAccComputeNetworkPeering_basic() string { network = "${google_compute_network.network2.self_link}" peer_network = "${google_compute_network.network1.self_link}" name = "peering-test-2-%s" - auto_create_routes = true ` <% unless version == 'ga' -%> diff --git a/third_party/terraform/tests/resource_compute_network_test.go b/third_party/terraform/tests/resource_compute_network_test.go index 4f1d6ccac598..ffea9c27cdae 100644 --- a/third_party/terraform/tests/resource_compute_network_test.go +++ b/third_party/terraform/tests/resource_compute_network_test.go @@ -66,31 +66,6 @@ func TestAccComputeNetwork_customSubnet(t *testing.T) { }) } -func TestAccComputeNetwork_legacyNetwork(t *testing.T) { - t.Parallel() - - var network compute.Network - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeNetworkDestroy, - Steps: []resource.TestStep{ - { - Config: testAccComputeNetwork_legacyNetwork(), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeNetworkExists("google_compute_network.default", &network), - resource.TestCheckResourceAttrSet("google_compute_network.default", "ipv4_range"), - ), - }, - { - ResourceName: "google_compute_network.default", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccComputeNetwork_routingModeAndUpdate(t *testing.T) { t.Parallel() @@ -172,19 +147,19 @@ func testAccCheckComputeNetworkExists(n string, network *compute.Network) resour return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { + if rs.Primary.Attributes["name"] == "" { return fmt.Errorf("No ID is set") } config := testAccProvider.Meta().(*Config) found, err := config.clientCompute.Networks.Get( - config.Project, rs.Primary.ID).Do() + config.Project, rs.Primary.Attributes["name"]).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != rs.Primary.Attributes["name"] { return fmt.Errorf("Network not found") } @@ -275,15 +250,6 @@ resource "google_compute_network" "bar" { }`, acctest.RandString(10)) } -func testAccComputeNetwork_legacyNetwork() string { - return fmt.Sprintf(` -resource "google_compute_network" "default" { - name = "network-test-%s" - auto_create_subnetworks = false - ipv4_range = "10.0.0.0/16" -}`, acctest.RandString(10)) -} - func testAccComputeNetwork_custom_subnet() string { return fmt.Sprintf(` resource "google_compute_network" "baz" { diff --git a/third_party/terraform/tests/resource_compute_region_backend_service_test.go.erb b/third_party/terraform/tests/resource_compute_region_backend_service_test.go.erb index 79eddd2d37b9..dce271eec256 100644 --- a/third_party/terraform/tests/resource_compute_region_backend_service_test.go.erb +++ b/third_party/terraform/tests/resource_compute_region_backend_service_test.go.erb @@ -235,7 +235,7 @@ resource "google_compute_health_check" "health_check" { name = "%s" http_health_check { - + port = 80 } } `, serviceName, checkName) @@ -264,7 +264,7 @@ resource "google_compute_health_check" "health_check" { name = "%s" http_health_check { - + port = 80 } } `, serviceName, checkName) @@ -302,7 +302,7 @@ resource "google_compute_health_check" "health_check" { name = "%s" http_health_check { - + port = 80 } } `, serviceName, checkName) @@ -401,7 +401,7 @@ resource "google_compute_health_check" "health_check" { 
name = "%s" http_health_check { - + port = 80 } } `, serviceName, igName, instanceName, checkName) @@ -442,6 +442,7 @@ resource "google_compute_health_check" "zero" { timeout_sec = 1 tcp_health_check { + port = 443 } } @@ -451,6 +452,7 @@ resource "google_compute_health_check" "one" { timeout_sec = 30 tcp_health_check { + port = 443 } } `, serviceName, checkOne, checkTwo) @@ -523,7 +525,7 @@ resource "google_compute_health_check" "default" { timeout_sec = 1 tcp_health_check { - + port = 443 } } `, serviceName, timeout, igName, itName, checkName) diff --git a/third_party/terraform/tests/resource_compute_region_disk_test.go b/third_party/terraform/tests/resource_compute_region_disk_test.go index cb1b2c498ef9..9f7e737affd5 100644 --- a/third_party/terraform/tests/resource_compute_region_disk_test.go +++ b/third_party/terraform/tests/resource_compute_region_disk_test.go @@ -183,19 +183,19 @@ func testAccCheckComputeRegionDiskExists(n string, disk *computeBeta.Disk) resou return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { + if rs.Primary.Attributes["name"] == "" { return fmt.Errorf("No ID is set") } config := testAccProvider.Meta().(*Config) found, err := config.clientComputeBeta.RegionDisks.Get( - p, rs.Primary.Attributes["region"], rs.Primary.ID).Do() + p, rs.Primary.Attributes["region"], rs.Primary.Attributes["name"]).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != rs.Primary.Attributes["name"] { return fmt.Errorf("RegionDisk not found") } diff --git a/third_party/terraform/tests/resource_compute_region_health_check_test.go.erb b/third_party/terraform/tests/resource_compute_region_health_check_test.go.erb index ba2156e76058..0f0b77184a71 100644 --- a/third_party/terraform/tests/resource_compute_region_health_check_test.go.erb +++ b/third_party/terraform/tests/resource_compute_region_health_check_test.go.erb @@ -172,6 +172,7 @@ resource "google_compute_region_health_check" "foobar" { timeout_sec = 2 unhealthy_threshold = 3 tcp_health_check { + port = 443 } } `, hckName) @@ -334,8 +335,10 @@ resource "google_compute_region_health_check" "foobar" { unhealthy_threshold = 3 tcp_health_check { + port = 443 } ssl_health_check { + port = 443 } } `, hckName) diff --git a/third_party/terraform/tests/resource_compute_region_instance_group_manager_test.go.erb b/third_party/terraform/tests/resource_compute_region_instance_group_manager_test.go similarity index 82% rename from third_party/terraform/tests/resource_compute_region_instance_group_manager_test.go.erb rename to third_party/terraform/tests/resource_compute_region_instance_group_manager_test.go index b9e3778991e4..c0094fa1f821 100644 --- a/third_party/terraform/tests/resource_compute_region_instance_group_manager_test.go.erb +++ b/third_party/terraform/tests/resource_compute_region_instance_group_manager_test.go @@ -1,4 +1,3 @@ -<% autogen_exception -%> package google import ( @@ -130,30 +129,6 @@ func TestAccRegionInstanceGroupManager_updateLifecycle(t *testing.T) { }) } -<% if version == 'ga' -%> -func TestAccRegionInstanceGroupManager_updateStrategy(t *testing.T) { - t.Parallel() - - igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceGroupManagerDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccRegionInstanceGroupManager_updateStrategy(igm), - }, - { - ResourceName: 
"google_compute_region_instance_group_manager.igm-update-strategy", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} -<% end -%> - func TestAccRegionInstanceGroupManager_rollingUpdatePolicy(t *testing.T) { t.Parallel() @@ -173,8 +148,8 @@ func TestAccRegionInstanceGroupManager_rollingUpdatePolicy(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccRegionInstanceGroupManager_rollingUpdatePolicySetToDefault(igm), - PlanOnly: true, + Config: testAccRegionInstanceGroupManager_rollingUpdatePolicySetToDefault(igm), + PlanOnly: true, ExpectNonEmptyPlan: false, }, { @@ -298,35 +273,6 @@ func TestAccRegionInstanceGroupManager_distributionPolicy(t *testing.T) { }) } -<% if version == 'ga' -%> -func TestAccRegionInstanceGroupManager_upgradeInstanceTemplate(t *testing.T) { - t.Parallel() - - igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceGroupManagerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccRegionInstanceGroupManager_upgradeInstanceTemplate1(igm), - }, - { - ResourceName: "google_compute_region_instance_group_manager.igm-instance-template-upgrade", - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccRegionInstanceGroupManager_upgradeInstanceTemplate2(igm), - PlanOnly: true, - ExpectNonEmptyPlan: false, - }, - }, - }) -} -<% end -%> - func testAccCheckRegionInstanceGroupManagerDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -334,18 +280,8 @@ func testAccCheckRegionInstanceGroupManagerDestroy(s *terraform.State) error { if rs.Type != "google_compute_region_instance_group_manager" { continue } - id, err := parseRegionInstanceGroupManagerId(rs.Primary.ID) - if err != nil { - return err - } - if id.Project == "" { - id.Project = config.Project - } - if id.Region == "" { - id.Region = rs.Primary.Attributes["region"] - } - _, err = config.clientCompute.RegionInstanceGroupManagers.Get( - id.Project, id.Region, id.Name).Do() + _, err := config.clientCompute.RegionInstanceGroupManagers.Get( + rs.Primary.Attributes["project"], rs.Primary.Attributes["region"], rs.Primary.Attributes["name"]).Do() if err == nil { return fmt.Errorf("RegionInstanceGroupManager still exists") } @@ -712,7 +648,6 @@ func testAccRegionInstanceGroupManager_separateRegions(igm1, igm2 string) string `, igm1, igm2) } - func testAccRegionInstanceGroupManager_autoHealingPolicies(template, target, igm, hck string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -927,52 +862,6 @@ resource "google_compute_region_instance_group_manager" "igm-basic" { } `, template, igm, strings.Join(zones, "\",\"")) } -<% if version == 'ga' -%> -func testAccRegionInstanceGroupManager_updateStrategy(igm string) string { - return fmt.Sprintf(` -data "google_compute_image" "my_image" { - family = "debian-9" - project = "debian-cloud" -} - -resource "google_compute_instance_template" "igm-update-strategy" { - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["terraform-testing"] - - disk { - source_image = "${data.google_compute_image.my_image.self_link}" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - service_account { - scopes = ["userinfo-email", "compute-ro", "storage-ro"] - } - - lifecycle { - create_before_destroy = true - } -} - -resource "google_compute_region_instance_group_manager" "igm-update-strategy" { - 
description = "Terraform test instance group manager" - name = "%s" - instance_template = "${google_compute_instance_template.igm-update-strategy.self_link}" - base_instance_name = "rigm-update-strategy" - region = "us-central1" - target_size = 2 - named_port { - name = "customhttp" - port = 8080 - } -}`, igm) -} -<% end -%> func testAccRegionInstanceGroupManager_rollingUpdatePolicy(igm string) string { return fmt.Sprintf(` @@ -1143,96 +1032,3 @@ resource "google_compute_region_instance_group_manager" "igm-rolling-update-poli } }`, igm) } -<% if version == 'ga' -%> -func testAccRegionInstanceGroupManager_upgradeInstanceTemplate1(igm string) string { - return fmt.Sprintf(` -data "google_compute_image" "my_image" { - family = "debian-9" - project = "debian-cloud" -} - -resource "google_compute_instance_template" "igm-instance-template-upgrade" { - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["terraform-testing"] - - disk { - source_image = "${data.google_compute_image.my_image.self_link}" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - lifecycle { - create_before_destroy = true - } -} - -resource "google_compute_region_instance_group_manager" "igm-instance-template-upgrade" { - description = "Terraform test instance group manager" - name = "%s" - - instance_template = "${google_compute_instance_template.igm-instance-template-upgrade.self_link}" - - region = "us-central1" - distribution_policy_zones = ["us-central1-a", "us-central1-f"] - target_size = 3 - base_instance_name = "igm-instance-template-upgrade" - - named_port { - name = "customhttp" - port = 8080 - } -}`, igm) -} - -func testAccRegionInstanceGroupManager_upgradeInstanceTemplate2(igm string) string { - return fmt.Sprintf(` -data "google_compute_image" "my_image" { - family = "debian-9" - project = "debian-cloud" -} - -resource "google_compute_instance_template" "igm-instance-template-upgrade" { - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["terraform-testing"] - - disk { - source_image = "${data.google_compute_image.my_image.self_link}" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - lifecycle { - create_before_destroy = true - } -} - -resource "google_compute_region_instance_group_manager" "igm-instance-template-upgrade" { - description = "Terraform test instance group manager" - name = "%s" - - version { - instance_template = "${google_compute_instance_template.igm-instance-template-upgrade.self_link}" - } - - region = "us-central1" - distribution_policy_zones = ["us-central1-a", "us-central1-f"] - target_size = 3 - base_instance_name = "igm-instance-template-upgrade" - - named_port { - name = "customhttp" - port = 8080 - } -}`, igm) -} -<% end -%> diff --git a/third_party/terraform/tests/resource_compute_region_target_http_proxy_test.go.erb b/third_party/terraform/tests/resource_compute_region_target_http_proxy_test.go.erb index fbaf647d3bd9..c1d1da277e0f 100644 --- a/third_party/terraform/tests/resource_compute_region_target_http_proxy_test.go.erb +++ b/third_party/terraform/tests/resource_compute_region_target_http_proxy_test.go.erb @@ -60,7 +60,9 @@ func testAccComputeRegionTargetHttpProxy_basic1(target, backend, hc, urlmap1, ur resource "google_compute_region_health_check" "zero" { name = "%s" - http_health_check {} + http_health_check { + port = 443 + } } resource "google_compute_region_url_map" "foobar1" { @@ -125,7 +127,9 @@ func testAccComputeRegionTargetHttpProxy_basic2(target, backend, 
hc, urlmap1, ur resource "google_compute_region_health_check" "zero" { name = "%s" - http_health_check {} + http_health_check { + port = 443 + } } resource "google_compute_region_url_map" "foobar1" { diff --git a/third_party/terraform/tests/resource_compute_region_target_https_proxy_test.go.erb b/third_party/terraform/tests/resource_compute_region_target_https_proxy_test.go.erb index 709a251507f4..08f648381f24 100644 --- a/third_party/terraform/tests/resource_compute_region_target_https_proxy_test.go.erb +++ b/third_party/terraform/tests/resource_compute_region_target_https_proxy_test.go.erb @@ -63,12 +63,16 @@ resource "google_compute_region_backend_service" "foobar2" { resource "google_compute_region_health_check" "zero" { name = "httpsproxy-test-health-check1-%s" - http_health_check {} + http_health_check { + port = 443 + } } resource "google_compute_region_health_check" "one" { name = "httpsproxy-test-health-check2-%s" - http_health_check {} + http_health_check { + port = 443 + } } resource "google_compute_region_url_map" "foobar1" { @@ -157,12 +161,16 @@ resource "google_compute_region_backend_service" "foobar2" { resource "google_compute_region_health_check" "zero" { name = "httpsproxy-test-health-check1-%s" - http_health_check {} + http_health_check { + port = 443 + } } resource "google_compute_region_health_check" "one" { name = "httpsproxy-test-health-check2-%s" - http_health_check {} + http_health_check { + port = 443 + } } resource "google_compute_region_url_map" "foobar1" { diff --git a/third_party/terraform/tests/resource_compute_region_url_map_test.go.erb b/third_party/terraform/tests/resource_compute_region_url_map_test.go.erb index 7153f996ad82..5ef1eb069a01 100644 --- a/third_party/terraform/tests/resource_compute_region_url_map_test.go.erb +++ b/third_party/terraform/tests/resource_compute_region_url_map_test.go.erb @@ -114,6 +114,7 @@ resource "google_compute_region_health_check" "zero" { region = "us-central1" name = "regionurlmap-test-%s" http_health_check { + port = 80 } } @@ -159,6 +160,7 @@ resource "google_compute_region_health_check" "zero" { region = "us-central1" name = "regionurlmap-test-%s" http_health_check { + port = 80 } } @@ -204,6 +206,7 @@ resource "google_compute_region_health_check" "zero" { region = "us-central1" name = "regionurlmap-test-%s" http_health_check { + port = 80 } } @@ -258,6 +261,7 @@ resource "google_compute_region_health_check" "zero" { region = "us-central1" name = "regionurlmap-test-%s" http_health_check { + port = 80 } } @@ -332,6 +336,7 @@ resource "google_compute_region_health_check" "zero" { region = "us-central1" name = "regionurlmap-test-%s" http_health_check { + port = 80 } } diff --git a/third_party/terraform/tests/resource_compute_security_policy_test.go b/third_party/terraform/tests/resource_compute_security_policy_test.go index 0cc9124f0f19..69700ee37b4c 100644 --- a/third_party/terraform/tests/resource_compute_security_policy_test.go +++ b/third_party/terraform/tests/resource_compute_security_policy_test.go @@ -101,7 +101,7 @@ func testAccCheckComputeSecurityPolicyDestroy(s *terraform.State) error { continue } - pol := rs.Primary.ID + pol := rs.Primary.Attributes["name"] _, err := config.clientComputeBeta.SecurityPolicies.Get(config.Project, pol).Do() if err == nil { diff --git a/third_party/terraform/tests/resource_compute_ssl_certificate_test.go b/third_party/terraform/tests/resource_compute_ssl_certificate_test.go index 20cde1201f0a..4970fef84f19 100644 --- 
a/third_party/terraform/tests/resource_compute_ssl_certificate_test.go +++ b/third_party/terraform/tests/resource_compute_ssl_certificate_test.go @@ -45,14 +45,16 @@ func testAccCheckComputeSslCertificateExists(n string) resource.TestCheckFunc { } config := testAccProvider.Meta().(*Config) + // We don't specify a name, but it is saved during create + name := rs.Primary.Attributes["name"] found, err := config.clientCompute.SslCertificates.Get( - config.Project, rs.Primary.ID).Do() + config.Project, name).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != name { return fmt.Errorf("Certificate not found") } diff --git a/third_party/terraform/tests/resource_compute_ssl_policy_test.go b/third_party/terraform/tests/resource_compute_ssl_policy_test.go index 401a293de672..b63b1e57fc70 100644 --- a/third_party/terraform/tests/resource_compute_ssl_policy_test.go +++ b/third_party/terraform/tests/resource_compute_ssl_policy_test.go @@ -177,7 +177,7 @@ func testAccCheckComputeSslPolicyExists(n string, sslPolicy *compute.SslPolicy) return fmt.Errorf("Error Reading SSL Policy %s: %s", name, err) } - if found.Name != rs.Primary.ID { + if found.Name != name { return fmt.Errorf("SSL Policy not found") } diff --git a/third_party/terraform/tests/resource_compute_subnetwork_test.go b/third_party/terraform/tests/resource_compute_subnetwork_test.go index 95db19864044..bf86bacec48a 100644 --- a/third_party/terraform/tests/resource_compute_subnetwork_test.go +++ b/third_party/terraform/tests/resource_compute_subnetwork_test.go @@ -318,8 +318,8 @@ func testAccCheckComputeSubnetworkExists(n string, subnetwork *compute.Subnetwor } config := testAccProvider.Meta().(*Config) - - region, subnet_name := splitSubnetID(rs.Primary.ID) + region := rs.Primary.Attributes["region"] + subnet_name := rs.Primary.Attributes["name"] found, err := config.clientCompute.Subnetworks.Get( config.Project, region, subnet_name).Do() @@ -549,7 +549,6 @@ resource "google_compute_subnetwork" "network-with-flow-logs" { ip_cidr_range = "10.0.0.0/16" region = "us-central1" network = "${google_compute_network.custom-test.self_link}" - enable_flow_logs = true log_config { aggregation_interval = "INTERVAL_5_SEC" flow_sampling = 0.5 @@ -571,7 +570,6 @@ resource "google_compute_subnetwork" "network-with-flow-logs" { ip_cidr_range = "10.0.0.0/16" region = "us-central1" network = "${google_compute_network.custom-test.self_link}" - enable_flow_logs = true log_config { aggregation_interval = "INTERVAL_30_SEC" flow_sampling = 0.8 @@ -593,7 +591,6 @@ resource "google_compute_subnetwork" "network-with-flow-logs" { ip_cidr_range = "10.0.0.0/16" region = "us-central1" network = "${google_compute_network.custom-test.self_link}" - enable_flow_logs = false } `, cnName, subnetworkName) } @@ -610,7 +607,6 @@ resource "google_compute_subnetwork" "network-with-flow-logs" { ip_cidr_range = "10.0.0.0/16" region = "us-central1" network = "${google_compute_network.custom-test.self_link}" - enable_flow_logs = true log_config { aggregation_interval = "INTERVAL_30_SEC" flow_sampling = 0.6 @@ -653,7 +649,6 @@ resource "google_compute_subnetwork" "network-with-flow-logs" { ip_cidr_range = "10.0.0.0/16" region = "us-central1" network = "${google_compute_network.custom-test.self_link}" - enable_flow_logs = true log_config { aggregation_interval = "INTERVAL_30_SEC" flow_sampling = 0.8 diff --git a/third_party/terraform/tests/resource_compute_target_http_proxy_test.go b/third_party/terraform/tests/resource_compute_target_http_proxy_test.go 
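// A recurring pattern in the check functions above: with resource IDs no
// longer being bare names, the exists/destroy checks read the "name" state
// attribute (or the trailing segment of a path-style ID) instead of
// rs.Primary.ID. A minimal sketch of that lookup; resourceNameFromState is a
// hypothetical helper, not part of the provider, and it assumes the file's
// existing "strings" and terraform-plugin-sdk "terraform" imports:
func resourceNameFromState(rs *terraform.ResourceState) string {
	if name, ok := rs.Primary.Attributes["name"]; ok && name != "" {
		return name
	}
	// Fall back to the last segment of a long-form ID, e.g.
	// projects/{project}/global/instanceTemplates/{name}.
	parts := strings.Split(rs.Primary.ID, "/")
	return parts[len(parts)-1]
}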
index 194b5c8f46c2..e8be2e57a0e4 100644 --- a/third_party/terraform/tests/resource_compute_target_http_proxy_test.go +++ b/third_party/terraform/tests/resource_compute_target_http_proxy_test.go @@ -54,14 +54,15 @@ func testAccCheckComputeTargetHttpProxyExists(n string) resource.TestCheckFunc { } config := testAccProvider.Meta().(*Config) + name := rs.Primary.Attributes["name"] found, err := config.clientCompute.TargetHttpProxies.Get( - config.Project, rs.Primary.ID).Do() + config.Project, name).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != name { return fmt.Errorf("TargetHttpProxy not found") } diff --git a/third_party/terraform/tests/resource_compute_target_https_proxy_test.go b/third_party/terraform/tests/resource_compute_target_https_proxy_test.go index 4529b86b5295..250645783f93 100644 --- a/third_party/terraform/tests/resource_compute_target_https_proxy_test.go +++ b/third_party/terraform/tests/resource_compute_target_https_proxy_test.go @@ -61,14 +61,15 @@ func testAccCheckComputeTargetHttpsProxyExists(n string, proxy *compute.TargetHt } config := testAccProvider.Meta().(*Config) + name := rs.Primary.Attributes["name"] found, err := config.clientCompute.TargetHttpsProxies.Get( - config.Project, rs.Primary.ID).Do() + config.Project, name).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != name { return fmt.Errorf("TargetHttpsProxy not found") } diff --git a/third_party/terraform/tests/resource_compute_target_pool_test.go b/third_party/terraform/tests/resource_compute_target_pool_test.go index a297d6c2f2bd..7a0eafd2e14f 100644 --- a/third_party/terraform/tests/resource_compute_target_pool_test.go +++ b/third_party/terraform/tests/resource_compute_target_pool_test.go @@ -89,7 +89,7 @@ func testAccCheckComputeTargetPoolDestroy(s *terraform.State) error { } _, err := config.clientCompute.TargetPools.Get( - config.Project, config.Region, rs.Primary.ID).Do() + config.Project, config.Region, rs.Primary.Attributes["name"]).Do() if err == nil { return fmt.Errorf("TargetPool still exists") } @@ -112,12 +112,12 @@ func testAccCheckComputeTargetPoolExists(n string) resource.TestCheckFunc { config := testAccProvider.Meta().(*Config) found, err := config.clientCompute.TargetPools.Get( - config.Project, config.Region, rs.Primary.ID).Do() + config.Project, config.Region, rs.Primary.Attributes["name"]).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != rs.Primary.Attributes["name"] { return fmt.Errorf("TargetPool not found") } @@ -139,7 +139,7 @@ func testAccCheckComputeTargetPoolHealthCheck(targetPool, healthCheck string) re hcLink := healthCheckRes.Primary.Attributes["self_link"] if targetPoolRes.Primary.Attributes["health_checks.0"] != hcLink { - return fmt.Errorf("Health check not set up. Expected %q", hcLink) + return fmt.Errorf("Health check not set up. Expected %q to equal %q", targetPoolRes.Primary.Attributes["health_checks.0"], hcLink) } return nil diff --git a/third_party/terraform/tests/resource_compute_target_ssl_proxy_test.go b/third_party/terraform/tests/resource_compute_target_ssl_proxy_test.go index 55c55dbebb8c..b2e8c5f53039 100644 --- a/third_party/terraform/tests/resource_compute_target_ssl_proxy_test.go +++ b/third_party/terraform/tests/resource_compute_target_ssl_proxy_test.go @@ -53,14 +53,15 @@ func testAccCheckComputeTargetSslProxy(n, proxyHeader, sslCert string) resource. 
} config := testAccProvider.Meta().(*Config) + name := rs.Primary.Attributes["name"] found, err := config.clientCompute.TargetSslProxies.Get( - config.Project, rs.Primary.ID).Do() + config.Project, name).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != name { return fmt.Errorf("TargetSslProxy not found") } diff --git a/third_party/terraform/tests/resource_compute_target_tcp_proxy_test.go b/third_party/terraform/tests/resource_compute_target_tcp_proxy_test.go index 7214981fd8f2..a9b744350ff3 100644 --- a/third_party/terraform/tests/resource_compute_target_tcp_proxy_test.go +++ b/third_party/terraform/tests/resource_compute_target_tcp_proxy_test.go @@ -51,14 +51,15 @@ func testAccCheckComputeTargetTcpProxyExists(n string) resource.TestCheckFunc { } config := testAccProvider.Meta().(*Config) + name := rs.Primary.Attributes["name"] found, err := config.clientCompute.TargetTcpProxies.Get( - config.Project, rs.Primary.ID).Do() + config.Project, name).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != name { return fmt.Errorf("TargetTcpProxy not found") } diff --git a/third_party/terraform/tests/resource_compute_url_map_test.go b/third_party/terraform/tests/resource_compute_url_map_test.go index 9251aabdc0d2..82e5d8b087e3 100644 --- a/third_party/terraform/tests/resource_compute_url_map_test.go +++ b/third_party/terraform/tests/resource_compute_url_map_test.go @@ -107,14 +107,15 @@ func testAccCheckComputeUrlMapExists(n string) resource.TestCheckFunc { } config := testAccProvider.Meta().(*Config) + name := rs.Primary.Attributes["name"] found, err := config.clientCompute.UrlMaps.Get( - config.Project, rs.Primary.ID).Do() + config.Project, name).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != name { return fmt.Errorf("Url map not found") } return nil diff --git a/third_party/terraform/tests/resource_container_cluster_test.go.erb b/third_party/terraform/tests/resource_container_cluster_test.go.erb index 3a0a704f56de..e6a3f7ffbdb9 100644 --- a/third_party/terraform/tests/resource_container_cluster_test.go.erb +++ b/third_party/terraform/tests/resource_container_cluster_test.go.erb @@ -14,69 +14,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestContainerClusterIpAllocationCustomizeDiff(t *testing.T) { - t.Parallel() - - cases := map[string]struct { - BeforePolicy []interface{} - AfterPolicy []interface{} - ExpectDiffCleared bool - }{ - "empty to false value": { - BeforePolicy: []interface{}{}, - AfterPolicy: []interface{}{ - map[string]interface{}{ - "use_ip_aliases": false, - }, - }, - ExpectDiffCleared: true, - }, - "empty to true value": { - BeforePolicy: []interface{}{}, - AfterPolicy: []interface{}{ - map[string]interface{}{ - "use_ip_aliases": true, - }, - }, - ExpectDiffCleared: false, - }, - "empty to empty": { - BeforePolicy: []interface{}{}, - AfterPolicy: []interface{}{}, - ExpectDiffCleared: false, - }, - "non-empty to non-empty": { - BeforePolicy: []interface{}{ - map[string]interface{}{ - "use_ip_aliases": false, - }, - }, - AfterPolicy: []interface{}{ - map[string]interface{}{ - "use_ip_aliases": false, - }, - }, - }, - } - - for tn, tc := range cases { - d := &ResourceDiffMock{ - Before: map[string]interface{}{ - "ip_allocation_policy": tc.BeforePolicy, - }, - After: map[string]interface{}{ - "ip_allocation_policy": tc.AfterPolicy, - }, - } - if err := resourceContainerClusterIpAllocationCustomizeDiffFunc(d); err != nil { - 
t.Errorf("%s failed, error calculating diff: %s", tn, err) - } - if _, ok := d.Cleared["ip_allocation_policy"]; ok != tc.ExpectDiffCleared { - t.Errorf("%s failed, expected cleared to be %v, was %v", tn, tc.ExpectDiffCleared, ok) - } - } -} - func TestAccContainerCluster_basic(t *testing.T) { t.Parallel() @@ -93,16 +30,21 @@ func TestAccContainerCluster_basic(t *testing.T) { ), }, { - ResourceName: "google_container_cluster.primary", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.primary", + ImportStateId: fmt.Sprintf("us-central1-a/%s", clusterName), + ImportState: true, + ImportStateVerify: true, }, { - ResourceName: "google_container_cluster.primary", - ImportStateIdPrefix: fmt.Sprintf("%s/us-central1-a/", getTestProjectFromEnv()), - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.primary", + ImportStateId: fmt.Sprintf("%s/us-central1-a/%s", getTestProjectFromEnv(), clusterName), + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -126,7 +68,6 @@ func TestAccContainerCluster_misc(t *testing.T) { }, { ResourceName: "google_container_cluster.primary", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"remove_default_node_pool"}, @@ -136,7 +77,6 @@ func TestAccContainerCluster_misc(t *testing.T) { }, { ResourceName: "google_container_cluster.primary", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"remove_default_node_pool"}, @@ -160,7 +100,6 @@ func TestAccContainerCluster_withAddons(t *testing.T) { }, { ResourceName: "google_container_cluster.primary", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -169,7 +108,6 @@ func TestAccContainerCluster_withAddons(t *testing.T) { }, { ResourceName: "google_container_cluster.primary", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -192,7 +130,6 @@ func TestAccContainerCluster_withMasterAuthConfig(t *testing.T) { }, { ResourceName: "google_container_cluster.with_master_auth", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -205,7 +142,6 @@ func TestAccContainerCluster_withMasterAuthConfig(t *testing.T) { }, { ResourceName: "google_container_cluster.with_master_auth", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -218,7 +154,6 @@ func TestAccContainerCluster_withMasterAuthConfig(t *testing.T) { }, { ResourceName: "google_container_cluster.with_master_auth", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -231,7 +166,6 @@ func TestAccContainerCluster_withMasterAuthConfig(t *testing.T) { }, { ResourceName: "google_container_cluster.with_master_auth", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -255,7 +189,6 @@ func TestAccContainerCluster_withMasterAuthConfig_NoCert(t *testing.T) { }, { ResourceName: "google_container_cluster.with_master_auth_no_cert", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -277,7 +210,6 @@ func TestAccContainerCluster_withAuthenticatorGroupsConfig(t *testing.T) { }, { ResourceName: "google_container_cluster.with_authenticator_groups", - 
ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -305,7 +237,6 @@ func TestAccContainerCluster_withNetworkPolicyEnabled(t *testing.T) { }, { ResourceName: "google_container_cluster.with_network_policy_enabled", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"remove_default_node_pool"}, @@ -319,7 +250,6 @@ func TestAccContainerCluster_withNetworkPolicyEnabled(t *testing.T) { }, { ResourceName: "google_container_cluster.with_network_policy_enabled", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"remove_default_node_pool"}, @@ -333,7 +263,6 @@ func TestAccContainerCluster_withNetworkPolicyEnabled(t *testing.T) { }, { ResourceName: "google_container_cluster.with_network_policy_enabled", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"remove_default_node_pool"}, @@ -347,7 +276,6 @@ func TestAccContainerCluster_withNetworkPolicyEnabled(t *testing.T) { }, { ResourceName: "google_container_cluster.with_network_policy_enabled", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"remove_default_node_pool"}, @@ -478,7 +406,6 @@ func TestAccContainerCluster_withMasterAuthorizedNetworksConfig(t *testing.T) { ResourceName: "google_container_cluster.with_master_authorized_networks", ImportState: true, ImportStateVerify: true, - ImportStateIdPrefix: "us-central1-a/", }, { Config: testAccContainerCluster_withMasterAuthorizedNetworksConfig(clusterName, []string{"10.0.0.0/8", "8.8.8.8/32"}, ""), @@ -487,7 +414,6 @@ func TestAccContainerCluster_withMasterAuthorizedNetworksConfig(t *testing.T) { ResourceName: "google_container_cluster.with_master_authorized_networks", ImportState: true, ImportStateVerify: true, - ImportStateIdPrefix: "us-central1-a/", }, { Config: testAccContainerCluster_withMasterAuthorizedNetworksConfig(clusterName, []string{}, ""), @@ -498,7 +424,6 @@ func TestAccContainerCluster_withMasterAuthorizedNetworksConfig(t *testing.T) { }, { ResourceName: "google_container_cluster.with_master_authorized_networks", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -507,7 +432,6 @@ func TestAccContainerCluster_withMasterAuthorizedNetworksConfig(t *testing.T) { }, { ResourceName: "google_container_cluster.with_master_authorized_networks", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -530,7 +454,6 @@ func TestAccContainerCluster_regional(t *testing.T) { }, { ResourceName: "google_container_cluster.regional", - ImportStateIdPrefix: "us-central1/", ImportState: true, ImportStateVerify: true, }, @@ -554,7 +477,6 @@ func TestAccContainerCluster_regionalWithNodePool(t *testing.T) { }, { ResourceName: "google_container_cluster.regional", - ImportStateIdPrefix: "us-central1/", ImportState: true, ImportStateVerify: true, }, @@ -562,7 +484,7 @@ func TestAccContainerCluster_regionalWithNodePool(t *testing.T) { }) } -func TestAccContainerCluster_regionalWithAdditionalZones(t *testing.T) { +func TestAccContainerCluster_regionalWithNodeLocations(t *testing.T) { t.Parallel() clusterName := fmt.Sprintf("cluster-test-%s", acctest.RandString(10)) @@ -573,20 +495,18 @@ func TestAccContainerCluster_regionalWithAdditionalZones(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroy, Steps: 
[]resource.TestStep{ { - Config: testAccContainerCluster_regionalAdditionalZones(clusterName), + Config: testAccContainerCluster_regionalNodeLocations(clusterName), }, { - ResourceName: "google_container_cluster.with_additional_zones", - ImportStateIdPrefix: "us-central1/", + ResourceName: "google_container_cluster.with_node_locations", ImportState: true, ImportStateVerify: true, }, { - Config: testAccContainerCluster_regionalUpdateAdditionalZones(clusterName), + Config: testAccContainerCluster_regionalUpdateNodeLocations(clusterName), }, { - ResourceName: "google_container_cluster.with_additional_zones", - ImportStateIdPrefix: "us-central1/", + ResourceName: "google_container_cluster.with_node_locations", ImportState: true, ImportStateVerify: true, }, @@ -613,7 +533,6 @@ func TestAccContainerCluster_withTpu(t *testing.T) { }, { ResourceName: "google_container_cluster.with_tpu", - ImportStateIdPrefix: "us-central1-b/", ImportState: true, ImportStateVerify: true, }, @@ -637,7 +556,6 @@ func TestAccContainerCluster_withPrivateClusterConfig(t *testing.T) { }, { ResourceName: "google_container_cluster.with_private_cluster", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -682,7 +600,6 @@ func TestAccContainerCluster_withIntraNodeVisibility(t *testing.T) { }, { ResourceName: "google_container_cluster.with_intranode_visibility", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -694,7 +611,6 @@ func TestAccContainerCluster_withIntraNodeVisibility(t *testing.T) { }, { ResourceName: "google_container_cluster.with_intranode_visibility", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -718,7 +634,6 @@ func TestAccContainerCluster_withVersion(t *testing.T) { }, { ResourceName: "google_container_cluster.with_version", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version"}, @@ -742,7 +657,6 @@ func TestAccContainerCluster_updateVersion(t *testing.T) { }, { ResourceName: "google_container_cluster.with_version", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version"}, @@ -752,7 +666,6 @@ func TestAccContainerCluster_updateVersion(t *testing.T) { }, { ResourceName: "google_container_cluster.with_version", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version"}, @@ -776,7 +689,6 @@ func TestAccContainerCluster_withNodeConfig(t *testing.T) { }, { ResourceName: "google_container_cluster.with_node_config", - ImportStateIdPrefix: "us-central1-f/", ImportState: true, ImportStateVerify: true, }, @@ -785,7 +697,6 @@ func TestAccContainerCluster_withNodeConfig(t *testing.T) { }, { ResourceName: "google_container_cluster.with_node_config", - ImportStateIdPrefix: "us-central1-f/", ImportState: true, ImportStateVerify: true, }, @@ -806,7 +717,6 @@ func TestAccContainerCluster_withNodeConfigScopeAlias(t *testing.T) { }, { ResourceName: "google_container_cluster.with_node_config_scope_alias", - ImportStateIdPrefix: "us-central1-f/", ImportState: true, ImportStateVerify: true, }, @@ -829,7 +739,6 @@ func TestAccContainerCluster_withNodeConfigShieldedInstanceConfig(t *testing.T) }, { ResourceName: "google_container_cluster.with_node_config", - ImportStateIdPrefix: "us-central1-f/", ImportState: true, ImportStateVerify: true, }, @@ -855,7 +764,6 
@@ func TestAccContainerCluster_withWorkloadMetadataConfig(t *testing.T) { }, { ResourceName: "google_container_cluster.with_workload_metadata_config", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version"}, @@ -883,7 +791,6 @@ func TestAccContainerCluster_withSandboxConfig(t *testing.T) { }, { ResourceName: "google_container_cluster.with_sandbox_config", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version"}, @@ -906,13 +813,11 @@ func TestAccContainerCluster_network(t *testing.T) { }, { ResourceName: "google_container_cluster.with_net_ref_by_url", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, { ResourceName: "google_container_cluster.with_net_ref_by_name", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -933,7 +838,6 @@ func TestAccContainerCluster_backend(t *testing.T) { }, { ResourceName: "google_container_cluster.primary", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -957,7 +861,6 @@ func TestAccContainerCluster_withNodePoolBasic(t *testing.T) { }, { ResourceName: "google_container_cluster.with_node_pool", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -981,7 +884,6 @@ func TestAccContainerCluster_withNodePoolUpdateVersion(t *testing.T) { }, { ResourceName: "google_container_cluster.with_node_pool", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version"}, @@ -991,7 +893,6 @@ func TestAccContainerCluster_withNodePoolUpdateVersion(t *testing.T) { }, { ResourceName: "google_container_cluster.with_node_pool", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version"}, @@ -1011,14 +912,13 @@ func TestAccContainerCluster_withNodePoolResize(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccContainerCluster_withNodePoolAdditionalZones(clusterName, npName), + Config: testAccContainerCluster_withNodePoolNodeLocations(clusterName, npName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.node_count", "2"), ), }, { ResourceName: "google_container_cluster.with_node_pool", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1030,7 +930,6 @@ func TestAccContainerCluster_withNodePoolResize(t *testing.T) { }, { ResourceName: "google_container_cluster.with_node_pool", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1058,7 +957,6 @@ func TestAccContainerCluster_withNodePoolAutoscaling(t *testing.T) { }, { ResourceName: "google_container_cluster.with_node_pool", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1071,7 +969,6 @@ func TestAccContainerCluster_withNodePoolAutoscaling(t *testing.T) { }, { ResourceName: "google_container_cluster.with_node_pool", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1084,7 +981,6 @@ func TestAccContainerCluster_withNodePoolAutoscaling(t *testing.T) { }, { ResourceName: "google_container_cluster.with_node_pool", - ImportStateIdPrefix: "us-central1-a/", 
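Aside, illustrative only and not part of this diff: the long run of removed ImportStateIdPrefix lines above reflects that the cluster's state ID now resolves its own location, so a plain import step needs no prefix, and the one cross-project case (the shared VPC test further down) spells out a full ImportStateId instead. A minimal sketch of the two step shapes, assuming the fmt, resource, projectName, and clusterName identifiers already in scope in these tests:

// Illustrative fragment of a resource.TestCase Steps list (not part of the PR).
Steps: []resource.TestStep{
	{
		// No ImportStateIdPrefix: the state ID now carries the location itself.
		ResourceName:      "google_container_cluster.with_node_pool",
		ImportState:       true,
		ImportStateVerify: true,
	},
	{
		// Cross-project case: a complete import ID is supplied instead of a prefix.
		ResourceName:      "google_container_cluster.shared_vpc_cluster",
		ImportStateId:     fmt.Sprintf("%s-service/us-central1-a/%s", projectName, clusterName),
		ImportState:       true,
		ImportStateVerify: true,
	},
},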
ImportState: true, ImportStateVerify: true, }, @@ -1105,7 +1001,6 @@ func TestAccContainerCluster_withNodePoolNamePrefix(t *testing.T) { }, { ResourceName: "google_container_cluster.with_node_pool_name_prefix", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"node_pool.0.name_prefix"}, @@ -1127,7 +1022,6 @@ func TestAccContainerCluster_withNodePoolMultiple(t *testing.T) { }, { ResourceName: "google_container_cluster.with_node_pool_multiple", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1164,7 +1058,6 @@ func TestAccContainerCluster_withNodePoolNodeConfig(t *testing.T) { }, { ResourceName: "google_container_cluster.with_node_pool_node_config", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1187,7 +1080,6 @@ func TestAccContainerCluster_withMaintenanceWindow(t *testing.T) { }, { ResourceName: resourceName, - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1200,7 +1092,6 @@ func TestAccContainerCluster_withMaintenanceWindow(t *testing.T) { }, { ResourceName: resourceName, - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, // maintenance_policy.# = 0 is equivalent to no maintenance policy at all, @@ -1274,7 +1165,6 @@ func TestAccContainerCluster_withIPAllocationPolicy_existingSecondaryRanges(t *t }, { ResourceName: "google_container_cluster.with_ip_allocation_policy", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1296,7 +1186,6 @@ func TestAccContainerCluster_withIPAllocationPolicy_specificIPRanges(t *testing. }, { ResourceName: "google_container_cluster.with_ip_allocation_policy", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1318,73 +1207,6 @@ func TestAccContainerCluster_withIPAllocationPolicy_specificSizes(t *testing.T) }, { ResourceName: "google_container_cluster.with_ip_allocation_policy", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccContainerCluster_withIPAllocationPolicy_createSubnetwork(t *testing.T) { - t.Parallel() - - cluster := fmt.Sprintf("cluster-test-%s", acctest.RandString(10)) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckContainerClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccContainerCluster_withIPAllocationPolicy_createSubnetwork(cluster), - }, - { - ResourceName: "google_container_cluster.with_ip_allocation_policy", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccContainerCluster_withIPAllocationPolicy_createSubnetworkUpdated(cluster), - PlanOnly: true, - ExpectNonEmptyPlan: false, - }, - { - ResourceName: "google_container_cluster.with_ip_allocation_policy", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -// This test will intentionally perform a recreate. Without attr syntax, there's -// no way to go from allocation policy set -> unset without one. 
-func TestAccContainerCluster_withIPAllocationPolicy_explicitEmpty(t *testing.T) { - t.Parallel() - - cluster := fmt.Sprintf("cluster-test-%s", acctest.RandString(10)) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckContainerClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccContainerCluster_withIPAllocationPolicy_createSubnetwork(cluster), - }, - { - ResourceName: "google_container_cluster.with_ip_allocation_policy", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccContainerCluster_withIPAllocationPolicy_explicitEmpty(cluster), - }, - { - ResourceName: "google_container_cluster.with_ip_allocation_policy", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1413,7 +1235,6 @@ func TestAccContainerCluster_nodeAutoprovisioning(t *testing.T) { }, { ResourceName: "google_container_cluster.with_autoprovisioning", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version"}, @@ -1427,7 +1248,6 @@ func TestAccContainerCluster_nodeAutoprovisioning(t *testing.T) { }, { ResourceName: "google_container_cluster.with_autoprovisioning", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version"}, @@ -1454,7 +1274,7 @@ func TestAccContainerCluster_sharedVpc(t *testing.T) { }, { ResourceName: "google_container_cluster.shared_vpc_cluster", - ImportStateIdPrefix: fmt.Sprintf("%s-service/us-central1-a/", projectName), + ImportStateId: fmt.Sprintf("%s-service/us-central1-a/%s", projectName, clusterName), ImportState: true, ImportStateVerify: true, }, @@ -1478,7 +1298,6 @@ func TestAccContainerCluster_withWorkloadIdentityConfig(t *testing.T) { }, { ResourceName: "google_container_cluster.with_workload_identity_config", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1487,7 +1306,6 @@ func TestAccContainerCluster_withWorkloadIdentityConfig(t *testing.T) { }, { ResourceName: "google_container_cluster.with_workload_identity_config", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1496,7 +1314,6 @@ func TestAccContainerCluster_withWorkloadIdentityConfig(t *testing.T) { }, { ResourceName: "google_container_cluster.with_workload_identity_config", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1505,7 +1322,6 @@ func TestAccContainerCluster_withWorkloadIdentityConfig(t *testing.T) { }, { ResourceName: "google_container_cluster.with_workload_identity_config", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1529,7 +1345,6 @@ func TestAccContainerCluster_withBinaryAuthorization(t *testing.T) { }, { ResourceName: "google_container_cluster.with_binary_authorization", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1538,7 +1353,6 @@ func TestAccContainerCluster_withBinaryAuthorization(t *testing.T) { }, { ResourceName: "google_container_cluster.with_binary_authorization", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1561,7 +1375,6 @@ func TestAccContainerCluster_withShieldedNodes(t *testing.T) { }, { ResourceName: "google_container_cluster.with_shielded_nodes", - ImportStateIdPrefix: 
"us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1570,7 +1383,6 @@ func TestAccContainerCluster_withShieldedNodes(t *testing.T) { }, { ResourceName: "google_container_cluster.with_shielded_nodes", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1593,7 +1405,6 @@ func TestAccContainerCluster_withFlexiblePodCIDR(t *testing.T) { }, { ResourceName: "google_container_cluster.with_flexible_cidr", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1622,7 +1433,6 @@ func TestAccContainerCluster_errorCleanDanglingCluster(t *testing.T) { }, { ResourceName: "google_container_cluster.cidr_error_preempt", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1679,7 +1489,6 @@ func TestAccContainerCluster_withDatabaseEncryption(t *testing.T) { }, { ResourceName: "google_container_cluster.with_database_encryption", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1704,7 +1513,6 @@ func TestAccContainerCluster_withResourceUsageExportConfig(t *testing.T) { }, { ResourceName: "google_container_cluster.with_resource_usage_export_config", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1713,7 +1521,6 @@ func TestAccContainerCluster_withResourceUsageExportConfig(t *testing.T) { }, { ResourceName: "google_container_cluster.with_resource_usage_export_config", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1741,7 +1548,6 @@ func TestAccContainerCluster_withMasterAuthorizedNetworksDisabled(t *testing.T) }, { ResourceName: "google_container_cluster.with_private_cluster", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, }, @@ -1760,7 +1566,7 @@ func testAccContainerCluster_masterAuthorizedNetworksDisabled(resource_name stri attributes := rs.Primary.Attributes cluster, err := config.clientContainer.Projects.Zones.Clusters.Get( - config.Project, attributes["zone"], attributes["name"]).Do() + config.Project, attributes["location"], attributes["name"]).Do() if err != nil { return err } @@ -1783,7 +1589,7 @@ func testAccCheckContainerClusterDestroy(s *terraform.State) error { attributes := rs.Primary.Attributes _, err := config.clientContainer.Projects.Zones.Clusters.Get( - config.Project, attributes["zone"], attributes["name"]).Do() + config.Project, attributes["location"], attributes["name"]).Do() if err == nil { return fmt.Errorf("Cluster still exists") } @@ -1896,13 +1702,12 @@ func testAccContainerCluster_misc(name string) string { return fmt.Sprintf(` resource "google_container_cluster" "primary" { name = "%s" - zone = "us-central1-a" + location = "us-central1-a" initial_node_count = 1 remove_default_node_pool = true - # This uses zone/additional_zones over location/node_locations to ensure we can update from old -> new - additional_zones = [ + node_locations = [ "us-central1-b", "us-central1-c" ] @@ -1910,8 +1715,8 @@ resource "google_container_cluster" "primary" { enable_kubernetes_alpha = true enable_legacy_abac = true - logging_service = "logging.googleapis.com/kubernetes" - monitoring_service = "monitoring.googleapis.com/kubernetes" + logging_service = "logging.googleapis.com" + monitoring_service = "monitoring.googleapis.com" resource_labels = { created-by = "terraform" @@ -1974,7 +1779,6 @@ resource "google_container_cluster" "primary" { addons_config { http_load_balancing { disabled = true } 
horizontal_pod_autoscaling { disabled = true } - kubernetes_dashboard { disabled = true } network_policy_config { disabled = true } <% unless version == 'ga' -%> istio_config { @@ -1996,7 +1800,6 @@ resource "google_container_cluster" "primary" { addons_config { http_load_balancing { disabled = false } - kubernetes_dashboard { disabled = false } horizontal_pod_autoscaling { disabled = false } network_policy_config { disabled = false } <% unless version == 'ga' -%> @@ -2013,8 +1816,8 @@ resource "google_container_cluster" "primary" { func testAccContainerCluster_withMasterAuth(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_master_auth" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 3 master_auth { @@ -2027,8 +1830,8 @@ resource "google_container_cluster" "with_master_auth" { func testAccContainerCluster_updateMasterAuth(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_master_auth" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 3 master_auth { @@ -2041,8 +1844,8 @@ resource "google_container_cluster" "with_master_auth" { func testAccContainerCluster_disableMasterAuth(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_master_auth" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 3 master_auth { @@ -2055,8 +1858,8 @@ resource "google_container_cluster" "with_master_auth" { func testAccContainerCluster_withMasterAuthNoCert() string { return fmt.Sprintf(` resource "google_container_cluster" "with_master_auth_no_cert" { - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" initial_node_count = 3 master_auth { @@ -2072,8 +1875,8 @@ resource "google_container_cluster" "with_master_auth_no_cert" { func testAccContainerCluster_withNetworkPolicyEnabled(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_network_policy_enabled" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 remove_default_node_pool = true @@ -2118,8 +1921,8 @@ resource "google_container_cluster" "with_default_release_channel" { func testAccContainerCluster_removeNetworkPolicy(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_network_policy_enabled" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 remove_default_node_pool = true }`, clusterName) @@ -2128,8 +1931,8 @@ resource "google_container_cluster" "with_network_policy_enabled" { func testAccContainerCluster_withNetworkPolicyDisabled(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_network_policy_enabled" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 remove_default_node_pool = true @@ -2140,8 +1943,8 @@ resource "google_container_cluster" "with_network_policy_enabled" { func testAccContainerCluster_withNetworkPolicyConfigDisabled(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_network_policy_enabled" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 remove_default_node_pool = true @@ -2167,7 +1970,7 @@ resource 
"google_compute_subnetwork" "container_subnetwork" { name = "${google_compute_network.container_network.name}" network = "${google_compute_network.container_network.name}" ip_cidr_range = "10.0.36.0/24" - region = "us-central1" + region = "us-central1" private_ip_google_access = true secondary_ip_range { @@ -2182,8 +1985,8 @@ resource "google_compute_subnetwork" "container_subnetwork" { } resource "google_container_cluster" "with_authenticator_groups" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 network = "${google_compute_network.container_network.name}" subnetwork = "${google_compute_subnetwork.container_subnetwork.name}" @@ -2249,8 +2052,8 @@ resource "google_container_cluster" "regional" { func testAccContainerCluster_regionalWithNodePool(cluster, nodePool string) string { return fmt.Sprintf(` resource "google_container_cluster" "regional" { - name = "%s" - region = "us-central1" + name = "%s" + location = "us-central1" node_pool { name = "%s" @@ -2258,24 +2061,23 @@ resource "google_container_cluster" "regional" { }`, cluster, nodePool) } -// This uses region/additional_zones over location/node_locations to ensure we can update from old -> new -func testAccContainerCluster_regionalAdditionalZones(clusterName string) string { +func testAccContainerCluster_regionalNodeLocations(clusterName string) string { return fmt.Sprintf(` -resource "google_container_cluster" "with_additional_zones" { - name = "%s" - region = "us-central1" +resource "google_container_cluster" "with_node_locations" { + name = "%s" + location = "us-central1" initial_node_count = 1 - additional_zones = [ + node_locations = [ "us-central1-f", "us-central1-c", ] }`, clusterName) } -func testAccContainerCluster_regionalUpdateAdditionalZones(clusterName string) string { +func testAccContainerCluster_regionalUpdateNodeLocations(clusterName string) string { return fmt.Sprintf(` -resource "google_container_cluster" "with_additional_zones" { +resource "google_container_cluster" "with_node_locations" { name = "%s" location = "us-central1" initial_node_count = 1 @@ -2296,9 +2098,9 @@ resource "google_compute_network" "container_network" { } resource "google_compute_subnetwork" "container_subnetwork" { - name = "${google_compute_network.container_network.name}" - network = "${google_compute_network.container_network.name}" - region = "us-central1" + name = "${google_compute_network.container_network.name}" + network = "${google_compute_network.container_network.name}" + region = "us-central1" ip_cidr_range = "10.0.35.0/24" private_ip_google_access = true @@ -2316,7 +2118,7 @@ resource "google_compute_subnetwork" "container_subnetwork" { resource "google_container_cluster" "with_tpu" { name = "cluster-test-%s" - zone = "us-central1-b" + location = "us-central1-b" initial_node_count = 1 enable_tpu = true @@ -2343,8 +2145,8 @@ resource "google_container_cluster" "with_tpu" { func testAccContainerCluster_withIntraNodeVisibility(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_intranode_visibility" { - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" initial_node_count = 1 enable_intranode_visibility = true }`, clusterName) @@ -2353,8 +2155,8 @@ resource "google_container_cluster" "with_intranode_visibility" { func testAccContainerCluster_updateIntraNodeVisibility(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_intranode_visibility" 
{ - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" initial_node_count = 1 enable_intranode_visibility = false }`, clusterName) @@ -2365,12 +2167,12 @@ resource "google_container_cluster" "with_intranode_visibility" { func testAccContainerCluster_withVersion(clusterName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "with_version" { - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" min_master_version = "${data.google_container_engine_versions.central1a.latest_master_version}" initial_node_count = 1 }`, clusterName) @@ -2379,12 +2181,12 @@ resource "google_container_cluster" "with_version" { func testAccContainerCluster_withLowerVersion(clusterName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "with_version" { - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" min_master_version = "${data.google_container_engine_versions.central1a.valid_master_versions.2}" initial_node_count = 1 }`, clusterName) @@ -2393,12 +2195,12 @@ resource "google_container_cluster" "with_version" { func testAccContainerCluster_updateVersion(clusterName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "with_version" { - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" min_master_version = "${data.google_container_engine_versions.central1a.valid_master_versions.1}" node_version = "${data.google_container_engine_versions.central1a.valid_node_versions.1}" initial_node_count = 1 @@ -2408,8 +2210,8 @@ resource "google_container_cluster" "with_version" { func testAccContainerCluster_withNodeConfig(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_config" { - name = "%s" - zone = "us-central1-f" + name = "%s" + location = "us-central1-f" initial_node_count = 1 node_config { @@ -2456,8 +2258,8 @@ resource "google_container_cluster" "with_node_config" { func testAccContainerCluster_withNodeConfigUpdate(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_config" { - name = "%s" - zone = "us-central1-f" + name = "%s" + location = "us-central1-f" initial_node_count = 1 node_config { @@ -2504,8 +2306,8 @@ resource "google_container_cluster" "with_node_config" { func testAccContainerCluster_withNodeConfigScopeAlias() string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_config_scope_alias" { - name = "cluster-test-%s" - zone = "us-central1-f" + name = "cluster-test-%s" + location = "us-central1-f" initial_node_count = 1 node_config { @@ -2520,7 +2322,7 @@ func testAccContainerCluster_withNodeConfigShieldedInstanceConfig(clusterName st return fmt.Sprintf(` resource "google_container_cluster" "with_node_config" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" initial_node_count = 1 node_config { @@ -2561,12 +2363,12 @@ resource "google_container_cluster" "with_node_config" { func testAccContainerCluster_withWorkloadMetadataConfig() string { return fmt.Sprintf(` data 
"google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "with_workload_metadata_config" { name = "cluster-test-%s" - zone = "us-central1-a" + location = "us-central1-a" initial_node_count = 1 min_master_version = "${data.google_container_engine_versions.central1a.latest_master_version}" @@ -2591,8 +2393,8 @@ data "google_project" "project" { } resource "google_container_cluster" "with_workload_identity_config" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 node_config { @@ -2611,12 +2413,12 @@ resource "google_container_cluster" "with_workload_identity_config" { func testAccContainerCluster_withSandboxConfig() string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "with_sandbox_config" { name = "cluster-test-%s" - zone = "us-central1-a" + location = "us-central1-a" initial_node_count = 1 min_master_version = "${data.google_container_engine_versions.central1a.latest_master_version}" @@ -2645,16 +2447,16 @@ resource "google_compute_network" "container_network" { } resource "google_container_cluster" "with_net_ref_by_url" { - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" initial_node_count = 1 network = "${google_compute_network.container_network.self_link}" } resource "google_container_cluster" "with_net_ref_by_name" { - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" initial_node_count = 1 network = "${google_compute_network.container_network.name}" @@ -2684,10 +2486,10 @@ resource "google_compute_http_health_check" "default" { resource "google_container_cluster" "primary" { name = "terraform-test-%s" - zone = "us-central1-a" + location = "us-central1-a" initial_node_count = 3 - additional_zones = [ + node_locations = [ "us-central1-b", "us-central1-c", ] @@ -2707,8 +2509,8 @@ resource "google_container_cluster" "primary" { func testAccContainerCluster_withNodePoolBasic(cluster, nodePool string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_pool" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" node_pool { name = "%s" @@ -2720,12 +2522,12 @@ resource "google_container_cluster" "with_node_pool" { func testAccContainerCluster_withNodePoolLowerVersion(cluster, nodePool string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "with_node_pool" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" min_master_version = "${data.google_container_engine_versions.central1a.valid_master_versions.1}" @@ -2740,12 +2542,12 @@ resource "google_container_cluster" "with_node_pool" { func testAccContainerCluster_withNodePoolUpdateVersion(cluster, nodePool string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "with_node_pool" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" min_master_version = "${data.google_container_engine_versions.central1a.valid_master_versions.1}" @@ -2757,13 +2559,13 @@ resource "google_container_cluster" "with_node_pool" { }`, 
cluster, nodePool) } -func testAccContainerCluster_withNodePoolAdditionalZones(cluster, nodePool string) string { +func testAccContainerCluster_withNodePoolNodeLocations(cluster, nodePool string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_pool" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" - additional_zones = [ + node_locations = [ "us-central1-b", "us-central1-c" ] @@ -2778,10 +2580,10 @@ resource "google_container_cluster" "with_node_pool" { func testAccContainerCluster_withNodePoolResize(cluster, nodePool string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_pool" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" - additional_zones = [ + node_locations = [ "us-central1-b", "us-central1-c" ] @@ -2797,12 +2599,12 @@ resource "google_container_cluster" "with_node_pool" { func testAccContainerCluster_autoprovisioning(cluster string, autoprovisioning bool) string { config := fmt.Sprintf(` data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "with_autoprovisioning" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" min_master_version = "${data.google_container_engine_versions.central1a.latest_master_version}" initial_node_count = 1 `, cluster) @@ -2834,8 +2636,8 @@ resource "google_container_cluster" "with_autoprovisioning" { func testAccContainerCluster_withNodePoolAutoscaling(cluster, np string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_pool" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" node_pool { name = "%s" @@ -2851,8 +2653,8 @@ resource "google_container_cluster" "with_node_pool" { func testAccContainerCluster_withNodePoolUpdateAutoscaling(cluster, np string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_pool" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" node_pool { name = "%s" @@ -2868,8 +2670,8 @@ resource "google_container_cluster" "with_node_pool" { func testAccContainerCluster_withNodePoolNamePrefix() string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_pool_name_prefix" { - name = "tf-cluster-nodepool-test-%s" - zone = "us-central1-a" + name = "tf-cluster-nodepool-test-%s" + location = "us-central1-a" node_pool { name_prefix = "tf-np-test" @@ -2881,8 +2683,8 @@ resource "google_container_cluster" "with_node_pool_name_prefix" { func testAccContainerCluster_withNodePoolMultiple() string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_pool_multiple" { - name = "tf-cluster-nodepool-test-%s" - zone = "us-central1-a" + name = "tf-cluster-nodepool-test-%s" + location = "us-central1-a" node_pool { name = "tf-cluster-nodepool-test-%s" @@ -2899,8 +2701,8 @@ resource "google_container_cluster" "with_node_pool_multiple" { func testAccContainerCluster_withNodePoolConflictingNameFields() string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_pool_multiple" { - name = "tf-cluster-nodepool-test-%s" - zone = "us-central1-a" + name = "tf-cluster-nodepool-test-%s" + location = "us-central1-a" node_pool { # ERROR: name and name_prefix cannot be both specified @@ -2915,8 +2717,8 @@ func testAccContainerCluster_withNodePoolNodeConfig() string { testId := acctest.RandString(10) return fmt.Sprintf(` resource "google_container_cluster" 
"with_node_pool_node_config" { - name = "tf-cluster-nodepool-test-%s" - zone = "us-central1-a" + name = "tf-cluster-nodepool-test-%s" + location = "us-central1-a" node_pool { name = "tf-cluster-nodepool-test-%s" node_count = 2 @@ -2960,8 +2762,8 @@ func testAccContainerCluster_withMaintenanceWindow(clusterName string, startTime return fmt.Sprintf(` resource "google_container_cluster" "with_maintenance_window" { - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" initial_node_count = 1 %s @@ -2986,7 +2788,7 @@ func testAccContainerCluster_withRecurringMaintenanceWindow(clusterName string, return fmt.Sprintf(` resource "google_container_cluster" "with_recurring_maintenance_window" { name = "cluster-test-%s" - zone = "us-central1-a" + location = "us-central1-a" initial_node_count = 1 %s @@ -3003,31 +2805,31 @@ resource "google_compute_network" "container_network" { } resource "google_compute_subnetwork" "container_subnetwork" { - name = "${google_compute_network.container_network.name}" - network = "${google_compute_network.container_network.name}" + name = "${google_compute_network.container_network.name}" + network = "${google_compute_network.container_network.name}" + region = "us-central1" + ip_cidr_range = "10.0.0.0/24" - region = "us-central1" secondary_ip_range { - range_name = "pods" + range_name = "pods" ip_cidr_range = "10.1.0.0/16" } secondary_ip_range { - range_name = "services" + range_name = "services" ip_cidr_range = "10.2.0.0/20" } } resource "google_container_cluster" "with_ip_allocation_policy" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" network = "${google_compute_network.container_network.name}" subnetwork = "${google_compute_subnetwork.container_subnetwork.name}" initial_node_count = 1 ip_allocation_policy { - use_ip_aliases = true cluster_secondary_range_name = "pods" services_secondary_range_name = "services" } @@ -3041,18 +2843,24 @@ resource "google_compute_network" "container_network" { auto_create_subnetworks = false } +resource "google_compute_subnetwork" "container_subnetwork" { + name = "${google_compute_network.container_network.name}" + network = "${google_compute_network.container_network.name}" + region = "us-central1" + + ip_cidr_range = "10.2.0.0/16" +} + resource "google_container_cluster" "with_ip_allocation_policy" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" network = "${google_compute_network.container_network.name}" + subnetwork = "${google_compute_subnetwork.container_subnetwork.name}" initial_node_count = 1 ip_allocation_policy { - use_ip_aliases = true - create_subnetwork = true cluster_ipv4_cidr_block = "10.0.0.0/16" services_ipv4_cidr_block = "10.1.0.0/16" - node_ipv4_cidr_block = "10.2.0.0/16" } }`, cluster, cluster) } @@ -3065,93 +2873,24 @@ resource "google_compute_network" "container_network" { } resource "google_compute_subnetwork" "container_subnetwork" { - name = "${google_compute_network.container_network.name}" - network = "${google_compute_network.container_network.name}" - ip_cidr_range = "10.0.0.0/24" - region = "us-central1" + name = "${google_compute_network.container_network.name}" + network = "${google_compute_network.container_network.name}" + region = "us-central1" + + ip_cidr_range = "10.2.0.0/16" } resource "google_container_cluster" "with_ip_allocation_policy" { - name = "%s" - zone = "us-central1-a" - - network = "${google_compute_network.container_network.name}" + name = "%s" + location = 
"us-central1-a" + network = "${google_compute_network.container_network.name}" + subnetwork = "${google_compute_subnetwork.container_subnetwork.name}" initial_node_count = 1 ip_allocation_policy { - use_ip_aliases = true - create_subnetwork = true - subnetwork_name = "tf-test-%s" cluster_ipv4_cidr_block = "/16" services_ipv4_cidr_block = "/22" - node_ipv4_cidr_block = "/22" } -}`, cluster, cluster, cluster) -} - -func testAccContainerCluster_withIPAllocationPolicy_createSubnetwork(cluster string) string { - return fmt.Sprintf(` -resource "google_compute_network" "container_network" { - name = "%s-network" - auto_create_subnetworks = false -} - -resource "google_container_cluster" "with_ip_allocation_policy" { - name = "%s" - location = "us-central1-a" - network = "${google_compute_network.container_network.name}" - - initial_node_count = 1 - - ip_allocation_policy { - use_ip_aliases = true - create_subnetwork = true - subnetwork_name = "%s-subnet" - cluster_ipv4_cidr_block = "10.0.0.0/16" - services_ipv4_cidr_block = "10.1.0.0/16" - node_ipv4_cidr_block = "10.2.0.0/16" - } -}`, cluster, cluster, cluster) -} - - -func testAccContainerCluster_withIPAllocationPolicy_createSubnetworkUpdated(cluster string) string { - return fmt.Sprintf(` -resource "google_compute_network" "container_network" { - name = "%s-network" - auto_create_subnetworks = false -} - -resource "google_container_cluster" "with_ip_allocation_policy" { - name = "%s" - location = "us-central1-a" - network = "${google_compute_network.container_network.name}" - subnetwork = "%s-subnet" - - initial_node_count = 1 - - ip_allocation_policy { - use_ip_aliases = true - cluster_ipv4_cidr_block = "10.0.0.0/16" - services_ipv4_cidr_block = "10.1.0.0/16" - } -}`, cluster, cluster, cluster) -} - -func testAccContainerCluster_withIPAllocationPolicy_explicitEmpty(cluster string) string { - return fmt.Sprintf(` -resource "google_compute_network" "container_network" { - name = "%s-network" - auto_create_subnetworks = false -} - -resource "google_container_cluster" "with_ip_allocation_policy" { - name = "%s" - zone = "us-central1-a" - - initial_node_count = 1 - - ip_allocation_policy = [] }`, cluster, cluster) } @@ -3178,7 +2917,7 @@ func testAccContainerCluster_withResourceUsageExportConfig(clusterName, datasetI resource "google_container_cluster" "with_resource_usage_export_config" { name = "cluster-test-%s" - zone = "us-central1-a" + location = "us-central1-a" initial_node_count = 1 %s }`, datasetId, clusterName, resourceUsageConfig) @@ -3197,7 +2936,7 @@ resource "google_compute_subnetwork" "container_subnetwork" { name = "${google_compute_network.container_network.name}" network = "${google_compute_network.container_network.name}" ip_cidr_range = "10.0.36.0/24" - region = "us-central1" + region = "us-central1" private_ip_google_access = true secondary_ip_range { @@ -3212,8 +2951,8 @@ resource "google_compute_subnetwork" "container_subnetwork" { } resource "google_container_cluster" "with_private_cluster" { - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" initial_node_count = 1 network = "${google_compute_network.container_network.name}" @@ -3242,7 +2981,7 @@ resource "google_compute_subnetwork" "container_subnetwork" { name = "${google_compute_network.container_network.name}" network = "${google_compute_network.container_network.name}" ip_cidr_range = "10.0.36.0/24" - region = "us-central1" + region = "us-central1" private_ip_google_access = true secondary_ip_range { @@ -3257,8 
+2996,8 @@ resource "google_compute_subnetwork" "container_subnetwork" { } resource "google_container_cluster" "with_private_cluster" { - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" initial_node_count = 1 network = "${google_compute_network.container_network.name}" @@ -3344,7 +3083,7 @@ resource "google_compute_network" "shared_network" { resource "google_compute_subnetwork" "shared_subnetwork" { name = "test-%s" ip_cidr_range = "10.0.0.0/16" - region = "us-central1" + region = "us-central1" network = "${google_compute_network.shared_network.self_link}" project = "${google_compute_shared_vpc_host_project.host_project.project}" @@ -3361,7 +3100,7 @@ resource "google_compute_subnetwork" "shared_subnetwork" { resource "google_container_cluster" "shared_vpc_cluster" { name = "%s" - zone = "us-central1-a" + location = "us-central1-a" initial_node_count = 1 project = "${google_compute_shared_vpc_service_project.service_project.service_project}" @@ -3388,8 +3127,8 @@ data "google_project" "project" { } resource "google_container_cluster" "with_workload_identity_config" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 workload_identity_config { @@ -3413,8 +3152,8 @@ data "google_project" "project" { } resource "google_container_cluster" "with_workload_identity_config" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 %s @@ -3424,8 +3163,8 @@ resource "google_container_cluster" "with_workload_identity_config" { func testAccContainerCluster_withBinaryAuthorization(clusterName string, enabled bool) string { return fmt.Sprintf(` resource "google_container_cluster" "with_binary_authorization" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 enable_binary_authorization = %v @@ -3437,7 +3176,7 @@ func testAccContainerCluster_withShieldedNodes(clusterName string, enabled bool) return fmt.Sprintf(` resource "google_container_cluster" "with_shielded_nodes" { name = "%s" - zone = "us-central1-a" + location = "us-central1-a" initial_node_count = 1 enable_shielded_nodes = %v @@ -3456,7 +3195,7 @@ resource "google_compute_subnetwork" "container_subnetwork" { name = "${google_compute_network.container_network.name}" network = "${google_compute_network.container_network.name}" ip_cidr_range = "10.0.35.0/24" - region = "us-central1" + region = "us-central1" private_ip_google_access = true secondary_ip_range { @@ -3471,8 +3210,8 @@ resource "google_compute_subnetwork" "container_subnetwork" { } resource "google_container_cluster" "with_flexible_cidr" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 3 network = "${google_compute_network.container_network.name}" @@ -3511,8 +3250,8 @@ resource "google_compute_subnetwork" "container_subnetwork" { } resource "google_container_cluster" "cidr_error_preempt" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" network = "${google_compute_network.container_network.name}" subnetwork = "${google_compute_subnetwork.container_subnetwork.name}" @@ -3532,8 +3271,8 @@ func testAccContainerCluster_withCIDROverlap(initConfig, secondCluster string) s %s resource "google_container_cluster" "cidr_error_overlap" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" network = "${google_compute_network.container_network.name}" subnetwork = 
"${google_compute_subnetwork.container_subnetwork.name}" @@ -3551,8 +3290,8 @@ resource "google_container_cluster" "cidr_error_overlap" { func testAccContainerCluster_withInvalidLocation(location string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_resource_labels" { - name = "invalid-gke-cluster" - zone = "%s" + name = "invalid-gke-cluster" + location = "%s" initial_node_count = 1 } `, location) @@ -3579,8 +3318,8 @@ resource "google_kms_key_ring_iam_policy" "test_key_ring_iam_policy" { } resource "google_container_cluster" "with_database_encryption" { - name = "cluster-test-%[3]s" - zone = "us-central1-a" + name = "cluster-test-%[3]s" + location = "us-central1-a" initial_node_count = 1 database_encryption { @@ -3594,45 +3333,47 @@ resource "google_container_cluster" "with_database_encryption" { func testAccContainerCluster_withMasterAuthorizedNetworksDisabled(clusterName string) string { return fmt.Sprintf(` resource "google_compute_network" "container_network" { - name = "container-net-%s" - auto_create_subnetworks = false + name = "container-net-%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "container_subnetwork" { - name = "${google_compute_network.container_network.name}" - network = "${google_compute_network.container_network.name}" - ip_cidr_range = "10.0.36.0/24" - region = "us-central1" - private_ip_google_access = true - - secondary_ip_range { - range_name = "pod" - ip_cidr_range = "10.0.0.0/19" - } + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } - secondary_ip_range { - range_name = "svc" - ip_cidr_range = "10.0.32.0/22" - } + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } } resource "google_container_cluster" "with_private_cluster" { - name = "cluster-test-%s" - zone = "us-central1-a" - initial_node_count = 1 + name = "cluster-test-%s" + location = "us-central1-a" + initial_node_count = 1 - network = "${google_compute_network.container_network.name}" - subnetwork = "${google_compute_subnetwork.container_subnetwork.name}" + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name - private_cluster_config { - enable_private_endpoint = false - enable_private_nodes = true - master_ipv4_cidr_block = "10.42.0.0/28" - } + private_cluster_config { + enable_private_endpoint = false + enable_private_nodes = true + master_ipv4_cidr_block = "10.42.0.0/28" + } - ip_allocation_policy { - cluster_secondary_range_name = "${google_compute_subnetwork.container_subnetwork.secondary_ip_range.0.range_name}" - services_secondary_range_name = "${google_compute_subnetwork.container_subnetwork.secondary_ip_range.1.range_name}" - } -}`, clusterName, clusterName) + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } +} + +`, clusterName, clusterName) } diff --git a/third_party/terraform/tests/resource_container_node_pool_test.go.erb b/third_party/terraform/tests/resource_container_node_pool_test.go.erb index 013e651aa1a7..f4cd15744b94 100644 --- a/third_party/terraform/tests/resource_container_node_pool_test.go.erb 
+++ b/third_party/terraform/tests/resource_container_node_pool_test.go.erb @@ -653,17 +653,17 @@ func testAccCheckContainerNodePoolDestroy(s *terraform.State) error { } attributes := rs.Primary.Attributes - zone := attributes["zone"] + location := attributes["location"] var err error - if zone != "" { + if location != "" { _, err = config.clientContainer.Projects.Zones.Clusters.NodePools.Get( - config.Project, attributes["zone"], attributes["cluster"], attributes["name"]).Do() + config.Project, attributes["location"], attributes["cluster"], attributes["name"]).Do() } else { name := fmt.Sprintf( "projects/%s/locations/%s/clusters/%s/nodePools/%s", config.Project, - attributes["region"], + attributes["location"], attributes["cluster"], attributes["name"], ) @@ -778,8 +778,8 @@ resource "google_compute_subnetwork" "container_subnetwork" { } resource "google_container_cluster" "cluster" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 3 network = "${google_compute_network.container_network.name}" @@ -800,8 +800,8 @@ resource "google_container_cluster" "cluster" { } resource "google_container_node_pool" "np" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" max_pods_per_node = 30 initial_node_count = 2 @@ -827,13 +827,13 @@ resource "google_container_node_pool" "np" { func testAccContainerNodePool_namePrefix(cluster, np string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 3 } resource "google_container_node_pool" "np" { name_prefix = "%s" - zone = "us-central1-a" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 2 }`, cluster, np) @@ -842,13 +842,13 @@ resource "google_container_node_pool" "np" { func testAccContainerNodePool_noName(cluster string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 3 } resource "google_container_node_pool" "np" { - zone = "us-central1-a" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 2 }`, cluster) @@ -857,14 +857,14 @@ resource "google_container_node_pool" "np" { func testAccContainerNodePool_regionalAutoscaling(cluster, np string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { - name = "%s" - region = "us-central1" + name = "%s" + location = "us-central1" initial_node_count = 3 } resource "google_container_node_pool" "np" { - name = "%s" - region = "us-central1" + name = "%s" + location = "us-central1" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 2 autoscaling { @@ -877,14 +877,14 @@ resource "google_container_node_pool" "np" { func testAccContainerNodePool_autoscaling(cluster, np string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 3 } resource "google_container_node_pool" "np" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 2 autoscaling { @@ -897,14 +897,14 @@ resource "google_container_node_pool" "np" { func 
testAccContainerNodePool_updateAutoscaling(cluster, np string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 3 } resource "google_container_node_pool" "np" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 2 autoscaling { @@ -914,23 +914,22 @@ resource "google_container_node_pool" "np" { }`, cluster, np) } -// This uses zone/additional_zones over location/node_locations to ensure we can update from old -> new func testAccContainerNodePool_additionalZones(cluster, nodePool string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 - additional_zones = [ + node_locations = [ "us-central1-b", "us-central1-c" ] } resource "google_container_node_pool" "np" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" node_count = 2 }`, cluster, nodePool) @@ -961,13 +960,13 @@ func testAccContainerNodePool_withManagement(cluster, nodePool, management strin return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" - zone = "us-central1-a" + location = "us-central1-a" initial_node_count = 1 } resource "google_container_node_pool" "np_with_management" { name = "%s" - zone = "us-central1-a" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 @@ -984,13 +983,13 @@ resource "google_container_node_pool" "np_with_management" { func testAccContainerNodePool_withNodeConfig(cluster, nodePool string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 } resource "google_container_node_pool" "np_with_node_config" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 node_config { @@ -1026,13 +1025,13 @@ resource "google_container_node_pool" "np_with_node_config" { func testAccContainerNodePool_withNodeConfigUpdate(cluster, nodePool string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 } resource "google_container_node_pool" "np_with_node_config" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 node_config { @@ -1069,19 +1068,19 @@ resource "google_container_node_pool" "np_with_node_config" { func testAccContainerNodePool_withWorkloadMetadataConfig() string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "cluster" { name = "tf-cluster-nodepool-test-%s" - zone = "us-central1-a" + location = "us-central1-a" initial_node_count = 1 min_master_version = "${data.google_container_engine_versions.central1a.latest_master_version}" } resource "google_container_node_pool" "with_workload_metadata_config" { - name = "tf-nodepool-test-%s" - zone = "us-central1-a" + name = "tf-nodepool-test-%s" + location = 
"us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 node_config { @@ -1105,12 +1104,12 @@ data "google_project" "project" { } data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "cluster" { name = "tf-cluster-nodepool-test-%s" - zone = "us-central1-a" + location = "us-central1-a" initial_node_count = 1 min_master_version = "${data.google_container_engine_versions.central1a.latest_master_version}" @@ -1120,8 +1119,8 @@ resource "google_container_cluster" "cluster" { } resource "google_container_node_pool" "with_workload_metadata_config" { - name = "tf-nodepool-test-%s" - zone = "us-central1-a" + name = "tf-nodepool-test-%s" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 node_config { @@ -1142,19 +1141,19 @@ resource "google_container_node_pool" "with_workload_metadata_config" { func testAccContainerNodePool_withSandboxConfig() string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "cluster" { name = "tf-cluster-nodepool-test-%s" - zone = "us-central1-a" + location = "us-central1-a" initial_node_count = 1 min_master_version = "${data.google_container_engine_versions.central1a.latest_master_version}" } resource "google_container_node_pool" "with_sandbox_config" { name = "tf-nodepool-test-%s" - zone = "us-central1-a" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 node_config { @@ -1175,20 +1174,20 @@ resource "google_container_node_pool" "with_sandbox_config" { func testAccContainerNodePool_withGPU() string { return fmt.Sprintf(` data "google_container_engine_versions" "central1c" { - zone = "us-central1-c" + location = "us-central1-c" } resource "google_container_cluster" "cluster" { - name = "tf-cluster-nodepool-test-%s" - zone = "us-central1-c" + name = "tf-cluster-nodepool-test-%s" + location = "us-central1-c" initial_node_count = 1 node_version = "${data.google_container_engine_versions.central1c.latest_node_version}" min_master_version = "${data.google_container_engine_versions.central1c.latest_master_version}" } resource "google_container_node_pool" "np_with_gpu" { - name = "tf-nodepool-test-%s" - zone = "us-central1-c" + name = "tf-nodepool-test-%s" + location = "us-central1-c" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 @@ -1221,13 +1220,13 @@ resource "google_container_node_pool" "np_with_gpu" { func testAccContainerNodePool_withNodeConfigScopeAlias() string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { - name = "tf-cluster-nodepool-test-%s" - zone = "us-central1-a" + name = "tf-cluster-nodepool-test-%s" + location = "us-central1-a" initial_node_count = 1 } resource "google_container_node_pool" "np_with_node_config_scope_alias" { name = "tf-nodepool-test-%s" - zone = "us-central1-a" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 node_config { @@ -1241,19 +1240,19 @@ resource "google_container_node_pool" "np_with_node_config_scope_alias" { func testAccContainerNodePool_version(cluster, np string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "cluster" { - name = "%s" - zone = 
"us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 min_master_version = "${data.google_container_engine_versions.central1a.latest_master_version}" } resource "google_container_node_pool" "np" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 @@ -1264,19 +1263,19 @@ resource "google_container_node_pool" "np" { func testAccContainerNodePool_updateVersion(cluster, np string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "cluster" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 min_master_version = "${data.google_container_engine_versions.central1a.latest_master_version}" } resource "google_container_node_pool" "np" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 @@ -1288,13 +1287,13 @@ func testAccContainerNodePool_012_ConfigModeAttr1(cluster, np string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" initial_node_count = 3 } resource "google_container_node_pool" "np" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 @@ -1311,13 +1310,13 @@ func testAccContainerNodePool_012_ConfigModeAttr2(cluster, np string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" initial_node_count = 3 } resource "google_container_node_pool" "np" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 @@ -1331,13 +1330,13 @@ func testAccContainerNodePool_EmptyGuestAccelerator(cluster, np string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" initial_node_count = 3 } resource "google_container_node_pool" "np" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 @@ -1354,13 +1353,13 @@ func testAccContainerNodePool_PartialEmptyGuestAccelerator(cluster, np string, c return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" initial_node_count = 3 } resource "google_container_node_pool" "np" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 @@ -1382,13 +1381,13 @@ func testAccContainerNodePool_PartialEmptyGuestAccelerator2(cluster, np string) return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" initial_node_count = 3 } resource "google_container_node_pool" "np" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 diff --git a/third_party/terraform/tests/resource_dataproc_cluster_test.go.erb b/third_party/terraform/tests/resource_dataproc_cluster_test.go.erb index e5f2b2a184ee..0aeaa2a87291 100644 
--- a/third_party/terraform/tests/resource_dataproc_cluster_test.go.erb +++ b/third_party/terraform/tests/resource_dataproc_cluster_test.go.erb @@ -624,8 +624,10 @@ func testAccCheckDataprocClusterDestroy() resource.TestCheckFunc { return err } + parts := strings.Split(rs.Primary.ID, "/") + clusterId := parts[len(parts)-1] _, err = config.clientDataprocBeta.Projects.Regions.Clusters.Get( - project, attributes["region"], rs.Primary.ID).Do() + project, attributes["region"], clusterId).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusNotFound { @@ -786,14 +788,16 @@ func testAccCheckDataprocClusterExists(n string, cluster *dataproc.Cluster) reso return err } + parts := strings.Split(rs.Primary.ID, "/") + clusterId := parts[len(parts)-1] found, err := config.clientDataprocBeta.Projects.Regions.Clusters.Get( - project, rs.Primary.Attributes["region"], rs.Primary.ID).Do() + project, rs.Primary.Attributes["region"], clusterId).Do() if err != nil { return err } - if found.ClusterName != rs.Primary.ID { - return fmt.Errorf("Dataproc cluster %s not found, found %s instead", rs.Primary.ID, cluster.ClusterName) + if found.ClusterName != clusterId { + return fmt.Errorf("Dataproc cluster %s not found, found %s instead", clusterId, cluster.ClusterName) } *cluster = *found diff --git a/third_party/terraform/tests/resource_dataproc_job_test.go b/third_party/terraform/tests/resource_dataproc_job_test.go index fcaa33bce425..b9f8bb04dffe 100644 --- a/third_party/terraform/tests/resource_dataproc_job_test.go +++ b/third_party/terraform/tests/resource_dataproc_job_test.go @@ -7,7 +7,7 @@ import ( "strings" "testing" - "regexp" + // "regexp" "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" @@ -22,21 +22,22 @@ type jobTestField struct { gcp_attr interface{} } -func TestAccDataprocJob_failForMissingJobConfig(t *testing.T) { - t.Parallel() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDataprocJobDestroy, - Steps: []resource.TestStep{ - { - Config: testAccDataprocJob_missingJobConf(), - ExpectError: regexp.MustCompile("You must define and configure exactly one xxx_config block"), - }, - }, - }) -} +// TODO (mbang): Test `ExactlyOneOf` here +// func TestAccDataprocJob_failForMissingJobConfig(t *testing.T) { +// t.Parallel() + +// resource.Test(t, resource.TestCase{ +// PreCheck: func() { testAccPreCheck(t) }, +// Providers: testAccProviders, +// CheckDestroy: testAccCheckDataprocJobDestroy, +// Steps: []resource.TestStep{ +// { +// Config: testAccDataprocJob_missingJobConf(), +// ExpectError: regexp.MustCompile("You must define and configure exactly one xxx_config block"), +// }, +// }, +// }) +// } func TestAccDataprocJob_updatable(t *testing.T) { t.Parallel() @@ -283,8 +284,10 @@ func testAccCheckDataprocJobDestroy(s *terraform.State) error { return err } + parts := strings.Split(rs.Primary.ID, "/") + job_id := parts[len(parts)-1] _, err = config.clientDataproc.Projects.Regions.Jobs.Get( - project, attributes["region"], rs.Primary.ID).Do() + project, attributes["region"], job_id).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { return nil @@ -367,7 +370,8 @@ func testAccCheckDataprocJobExists(n string, job *dataproc.Job) resource.TestChe } config := testAccProvider.Meta().(*Config) - jobId := s.RootModule().Resources[n].Primary.ID + parts := 
strings.Split(s.RootModule().Resources[n].Primary.ID, "/") + jobId := parts[len(parts)-1] project, err := getTestProject(s.RootModule().Resources[n].Primary, config) if err != nil { return err @@ -469,16 +473,17 @@ func testAccCheckDataprocJobAttrMatch(n, jobType string, job *dataproc.Job) reso } } -func testAccDataprocJob_missingJobConf() string { - return ` -resource "google_dataproc_job" "missing_config" { - placement { - cluster_name = "na" - } - - force_delete = true -}` -} +// TODO (mbang): Test `ExactlyOneOf` here +// func testAccDataprocJob_missingJobConf() string { +// return ` +// resource "google_dataproc_job" "missing_config" { +// placement { +// cluster_name = "na" +// } + +// force_delete = true +// }` +// } var singleNodeClusterConfig = ` resource "google_dataproc_cluster" "basic" { diff --git a/third_party/terraform/tests/resource_google_project_service_test.go b/third_party/terraform/tests/resource_google_project_service_test.go index 6d8dfd81ff3f..08e1484c448f 100644 --- a/third_party/terraform/tests/resource_google_project_service_test.go +++ b/third_party/terraform/tests/resource_google_project_service_test.go @@ -142,7 +142,7 @@ func TestAccProjectService_renamedService(t *testing.T) { Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccProjectService_single("bigquery-json.googleapis.com", pid, pname, org), + Config: testAccProjectService_single("bigquery.googleapis.com", pid, pname, org), }, { ResourceName: "google_project_service.test", diff --git a/third_party/terraform/tests/resource_google_project_services_test.go b/third_party/terraform/tests/resource_google_project_services_test.go deleted file mode 100644 index 3b351a1cef0c..000000000000 --- a/third_party/terraform/tests/resource_google_project_services_test.go +++ /dev/null @@ -1,440 +0,0 @@ -package google - -import ( - "bytes" - "fmt" - "reflect" - "sort" - "testing" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -// Test that services can be enabled and disabled on a project -func TestAccProjectServices_basic(t *testing.T) { - t.Parallel() - - org := getTestOrgFromEnv(t) - pid := "terraform-" + acctest.RandString(10) - services1 := []string{"logging.googleapis.com", "cloudresourcemanager.googleapis.com"} - services2 := []string{"cloudresourcemanager.googleapis.com"} - oobService := "logging.googleapis.com" - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - // Create a new project with some services - { - Config: testAccProjectAssociateServicesBasic(services1, pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testProjectServicesMatch(services1, pid), - ), - }, - // Update services to remove one - { - Config: testAccProjectAssociateServicesBasic(services2, pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testProjectServicesMatch(services2, pid), - ), - }, - // Add a service out-of-band and ensure it is removed - { - PreConfig: func() { - config := testAccProvider.Meta().(*Config) - if err := enableServiceUsageProjectServices([]string{oobService}, pid, config, time.Minute*20); err != nil { - t.Fatalf("Error enabling %q: %v", oobService, err) - } - }, - Config: testAccProjectAssociateServicesBasic(services2, pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testProjectServicesMatch(services2, 
pid), - ), - }, - { - ResourceName: "google_project_services.acceptance", - ImportState: true, - ImportStateId: pid, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"disable_on_destroy"}, - }, - }, - }) -} - -// Test that services are authoritative when a project has existing -// services not represented in config -func TestAccProjectServices_authoritative(t *testing.T) { - t.Parallel() - - org := getTestOrgFromEnv(t) - pid := "terraform-" + acctest.RandString(10) - services := []string{"cloudresourcemanager.googleapis.com"} - oobService := "logging.googleapis.com" - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - // Create a new project with no services - { - Config: testAccProject_create(pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleProjectExists("google_project.acceptance", pid), - ), - }, - // Add a service out-of-band, then apply a config that creates a service. - // It should remove the out-of-band service. - { - PreConfig: func() { - config := testAccProvider.Meta().(*Config) - if err := enableServiceUsageProjectServices([]string{oobService}, pid, config, time.Minute*20); err != nil { - t.Fatalf("Error enabling %q: %v", oobService, err) - } - }, - Config: testAccProjectAssociateServicesBasic(services, pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testProjectServicesMatch(services, pid), - ), - }, - }, - }) -} - -// Test that services are authoritative when a project has existing -// services, some which are represented in the config and others -// that are not -func TestAccProjectServices_authoritative2(t *testing.T) { - t.Parallel() - - org := getTestOrgFromEnv(t) - pid := "terraform-" + acctest.RandString(10) - oobServices := []string{"logging.googleapis.com", "cloudresourcemanager.googleapis.com"} - services := []string{"logging.googleapis.com"} - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - // Create a new project with no services - { - Config: testAccProject_create(pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleProjectExists("google_project.acceptance", pid), - ), - }, - // Add a service out-of-band, then apply a config that creates a service. - // It should remove the out-of-band service. - { - PreConfig: func() { - config := testAccProvider.Meta().(*Config) - for _, s := range oobServices { - if err := enableServiceUsageProjectServices([]string{s}, pid, config, time.Minute*20); err != nil { - t.Fatalf("Error enabling %q: %v", s, err) - } - } - }, - Config: testAccProjectAssociateServicesBasic(services, pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testProjectServicesMatch(services, pid), - ), - }, - }, - }) -} - -// Test that services that can't be enabled on their own (such as dataproc-control.googleapis.com) -// don't end up causing diffs when they are enabled as a side-effect of a different service's -// enablement. 
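A quick illustration of the ID-parsing pattern introduced in the dataproc destroy/exists checks above: the resource IDs now carry a full path rather than a bare name, so the tests keep only the segment after the last `/` before calling the API. This is a minimal sketch of that pattern only; the `lastIDSegment` helper name and the sample IDs are illustrative, not part of the provider.

```go
package main

import (
	"fmt"
	"strings"
)

// lastIDSegment returns the trailing path segment of a resource ID,
// e.g. a path-style cluster ID ending in ".../clusters/my-cluster"
// yields "my-cluster". A bare name with no "/" is returned unchanged.
func lastIDSegment(id string) string {
	parts := strings.Split(id, "/")
	return parts[len(parts)-1]
}

func main() {
	// Hypothetical IDs, shown only to demonstrate the splitting behavior.
	fmt.Println(lastIDSegment("projects/my-proj/regions/us-central1/clusters/tf-cluster")) // tf-cluster
	fmt.Println(lastIDSegment("tf-job-1234"))                                              // tf-job-1234
}
```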
-func TestAccProjectServices_ignoreUnenablableServices(t *testing.T) { - t.Parallel() - - org := getTestOrgFromEnv(t) - billingId := getTestBillingAccountFromEnv(t) - pid := "terraform-" + acctest.RandString(10) - services := []string{ - "dataproc.googleapis.com", - // The following services are enabled as a side-effect of dataproc's enablement - "storage-component.googleapis.com", - "deploymentmanager.googleapis.com", - "replicapool.googleapis.com", - "replicapoolupdater.googleapis.com", - "resourceviews.googleapis.com", - "compute.googleapis.com", - "container.googleapis.com", - "containerregistry.googleapis.com", - "storage-api.googleapis.com", - "pubsub.googleapis.com", - "oslogin.googleapis.com", - "bigquery-json.googleapis.com", - "bigquerystorage.googleapis.com", - "iam.googleapis.com", - "iamcredentials.googleapis.com", - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccProjectAssociateServicesBasic_withBilling(services, pid, pname, org, billingId), - Check: resource.ComposeTestCheckFunc(testProjectServicesMatch(services, pid)), - }, - }, - }) -} - -func TestAccProjectServices_pagination(t *testing.T) { - t.Parallel() - - org := getTestOrgFromEnv(t) - billingId := getTestBillingAccountFromEnv(t) - pid := "terraform-" + acctest.RandString(10) - - // we need at least 50 services (doesn't matter what they are) to exercise the - // pagination handling code. - services := []string{ - "actions.googleapis.com", - "appengine.googleapis.com", - "appengineflex.googleapis.com", - "bigquery-json.googleapis.com", - "bigquerydatatransfer.googleapis.com", - "bigquerystorage.googleapis.com", - "bigtableadmin.googleapis.com", - "bigtabletableadmin.googleapis.com", - "cloudbuild.googleapis.com", - "clouderrorreporting.googleapis.com", - "cloudfunctions.googleapis.com", - "cloudiot.googleapis.com", - "cloudkms.googleapis.com", - "cloudmonitoring.googleapis.com", - "cloudresourcemanager.googleapis.com", - "cloudtrace.googleapis.com", - "compute.googleapis.com", - "container.googleapis.com", - "containerregistry.googleapis.com", - "dataflow.googleapis.com", - "dataproc.googleapis.com", - "datastore.googleapis.com", - "deploymentmanager.googleapis.com", - "dialogflow.googleapis.com", - "dns.googleapis.com", - "endpoints.googleapis.com", - "firebaserules.googleapis.com", - "firestore.googleapis.com", - "genomics.googleapis.com", - "iam.googleapis.com", - "iamcredentials.googleapis.com", - "language.googleapis.com", - "logging.googleapis.com", - "ml.googleapis.com", - "monitoring.googleapis.com", - "oslogin.googleapis.com", - "pubsub.googleapis.com", - "replicapool.googleapis.com", - "replicapoolupdater.googleapis.com", - "resourceviews.googleapis.com", - "runtimeconfig.googleapis.com", - "servicecontrol.googleapis.com", - "servicemanagement.googleapis.com", - "sourcerepo.googleapis.com", - "spanner.googleapis.com", - "speech.googleapis.com", - "sql-component.googleapis.com", - "storage-api.googleapis.com", - "storage-component.googleapis.com", - "storagetransfer.googleapis.com", - "testing.googleapis.com", - "toolresults.googleapis.com", - "translate.googleapis.com", - "videointelligence.googleapis.com", - "vision.googleapis.com", - "zync.googleapis.com", - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccProjectAssociateServicesBasic_withBilling(services, pid, pname, org, 
billingId), - Check: resource.ComposeTestCheckFunc( - testProjectServicesMatch(services, pid), - ), - }, - }, - }) -} - -func TestAccProjectServices_renamedServices(t *testing.T) { - t.Parallel() - - org := getTestOrgFromEnv(t) - pid := "terraform-" + acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - // create new - Config: testAccProjectAssociateServicesBasic([]string{ - "bigquery.googleapis.com", - "bigquerystorage.googleapis.com", - "iam.googleapis.com", - "iamcredentials.googleapis.com", - "oslogin.googleapis.com", - }, pid, pname, org), - }, - { - // transition to old - Config: testAccProjectAssociateServicesBasic([]string{ - "bigquery-json.googleapis.com", - "bigquerystorage.googleapis.com", - "iam.googleapis.com", - "iamcredentials.googleapis.com", - "oslogin.googleapis.com", - }, pid, pname, org), - }, - { - // transition to new - Config: testAccProjectAssociateServicesBasic([]string{ - "bigquery.googleapis.com", - "bigquerystorage.googleapis.com", - "iam.googleapis.com", - "iamcredentials.googleapis.com", - "oslogin.googleapis.com", - }, pid, pname, org), - }, - { - // remove new - Config: testAccProjectAssociateServicesBasic([]string{ - "iam.googleapis.com", - "iamcredentials.googleapis.com", - "oslogin.googleapis.com", - }, pid, pname, org), - }, - { - // create both - Config: testAccProjectAssociateServicesBasic([]string{ - "bigquery.googleapis.com", - "bigquery-json.googleapis.com", - "bigquerystorage.googleapis.com", - "iam.googleapis.com", - "iamcredentials.googleapis.com", - "oslogin.googleapis.com", - }, pid, pname, org), - }, - { - // remove new - Config: testAccProjectAssociateServicesBasic([]string{ - "bigquery-json.googleapis.com", - "bigquerystorage.googleapis.com", - "iam.googleapis.com", - "iamcredentials.googleapis.com", - "oslogin.googleapis.com", - }, pid, pname, org), - }, - { - // import imports old - ResourceName: "google_project_services.acceptance", - ImportState: true, - ImportStateId: pid, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"disable_on_destroy"}, - }, - { - // transition to both - Config: testAccProjectAssociateServicesBasic([]string{ - "bigquery.googleapis.com", - "bigquery-json.googleapis.com", - "bigquerystorage.googleapis.com", - "iam.googleapis.com", - "iamcredentials.googleapis.com", - "oslogin.googleapis.com", - }, pid, pname, org), - }, - { - // remove both - Config: testAccProjectAssociateServicesBasic([]string{ - "iam.googleapis.com", - "iamcredentials.googleapis.com", - "oslogin.googleapis.com", - }, pid, pname, org), - }, - }, - }) -} - -func testAccProjectAssociateServicesBasic(services []string, pid, name, org string) string { - return fmt.Sprintf(` -resource "google_project" "acceptance" { - project_id = "%s" - name = "%s" - org_id = "%s" -} -resource "google_project_services" "acceptance" { - project = "${google_project.acceptance.project_id}" - services = [%s] - disable_on_destroy = true -} -`, pid, name, org, testStringsToString(services)) -} - -func testAccProjectAssociateServicesBasic_withBilling(services []string, pid, name, org, billing string) string { - return fmt.Sprintf(` -resource "google_project" "acceptance" { - project_id = "%s" - name = "%s" - org_id = "%s" - billing_account = "%s" -} -resource "google_project_services" "acceptance" { - project = "${google_project.acceptance.project_id}" - services = [%s] - disable_on_destroy = false -} -`, pid, name, org, billing, 
testStringsToString(services)) -} - -func testProjectServicesMatch(services []string, pid string) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - currentlyEnabled, err := listCurrentlyEnabledServices(pid, config, time.Minute*10) - if err != nil { - return fmt.Errorf("Error listing services for project %q: %v", pid, err) - } - - servicesSet := golangSetFromStringSlice(services) - // add renamed service aliases because listCurrentlyEnabledServices will - // have both - for k := range servicesSet { - if v, ok := renamedServicesByOldAndNewServiceNames[k]; ok { - servicesSet[v] = struct{}{} - } - } - - services = stringSliceFromGolangSet(servicesSet) - - apiServices := stringSliceFromGolangSet(currentlyEnabled) - sort.Strings(services) - sort.Strings(apiServices) - if !reflect.DeepEqual(services, apiServices) { - return fmt.Errorf("Services in config (%v) do not exactly match services returned by API (%v)", services, apiServices) - } - - return nil - } -} - -func testStringsToString(s []string) string { - var b bytes.Buffer - for i, v := range s { - b.WriteString(fmt.Sprintf("\"%s\"", v)) - if i < len(s)-1 { - b.WriteString(",") - } - } - return b.String() -} diff --git a/third_party/terraform/tests/resource_google_project_test.go b/third_party/terraform/tests/resource_google_project_test.go index 39ac099fe820..6df0cbcc17ce 100644 --- a/third_party/terraform/tests/resource_google_project_test.go +++ b/third_party/terraform/tests/resource_google_project_test.go @@ -200,8 +200,9 @@ func testAccCheckGoogleProjectExists(r, pid string) resource.TestCheckFunc { return fmt.Errorf("No ID is set") } - if rs.Primary.ID != pid { - return fmt.Errorf("Expected project %q to match ID %q in state", pid, rs.Primary.ID) + projectId := fmt.Sprintf("projects/%s", pid) + if rs.Primary.ID != projectId { + return fmt.Errorf("Expected project %q to match ID %q in state", projectId, rs.Primary.ID) } return nil diff --git a/third_party/terraform/tests/resource_pubsub_subscription_test.go b/third_party/terraform/tests/resource_pubsub_subscription_test.go index 71e3cef79e57..d7c7e2d8a2b6 100644 --- a/third_party/terraform/tests/resource_pubsub_subscription_test.go +++ b/third_party/terraform/tests/resource_pubsub_subscription_test.go @@ -12,7 +12,7 @@ func TestAccPubsubSubscription_emptyTTL(t *testing.T) { t.Parallel() topic := fmt.Sprintf("tf-test-topic-%s", acctest.RandString(10)) - subscription := fmt.Sprintf("projects/%s/subscriptions/tf-test-sub-%s", getTestProjectFromEnv(), acctest.RandString(10)) + subscription := fmt.Sprintf("tf-test-sub-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -32,11 +32,11 @@ func TestAccPubsubSubscription_emptyTTL(t *testing.T) { }) } -func TestAccPubsubSubscription_fullName(t *testing.T) { +func TestAccPubsubSubscription_basic(t *testing.T) { t.Parallel() topic := fmt.Sprintf("tf-test-topic-%s", acctest.RandString(10)) - subscription := fmt.Sprintf("projects/%s/subscriptions/tf-test-sub-%s", getTestProjectFromEnv(), acctest.RandString(10)) + subscription := fmt.Sprintf("tf-test-sub-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -44,7 +44,7 @@ func TestAccPubsubSubscription_fullName(t *testing.T) { CheckDestroy: testAccCheckPubsubSubscriptionDestroy, Steps: []resource.TestStep{ { - Config: testAccPubsubSubscription_fullName(topic, subscription, "bar", 20), + Config: 
testAccPubsubSubscription_basic(topic, subscription, "bar", 20), }, { ResourceName: "google_pubsub_subscription.foo", @@ -69,28 +69,16 @@ func TestAccPubsubSubscription_update(t *testing.T) { CheckDestroy: testAccCheckPubsubSubscriptionDestroy, Steps: []resource.TestStep{ { - Config: testAccPubsubSubscription_fullName(topic, subscriptionLong, "bar", 20), + Config: testAccPubsubSubscription_basic(topic, subscriptionShort, "bar", 20), }, { ResourceName: "google_pubsub_subscription.foo", - ImportStateId: subscriptionLong, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPubsubSubscription_fullName(topic, subscriptionLong, "baz", 30), - Check: resource.TestCheckResourceAttr( - "google_pubsub_subscription.foo", "path", subscriptionLong, - ), - }, - { - ResourceName: "google_pubsub_subscription.foo", - ImportStateId: subscriptionLong, + ImportStateId: subscriptionShort, ImportState: true, ImportStateVerify: true, }, { - Config: testAccPubsubSubscription_fullName(topic, subscriptionShort, "baz", 30), + Config: testAccPubsubSubscription_basic(topic, subscriptionShort, "baz", 30), Check: resource.TestCheckResourceAttr( "google_pubsub_subscription.foo", "path", subscriptionLong, ), @@ -109,7 +97,7 @@ func TestAccPubsubSubscription_push(t *testing.T) { t.Parallel() topicFoo := fmt.Sprintf("tf-test-topic-foo-%s", acctest.RandString(10)) - subscription := fmt.Sprintf("projects/%s/subscriptions/tf-test-topic-foo-%s", getTestProjectFromEnv(), acctest.RandString(10)) + subscription := fmt.Sprintf("tf-test-topic-foo-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -142,7 +130,9 @@ resource "google_pubsub_subscription" "foo" { message_retention_duration = "1200s" retain_acked_messages = true ack_deadline_seconds = 20 - expiration_policy {} + expiration_policy { + ttl = "" + } } `, topic, subscription) } @@ -183,7 +173,7 @@ resource "google_pubsub_subscription" "foo" { `, topicFoo, subscription) } -func testAccPubsubSubscription_fullName(topic, subscription, label string, deadline int) string { +func testAccPubsubSubscription_basic(topic, subscription, label string, deadline int) string { return fmt.Sprintf(` resource "google_pubsub_topic" "foo" { name = "%s" diff --git a/third_party/terraform/tests/resource_storage_bucket_test.go b/third_party/terraform/tests/resource_storage_bucket_test.go index 94e2081522e4..56663df37769 100644 --- a/third_party/terraform/tests/resource_storage_bucket_test.go +++ b/third_party/terraform/tests/resource_storage_bucket_test.go @@ -4,6 +4,8 @@ import ( "bytes" "fmt" "log" + "regexp" + "strings" "testing" "time" @@ -165,31 +167,12 @@ func TestAccStorageBucket_lifecycleRuleStateLive(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccStorageBucketDestroy, Steps: []resource.TestStep{ - { - Config: testAccStorageBucket_lifecycleRule_IsLiveTrue(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckStorageBucketExists( - "google_storage_bucket.bucket", bucketName, &bucket), - testAccCheckStorageBucketLifecycleConditionState(googleapi.Bool(true), &bucket), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", attrPrefix+"is_live", "true"), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", attrPrefix+"with_state", "LIVE"), - ), - }, - { - ResourceName: "google_storage_bucket.bucket", - ImportState: true, - ImportStateVerify: true, - }, { Config: testAccStorageBucket_lifecycleRule_withStateLive(bucketName), Check: 
resource.ComposeTestCheckFunc( testAccCheckStorageBucketExists( "google_storage_bucket.bucket", bucketName, &bucket), testAccCheckStorageBucketLifecycleConditionState(googleapi.Bool(true), &bucket), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", attrPrefix+"is_live", "true"), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", attrPrefix+"with_state", "LIVE"), ), @@ -226,28 +209,7 @@ func TestAccStorageBucket_lifecycleRuleStateArchived(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckStorageBucketExists( "google_storage_bucket.bucket", bucketName, &bucket), - testAccCheckStorageBucketLifecycleConditionState(googleapi.Bool(false), &bucket), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", attrPrefix+"is_live", "false"), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", attrPrefix+"with_state", "ARCHIVED"), - ), - }, - { - ResourceName: "google_storage_bucket.bucket", - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccStorageBucket_lifecycleRule_isLiveFalse(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckStorageBucketExists( - "google_storage_bucket.bucket", bucketName, &bucket), - testAccCheckStorageBucketLifecycleConditionState(googleapi.Bool(false), &bucket), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", attrPrefix+"is_live", "false"), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", attrPrefix+"with_state", "ARCHIVED"), + testAccCheckStorageBucketLifecycleConditionState(nil, &bucket), ), }, { @@ -261,8 +223,6 @@ func TestAccStorageBucket_lifecycleRuleStateArchived(t *testing.T) { testAccCheckStorageBucketExists( "google_storage_bucket.bucket", bucketName, &bucket), testAccCheckStorageBucketLifecycleConditionState(googleapi.Bool(false), &bucket), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", attrPrefix+"is_live", "false"), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", attrPrefix+"with_state", "ARCHIVED"), ), @@ -870,11 +830,27 @@ func TestAccStorageBucket_website(t *testing.T) { bucketSuffix := acctest.RandomWithPrefix("tf-website-test") + websiteKeys := []string{"website.0.main_page_suffix", "website.0.not_found_page"} + errMsg := fmt.Sprintf("one of `%s` must be specified", strings.Join(websiteKeys, ",")) + fullErr := fmt.Sprintf("config is invalid: 2 problems:\n\n- \"%s\": %s\n- \"%s\": %s", websiteKeys[0], errMsg, websiteKeys[1], errMsg) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccStorageBucketDestroy, Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_websiteNoAttributes(bucketSuffix), + ExpectError: regexp.MustCompile(fullErr), + }, + { + Config: testAccStorageBucket_websiteOneAttribute(bucketSuffix), + }, + { + ResourceName: "google_storage_bucket.website", + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccStorageBucket_website(bucketSuffix), }, @@ -1302,24 +1278,6 @@ resource "google_storage_bucket" "bucket" { `, bucketName) } -func testAccStorageBucket_lifecycleRule_isLiveFalse(bucketName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" - lifecycle_rule { - action { - type = "Delete" - } - - condition { - age = 10 - is_live = false - } - } -} -`, bucketName) -} - func testAccStorageBucket_lifecycleRule_withStateArchived(bucketName string) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" 
{ @@ -1338,24 +1296,6 @@ resource "google_storage_bucket" "bucket" { `, bucketName) } -func testAccStorageBucket_lifecycleRule_IsLiveTrue(bucketName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" - lifecycle_rule { - action { - type = "Delete" - } - - condition { - age = 10 - is_live = true - } - } -} -`, bucketName) -} - func testAccStorageBucket_lifecycleRule_withStateLive(bucketName string) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { @@ -1421,17 +1361,14 @@ resource "google_project" "acceptance" { billing_account = "%{billing_account}" } -resource "google_project_services" "acceptance" { +resource "google_project_service" "acceptance" { project = "${google_project.acceptance.project_id}" - - services = [ - "cloudkms.googleapis.com", - ] + service = "cloudkms.googleapis.com" } resource "google_kms_key_ring" "key_ring" { name = "tf-test-%{random_suffix}" - project = "${google_project_services.acceptance.project}" + project = "${google_project_service.acceptance.project}" location = "us" } @@ -1501,3 +1438,29 @@ resource "google_storage_bucket" "bucket" { } `, bucketName) } + +func testAccStorageBucket_websiteNoAttributes(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "website" { + name = "%s.gcp.tfacc.hashicorptest.com" + location = "US" + storage_class = "MULTI_REGIONAL" + + website {} + } +`, bucketName) +} + +func testAccStorageBucket_websiteOneAttribute(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "website" { + name = "%s.gcp.tfacc.hashicorptest.com" + location = "US" + storage_class = "MULTI_REGIONAL" + + website { + main_page_suffix = "index.html" + } + } +`, bucketName) +} diff --git a/third_party/terraform/utils/batcher.go b/third_party/terraform/utils/batcher.go index 85aa0005cc38..caa6b45958ab 100644 --- a/third_party/terraform/utils/batcher.go +++ b/third_party/terraform/utils/batcher.go @@ -125,7 +125,7 @@ func (b *RequestBatcher) stop() { // may choose to use a key with method if needed to diff GET/read and // POST/create) // -// As an example, for google_project_service and google_project_services, the +// As an example, for google_project_service, the // batcher is called to batch services.batchEnable() calls for a project // $PROJECT. 
The calling code uses the template // "serviceusage:projects/$PROJECT/services:batchEnable", which mirrors the HTTP request: diff --git a/third_party/terraform/utils/config.go.erb b/third_party/terraform/utils/config.go.erb index e08703748e80..46ebc83adc76 100644 --- a/third_party/terraform/utils/config.go.erb +++ b/third_party/terraform/utils/config.go.erb @@ -193,6 +193,7 @@ var defaultClientScopes = []string{ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/ndev.clouddns.readwrite", "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/userinfo.email", } func (c *Config) LoadAndValidate() error { diff --git a/third_party/terraform/utils/iam.go.erb b/third_party/terraform/utils/iam.go.erb index 658661ed13e7..9c290e2642e3 100644 --- a/third_party/terraform/utils/iam.go.erb +++ b/third_party/terraform/utils/iam.go.erb @@ -279,12 +279,6 @@ func listFromIamBindingMap(bm map[iamBindingKey]map[string]struct{}) []*cloudres return rb } -// Flatten AuditConfigs so each service has a single exemption list of log type to members -func mergeAuditConfigs(auditConfigs []*cloudresourcemanager.AuditConfig) []*cloudresourcemanager.AuditConfig { - am := createIamAuditConfigsMap(auditConfigs) - return listFromIamAuditConfigMap(am) -} - // Flattens AuditConfigs so each role has a single Binding with combined members\ func removeAllAuditConfigsWithService(ac []*cloudresourcemanager.AuditConfig, service string) []*cloudresourcemanager.AuditConfig { acMap := createIamAuditConfigsMap(ac) diff --git a/third_party/terraform/utils/iam_test.go.erb b/third_party/terraform/utils/iam_test.go.erb index 33205dd40594..cdff2515c2bd 100644 --- a/third_party/terraform/utils/iam_test.go.erb +++ b/third_party/terraform/utils/iam_test.go.erb @@ -787,167 +787,6 @@ func TestIamListFromIamBindingMap(t *testing.T) { } } -func TestIamMergeAuditConfigs(t *testing.T) { - testCases := []struct { - input []*cloudresourcemanager.AuditConfig - expect []*cloudresourcemanager.AuditConfig - }{ - { - input: []*cloudresourcemanager.AuditConfig{}, - expect: []*cloudresourcemanager.AuditConfig{}, - }, - { - input: []*cloudresourcemanager.AuditConfig{ - { - Service: "foo.googleapis.com", - AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ - { - LogType: "ADMIN_READ", - }, - }, - }, - { - Service: "bar.googleapis.com", - AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ - { - LogType: "ADMIN_READ", - ExemptedMembers: []string{"user-1"}, - }, - }, - }, - }, - expect: []*cloudresourcemanager.AuditConfig{ - { - Service: "foo.googleapis.com", - AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ - { - LogType: "ADMIN_READ", - }, - }, - }, - { - Service: "bar.googleapis.com", - AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ - { - LogType: "ADMIN_READ", - ExemptedMembers: []string{"user-1"}, - }, - }, - }, - }, - }, - { - input: []*cloudresourcemanager.AuditConfig{ - { - Service: "kms.googleapis.com", - AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ - { - LogType: "ADMIN_READ", - }, - { - LogType: "DATA_WRITE", - ExemptedMembers: []string{"user-1"}, - }, - }, - }, - { - Service: "iam.googleapis.com", - AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ - { - LogType: "ADMIN_READ", - ExemptedMembers: []string{"user-1"}, - }, - }, - }, - { - Service: "kms.googleapis.com", - AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ - { - LogType: "DATA_WRITE", - ExemptedMembers: []string{"user-2"}, - }, - }, - }, - { - Service: 
"iam.googleapis.com", - AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ - { - LogType: "ADMIN_READ", - ExemptedMembers: []string{"user-2"}, - }, - }, - }, - { - Service: "foo.googleapis.com", - AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ - { - LogType: "DATA_WRITE", - ExemptedMembers: []string{"user-1"}, - }, - }, - }, - { - Service: "kms.googleapis.com", - AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ - { - LogType: "DATA_WRITE", - ExemptedMembers: []string{"user-3", "user-4"}, - }, - { - LogType: "DATA_READ", - ExemptedMembers: []string{"user-1", "user-2"}, - }, - }, - }, - }, - expect: []*cloudresourcemanager.AuditConfig{ - { - Service: "kms.googleapis.com", - AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ - { - LogType: "ADMIN_READ", - }, - { - LogType: "DATA_WRITE", - ExemptedMembers: []string{"user-1", "user-2", "user-3", "user-4"}, - }, - { - LogType: "DATA_READ", - ExemptedMembers: []string{"user-1", "user-2"}, - }, - }, - }, - { - Service: "iam.googleapis.com", - AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ - { - LogType: "ADMIN_READ", - ExemptedMembers: []string{"user-1", "user-2"}, - }, - }, - }, - { - Service: "foo.googleapis.com", - AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ - { - LogType: "DATA_WRITE", - ExemptedMembers: []string{"user-1"}, - }, - }, - }, - }, - }, - } - - for _, tc := range testCases { - got := mergeAuditConfigs(tc.input) - if !compareAuditConfigs(got, tc.expect) { - t.Errorf("Unexpected value for mergeAuditConfigs(%s).\nActual: %s\nExpected: %s\n", - debugPrintAuditConfigs(tc.input), debugPrintAuditConfigs(got), debugPrintAuditConfigs(tc.expect)) - } - } -} - func TestIamRemoveAllAuditConfigsWithService(t *testing.T) { testCases := []struct { input []*cloudresourcemanager.AuditConfig diff --git a/third_party/terraform/utils/node_config.go.erb b/third_party/terraform/utils/node_config.go.erb index ece5f5d61f20..3b7fb892a3ec 100644 --- a/third_party/terraform/utils/node_config.go.erb +++ b/third_party/terraform/utils/node_config.go.erb @@ -178,7 +178,9 @@ var schemaNodeConfig = &schema.Schema{ // Computed=true because GKE Sandbox will automatically add taints to nodes that can/cannot run sandboxed pods. Computed: true, ForceNew: true, - DiffSuppressFunc: taintDiffSuppress, + // Legacy config mode allows explicitly defining an empty taint. + // See https://www.terraform.io/docs/configuration/attr-as-blocks.html + ConfigMode: schema.SchemaConfigModeAttr, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { @@ -469,20 +471,3 @@ func flattenSandboxConfig(c *containerBeta.SandboxConfig) []map[string]interface return result } <% end -%> - -func taintDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - if strings.HasSuffix(k, "#") { - oldCount, oldErr := strconv.Atoi(old) - newCount, newErr := strconv.Atoi(new) - // If either of them isn't a number somehow, or if there's one that we didn't have before. 
- return oldErr != nil || newErr != nil || oldCount == newCount+1 - } else { - lastDot := strings.LastIndex(k, ".") - taintKey := d.Get(k[:lastDot] + ".key").(string) - if taintKey == "nvidia.com/gpu" { - return true - } else { - return false - } - } -} diff --git a/third_party/terraform/utils/provider.go.erb b/third_party/terraform/utils/provider.go.erb index 0e8cea462bdd..22ad21d756c6 100644 --- a/third_party/terraform/utils/provider.go.erb +++ b/third_party/terraform/utils/provider.go.erb @@ -190,7 +190,6 @@ func Provider() terraform.ResourceProvider { "google_project": dataSourceGoogleProject(), "google_projects": dataSourceGoogleProjects(), "google_project_organization_policy": dataSourceGoogleProjectOrganizationPolicy(), - "google_project_services": dataSourceGoogleProjectServices(), "google_service_account": dataSourceGoogleServiceAccount(), "google_service_account_access_token": dataSourceGoogleServiceAccountAccessToken(), "google_service_account_key": dataSourceGoogleServiceAccountKey(), @@ -372,7 +371,6 @@ end # products.each do "google_project_iam_custom_role": resourceGoogleProjectIamCustomRole(), "google_project_organization_policy": resourceGoogleProjectOrganizationPolicy(), "google_project_usage_export_bucket": resourceProjectUsageBucket(), - "google_project_services": resourceGoogleProjectServices(), "google_pubsub_subscription_iam_binding": ResourceIamBinding(IamPubsubSubscriptionSchema, NewPubsubSubscriptionIamUpdater, PubsubSubscriptionIdParseFunc), "google_pubsub_subscription_iam_member": ResourceIamMember(IamPubsubSubscriptionSchema, NewPubsubSubscriptionIamUpdater, PubsubSubscriptionIdParseFunc), "google_pubsub_subscription_iam_policy": ResourceIamPolicy(IamPubsubSubscriptionSchema, NewPubsubSubscriptionIamUpdater, PubsubSubscriptionIdParseFunc), diff --git a/third_party/terraform/utils/provider_test.go.erb b/third_party/terraform/utils/provider_test.go.erb index 68388caf0347..c5329b53bdbd 100644 --- a/third_party/terraform/utils/provider_test.go.erb +++ b/third_party/terraform/utils/provider_test.go.erb @@ -264,13 +264,6 @@ resource "google_compute_address" "default" { // which causes the create to fail unless user_project_override is set to true. 
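For context on the `node_config.go.erb` change above, which drops the taint `DiffSuppressFunc` in favor of `ConfigMode: schema.SchemaConfigModeAttr`: attributes-as-blocks mode is what lets a config explicitly declare an empty taint list. Below is a rough sketch of such a field using the terraform-plugin-sdk, with a deliberately trimmed-down `taint` block; the exact field set is illustrative, not the provider's full definition.

```go
package main

import (
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// taintSchema sketches a taint list declared in attributes-as-blocks mode,
// so a config can explicitly write `taint = []`.
func taintSchema() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeList,
		Optional: true,
		// Computed because the API may add taints on its own (e.g. for sandboxed nodes).
		Computed: true,
		ForceNew: true,
		// Legacy attributes-as-blocks mode, as in the node_config change above.
		ConfigMode: schema.SchemaConfigModeAttr,
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"key":    {Type: schema.TypeString, Required: true, ForceNew: true},
				"value":  {Type: schema.TypeString, Required: true, ForceNew: true},
				"effect": {Type: schema.TypeString, Required: true, ForceNew: true},
			},
		},
	}
}

func main() { _ = taintSchema() }
```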
func testAccProviderUserProjectOverride(pid, name, org, billing, sa string) string { return fmt.Sprintf(` -provider "google" { - scopes = [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/userinfo.email", - ] -} - resource "google_project" "project-1" { project_id = "%s" name = "%s" diff --git a/third_party/terraform/utils/pubsub_utils.go b/third_party/terraform/utils/pubsub_utils.go index 83573d211e26..592fc57bc309 100644 --- a/third_party/terraform/utils/pubsub_utils.go +++ b/third_party/terraform/utils/pubsub_utils.go @@ -3,18 +3,8 @@ package google import ( "fmt" "regexp" - - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) -func comparePubsubSubscriptionBasename(_, old, new string, _ *schema.ResourceData) bool { - if GetResourceNameFromSelfLink(old) == GetResourceNameFromSelfLink(new) { - return true - } - - return false -} - func getComputedSubscriptionName(project, subscription string) string { match, _ := regexp.MatchString("projects\\/.*\\/subscriptions\\/.*", subscription) if match { diff --git a/third_party/terraform/utils/serviceusage_operation.go b/third_party/terraform/utils/serviceusage_operation.go index 7eea8347d2ad..7e4794fc49ab 100644 --- a/third_party/terraform/utils/serviceusage_operation.go +++ b/third_party/terraform/utils/serviceusage_operation.go @@ -3,6 +3,7 @@ package google import ( "fmt" + "google.golang.org/api/googleapi" "google.golang.org/api/serviceusage/v1" ) @@ -37,3 +38,18 @@ func serviceUsageOperationWaitTime(config *Config, op *serviceusage.Operation, a } return OperationWait(w, activity, timeoutMinutes) } + +func handleServiceUsageRetryableError(err error) error { + if err == nil { + return nil + } + if gerr, ok := err.(*googleapi.Error); ok { + if (gerr.Code == 400 || gerr.Code == 412) && gerr.Message == "Precondition check failed." 
{ + return &googleapi.Error{ + Code: 503, + Message: "api returned \"precondition failed\" while enabling service", + } + } + } + return err +} diff --git a/third_party/terraform/utils/test_utils.go b/third_party/terraform/utils/test_utils.go index 3a9b611ad37b..b00ea13672e7 100644 --- a/third_party/terraform/utils/test_utils.go +++ b/third_party/terraform/utils/test_utils.go @@ -71,6 +71,10 @@ func (d *ResourceDiffMock) GetChange(key string) (interface{}, interface{}) { return d.Before[key], d.After[key] } +func (d *ResourceDiffMock) Get(key string) interface{} { + return d.After[key] +} + func (d *ResourceDiffMock) Clear(key string) error { if d.Cleared == nil { d.Cleared = map[string]struct{}{} diff --git a/third_party/terraform/utils/utils.go.erb b/third_party/terraform/utils/utils.go.erb index 1da2266b4d18..9b540cf3a6e2 100644 --- a/third_party/terraform/utils/utils.go.erb +++ b/third_party/terraform/utils/utils.go.erb @@ -30,6 +30,7 @@ type TerraformResourceData interface { type TerraformResourceDiff interface { GetChange(string) (interface{}, interface{}) + Get(string) interface{} Clear(string) error } diff --git a/third_party/terraform/utils/validation.go b/third_party/terraform/utils/validation.go index 07b169799ad3..3a59f006b4e7 100644 --- a/third_party/terraform/utils/validation.go +++ b/third_party/terraform/utils/validation.go @@ -232,6 +232,14 @@ func validateNonNegativeDuration() schema.SchemaValidateFunc { } } +func validateIpAddress(i interface{}, val string) ([]string, []error) { + ip := net.ParseIP(i.(string)) + if ip == nil { + return nil, []error{fmt.Errorf("could not parse %q to IP address", val)} + } + return nil, nil +} + // StringNotInSlice returns a SchemaValidateFunc which tests if the provided value // is of type string and that it matches none of the element in the invalid slice. // if ignorecase is true, case is ignored. diff --git a/third_party/terraform/website-compiled/google.erb b/third_party/terraform/website-compiled/google.erb index 3ad769d67f7c..6882c950c7ca 100644 --- a/third_party/terraform/website-compiled/google.erb +++ b/third_party/terraform/website-compiled/google.erb @@ -178,9 +178,6 @@ > google_project_organization_policy - > - google_project_services - > google_service_account @@ -431,9 +428,6 @@ > google_project_service - > - google_project_services - > google_project_usage_export_bucket diff --git a/third_party/terraform/website/docs/d/datasource_client_config.html.markdown b/third_party/terraform/website/docs/d/datasource_client_config.html.markdown index 2627a0f86807..4c570679a357 100644 --- a/third_party/terraform/website/docs/d/datasource_client_config.html.markdown +++ b/third_party/terraform/website/docs/d/datasource_client_config.html.markdown @@ -14,29 +14,33 @@ Use this data source to access the configuration of the Google Cloud provider. 
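Stepping back briefly to the `validateIpAddress` helper added in `validation.go` above: it follows the standard `schema.SchemaValidateFunc` shape, so it can be attached directly to a string field. A small sketch under that assumption; the `ip_address` field is made up purely for illustration.

```go
package main

import (
	"fmt"
	"net"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// validateIpAddress mirrors the helper added in validation.go: it rejects
// any value that net.ParseIP cannot parse.
func validateIpAddress(i interface{}, val string) ([]string, []error) {
	if ip := net.ParseIP(i.(string)); ip == nil {
		return nil, []error{fmt.Errorf("could not parse %q to IP address", val)}
	}
	return nil, nil
}

// ipAddressField wires the validator into an illustrative schema field.
func ipAddressField() *schema.Schema {
	return &schema.Schema{
		Type:         schema.TypeString,
		Optional:     true,
		ValidateFunc: validateIpAddress,
	}
}

func main() {
	_, errs := validateIpAddress("not-an-ip", "ip_address")
	fmt.Println(len(errs)) // 1
	_ = ipAddressField()
}
```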
## Example Usage ```tf -data "google_client_config" "current" {} +data "google_client_config" "current" { +} output "project" { - value = "${data.google_client_config.current.project}" + value = data.google_client_config.current.project } ``` ## Example Usage: Configure Kubernetes provider with OAuth2 access token ```tf -data "google_client_config" "default" {} +data "google_client_config" "default" { +} data "google_container_cluster" "my_cluster" { - name = "my-cluster" - zone = "us-east1-a" + name = "my-cluster" + zone = "us-east1-a" } provider "kubernetes" { load_config_file = false - host = "https://${data.google_container_cluster.my_cluster.endpoint}" - token = "${data.google_client_config.default.access_token}" - cluster_ca_certificate = "${base64decode(data.google_container_cluster.my_cluster.master_auth.0.cluster_ca_certificate)}" + host = "https://${data.google_container_cluster.my_cluster.endpoint}" + token = data.google_client_config.default.access_token + cluster_ca_certificate = base64decode( + data.google_container_cluster.my_cluster.master_auth[0].cluster_ca_certificate, + ) } ``` diff --git a/third_party/terraform/website/docs/d/datasource_compute_address.html.markdown b/third_party/terraform/website/docs/d/datasource_compute_address.html.markdown index 91eec27df68d..656b71cb4b64 100644 --- a/third_party/terraform/website/docs/d/datasource_compute_address.html.markdown +++ b/third_party/terraform/website/docs/d/datasource_compute_address.html.markdown @@ -24,9 +24,9 @@ resource "google_dns_record_set" "frontend" { type = "A" ttl = 300 - managed_zone = "${google_dns_managed_zone.prod.name}" + managed_zone = google_dns_managed_zone.prod.name - rrdatas = ["${data.google_compute_address.my_address.address}"] + rrdatas = [data.google_compute_address.my_address.address] } resource "google_dns_managed_zone" "prod" { diff --git a/third_party/terraform/website/docs/d/datasource_compute_global_address.html.markdown b/third_party/terraform/website/docs/d/datasource_compute_global_address.html.markdown index d985e2fe9cb7..4f238efe6039 100644 --- a/third_party/terraform/website/docs/d/datasource_compute_global_address.html.markdown +++ b/third_party/terraform/website/docs/d/datasource_compute_global_address.html.markdown @@ -24,9 +24,9 @@ resource "google_dns_record_set" "frontend" { type = "A" ttl = 300 - managed_zone = "${google_dns_managed_zone.prod.name}" + managed_zone = google_dns_managed_zone.prod.name - rrdatas = ["${data.google_compute_global_address.my_address.address}"] + rrdatas = [data.google_compute_global_address.my_address.address] } resource "google_dns_managed_zone" "prod" { diff --git a/third_party/terraform/website/docs/d/datasource_compute_image.html.markdown b/third_party/terraform/website/docs/d/datasource_compute_image.html.markdown index 53597c27c49b..f30e03f6ecce 100644 --- a/third_party/terraform/website/docs/d/datasource_compute_image.html.markdown +++ b/third_party/terraform/website/docs/d/datasource_compute_image.html.markdown @@ -25,7 +25,7 @@ resource "google_compute_instance" "default" { boot_disk { initialize_params { - image = "${data.google_compute_image.my_image.self_link}" + image = data.google_compute_image.my_image.self_link } } } diff --git a/third_party/terraform/website/docs/d/datasource_compute_instance.html.markdown b/third_party/terraform/website/docs/d/datasource_compute_instance.html.markdown index 40ae8bd983fe..45898b94404d 100644 --- a/third_party/terraform/website/docs/d/datasource_compute_instance.html.markdown +++ 
b/third_party/terraform/website/docs/d/datasource_compute_instance.html.markdown @@ -19,8 +19,8 @@ and ```hcl data "google_compute_instance" "appserver" { - name = "primary-application-server" - zone = "us-central1-a" + name = "primary-application-server" + zone = "us-central1-a" } ``` diff --git a/third_party/terraform/website/docs/d/datasource_compute_lb_ip_ranges.html.markdown b/third_party/terraform/website/docs/d/datasource_compute_lb_ip_ranges.html.markdown index cce7318e0207..a2c52ccc4e65 100644 --- a/third_party/terraform/website/docs/d/datasource_compute_lb_ip_ranges.html.markdown +++ b/third_party/terraform/website/docs/d/datasource_compute_lb_ip_ranges.html.markdown @@ -16,20 +16,21 @@ https://cloud.google.com/compute/docs/load-balancing/health-checks#health_check_ ## Example Usage ```tf -data "google_compute_lb_ip_ranges" "ranges" {} +data "google_compute_lb_ip_ranges" "ranges" { +} resource "google_compute_firewall" "lb" { name = "lb-firewall" - network = "${google_compute_network.main.name}" + network = google_compute_network.main.name allow { protocol = "tcp" ports = ["80"] } - source_ranges = ["${data.google_compute_lb_ip_ranges.ranges.network}"] + source_ranges = data.google_compute_lb_ip_ranges.ranges.network target_tags = [ - "InstanceBehindLoadBalancer" + "InstanceBehindLoadBalancer", ] } ``` diff --git a/third_party/terraform/website/docs/d/datasource_compute_region_instance_group.html.markdown b/third_party/terraform/website/docs/d/datasource_compute_region_instance_group.html.markdown index 592c220da5c7..19b0090452f4 100644 --- a/third_party/terraform/website/docs/d/datasource_compute_region_instance_group.html.markdown +++ b/third_party/terraform/website/docs/d/datasource_compute_region_instance_group.html.markdown @@ -12,29 +12,28 @@ description: |- Get a Compute Region Instance Group within GCE. For more information, see [the official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/latest/regionInstanceGroups). -``` +```hcl data "google_compute_region_instance_group" "group" { - name = "instance-group-name" + name = "instance-group-name" } ``` The most common use of this datasource will be to fetch information about the instances inside regional managed instance groups, for instance: -``` +```hcl resource "google_compute_region_instance_group_manager" "foo" { - name = "some_name" + name = "some_name" ... - base_instance_name = "foo" + base_instance_name = "foo" ... - instance_template = "${google_compute_instance_template.foo.self_link}" - target_pools = ["${google_compute_target_pool.foo.self_link}"] + instance_template = google_compute_instance_template.foo.self_link + target_pools = [google_compute_target_pool.foo.self_link] ... 
} data "google_compute_region_instance_group" "data_source" { - self_link = "${google_compute_region_instance_group_manager.foo.instance_group}" + self_link = google_compute_region_instance_group_manager.foo.instance_group } - ``` ## Argument Reference diff --git a/third_party/terraform/website/docs/d/datasource_compute_ssl_certificate.html.markdown b/third_party/terraform/website/docs/d/datasource_compute_ssl_certificate.html.markdown index deddd521f72e..daa3d6c7d4ac 100644 --- a/third_party/terraform/website/docs/d/datasource_compute_ssl_certificate.html.markdown +++ b/third_party/terraform/website/docs/d/datasource_compute_ssl_certificate.html.markdown @@ -15,19 +15,19 @@ Get info about a Google Compute SSL Certificate from its name. ```tf data "google_compute_ssl_certificate" "my_cert" { - name = "my-cert" + name = "my-cert" } output "certificate" { - value = "${data.google_compute_ssl_certificate.my_cert.certificate}" + value = data.google_compute_ssl_certificate.my_cert.certificate } output "certificate_id" { - value = "${data.google_compute_ssl_certificate.my_cert.certificate_id}" + value = data.google_compute_ssl_certificate.my_cert.certificate_id } output "self_link" { - value = "${data.google_compute_ssl_certificate.my_cert.self_link}" + value = data.google_compute_ssl_certificate.my_cert.self_link } ``` diff --git a/third_party/terraform/website/docs/d/datasource_google_client_openid_userinfo.html.markdown b/third_party/terraform/website/docs/d/datasource_google_client_openid_userinfo.html.markdown index ad33f675d40d..5a4dcd3068ec 100644 --- a/third_party/terraform/website/docs/d/datasource_google_client_openid_userinfo.html.markdown +++ b/third_party/terraform/website/docs/d/datasource_google_client_openid_userinfo.html.markdown @@ -12,9 +12,8 @@ description: |- Get OpenID userinfo about the credentials used with the Google provider, specifically the email. -When the `https://www.googleapis.com/auth/userinfo.email` scope is enabled in -your provider block, this datasource enables you to export the email of the -account you've authenticated the provider with; this can be used alongside +This datasource enables you to export the email of the account you've +authenticated the provider with; this can be used alongside `data.google_client_config`'s `access_token` to perform OpenID Connect authentication with GKE and configure an RBAC role for the email used. @@ -25,51 +24,36 @@ receive an error otherwise. 
## Example Usage - exporting an email ```hcl -provider "google" { - scopes = [ - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/userinfo.email", - ] +data "google_client_openid_userinfo" "me" { } -data "google_client_openid_userinfo" "me" {} - output "my-email" { - value = "${data.google_client_openid_userinfo.me.email}" + value = data.google_client_openid_userinfo.me.email } ``` ## Example Usage - OpenID Connect w/ Kubernetes provider + RBAC IAM role ```hcl -provider "google" { - scopes = [ - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/userinfo.email", - ] +data "google_client_openid_userinfo" "provider_identity" { } -data "google_client_openid_userinfo" "provider_identity" {} - -data "google_client_config" "provider" {} +data "google_client_config" "provider" { +} data "google_container_cluster" "my_cluster" { - name = "my-cluster" - zone = "us-east1-a" + name = "my-cluster" + zone = "us-east1-a" } provider "kubernetes" { load_config_file = false - host = "https://${data.google_container_cluster.my_cluster.endpoint}" - token = "${data.google_client_config.provider.access_token}" - cluster_ca_certificate = "${base64decode(data.google_container_cluster.my_cluster.master_auth.0.cluster_ca_certificate)}" + host = "https://${data.google_container_cluster.my_cluster.endpoint}" + token = data.google_client_config.provider.access_token + cluster_ca_certificate = base64decode( + data.google_container_cluster.my_cluster.master_auth[0].cluster_ca_certificate, + ) } resource "kubernetes_cluster_role_binding" "user" { @@ -85,7 +69,7 @@ resource "kubernetes_cluster_role_binding" "user" { subject { kind = "User" - name = "${data.google_client_openid_userinfo.provider_identity.email}" + name = data.google_client_openid_userinfo.provider_identity.email } } ``` diff --git a/third_party/terraform/website/docs/d/datasource_google_composer_image_versions.html.markdown b/third_party/terraform/website/docs/d/datasource_google_composer_image_versions.html.markdown index a1bfec475b06..ac07c9d81251 100644 --- a/third_party/terraform/website/docs/d/datasource_google_composer_image_versions.html.markdown +++ b/third_party/terraform/website/docs/d/datasource_google_composer_image_versions.html.markdown @@ -18,11 +18,11 @@ data "google_composer_image_versions" "all" { } resource "google_composer_environment" "test" { - name = "test-env" - region = "us-central1" + name = "test-env" + region = "us-central1" config { software_config { - image_version = "${data.google_composer_image_versions.all.image_versions.0.image_version_id}" + image_version = data.google_composer_image_versions.all.image_versions[0].image_version_id } } } diff --git a/third_party/terraform/website/docs/d/datasource_google_compute_backend_service.html.markdown b/third_party/terraform/website/docs/d/datasource_google_compute_backend_service.html.markdown index 1bb0f3d8d582..21f7d69df7f0 100644 --- a/third_party/terraform/website/docs/d/datasource_google_compute_backend_service.html.markdown +++ b/third_party/terraform/website/docs/d/datasource_google_compute_backend_service.html.markdown @@ -22,7 +22,7 @@ data "google_compute_backend_service" 
"baz" { resource "google_compute_backend_service" "default" { name = "backend-service" - health_checks = ["${tolist(data.google_compute_backend_service.baz.health_checks)[0]}"] + health_checks = [tolist(data.google_compute_backend_service.baz.health_checks)[0]] } ``` diff --git a/third_party/terraform/website/docs/d/datasource_google_compute_network_endpoint_group.html.markdown b/third_party/terraform/website/docs/d/datasource_google_compute_network_endpoint_group.html.markdown index f9e89e12691a..8aedcf36b0d0 100644 --- a/third_party/terraform/website/docs/d/datasource_google_compute_network_endpoint_group.html.markdown +++ b/third_party/terraform/website/docs/d/datasource_google_compute_network_endpoint_group.html.markdown @@ -17,12 +17,12 @@ The NEG may be found by providing either a `self_link`, or a `name` and a `zone` ```hcl data "google_compute_network_endpoint_group" "neg1" { - name = "k8s1-abcdef01-myns-mysvc-8080-4b6bac43" - zone = "us-central1-a" + name = "k8s1-abcdef01-myns-mysvc-8080-4b6bac43" + zone = "us-central1-a" } data "google_compute_network_endpoint_group" "neg2" { - self_link = "https://www.googleapis.com/compute/v1/projects/myproject/zones/us-central1-a/networkEndpointGroups/k8s1-abcdef01-myns-mysvc-8080-4b6bac43" + self_link = "https://www.googleapis.com/compute/v1/projects/myproject/zones/us-central1-a/networkEndpointGroups/k8s1-abcdef01-myns-mysvc-8080-4b6bac43" } ``` diff --git a/third_party/terraform/website/docs/d/datasource_google_folder_organization_policy.html.markdown b/third_party/terraform/website/docs/d/datasource_google_folder_organization_policy.html.markdown index e1e18b801b44..c3e88de7132e 100644 --- a/third_party/terraform/website/docs/d/datasource_google_folder_organization_policy.html.markdown +++ b/third_party/terraform/website/docs/d/datasource_google_folder_organization_policy.html.markdown @@ -22,7 +22,7 @@ data "google_folder_organization_policy" "policy" { } output "version" { - value = "${data.google_folder_organization_policy.policy.version}" + value = data.google_folder_organization_policy.policy.version } ``` diff --git a/third_party/terraform/website/docs/d/datasource_google_iam_role.html.markdown b/third_party/terraform/website/docs/d/datasource_google_iam_role.html.markdown index a438b8eef362..c4e178290def 100644 --- a/third_party/terraform/website/docs/d/datasource_google_iam_role.html.markdown +++ b/third_party/terraform/website/docs/d/datasource_google_iam_role.html.markdown @@ -17,9 +17,8 @@ data "google_iam_role" "roleinfo" { } output "the_role_permissions" { - value = "${data.google_iam_role.roleinfo.included_permissions}" + value = data.google_iam_role.roleinfo.included_permissions } - ``` ## Argument Reference diff --git a/third_party/terraform/website/docs/d/datasource_google_netblock_ip_ranges.html.markdown b/third_party/terraform/website/docs/d/datasource_google_netblock_ip_ranges.html.markdown index 95d0b1874076..219e2b38a8de 100644 --- a/third_party/terraform/website/docs/d/datasource_google_netblock_ip_ranges.html.markdown +++ b/third_party/terraform/website/docs/d/datasource_google_netblock_ip_ranges.html.markdown @@ -14,18 +14,19 @@ Use this data source to get the IP addresses from different special IP ranges on ## Example Usage - Cloud Ranges ```tf -data "google_netblock_ip_ranges" "netblock" {} +data "google_netblock_ip_ranges" "netblock" { +} output "cidr_blocks" { - value = "${data.google_netblock_ip_ranges.netblock.cidr_blocks}" + value = data.google_netblock_ip_ranges.netblock.cidr_blocks } output 
"cidr_blocks_ipv4" { - value = "${data.google_netblock_ip_ranges.netblock.cidr_blocks_ipv4}" + value = data.google_netblock_ip_ranges.netblock.cidr_blocks_ipv4 } output "cidr_blocks_ipv6" { - value = "${data.google_netblock_ip_ranges.netblock.cidr_blocks_ipv6}" + value = data.google_netblock_ip_ranges.netblock.cidr_blocks_ipv6 } ``` @@ -38,14 +39,14 @@ data "google_netblock_ip_ranges" "legacy-hcs" { resource "google_compute_firewall" "allow-hcs" { name = "allow-hcs" - network = "${google_compute_network.default.name}" + network = google_compute_network.default.name allow { protocol = "tcp" ports = ["80"] } - source_ranges = ["${data.google_netblock_ip_ranges.legacy-hcs.cidr_blocks_ipv4}"] + source_ranges = data.google_netblock_ip_ranges.legacy-hcs.cidr_blocks_ipv4 } resource "google_compute_network" "default" { diff --git a/third_party/terraform/website/docs/d/datasource_google_project_organization_policy.html.markdown b/third_party/terraform/website/docs/d/datasource_google_project_organization_policy.html.markdown index 0e4a697c0fe5..ee6e7b01830f 100644 --- a/third_party/terraform/website/docs/d/datasource_google_project_organization_policy.html.markdown +++ b/third_party/terraform/website/docs/d/datasource_google_project_organization_policy.html.markdown @@ -22,7 +22,7 @@ data "google_project_organization_policy" "policy" { } output "version" { - value = "${data.google_project_organization_policy.policy.version}" + value = data.google_project_organization_policy.policy.version } ``` diff --git a/third_party/terraform/website/docs/d/datasource_google_service_account.html.markdown b/third_party/terraform/website/docs/d/datasource_google_service_account.html.markdown index cc38459c2a38..1f319ea3f7b3 100644 --- a/third_party/terraform/website/docs/d/datasource_google_service_account.html.markdown +++ b/third_party/terraform/website/docs/d/datasource_google_service_account.html.markdown @@ -27,15 +27,15 @@ data "google_service_account" "myaccount" { } resource "google_service_account_key" "mykey" { - service_account_id = "${data.google_service_account.myaccount.name}" + service_account_id = data.google_service_account.myaccount.name } resource "kubernetes_secret" "google-application-credentials" { - metadata = { + metadata { name = "google-application-credentials" } - data { - credentials.json = "${base64decode(google_service_account_key.mykey.private_key)}" + data = { + credentials.json = base64decode(google_service_account_key.mykey.private_key) } } ``` diff --git a/third_party/terraform/website/docs/d/datasource_google_service_account_access_token.html.markdown b/third_party/terraform/website/docs/d/datasource_google_service_account_access_token.html.markdown index 476c14ffc265..c18d6b154b6d 100644 --- a/third_party/terraform/website/docs/d/datasource_google_service_account_access_token.html.markdown +++ b/third_party/terraform/website/docs/d/datasource_google_service_account_access_token.html.markdown @@ -35,30 +35,31 @@ Once the IAM permissions are set, you can apply the new token to a provider boot In the example below, `google_project` will run as `service_B`. 
```hcl -provider "google" {} +provider "google" { +} data "google_client_config" "default" { - provider = "google" + provider = google } data "google_service_account_access_token" "default" { - provider = "google" - target_service_account = "service_B@projectB.iam.gserviceaccount.com" - scopes = ["userinfo-email", "cloud-platform"] - lifetime = "300s" + provider = google + target_service_account = "service_B@projectB.iam.gserviceaccount.com" + scopes = ["userinfo-email", "cloud-platform"] + lifetime = "300s" } provider "google" { - alias = "impersonated" - access_token = "${data.google_service_account_access_token.default.access_token}" + alias = "impersonated" + access_token = data.google_service_account_access_token.default.access_token } data "google_client_openid_userinfo" "me" { - provider = "google.impersonated" + provider = google.impersonated } output "target-email" { - value = "${data.google_client_openid_userinfo.me.email}" + value = data.google_client_openid_userinfo.me.email } ``` diff --git a/third_party/terraform/website/docs/d/datasource_google_service_account_key.html.markdown b/third_party/terraform/website/docs/d/datasource_google_service_account_key.html.markdown index 69c194152d61..69a2201b9a04 100644 --- a/third_party/terraform/website/docs/d/datasource_google_service_account_key.html.markdown +++ b/third_party/terraform/website/docs/d/datasource_google_service_account_key.html.markdown @@ -20,11 +20,11 @@ resource "google_service_account" "myaccount" { } resource "google_service_account_key" "mykey" { - service_account_id = "${google_service_account.myaccount.name}" + service_account_id = google_service_account.myaccount.name } data "google_service_account_key" "mykey" { - name = "${google_service_account_key.mykey.name}" + name = google_service_account_key.mykey.name public_key_type = "TYPE_X509_PEM_FILE" } ``` diff --git a/third_party/terraform/website/docs/d/datasource_tpu_tensorflow_versions.html.markdown b/third_party/terraform/website/docs/d/datasource_tpu_tensorflow_versions.html.markdown index 7d809c68ada0..3f24fc6e358e 100644 --- a/third_party/terraform/website/docs/d/datasource_tpu_tensorflow_versions.html.markdown +++ b/third_party/terraform/website/docs/d/datasource_tpu_tensorflow_versions.html.markdown @@ -14,21 +14,23 @@ Get TensorFlow versions available for a project. 
For more information see the [o ## Example Usage ```hcl -data "google_tpu_tensorflow_versions" "available" { } +data "google_tpu_tensorflow_versions" "available" { +} ``` ## Example Usage: Configure Basic TPU Node with available version ```hcl -data "google_tpu_tensorflow_versions" "available" { } +data "google_tpu_tensorflow_versions" "available" { +} resource "google_tpu_node" "tpu" { - name = "test-tpu" - zone = "us-central1-b" + name = "test-tpu" + zone = "us-central1-b" - accelerator_type = "v3-8" - tensorflow_version = "${data.google_tpu_tensorflow_versions.available.versions[0]}" - cidr_block = "10.2.0.0/29" + accelerator_type = "v3-8" + tensorflow_version = data.google_tpu_tensorflow_versions.available.versions[0] + cidr_block = "10.2.0.0/29" } ``` diff --git a/third_party/terraform/website/docs/d/dns_managed_zone.html.markdown b/third_party/terraform/website/docs/d/dns_managed_zone.html.markdown index 8cef82a1f6b3..3d2403963b80 100644 --- a/third_party/terraform/website/docs/d/dns_managed_zone.html.markdown +++ b/third_party/terraform/website/docs/d/dns_managed_zone.html.markdown @@ -17,7 +17,7 @@ and ```hcl data "google_dns_managed_zone" "env_dns_zone" { - name = "qa-zone" + name = "qa-zone" } resource "google_dns_record_set" "dns" { @@ -25,7 +25,7 @@ resource "google_dns_record_set" "dns" { type = "TXT" ttl = 300 - managed_zone = "${data.google_dns_managed_zone.env_dns_zone.name}" + managed_zone = data.google_dns_managed_zone.env_dns_zone.name rrdatas = ["test"] } diff --git a/third_party/terraform/website/docs/d/google_active_folder.html.markdown b/third_party/terraform/website/docs/d/google_active_folder.html.markdown index 1caca3e4257a..9053e831fb99 100644 --- a/third_party/terraform/website/docs/d/google_active_folder.html.markdown +++ b/third_party/terraform/website/docs/d/google_active_folder.html.markdown @@ -16,7 +16,7 @@ Get an active folder within GCP by `display_name` and `parent`. 
```tf data "google_active_folder" "department1" { display_name = "Department 1" - parent = "organizations/1234567" + parent = "organizations/1234567" } ``` diff --git a/third_party/terraform/website/docs/d/google_billing_account.html.markdown b/third_party/terraform/website/docs/d/google_billing_account.html.markdown index a03e0df8ed90..84d8c8b1e19b 100644 --- a/third_party/terraform/website/docs/d/google_billing_account.html.markdown +++ b/third_party/terraform/website/docs/d/google_billing_account.html.markdown @@ -22,7 +22,7 @@ resource "google_project" "my_project" { project_id = "your-project-id" org_id = "1234567" - billing_account = "${data.google_billing_account.acct.id}" + billing_account = data.google_billing_account.acct.id } ``` diff --git a/third_party/terraform/website/docs/d/google_compute_default_service_account.html.markdown b/third_party/terraform/website/docs/d/google_compute_default_service_account.html.markdown index 7e50711381d5..8bc330a41b83 100644 --- a/third_party/terraform/website/docs/d/google_compute_default_service_account.html.markdown +++ b/third_party/terraform/website/docs/d/google_compute_default_service_account.html.markdown @@ -14,11 +14,12 @@ Use this data source to retrieve default service account for this project ## Example Usage ```hcl -data "google_compute_default_service_account" "default" { } +data "google_compute_default_service_account" "default" { +} output "default_account" { - value = "${data.google_compute_default_service_account.default.email}" -} + value = data.google_compute_default_service_account.default.email +} ``` ## Argument Reference diff --git a/third_party/terraform/website/docs/d/google_compute_instance_group.html.markdown b/third_party/terraform/website/docs/d/google_compute_instance_group.html.markdown index 303df9716666..c0ccaa30895b 100644 --- a/third_party/terraform/website/docs/d/google_compute_instance_group.html.markdown +++ b/third_party/terraform/website/docs/d/google_compute_instance_group.html.markdown @@ -13,7 +13,7 @@ Get a Compute Instance Group within GCE. For more information, see [the official documentation](https://cloud.google.com/compute/docs/instance-groups/#unmanaged_instance_groups) and [API](https://cloud.google.com/compute/docs/reference/latest/instanceGroups) -``` +```hcl data "google_compute_instance_group" "all" { name = "instance-group-name" zone = "us-central1-a" diff --git a/third_party/terraform/website/docs/d/google_compute_node_types.html.markdown b/third_party/terraform/website/docs/d/google_compute_node_types.html.markdown index 0ddde6457343..5a7d8299ce42 100644 --- a/third_party/terraform/website/docs/d/google_compute_node_types.html.markdown +++ b/third_party/terraform/website/docs/d/google_compute_node_types.html.markdown @@ -23,7 +23,7 @@ data "google_compute_node_types" "central1b" { resource "google_compute_node_template" "tmpl" { name = "terraform-test-tmpl" region = "us-central1" - node_type = "${data.google_compute_node_types.types.names[0]}" + node_type = data.google_compute_node_types.types.names[0] } ``` diff --git a/third_party/terraform/website/docs/d/google_compute_regions.html.markdown b/third_party/terraform/website/docs/d/google_compute_regions.html.markdown index 1df2c2bd8c6d..468cfd8206b3 100644 --- a/third_party/terraform/website/docs/d/google_compute_regions.html.markdown +++ b/third_party/terraform/website/docs/d/google_compute_regions.html.markdown @@ -12,15 +12,16 @@ description: |- Provides access to available Google Compute regions for a given project. 
See more about [regions and regions](https://cloud.google.com/compute/docs/regions-zones/) in the upstream docs. -``` -data "google_compute_regions" "available" {} +```hcl +data "google_compute_regions" "available" { +} resource "google_compute_subnetwork" "cluster" { - count = "${length(data.google_compute_regions.available.names)}" + count = length(data.google_compute_regions.available.names) name = "my-network" ip_cidr_range = "10.36.${count.index}.0/24" network = "my-network" - region = "${data.google_compute_regions.available.names[count.index]}" + region = data.google_compute_regions.available.names[count.index] } ``` diff --git a/third_party/terraform/website/docs/d/google_compute_resource_policy.html.markdown b/third_party/terraform/website/docs/d/google_compute_resource_policy.html.markdown index efaf97160418..d91f671bfa75 100644 --- a/third_party/terraform/website/docs/d/google_compute_resource_policy.html.markdown +++ b/third_party/terraform/website/docs/d/google_compute_resource_policy.html.markdown @@ -21,7 +21,7 @@ provider "google-beta" { } data "google_compute_resource_policy" "daily" { - provider = "google-beta" + provider = google-beta name = "daily" region = "us-central1" } diff --git a/third_party/terraform/website/docs/d/google_compute_zones.html.markdown b/third_party/terraform/website/docs/d/google_compute_zones.html.markdown index 604377b6aca9..cf6d3bc47c66 100644 --- a/third_party/terraform/website/docs/d/google_compute_zones.html.markdown +++ b/third_party/terraform/website/docs/d/google_compute_zones.html.markdown @@ -12,16 +12,17 @@ description: |- Provides access to available Google Compute zones in a region for a given project. See more about [regions and zones](https://cloud.google.com/compute/docs/regions-zones/regions-zones) in the upstream docs. -``` -data "google_compute_zones" "available" {} +```hcl +data "google_compute_zones" "available" { +} resource "google_compute_instance_group_manager" "foo" { - count = "${length(data.google_compute_zones.available.names)}" + count = length(data.google_compute_zones.available.names) name = "terraform-test-${count.index}" - instance_template = "${google_compute_instance_template.foobar.self_link}" + instance_template = google_compute_instance_template.foobar.self_link base_instance_name = "foobar-${count.index}" - zone = "${data.google_compute_zones.available.names[count.index]}" + zone = data.google_compute_zones.available.names[count.index] target_size = 1 } ``` diff --git a/third_party/terraform/website/docs/d/google_container_cluster.html.markdown b/third_party/terraform/website/docs/d/google_container_cluster.html.markdown index 67aba024ce49..c03e9341260e 100644 --- a/third_party/terraform/website/docs/d/google_container_cluster.html.markdown +++ b/third_party/terraform/website/docs/d/google_container_cluster.html.markdown @@ -15,32 +15,32 @@ Get info about a GKE cluster from its name and location. 
```tf data "google_container_cluster" "my_cluster" { - name = "my-cluster" - location = "us-east1-a" + name = "my-cluster" + location = "us-east1-a" } output "cluster_username" { - value = "${data.google_container_cluster.my_cluster.master_auth.0.username}" + value = data.google_container_cluster.my_cluster.master_auth[0].username } output "cluster_password" { - value = "${data.google_container_cluster.my_cluster.master_auth.0.password}" + value = data.google_container_cluster.my_cluster.master_auth[0].password } output "endpoint" { - value = "${data.google_container_cluster.my_cluster.endpoint}" + value = data.google_container_cluster.my_cluster.endpoint } output "instance_group_urls" { - value = "${data.google_container_cluster.my_cluster.instance_group_urls}" + value = data.google_container_cluster.my_cluster.instance_group_urls } output "node_config" { - value = "${data.google_container_cluster.my_cluster.node_config}" + value = data.google_container_cluster.my_cluster.node_config } output "node_pools" { - value = "${data.google_container_cluster.my_cluster.node_pool}" + value = data.google_container_cluster.my_cluster.node_pool } ``` diff --git a/third_party/terraform/website/docs/d/google_container_engine_versions.html.markdown b/third_party/terraform/website/docs/d/google_container_engine_versions.html.markdown index 49ff7cf47c47..d6b6ce23a780 100644 --- a/third_party/terraform/website/docs/d/google_container_engine_versions.html.markdown +++ b/third_party/terraform/website/docs/d/google_container_engine_versions.html.markdown @@ -21,14 +21,14 @@ support the same version. ```hcl data "google_container_engine_versions" "central1b" { - location = "us-central1-b" + location = "us-central1-b" version_prefix = "1.12." } resource "google_container_cluster" "foo" { name = "terraform-test-cluster" - location = "us-central1-b" - node_version = "${data.google_container_engine_versions.central1b.latest_node_version}" + location = "us-central1-b" + node_version = data.google_container_engine_versions.central1b.latest_node_version initial_node_count = 1 master_auth { @@ -47,14 +47,6 @@ Must exactly match the location the cluster will be deployed in, or listed versions may not be available. If `location`, `region`, and `zone` are not specified, the provider-level zone must be set and is used instead. -* `zone` (Optional, Deprecated) - Zone to list available cluster versions for. -Should match the zone the cluster will be deployed in. `zone` has been -deprecated in favour of `location`. - -* `region` (Optional, Deprecated) - Region to list available cluster versions -for. Should match the region the cluster will be deployed in. `region` has been -deprecated in favour of `location`. - * `project` (Optional) - ID of the project to list available cluster versions for. Should match the project the cluster will be deployed to. Defaults to the project that the provider is authenticated with. 
diff --git a/third_party/terraform/website/docs/d/google_container_registry_image.html.markdown b/third_party/terraform/website/docs/d/google_container_registry_image.html.markdown index 8d7ae054a822..799646d76cd3 100644 --- a/third_party/terraform/website/docs/d/google_container_registry_image.html.markdown +++ b/third_party/terraform/website/docs/d/google_container_registry_image.html.markdown @@ -17,11 +17,11 @@ The URLs are computed entirely offline - as long as the project exists, they wil ```hcl data "google_container_registry_image" "debian" { - name = "debian" + name = "debian" } output "gcr_location" { - value = "${data.google_container_registry_image.debian.image_url}" + value = data.google_container_registry_image.debian.image_url } ``` diff --git a/third_party/terraform/website/docs/d/google_container_registry_repository.html.markdown b/third_party/terraform/website/docs/d/google_container_registry_repository.html.markdown index 9880cc4b1b9a..3fabcaa37382 100644 --- a/third_party/terraform/website/docs/d/google_container_registry_repository.html.markdown +++ b/third_party/terraform/website/docs/d/google_container_registry_repository.html.markdown @@ -16,10 +16,11 @@ The URLs are computed entirely offline - as long as the project exists, they wil ## Example Usage ```hcl -data "google_container_registry_repository" "foo" {} +data "google_container_registry_repository" "foo" { +} output "gcr_location" { - value = "${data.google_container_registry_repository.foo.repository_url}" + value = data.google_container_registry_repository.foo.repository_url } ``` diff --git a/third_party/terraform/website/docs/d/google_folder.html.markdown b/third_party/terraform/website/docs/d/google_folder.html.markdown index c35afb99a248..51ed1c50f1c8 100644 --- a/third_party/terraform/website/docs/d/google_folder.html.markdown +++ b/third_party/terraform/website/docs/d/google_folder.html.markdown @@ -14,7 +14,7 @@ Use this data source to get information about a Google Cloud Folder. ```hcl # Get folder by id data "google_folder" "my_folder_1" { - folder = "folders/12345" + folder = "folders/12345" lookup_organization = true } @@ -24,13 +24,12 @@ data "google_folder" "my_folder_2" { } output "my_folder_1_organization" { - value = "${data.google_folder.my_folder_1.organization}" + value = data.google_folder.my_folder_1.organization } output "my_folder_2_parent" { - value = "${data.google_folder.my_folder_2.parent}" + value = data.google_folder.my_folder_2.parent } - ``` ## Argument Reference diff --git a/third_party/terraform/website/docs/d/google_iam_policy.html.markdown b/third_party/terraform/website/docs/d/google_iam_policy.html.markdown index 9c42472a613a..c5e83cf81914 100644 --- a/third_party/terraform/website/docs/d/google_iam_policy.html.markdown +++ b/third_party/terraform/website/docs/d/google_iam_policy.html.markdown @@ -13,7 +13,7 @@ description: |- Generates an IAM policy document that may be referenced by and applied to other Google Cloud Platform resources, such as the `google_project` resource. 
-``` +```hcl data "google_iam_policy" "admin" { binding { role = "roles/compute.instanceAdmin" diff --git a/third_party/terraform/website/docs/d/google_kms_crypto_key.html.markdown b/third_party/terraform/website/docs/d/google_kms_crypto_key.html.markdown index 82a861692efc..cc91bd9fc5f8 100644 --- a/third_party/terraform/website/docs/d/google_kms_crypto_key.html.markdown +++ b/third_party/terraform/website/docs/d/google_kms_crypto_key.html.markdown @@ -26,8 +26,8 @@ data "google_kms_key_ring" "my_key_ring" { } data "google_kms_crypto_key" "my_crypto_key" { - name = "my-crypto-key" - key_ring = "${data.google_kms_key_ring.my_key_ring.self_link}" + name = "my-crypto-key" + key_ring = data.google_kms_key_ring.my_key_ring.self_link } ``` diff --git a/third_party/terraform/website/docs/d/google_kms_crypto_key_version.html.markdown b/third_party/terraform/website/docs/d/google_kms_crypto_key_version.html.markdown index 93cc08b1ed4e..e0b00e157c08 100644 --- a/third_party/terraform/website/docs/d/google_kms_crypto_key_version.html.markdown +++ b/third_party/terraform/website/docs/d/google_kms_crypto_key_version.html.markdown @@ -26,11 +26,11 @@ data "google_kms_key_ring" "my_key_ring" { data "google_kms_crypto_key" "my_crypto_key" { name = "my-crypto-key" - key_ring = "${data.google_kms_key_ring.my_key_ring.self_link}" + key_ring = data.google_kms_key_ring.my_key_ring.self_link } data "google_kms_crypto_key_version" "my_crypto_key_version" { - crypto_key = "${data.google_kms_key.my_key.self_link}" + crypto_key = data.google_kms_key.my_key.self_link } ``` diff --git a/third_party/terraform/website/docs/d/google_kms_secret.html.markdown b/third_party/terraform/website/docs/d/google_kms_secret.html.markdown index 47014bda0583..55bdbc936c34 100644 --- a/third_party/terraform/website/docs/d/google_kms_secret.html.markdown +++ b/third_party/terraform/website/docs/d/google_kms_secret.html.markdown @@ -33,7 +33,7 @@ resource "google_kms_key_ring" "my_key_ring" { resource "google_kms_crypto_key" "my_crypto_key" { name = "my-crypto-key" - key_ring = "${google_kms_key_ring.my_key_ring.self_link}" + key_ring = google_kms_key_ring.my_key_ring.self_link } ``` @@ -56,7 +56,7 @@ Finally, reference the encrypted ciphertext in your resource definitions: ```hcl data "google_kms_secret" "sql_user_password" { - crypto_key = "${google_kms_crypto_key.my_crypto_key.self_link}" + crypto_key = google_kms_crypto_key.my_crypto_key.self_link ciphertext = "CiQAqD+xX4SXOSziF4a8JYvq4spfAuWhhYSNul33H85HnVtNQW4SOgDu2UZ46dQCRFl5MF6ekabviN8xq+F+2035ZJ85B+xTYXqNf4mZs0RJitnWWuXlYQh6axnnJYu3kDU=" } @@ -74,9 +74,9 @@ resource "google_sql_database_instance" "master" { resource "google_sql_user" "users" { name = "me" - instance = "${google_sql_database_instance.master.name}" + instance = google_sql_database_instance.master.name host = "me.com" - password = "${data.google_kms_secret.sql_user_password.plaintext}" + password = data.google_kms_secret.sql_user_password.plaintext } ``` diff --git a/third_party/terraform/website/docs/d/google_kms_secret_ciphertext.html.markdown b/third_party/terraform/website/docs/d/google_kms_secret_ciphertext.html.markdown index a83e27f602c5..40e85ab43bc3 100644 --- a/third_party/terraform/website/docs/d/google_kms_secret_ciphertext.html.markdown +++ b/third_party/terraform/website/docs/d/google_kms_secret_ciphertext.html.markdown @@ -33,7 +33,7 @@ resource "google_kms_key_ring" "my_key_ring" { resource "google_kms_crypto_key" "my_crypto_key" { name = "my-crypto-key" - key_ring = 
"${google_kms_key_ring.my_key_ring.self_link}" + key_ring = google_kms_key_ring.my_key_ring.self_link } ``` @@ -41,8 +41,8 @@ Next, encrypt some sensitive information and use the encrypted data in your reso ```hcl data "google_kms_secret_ciphertext" "my_password" { - crypto_key = "${google_kms_crypto_key.my_crypto_key.self_link}" - plaintext = "my-secret-password" + crypto_key = google_kms_crypto_key.my_crypto_key.self_link + plaintext = "my-secret-password" } resource "google_compute_instance" "instance" { @@ -64,7 +64,7 @@ resource "google_compute_instance" "instance" { } metadata = { - password = "${data.google_kms_secret_ciphertext.my_password.ciphertext}" + password = data.google_kms_secret_ciphertext.my_password.ciphertext } } ``` diff --git a/third_party/terraform/website/docs/d/google_organization.html.markdown b/third_party/terraform/website/docs/d/google_organization.html.markdown index 4168109714e6..d94792423880 100644 --- a/third_party/terraform/website/docs/d/google_organization.html.markdown +++ b/third_party/terraform/website/docs/d/google_organization.html.markdown @@ -18,7 +18,7 @@ data "google_organization" "org" { resource "google_folder" "sales" { display_name = "Sales" - parent = "${data.google_organization.org.name}" + parent = data.google_organization.org.name } ``` diff --git a/third_party/terraform/website/docs/d/google_project.html.markdown b/third_party/terraform/website/docs/d/google_project.html.markdown index 069939a89aa8..4887fd87a45f 100644 --- a/third_party/terraform/website/docs/d/google_project.html.markdown +++ b/third_party/terraform/website/docs/d/google_project.html.markdown @@ -16,10 +16,11 @@ For more information see ## Example Usage ```hcl -data "google_project" "project" {} +data "google_project" "project" { +} output "project_number" { - value = "${data.google_project.project.number}" + value = data.google_project.project.number } ``` diff --git a/third_party/terraform/website/docs/d/google_project_services.html.markdown b/third_party/terraform/website/docs/d/google_project_services.html.markdown deleted file mode 100644 index 6b0e1765c237..000000000000 --- a/third_party/terraform/website/docs/d/google_project_services.html.markdown +++ /dev/null @@ -1,40 +0,0 @@ ---- -subcategory: "Cloud Platform" -layout: "google" -page_title: "Google: google_project_services" -sidebar_current: "docs-google-datasource-project-services" -description: |- - Retrieve enabled of API services for a Google Cloud Platform project ---- - -# google\_project\_services - -Use this data source to get details on the enabled project services. - -For a list of services available, visit the -[API library page](https://console.cloud.google.com/apis/library) or run `gcloud services list`. - -## Example Usage - -```hcl -data "google_project_services" "project" { - project = "your-project-id" -} - -output "project_services" { - value = "${join(",", data.google_project_services.project.services)}" -} -``` - -## Argument Reference - -The following arguments are supported: - -* `project` - (Required) The project ID. - - -## Attributes Reference - -The following attributes are exported: - -See [google_project_services](https://www.terraform.io/docs/providers/google/r/google_project_services.html) resource for details of the available attributes. 
diff --git a/third_party/terraform/website/docs/d/google_projects.html.markdown b/third_party/terraform/website/docs/d/google_projects.html.markdown index f71a4cf4c69c..4fc3762d1b39 100644 --- a/third_party/terraform/website/docs/d/google_projects.html.markdown +++ b/third_party/terraform/website/docs/d/google_projects.html.markdown @@ -21,7 +21,7 @@ data "google_projects" "my-org-projects" { } data "google_project" "deletion-candidate" { - project_id = "${data.google_projects.my-org-projects.projects.0.project_id}" + project_id = data.google_projects.my-org-projects.projects[0].project_id } ``` diff --git a/third_party/terraform/website/docs/d/google_storage_project_service_account.html.markdown b/third_party/terraform/website/docs/d/google_storage_project_service_account.html.markdown index 8ab1e5a98af9..017e712944b6 100644 --- a/third_party/terraform/website/docs/d/google_storage_project_service_account.html.markdown +++ b/third_party/terraform/website/docs/d/google_storage_project_service_account.html.markdown @@ -20,13 +20,14 @@ For more information see ## Example Usage ```hcl -data "google_storage_project_service_account" "gcs_account" {} +data "google_storage_project_service_account" "gcs_account" { +} resource "google_pubsub_topic_iam_binding" "binding" { - topic = "${google_pubsub_topic.topic.name}" - role = "roles/pubsub.publisher" - - members = ["serviceAccount:${data.google_storage_project_service_account.gcs_account.email_address}"] + topic = google_pubsub_topic.topic.name + role = "roles/pubsub.publisher" + + members = ["serviceAccount:${data.google_storage_project_service_account.gcs_account.email_address}"] } ``` diff --git a/third_party/terraform/website/docs/d/google_storage_transfer_project_service_account.html.markdown b/third_party/terraform/website/docs/d/google_storage_transfer_project_service_account.html.markdown index 06daf3ae0110..b3ad3bdaeb98 100644 --- a/third_party/terraform/website/docs/d/google_storage_transfer_project_service_account.html.markdown +++ b/third_party/terraform/website/docs/d/google_storage_transfer_project_service_account.html.markdown @@ -14,10 +14,11 @@ Use this data source to retrieve Storage Transfer service account for this proje ## Example Usage ```hcl -data "google_storage_transfer_project_service_account" "default" { } +data "google_storage_transfer_project_service_account" "default" { +} output "default_account" { - value = "${data.google_storage_transfer_project_service_account.default.email}" + value = data.google_storage_transfer_project_service_account.default.email } ``` diff --git a/third_party/terraform/website/docs/d/signed_url.html.markdown b/third_party/terraform/website/docs/d/signed_url.html.markdown index fee03ac2b55e..6946cd25b1ad 100644 --- a/third_party/terraform/website/docs/d/signed_url.html.markdown +++ b/third_party/terraform/website/docs/d/signed_url.html.markdown @@ -22,15 +22,15 @@ data "google_storage_object_signed_url" "artifact" { } resource "google_compute_instance" "vm" { - name = "vm" - - provisioner "remote-exec" { - inline = [ - "wget '${data.google_storage_object_signed_url.artifact.signed_url}' -O install_file.bin", - "chmod +x install_file.bin", - "./install_file.bin" - ] - } + name = "vm" + + provisioner "remote-exec" { + inline = [ + "wget '${data.google_storage_object_signed_url.artifact.signed_url}' -O install_file.bin", + "chmod +x install_file.bin", + "./install_file.bin", + ] + } } ``` @@ -43,7 +43,7 @@ data "google_storage_object_signed_url" "get_url" { content_md5 = 
"pRviqwS4c4OTJRTe03FD1w==" content_type = "text/plain" duration = "2d" - credentials = "${file("path/to/credentials.json")}" + credentials = file("path/to/credentials.json") extension_headers = { x-goog-if-generation-match = 1 diff --git a/third_party/terraform/website/docs/guides/provider_reference.html.markdown b/third_party/terraform/website/docs/guides/provider_reference.html.markdown index 8344f7c1fde3..741b1333ecbb 100644 --- a/third_party/terraform/website/docs/guides/provider_reference.html.markdown +++ b/third_party/terraform/website/docs/guides/provider_reference.html.markdown @@ -213,6 +213,7 @@ an access token using the service account key specified in `credentials`. * https://www.googleapis.com/auth/cloud-platform * https://www.googleapis.com/auth/ndev.clouddns.readwrite * https://www.googleapis.com/auth/devstorage.full_control + * https://www.googleapis.com/auth/userinfo.email --- @@ -318,8 +319,7 @@ as their versioned counterpart but that won't necessarily always be the case. **So far, batching is implemented for**: -* enabling project services using `google_project_service` or - `google_project_services` +* enabling project services using `google_project_service`. The `batching` block supports the following fields. diff --git a/third_party/terraform/website/docs/guides/version_3_upgrade.html.markdown b/third_party/terraform/website/docs/guides/version_3_upgrade.html.markdown index 9c5829e4c804..6811039e07ad 100644 --- a/third_party/terraform/website/docs/guides/version_3_upgrade.html.markdown +++ b/third_party/terraform/website/docs/guides/version_3_upgrade.html.markdown @@ -50,50 +50,817 @@ so Terraform knows to manage them. ## Upgrade Topics + +- [Provider Version Configuration](#provider-version-configuration) +- [Provider](#provider) +- [ID Format Changes](#id-format-changes) +- [Data Source: `google_container_engine_versions`](#data-source-google_container_engine_versions) +- [Resource: `google_access_context_manager_access_level`](#resource-google_access_context_manager_access_level) +- [Resource: `google_access_context_manager_service_perimeter`](#resource-google_access_context_manager_service_perimeter) +- [Resource: `google_app_engine_application`](#resource-google_app_engine_application) +- [Resource: `google_app_engine_domain_mapping`](#resource-google_app_engine_domain_mapping) +- [Resource: `google_app_engine_standard_app_version`](#resource-google_app_engine_standard_app_version) +- [Resource: `google_bigquery_dataset`](#resource-google_bigquery_dataset) +- [Resource: `google_bigquery_table`](#resource-google_bigquery_table) +- [Resource: `google_bigtable_app_profile`](#resource-google_bigtable_app_profile) +- [Resource: `google_binary_authorization_policy`](#resource-google_binary_authorization_policy) +- [Resource: `google_cloudbuild_trigger`](#resource-google_cloudbuild_trigger) +- [Resource: `google_cloudfunctions_function`](#resource-google_cloudfunctions_function) +- [Resource: `google_cloudiot_registry`](#resource-google_cloudiot_registry) +- [Resource: `google_cloudscheduler_job`](#resource-google_cloudscheduler_job) +- [Resource: `google_composer_environment`](#resource-google_composer_environment) +- [Resource: `google_compute_backend_bucket`](#resource-google_compute_backend_bucket) +- [Resource: `google_compute_backend_service`](#resource-google_compute_backend_service) +- [Resource: `google_compute_firewall`](#resource-google_compute_firewall) +- [Resource: `google_compute_forwarding_rule`](#resource-google_compute_forwarding_rule) +- [Resource: 
`google_compute_global_forwarding_rule`](#resource-google_compute_global_forwarding_rule) +- [Resource: `google_compute_health_check`](#resource-google_compute_health_check) +- [Resource: `google_compute_image`](#resource-google_compute_image) +- [Resource: `google_compute_instance`](#resource-google_compute_instance) +- [Resource: `google_compute_instance_group_manager`](#resource-google_compute_instance_group_manager) +- [Resource: `google_compute_instance_template`](#resource-google_compute_instance_template) +- [Resource: `google_compute_network`](#resource-google_compute_network) +- [Resource: `google_compute_network_peering`](#resource-google_compute_network_peering) +- [Resource: `google_compute_node_template`](#resource-google_compute_node_template) +- [Resource: `google_compute_region_backend_service`](#resource-google_compute_region_backend_service) +- [Resource: `google_compute_region_health_check`](#resource-google_compute_region_health_check) +- [Resource: `google_compute_region_instance_group_manager`](#resource-google_compute_instance_group_manager) +- [Resource: `google_compute_resource_policy`](#resource-google_compute_resource_policy) +- [Resource: `google_compute_route`](#resource-google_compute_route) +- [Resource: `google_compute_router`](#resource-google_compute_router) +- [Resource: `google_compute_router_peer`](#resource-google_compute_router_peer) +- [Resource: `google_compute_snapshot`](#resource-google_compute_snapshot) +- [Resource: `google_compute_subnetwork`](#resource-google_compute_subnetwork) - [Resource: `google_container_cluster`](#resource-google_container_cluster) +- [Resource: `google_container_node_pool`](#resource-google_container_node_pool) +- [Resource: `google_dataproc_autoscaling_policy`](#resource-google_dataproc_autoscaling_policy) +- [Resource: `google_dataproc_cluster`](#resource-google_dataproc_cluster) +- [Resource: `google_dataproc_job`](#resource-google_dataproc_job) +- [Resource: `google_dns_managed_zone`](#resource-google_dns_managed_zone) +- [Resource: `google_dns_policy`](#resource-google_dns_policy) +- [Resource: `google_healthcare_hl7_v2_store`](#resource-google_healthcare_hl7_v2_store) +- [Resource: `google_logging_metric`](#resource-google_logging_metric) +- [Resource: `google_mlengine_model`](#resource-google_mlengine_model) +- [Resource: `google_monitoring_alert_policy`](#resource-google_monitoring_alert_policy) +- [Resource: `google_monitoring_uptime_check_config`](#resource-google_monitoring_uptime_check_config) +- [Resource: `google_organization_policy`](#resource-google_organization_policy) +- [Resource: `google_project_iam_audit_config`](#resource-google_project_iam_audit_config) - [Resource: `google_project_service`](#resource-google_project_service) - [Resource: `google_project_services`](#resource-google_project_services) - [Resource: `google_pubsub_subscription`](#resource-google_pubsub_subscription) -- [Resource: `google_cloudiot_registry`](#resource-google_cloudiot_registry) +- [Resource: `google_security_scanner_scan_config`](#resource-google_security_scanner_scan_config) +- [Resource: `google_service_account_key`](#resource-google_service_account_key) +- [Resource: `google_sql_database_instance`](#resource-google_sql_database_instance) +- [Resource: `google_storage_bucket`](#resource-google_storage_bucket) +- [Resource: `google_storage_transfer_job`](#resource-google_storage_transfer_job) +- [Resource: `google_tpu_node`](#resource-google_tpu_node) + + + +## Provider Version Configuration + +-> Before upgrading to 
version 3.0.0, it is recommended to upgrade to the most +recent `2.X` series release of the provider, make the changes noted in this guide, +and ensure that your environment successfully runs +[`terraform plan`](https://www.terraform.io/docs/commands/plan.html) +without unexpected changes or deprecation notices. + +It is recommended to use [version constraints](https://www.terraform.io/docs/configuration/providers.html#provider-versions) +when configuring Terraform providers. If you are following that recommendation, +update the version constraints in your Terraform configuration and run +[`terraform init`](https://www.terraform.io/docs/commands/init.html) to download +the new version. + +If you aren't using version constraints, you can use `terraform init -upgrade` +in order to upgrade your provider to the latest released version. + +For example, given this previous configuration: + +```hcl +provider "google" { + # ... other configuration ... + + version = "~> 2.17.0" +} +``` + +An updated configuration: + +```hcl +provider "google" { + # ... other configuration ... + + version = "~> 3.0.0" +} +``` + +## Provider + +### Terraform 0.11 no longer supported + +Support for Terraform 0.11 has been deprecated, and Terraform 0.12 or higher is +required to `terraform init` the provider. See [the blog post](https://www.hashicorp.com/blog/deprecating-terraform-0-11-support-in-terraform-providers/) +for more information. It is recommended that you upgrade to Terraform 0.12 before +upgrading to version 3.0.0 of the provider. + +### `userinfo.email` added to default scopes + +`userinfo.email` has been added to the default set of OAuth scopes in the +provider. This provides the email address of the identity specified in `credentials` (generally +a service account) to GCP APIs in addition to an obfuscated user +id; in particular, it makes the email of the Terraform user available for some +Kubernetes and IAP use cases. + +If this was previously defined explicitly, the definition can now be removed. + +#### Old Config + +```hcl +provider "google" { + scopes = [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/userinfo.email", + ] +} +``` + +#### New Config + +```hcl +provider "google" {} +``` + +## ID Format Changes + +ID formats on many resources have changed; they have been standardized to resemble the `self_link` of +a resource. Users who depended on particular ID formats in previous versions may be impacted. + +## Data Source: `google_container_engine_versions` + +### `region` and `zone` are now removed + +Use `location` instead. + +## Resource: `google_access_context_manager_access_level` + +### `os_type` is now required on block `google_access_context_manager_access_level.basic.conditions.device_policy.os_constraints` + +In an attempt to avoid allowing empty blocks in config files, `os_type` is now +required on the `basic.conditions.device_policy.os_constraints` block. + +## Resource: `google_access_context_manager_service_perimeter` + +### At least one of `resources`, `access_levels`, or `restricted_services` is now required on `google_access_context_manager_service_perimeter.status` + +In an attempt to avoid allowing empty blocks in config files, at least one of `resources`, `access_levels`, +or `restricted_services` is now required on the `status` block.
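For illustration only, a minimal sketch of a `status` block that satisfies the new constraint; the `parent`, `name`, and `title` values below are placeholders and not taken from this guide:

```hcl
resource "google_access_context_manager_service_perimeter" "example" {
  # Placeholder identifiers for illustration.
  parent = "accessPolicies/123456789"
  name   = "accessPolicies/123456789/servicePerimeters/example"
  title  = "example"

  status {
    # At least one of resources, access_levels, or restricted_services
    # must now be set on this block.
    restricted_services = ["storage.googleapis.com"]
  }
}
```

Setting any one of the three fields is sufficient.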
+ +## Resource: `google_app_engine_application` + +### `split_health_checks` is now required on block `google_app_engine_application.feature_settings` + +In an attempt to avoid allowing empty blocks in config files, `split_health_checks` is now +required on the `feature_settings` block. + +## Resource: `google_app_engine_domain_mapping` + +### `ssl_management_type` is now required on `google_app_engine_domain_mapping.ssl_settings` + +In an attempt to avoid allowing empty blocks in config files, `ssl_management_type` is now +required on the `ssl_settings` block. + +## Resource: `google_app_engine_standard_app_version` + +### At least one of `zip` or `files` is now required on `google_app_engine_standard_app_version.deployment` + +In an attempt to avoid allowing empty blocks in config files, at least one of `zip` or `files` +is now required on the `deployment` block. + +### `shell` is now required on `google_app_engine_standard_app_version.entrypoint` + +In an attempt to avoid allowing empty blocks in config files, `shell` is now +required on the `entrypoint` block. + +### `script_path` is now required on `google_app_engine_standard_app_version.handlers.script` + +In an attempt to avoid allowing empty blocks in config files, `script_path` is now +required on the `handlers.script` block. + +### `source_url` is now required on `google_app_engine_standard_app_version.deployment.files` and `google_app_engine_standard_app_version.deployment.zip` + +In an attempt to avoid allowing empty blocks in config files, `source_url` is now +required on the `deployment.files` and `deployment.zip` blocks. + +## Resource: `google_bigquery_dataset` + +### `role` is now required on `google_bigquery_dataset.access` + +In an attempt to avoid allowing empty blocks in config files, `role` is now +required on the `access` block. + +## Resource: `google_bigquery_table` + +### At least one of `range` or `skip_leading_rows` is now required on `external_data_configuration.google_sheets_options` + +In an attempt to avoid allowing empty blocks in config files, at least one +of `range` or `skip_leading_rows` is now required on the +`external_data_configuration.google_sheets_options` block. + +## Resource: `google_bigtable_app_profile` + +### Exactly one of `single_cluster_routing` or `multi_cluster_routing_use_any` is now required on `google_bigtable_app_profile` + +In an attempt to be more consistent with the API, exactly one of `single_cluster_routing` or +`multi_cluster_routing_use_any` is now required on `google_bigtable_app_profile`. + +### `cluster_id` is now required on `google_bigtable_app_profile.single_cluster_routing` + +In an attempt to avoid allowing empty blocks in config files, `cluster_id` is now +required on the `single_cluster_routing` block. + +## Resource: `google_binary_authorization_policy` + +### `name_pattern` is now required on `google_binary_authorization_policy.admission_whitelist_patterns` + +In an attempt to avoid allowing empty blocks in config files, `name_pattern` is now +required on the `admission_whitelist_patterns` block. + +### `evaluation_mode` and `enforcement_mode` are now required on `google_binary_authorization_policy.cluster_admission_rules` + +In an attempt to avoid allowing empty blocks in config files, `evaluation_mode` and `enforcement_mode` are now +required on the `cluster_admission_rules` block.
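For illustration only, a sketch of a policy whose `cluster_admission_rules` block sets both fields; the cluster identifier and mode values below are placeholders and not taken from this guide:

```hcl
resource "google_binary_authorization_policy" "example" {
  # The default rule is unchanged by this upgrade; it is shown only so the
  # sketch is a complete resource.
  default_admission_rule {
    evaluation_mode  = "ALWAYS_ALLOW"
    enforcement_mode = "ENFORCED_BLOCK_AND_AUDIT_LOG"
  }

  cluster_admission_rules {
    cluster = "us-central1-a.my-cluster"

    # Both of these fields must now be set on the block.
    evaluation_mode  = "ALWAYS_ALLOW"
    enforcement_mode = "ENFORCED_BLOCK_AND_AUDIT_LOG"
  }
}
```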
+ +## Resource: `google_cloudbuild_trigger` + +### Exactly one of `filename` or `build` is now required on `google_cloudbuild_trigger` + +In an attempt to be more consistent with the API, exactly one of `filename` or `build` is now +required on `google_cloudbuild_trigger`. + +### Exactly one of `branch_name`, `tag_name` or `commit_sha` is now required on `google_cloudbuild_trigger.trigger_template` + +In an attempt to avoid allowing empty blocks in config files, exactly one +of `branch_name`, `tag_name` or `commit_sha` is now required on the +`trigger_template` block. + +### Exactly one of `pull_request` or `push` is now required on `google_cloudbuild_trigger.github` + +In an attempt to avoid allowing empty blocks in config files, exactly one +of `pull_request` or `push` is now required on the `github` block. + +### Exactly one of `branch` or `tag_name` is now required on `google_cloudbuild_trigger.github.push` + +In an attempt to avoid allowing empty blocks in config files, exactly one +of `branch` or `tag_name` is now required on the `github.push` block. + +### `steps` is now required on `google_cloudbuild_trigger.build` + +In an attempt to avoid allowing empty blocks in config files, `steps` is now +required on the `build` block. + +### `name` is now required on `google_cloudbuild_trigger.build.steps` + +In an attempt to avoid allowing empty blocks in config files, `name` is now +required on the `build.steps` block. + +### `name` and `path` are now required on `google_cloudbuild_trigger.build.steps.volumes` + +In an attempt to avoid allowing empty blocks in config files, `name` and `path` are now +required on the `build.steps.volumes` block. + +## Resource: `google_cloudfunctions_function` + +### The `runtime` option `nodejs6` has been deprecated + +`nodejs6` has been deprecated and is no longer the default value for `runtime`. +`runtime` is now required. + +## Resource: `google_cloudiot_registry` + +### Replace singular event notification config field with plural `event_notification_configs` + +Use the plural field `event_notification_configs` instead of +`event_notification_config`, which has now been removed. +Since the Cloud IoT API now accepts multiple event notification configs for a +registry, the singular field no longer exists on the API resource and has been +removed from Terraform to prevent conflicts. + +#### Old Config + +```hcl +resource "google_cloudiot_registry" "myregistry" { + name = "%s" + + event_notification_config { + pubsub_topic_name = "${google_pubsub_topic.event-topic.id}" + } +} +``` + +#### New Config + +```hcl +resource "google_cloudiot_registry" "myregistry" { + name = "%s" + + event_notification_configs { + pubsub_topic_name = "${google_pubsub_topic.event-topic.id}" + } +} +``` + +### `public_key_certificate` is now required on block `google_cloudiot_registry.credentials` + +In an attempt to avoid allowing empty blocks in config files, `public_key_certificate` is now +required on the `credentials` block. + +## Resource: `google_cloudscheduler_job` + +### Exactly one of `pubsub_target`, `http_target` or `app_engine_http_target` is required on `google_cloudscheduler_job` + +In an attempt to be more consistent with the API, exactly one of `pubsub_target`, `http_target` +or `app_engine_http_target` is now required on `google_cloudscheduler_job`. + +### `service_account_email` is now required on `google_cloudscheduler_job.http_target.oauth_token` and `google_cloudscheduler_job.http_target.oidc_token`.
+ +In an attempt to avoid allowing empty blocks in config files, `service_account_email` is now +required on the `http_target.oauth_token` and `http_target.oidc_token` blocks. + +### At least one of `retry_count`, `max_retry_duration`, `min_backoff_duration`, `max_backoff_duration`, or `max_doublings` is now required on `google_cloud_scheduler_job.retry_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `retry_count`, +`max_retry_duration`, `min_backoff_duration`, `max_backoff_duration`, or `max_doublings` is +now required on the `retry_config` block. + +### At least one of `service`, `version`, or `instance` is now required on `google_cloud_scheduler_job.app_engine_http_target.app_engine_routing` + +In an attempt to avoid allowing empty blocks in config files, at least one of `service`, +`version`, or `instance` is now required on the `app_engine_http_target.app_engine_routing` block. + +## Resource: `google_composer_environment` + +### At least one of `airflow_config_overrides`, `pypi_packages`, `env_variables`, `image_version`, or `python_version` is now required on `google_composer_environment.config.software_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `airflow_config_overrides`, +`pypi_packages`, `env_variables`, `image_version`, or `python_version` is now required on the +`config.software_config` block. + +### `use_ip_aliases` is now required on block `google_composer_environment.ip_allocation_policy` + +Previously the default value of `use_ip_aliases` was `true`. In an attempt to avoid allowing empty blocks +in config files, `use_ip_aliases` is now required on the `ip_allocation_policy` block. + +### `enable_private_endpoint` is now required on block `google_composer_environment.private_environment_config` + +Previously the default value of `enable_private_endpoint` was `true`. In an attempt to avoid allowing empty blocks +in config files, `enable_private_endpoint` is now required on the `private_environment_config` block. + +## Resource: `google_compute_backend_bucket` + +### `signed_url_cache_max_age_sec` is now required on `google_compute_backend_bucket.cdn_policy` + +Previously the default value of `signed_url_cache_max_age_sec` was `3600`. In an attempt to avoid allowing empty +blocks in config files, `signed_url_cache_max_age_sec` is now required on the +`cdn_policy` block. + +## Resource: `google_compute_backend_service` + +### At least one of `connect_timeout`, `max_requests_per_connection`, `max_connections`, `max_pending_requests`, `max_requests`, or `max_retries` is now required on `google_compute_backend_service.circuit_breakers` + +In an attempt to avoid allowing empty blocks in config files, at least one of `connect_timeout`, +`max_requests_per_connection`, `max_connections`, `max_pending_requests`, `max_requests`, +or `max_retries` is now required on the `circuit_breakers` block. + +### At least one of `ttl`, `name`, or `path` is now required on `google_compute_backend_service.consistent_hash.http_cookie` + +In an attempt to avoid allowing empty blocks in config files, at least one of `ttl`, `name`, or `path` +is now required on the `consistent_hash.http_cookie` block.
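For illustration only, a sketch of the nested block shape; the cookie name is a placeholder and the rest of the backend service configuration (including the settings that make `consistent_hash` applicable) is elided:

```hcl
resource "google_compute_backend_service" "example" {
  name = "example-backend"
  # ... other configuration ...

  consistent_hash {
    http_cookie {
      # At least one of ttl, name, or path must now be set.
      name = "example-cookie"
    }
  }
}
```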
+ +### At least one of `http_cookie`, `http_header_name`, or `minimum_ring_size` is now required on `google_compute_backend_service.consistent_hash` + +In an attempt to avoid allowing empty blocks in config files, at least one of `http_cookie`, +`http_header_name`, or `minimum_ring_size` is now required on the `consistent_hash` block. + +### At least one of `cache_key_policy` or `signed_url_cache_max_age_sec` is now required on `google_compute_backend_service.cdn_policy` + +In an attempt to avoid allowing empty blocks in config files, at least one of `cache_key_policy` or +`signed_url_cache_max_age_sec` is now required on the `cdn_policy` block. + +### At least one of `include_host`, `include_protocol`, `include_query_string`, `query_string_blacklist`, or `query_string_whitelist` is now required on `google_compute_backend_service.cdn_policy.cache_key_policy` + +In an attempt to avoid allowing empty blocks in config files, at least one of `include_host`, +`include_protocol`, `include_query_string`, `query_string_blacklist`, or `query_string_whitelist` +is now required on the `cdn_policy.cache_key_policy` block. + +### At least one of `base_ejection_time`, `consecutive_errors`, `consecutive_gateway_failure`, `enforcing_consecutive_errors`, `enforcing_consecutive_gateway_failure`, `enforcing_success_rate`, `interval`, `max_ejection_percent`, `success_rate_minimum_hosts`, `success_rate_request_volume`, or `success_rate_stdev_factor` is now required on `google_compute_backend_service.outlier_detection` + +In an attempt to avoid allowing empty blocks in config files, at least one of `base_ejection_time`, +`consecutive_errors`, `consecutive_gateway_failure`, `enforcing_consecutive_errors`, +`enforcing_consecutive_gateway_failure`, `enforcing_success_rate`, `interval`, `max_ejection_percent`, +`success_rate_minimum_hosts`, `success_rate_request_volume`, or `success_rate_stdev_factor` +is now required on the `outlier_detection` block. + +### At least one of `enable` or `sample_rate` is now required on `google_compute_backend_service.log_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `enable` or `sample_rate` +is now required on the `log_config` block. + +## Resource: `google_compute_firewall` + +### Exactly one of `allow` or `deny` is required on `google_compute_firewall` + +In an attempt to be more consistent with the API, exactly one of `allow` or `deny` +is now required on `google_compute_firewall`. + +## Resource: `google_compute_forwarding_rule` + +### `ip_version` is now removed + +`ip_version` is not used for regional forwarding rules. + +### `ip_address` is now strictly validated to enforce literal IP address format + +Previously documentation suggested Terraform could use the same range of valid +IP address formats for `ip_address` as accepted by the API (e.g. named addresses +or URLs to GCP Address resources). However, the server returns only literal IP +addresses and thus caused diffs on re-apply (i.e. a permadiff). We amended the +documentation to say Terraform only accepts literal IP addresses. + +This is now strictly validated. While this shouldn't have a large breaking +impact as users would have already run into permadiff issues on re-apply, +there might be validation errors for existing configs. The solution is to +replace other address formats with the IP address, either manually or by +interpolating values from a `google_compute_address` resource.
+ +#### Old Config (that would have permadiff) + +```hcl +resource "google_compute_address" "my-addr" { + name = "my-addr" +} + +resource "google_compute_forwarding_rule" "frule" { + name = "my-forwarding-rule" + + address = google_compute_address.my-addr.self_link +} +``` + +#### New Config + +```hcl +resource "google_compute_address" "my-addr" { + name = "my-addr" +} + +resource "google_compute_forwarding_rule" "frule" { + name = "my-forwarding-rule" + + address = google_compute_address.my-addr.address +} +``` + +## Resource: `google_compute_global_forwarding_rule` + +### `ip_address` is now validated to enforce literal IP address format + +See [`google_compute_forwarding_rule`](#resource-google_compute_forwarding_rule). + +## Resource: `google_compute_health_check` + +### Exactly one of `http_health_check`, `https_health_check`, `http2_health_check`, `tcp_health_check` or `ssl_health_check` is required on `google_compute_health_check` + +In an attempt to be more consistent with the API, exactly one of `http_health_check`, `https_health_check`, +`http2_health_check`, `tcp_health_check` or `ssl_health_check` is now required on +`google_compute_health_check`. + +### At least one of `host`, `request_path`, `response`, `port`, `port_name`, `proxy_header`, or `port_specification` is now required on `google_compute_health_check.http_health_check`, `google_compute_health_check.https_health_check` and `google_compute_health_check.http2_health_check` + +In an attempt to avoid allowing empty blocks in config files, at least one of `host`, `request_path`, `response`, +`port`, `port_name`, `proxy_header`, or `port_specification` is now required on the +`http_health_check`, `https_health_check` and `http2_health_check` blocks. + +### At least one of `request`, `response`, `port`, `port_name`, `proxy_header`, or `port_specification` is now required on `google_compute_health_check.ssl_health_check` and `google_compute_health_check.tcp_health_check` + +In an attempt to avoid allowing empty blocks in config files, at least one of `request`, `response`, `port`, `port_name`, +`proxy_header`, or `port_specification` is now required on the `ssl_health_check` and `tcp_health_check` blocks. + +## Resource: `google_compute_image` + +### `type` is now required on `google_compute_image.guest_os_features` + +In an attempt to avoid allowing empty blocks in config files, `type` is now required on the +`guest_os_features` block. + +## Resource: `google_compute_instance` + +### `interface` is now required on block `google_compute_instance.scratch_disk` + +Previously the default value of `interface` was `SCSI`. In an attempt to avoid allowing empty blocks +in config files, `interface` is now required on the `scratch_disk` block. + +### At least one of `auto_delete`, `device_name`, `disk_encryption_key_raw`, `kms_key_self_link`, `initialize_params`, `mode` or `source` is now required on `google_compute_instance.boot_disk` + +In an attempt to avoid allowing empty blocks in config files, at least one of `auto_delete`, `device_name`, +`disk_encryption_key_raw`, `kms_key_self_link`, `initialize_params`, `mode` or `source` is now required on the +`boot_disk` block. + +### At least one of `size`, `type`, `image`, or `labels` is now required on `google_compute_instance.boot_disk.initialize_params` + +In an attempt to avoid allowing empty blocks in config files, at least one of `size`, `type`, `image`, or `labels` +is now required on the `initialize_params` block.
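For illustration only, a sketch of a `boot_disk` block with a non-empty `initialize_params`; the instance name, machine type, zone, and image below are placeholders:

```hcl
resource "google_compute_instance" "example" {
  name         = "example-instance"
  machine_type = "n1-standard-1"
  zone         = "us-central1-a"

  boot_disk {
    initialize_params {
      # At least one of size, type, image, or labels must now be set.
      image = "debian-cloud/debian-9"
    }
  }

  network_interface {
    network = "default"
  }
}
```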
+ +### At least one of `enable_secure_boot`, `enable_vtpm`, or `enable_integrity_monitoring` is now required on `google_compute_instance.shielded_instance_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `enable_secure_boot`, `enable_vtpm`, +or `enable_integrity_monitoring` is now required on the `shielded_instance_config` block. + +### At least one of `on_host_maintenance`, `automatic_restart`, `preemptible`, or `node_affinities` is now required on `google_compute_instance.scheduling` + +In an attempt to avoid allowing empty blocks in config files, at least one of `on_host_maintenance`, `automatic_restart`, +`preemptible`, or `node_affinities` is now required on the `scheduling` block. + +## Resource: `google_compute_instance_group_manager` + +The following changes apply to both `google_compute_instance_group_manager` and `google_compute_region_instance_group_manager`. + +### `instance_template` has been replaced by `version.instance_template` + +Instance group managers should be using `version` blocks to reference which +instance template to use for provisioning. To upgrade use a single `version` +block with `instance_template` in your config and by default all traffic will be +directed to that version. + +### Old Config + +```hcl +resource "google_compute_instance_group_manager" "my_igm" { + name = "my-igm" + zone = "us-central1-c" + base_instance_name = "igm" + + instance_template = "${google_compute_instance_template.my_tmpl.self_link}" +} +``` + +### New Config + +```hcl +resource "google_compute_instance_group_manager" "my_igm" { + name = "my-igm" + zone = "us-central1-c" + base_instance_name = "igm" + + version { + name = "prod" + instance_template = "${google_compute_instance_template.my_tmpl.self_link}" + } +} +``` + +### `update_strategy` has been replaced by `update_policy` + +To allow much greater control over the updates happening to instance groups +`update_strategy` has been replaced by `update_policy`. The previous +functionality to determine if instance should be replaced or restarted can be +achieved using `update_policy.minimal_action`. For more details see the +[official guide](https://cloud.google.com/compute/docs/instance-groups/rolling-out-updates-to-managed-instance-groups). + +## Resource: `google_compute_instance_template` + +### At least one of `enable_secure_boot`, `enable_vtpm`, or `enable_integrity_monitoring` is now required on `google_compute_instance_template.shielded_instance_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `enable_secure_boot`, `enable_vtpm`, or +`enable_integrity_monitoring` is now required on the `shielded_instance_config` block. + +### At least one of `on_host_maintenance`, `automatic_restart`, `preemptible`, or `node_affinities` is now required on `google_compute_instance_template.scheduling` + +In an attempt to avoid allowing empty blocks in config files, at least one of `on_host_maintenance`, `automatic_restart`, +`preemptible`, or `node_affinities` is now required on the `scheduling` block. + +### Disks with invalid scratch disk configurations are now rejected + +The instance template API allows specifying invalid configurations in some cases, +and an error is only returned when attempting to provision them. Terraform will +now report that some configs that previously appeared valid at plan time are +now invalid. + +A disk with `type` `"SCRATCH"` must have `disk_type` `"local-ssd"` and a size of 375GB. 
For example, +the following is valid: + +```hcl +disk { + auto_delete = true + type = "SCRATCH" + disk_type = "local-ssd" + disk_size_gb = 375 +} +``` + +These configs would have been accepted by Terraform previously, but will now +fail: + +```hcl +disk { + source_image = "https://www.googleapis.com/compute/v1/projects/gce-uefi-images/global/images/centos-7-v20190729" + auto_delete = true + type = "SCRATCH" +} +``` + +```hcl +disk { + source_image = "https://www.googleapis.com/compute/v1/projects/gce-uefi-images/global/images/centos-7-v20190729" + auto_delete = true + disk_type = "local-ssd" +} +``` + +```hcl +disk { + auto_delete = true + type = "SCRATCH" + disk_type = "local-ssd" + disk_size_gb = 300 +} +``` + +### `kms_key_self_link` is now required on block `google_compute_instance_template.disk_encryption_key` + +In an attempt to avoid allowing empty blocks in config files, `kms_key_self_link` is now +required on the `disk_encryption_key` block. + +## Resource: `google_compute_network` + +### `ipv4_range` is now removed + +Legacy Networks are removed and you will no longer be able to create them +using this field from Feb 1, 2020 onwards. + +## Resource: `google_compute_network_peering` + +### `auto_create_routes` is now removed + +`auto_create_routes` has been removed because it's redundant and not +user-configurable. + +## Resource: `google_compute_node_template` + +### At least one of `cpus` or `memory` is now required on `google_compute_node_template.node_type_flexibility` + +In an attempt to avoid allowing empty blocks in config files, at least one of `cpus` or `memory` +is now required on the `node_type_flexibility` block. + +## Resource: `google_compute_region_backend_service` + +### At least one of `connect_timeout`, `max_requests_per_connection`, `max_connections`, `max_pending_requests`, `max_requests`, or `max_retries` is now required on `google_compute_region_backend_service.circuit_breakers` + +In an attempt to avoid allowing empty blocks in config files, at least one of `connect_timeout`, +`max_requests_per_connection`, `max_connections`, `max_pending_requests`, `max_requests`, +or `max_retries` is now required on the `circuit_breakers` block. + +### At least one of `ttl`, `name`, or `path` is now required on `google_compute_region_backend_service.consistent_hash.http_cookie` + +In an attempt to avoid allowing empty blocks in config files, at least one of `ttl`, `name`, or `path` +is now required on the `consistent_hash.http_cookie` block. + +### At least one of `http_cookie`, `http_header_name`, or `minimum_ring_size` is now required on `google_compute_region_backend_service.consistent_hash` + +In an attempt to avoid allowing empty blocks in config files, at least one of `http_cookie`, +`http_header_name`, or `minimum_ring_size` is now required on the `consistent_hash` block. + +### At least one of `disable_connection_drain_on_failover`, `drop_traffic_if_unhealthy`, or `failover_ratio` is now required on `google_compute_region_backend_service.failover_policy` + +In an attempt to avoid allowing empty blocks in config files, at least one of `disable_connection_drain_on_failover`, +`drop_traffic_if_unhealthy`, or `failover_ratio` is now required on the `failover_policy` block. 
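+
+For example, setting any single field inside `failover_policy` satisfies the new
+requirement, while an empty `failover_policy {}` block is rejected (the value below is
+illustrative):
+
+```hcl
+failover_policy {
+  # Any one of the listed fields is enough; an empty block is no longer accepted.
+  drop_traffic_if_unhealthy = true
+}
+```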
+ +### At least one of `base_ejection_time`, `consecutive_errors`, `consecutive_gateway_failure`, `enforcing_consecutive_errors`, `enforcing_consecutive_gateway_failure`, `enforcing_success_rate`, `interval`, `max_ejection_percent`, `success_rate_minimum_hosts`, `success_rate_request_volume`, or `success_rate_stdev_factor` is now required on `google_compute_region_backend_service.outlier_detection` + +In an attempt to avoid allowing empty blocks in config files, at least one of `base_ejection_time`, +`consecutive_errors`, `consecutive_gateway_failure`, `enforcing_consecutive_errors`, +`enforcing_consecutive_gateway_failure`, `enforcing_success_rate`, `interval`, `max_ejection_percent`, +`success_rate_minimum_hosts`, `success_rate_request_volume`, or `success_rate_stdev_factor` +is now required on the `outlier_detection` block. + +### At least one of `enable` or `sample_rate` is now required on `google_compute_region_backend_service.log_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `enable` or `sample_rate` +is now required on the `log_config` block. + +## Resource: `google_compute_region_health_check` + +### Exactly one of `http_health_check`, `https_health_check`, `http2_health_check`, `tcp_health_check` or `ssl_health_check` is required on `google_compute_region_health_check` + +In attempt to be more consistent with the API, exactly one of `http_health_check`, `https_health_check`, +`http2_health_check`, `tcp_health_check` or `ssl_health_check` is now required on +`google_compute_region_health_check`. + +### At least one of `host`, `request_path`, `response`, `port`, `port_name`, `proxy_header`, or `port_specification` is now required on `google_compute_region_health_check.http_health_check`, `google_compute_region_health_check.https_health_check` and `google_compute_region_health_check.http2_health_check` + +In an attempt to avoid allowing empty blocks in config files, at least one of `host`, `request_path`, `response`, +`port`, `port_name`, `proxy_header`, or `port_specification` is now required on the +`http_health_check`, `https_health_check` and `http2_health_check` blocks. - +### At least one of `request`, `response`, `port`, `port_name`, `proxy_header`, or `port_specification` is now required on `google_compute_region_health_check.ssl_health_check` and `google_compute_region_health_check.tcp_health_check` -## Provider Version Configuration +In an attempt to avoid allowing empty blocks in config files, at least one of `request`, `response`, `port`, `port_name`, +`proxy_header`, or `port_specification` is now required on the `ssl_health_check` and `tcp_health_check` blocks. --> Before upgrading to version 3.0.0, it is recommended to upgrade to the most -recent `2.X` series release of the provider and ensure that your environment -successfully runs [`terraform plan`](https://www.terraform.io/docs/commands/plan.html) -without unexpected changes or deprecation notices. +## Resource: `google_compute_resource_policy` -It is recommended to use [version constraints](https://www.terraform.io/docs/configuration/providers.html#provider-versions) -when configuring Terraform providers. If you are following that recommendation, -update the version constraints in your Terraform configuration and run -[`terraform init`](https://www.terraform.io/docs/commands/init.html) to download -the new version.
+### Exactly one of `hourly_schedule`, `daily_schedule` or `weekly_schedule` is now required on `google_compute_resource_policy.snapshot_schedule_policy.schedule` -If you aren't using version constraints, you can use `terraform init -upgrade` -in order to upgrade your provider to the latest released version. +In an attempt to avoid allowing empty blocks in config files, exactly one +of `hourly_schedule`, `daily_schedule` or `weekly_schedule` is now required +on the `snapshot_schedule_policy.schedule` block. -For example, given this previous configuration: +### At least one of `labels`, `storage_locations`, or `guest_flush` is now required on `google_compute_resource_policy.snapshot_schedule_policy.snapshot_properties` + +In an attempt to avoid allowing empty blocks in config files, at least one of +`labels`, `storage_locations`, or `guest_flush` is now required on the +`snapshot_schedule_policy.snapshot_properties` block. + +## Resource: `google_compute_route` + +### Exactly one of `next_hop_gateway`, `next_hop_instance`, `next_hop_ip`, `next_hop_vpn_tunnel` or `next_hop_ilb` is required on `google_compute_route` + +In attempt to be more consistent with the API, exactly one of `next_hop_gateway`, `next_hop_instance`, +`next_hop_ip`, `next_hop_vpn_tunnel` or `next_hop_ilb` is now required on the +`google_compute_route`. + +## Resource: `google_compute_router` + +### `range` is now required on `google_compute_router.bgp.advertised_ip_ranges` + +In an attempt to avoid allowing empty blocks in config files, `range` is now +required on the `bgp.advertised_ip_ranges` block. + +## Resource: `google_compute_router_peer` + +### `range` is now required on block `google_compute_router_peer.advertised_ip_ranges` + +In an attempt to avoid allowing empty blocks in config files, `range` is now +required on the `advertised_ip_ranges` block. + +## Resource: `google_compute_snapshot` + +### `raw_key` is now required on block `google_compute_snapshot.source_disk_encryption_key` + +In an attempt to avoid allowing empty blocks in config files, `raw_key` is now +required on the `source_disk_encryption_key` block. + +## Resource: `google_compute_subnetwork` + +### `enable_flow_logs` is now removed + +`enable_flow_logs` has been removed and should be replaced by the `log_config` block with configurations +for flow logging. Enablement of flow logs is now controlled by whether `log_config` is defined or not instead +of by the `enable_flow_logs` variable. Users with `enable_flow_logs = false` only need to remove the field. + +### At least one of `aggregation_interval`, `flow_sampling`, or `metadata` is now required on `google_compute_subnetwork.log_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of +`aggregation_interval`, `flow_sampling`, or `metadata` is now required on the +`log_config` block. + + +### Old Config ```hcl -provider "google" { - # ... other configuration ... +resource "google_compute_subnetwork" "subnet-with-logging" { + name = "log-test-subnetwork" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = "${google_compute_network.custom-test.self_link}" - version = "~> 2.17.0" + enable_flow_logs = true } ``` -An updated configuration: + +### New Config ```hcl -provider "google" { - # ... other configuration ... 
+resource "google_compute_subnetwork" "subnet-with-logging" { + name = "log-test-subnetwork" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = "${google_compute_network.custom-test.self_link}" - version = "~> 3.0.0" + log_config { + aggregation_interval = "INTERVAL_10_MIN" + flow_sampling = 0.5 + metadata = "INCLUDE_ALL_METADATA" + } } ``` + ## Resource: `google_container_cluster` ### `ip_allocation_policy` will catch out-of-band changes, `use_ip_aliases` removed @@ -103,7 +870,7 @@ removed" are related; see the other entry for more details. In `2.X`, `ip_allocation_policy` wouldn't cause a diff if it was undefined in config but was set on the cluster itself. Additionally, it could be defined with -`use_ip_aliases` set to `false`. However, this made it difficult to reason about +`use_ip_aliases` set to `false`. However, this made it difficult to reason about whether a cluster was routes-based or VPC-native. With `3.0.0`, Terraform will detect drift on the block. The configuration has also @@ -244,6 +1011,71 @@ resource "google_container_cluster" "primary" { } ``` +### `taint` field is now authoritative when set + +The `taint` field inside of `node_config` blocks on `google_container_cluster` +and `google_container_node_pool` will no longer ignore GPU-related values when +set. + +Previously, the field ignored upstream taints when unset and ignored unset GPU +taints when other taints were set. Now it will ignore upstream taints when set +and act authoritatively when set, requiring all taints (including Kubernetes and +GKE-managed ones) to be defined in config. + +Additionally, an empty taint can now be specified with `taint = []`. As a result +of this change, the JSON/state representation of the field has changed, +introducing an incompatibility for users who specify config in JSON instead of +HCL or who use `dynamic` blocks. See more details in the [Attributes as Blocks](https://www.terraform.io/docs/configuration/attr-as-blocks.html) +documentation. + +### `addons_config.kubernetes_dashboard` is now removed + +The `kubernetes_dashboard` addon is deprecated for clusters on GKE and +will soon be removed. It is recommended to use alternative GCP Console +dashboards. + +### `channel` is now required on `google_container_cluster.release_channel` + +In an attempt to avoid allowing empty blocks in config files, `channel` is now +required on the `release_channel` block. + +### `cidr_blocks` is now required on block `google_container_cluster.master_authorized_networks_config` + +In an attempt to avoid allowing empty blocks in config files, `cidr_blocks` is now +required on the `master_authorized_networks_config` block. + +### The `disabled` field is now required on the `addons_config` blocks for `http_load_balancing`, `horizontal_pod_autoscaling`, `istio_config`, `cloudrun_config` and `network_policy_config`. + +In an attempt to avoid allowing empty blocks in config files, `disabled` is now +required on the different `google_container_cluster.addons_config` blocks. + +### At least one of `http_load_balancing`, `horizontal_pod_autoscaling` , `network_policy_config`, `cloudrun_config`, or `istio_config` is now required on `google_container_cluster.addons_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `http_load_balancing`, +`horizontal_pod_autoscaling` , `network_policy_config`, `cloudrun_config`, or `istio_config` is now required on the +`addons_config` block. 
+ + +### At least one of `username`, `password` or `client_certificate_config` is now required on `google_container_cluster.master_auth` + +In an attempt to avoid allowing empty blocks in config files, at least one of `username`, `password` +or `client_certificate_config` is now required on the `master_auth` block. + +### `enabled` is now required on block `google_container_cluster.vertical_pod_autoscaling` + +In an attempt to avoid allowing empty blocks in config files, `enabled` is now +required on the `vertical_pod_autoscaling` block. + +### `enabled` is now required on block `google_container_cluster.network_policy` + +Previously the default value of `enabled` was `false`. In an attempt to avoid allowing empty blocks +in config files, `enabled` is now required on the `network_policy` block. + +### `enable_private_endpoint` is now required on block `google_container_cluster.private_cluster_config` + +In an attempt to avoid allowing empty blocks in config files, `enable_private_endpoint` is now +required on the `private_cluster_config` block. + ### `logging_service` and `monitoring_service` defaults changed GKE Stackdriver Monitoring (the GKE-specific Stackdriver experience) is now @@ -274,22 +1106,265 @@ logging_service = "logging.googleapis.com/kubernetes" monitoring_service = "monitoring.googleapis.com/kubernetes" ``` -### `taint` field is now authoritative when set +### `use_ip_aliases` is now required on block `google_container_cluster.ip_allocation_policy` -The `taint` field inside of `node_config` blocks on `google_container_cluster` -and `google_container_node_pool` will no longer ignore GPU-related values when -set. +Previously the default value of `use_ip_aliases` was `true`. In an attempt to avoid allowing empty blocks +in config files, `use_ip_aliases` is now required on the `ip_allocation_policy` block. -Previously, the field ignored upstream taints when unset and ignored unset GPU -taints when other taints were set. Now it will ignore upstream taints when set -and act authoritatively when set, requiring all taints (including Kubernetes and -GKE-managed ones) to be defined in config. +### `zone`, `region` and `additional_zones` are now removed -Additionally, an empty taint can now be specified with `taint = []`. As a result -of this change, the JSON/state representation of the field has changed, -introducing an incompatibility for users who specify config in JSON instead of -HCL or who use `dynamic` blocks. See more details in the [Attributes as Blocks](https://www.terraform.io/docs/configuration/attr-as-blocks.html) -documentation. +`zone` and `region` have been removed in favor of `location` and +`additional_zones` has been removed in favor of `node_locations` + +## Resource: `google_container_node_pool` + +### `zone` and `region` are now removed + +`zone` and `region` have been removed in favor of `location` + +## Resource: `google_dataproc_autoscaling_policy` + +### At least one of `min_instances`, `max_instances`, or `weight` is now required on `google_dataproc_autoscaling_policy.secondary_worker_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `min_instances`, +`max_instances`, or `weight` is now required on the `secondary_worker_config` +block. 
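+
+For example, a `secondary_worker_config` block now needs at least one of its fields set;
+a minimal sketch (the value is illustrative) looks like:
+
+```hcl
+secondary_worker_config {
+  # Setting `max_instances` alone is sufficient; an empty block is rejected.
+  max_instances = 10
+}
+```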
+ +## Resource: `google_dataproc_cluster` + +### At least one of `staging_bucket`, `gce_cluster_config`, `master_config`, `worker_config`, `preemptible_worker_config`, `software_config`, `initialization_action` or `encryption_config` is now required on `google_dataproc_cluster.cluster_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `staging_bucket`, +`gce_cluster_config`, `master_config`, `worker_config`, `preemptible_worker_config`, `software_config`, +`initialization_action` or `encryption_config` is now required on the +`cluster_config` block. + +### At least one of `image_version`, `override_properties` or `optional_components` is now required on `google_dataproc_cluster.cluster_config.software_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `image_version`, +`override_properties` or `optional_components` is now required on the +`cluster_config.software_config` block. + +### At least one of `num_instances` or `disk_config` is now required on `google_dataproc_cluster.cluster_config.preemptible_worker_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `num_instances` +or `disk_config` is now required on the `cluster_config.preemptible_worker_config` block. + +### At least one of `zone`, `network`, `subnetwork`, `tags`, `service_account`, `service_account_scopes`, `internal_ip_only` or `metadata` is now required on `google_dataproc_cluster.cluster_config.gce_cluster_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `zone`, `network`, `subnetwork`, +`tags`, `service_account`, `service_account_scopes`, `internal_ip_only` or `metadata` is now required on the +`gce_cluster_config` block. + +### At least one of `num_instances`, `image_uri`, `machine_type`, `min_cpu_platform`, `disk_config`, or `accelerators` is now required on `google_dataproc_cluster.cluster_config.master_config` and `google_dataproc_cluster.cluster_config.worker_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `num_instances`, `image_uri`, +`machine_type`, `min_cpu_platform`, `disk_config`, or `accelerators` is now required on the +`cluster_config.master_config` and `cluster_config.worker_config` blocks. + +### At least one of `num_local_ssds`, `boot_disk_size_gb` or `boot_disk_type` is now required on `google_dataproc_cluster.cluster_config.preemptible_worker_config.disk_config`, `google_dataproc_cluster.cluster_config.master_config.disk_config` and `google_dataproc_cluster.cluster_config.worker_config.disk_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `num_local_ssds`, `boot_disk_size_gb` +or `boot_disk_type` is now required on the `cluster_config.preemptible_worker_config.disk_config`, +`cluster_config.master_config.disk_config` and `cluster_config.worker_config.disk_config` blocks. + + +### `policy_uri` is now required on `google_dataproc_cluster.autoscaling_config` block. + +In an attempt to avoid allowing empty blocks in config files, `policy_uri` is now +required on the `autoscaling_config` block. + +## Resource: `google_dataproc_job` + +### At least one of `query_file_uri` or `query_list` is now required on `hive_config`, `pig_config`, and `sparksql_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of +`query_file_uri` or `query_list` is now required on the `hive_config`, `pig_config`, and +`sparksql_config` blocks. 
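+
+For example, a `pig_config` block (and likewise `hive_config` and `sparksql_config`)
+must now set either `query_file_uri` or `query_list`; a minimal sketch with an
+illustrative query looks like:
+
+```hcl
+pig_config {
+  # `query_list` satisfies the requirement; a `query_file_uri` pointing at a
+  # GCS object would work as well.
+  query_list = [
+    "DEFINE sin HiveUDF('sin');",
+  ]
+}
+```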
+ +### At least one of `main_class` or `main_jar_file_uri` is now required on `google_dataproc_job.spark_config` and `google_dataproc_job.hadoop_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of +`main_class` or `main_jar_file_uri` is now required on the `spark_config` +and `hadoop_config` blocks. + +### `driver_log_levels` is now required on `logging_config` blocks for `pyspark_config`, `hadoop_config`, `spark_config`, `pig_config`, and `sparksql_config`. + +In an attempt to avoid allowing empty blocks in config files, `driver_log_levels` is now +required on `pyspark_config`, `hadoop_config`, `spark_config`, `pig_config`, and +`sparksql_config` blocks. + +### `max_failures_per_hour` is now required on block `google_dataproc_job.scheduling` + +In an attempt to avoid allowing empty blocks in config files, `max_failures_per_hour` is now +required on the `scheduling` block. + +## Resource: `google_dns_managed_zone` + +### At least one of `kind`, `non_existence`, `state`, or `default_key_specs` is now required on `google_dns_managed_zone.dnssec_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of +`kind`, `non_existence`, `state`, or `default_key_specs` is now required on the +`dnssec_config` block. + +### `target_network` is now required on block `google_dns_managed_zone.peering_config` + +In an attempt to avoid allowing empty blocks in config files, `target_network` is now +required on the `peering_config` block. + +### `network_url` is now required on block `google_dns_managed_zone.peering_config.target_network` + +In an attempt to avoid allowing empty blocks in config files, `network_url` is now +required on the `peering_config.target_network` block. + +### `target_name_servers` is now required on block `google_dns_managed_zone.forwarding_config` + +In an attempt to avoid allowing empty blocks in config files, `target_name_servers` is now +required on the `forwarding_config` block. + +### `ipv4_address` is now required on block `google_dns_managed_zone.forwarding_config.target_name_servers` + +In an attempt to avoid allowing empty blocks in config files, `ipv4_address` is now +required on the `forwarding_config.target_name_servers` block. + +### `networks` is now required on block `google_dns_managed_zone.private_visibility_config` + +In an attempt to avoid allowing empty blocks in config files, `networks` is now +required on the `private_visibility_config` block. + +### `network_url` is now required on block `google_dns_managed_zone.private_visibility_config.networks` + +In an attempt to avoid allowing empty blocks in config files, `network_url` is now +required on the `private_visibility_config.networks` block. + +## Resource: `google_dns_policy` + +### `network_url` is now required on block `google_dns_policy.networks` + +In an attempt to avoid allowing empty blocks in config files, `network_url` is now +required on the `networks` block. + +### `target_name_servers` is now required on block `google_dns_policy.alternative_name_server_config` + +In an attempt to avoid allowing empty blocks in config files, `target_name_servers` is now
required on the `alternative_name_server_config` block.
+ +### `ipv4_address` is now required on block `google_dns_policy.alternative_name_server_config.target_name_servers` + +In an attempt to avoid allowing empty blocks in config files, `ipv4_address` is now +required on the `alternative_name_server_config.target_name_servers` block. + +## Resource: `google_healthcare_hl7_v2_store` + +### At least one of `allow_null_header ` or `segment_terminator` is now required on `google_healthcare_hl7_v2_store.parser_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `allow_null_header ` +or `segment_terminator` is now required on the `parser_config` block. + +## Resource: `google_logging_metric` + +### At least one of `linear_buckets`, `exponential_buckets` or `explicit_buckets` is now required on `google_logging_metric.bucket_options` + +In an attempt to avoid allowing empty blocks in config files, at least one of `linear_buckets`, +`exponential_buckets` or `explicit_buckets` is now required on the `bucket_options` block. + +### At least one of `num_finite_buckets`, `width` or `offset` is now required on `google_logging_metric.bucket_options.linear_buckets` + +In an attempt to avoid allowing empty blocks in config files, at least one of `num_finite_buckets`, +`width` or `offset` is now required on the `bucket_options.linear_buckets` block. + +### At least one of `num_finite_buckets`, `growth_factor` or `scale` is now required on `google_logging_metric.bucket_options.exponential_buckets` + +In an attempt to avoid allowing empty blocks in config files, at least one of `num_finite_buckets`, +`growth_factor` or `scale` is now required on the `bucket_options.exponential_buckets` block. + +### `bounds` is now required on `google_logging_metric.bucket_options.explicit_buckets` + +In an attempt to avoid allowing empty blocks in config files, `bounds` is now required on the +`bucket_options.explicit_buckets` block. + +## Resource: `google_mlengine_model` + +### `name` is now required on `google_mlengine_model.default_version` + +In an attempt to avoid allowing empty blocks in config files, `name` is now required on the +`default_version` block. + +## Resource: `google_monitoring_alert_policy` + +### `labels` is now removed + +`labels` is removed as it was never used. See `user_labels` for the correct field. + +### At least one of `content` or `mime_type` is now required on `google_monitoring_alert_policy.documentation` + +In an attempt to avoid allowing empty blocks in config files, at least one of `content` or `mime_type` +is now required on the `documentation` block. + +## Resource: `google_monitoring_uptime_check_config` + +### Exactly one of `resource_group` or `monitored_resource` is now required on `google_monitoring_uptime_check_config` + +In attempt to be more consistent with the API, exactly one of `resource_group` or `monitored_resource` is now required +on `google_monitoring_uptime_check_config`. + +### Exactly one of `http_check` or `tcp_check` is now required on `google_monitoring_uptime_check_config` + +In attempt to be more consistent with the API, exactly one of `http_check` or `tcp_check` is now required +on `google_monitoring_uptime_check_config`. + +### At least one of `auth_info`, `port`, `headers`, `path`, `use_ssl`, or `mask_headers` is now required on `google_monitoring_uptime_check_config.http_check` + +In an attempt to avoid allowing empty blocks in config files, at least one of `auth_info`, +`port`, `headers`, `path`, `use_ssl`, or `mask_headers` is now required on the `http_check` block. 
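+
+For example, an `http_check` block must now set at least one of the listed fields; a
+minimal sketch (path and port are illustrative) looks like:
+
+```hcl
+http_check {
+  # Any one of the listed fields would do; an empty block is no longer accepted.
+  path = "/health"
+  port = 443
+}
+```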
+ +### At least one of `resource_type` or `group_id` is now required on `google_monitoring_uptime_check_config.resource_group` + +In an attempt to avoid allowing empty blocks in config files, at least one of `resource_type` or `group_id` +is now required on the `resource_group` block. + +### `content` is now required on block `google_monitoring_uptime_check_config.content_matchers` + +In an attempt to avoid allowing empty blocks in config files, `content` is now +required on the `content_matchers` block. + +### `username` and `password` are now required on block `google_monitoring_uptime_check_config.http_check.auth_info` + +In an attempt to avoid allowing empty blocks in config files, `username` and `password` are now +required on the `http_check.auth_info` block. + +### `is_internal` and `internal_checker` are now removed + +`is_internal` and `internal_checker` never worked, and are now removed. + +## Resource: `google_organization_policy` + +### Exactly one of `list_policy`, `boolean_policy`, or `restore_policy` is now required on `google_organization_policy` + +In attempt to be more consistent with the API, exactly one of `list_policy`, `boolean_policy`, +or `restore_policy` is now required on `google_organization_policy`. + +### Exactly one of `all` or `values` is now required on `google_organization_policy.list_policy.allow` and `google_organization_policy.list_policy.deny` + +In an attempt to avoid allowing empty blocks in config files, exactly one of `all` or `values` is now +required on the `list_policy.allow` and `list_policy.deny` blocks. + +### `inherit_from_parent` is now required on block `google_organization_policy.list_policy` + +In an attempt to avoid allowing empty blocks in config files, `inherit_from_parent` is now +required on the `list_policy` block. + +## Resource: `google_project_iam_audit_config` + +### Audit configs are now authoritative on create + +Audit configs are now authoritative on create, rather than merging with existing configs on create. +Writing an audit config resource will now overwrite any existing audit configs on the given project. ## Resource: `google_project_service` @@ -325,8 +1400,9 @@ Users should migrate to using `google_project_service` resources, or using the module for a similar interface to `google_project_services`. -> Prior to `2.13.0`, each `google_project_service` sent separate API enablement -requests. From `2.13.0` onwards, those requests are batched. It's recommended -that you upgrade to `2.13.0+` before migrating if you encounter quota issues +requests. From `2.13.0` onwards, those requests are batched on write, and from `2.20.0` onwards, +batched on read. It's recommended that you upgrade to `2.13.0+` before migrating if you +encounter write quota issues or `2.20.0+` before migrating if you encounter read quota issues when you migrate off `google_project_services`. 
#### Old Config @@ -360,15 +1436,15 @@ module "project_services" { #### New Config (google_project_service) ```hcl -resource "google_project_service" "project_iam" { - project = "your-project-id" - service = "iam.googleapis.com" - disable_on_destroy = false -} +resource "google_project_service" "service" { + for_each = toset([ + "iam.googleapis.com", + "cloudresourcemanager.googleapis.com", + ]) + + service = each.key -resource "google_project_service" "project_cloudresourcemanager" { project = "your-project-id" - service = "cloudresourcemanager.googleapis.com" disable_on_destroy = false } ``` @@ -380,39 +1456,108 @@ resource "google_project_service" "project_cloudresourcemanager" { `name` previously could have been specified by a long name (e.g. `projects/my-project/subscriptions/my-subscription`) or a shortname (e.g. `my-subscription`). `name` now must be the shortname. +### `ttl` is now required on `google_pubsub_subscription.expiration_policy` -## Resource: `google_cloudiot_registry` +Previously, an empty `expiration_policy` block would allow the resource to never expire. In an attempt to avoid +allowing empty blocks in config files, `ttl` is now required on the `expiration_policy` block. `ttl` should be set +to `""` for the resource to never expire. -### Replace singular event notification config field with plural `event_notification_configs` +## Resource: `google_security_scanner_scan_config` -Use the plural field `event_notification_configs` instead of -`event_notification_config`, which has now been removed. -Since the Cloud IoT API now accept multiple event notification configs for a -registry, the singular field no longer exists on the API resource and has been -removed from Terraform to prevent conflicts. +### At least one of `google_account` or `custom_account` is now required on `google_security_scanner_scan_config.authentication` +In an attempt to avoid allowing empty blocks in config files, at least one of `google_account` or +`custom_account` is now required on the `authentication` block. -#### Old Config +## Resource: `google_service_account_key` -```hcl -resource "google_cloudiot_registry" "myregistry" { - name = "%s" +### `pgp_key`, `private_key_fingerprint`, and `private_key_encrypted` are now removed - event_notification_config { - pubsub_topic_name = "${google_pubsub_topic.event-topic.id}" - } -} +`google_service_account_key` previously supported encrypting the private key with +a supplied PGP key. This is [no longer supported](https://www.terraform.io/docs/extend/best-practices/sensitive-state.html#don-39-t-encrypt-state) +and has been removed as functionality. State should instead be treated as sensitive, +and ideally encrypted using a remote state backend. -``` +This will require re-provisioning your service account key, unfortunately. There +is no known alternative at this time. 
-#### New Config +## Resource: `google_sql_database_instance` -```hcl -resource "google_cloudiot_registry" "myregistry" { - name = "%s" +### At least one of `ca_certificate`, `client_certificate`, `client_key`, `connect_retry_interval`, `dump_file_path`, `failover_target`, `master_heartbeat_period`, `password`, `ssl_cipher`, `username`, or `verify_server_certificate` is now required on `google_sql_database_instance.settings.replica_configuration` - event_notification_configs { - pubsub_topic_name = "${google_pubsub_topic.event-topic.id}" - } -} -``` \ No newline at end of file +In an attempt to avoid allowing empty blocks in config files, at least one of `ca_certificate`, `client_certificate`, `client_key`, `connect_retry_interval`, +`dump_file_path`, `failover_target`, `master_heartbeat_period`, `password`, `ssl_cipher`, `username`, or `verify_server_certificate` is now required on the +`settings.replica_configuration` block. + +### At least one of `cert`, `common_name`, `create_time`, `expiration_time`, or `sha1_fingerprint` is now required on `google_sql_database_instance.settings.server_ca_cert` + +In an attempt to avoid allowing empty blocks in config files, at least one of `cert`, `common_name`, `create_time`, `expiration_time`, or `sha1_fingerprint` is now required on the `settings.server_ca_cert` block. + +### At least one of `day`, `hour`, or `update_track` is now required on `google_sql_database_instance.settings.maintenance_window` + +In an attempt to avoid allowing empty blocks in config files, at least one of `day`, `hour`, +or `update_track` is now required on the `settings.maintenance_window` block. + +### At least one of `binary_log_enabled`, `enabled`, `start_time`, or `location` is now required on `google_sql_database_instance.settings.backup_configuration` + +In an attempt to avoid allowing empty blocks in config files, at least one of `binary_log_enabled`, `enabled`, `start_time`, or `location` is now required on the +`settings.backup_configuration` block. + +### At least one of `authorized_networks`, `ipv4_enabled`, `require_ssl`, or `private_network` is now required on `google_sql_database_instance.settings.ip_configuration` + +In an attempt to avoid allowing empty blocks in config files, at least one of `authorized_networks`, `ipv4_enabled`, +`require_ssl`, and `private_network` is now required on the `settings.ip_configuration` block. + +### `name` and `value` are now required on block `google_sql_database_instance.settings.database_flags` + +In an attempt to avoid allowing empty blocks in config files, `name` and `value` are now required on the `settings.database_flags` block. + +### `value` is now required on block `google_sql_database_instance.settings.ip_configuration.authorized_networks` + +In an attempt to avoid allowing empty blocks in config files, `value` is now required on the `settings.ip_configuration.authorized_networks` block. + +### `zone` is now required on block `google_sql_database_instance.settings.location_preference` + +In an attempt to avoid allowing empty blocks in config files, `zone` is now +required on the `settings.location_preference` block. + +## Resource: `google_storage_bucket` + +### `enabled` is now required on block `google_storage_bucket.versioning` + +Previously the default value of `enabled` was `false`. In an attempt to avoid allowing empty blocks +in config files, `enabled` is now required on the `versioning` block. 
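+
+For example, buckets that previously relied on the implicit default must now spell out
+`enabled` explicitly (the bucket name is illustrative):
+
+```hcl
+resource "google_storage_bucket" "example" {
+  name = "example-bucket"
+
+  versioning {
+    # `enabled` previously defaulted to false; it must now be set explicitly.
+    enabled = true
+  }
+}
+```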
+ +### At least one of `main_page_suffix` or `not_found_page` is now required on `google_storage_bucket.website` + +In an attempt to avoid allowing empty blocks in config files, at least one of `main_page_suffix` or +`not_found_page` is now required on the `website` block. + +### `is_live` is now removed + +Please use `with_state` instead, as `is_live` is now removed. + +## Resource: `google_storage_transfer_job` + +### At least one of `overwrite_objects_already_existing_in_sink`, `delete_objects_unique_in_sink`, or `delete_objects_from_source_after_transfer` is now required on `google_storage_transfer_job.transfer_spec.transfer_options` + +In an attempt to avoid allowing empty blocks in config files, at least one of `overwrite_objects_already_existing_in_sink`, +`delete_objects_unique_in_sink`, or `delete_objects_from_source_after_transfer` is now required on the +`transfer_spec.transfer_options` block. + +### At least one of `gcs_data_source`, `aws_s3_data_source`, or `http_data_source` is now required on `google_storage_transfer_job.transfer_spec` + +In an attempt to avoid allowing empty blocks in config files, at least one of `gcs_data_source`, `aws_s3_data_source`, +or `http_data_source` is now required on the `transfer_spec` block. + +### At least one of `min_time_elapsed_since_last_modification`, `max_time_elapsed_since_last_modification`, `include_prefixes`, or `exclude_prefixes` is now required on `google_storage_transfer_job.transfer_spec.object_conditions` + +In an attempt to avoid allowing empty blocks in config files, at least one of `min_time_elapsed_since_last_modification`, +`max_time_elapsed_since_last_modification`, `include_prefixes`, or `exclude_prefixes` is now required on the `transfer_spec.object_conditions` block. + +## Resource: `google_tpu_node` + +### `preemptible` is now required on block `google_tpu_node.scheduling_config` + +In an attempt to avoid allowing empty blocks in config files, `preemptible` is now +required on the `scheduling_config` block.
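+
+For example, a `scheduling_config` block must now set `preemptible` explicitly instead
+of being left empty:
+
+```hcl
+scheduling_config {
+  # `preemptible` must be set explicitly; an empty block is no longer accepted.
+  preemptible = true
+}
+```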
diff --git a/third_party/terraform/website/docs/r/app_engine_application.html.markdown b/third_party/terraform/website/docs/r/app_engine_application.html.markdown index 99a4773d6da5..d998e9df5425 100755 --- a/third_party/terraform/website/docs/r/app_engine_application.html.markdown +++ b/third_party/terraform/website/docs/r/app_engine_application.html.markdown @@ -26,7 +26,7 @@ resource "google_project" "my_project" { } resource "google_app_engine_application" "app" { - project = "${google_project.my_project.project_id}" + project = google_project.my_project.project_id location_id = "us-central" } ``` diff --git a/third_party/terraform/website/docs/r/bigquery_table.html.markdown b/third_party/terraform/website/docs/r/bigquery_table.html.markdown index abfe5f9c6a98..be06f7fa42f4 100644 --- a/third_party/terraform/website/docs/r/bigquery_table.html.markdown +++ b/third_party/terraform/website/docs/r/bigquery_table.html.markdown @@ -30,7 +30,7 @@ resource "google_bigquery_dataset" "default" { } resource "google_bigquery_table" "default" { - dataset_id = "${google_bigquery_dataset.default.dataset_id}" + dataset_id = google_bigquery_dataset.default.dataset_id table_id = "bar" time_partitioning { @@ -57,10 +57,11 @@ resource "google_bigquery_table" "default" { } ] EOF + } resource "google_bigquery_table" "sheet" { - dataset_id = "${google_bigquery_dataset.default.dataset_id}" + dataset_id = google_bigquery_dataset.default.dataset_id table_id = "sheet" external_data_configuration { @@ -243,5 +244,5 @@ exported: BigQuery tables can be imported using the `project`, `dataset_id`, and `table_id`, e.g. ``` -$ terraform import google_bigquery_table.default gcp-project:foo.bar +$ terraform import google_bigquery_table.default gcp-project/foo/bar ``` diff --git a/third_party/terraform/website/docs/r/bigtable_gc_policy.html.markdown b/third_party/terraform/website/docs/r/bigtable_gc_policy.html.markdown index 0e4f4edf92d2..e19beed025fd 100644 --- a/third_party/terraform/website/docs/r/bigtable_gc_policy.html.markdown +++ b/third_party/terraform/website/docs/r/bigtable_gc_policy.html.markdown @@ -18,27 +18,29 @@ Creates a Google Cloud Bigtable GC Policy inside a family. For more information ```hcl resource "google_bigtable_instance" "instance" { - name = "tf-instance" - cluster_id = "tf-instance-cluster" - zone = "us-central1-b" - num_nodes = 3 - storage_type = "HDD" + name = "tf-instance" + cluster { + cluster_id = "tf-instance-cluster" + zone = "us-central1-b" + num_nodes = 3 + storage_type = "HDD" + } } resource "google_bigtable_table" "table" { name = "tf-table" - instance_name = "${google_bigtable_instance.instance.name}" - + instance_name = google_bigtable_instance.instance.name + column_family { family = "name" } } resource "google_bigtable_gc_policy" "policy" { - instance_name = "${google_bigtable_instance.instance.name}" - table = "${google_bigtable_table.table.name}" + instance_name = google_bigtable_instance.instance.name + table = google_bigtable_table.table.name column_family = "name" - + max_age { days = 7 } @@ -46,18 +48,18 @@ resource "google_bigtable_gc_policy" "policy" { ``` Multiple conditions is also supported. `UNION` when any of its sub-policies apply (OR). 
`INTERSECTION` when all its sub-policies apply (AND) -``` +```hcl resource "google_bigtable_gc_policy" "policy" { - instance_name = "${google_bigtable_instance.instance.name}" - table = "${google_bigtable_table.table.name}" + instance_name = google_bigtable_instance.instance.name + table = google_bigtable_table.table.name column_family = "name" - + mode = "UNION" - + max_age { days = 7 } - + max_version { number = 10 } diff --git a/third_party/terraform/website/docs/r/bigtable_instance.html.markdown b/third_party/terraform/website/docs/r/bigtable_instance.html.markdown index e54ab9d79e54..083db724b39d 100644 --- a/third_party/terraform/website/docs/r/bigtable_instance.html.markdown +++ b/third_party/terraform/website/docs/r/bigtable_instance.html.markdown @@ -18,7 +18,7 @@ Creates a Google Bigtable instance. For more information see ```hcl resource "google_bigtable_instance" "production-instance" { - name = "tf-instance" + name = "tf-instance" cluster { cluster_id = "tf-instance-cluster" diff --git a/third_party/terraform/website/docs/r/bigtable_instance_iam.html.markdown b/third_party/terraform/website/docs/r/bigtable_instance_iam.html.markdown index c05293c5283e..1fc149aff35b 100644 --- a/third_party/terraform/website/docs/r/bigtable_instance_iam.html.markdown +++ b/third_party/terraform/website/docs/r/bigtable_instance_iam.html.markdown @@ -24,7 +24,7 @@ Three different resources help you manage IAM policies on bigtable instances. Ea ```hcl data "google_iam_policy" "admin" { binding { - role = "roles/editor" + role = "roles/editor" members = [ "user:jane@example.com", ] @@ -32,9 +32,9 @@ data "google_iam_policy" "admin" { } resource "google_bigtable_instance_iam_policy" "editor" { - project = "your-project" - instance = "your-bigtable-instance" - policy_data = "${data.google_iam_policy.admin.policy_data}" + project = "your-project" + instance = "your-bigtable-instance" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -42,9 +42,9 @@ resource "google_bigtable_instance_iam_policy" "editor" { ```hcl resource "google_bigtable_instance_iam_binding" "editor" { - instance = "your-bigtable-instance" - role = "roles/editor" - members = [ + instance = "your-bigtable-instance" + role = "roles/editor" + members = [ "user:jane@example.com", ] } @@ -54,9 +54,9 @@ resource "google_bigtable_instance_iam_binding" "editor" { ```hcl resource "google_bigtable_instance_iam_member" "editor" { - instance = "your-bigtable-instance" - role = "roles/editor" - member = "user:jane@example.com" + instance = "your-bigtable-instance" + role = "roles/editor" + member = "user:jane@example.com" } ``` diff --git a/third_party/terraform/website/docs/r/bigtable_table.html.markdown b/third_party/terraform/website/docs/r/bigtable_table.html.markdown index b69fcd5bc555..8c035c6d35f4 100644 --- a/third_party/terraform/website/docs/r/bigtable_table.html.markdown +++ b/third_party/terraform/website/docs/r/bigtable_table.html.markdown @@ -18,16 +18,19 @@ Creates a Google Cloud Bigtable table inside an instance. 
For more information s ```hcl resource "google_bigtable_instance" "instance" { - name = "tf-instance" - cluster_id = "tf-instance-cluster" - zone = "us-central1-b" - num_nodes = 3 - storage_type = "HDD" + name = "tf-instance" + + cluster { + cluster_id = "tf-instance-cluster" + zone = "us-central1-b" + num_nodes = 3 + storage_type = "HDD" + } } resource "google_bigtable_table" "table" { name = "tf-table" - instance_name = "${google_bigtable_instance.instance.name}" + instance_name = google_bigtable_instance.instance.name split_keys = ["a", "b", "c"] } ``` diff --git a/third_party/terraform/website/docs/r/cloudfunctions_function.html.markdown b/third_party/terraform/website/docs/r/cloudfunctions_function.html.markdown index ffec17d642b0..98611aa045ff 100644 --- a/third_party/terraform/website/docs/r/cloudfunctions_function.html.markdown +++ b/third_party/terraform/website/docs/r/cloudfunctions_function.html.markdown @@ -29,29 +29,29 @@ resource "google_storage_bucket" "bucket" { resource "google_storage_bucket_object" "archive" { name = "index.zip" - bucket = "${google_storage_bucket.bucket.name}" + bucket = google_storage_bucket.bucket.name source = "./path/to/zip/file/which/contains/code" } resource "google_cloudfunctions_function" "function" { - name = "function-test" - description = "My function" - runtime = "nodejs10" + name = "function-test" + description = "My function" + runtime = "nodejs10" available_memory_mb = 128 - source_archive_bucket = "${google_storage_bucket.bucket.name}" - source_archive_object = "${google_storage_bucket_object.archive.name}" + source_archive_bucket = google_storage_bucket.bucket.name + source_archive_object = google_storage_bucket_object.archive.name trigger_http = true entry_point = "helloGET" } # IAM entry for all users to invoke the function resource "google_cloudfunctions_function_iam_member" "invoker" { - project = "${google_cloudfunctions_function.function.project}" - region = "${google_cloudfunctions_function.function.region}" - cloud_function = "${google_cloudfunctions_function.function.name}" + project = google_cloudfunctions_function.function.project + region = google_cloudfunctions_function.function.region + cloud_function = google_cloudfunctions_function.function.name - role = "roles/cloudfunctions.invoker" + role = "roles/cloudfunctions.invoker" member = "allUsers" } ``` @@ -65,18 +65,18 @@ resource "google_storage_bucket" "bucket" { resource "google_storage_bucket_object" "archive" { name = "index.zip" - bucket = "${google_storage_bucket.bucket.name}" + bucket = google_storage_bucket.bucket.name source = "./path/to/zip/file/which/contains/code" } resource "google_cloudfunctions_function" "function" { - name = "function-test" - description = "My function" - runtime = "nodejs10" + name = "function-test" + description = "My function" + runtime = "nodejs10" available_memory_mb = 128 - source_archive_bucket = "${google_storage_bucket.bucket.name}" - source_archive_object = "${google_storage_bucket_object.archive.name}" + source_archive_bucket = google_storage_bucket.bucket.name + source_archive_object = google_storage_bucket_object.archive.name trigger_http = true timeout = 60 entry_point = "helloGET" @@ -91,11 +91,11 @@ resource "google_cloudfunctions_function" "function" { # IAM entry for a single user to invoke the function resource "google_cloudfunctions_function_iam_member" "invoker" { - project = "${google_cloudfunctions_function.function.project}" - region = "${google_cloudfunctions_function.function.region}" - cloud_function = 
"${google_cloudfunctions_function.function.name}" + project = google_cloudfunctions_function.function.project + region = google_cloudfunctions_function.function.region + cloud_function = google_cloudfunctions_function.function.name - role = "roles/cloudfunctions.invoker" + role = "roles/cloudfunctions.invoker" member = "user:myFunctionInvoker@example.com" } ``` @@ -106,10 +106,8 @@ The following arguments are supported: * `name` - (Required) A user-defined name of the function. Function names must be unique globally. -* `runtime` - (Optional) The runtime in which the function is going to run. One -of `"nodejs6"`, `"nodejs8"`, `"nodejs10"`, `"python37"`, `"go111"`. If empty, -defaults to `"nodejs6"`. It's recommended that you override the default, as -`"nodejs6"` is deprecated. +* `runtime` - (Required) The runtime in which the function is going to run. +Eg. `"nodejs8"`, `"nodejs10"`, `"python37"`, `"go111"`. - - - diff --git a/third_party/terraform/website/docs/r/cloudiot_registry.html.markdown b/third_party/terraform/website/docs/r/cloudiot_registry.html.markdown index ec2188850d4c..5aca4f3eaf60 100644 --- a/third_party/terraform/website/docs/r/cloudiot_registry.html.markdown +++ b/third_party/terraform/website/docs/r/cloudiot_registry.html.markdown @@ -29,11 +29,11 @@ resource "google_cloudiot_registry" "default-registry" { name = "default-registry" event_notification_configs { - pubsub_topic_name = "${google_pubsub_topic.default-telemetry.id}" + pubsub_topic_name = google_pubsub_topic.default-telemetry.id } state_notification_config = { - pubsub_topic_name = "${google_pubsub_topic.default-devicestatus.id}" + pubsub_topic_name = google_pubsub_topic.default-devicestatus.id } http_config = { @@ -47,7 +47,7 @@ resource "google_cloudiot_registry" "default-registry" { credentials { public_key_certificate = { format = "X509_CERTIFICATE_PEM" - certificate = "${file("rsa_cert.pem")}" + certificate = file("rsa_cert.pem") } } } @@ -66,8 +66,6 @@ The following arguments are supported: * `region` - (Optional) The Region in which the created address should reside. If it is not provided, the provider region is used. -* `event_notification_config` - (Deprecated) Use `event_notification_configs` instead. - * `event_notification_configs` - (Optional) List of configurations for event notification, such as PubSub topics to publish device events to. Structure is documented below. diff --git a/third_party/terraform/website/docs/r/composer_environment.html.markdown b/third_party/terraform/website/docs/r/composer_environment.html.markdown index 77a9feacefef..2b830feab889 100644 --- a/third_party/terraform/website/docs/r/composer_environment.html.markdown +++ b/third_party/terraform/website/docs/r/composer_environment.html.markdown @@ -50,27 +50,27 @@ on the IAM policy binding (see `google_project_iam_member` below). 
```hcl resource "google_composer_environment" "test" { - name = "%s" + name = "%s" region = "us-central1" config { node_count = 4 node_config { - zone = "us-central1-a" + zone = "us-central1-a" machine_type = "n1-standard-1" - network = "${google_compute_network.test.self_link}" - subnetwork = "${google_compute_subnetwork.test.self_link}" + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link - service_account = "${google_service_account.test.name}" + service_account = google_service_account.test.name } } - depends_on = ["google_project_iam_member.composer-worker"] + depends_on = [google_project_iam_member.composer-worker] } resource "google_compute_network" "test" { - name = "composer-test-network" + name = "composer-test-network" auto_create_subnetworks = false } @@ -78,7 +78,7 @@ resource "google_compute_subnetwork" "test" { name = "composer-test-subnetwork" ip_cidr_range = "10.2.0.0/16" region = "us-central1" - network = "${google_compute_network.test.self_link}" + network = google_compute_network.test.self_link } resource "google_service_account" "test" { @@ -87,15 +87,15 @@ resource "google_service_account" "test" { } resource "google_project_iam_member" "composer-worker" { - role = "roles/composer.worker" - member = "serviceAccount:${google_service_account.test.email}" + role = "roles/composer.worker" + member = "serviceAccount:${google_service_account.test.email}" } ``` ### With Software (Airflow) Config ```hcl resource "google_composer_environment" "test" { - name = "%s" + name = "%s" region = "us-central1" config { @@ -110,7 +110,7 @@ resource "google_composer_environment" "test" { } env_variables = { - FOO = "bar" + FOO = "bar" } } } diff --git a/third_party/terraform/website/docs/r/compute_attached_disk.html.markdown b/third_party/terraform/website/docs/r/compute_attached_disk.html.markdown index cf2747939305..df9c0e11139f 100644 --- a/third_party/terraform/website/docs/r/compute_attached_disk.html.markdown +++ b/third_party/terraform/website/docs/r/compute_attached_disk.html.markdown @@ -28,8 +28,8 @@ To get more information about attaching disks, see: ## Example Usage ```hcl resource "google_compute_attached_disk" "default" { - disk = "${google_compute_disk.default.self_link}" - instance = "${google_compute_instance.default.self_link}" + disk = google_compute_disk.default.self_link + instance = google_compute_instance.default.self_link } resource "google_compute_instance" "default" { @@ -37,7 +37,6 @@ resource "google_compute_instance" "default" { machine_type = "n1-standard-1" zone = "us-west1-a" - boot_disk { initialize_params { image = "debian-cloud/debian-9" @@ -49,7 +48,7 @@ resource "google_compute_instance" "default" { } lifecycle { - ignore_changes = ["attached_disk"] + ignore_changes = [attached_disk] } } ``` @@ -118,6 +117,6 @@ This resource provides the following Attached Disk can be imported the following ways: ``` -$ terraform import google_compute_disk.default projects/{{project}}/zones/{{zone}}/disks/{{instance.name}}:{{disk.name}} -$ terraform import google_compute_disk.default {{project}}/{{zone}}/{{instance.name}}:{{disk.name}} +$ terraform import google_compute_disk.default projects/{{project}}/zones/{{zone}}/instances/{{instance.name}}/{{disk.name}} +$ terraform import google_compute_disk.default {{project}}/{{zone}}/{{instance.name}}/{{disk.name}} ``` diff --git a/third_party/terraform/website/docs/r/compute_instance_from_template.html.markdown 
b/third_party/terraform/website/docs/r/compute_instance_from_template.html.markdown index 99491ad5458d..1458e654c478 100644 --- a/third_party/terraform/website/docs/r/compute_instance_from_template.html.markdown +++ b/third_party/terraform/website/docs/r/compute_instance_from_template.html.markdown @@ -23,14 +23,14 @@ This resource is specifically to create a compute instance from a given ```hcl resource "google_compute_instance_template" "tpl" { - name = "template" + name = "template" machine_type = "n1-standard-1" disk { source_image = "debian-cloud/debian-9" - auto_delete = true + auto_delete = true disk_size_gb = 100 - boot = true + boot = true } network_interface { @@ -45,15 +45,15 @@ resource "google_compute_instance_template" "tpl" { } resource "google_compute_instance_from_template" "tpl" { - name = "instance-from-template" - zone = "us-central1-a" + name = "instance-from-template" + zone = "us-central1-a" - source_instance_template = "${google_compute_instance_template.tpl.self_link}" + source_instance_template = google_compute_instance_template.tpl.self_link // Override fields from instance template can_ip_forward = false labels = { - my_key = "my_value" + my_key = "my_value" } } ``` diff --git a/third_party/terraform/website/docs/r/compute_instance_group.html.markdown b/third_party/terraform/website/docs/r/compute_instance_group.html.markdown index 5009ac062ad6..4407f62caeae 100644 --- a/third_party/terraform/website/docs/r/compute_instance_group.html.markdown +++ b/third_party/terraform/website/docs/r/compute_instance_group.html.markdown @@ -24,7 +24,7 @@ resource "google_compute_instance_group" "test" { name = "terraform-test" description = "Terraform test instance group" zone = "us-central1-a" - network = "${google_compute_network.default.self_link}" + network = google_compute_network.default.self_link } ``` @@ -36,8 +36,8 @@ resource "google_compute_instance_group" "webservers" { description = "Terraform test instance group" instances = [ - "${google_compute_instance.test.self_link}", - "${google_compute_instance.test2.self_link}", + google_compute_instance.test.self_link, + google_compute_instance.test2.self_link, ] named_port { @@ -61,9 +61,9 @@ as shown in this example to avoid this type of error. 
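One common way to implement the workaround referenced above is Terraform's `create_before_destroy` lifecycle setting on the instance group, so the replacement group exists before the old one is detached from the backend service. A hedged fragment, sketched on the assumption that it matches the full example below:

```hcl
# Inside the google_compute_instance_group that a backend service references:
lifecycle {
  create_before_destroy = true
}
```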
```hcl resource "google_compute_instance_group" "staging_group" { - name = "staging-instance-group" - zone = "us-central1-c" - instances = [ "${google_compute_instance.staging_vm.self_link}" ] + name = "staging-instance-group" + zone = "us-central1-c" + instances = [google_compute_instance.staging_vm.self_link] named_port { name = "http" port = "8080" @@ -85,12 +85,12 @@ data "google_compute_image" "debian_image" { } resource "google_compute_instance" "staging_vm" { - name = "staging-vm" + name = "staging-vm" machine_type = "n1-standard-1" - zone = "us-central1-c" + zone = "us-central1-c" boot_disk { initialize_params { - image = "${data.google_compute_image.debian_image.self_link}" + image = data.google_compute_image.debian_image.self_link } } @@ -105,11 +105,11 @@ resource "google_compute_backend_service" "staging_service" { protocol = "HTTPS" backend { - group = "${google_compute_instance_group.staging_group.self_link}" + group = google_compute_instance_group.staging_group.self_link } health_checks = [ - "${google_compute_https_health_check.staging_health.self_link}", + google_compute_https_health_check.staging_health.self_link, ] } @@ -181,4 +181,5 @@ Instance group can be imported using the `zone` and `name` with an optional `pro ``` $ terraform import google_compute_instance_group.webservers us-central1-a/terraform-webservers $ terraform import google_compute_instance_group.webservers big-project/us-central1-a/terraform-webservers +$ terraform import google_compute_instance_group.webservers projects/big-project/zones/us-central1-a/instanceGroups/terraform-webservers ``` diff --git a/third_party/terraform/website/docs/r/compute_instance_group_manager.html.markdown b/third_party/terraform/website/docs/r/compute_instance_group_manager.html.markdown index 5ae5e2b51836..4107bf091199 100644 --- a/third_party/terraform/website/docs/r/compute_instance_group_manager.html.markdown +++ b/third_party/terraform/website/docs/r/compute_instance_group_manager.html.markdown @@ -24,7 +24,7 @@ resource "google_compute_health_check" "autohealing" { check_interval_sec = 5 timeout_sec = 5 healthy_threshold = 2 - unhealthy_threshold = 10 # 50 seconds + unhealthy_threshold = 10 # 50 seconds http_health_check { request_path = "/healthz" @@ -39,10 +39,10 @@ resource "google_compute_instance_group_manager" "appserver" { zone = "us-central1-a" version { - instance_template = "${google_compute_instance_template.appserver.self_link}" + instance_template = google_compute_instance_template.appserver.self_link } - target_pools = ["${google_compute_target_pool.appserver.self_link}"] + target_pools = [google_compute_target_pool.appserver.self_link] target_size = 2 named_port { @@ -51,7 +51,7 @@ resource "google_compute_instance_group_manager" "appserver" { } auto_healing_policies { - health_check = "${google_compute_health_check.autohealing.self_link}" + health_check = google_compute_health_check.autohealing.self_link initial_delay_sec = 300 } } @@ -60,22 +60,22 @@ resource "google_compute_instance_group_manager" "appserver" { ## Example Usage with multiple versions (`google-beta` provider) ```hcl resource "google_compute_instance_group_manager" "appserver" { - provider = "google-beta" - name = "appserver-igm" + provider = google-beta + name = "appserver-igm" base_instance_name = "app" zone = "us-central1-a" - target_size = 5 + target_size = 5 version { - name = "appserver" - instance_template = "${google_compute_instance_template.appserver.self_link}" + name = "appserver" + instance_template = 
google_compute_instance_template.appserver.self_link } version { - name = "appserver-canary" - instance_template = "${google_compute_instance_template.appserver-canary.self_link}" + name = "appserver-canary" + instance_template = google_compute_instance_template.appserver-canary.self_link target_size { fixed = 1 } @@ -94,16 +94,9 @@ The following arguments are supported: appending a hyphen and a random four-character string to the base instance name. -* `instance_template` - (Deprecated) The - full URL to an instance template from which all new instances - will be created. This field is replaced by `version.instance_template`. You must - specify at least one `version` block with an `instance_template`. - -* `version` - (Optional) Application versions managed by this instance group. Each +* `version` - (Required) Application versions managed by this instance group. Each version deals with a specific instance template, allowing canary release scenarios. Structure is documented below. - Until `instance_template` is removed this field will be Optional to allow for a - graceful upgrade. In the Beta provider and as of 3.0.0 it will be Required. * `name` - (Required) The name of the instance group manager. Must be 1-63 characters long and comply with @@ -124,9 +117,6 @@ The following arguments are supported: * `project` - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. -* `update_strategy` - (Deprecated) This field has been deprecated, use `update_policy` - instead. - * `target_size` - (Optional) The target number of running instances for this managed instance group. This value should always be explicitly set unless this resource is attached to an autoscaler, in which case it should never be set. Defaults to `0`. @@ -150,12 +140,12 @@ group. You can specify only one value. Structure is documented below. 
For more i The `update_policy` block supports: ```hcl -update_policy{ - type = "PROACTIVE" - minimal_action = "REPLACE" - max_surge_percent = 20 +update_policy { + type = "PROACTIVE" + minimal_action = "REPLACE" + max_surge_percent = 20 max_unavailable_fixed = 2 - min_ready_sec = 50 + min_ready_sec = 50 } ``` @@ -192,21 +182,23 @@ The `version` block supports: ```hcl version { - name = "appserver-canary" - instance_template = "${google_compute_instance_template.appserver-canary.self_link}" - target_size { - fixed = 1 - } + name = "appserver-canary" + instance_template = google_compute_instance_template.appserver-canary.self_link + + target_size { + fixed = 1 + } } ``` ```hcl version { - name = "appserver-canary" - instance_template = "${google_compute_instance_template.appserver-canary.self_link}" - target_size { - percent = 20 - } + name = "appserver-canary" + instance_template = google_compute_instance_template.appserver-canary.self_link + + target_size { + percent = 20 + } } ``` @@ -255,6 +247,7 @@ This resource provides the following Instance group managers can be imported using any of these accepted formats: ``` +$ terraform import google_compute_instance_group_manager.appserver projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{name}} $ terraform import google_compute_instance_group_manager.appserver {{project}}/{{zone}}/{{name}} $ terraform import google_compute_instance_group_manager.appserver {{project}}/{{name}} $ terraform import google_compute_instance_group_manager.appserver {{name}} diff --git a/third_party/terraform/website/docs/r/compute_instance_template.html.markdown b/third_party/terraform/website/docs/r/compute_instance_template.html.markdown index c43762726883..c8d00105abca 100644 --- a/third_party/terraform/website/docs/r/compute_instance_template.html.markdown +++ b/third_party/terraform/website/docs/r/compute_instance_template.html.markdown @@ -47,7 +47,7 @@ resource "google_compute_instance_template" "default" { // Use an existing disk resource disk { // Instance Templates reference disks by name, not self link - source = "${google_compute_disk.foobar.name}" + source = google_compute_disk.foobar.name auto_delete = false boot = false } @@ -72,7 +72,7 @@ data "google_compute_image" "my_image" { resource "google_compute_disk" "foobar" { name = "existing-disk" - image = "${data.google_compute_image.my_image.self_link}" + image = data.google_compute_image.my_image.self_link size = 10 type = "pd-ssd" zone = "us-central1-a" @@ -112,7 +112,7 @@ resource "google_compute_instance_template" "instance_template" { resource "google_compute_instance_group_manager" "instance_group_manager" { name = "instance-group-manager" - instance_template = "${google_compute_instance_template.instance_template.self_link}" + instance_template = google_compute_instance_template.instance_template.self_link base_instance_name = "instance-group-manager" zone = "us-central1-f" target_size = "1" @@ -153,7 +153,7 @@ resource "google_compute_instance_template" "instance_template" { // boot disk disk { - source_image = "${google_compute_image.my_image.self_link}" + source_image = google_compute_image.my_image.self_link } } ``` @@ -422,10 +422,12 @@ exported: ## Import -Instance templates can be imported using the `name`, e.g. 
+Instance templates can be imported using any of these accepted formats: ``` -$ terraform import google_compute_instance_template.default appserver-template +$ terraform import google_compute_instance_template.default projects/{{project}}/global/instanceTemplates/{{name}} +$ terraform import google_compute_instance_template.default {{project}}/{{name}} +$ terraform import google_compute_instance_template.default {{name}} ``` [custom-vm-types]: https://cloud.google.com/dataproc/docs/concepts/compute/custom-machine-types diff --git a/third_party/terraform/website/docs/r/compute_network_peering.html.markdown b/third_party/terraform/website/docs/r/compute_network_peering.html.markdown index c8d0fdb20a3b..3357a38aedcf 100644 --- a/third_party/terraform/website/docs/r/compute_network_peering.html.markdown +++ b/third_party/terraform/website/docs/r/compute_network_peering.html.markdown @@ -22,15 +22,15 @@ and ```hcl resource "google_compute_network_peering" "peering1" { - name = "peering1" - network = "${google_compute_network.default.self_link}" - peer_network = "${google_compute_network.other.self_link}" + name = "peering1" + network = google_compute_network.default.self_link + peer_network = google_compute_network.other.self_link } resource "google_compute_network_peering" "peering2" { - name = "peering2" - network = "${google_compute_network.other.self_link}" - peer_network = "${google_compute_network.default.self_link}" + name = "peering2" + network = google_compute_network.other.self_link + peer_network = google_compute_network.default.self_link } resource "google_compute_network" "default" { @@ -54,9 +54,6 @@ The following arguments are supported: * `peer_network` - (Required) Resource link of the peer network. -* `auto_create_routes` - (Optional) If set to `true`, the routes between the two networks will - be created and managed automatically. Defaults to `true`. - ## Attributes Reference In addition to the arguments listed above, the following computed attributes are diff --git a/third_party/terraform/website/docs/r/compute_project_metadata_item.html.markdown b/third_party/terraform/website/docs/r/compute_project_metadata_item.html.markdown index d3f92a6a9971..b82d909e5521 100644 --- a/third_party/terraform/website/docs/r/compute_project_metadata_item.html.markdown +++ b/third_party/terraform/website/docs/r/compute_project_metadata_item.html.markdown @@ -18,7 +18,7 @@ project metadata map. 
```hcl resource "google_compute_project_metadata_item" "default" { - key = "my_metadata" + key = "my_metadata" value = "my_value" } ``` diff --git a/third_party/terraform/website/docs/r/compute_region_instance_group_manager.html.markdown b/third_party/terraform/website/docs/r/compute_region_instance_group_manager.html.markdown index 16e1d4ecdab9..6ab67dcc8571 100644 --- a/third_party/terraform/website/docs/r/compute_region_instance_group_manager.html.markdown +++ b/third_party/terraform/website/docs/r/compute_region_instance_group_manager.html.markdown @@ -24,7 +24,7 @@ resource "google_compute_health_check" "autohealing" { check_interval_sec = 5 timeout_sec = 5 healthy_threshold = 2 - unhealthy_threshold = 10 # 50 seconds + unhealthy_threshold = 10 # 50 seconds http_health_check { request_path = "/healthz" @@ -36,14 +36,14 @@ resource "google_compute_region_instance_group_manager" "appserver" { name = "appserver-igm" base_instance_name = "app" + region = "us-central1" + distribution_policy_zones = ["us-central1-a", "us-central1-f"] version { - instance_template = "${google_compute_instance_template.appserver.self_link}" + instance_template = google_compute_instance_template.appserver.self_link } - region = "us-central1" - distribution_policy_zones = ["us-central1-a", "us-central1-f"] - target_pools = ["${google_compute_target_pool.appserver.self_link}"] + target_pools = [google_compute_target_pool.appserver.self_link] target_size = 2 named_port { @@ -52,11 +52,10 @@ resource "google_compute_region_instance_group_manager" "appserver" { } auto_healing_policies { - health_check = "${google_compute_health_check.autohealing.self_link}" + health_check = google_compute_health_check.autohealing.self_link initial_delay_sec = 300 } } - ``` ## Example Usage with multiple versions @@ -67,14 +66,14 @@ resource "google_compute_region_instance_group_manager" "appserver" { base_instance_name = "app" region = "us-central1" - target_size = 5 + target_size = 5 version { - instance_template = "${google_compute_instance_template.appserver.self_link}" + instance_template = google_compute_instance_template.appserver.self_link } version { - instance_template = "${google_compute_instance_template.appserver-canary.self_link}" + instance_template = google_compute_instance_template.appserver-canary.self_link target_size { fixed = 1 } @@ -93,16 +92,9 @@ The following arguments are supported: appending a hyphen and a random four-character string to the base instance name. -* `instance_template` - (Deprecated) The - full URL to an instance template from which all new instances - will be created. This field is replaced by `version.instance_template`. You must - specify at least one `version` block with an `instance_template`. - -* `version` - (Optional) Application versions managed by this instance group. Each +* `version` - (Required) Application versions managed by this instance group. Each version deals with a specific instance template, allowing canary release scenarios. Structure is documented below. - Until `instance_template` is removed this field will be Optional to allow for a - graceful upgrade. In the Beta provider and as of 3.0.0 it will be Required. * `name` - (Required) The name of the instance group manager. Must be 1-63 characters long and comply with @@ -150,13 +142,13 @@ group. You can specify one or more values. 
For more information, see the [offici The `update_policy` block supports: ```hcl -update_policy{ - type = "PROACTIVE" +update_policy { + type = "PROACTIVE" instance_redistribution_type = "PROACTIVE" - minimal_action = "REPLACE" - max_surge_percent = 20 - max_unavailable_fixed = 2 - min_ready_sec = 50 + minimal_action = "REPLACE" + max_surge_percent = 20 + max_unavailable_fixed = 2 + min_ready_sec = 50 } ``` @@ -195,21 +187,23 @@ The `version` block supports: ```hcl version { - name = "appserver-canary" - instance_template = "${google_compute_instance_template.appserver-canary.self_link}" - target_size { - fixed = 1 - } + name = "appserver-canary" + instance_template = google_compute_instance_template.appserver-canary.self_link + + target_size { + fixed = 1 + } } ``` ```hcl version { - name = "appserver-canary" - instance_template = "${google_compute_instance_template.appserver-canary.self_link}" - target_size { - percent = 20 - } + name = "appserver-canary" + instance_template = google_compute_instance_template.appserver-canary.self_link + + target_size { + percent = 20 + } } ``` diff --git a/third_party/terraform/website/docs/r/compute_security_policy.html.markdown b/third_party/terraform/website/docs/r/compute_security_policy.html.markdown index 778bd199eda8..88ea7b09f20b 100644 --- a/third_party/terraform/website/docs/r/compute_security_policy.html.markdown +++ b/third_party/terraform/website/docs/r/compute_security_policy.html.markdown @@ -106,8 +106,10 @@ exported: ## Import -Security policies can be imported using the `name`, e.g. +Security policies can be imported using any of the following formats ``` -$ terraform import google_compute_security_policy.policy my-policy +$ terraform import google_compute_security_policy.policy projects/{{project}}/global/securityPolicies/{{name}} +$ terraform import google_compute_security_policy.policy {{project}}/{{name}} +$ terraform import google_compute_security_policy.policy {{name}} ``` diff --git a/third_party/terraform/website/docs/r/compute_shared_vpc_host_project.html.markdown b/third_party/terraform/website/docs/r/compute_shared_vpc_host_project.html.markdown index cbefbe3e9b8b..6a5a56f5cfa6 100644 --- a/third_party/terraform/website/docs/r/compute_shared_vpc_host_project.html.markdown +++ b/third_party/terraform/website/docs/r/compute_shared_vpc_host_project.html.markdown @@ -28,11 +28,12 @@ resource "google_compute_shared_vpc_host_project" "host" { # A service project gains access to network resources provided by its # associated host project. 
resource "google_compute_shared_vpc_service_project" "service1" { - host_project = "${google_compute_shared_vpc_host_project.host.project}" + host_project = google_compute_shared_vpc_host_project.host.project service_project = "service-project-id-1" } + resource "google_compute_shared_vpc_service_project" "service2" { - host_project = "${google_compute_shared_vpc_host_project.host.project}" + host_project = google_compute_shared_vpc_host_project.host.project service_project = "service-project-id-2" } ``` diff --git a/third_party/terraform/website/docs/r/compute_target_pool.html.markdown b/third_party/terraform/website/docs/r/compute_target_pool.html.markdown index 4d7a7453385f..47119d23d410 100644 --- a/third_party/terraform/website/docs/r/compute_target_pool.html.markdown +++ b/third_party/terraform/website/docs/r/compute_target_pool.html.markdown @@ -28,7 +28,7 @@ resource "google_compute_target_pool" "default" { ] health_checks = [ - "${google_compute_http_health_check.default.name}", + google_compute_http_health_check.default.name, ] } @@ -85,8 +85,11 @@ exported: ## Import -Target pools can be imported using the `name`, e.g. +Target pools can be imported using any of the following formats: ``` -$ terraform import google_compute_target_pool.default instance-pool +$ terraform import google_compute_target_pool.default projects/{{project}}/regions/{{region}}/targetPools/{{name}} +$ terraform import google_compute_target_pool.default {{project}}/{{region}}/{{name}} +$ terraform import google_compute_target_pool.default {{region}}/{{name}} +$ terraform import google_compute_target_pool.default {{name}} ``` diff --git a/third_party/terraform/website/docs/r/container_cluster.html.markdown b/third_party/terraform/website/docs/r/container_cluster.html.markdown index f06cfbaefb62..6947913c9548 100644 --- a/third_party/terraform/website/docs/r/container_cluster.html.markdown +++ b/third_party/terraform/website/docs/r/container_cluster.html.markdown @@ -28,7 +28,7 @@ resource "google_container_cluster" "primary" { # separately managed node pools. So we create the smallest possible default # node pool and immediately delete it. remove_default_node_pool = true - initial_node_count = 1 + initial_node_count = 1 master_auth { username = "" @@ -43,7 +43,7 @@ resource "google_container_cluster" "primary" { resource "google_container_node_pool" "primary_preemptible_nodes" { name = "my-node-pool" location = "us-central1" - cluster = "${google_container_cluster.primary.name}" + cluster = google_container_cluster.primary.name node_count = 1 node_config { @@ -115,19 +115,7 @@ master will be created, as well as the default node location. If you specify a zone (such as `us-central1-a`), the cluster will be a zonal cluster with a single cluster master. If you specify a region (such as `us-west1`), the cluster will be a regional cluster with multiple masters spread across zones in -the region, and with default node locations in those zones as well. - -* `zone` - (Optional, Deprecated) The zone that the cluster master and nodes -should be created in. If specified, this cluster will be a zonal cluster. `zone` -has been deprecated in favour of `location`. - -* `region` (Optional, Deprecated) The region that the cluster master and nodes -should be created in. If specified, this cluster will be a [regional clusters](https://cloud.google.com/kubernetes-engine/docs/concepts/multi-zone-and-regional-clusters#regional) -where the cluster master and nodes (by default) will be created in several zones -throughout the region. 
`region` has been deprecated in favour of `location`. - -~> Only one of `location`, `zone`, and `region` may be set. If none are set, -the provider zone is used to create a zonal cluster. +the region, and with default node locations in those zones as well * `node_locations` - (Optional) The list of zones in which the cluster's nodes are located. Nodes must be in the region of their regional cluster or in the @@ -141,23 +129,13 @@ locations. In contrast, in a regional cluster, cluster master nodes are present in multiple zones in the region. For that reason, regional clusters should be preferred. -* `additional_zones` - (Optional) The list of zones in which the cluster's nodes -should be located. These must be in the same region as the cluster zone for -zonal clusters, or in the region of a regional cluster. In a multi-zonal cluster, -the number of nodes specified in `initial_node_count` is created in -all specified zones as well as the primary zone. If specified for a regional -cluster, nodes will only be created in these zones. `additional_zones` has been -deprecated in favour of `node_locations`. - * `addons_config` - (Optional) The configuration for addons supported by GKE. Structure is documented below. * `cluster_ipv4_cidr` - (Optional) The IP address range of the Kubernetes pods -in this cluster in CIDR notation (e.g. 10.96.0.0/14). Leave blank to have one -automatically chosen or specify a /14 block in 10.0.0.0/8. This field will only -work if your cluster is not VPC-native- when an `ip_allocation_policy` block is -not defined, or `ip_allocation_policy.use_ip_aliases` is set to false. If your -cluster is VPC-native, use `ip_allocation_policy.cluster_ipv4_cidr_block`. +in this cluster in CIDR notation (e.g. `10.96.0.0/14`). Leave blank to have one +automatically chosen or specify a `/14` block in `10.0.0.0/8`. This field will +only work for routes-based clusters, where `ip_allocation_policy` is not defined. * `cluster_autoscaling` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) Per-cluster configuration of Node Auto-Provisioning with Cluster Autoscaler to @@ -200,14 +178,14 @@ number of nodes per zone. Must be set if `node_pool` is not set. If you're using set this to a value of at least `1`, alongside setting `remove_default_node_pool` to `true`. -* `ip_allocation_policy` - (Optional) Configuration for cluster IP allocation. As of now, only pre-allocated subnetworks (custom type with secondary ranges) are supported. - This will activate IP aliases. See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-aliases) - Structure is documented below. This field is marked to use [Attribute as Block](/docs/configuration/attr-as-blocks.html) - in order to support explicit removal with `ip_allocation_policy = []`. +* `ip_allocation_policy` - (Optional) Configuration of cluster IP allocation for +VPC-native clusters. Adding this block enables [IP aliasing](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-aliases), +making the cluster VPC-native instead of routes-based. Structure is documented +below. * `logging_service` - (Optional) The logging service that the cluster should write logs to. Available options include `logging.googleapis.com`, - `logging.googleapis.com/kubernetes`, and `none`. Defaults to `logging.googleapis.com` + `logging.googleapis.com/kubernetes`, and `none`. 
Defaults to `logging.googleapis.com/kubernetes` * `maintenance_policy` - (Optional) The maintenance policy to use for the cluster. Structure is documented below. @@ -234,9 +212,9 @@ Structure is documented below. [the docs](https://cloud.google.com/kubernetes-engine/versioning-and-upgrades#specifying_cluster_version) describe the various acceptable formats for this field. --> If you are using the `google_container_engine_versions` datasource with a regional cluster, ensure that you have provided a `region` -to the datasource. A `region` can have a different set of supported versions than its corresponding `zone`s, and not all `zone`s in a -`region` are guaranteed to support the same version. +-> If you are using the `google_container_engine_versions` datasource with a regional cluster, ensure that you have provided a `location` +to the datasource. A region can have a different set of supported versions than its corresponding zones, and not all zones in a +region are guaranteed to support the same version. * `monitoring_service` - (Optional) The monitoring service that the cluster should write metrics to. @@ -244,7 +222,7 @@ to the datasource. A `region` can have a different set of supported versions tha VM metrics will be collected by Google Compute Engine regardless of this setting Available options include `monitoring.googleapis.com`, `monitoring.googleapis.com/kubernetes`, and `none`. - Defaults to `monitoring.googleapis.com` + Defaults to `monitoring.googleapis.com/kubernetes` * `network` - (Optional) The name or self_link of the Google Compute Engine network to which the cluster is connected. For Shared VPC, set this to the self link of the @@ -305,8 +283,8 @@ clusters with private nodes. Structure is documented below. [ResourceUsageExportConfig](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-usage-metering) feature. Structure is documented below. -* `subnetwork` - (Optional) The name or self_link of the Google Compute Engine subnetwork in - which the cluster's instances are launched. +* `subnetwork` - (Optional) The name or self_link of the Google Compute Engine +subnetwork in which the cluster's instances are launched. * `vertical_pod_autoscaling` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it. @@ -333,10 +311,6 @@ The `addons_config` block supports: controller addon, which makes it easy to set up HTTP load balancers for services in a cluster. It is enabled by default; set `disabled = true` to disable. -* `kubernetes_dashboard` - (Optional, Deprecated) The status of the Kubernetes Dashboard - add-on, which controls whether the Kubernetes Dashboard is enabled for this cluster. - It is disabled by default; set `disabled = false` to enable. - * `network_policy_config` - (Optional) Whether we should enable the network policy addon for the master. This must be enabled in order to enable network policy for the nodes. To enable this, you must also define a [`network_policy`](#network_policy) block, @@ -353,11 +327,12 @@ The `addons_config` block supports: This example `addons_config` disables two addons: -``` +```hcl addons_config { http_load_balancing { disabled = true } + horizontal_pod_autoscaling { disabled = true } @@ -407,7 +382,7 @@ The `maintenance_policy` block supports: Specify `start_time` in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format "HH:MM”, where HH : \[00-23\] and MM : \[00-59\] GMT. 
For example: -``` +```hcl maintenance_policy { daily_maintenance_window { start_time = "03:00" @@ -437,46 +412,26 @@ In beta, one or the other of `recurring_window` and `daily_maintenance_window` i The `ip_allocation_policy` block supports: -* `use_ip_aliases` - (Optional) Whether alias IPs will be used for pod IPs in -the cluster. Defaults to `true` if the `ip_allocation_policy` block is defined, -and to the API default otherwise. Prior to June 17th 2019, the default on the -API is `false`; afterwards, it's `true`. +* `cluster_secondary_range_name` - (Optional) The name of the existing secondary +range in the cluster's subnetwork to use for pod IP addresses. Alternatively, +`cluster_ipv4_cidr_block` can be used to automatically create a GKE-managed one. -* `cluster_secondary_range_name` - (Optional) The name of the secondary range to be - used as for the cluster CIDR block. The secondary range will be used for pod IP - addresses. This must be an existing secondary range associated with the cluster - subnetwork. - -* `services_secondary_range_name` - (Optional) The name of the secondary range to be - used as for the services CIDR block. The secondary range will be used for service - ClusterIPs. This must be an existing secondary range associated with the cluster - subnetwork. +* `services_secondary_range_name` - (Optional) The name of the existing +secondary range in the cluster's subnetwork to use for service `ClusterIP`s. +Alternatively, `services_ipv4_cidr_block` can be used to automatically create a +GKE-managed one. * `cluster_ipv4_cidr_block` - (Optional) The IP address range for the cluster pod IPs. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to -pick a specific range to use. This field will only work if your cluster is -VPC-native- when `ip_allocation_policy.use_ip_aliases` is undefined or set to -true. If your cluster is not VPC-native, use `cluster_ipv4_cidr`. - -* `node_ipv4_cidr_block` - (Optional) The IP address range of the node IPs in this cluster. - This should be set only if `create_subnetwork` is true. - Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) - to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) - from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to - pick a specific range to use. +pick a specific range to use. * `services_ipv4_cidr_block` - (Optional) The IP address range of the services IPs in this cluster. - Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) - to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) - from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to - pick a specific range to use. - -* `create_subnetwork`- (Optional) Whether a new subnetwork will be created automatically for the cluster. - -* `subnetwork_name` - (Optional) A custom subnetwork name to be used if create_subnetwork is true. - If this field is empty, then an automatic name will be chosen for the new subnetwork. +Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) +to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) +from the RFC-1918 private networks (e.g. 
10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to +pick a specific range to use. The `master_auth` block supports: @@ -488,7 +443,7 @@ The `master_auth` block supports: * `client_certificate_config` - (Optional) Whether client certificate authorization is enabled for this cluster. For example: -``` +```hcl master_auth { client_certificate_config { issue_client_certificate = false @@ -564,9 +519,9 @@ The `node_config` block supports: Note this will grant read access to ALL GCS content unless you also specify a custom role. See https://cloud.google.com/kubernetes-engine/docs/how-to/access-scopes * `logging-write` (`https://www.googleapis.com/auth/logging.write`), - if `logging_service` points to Google + if `logging_service` is not `none`. * `monitoring` (`https://www.googleapis.com/auth/monitoring`), - if `monitoring_service` points to Google + if `monitoring_service` is not `none`. * `preemptible` - (Optional) A boolean that represents whether or not the underlying node VMs are preemptible. See the [official documentation](https://cloud.google.com/container-engine/docs/preemptible-vm) @@ -609,7 +564,7 @@ The `guest_accelerator` block supports: The `workload_identity_config` block supports: * `identity_namespace` (Required) - Currently, the only supported identity namespace is the project's default. -``` +```hcl workload_identity_config { identity_namespace = "${data.google_project.project.project_id}.svc.id.goog" } @@ -675,9 +630,10 @@ The `resource_usage_export_config` block supports: * `bigquery_destination.dataset_id` (Required) - The ID of a BigQuery Dataset. For Example: -``` +```hcl resource_usage_export_config { enable_network_egress_metering = false + bigquery_destination { dataset_id = "cluster_resource_usage" } @@ -762,10 +718,12 @@ This resource provides the following ## Import -GKE clusters can be imported using the `project` , `zone` or `region`, and `name`. If the project is omitted, the default +GKE clusters can be imported using the `project` , `location`, and `name`. If the project is omitted, the default provider value will be used. Examples: ``` +$ terraform import google_container_cluster.mycluster projects/my-gcp-project/locations/us-east1-a/clusters/my-cluster + $ terraform import google_container_cluster.mycluster my-gcp-project/us-east1-a/my-cluster $ terraform import google_container_cluster.mycluster us-east1-a/my-cluster diff --git a/third_party/terraform/website/docs/r/container_node_pool.html.markdown b/third_party/terraform/website/docs/r/container_node_pool.html.markdown index cce77db73f71..dbb9d8d3914c 100644 --- a/third_party/terraform/website/docs/r/container_node_pool.html.markdown +++ b/third_party/terraform/website/docs/r/container_node_pool.html.markdown @@ -19,18 +19,18 @@ and [the API reference](https://cloud.google.com/container-engine/reference/rest resource "google_container_cluster" "primary" { name = "my-gke-cluster" location = "us-central1" - + # We can't create a cluster with no node pool defined, but we want to only use # separately managed node pools. So we create the smallest possible default # node pool and immediately delete it. 
remove_default_node_pool = true - initial_node_count = 1 + initial_node_count = 1 } resource "google_container_node_pool" "primary_preemptible_nodes" { name = "my-node-pool" location = "us-central1" - cluster = "${google_container_cluster.primary.name}" + cluster = google_container_cluster.primary.name node_count = 1 node_config { @@ -51,7 +51,7 @@ resource "google_container_node_pool" "primary_preemptible_nodes" { resource "google_container_node_pool" "np" { name = "my-node-pool" location = "us-central1-a" - cluster = "${google_container_cluster.primary.name}" + cluster = google_container_cluster.primary.name node_count = 3 timeouts { @@ -94,26 +94,16 @@ resource "google_container_cluster" "primary" { } } } - ``` ## Argument Reference -* `cluster` - (Required) The cluster to create the node pool for. Cluster must be present in `zone` provided for zonal clusters. +* `cluster` - (Required) The cluster to create the node pool for. Cluster must be present in `location` provided for zonal clusters. - - - * `location` - (Optional) The location (region or zone) of the cluster. -* `zone` - (Optional, Deprecated) The zone in which the cluster resides. `zone` -has been deprecated in favor of `location`. - -* `region` - (Optional, Deprecated) The region in which the cluster resides (for -regional clusters). `region` has been deprecated in favor of `location`. - --> Note: You must specify a `location` for either cluster type or the -type-specific `region` for regional clusters / `zone` for zonal clusters. - - - - * `autoscaling` - (Optional) Configuration required by cluster autoscaler to adjust diff --git a/third_party/terraform/website/docs/r/dataflow_job.html.markdown b/third_party/terraform/website/docs/r/dataflow_job.html.markdown index a93ab643ef85..7b4aa2127cb6 100644 --- a/third_party/terraform/website/docs/r/dataflow_job.html.markdown +++ b/third_party/terraform/website/docs/r/dataflow_job.html.markdown @@ -18,13 +18,13 @@ the official documentation for ```hcl resource "google_dataflow_job" "big_data_job" { - name = "dataflow-job" - template_gcs_path = "gs://my-bucket/templates/template_file" - temp_gcs_location = "gs://my-bucket/tmp_dir" - parameters = { - foo = "bar" - baz = "qux" - } + name = "dataflow-job" + template_gcs_path = "gs://my-bucket/templates/template_file" + temp_gcs_location = "gs://my-bucket/tmp_dir" + parameters = { + foo = "bar" + baz = "qux" + } } ``` diff --git a/third_party/terraform/website/docs/r/dataproc_cluster.html.markdown b/third_party/terraform/website/docs/r/dataproc_cluster.html.markdown index 384554d54a38..c94aabe99b82 100644 --- a/third_party/terraform/website/docs/r/dataproc_cluster.html.markdown +++ b/third_party/terraform/website/docs/r/dataproc_cluster.html.markdown @@ -21,8 +21,8 @@ whole cluster! 
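Because most arguments are non-updatable and force recreation, one optional safeguard (plain Terraform behaviour, not specific to this resource) is a `prevent_destroy` lifecycle rule, which fails the plan instead of silently replacing the cluster. A minimal, hedged fragment:

```hcl
# Added inside a google_dataproc_cluster resource block (illustrative):
lifecycle {
  prevent_destroy = true
}
```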
```hcl resource "google_dataproc_cluster" "simplecluster" { - name = "simplecluster" - region = "us-central1" + name = "simplecluster" + region = "us-central1" } ``` @@ -30,69 +30,62 @@ resource "google_dataproc_cluster" "simplecluster" { ```hcl resource "google_dataproc_cluster" "mycluster" { - name = "mycluster" - region = "us-central1" - labels = { - foo = "bar" + name = "mycluster" + region = "us-central1" + labels = { + foo = "bar" + } + + cluster_config { + staging_bucket = "dataproc-staging-bucket" + + master_config { + num_instances = 1 + machine_type = "n1-standard-1" + disk_config { + boot_disk_type = "pd-ssd" + boot_disk_size_gb = 15 + } } - cluster_config { - staging_bucket = "dataproc-staging-bucket" - - master_config { - num_instances = 1 - machine_type = "n1-standard-1" - disk_config { - boot_disk_type = "pd-ssd" - boot_disk_size_gb = 15 - } - } - - worker_config { - num_instances = 2 - machine_type = "n1-standard-1" - min_cpu_platform = "Intel Skylake" - disk_config { - boot_disk_size_gb = 15 - num_local_ssds = 1 - } - } - - preemptible_worker_config { - num_instances = 0 - } - - # Override or set some custom properties - software_config { - image_version = "1.3.7-deb9" - override_properties = { - "dataproc:dataproc.allow.zero.workers" = "true" - } - } - - gce_cluster_config { - #network = "${google_compute_network.dataproc_network.name}" - tags = ["foo", "bar"] - service_account_scopes = [ - # User supplied scopes - "https://www.googleapis.com/auth/monitoring", - - # The following scopes necessary for the cluster to function properly are - # always added, even if not explicitly specified: - # useraccounts-ro: https://www.googleapis.com/auth/cloud.useraccounts.readonly - # storage-rw: https://www.googleapis.com/auth/devstorage.read_write - # logging-write: https://www.googleapis.com/auth/logging.write - "useraccounts-ro","storage-rw","logging-write" - ] - } + worker_config { + num_instances = 2 + machine_type = "n1-standard-1" + min_cpu_platform = "Intel Skylake" + disk_config { + boot_disk_size_gb = 15 + num_local_ssds = 1 + } + } - # You can define multiple initialization_action blocks - initialization_action { - script = "gs://dataproc-initialization-actions/stackdriver/stackdriver.sh" - timeout_sec = 500 - } + preemptible_worker_config { + num_instances = 0 + } + + # Override or set some custom properties + software_config { + image_version = "1.3.7-deb9" + override_properties = { + "dataproc:dataproc.allow.zero.workers" = "true" + } + } + + gce_cluster_config { + tags = ["foo", "bar"] + service_account_scopes = [ + "https://www.googleapis.com/auth/monitoring", + "useraccounts-ro", + "storage-rw", + "logging-write", + ] + } + # You can define multiple initialization_action blocks + initialization_action { + script = "gs://dataproc-initialization-actions/stackdriver/stackdriver.sh" + timeout_sec = 500 } + } } ``` @@ -100,21 +93,21 @@ resource "google_dataproc_cluster" "mycluster" { ```hcl resource "google_dataproc_cluster" "accelerated_cluster" { - name = "my-cluster-with-gpu" - region = "us-central1" + name = "my-cluster-with-gpu" + region = "us-central1" - cluster_config { - gce_cluster_config { - zone = "us-central1-a" - } - - master_config { - accelerators { - accelerator_type = "nvidia-tesla-k80" - accelerator_count = "1" - } - } + cluster_config { + gce_cluster_config { + zone = "us-central1-a" + } + + master_config { + accelerators { + accelerator_type = "nvidia-tesla-k80" + accelerator_count = "1" + } } + } } ``` @@ -189,18 +182,17 @@ The `cluster_config` block 
supports: The `cluster_config.gce_cluster_config` block supports: ```hcl - cluster_config { - gce_cluster_config { - - zone = "us-central1-a" + cluster_config { + gce_cluster_config { + zone = "us-central1-a" - # One of the below to hook into a custom network / subnetwork - network = "${google_compute_network.dataproc_network.name}" - subnetwork = "${google_compute_network.dataproc_subnetwork.name}" + # One of the below to hook into a custom network / subnetwork + network = google_compute_network.dataproc_network.name + subnetwork = google_compute_network.dataproc_subnetwork.name - tags = ["foo", "bar"] - } + tags = ["foo", "bar"] } + } ``` * `zone` - (Optional, Computed) The GCP zone where your data is stored and used (i.e. where @@ -248,18 +240,19 @@ The `cluster_config.gce_cluster_config` block supports: The `cluster_config.master_config` block supports: ```hcl - cluster_config { - master_config { - num_instances = 1 - machine_type = "n1-standard-1" - min_cpu_platform = "Intel Skylake" - disk_config { - boot_disk_type = "pd-ssd" - boot_disk_size_gb = 15 - num_local_ssds = 1 - } - } +cluster_config { + master_config { + num_instances = 1 + machine_type = "n1-standard-1" + min_cpu_platform = "Intel Skylake" + + disk_config { + boot_disk_type = "pd-ssd" + boot_disk_size_gb = 15 + num_local_ssds = 1 } + } +} ``` * `num_instances`- (Optional, Computed) Specifies the number of master nodes to create. @@ -306,18 +299,19 @@ if you are trying to use accelerators in a given zone. The `cluster_config.worker_config` block supports: ```hcl - cluster_config { - worker_config { - num_instances = 3 - machine_type = "n1-standard-1" - min_cpu_platform = "Intel Skylake" - disk_config { - boot_disk_type = "pd-standard" - boot_disk_size_gb = 15 - num_local_ssds = 1 - } - } +cluster_config { + worker_config { + num_instances = 3 + machine_type = "n1-standard-1" + min_cpu_platform = "Intel Skylake" + + disk_config { + boot_disk_type = "pd-standard" + boot_disk_size_gb = 15 + num_local_ssds = 1 } + } +} ``` * `num_instances`- (Optional, Computed) Specifies the number of worker nodes to create. @@ -368,16 +362,17 @@ if you are trying to use accelerators in a given zone. The `cluster_config.preemptible_worker_config` block supports: ```hcl - cluster_config { - preemptible_worker_config { - num_instances = 1 - disk_config { - boot_disk_type = "pd-standard" - boot_disk_size_gb = 15 - num_local_ssds = 1 - } - } +cluster_config { + preemptible_worker_config { + num_instances = 1 + + disk_config { + boot_disk_type = "pd-standard" + boot_disk_size_gb = 15 + num_local_ssds = 1 } + } +} ``` Note: Unlike `worker_config`, you cannot set the `machine_type` value directly. 
This @@ -404,15 +399,16 @@ will be set for you based on whatever was set for the `worker_config.machine_typ The `cluster_config.software_config` block supports: ```hcl - cluster_config { - # Override or set some custom properties - software_config { - image_version = "1.3.7-deb9" - override_properties = { - "dataproc:dataproc.allow.zero.workers" = "true" - } - } +cluster_config { + # Override or set some custom properties + software_config { + image_version = "1.3.7-deb9" + + override_properties = { + "dataproc:dataproc.allow.zero.workers" = "true" } + } +} ``` * `image_version` - (Optional, Computed) The Cloud Dataproc image version to use @@ -431,13 +427,13 @@ The `cluster_config.software_config` block supports: The `initialization_action` block (Optional) can be specified multiple times and supports: ```hcl - cluster_config { - # You can define multiple initialization_action blocks - initialization_action { - script = "gs://dataproc-initialization-actions/stackdriver/stackdriver.sh" - timeout_sec = 500 - } - } +cluster_config { + # You can define multiple initialization_action blocks + initialization_action { + script = "gs://dataproc-initialization-actions/stackdriver/stackdriver.sh" + timeout_sec = 500 + } +} ``` * `script`- (Required) The script to be executed during initialization of the cluster. @@ -452,11 +448,10 @@ The `initialization_action` block (Optional) can be specified multiple times and The `encryption_config` block supports: ```hcl - cluster_config { - encryption_config { - kms_key_name = "projects/projectId/locations/region/keyRings/keyRingName/cryptoKeys/keyName" - } - } +cluster_config { + encryption_config { + kms_key_name = "projects/projectId/locations/region/keyRings/keyRingName/cryptoKeys/keyName" + } } ``` diff --git a/third_party/terraform/website/docs/r/dataproc_cluster_iam.html.markdown b/third_party/terraform/website/docs/r/dataproc_cluster_iam.html.markdown index 0668b4b1a6bb..8e9dbbe84cc2 100644 --- a/third_party/terraform/website/docs/r/dataproc_cluster_iam.html.markdown +++ b/third_party/terraform/website/docs/r/dataproc_cluster_iam.html.markdown @@ -24,7 +24,7 @@ Three different resources help you manage IAM policies on dataproc clusters. 
Eac ```hcl data "google_iam_policy" "admin" { binding { - role = "roles/editor" + role = "roles/editor" members = [ "user:jane@example.com", ] @@ -32,10 +32,10 @@ data "google_iam_policy" "admin" { } resource "google_dataproc_cluster_iam_policy" "editor" { - project = "your-project" - region = "your-region" - cluster = "your-dataproc-cluster" - policy_data = "${data.google_iam_policy.admin.policy_data}" + project = "your-project" + region = "your-region" + cluster = "your-dataproc-cluster" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -43,9 +43,9 @@ resource "google_dataproc_cluster_iam_policy" "editor" { ```hcl resource "google_dataproc_cluster_iam_binding" "editor" { - cluster = "your-dataproc-cluster" - role = "roles/editor" - members = [ + cluster = "your-dataproc-cluster" + role = "roles/editor" + members = [ "user:jane@example.com", ] } @@ -55,9 +55,9 @@ resource "google_dataproc_cluster_iam_binding" "editor" { ```hcl resource "google_dataproc_cluster_iam_member" "editor" { - cluster = "your-dataproc-cluster" - role = "roles/editor" - member = "user:jane@example.com" + cluster = "your-dataproc-cluster" + role = "roles/editor" + member = "user:jane@example.com" } ``` diff --git a/third_party/terraform/website/docs/r/dataproc_job.html.markdown b/third_party/terraform/website/docs/r/dataproc_job.html.markdown index 0fa795e14784..abfcc73da2c4 100644 --- a/third_party/terraform/website/docs/r/dataproc_job.html.markdown +++ b/third_party/terraform/website/docs/r/dataproc_job.html.markdown @@ -18,58 +18,58 @@ Manages a job resource within a Dataproc cluster within GCE. For more informatio ```hcl resource "google_dataproc_cluster" "mycluster" { - name = "dproc-cluster-unique-name" - region = "us-central1" + name = "dproc-cluster-unique-name" + region = "us-central1" } # Submit an example spark job to a dataproc cluster resource "google_dataproc_job" "spark" { - region = "${google_dataproc_cluster.mycluster.region}" - force_delete = true - placement { - cluster_name = "${google_dataproc_cluster.mycluster.name}" + region = google_dataproc_cluster.mycluster.region + force_delete = true + placement { + cluster_name = google_dataproc_cluster.mycluster.name + } + + spark_config { + main_class = "org.apache.spark.examples.SparkPi" + jar_file_uris = ["file:///usr/lib/spark/examples/jars/spark-examples.jar"] + args = ["1000"] + + properties = { + "spark.logConf" = "true" } - spark_config { - main_class = "org.apache.spark.examples.SparkPi" - jar_file_uris = ["file:///usr/lib/spark/examples/jars/spark-examples.jar"] - args = ["1000"] - - properties = { - "spark.logConf" = "true" - } - - logging_config { - driver_log_levels = { - "root" = "INFO" - } - } + logging_config { + driver_log_levels = { + "root" = "INFO" + } } + } } # Submit an example pyspark job to a dataproc cluster resource "google_dataproc_job" "pyspark" { - region = "${google_dataproc_cluster.mycluster.region}" - force_delete = true - placement { - cluster_name = "${google_dataproc_cluster.mycluster.name}" - } - - pyspark_config { - main_python_file_uri = "gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py" - properties = { - "spark.logConf" = "true" - } + region = google_dataproc_cluster.mycluster.region + force_delete = true + placement { + cluster_name = google_dataproc_cluster.mycluster.name + } + + pyspark_config { + main_python_file_uri = "gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py" + properties = { + "spark.logConf" 
= "true" } + } } # Check out current state of the jobs output "spark_status" { - value = "${google_dataproc_job.spark.status.0.state}" + value = google_dataproc_job.spark.status[0].state } output "pyspark_status" { - value = "${google_dataproc_job.pyspark.status.0.state}" + value = google_dataproc_job.pyspark.status[0].state } ``` @@ -112,17 +112,15 @@ The `pyspark_config` block supports: Submitting a pyspark job to the cluster. Below is an example configuration: ```hcl - # Submit a pyspark job to the cluster resource "google_dataproc_job" "pyspark" { - ... - - pyspark_config { - main_python_file_uri = "gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py" - properties = { - "spark.logConf" = "true" - } + ... + pyspark_config { + main_python_file_uri = "gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py" + properties = { + "spark.logConf" = "true" } + } } ``` @@ -152,26 +150,24 @@ are generally applicable: The `spark_config` block supports: ```hcl - # Submit a spark job to the cluster resource "google_dataproc_job" "spark" { - ... - - spark_config { - main_class = "org.apache.spark.examples.SparkPi" - jar_file_uris = ["file:///usr/lib/spark/examples/jars/spark-examples.jar"] - args = ["1000"] - - properties = { - "spark.logConf" = "true" - } - - logging_config { - driver_log_levels = { - "root" = "INFO" - } - } + ... + spark_config { + main_class = "org.apache.spark.examples.SparkPi" + jar_file_uris = ["file:///usr/lib/spark/examples/jars/spark-examples.jar"] + args = ["1000"] + + properties = { + "spark.logConf" = "true" + } + + logging_config { + driver_log_levels = { + "root" = "INFO" + } } + } } ``` @@ -197,19 +193,17 @@ resource "google_dataproc_job" "spark" { The `hadoop_config` block supports: ```hcl - # Submit a hadoop job to the cluster resource "google_dataproc_job" "hadoop" { - ... - - hadoop_config { - main_jar_file_uri = "file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar" - args = [ - "wordcount", - "file:///usr/lib/spark/NOTICE", - "gs://${google_dataproc_cluster.basic.cluster_config.0.bucket}/hadoopjob_output" - ] - } + ... + hadoop_config { + main_jar_file_uri = "file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar" + args = [ + "wordcount", + "file:///usr/lib/spark/NOTICE", + "gs://${google_dataproc_cluster.basic.cluster_config[0].bucket}/hadoopjob_output", + ] + } } ``` @@ -232,18 +226,16 @@ resource "google_dataproc_job" "hadoop" { The `hive_config` block supports: ```hcl - # Submit a hive job to the cluster resource "google_dataproc_job" "hive" { - ... - - hive_config { - query_list = [ - "DROP TABLE IF EXISTS dprocjob_test", - "CREATE EXTERNAL TABLE dprocjob_test(bar int) LOCATION 'gs://${google_dataproc_cluster.basic.cluster_config.0.bucket}/hive_dprocjob_test/'", - "SELECT * FROM dprocjob_test WHERE bar > 2", - ] - } + ... + hive_config { + query_list = [ + "DROP TABLE IF EXISTS dprocjob_test", + "CREATE EXTERNAL TABLE dprocjob_test(bar int) LOCATION 'gs://${google_dataproc_cluster.basic.cluster_config[0].bucket}/hive_dprocjob_test/'", + "SELECT * FROM dprocjob_test WHERE bar > 2", + ] + } } ``` @@ -264,20 +256,18 @@ resource "google_dataproc_job" "hive" { The `pig_config` block supports: ```hcl - # Submit a pig job to the cluster resource "google_dataproc_job" "pig" { - ... 
- - pig_config { - query_list = [ - "LNS = LOAD 'file:///usr/lib/pig/LICENSE.txt ' AS (line)", - "WORDS = FOREACH LNS GENERATE FLATTEN(TOKENIZE(line)) AS word", - "GROUPS = GROUP WORDS BY word", - "WORD_COUNTS = FOREACH GROUPS GENERATE group, COUNT(WORDS)", - "DUMP WORD_COUNTS" - ] - } + ... + pig_config { + query_list = [ + "LNS = LOAD 'file:///usr/lib/pig/LICENSE.txt ' AS (line)", + "WORDS = FOREACH LNS GENERATE FLATTEN(TOKENIZE(line)) AS word", + "GROUPS = GROUP WORDS BY word", + "WORD_COUNTS = FOREACH GROUPS GENERATE group, COUNT(WORDS)", + "DUMP WORD_COUNTS", + ] + } } ``` @@ -301,18 +291,16 @@ resource "google_dataproc_job" "pig" { The `sparksql_config` block supports: ```hcl - # Submit a spark SQL job to the cluster resource "google_dataproc_job" "sparksql" { - ... - - sparksql_config { - query_list = [ - "DROP TABLE IF EXISTS dprocjob_test", - "CREATE TABLE dprocjob_test(bar int)", - "SELECT * FROM dprocjob_test WHERE bar > 2", - ] - } + ... + sparksql_config { + query_list = [ + "DROP TABLE IF EXISTS dprocjob_test", + "CREATE TABLE dprocjob_test(bar int)", + "SELECT * FROM dprocjob_test WHERE bar > 2", + ] + } } ``` diff --git a/third_party/terraform/website/docs/r/dataproc_job_iam.html.markdown b/third_party/terraform/website/docs/r/dataproc_job_iam.html.markdown index 694d3126d7f6..0cdea119c9d0 100644 --- a/third_party/terraform/website/docs/r/dataproc_job_iam.html.markdown +++ b/third_party/terraform/website/docs/r/dataproc_job_iam.html.markdown @@ -24,7 +24,7 @@ Three different resources help you manage IAM policies on dataproc jobs. Each of ```hcl data "google_iam_policy" "admin" { binding { - role = "roles/editor" + role = "roles/editor" members = [ "user:jane@example.com", ] @@ -32,10 +32,10 @@ data "google_iam_policy" "admin" { } resource "google_dataproc_job_iam_policy" "editor" { - project = "your-project" - region = "your-region" - job_id = "your-dataproc-job" - policy_data = "${data.google_iam_policy.admin.policy_data}" + project = "your-project" + region = "your-region" + job_id = "your-dataproc-job" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -43,9 +43,9 @@ resource "google_dataproc_job_iam_policy" "editor" { ```hcl resource "google_dataproc_job_iam_binding" "editor" { - job_id = "your-dataproc-job" - role = "roles/editor" - members = [ + job_id = "your-dataproc-job" + role = "roles/editor" + members = [ "user:jane@example.com", ] } @@ -55,9 +55,9 @@ resource "google_dataproc_job_iam_binding" "editor" { ```hcl resource "google_dataproc_job_iam_member" "editor" { - job_id = "your-dataproc-job" - role = "roles/editor" - member = "user:jane@example.com" + job_id = "your-dataproc-job" + role = "roles/editor" + member = "user:jane@example.com" } ``` diff --git a/third_party/terraform/website/docs/r/dns_record_set.markdown b/third_party/terraform/website/docs/r/dns_record_set.html.markdown similarity index 80% rename from third_party/terraform/website/docs/r/dns_record_set.markdown rename to third_party/terraform/website/docs/r/dns_record_set.html.markdown index 1c48f640a0ab..9bf54bf116e9 100644 --- a/third_party/terraform/website/docs/r/dns_record_set.markdown +++ b/third_party/terraform/website/docs/r/dns_record_set.html.markdown @@ -24,9 +24,9 @@ resource "google_dns_record_set" "frontend" { type = "A" ttl = 300 - managed_zone = "${google_dns_managed_zone.prod.name}" + managed_zone = google_dns_managed_zone.prod.name - rrdatas = ["${google_compute_instance.frontend.network_interface.0.access_config.0.nat_ip}"] + rrdatas = 
[google_compute_instance.frontend.network_interface[0].access_config[0].nat_ip] } resource "google_compute_instance" "frontend" { @@ -41,8 +41,9 @@ resource "google_compute_instance" "frontend" { } network_interface { - network = "default" - access_config = {} + network = "default" + access_config { + } } } @@ -56,10 +57,10 @@ resource "google_dns_managed_zone" "prod" { ```hcl resource "google_dns_record_set" "a" { - name = "backend.${google_dns_managed_zone.prod.dns_name}" - managed_zone = "${google_dns_managed_zone.prod.name}" - type = "A" - ttl = 300 + name = "backend.${google_dns_managed_zone.prod.dns_name}" + managed_zone = google_dns_managed_zone.prod.name + type = "A" + ttl = 300 rrdatas = ["8.8.8.8"] } @@ -74,17 +75,17 @@ resource "google_dns_managed_zone" "prod" { ```hcl resource "google_dns_record_set" "mx" { - name = "${google_dns_managed_zone.prod.dns_name}" - managed_zone = "${google_dns_managed_zone.prod.name}" - type = "MX" - ttl = 3600 + name = google_dns_managed_zone.prod.dns_name + managed_zone = google_dns_managed_zone.prod.name + type = "MX" + ttl = 3600 rrdatas = [ "1 aspmx.l.google.com.", "5 alt1.aspmx.l.google.com.", "5 alt2.aspmx.l.google.com.", "10 alt3.aspmx.l.google.com.", - "10 alt4.aspmx.l.google.com." + "10 alt4.aspmx.l.google.com.", ] } @@ -100,10 +101,10 @@ Quotes (`""`) must be added around your `rrdatas` for a SPF record. Otherwise `r ```hcl resource "google_dns_record_set" "spf" { - name = "frontend.${google_dns_managed_zone.prod.dns_name}" - managed_zone = "${google_dns_managed_zone.prod.name}" - type = "TXT" - ttl = 300 + name = "frontend.${google_dns_managed_zone.prod.dns_name}" + managed_zone = google_dns_managed_zone.prod.name + type = "TXT" + ttl = 300 rrdatas = ["\"v=spf1 ip4:111.111.111.111 include:backoff.email-example.com -all\""] } @@ -120,16 +121,16 @@ resource "google_dns_managed_zone" "prod" { ```hcl resource "google_dns_record_set" "cname" { - name = "frontend.${google_dns_managed_zone.prod.dns_name}" - managed_zone = "${google_dns_managed_zone.prod.name}" - type = "CNAME" - ttl = 300 - rrdatas = ["frontend.mydomain.com."] + name = "frontend.${google_dns_managed_zone.prod.dns_name}" + managed_zone = google_dns_managed_zone.prod.name + type = "CNAME" + ttl = 300 + rrdatas = ["frontend.mydomain.com."] } resource "google_dns_managed_zone" "prod" { - name = "prod-zone" - dns_name = "prod.mydomain.com." + name = "prod-zone" + dns_name = "prod.mydomain.com." 
} ``` diff --git a/third_party/terraform/website/docs/r/endpoints_service.html.markdown b/third_party/terraform/website/docs/r/endpoints_service.html.markdown index c4ee63714ce2..232068c8e5cc 100644 --- a/third_party/terraform/website/docs/r/endpoints_service.html.markdown +++ b/third_party/terraform/website/docs/r/endpoints_service.html.markdown @@ -17,14 +17,14 @@ This resource creates and rolls out a Cloud Endpoints service using OpenAPI or g resource "google_endpoints_service" "openapi_service" { service_name = "api-name.endpoints.project-id.cloud.goog" project = "project-id" - openapi_config = "${file("openapi_spec.yml")}" + openapi_config = file("openapi_spec.yml") } resource "google_endpoints_service" "grpc_service" { service_name = "api-name.endpoints.project-id.cloud.goog" project = "project-id" - grpc_config = "${file("service_spec.yml")}" - protoc_output_base64 = "${base64encode(file("compiled_descriptor_file.pb"))}" + grpc_config = file("service_spec.yml") + protoc_output_base64 = base64encode(file("compiled_descriptor_file.pb")) } ``` diff --git a/third_party/terraform/website/docs/r/google_billing_account_iam_binding.md b/third_party/terraform/website/docs/r/google_billing_account_iam_binding.html.markdown similarity index 100% rename from third_party/terraform/website/docs/r/google_billing_account_iam_binding.md rename to third_party/terraform/website/docs/r/google_billing_account_iam_binding.html.markdown diff --git a/third_party/terraform/website/docs/r/google_billing_account_iam_member.md b/third_party/terraform/website/docs/r/google_billing_account_iam_member.html.markdown similarity index 100% rename from third_party/terraform/website/docs/r/google_billing_account_iam_member.md rename to third_party/terraform/website/docs/r/google_billing_account_iam_member.html.markdown diff --git a/third_party/terraform/website/docs/r/google_billing_account_iam_policy.md b/third_party/terraform/website/docs/r/google_billing_account_iam_policy.html.markdown similarity index 93% rename from third_party/terraform/website/docs/r/google_billing_account_iam_policy.md rename to third_party/terraform/website/docs/r/google_billing_account_iam_policy.html.markdown index 5f571bab400d..e704e910bf73 100644 --- a/third_party/terraform/website/docs/r/google_billing_account_iam_policy.md +++ b/third_party/terraform/website/docs/r/google_billing_account_iam_policy.html.markdown @@ -25,8 +25,8 @@ by use of this resource. The safest alternative is to use multiple `google_billi ```hcl resource "google_billing_account_iam_policy" "policy" { - billing_account_id = "00AA00-000AAA-00AA0A" - policy_data = "${data.google_iam_policy.admin.policy_data}" + billing_account_id = "00AA00-000AAA-00AA0A" + policy_data = data.google_iam_policy.admin.policy_data } data "google_iam_policy" "admin" { diff --git a/third_party/terraform/website/docs/r/google_folder.html.markdown b/third_party/terraform/website/docs/r/google_folder.html.markdown index fb476f3b3a49..6f90117df581 100644 --- a/third_party/terraform/website/docs/r/google_folder.html.markdown +++ b/third_party/terraform/website/docs/r/google_folder.html.markdown @@ -29,13 +29,13 @@ doc for more information. # Top-level folder under an organization. resource "google_folder" "department1" { display_name = "Department 1" - parent = "organizations/1234567" + parent = "organizations/1234567" } # Folder nested under another folder. 
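# The parent of a nested folder is its parent folder's resource name, which
# google_folder exports as `name` in the form folders/{folder_id}.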
resource "google_folder" "team-abc" { display_name = "Team ABC" - parent = "${google_folder.department1.name}" + parent = google_folder.department1.name } ``` diff --git a/third_party/terraform/website/docs/r/google_folder_iam_binding.html.markdown b/third_party/terraform/website/docs/r/google_folder_iam_binding.html.markdown index a5201dde4573..8b8a453fffa4 100644 --- a/third_party/terraform/website/docs/r/google_folder_iam_binding.html.markdown +++ b/third_party/terraform/website/docs/r/google_folder_iam_binding.html.markdown @@ -29,8 +29,8 @@ resource "google_folder" "department1" { } resource "google_folder_iam_binding" "admin" { - folder = "${google_folder.department1.name}" - role = "roles/editor" + folder = google_folder.department1.name + role = "roles/editor" members = [ "user:alice@gmail.com", diff --git a/third_party/terraform/website/docs/r/google_folder_iam_member.html.markdown b/third_party/terraform/website/docs/r/google_folder_iam_member.html.markdown index e8e4b9634951..bf01ecaf8a9e 100644 --- a/third_party/terraform/website/docs/r/google_folder_iam_member.html.markdown +++ b/third_party/terraform/website/docs/r/google_folder_iam_member.html.markdown @@ -26,9 +26,9 @@ resource "google_folder" "department1" { } resource "google_folder_iam_member" "admin" { - folder = "${google_folder.department1.name}" - role = "roles/editor" - member = "user:alice@gmail.com" + folder = google_folder.department1.name + role = "roles/editor" + member = "user:alice@gmail.com" } ``` diff --git a/third_party/terraform/website/docs/r/google_folder_iam_policy.html.markdown b/third_party/terraform/website/docs/r/google_folder_iam_policy.html.markdown index f709a2aa80d7..59bfd0313003 100644 --- a/third_party/terraform/website/docs/r/google_folder_iam_policy.html.markdown +++ b/third_party/terraform/website/docs/r/google_folder_iam_policy.html.markdown @@ -16,13 +16,13 @@ Platform folder. ```hcl resource "google_folder_iam_policy" "folder_admin_policy" { - folder = "${google_folder.department1.name}" - policy_data = "${data.google_iam_policy.admin.policy_data}" + folder = google_folder.department1.name + policy_data = data.google_iam_policy.admin.policy_data } resource "google_folder" "department1" { display_name = "Department 1" - parent = "organizations/1234567" + parent = "organizations/1234567" } data "google_iam_policy" "admin" { diff --git a/third_party/terraform/website/docs/r/google_folder_organization_policy.html.markdown b/third_party/terraform/website/docs/r/google_folder_organization_policy.html.markdown index d6687932a4a3..7a4690f9d7eb 100644 --- a/third_party/terraform/website/docs/r/google_folder_organization_policy.html.markdown +++ b/third_party/terraform/website/docs/r/google_folder_organization_policy.html.markdown @@ -54,7 +54,7 @@ resource "google_folder_organization_policy" "services_policy" { constraint = "serviceuser.services" list_policy { - suggested_values = "compute.googleapis.com" + suggested_value = "compute.googleapis.com" deny { values = ["cloudresourcemanager.googleapis.com"] @@ -108,7 +108,7 @@ The `list_policy` block supports: * `allow` or `deny` - (Optional) One or the other must be set. -* `suggested_values` - (Optional) The Google Cloud Console will try to default to a configuration that matches the value specified in this field. +* `suggested_value` - (Optional) The Google Cloud Console will try to default to a configuration that matches the value specified in this field. 
* `inherit_from_parent` - (Optional) If set to true, the values from the effective Policy of the parent resource are inherited, meaning the values set in this Policy are added to the values inherited up the hierarchy. @@ -137,6 +137,6 @@ exported: Folder organization policies can be imported using any of the follow formats: ``` -$ terraform import google_folder_organization_policy.policy folders/folder-1234:constraints/serviceuser.services -$ terraform import google_folder_organization_policy.policy folder-1234:serviceuser.services +$ terraform import google_folder_organization_policy.policy folders/folder-1234/constraints/serviceuser.services +$ terraform import google_folder_organization_policy.policy folder-1234/serviceuser.services ``` diff --git a/third_party/terraform/website/docs/r/google_iap_tunnel_instance_iam.markdown b/third_party/terraform/website/docs/r/google_iap_tunnel_instance_iam.html.markdown similarity index 100% rename from third_party/terraform/website/docs/r/google_iap_tunnel_instance_iam.markdown rename to third_party/terraform/website/docs/r/google_iap_tunnel_instance_iam.html.markdown diff --git a/third_party/terraform/website/docs/r/google_kms_key_ring_iam.html.markdown b/third_party/terraform/website/docs/r/google_kms_key_ring_iam.html.markdown index 4e16455d9bef..b06a0bb48c91 100644 --- a/third_party/terraform/website/docs/r/google_kms_key_ring_iam.html.markdown +++ b/third_party/terraform/website/docs/r/google_kms_key_ring_iam.html.markdown @@ -33,8 +33,8 @@ data "google_iam_policy" "admin" { } resource "google_kms_key_ring_iam_policy" "key_ring" { - key_ring_id = "your-key-ring-id" - policy_data = "${data.google_iam_policy.admin.policy_data}" + key_ring_id = "your-key-ring-id" + policy_data = data.google_iam_policy.admin.policy_data } ``` diff --git a/third_party/terraform/website/docs/r/google_organization_iam_member.md b/third_party/terraform/website/docs/r/google_organization_iam_member.html.markdown similarity index 96% rename from third_party/terraform/website/docs/r/google_organization_iam_member.md rename to third_party/terraform/website/docs/r/google_organization_iam_member.html.markdown index ca03fe63628a..df547c2937c8 100644 --- a/third_party/terraform/website/docs/r/google_organization_iam_member.md +++ b/third_party/terraform/website/docs/r/google_organization_iam_member.html.markdown @@ -21,8 +21,8 @@ the IAM policy for an existing Google Cloud Platform Organization. 
```hcl resource "google_organization_iam_member" "binding" { org_id = "0123456789" - role = "roles/editor" - member = "user:alice@gmail.com" + role = "roles/editor" + member = "user:alice@gmail.com" } ``` diff --git a/third_party/terraform/website/docs/r/google_organization_iam_policy.md b/third_party/terraform/website/docs/r/google_organization_iam_policy.html.markdown similarity index 95% rename from third_party/terraform/website/docs/r/google_organization_iam_policy.md rename to third_party/terraform/website/docs/r/google_organization_iam_policy.html.markdown index 84962b74c2a6..a65b033054a0 100644 --- a/third_party/terraform/website/docs/r/google_organization_iam_policy.md +++ b/third_party/terraform/website/docs/r/google_organization_iam_policy.html.markdown @@ -28,8 +28,8 @@ Allows management of the entire IAM policy for an existing Google Cloud Platform ```hcl resource "google_organization_iam_policy" "policy" { - org_id = "123456789" - policy_data = "${data.google_iam_policy.admin.policy_data}" + org_id = "123456789" + policy_data = data.google_iam_policy.admin.policy_data } data "google_iam_policy" "admin" { diff --git a/third_party/terraform/website/docs/r/google_organization_policy.html.markdown b/third_party/terraform/website/docs/r/google_organization_policy.html.markdown index cafeb8dd6d71..784e3fc3cbf8 100644 --- a/third_party/terraform/website/docs/r/google_organization_policy.html.markdown +++ b/third_party/terraform/website/docs/r/google_organization_policy.html.markdown @@ -53,7 +53,7 @@ resource "google_organization_policy" "services_policy" { constraint = "serviceuser.services" list_policy { - suggested_values = "compute.googleapis.com" + suggested_value = "compute.googleapis.com" deny { values = ["cloudresourcemanager.googleapis.com"] @@ -66,7 +66,7 @@ To restore the default organization policy, use the following instead: ```hcl resource "google_organization_policy" "services_policy" { - org_id = "123456789" + org_id = "123456789" constraint = "serviceuser.services" restore_policy { @@ -106,7 +106,7 @@ The `list_policy` block supports: * `allow` or `deny` - (Optional) One or the other must be set. -* `suggested_values` - (Optional) The Google Cloud Console will try to default to a configuration that matches the value specified in this field. +* `suggested_value` - (Optional) The Google Cloud Console will try to default to a configuration that matches the value specified in this field. * `inherit_from_parent` - (Optional) If set to true, the values from the effective Policy of the parent resource are inherited, meaning the values set in this Policy are added to the values inherited up the hierarchy. @@ -135,4 +135,7 @@ exported: Organization Policies can be imported using the `org_id` and the `constraint`, e.g. ``` -$ terraform import google_organization_policy.services_policy 123456789:constraints/serviceuser.services +$ terraform import google_organization_policy.services_policy 123456789/constraints/serviceuser.services +``` + +It is all right if the constraint contains a slash, as in the example above. diff --git a/third_party/terraform/website/docs/r/google_project.html.markdown b/third_party/terraform/website/docs/r/google_project.html.markdown index 33b93e0827b7..2fc93707cd54 100755 --- a/third_party/terraform/website/docs/r/google_project.html.markdown +++ b/third_party/terraform/website/docs/r/google_project.html.markdown @@ -41,7 +41,7 @@ Terraform. Only newly added projects are affected. 
```hcl resource "google_project" "my_project" { - name = "My Project" + name = "My Project" project_id = "your-project-id" org_id = "1234567" } @@ -51,14 +51,14 @@ To create a project under a specific folder ```hcl resource "google_project" "my_project-in-a-folder" { - name = "My Project" + name = "My Project" project_id = "your-project-id" - folder_id = "${google_folder.department1.name}" + folder_id = google_folder.department1.name } resource "google_folder" "department1" { display_name = "Department 1" - parent = "organizations/1234567" + parent = "organizations/1234567" } ``` diff --git a/third_party/terraform/website/docs/r/google_project_iam.html.markdown b/third_party/terraform/website/docs/r/google_project_iam.html.markdown index 94c95c523f77..6821d1b28a0b 100644 --- a/third_party/terraform/website/docs/r/google_project_iam.html.markdown +++ b/third_party/terraform/website/docs/r/google_project_iam.html.markdown @@ -33,7 +33,7 @@ Four different resources help you manage your IAM policy for a project. Each of ```hcl resource "google_project_iam_policy" "project" { project = "your-project-id" - policy_data = "${data.google_iam_policy.admin.policy_data}" + policy_data = data.google_iam_policy.admin.policy_data } data "google_iam_policy" "admin" { diff --git a/third_party/terraform/website/docs/r/google_project_organization_policy.html.markdown b/third_party/terraform/website/docs/r/google_project_organization_policy.html.markdown index 503fdb382d42..d211f132dc7e 100644 --- a/third_party/terraform/website/docs/r/google_project_organization_policy.html.markdown +++ b/third_party/terraform/website/docs/r/google_project_organization_policy.html.markdown @@ -54,7 +54,7 @@ resource "google_project_organization_policy" "services_policy" { constraint = "serviceuser.services" list_policy { - suggested_values = "compute.googleapis.com" + suggested_value = "compute.googleapis.com" deny { values = ["cloudresourcemanager.googleapis.com"] @@ -107,7 +107,7 @@ The `list_policy` block supports: * `allow` or `deny` - (Optional) One or the other must be set. -* `suggested_values` - (Optional) The Google Cloud Console will try to default to a configuration that matches the value specified in this field. +* `suggested_value` - (Optional) The Google Cloud Console will try to default to a configuration that matches the value specified in this field. * `inherit_from_parent` - (Optional) If set to true, the values from the effective Policy of the parent resource are inherited, meaning the values set in this Policy are added to the values inherited up the hierarchy. diff --git a/third_party/terraform/website/docs/r/google_project_service.html.markdown b/third_party/terraform/website/docs/r/google_project_service.html.markdown index c4f22cc4fa3f..6361c32fba93 100644 --- a/third_party/terraform/website/docs/r/google_project_service.html.markdown +++ b/third_party/terraform/website/docs/r/google_project_service.html.markdown @@ -14,9 +14,6 @@ Allows management of a single API service for an existing Google Cloud Platform For a list of services available, visit the [API library page](https://console.cloud.google.com/apis/library) or run `gcloud services list`. -~> **Note:** This resource _must not_ be used in conjunction with - `google_project_services` or they will fight over which services should be enabled. 
- ## Example Usage ```hcl diff --git a/third_party/terraform/website/docs/r/google_project_services.html.markdown b/third_party/terraform/website/docs/r/google_project_services.html.markdown deleted file mode 100644 index f685b10dfccc..000000000000 --- a/third_party/terraform/website/docs/r/google_project_services.html.markdown +++ /dev/null @@ -1,55 +0,0 @@ ---- -subcategory: "Cloud Platform" -layout: "google" -page_title: "Google: google_project_services" -sidebar_current: "docs-google-project-services" -description: |- - Allows management of API services for a Google Cloud Platform project. ---- - -# google\_project\_services - -Allows management of enabled API services for an existing Google Cloud -Platform project. Services in an existing project that are not defined -in the config will be removed. - -For a list of services available, visit the -[API library page](https://console.cloud.google.com/apis/library) or run `gcloud services list`. - -~> **Note:** This resource attempts to be the authoritative source on *all* enabled APIs, which often - leads to conflicts when certain actions enable other APIs. If you do not need to ensure that - *exclusively* a particular set of APIs are enabled, you should most likely use the - [google_project_service](google_project_service.html) resource, one resource per API. - -## Example Usage - -```hcl -resource "google_project_services" "project" { - project = "your-project-id" - services = ["iam.googleapis.com", "cloudresourcemanager.googleapis.com"] -} -``` - -## Argument Reference - -The following arguments are supported: - -* `project` - (Required) The project ID. - Changing this forces Terraform to attempt to disable all previously managed - API services in the previous project. - -* `services` - (Required) The list of services that are enabled. Supports - update. - -* `disable_on_destroy` - (Optional) Whether or not to disable APIs on project - when destroyed. Defaults to true. **Note**: When `disable_on_destroy` is - true and the project is changed, Terraform will force disable API services - managed by Terraform for the previous project. - -## Import - -Project services can be imported using the `project_id`, e.g. 
- -``` -$ terraform import google_project_services.my_project your-project-id -``` diff --git a/third_party/terraform/website/docs/r/google_service_account_iam.html.markdown b/third_party/terraform/website/docs/r/google_service_account_iam.html.markdown index 30e44b495918..176c88d4434a 100644 --- a/third_party/terraform/website/docs/r/google_service_account_iam.html.markdown +++ b/third_party/terraform/website/docs/r/google_service_account_iam.html.markdown @@ -40,22 +40,21 @@ resource "google_service_account" "sa" { } resource "google_service_account_iam_policy" "admin-account-iam" { - service_account_id = "${google_service_account.sa.name}" - policy_data = "${data.google_iam_policy.admin.policy_data}" + service_account_id = google_service_account.sa.name + policy_data = data.google_iam_policy.admin.policy_data } ``` ## google\_service\_account\_iam\_binding ```hcl - resource "google_service_account" "sa" { account_id = "my-service-account" display_name = "A service account that only Jane can use" } resource "google_service_account_iam_binding" "admin-account-iam" { - service_account_id = "${google_service_account.sa.name}" + service_account_id = google_service_account.sa.name role = "roles/iam.serviceAccountUser" members = [ @@ -91,7 +90,8 @@ resource "google_service_account_iam_binding" "admin-account-iam" { ## google\_service\_account\_iam\_member ```hcl -data "google_compute_default_service_account" "default" { } +data "google_compute_default_service_account" "default" { +} resource "google_service_account" "sa" { account_id = "my-service-account" @@ -99,14 +99,14 @@ resource "google_service_account" "sa" { } resource "google_service_account_iam_member" "admin-account-iam" { - service_account_id = "${google_service_account.sa.name}" + service_account_id = google_service_account.sa.name role = "roles/iam.serviceAccountUser" member = "user:jane@example.com" } # Allow SA service account use the default GCE account resource "google_service_account_iam_member" "gce-default-account-iam" { - service_account_id = "${data.google_compute_default_service_account.default.name}" + service_account_id = data.google_compute_default_service_account.default.name role = "roles/iam.serviceAccountUser" member = "serviceAccount:${google_service_account.sa.email}" } @@ -186,7 +186,7 @@ $ terraform import google_service_account_iam_policy.admin-account-iam projects/ $ terraform import google_service_account_iam_binding.admin-account-iam "projects/{your-project-id}/serviceAccounts/{your-service-account-email} iam.serviceAccountUser" -$ terraform import google_service_account_iam_member.admin-account-iam "projects/{your-project-id}/serviceAccounts/{your-service-account-email} iam.serviceAccountUser user:foo@example.com" +$ terraform import google_service_account_iam_member.admin-account-iam "projects/{your-project-id}/serviceAccounts/{your-service-account-email} roles/editor user:foo@example.com" ``` With conditions: diff --git a/third_party/terraform/website/docs/r/google_service_account_key.html.markdown b/third_party/terraform/website/docs/r/google_service_account_key.html.markdown index 87f80b2e9cc1..b8fbadd8f754 100644 --- a/third_party/terraform/website/docs/r/google_service_account_key.html.markdown +++ b/third_party/terraform/website/docs/r/google_service_account_key.html.markdown @@ -16,13 +16,13 @@ Creates and manages service account key-pairs, which allow the user to establish ```hcl resource "google_service_account" "myaccount" { - account_id = "myaccount" + account_id = "myaccount" display_name = 
"My Service Account" } resource "google_service_account_key" "mykey" { - service_account_id = "${google_service_account.myaccount.name}" - public_key_type = "TYPE_X509_PEM_FILE" + service_account_id = google_service_account.myaccount.name + public_key_type = "TYPE_X509_PEM_FILE" } ``` @@ -35,7 +35,7 @@ resource "google_service_account" "myaccount" { } resource "google_service_account_key" "mykey" { - service_account_id = "${google_service_account.myaccount.name}" + service_account_id = google_service_account.myaccount.name } resource "kubernetes_secret" "google-application-credentials" { @@ -43,7 +43,7 @@ resource "kubernetes_secret" "google-application-credentials" { name = "google-application-credentials" } data = { - credentials.json = "${base64decode(google_service_account_key.mykey.private_key)}" + credentials.json = base64decode(google_service_account_key.mykey.private_key) } } ``` @@ -52,13 +52,13 @@ resource "kubernetes_secret" "google-application-credentials" { ```hcl resource "google_service_account" "myaccount" { - account_id = "myaccount" + account_id = "myaccount" display_name = "My Service Account" } resource "google_service_account_key" "mykey" { - service_account_id = "${google_service_account.myaccount.name}" - public_key_type = "TYPE_X509_PEM_FILE" + service_account_id = google_service_account.myaccount.name + public_key_type = "TYPE_X509_PEM_FILE" } ``` diff --git a/third_party/terraform/website/docs/r/healthcare_dataset_iam.html.markdown b/third_party/terraform/website/docs/r/healthcare_dataset_iam.html.markdown index bc510041490c..37b418c4f162 100644 --- a/third_party/terraform/website/docs/r/healthcare_dataset_iam.html.markdown +++ b/third_party/terraform/website/docs/r/healthcare_dataset_iam.html.markdown @@ -36,8 +36,8 @@ data "google_iam_policy" "admin" { } resource "google_healthcare_dataset_iam_policy" "dataset" { - dataset_id = "your-dataset-id" - policy_data = "${data.google_iam_policy.admin.policy_data}" + dataset_id = "your-dataset-id" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -46,7 +46,7 @@ resource "google_healthcare_dataset_iam_policy" "dataset" { ```hcl resource "google_healthcare_dataset_iam_binding" "dataset" { dataset_id = "your-dataset-id" - role = "roles/editor" + role = "roles/editor" members = [ "user:jane@example.com", @@ -59,8 +59,8 @@ resource "google_healthcare_dataset_iam_binding" "dataset" { ```hcl resource "google_healthcare_dataset_iam_member" "dataset" { dataset_id = "your-dataset-id" - role = "roles/editor" - member = "user:jane@example.com" + role = "roles/editor" + member = "user:jane@example.com" } ``` diff --git a/third_party/terraform/website/docs/r/healthcare_dicom_store_iam.html.markdown b/third_party/terraform/website/docs/r/healthcare_dicom_store_iam.html.markdown index 56c5f7834ff6..e871816c6cb9 100644 --- a/third_party/terraform/website/docs/r/healthcare_dicom_store_iam.html.markdown +++ b/third_party/terraform/website/docs/r/healthcare_dicom_store_iam.html.markdown @@ -36,8 +36,8 @@ data "google_iam_policy" "admin" { } resource "google_healthcare_dicom_store_iam_policy" "dicom_store" { - dicom_store_id = "your-dicom-store-id" - policy_data = "${data.google_iam_policy.admin.policy_data}" + dicom_store_id = "your-dicom-store-id" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -46,7 +46,7 @@ resource "google_healthcare_dicom_store_iam_policy" "dicom_store" { ```hcl resource "google_healthcare_dicom_store_iam_binding" "dicom_store" { dicom_store_id = "your-dicom-store-id" - role = 
"roles/editor" + role = "roles/editor" members = [ "user:jane@example.com", @@ -59,8 +59,8 @@ resource "google_healthcare_dicom_store_iam_binding" "dicom_store" { ```hcl resource "google_healthcare_dicom_store_iam_member" "dicom_store" { dicom_store_id = "your-dicom-store-id" - role = "roles/editor" - member = "user:jane@example.com" + role = "roles/editor" + member = "user:jane@example.com" } ``` diff --git a/third_party/terraform/website/docs/r/healthcare_fhir_store_iam.html.markdown b/third_party/terraform/website/docs/r/healthcare_fhir_store_iam.html.markdown index 0074adf9143d..58084b82ae0b 100644 --- a/third_party/terraform/website/docs/r/healthcare_fhir_store_iam.html.markdown +++ b/third_party/terraform/website/docs/r/healthcare_fhir_store_iam.html.markdown @@ -36,8 +36,8 @@ data "google_iam_policy" "admin" { } resource "google_healthcare_fhir_store_iam_policy" "fhir_store" { - fhir_store_id = "your-fhir-store-id" - policy_data = "${data.google_iam_policy.admin.policy_data}" + fhir_store_id = "your-fhir-store-id" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -46,7 +46,7 @@ resource "google_healthcare_fhir_store_iam_policy" "fhir_store" { ```hcl resource "google_healthcare_fhir_store_iam_binding" "fhir_store" { fhir_store_id = "your-fhir-store-id" - role = "roles/editor" + role = "roles/editor" members = [ "user:jane@example.com", @@ -59,8 +59,8 @@ resource "google_healthcare_fhir_store_iam_binding" "fhir_store" { ```hcl resource "google_healthcare_fhir_store_iam_member" "fhir_store" { fhir_store_id = "your-fhir-store-id" - role = "roles/editor" - member = "user:jane@example.com" + role = "roles/editor" + member = "user:jane@example.com" } ``` diff --git a/third_party/terraform/website/docs/r/healthcare_hl7_v2_store_iam.html.markdown b/third_party/terraform/website/docs/r/healthcare_hl7_v2_store_iam.html.markdown index 1a608c06affd..db8392014910 100644 --- a/third_party/terraform/website/docs/r/healthcare_hl7_v2_store_iam.html.markdown +++ b/third_party/terraform/website/docs/r/healthcare_hl7_v2_store_iam.html.markdown @@ -36,8 +36,8 @@ data "google_iam_policy" "admin" { } resource "google_healthcare_hl7_v2_store_iam_policy" "hl7_v2_store" { - hl7_v2_store_id = "your-hl7-v2-store-id" - policy_data = "${data.google_iam_policy.admin.policy_data}" + hl7_v2_store_id = "your-hl7-v2-store-id" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -46,12 +46,13 @@ resource "google_healthcare_hl7_v2_store_iam_policy" "hl7_v2_store" { ```hcl resource "google_healthcare_hl7_v2_store_iam_binding" "hl7_v2_store" { hl7_v2_store_id = "your-hl7-v2-store-id" - role = "roles/editor" + role = "roles/editor" members = [ "user:jane@example.com", ] } + ``` ## google\_healthcare\_hl7\_v2\_store\_iam\_member @@ -59,8 +60,8 @@ resource "google_healthcare_hl7_v2_store_iam_binding" "hl7_v2_store" { ```hcl resource "google_healthcare_hl7_v2_store_iam_member" "hl7_v2_store" { hl7_v2_store_id = "your-hl7-v2-store-id" - role = "roles/editor" - member = "user:jane@example.com" + role = "roles/editor" + member = "user:jane@example.com" } ``` diff --git a/third_party/terraform/website/docs/r/logging_billing_account_exclusion.html.markdown b/third_party/terraform/website/docs/r/logging_billing_account_exclusion.html.markdown index 0bf107148196..b54161a8d10b 100644 --- a/third_party/terraform/website/docs/r/logging_billing_account_exclusion.html.markdown +++ b/third_party/terraform/website/docs/r/logging_billing_account_exclusion.html.markdown @@ -20,13 +20,13 @@ granted to the 
credentials used with Terraform. ```hcl resource "google_logging_billing_account_exclusion" "my-exclusion" { - name = "my-instance-debug-exclusion" - billing_account = "ABCDEF-012345-GHIJKL" + name = "my-instance-debug-exclusion" + billing_account = "ABCDEF-012345-GHIJKL" - description = "Exclude GCE instance debug logs" + description = "Exclude GCE instance debug logs" - # Exclude all DEBUG or lower severity messages relating to instances - filter = "resource.type = gce_instance AND severity <= DEBUG" + # Exclude all DEBUG or lower severity messages relating to instances + filter = "resource.type = gce_instance AND severity <= DEBUG" } ``` diff --git a/third_party/terraform/website/docs/r/logging_billing_account_sink.html.markdown b/third_party/terraform/website/docs/r/logging_billing_account_sink.html.markdown index b4b005c56d98..da8b54c2a452 100644 --- a/third_party/terraform/website/docs/r/logging_billing_account_sink.html.markdown +++ b/third_party/terraform/website/docs/r/logging_billing_account_sink.html.markdown @@ -22,23 +22,23 @@ typical IAM roles granted on a project. ```hcl resource "google_logging_billing_account_sink" "my-sink" { - name = "my-sink" - billing_account = "ABCDEF-012345-GHIJKL" + name = "my-sink" + billing_account = "ABCDEF-012345-GHIJKL" - # Can export to pubsub, cloud storage, or bigquery - destination = "storage.googleapis.com/${google_storage_bucket.log-bucket.name}" + # Can export to pubsub, cloud storage, or bigquery + destination = "storage.googleapis.com/${google_storage_bucket.log-bucket.name}" } resource "google_storage_bucket" "log-bucket" { - name = "billing-logging-bucket" + name = "billing-logging-bucket" } resource "google_project_iam_binding" "log-writer" { - role = "roles/storage.objectCreator" + role = "roles/storage.objectCreator" - members = [ - "${google_logging_billing_account_sink.my-sink.writer_identity}", - ] + members = [ + google_logging_billing_account_sink.my-sink.writer_identity, + ] } ``` diff --git a/third_party/terraform/website/docs/r/logging_folder_exclusion.html.markdown b/third_party/terraform/website/docs/r/logging_folder_exclusion.html.markdown index 107dc42c6c8c..615e53d85e7e 100644 --- a/third_party/terraform/website/docs/r/logging_folder_exclusion.html.markdown +++ b/third_party/terraform/website/docs/r/logging_folder_exclusion.html.markdown @@ -20,18 +20,18 @@ granted to the credentials used with Terraform. 
```hcl resource "google_logging_folder_exclusion" "my-exclusion" { - name = "my-instance-debug-exclusion" - folder = "${google_folder.my-folder.name}" + name = "my-instance-debug-exclusion" + folder = google_folder.my-folder.name - description = "Exclude GCE instance debug logs" + description = "Exclude GCE instance debug logs" - # Exclude all DEBUG or lower severity messages relating to instances - filter = "resource.type = gce_instance AND severity <= DEBUG" + # Exclude all DEBUG or lower severity messages relating to instances + filter = "resource.type = gce_instance AND severity <= DEBUG" } resource "google_folder" "my-folder" { - display_name = "My folder" - parent = "organizations/123456" + display_name = "My folder" + parent = "organizations/123456" } ``` diff --git a/third_party/terraform/website/docs/r/logging_folder_sink.html.markdown b/third_party/terraform/website/docs/r/logging_folder_sink.html.markdown index ab0763768383..ec70627ab6f4 100644 --- a/third_party/terraform/website/docs/r/logging_folder_sink.html.markdown +++ b/third_party/terraform/website/docs/r/logging_folder_sink.html.markdown @@ -20,31 +20,31 @@ granted to the credentials used with terraform. ```hcl resource "google_logging_folder_sink" "my-sink" { - name = "my-sink" - folder = "${google_folder.my-folder.name}" + name = "my-sink" + folder = google_folder.my-folder.name - # Can export to pubsub, cloud storage, or bigquery - destination = "storage.googleapis.com/${google_storage_bucket.log-bucket.name}" + # Can export to pubsub, cloud storage, or bigquery + destination = "storage.googleapis.com/${google_storage_bucket.log-bucket.name}" - # Log all WARN or higher severity messages relating to instances - filter = "resource.type = gce_instance AND severity >= WARN" + # Log all WARN or higher severity messages relating to instances + filter = "resource.type = gce_instance AND severity >= WARN" } resource "google_storage_bucket" "log-bucket" { - name = "folder-logging-bucket" + name = "folder-logging-bucket" } resource "google_project_iam_binding" "log-writer" { - role = "roles/storage.objectCreator" + role = "roles/storage.objectCreator" - members = [ - "${google_logging_folder_sink.my-sink.writer_identity}", - ] + members = [ + google_logging_folder_sink.my-sink.writer_identity, + ] } resource "google_folder" "my-folder" { - display_name = "My folder" - parent = "organizations/123456" + display_name = "My folder" + parent = "organizations/123456" } ``` diff --git a/third_party/terraform/website/docs/r/logging_organization_exclusion.html.markdown b/third_party/terraform/website/docs/r/logging_organization_exclusion.html.markdown index e0bca87533e9..c580ba520dc0 100644 --- a/third_party/terraform/website/docs/r/logging_organization_exclusion.html.markdown +++ b/third_party/terraform/website/docs/r/logging_organization_exclusion.html.markdown @@ -20,13 +20,13 @@ granted to the credentials used with Terraform. 
```hcl resource "google_logging_organization_exclusion" "my-exclusion" { - name = "my-instance-debug-exclusion" - org_id = "123456789" + name = "my-instance-debug-exclusion" + org_id = "123456789" - description = "Exclude GCE instance debug logs" + description = "Exclude GCE instance debug logs" - # Exclude all DEBUG or lower severity messages relating to instances - filter = "resource.type = gce_instance AND severity <= DEBUG" + # Exclude all DEBUG or lower severity messages relating to instances + filter = "resource.type = gce_instance AND severity <= DEBUG" } ``` diff --git a/third_party/terraform/website/docs/r/logging_organization_sink.html.markdown b/third_party/terraform/website/docs/r/logging_organization_sink.html.markdown index 74b9326ea0f7..c663da6e3500 100644 --- a/third_party/terraform/website/docs/r/logging_organization_sink.html.markdown +++ b/third_party/terraform/website/docs/r/logging_organization_sink.html.markdown @@ -20,24 +20,24 @@ granted to the credentials used with terraform. ```hcl resource "google_logging_organization_sink" "my-sink" { - name = "my-sink" - org_id = "123456789" + name = "my-sink" + org_id = "123456789" - # Can export to pubsub, cloud storage, or bigquery - destination = "storage.googleapis.com/${google_storage_bucket.log-bucket.name}" + # Can export to pubsub, cloud storage, or bigquery + destination = "storage.googleapis.com/${google_storage_bucket.log-bucket.name}" - # Log all WARN or higher severity messages relating to instances - filter = "resource.type = gce_instance AND severity >= WARN" + # Log all WARN or higher severity messages relating to instances + filter = "resource.type = gce_instance AND severity >= WARN" } resource "google_storage_bucket" "log-bucket" { - name = "organization-logging-bucket" + name = "organization-logging-bucket" } resource "google_project_iam_member" "log-writer" { - role = "roles/storage.objectCreator" + role = "roles/storage.objectCreator" - member = "${google_logging_organization_sink.my-sink.writer_identity}" + member = google_logging_organization_sink.my-sink.writer_identity } ``` diff --git a/third_party/terraform/website/docs/r/logging_project_exclusion.html.markdown b/third_party/terraform/website/docs/r/logging_project_exclusion.html.markdown index 74d4ea2ab94a..854d968db428 100644 --- a/third_party/terraform/website/docs/r/logging_project_exclusion.html.markdown +++ b/third_party/terraform/website/docs/r/logging_project_exclusion.html.markdown @@ -20,12 +20,12 @@ granted to the credentials used with Terraform. 
```hcl resource "google_logging_project_exclusion" "my-exclusion" { - name = "my-instance-debug-exclusion" + name = "my-instance-debug-exclusion" - description = "Exclude GCE instance debug logs" + description = "Exclude GCE instance debug logs" - # Exclude all DEBUG or lower severity messages relating to instances - filter = "resource.type = gce_instance AND severity <= DEBUG" + # Exclude all DEBUG or lower severity messages relating to instances + filter = "resource.type = gce_instance AND severity <= DEBUG" } ``` diff --git a/third_party/terraform/website/docs/r/logging_project_sink.html.markdown b/third_party/terraform/website/docs/r/logging_project_sink.html.markdown index e0eacea8d50d..892c1dd24b6f 100644 --- a/third_party/terraform/website/docs/r/logging_project_sink.html.markdown +++ b/third_party/terraform/website/docs/r/logging_project_sink.html.markdown @@ -23,16 +23,16 @@ and ```hcl resource "google_logging_project_sink" "my-sink" { - name = "my-pubsub-instance-sink" + name = "my-pubsub-instance-sink" - # Can export to pubsub, cloud storage, or bigquery - destination = "pubsub.googleapis.com/projects/my-project/topics/instance-activity" + # Can export to pubsub, cloud storage, or bigquery + destination = "pubsub.googleapis.com/projects/my-project/topics/instance-activity" - # Log all WARN or higher severity messages relating to instances - filter = "resource.type = gce_instance AND severity >= WARN" + # Log all WARN or higher severity messages relating to instances + filter = "resource.type = gce_instance AND severity >= WARN" - # Use a unique writer (creates a unique service account used for writing) - unique_writer_identity = true + # Use a unique writer (creates a unique service account used for writing) + unique_writer_identity = true } ``` @@ -57,33 +57,33 @@ resource "google_compute_instance" "my-logged-instance" { network_interface { network = "default" - access_config {} + access_config { + } } } # A bucket to store logs in resource "google_storage_bucket" "log-bucket" { - name = "my-unique-logging-bucket" + name = "my-unique-logging-bucket" } # Our sink; this logs all activity related to our "my-logged-instance" instance resource "google_logging_project_sink" "instance-sink" { - name = "my-instance-sink" - destination = "storage.googleapis.com/${google_storage_bucket.log-bucket.name}" - filter = "resource.type = gce_instance AND resource.labels.instance_id = \"${google_compute_instance.my-logged-instance.instance_id}\"" + name = "my-instance-sink" + destination = "storage.googleapis.com/${google_storage_bucket.log-bucket.name}" + filter = "resource.type = gce_instance AND resource.labels.instance_id = \"${google_compute_instance.my-logged-instance.instance_id}\"" - unique_writer_identity = true + unique_writer_identity = true } # Because our sink uses a unique_writer, we must grant that writer access to the bucket. 
resource "google_project_iam_binding" "log-writer" { - role = "roles/storage.objectCreator" + role = "roles/storage.objectCreator" - members = [ - "${google_logging_project_sink.instance-sink.writer_identity}", - ] + members = [ + google_logging_project_sink.instance-sink.writer_identity, + ] } - ``` ## Argument Reference diff --git a/third_party/terraform/website/docs/r/pubsub_subscription_iam.html.markdown b/third_party/terraform/website/docs/r/pubsub_subscription_iam.html.markdown index 12e5181706d7..5b1371252319 100644 --- a/third_party/terraform/website/docs/r/pubsub_subscription_iam.html.markdown +++ b/third_party/terraform/website/docs/r/pubsub_subscription_iam.html.markdown @@ -24,7 +24,7 @@ Three different resources help you manage your IAM policy for pubsub subscriptio ```hcl data "google_iam_policy" "admin" { binding { - role = "roles/editor" + role = "roles/editor" members = [ "user:jane@example.com", ] @@ -33,7 +33,7 @@ data "google_iam_policy" "admin" { resource "google_pubsub_subscription_iam_policy" "editor" { subscription = "your-subscription-name" - policy_data = "${data.google_iam_policy.admin.policy_data}" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -43,7 +43,7 @@ resource "google_pubsub_subscription_iam_policy" "editor" { resource "google_pubsub_subscription_iam_binding" "editor" { subscription = "your-subscription-name" role = "roles/editor" - members = [ + members = [ "user:jane@example.com", ] } diff --git a/third_party/terraform/website/docs/r/runtimeconfig_config.html.markdown b/third_party/terraform/website/docs/r/runtimeconfig_config.html.markdown index 713dee1a9424..0af0429327a5 100644 --- a/third_party/terraform/website/docs/r/runtimeconfig_config.html.markdown +++ b/third_party/terraform/website/docs/r/runtimeconfig_config.html.markdown @@ -20,8 +20,8 @@ Example creating a RuntimeConfig resource. ```hcl resource "google_runtimeconfig_config" "my-runtime-config" { - name = "my-service-runtime-config" - description = "Runtime configuration values for my service" + name = "my-service-runtime-config" + description = "Runtime configuration values for my service" } ``` diff --git a/third_party/terraform/website/docs/r/runtimeconfig_variable.html.markdown b/third_party/terraform/website/docs/r/runtimeconfig_variable.html.markdown index 3618b1ffbdc3..d5b5e80ba3d8 100644 --- a/third_party/terraform/website/docs/r/runtimeconfig_variable.html.markdown +++ b/third_party/terraform/website/docs/r/runtimeconfig_variable.html.markdown @@ -20,14 +20,14 @@ Example creating a RuntimeConfig variable. ```hcl resource "google_runtimeconfig_config" "my-runtime-config" { - name = "my-service-runtime-config" - description = "Runtime configuration values for my service" + name = "my-service-runtime-config" + description = "Runtime configuration values for my service" } resource "google_runtimeconfig_variable" "environment" { - parent = "${google_runtimeconfig_config.my-runtime-config.name}" - name = "prod-variables/hostname" - text = "example.com" + parent = google_runtimeconfig_config.my-runtime-config.name + name = "prod-variables/hostname" + text = "example.com" } ``` @@ -38,14 +38,14 @@ Example of using the `value` argument. 
```hcl resource "google_runtimeconfig_config" "my-runtime-config" { - name = "my-service-runtime-config" - description = "Runtime configuration values for my service" + name = "my-service-runtime-config" + description = "Runtime configuration values for my service" } resource "google_runtimeconfig_variable" "my-secret" { - parent = "${google_runtimeconfig_config.my-runtime-config.name}" - name = "secret" - value = "${base64encode(file("my-encrypted-secret.dat"))}" + parent = google_runtimeconfig_config.my-runtime-config.name + name = "secret" + value = base64encode(file("my-encrypted-secret.dat")) } ``` diff --git a/third_party/terraform/website/docs/r/service_networking_connection.html.markdown b/third_party/terraform/website/docs/r/service_networking_connection.html.markdown index 1e377834bd7c..e201834c9315 100644 --- a/third_party/terraform/website/docs/r/service_networking_connection.html.markdown +++ b/third_party/terraform/website/docs/r/service_networking_connection.html.markdown @@ -26,13 +26,13 @@ resource "google_compute_global_address" "private_ip_alloc" { purpose = "VPC_PEERING" address_type = "INTERNAL" prefix_length = 16 - network = "${google_compute_network.peering_network.self_link}" + network = google_compute_network.peering_network.self_link } resource "google_service_networking_connection" "foobar" { - network = "${google_compute_network.peering_network.self_link}" + network = google_compute_network.peering_network.self_link service = "servicenetworking.googleapis.com" - reserved_peering_ranges = ["${google_compute_global_address.private_ip_alloc.name}"] + reserved_peering_ranges = [google_compute_global_address.private_ip_alloc.name] } ``` diff --git a/third_party/terraform/website/docs/r/spanner_database_iam.html.markdown b/third_party/terraform/website/docs/r/spanner_database_iam.html.markdown index 006e3ed75528..d386ef2d1010 100644 --- a/third_party/terraform/website/docs/r/spanner_database_iam.html.markdown +++ b/third_party/terraform/website/docs/r/spanner_database_iam.html.markdown @@ -38,7 +38,7 @@ data "google_iam_policy" "admin" { resource "google_spanner_database_iam_policy" "database" { instance = "your-instance-name" database = "your-database-name" - policy_data = "${data.google_iam_policy.admin.policy_data}" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -46,9 +46,9 @@ resource "google_spanner_database_iam_policy" "database" { ```hcl resource "google_spanner_database_iam_binding" "database" { - instance = "your-instance-name" - database = "your-database-name" - role = "roles/compute.networkUser" + instance = "your-instance-name" + database = "your-database-name" + role = "roles/compute.networkUser" members = [ "user:jane@example.com", @@ -60,10 +60,10 @@ resource "google_spanner_database_iam_binding" "database" { ```hcl resource "google_spanner_database_iam_member" "database" { - instance = "your-instance-name" - database = "your-database-name" - role = "roles/compute.networkUser" - member = "user:jane@example.com" + instance = "your-instance-name" + database = "your-database-name" + role = "roles/compute.networkUser" + member = "user:jane@example.com" } ``` diff --git a/third_party/terraform/website/docs/r/spanner_instance_iam.html.markdown b/third_party/terraform/website/docs/r/spanner_instance_iam.html.markdown index 969a13a326bf..1b301502fc08 100644 --- a/third_party/terraform/website/docs/r/spanner_instance_iam.html.markdown +++ b/third_party/terraform/website/docs/r/spanner_instance_iam.html.markdown @@ -37,7 +37,7 @@ data 
"google_iam_policy" "admin" { resource "google_spanner_instance_iam_policy" "instance" { instance = "your-instance-name" - policy_data = "${data.google_iam_policy.admin.policy_data}" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -45,8 +45,8 @@ resource "google_spanner_instance_iam_policy" "instance" { ```hcl resource "google_spanner_instance_iam_binding" "instance" { - instance = "your-instance-name" - role = "roles/compute.networkUser" + instance = "your-instance-name" + role = "roles/compute.networkUser" members = [ "user:jane@example.com", @@ -58,9 +58,9 @@ resource "google_spanner_instance_iam_binding" "instance" { ```hcl resource "google_spanner_instance_iam_member" "instance" { - instance = "your-instance-name" - role = "roles/compute.networkUser" - member = "user:jane@example.com" + instance = "your-instance-name" + role = "roles/compute.networkUser" + member = "user:jane@example.com" } ``` diff --git a/third_party/terraform/website/docs/r/sql_database_instance.html.markdown b/third_party/terraform/website/docs/r/sql_database_instance.html.markdown index f7c787ac9903..f16a5db743bb 100644 --- a/third_party/terraform/website/docs/r/sql_database_instance.html.markdown +++ b/third_party/terraform/website/docs/r/sql_database_instance.html.markdown @@ -27,8 +27,9 @@ resource "random_id" "db_name_suffix" { } resource "google_sql_database_instance" "master" { - name = "master-instance-${random_id.db_name_suffix.hex}" + name = "master-instance-${random_id.db_name_suffix.hex}" database_version = "MYSQL_5_6" + # First-generation instance regions are not the conventional # Google Compute Engine regions. See argument reference below. region = "us-central" @@ -43,9 +44,9 @@ resource "google_sql_database_instance" "master" { ```hcl resource "google_sql_database_instance" "master" { - name = "master-instance" + name = "master-instance" database_version = "POSTGRES_9_6" - region = "us-central1" + region = "us-central1" settings { # Second-generation instance tiers are based on the machine @@ -78,40 +79,42 @@ resource "google_compute_instance" "apps" { } } -data "null_data_source" "auth_netw_postgres_allowed_1" { - count = "${length(google_compute_instance.apps.*.self_link)}" - - inputs = { - name = "apps-${count.index + 1}" - value = "${element(google_compute_instance.apps.*.network_interface.0.access_config.0.nat_ip, count.index)}" - } -} - -data "null_data_source" "auth_netw_postgres_allowed_2" { - count = 2 - - inputs = { - name = "onprem-${count.index + 1}" - value = "${element(list("192.168.1.2", "192.168.2.3"), count.index)}" - } -} - resource "random_id" "db_name_suffix" { byte_length = 4 } +locals { + onprem = ["192.168.1.2", "192.168.2.3"] +} + resource "google_sql_database_instance" "postgres" { - name = "postgres-instance-${random_id.db_name_suffix.hex}" + name = "postgres-instance-${random_id.db_name_suffix.hex}" database_version = "POSTGRES_9_6" settings { tier = "db-f1-micro" ip_configuration { - authorized_networks = [ - "${data.null_data_source.auth_netw_postgres_allowed_1.*.outputs}", - "${data.null_data_source.auth_netw_postgres_allowed_2.*.outputs}", - ] + + dynamic "authorized_networks" { + for_each = google_compute_instance.apps + iterator = apps + + content { + name = apps.value.name + value = apps.value.network_interface.0.access_config.0.nat_ip + } + } + + dynamic "authorized_networks" { + for_each = local.onprem + iterator = onprem + + content { + name = "onprem-${onprem.key}" + value = onprem.value + } + } } } } @@ -122,27 +125,27 @@ resource 
"google_sql_database_instance" "postgres" { ```hcl resource "google_compute_network" "private_network" { - provider = "google-beta" + provider = google-beta - name = "private-network" + name = "private-network" } resource "google_compute_global_address" "private_ip_address" { - provider = "google-beta" + provider = google-beta name = "private-ip-address" purpose = "VPC_PEERING" - address_type = "INTERNAL" + address_type = "INTERNAL" prefix_length = 16 - network = "${google_compute_network.private_network.self_link}" + network = google_compute_network.private_network.self_link } resource "google_service_networking_connection" "private_vpc_connection" { - provider = "google-beta" + provider = google-beta - network = "${google_compute_network.private_network.self_link}" - service = "servicenetworking.googleapis.com" - reserved_peering_ranges = ["${google_compute_global_address.private_ip_address.name}"] + network = google_compute_network.private_network.self_link + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.private_ip_address.name] } resource "random_id" "db_name_suffix" { @@ -150,25 +153,23 @@ resource "random_id" "db_name_suffix" { } resource "google_sql_database_instance" "instance" { - provider = "google-beta" + provider = google-beta - name = "private-instance-${random_id.db_name_suffix.hex}" + name = "private-instance-${random_id.db_name_suffix.hex}" region = "us-central1" - depends_on = [ - "google_service_networking_connection.private_vpc_connection" - ] + depends_on = [google_service_networking_connection.private_vpc_connection] settings { tier = "db-f1-micro" ip_configuration { - ipv4_enabled = false - private_network = "${google_compute_network.private_network.self_link}" + ipv4_enabled = false + private_network = google_compute_network.private_network.self_link } } } -provider "google-beta"{ +provider "google-beta" { region = "us-central1" zone = "us-central1-a" } diff --git a/third_party/terraform/website/docs/r/sql_ssl_cert.html.markdown b/third_party/terraform/website/docs/r/sql_ssl_cert.html.markdown index 76b4efc1cee3..9710ba8711dd 100644 --- a/third_party/terraform/website/docs/r/sql_ssl_cert.html.markdown +++ b/third_party/terraform/website/docs/r/sql_ssl_cert.html.markdown @@ -33,7 +33,7 @@ resource "google_sql_database_instance" "master" { resource "google_sql_ssl_cert" "client_cert" { common_name = "client-name" - instance = "${google_sql_database_instance.master.name}" + instance = google_sql_database_instance.master.name } ``` diff --git a/third_party/terraform/website/docs/r/sql_user.html.markdown b/third_party/terraform/website/docs/r/sql_user.html.markdown index a282e6100f34..8f78fa751929 100644 --- a/third_party/terraform/website/docs/r/sql_user.html.markdown +++ b/third_party/terraform/website/docs/r/sql_user.html.markdown @@ -34,7 +34,7 @@ resource "google_sql_database_instance" "master" { resource "google_sql_user" "users" { name = "me" - instance = "${google_sql_database_instance.master.name}" + instance = google_sql_database_instance.master.name host = "me.com" password = "changeme" } diff --git a/third_party/terraform/website/docs/r/storage_bucket.html.markdown b/third_party/terraform/website/docs/r/storage_bucket.html.markdown index b96fd891bc8f..752bd7a4a5c5 100644 --- a/third_party/terraform/website/docs/r/storage_bucket.html.markdown +++ b/third_party/terraform/website/docs/r/storage_bucket.html.markdown @@ -96,9 +96,7 @@ The `condition` block supports the following elements, and requires at 
least one * `created_before` - (Optional) Creation date of an object in RFC 3339 (e.g. `2017-06-13`) to satisfy this condition. -* `with_state` - (Optional) Match to live and/or archived objects. Unversioned buckets have only live objects. Supported values include: `"LIVE"`, `"ARCHIVED"`, `"ANY"`. Unset or empty strings will be treated as `ARCHIVED` to maintain backwards compatibility with `is_live`. - -* `is_live` - (Optional, Deprecated) Defaults to `false` to match archived objects. If `true`, this condition matches live objects. Unversioned buckets have only live objects. +* `with_state` - (Optional) Match to live and/or archived objects. Unversioned buckets have only live objects. Supported values include: `"LIVE"`, `"ARCHIVED"`, `"ANY"`. * `matches_storage_class` - (Optional) [Storage Class](https://cloud.google.com/storage/docs/storage-classes) of objects to satisfy this condition. Supported values include: `MULTI_REGIONAL`, `REGIONAL`, `NEARLINE`, `COLDLINE`, `STANDARD`, `DURABLE_REDUCED_AVAILABILITY`. diff --git a/third_party/terraform/website/docs/r/storage_bucket_acl.html.markdown b/third_party/terraform/website/docs/r/storage_bucket_acl.html.markdown index 13422ab52267..db9e2467d556 100644 --- a/third_party/terraform/website/docs/r/storage_bucket_acl.html.markdown +++ b/third_party/terraform/website/docs/r/storage_bucket_acl.html.markdown @@ -25,7 +25,7 @@ resource "google_storage_bucket" "image-store" { } resource "google_storage_bucket_acl" "image-store-acl" { - bucket = "${google_storage_bucket.image-store.name}" + bucket = google_storage_bucket.image-store.name role_entity = [ "OWNER:user-my.email@gmail.com", diff --git a/third_party/terraform/website/docs/r/storage_bucket_iam.html.markdown b/third_party/terraform/website/docs/r/storage_bucket_iam.html.markdown index feb2c22b9e6c..bc2510a7730b 100644 --- a/third_party/terraform/website/docs/r/storage_bucket_iam.html.markdown +++ b/third_party/terraform/website/docs/r/storage_bucket_iam.html.markdown @@ -23,7 +23,7 @@ Three different resources help you manage your IAM policy for storage bucket. 
diff --git a/third_party/terraform/website/docs/r/storage_bucket_acl.html.markdown b/third_party/terraform/website/docs/r/storage_bucket_acl.html.markdown
index 13422ab52267..db9e2467d556 100644
--- a/third_party/terraform/website/docs/r/storage_bucket_acl.html.markdown
+++ b/third_party/terraform/website/docs/r/storage_bucket_acl.html.markdown
@@ -25,7 +25,7 @@ resource "google_storage_bucket" "image-store" {
 }
 
 resource "google_storage_bucket_acl" "image-store-acl" {
-  bucket = "${google_storage_bucket.image-store.name}"
+  bucket = google_storage_bucket.image-store.name
 
   role_entity = [
     "OWNER:user-my.email@gmail.com",
diff --git a/third_party/terraform/website/docs/r/storage_bucket_iam.html.markdown b/third_party/terraform/website/docs/r/storage_bucket_iam.html.markdown
index feb2c22b9e6c..bc2510a7730b 100644
--- a/third_party/terraform/website/docs/r/storage_bucket_iam.html.markdown
+++ b/third_party/terraform/website/docs/r/storage_bucket_iam.html.markdown
@@ -23,7 +23,7 @@ Three different resources help you manage your IAM policy for storage bucket. Ea
 ```hcl
 resource "google_storage_bucket_iam_binding" "binding" {
   bucket = "your-bucket-name"
-  role        = "roles/storage.objectViewer"
+  role   = "roles/storage.objectViewer"
 
   members = [
     "user:jane@example.com",
@@ -36,8 +36,8 @@ resource "google_storage_bucket_iam_binding" "binding" {
 ```hcl
 resource "google_storage_bucket_iam_member" "member" {
   bucket = "your-bucket-name"
-  role        = "roles/storage.objectViewer"
-  member      = "user:jane@example.com"
+  role   = "roles/storage.objectViewer"
+  member = "user:jane@example.com"
 }
 ```
 
@@ -54,13 +54,13 @@ data "google_iam_policy" "foo-policy" {
   binding {
     role = "roles/your-role"
 
-    members = [ "group:yourgroup@example.com" ]
+    members = ["group:yourgroup@example.com"]
   }
 }
 
 resource "google_storage_bucket_iam_policy" "member" {
-  bucket = "your-bucket-name"
-  policy_data = "${data.google_iam_policy.foo-policy.policy_data}"
+  bucket      = "your-bucket-name"
+  policy_data = data.google_iam_policy.foo-policy.policy_data
 }
 ```
diff --git a/third_party/terraform/website/docs/r/storage_default_object_acl.html.markdown b/third_party/terraform/website/docs/r/storage_default_object_acl.html.markdown
index 596832b3cfda..72df1a5914b1 100644
--- a/third_party/terraform/website/docs/r/storage_default_object_acl.html.markdown
+++ b/third_party/terraform/website/docs/r/storage_default_object_acl.html.markdown
@@ -34,7 +34,7 @@ resource "google_storage_bucket" "image-store" {
 }
 
 resource "google_storage_default_object_acl" "image-store-default-acl" {
-  bucket      = "${google_storage_bucket.image-store.name}"
+  bucket      = google_storage_bucket.image-store.name
   role_entity = [
     "OWNER:user-my.email@gmail.com",
     "READER:group-mygroup",
diff --git a/third_party/terraform/website/docs/r/storage_notification.html.markdown b/third_party/terraform/website/docs/r/storage_notification.html.markdown
index 645956087d90..2ed21de6948f 100644
--- a/third_party/terraform/website/docs/r/storage_notification.html.markdown
+++ b/third_party/terraform/website/docs/r/storage_notification.html.markdown
@@ -25,36 +25,35 @@ for an example of enabling notifications by granting the correct IAM permission.
 
 ```hcl
 resource "google_storage_notification" "notification" {
-  notification_id   = "1"
-  bucket            = "${google_storage_bucket.bucket.name}"
-  payload_format    = "JSON_API_V1"
-  topic             = "${google_pubsub_topic.topic.name}"
-  event_types       = ["OBJECT_FINALIZE", "OBJECT_METADATA_UPDATE"]
-  custom_attributes = {
-    new-attribute = "new-attribute-value"
-  }
-  depends_on = ["google_pubsub_topic_iam_binding.binding"]
+  bucket         = google_storage_bucket.bucket.name
+  payload_format = "JSON_API_V1"
+  topic          = google_pubsub_topic.topic.name
+  event_types    = ["OBJECT_FINALIZE", "OBJECT_METADATA_UPDATE"]
+  custom_attributes = {
+    new-attribute = "new-attribute-value"
+  }
+  depends_on = [google_pubsub_topic_iam_binding.binding]
 }
 
 // Enable notifications by giving the correct IAM permission to the unique service account.
-data "google_storage_project_service_account" "gcs_account" {} +data "google_storage_project_service_account" "gcs_account" { +} resource "google_pubsub_topic_iam_binding" "binding" { - topic = "${google_pubsub_topic.topic.name}" - role = "roles/pubsub.publisher" - members = ["serviceAccount:${data.google_storage_project_service_account.gcs_account.email_address}"] + topic = google_pubsub_topic.topic.name + role = "roles/pubsub.publisher" + members = ["serviceAccount:${data.google_storage_project_service_account.gcs_account.email_address}"] } // End enabling notifications - resource "google_storage_bucket" "bucket" { - name = "default_bucket" + name = "default_bucket" } resource "google_pubsub_topic" "topic" { - name = "default_topic" + name = "default_topic" } ``` diff --git a/third_party/terraform/website/docs/r/storage_object_acl.html.markdown b/third_party/terraform/website/docs/r/storage_object_acl.html.markdown index e8eb1d674736..3dedc497a628 100644 --- a/third_party/terraform/website/docs/r/storage_object_acl.html.markdown +++ b/third_party/terraform/website/docs/r/storage_object_acl.html.markdown @@ -33,13 +33,13 @@ resource "google_storage_bucket" "image-store" { resource "google_storage_bucket_object" "image" { name = "image1" - bucket = "${google_storage_bucket.image-store.name}" + bucket = google_storage_bucket.image-store.name source = "image1.jpg" } resource "google_storage_object_acl" "image-store-acl" { - bucket = "${google_storage_bucket.image-store.name}" - object = "${google_storage_bucket_object.image.output_name}" + bucket = google_storage_bucket.image-store.name + object = google_storage_bucket_object.image.output_name role_entity = [ "OWNER:user-my.email@gmail.com", diff --git a/third_party/terraform/website/docs/r/storage_transfer_job.html.markdown b/third_party/terraform/website/docs/r/storage_transfer_job.html.markdown index be2bc0172f91..8e8c69bcafc8 100644 --- a/third_party/terraform/website/docs/r/storage_transfer_job.html.markdown +++ b/third_party/terraform/website/docs/r/storage_transfer_job.html.markdown @@ -23,74 +23,69 @@ To get more information about Google Cloud Storage Transfer, see: Example creating a nightly Transfer Job from an AWS S3 Bucket to a GCS bucket. 
diff --git a/third_party/terraform/website/docs/r/storage_object_acl.html.markdown b/third_party/terraform/website/docs/r/storage_object_acl.html.markdown
index e8eb1d674736..3dedc497a628 100644
--- a/third_party/terraform/website/docs/r/storage_object_acl.html.markdown
+++ b/third_party/terraform/website/docs/r/storage_object_acl.html.markdown
@@ -33,13 +33,13 @@ resource "google_storage_bucket" "image-store" {
 
 resource "google_storage_bucket_object" "image" {
   name   = "image1"
-  bucket = "${google_storage_bucket.image-store.name}"
+  bucket = google_storage_bucket.image-store.name
   source = "image1.jpg"
 }
 
 resource "google_storage_object_acl" "image-store-acl" {
-  bucket = "${google_storage_bucket.image-store.name}"
-  object = "${google_storage_bucket_object.image.output_name}"
+  bucket = google_storage_bucket.image-store.name
+  object = google_storage_bucket_object.image.output_name
 
   role_entity = [
     "OWNER:user-my.email@gmail.com",
diff --git a/third_party/terraform/website/docs/r/storage_transfer_job.html.markdown b/third_party/terraform/website/docs/r/storage_transfer_job.html.markdown
index be2bc0172f91..8e8c69bcafc8 100644
--- a/third_party/terraform/website/docs/r/storage_transfer_job.html.markdown
+++ b/third_party/terraform/website/docs/r/storage_transfer_job.html.markdown
@@ -23,74 +23,69 @@ To get more information about Google Cloud Storage Transfer, see:
 
 Example creating a nightly Transfer Job from an AWS S3 Bucket to a GCS bucket.
 
 ```hcl
-
 data "google_storage_transfer_project_service_account" "default" {
-  project = "${var.project}"
+  project = var.project
 }
 
 resource "google_storage_bucket" "s3-backup-bucket" {
   name          = "${var.aws_s3_bucket}-backup"
   storage_class = "NEARLINE"
-  project       = "${var.project}"
+  project       = var.project
 }
 
 resource "google_storage_bucket_iam_member" "s3-backup-bucket" {
-  bucket = "${google_storage_bucket.s3-backup-bucket.name}"
-  role   = "roles/storage.admin"
-  member = "serviceAccount:${data.google_storage_transfer_project_service_account.default.email}"
-  depends_on = [
-    "google_storage_bucket.s3-backup-bucket"
-  ]
+  bucket     = google_storage_bucket.s3-backup-bucket.name
+  role       = "roles/storage.admin"
+  member     = "serviceAccount:${data.google_storage_transfer_project_service_account.default.email}"
+  depends_on = [google_storage_bucket.s3-backup-bucket]
 }
 
 resource "google_storage_transfer_job" "s3-bucket-nightly-backup" {
-  description = "Nightly backup of S3 bucket"
-  project     = "${var.project}"
-
-  transfer_spec {
-    object_conditions {
-      max_time_elapsed_since_last_modification = "600s"
-      exclude_prefixes = [
-        "requests.gz"
-      ]
-    }
-    transfer_options {
-      delete_objects_unique_in_sink = false
-    }
-    aws_s3_data_source {
-      bucket_name = "${var.aws_s3_bucket}"
-      aws_access_key {
-        access_key_id     = "${var.aws_access_key}"
-        secret_access_key = "${var.aws_secret_key}"
-      }
-    }
-    gcs_data_sink {
-      bucket_name = "${google_storage_bucket.s3-backup-bucket.name}"
-    }
-  }
-
-  schedule {
-    schedule_start_date {
-      year  = 2018
-      month = 10
-      day   = 1
-    }
-    schedule_end_date {
-      year  = 2019
-      month = 1
-      day   = 15
-    }
-    start_time_of_day {
-      hours   = 23
-      minutes = 30
-      seconds = 0
-      nanos   = 0
-    }
-  }
-
-  depends_on = [
-    "google_storage_bucket_iam_member.s3-backup-bucket"
-  ]
+  description = "Nightly backup of S3 bucket"
+  project     = var.project
+
+  transfer_spec {
+    object_conditions {
+      max_time_elapsed_since_last_modification = "600s"
+      exclude_prefixes = [
+        "requests.gz",
+      ]
+    }
+    transfer_options {
+      delete_objects_unique_in_sink = false
+    }
+    aws_s3_data_source {
+      bucket_name = var.aws_s3_bucket
+      aws_access_key {
+        access_key_id     = var.aws_access_key
+        secret_access_key = var.aws_secret_key
+      }
+    }
+    gcs_data_sink {
+      bucket_name = google_storage_bucket.s3-backup-bucket.name
+    }
+  }
+
+  schedule {
+    schedule_start_date {
+      year  = 2018
+      month = 10
+      day   = 1
+    }
+    schedule_end_date {
+      year  = 2019
+      month = 1
+      day   = 15
+    }
+    start_time_of_day {
+      hours   = 23
+      minutes = 30
+      seconds = 0
+      nanos   = 0
+    }
+  }
+
+  depends_on = [google_storage_bucket_iam_member.s3-backup-bucket]
 }
 ```
diff --git a/third_party/terraform/website/docs/r/usage_export_bucket.html.markdown b/third_party/terraform/website/docs/r/usage_export_bucket.html.markdown
index d7bb95c66d49..6a7935c8fc14 100644
--- a/third_party/terraform/website/docs/r/usage_export_bucket.html.markdown
+++ b/third_party/terraform/website/docs/r/usage_export_bucket.html.markdown
@@ -25,8 +25,8 @@ safe to have multiple resources with the same backing bucket.
 
 ```hcl
 resource "google_project_usage_export_bucket" "usage_export" {
-  project = "development-project"
-  bucket_name = "usage-tracking-bucket"
+  project     = "development-project"
+  bucket_name = "usage-tracking-bucket"
 }
 ```
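Beyond the interpolation changes, two `terraform fmt` conventions recur in the hunks above: multi-line lists end with a trailing comma after the last element, and `=` signs are aligned within a block. A small sketch combining both, reusing values from the examples above:

```hcl
locals {
  # Multi-line lists end with a trailing comma after the last element.
  exclude_prefixes = [
    "requests.gz",
  ]
}

resource "google_project_usage_export_bucket" "usage_export" {
  # "=" signs are aligned within the block.
  project     = "development-project"
  bucket_name = "usage-tracking-bucket"
}
```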