From 2b1fd91d7b9d4ea14a5bd823728056da8c6f20b1 Mon Sep 17 00:00:00 2001 From: Modular Magician Date: Mon, 30 Sep 2024 17:15:20 +0000 Subject: [PATCH] Add resource Dataproc batch (#11750) [upstream:a0b7099e82cb2d112518628de9032ab92754838b] Signed-off-by: Modular Magician --- .changelog/11750.txt | 6 + google/acctest/bootstrap_test_utils.go | 27 +- google/provider/provider_mmv1_resources.go | 5 +- .../services/dataproc/dataproc_operation.go | 92 + .../dataproc/resource_dataproc_batch.go | 2011 +++++++++++++++++ .../resource_dataproc_batch_generated_test.go | 446 ++++ .../resource_dataproc_batch_sweeper.go | 143 ++ .../dataproc/resource_dataproc_batch_test.go | 61 + website/docs/r/dataproc_batch.html.markdown | 682 ++++++ 9 files changed, 3469 insertions(+), 4 deletions(-) create mode 100644 .changelog/11750.txt create mode 100644 google/services/dataproc/dataproc_operation.go create mode 100644 google/services/dataproc/resource_dataproc_batch.go create mode 100644 google/services/dataproc/resource_dataproc_batch_generated_test.go create mode 100644 google/services/dataproc/resource_dataproc_batch_sweeper.go create mode 100644 google/services/dataproc/resource_dataproc_batch_test.go create mode 100644 website/docs/r/dataproc_batch.html.markdown diff --git a/.changelog/11750.txt b/.changelog/11750.txt new file mode 100644 index 00000000000..b886be000ab --- /dev/null +++ b/.changelog/11750.txt @@ -0,0 +1,6 @@ +```release-note:new-resource +`google_dataproc_batch` +``` +```release-note:enhancement +dataproc: switched to the v1 API for `google_dataproc_autoscaling_policy` resource (beta) +``` \ No newline at end of file diff --git a/google/acctest/bootstrap_test_utils.go b/google/acctest/bootstrap_test_utils.go index 437088baa9b..f708e59a43d 100644 --- a/google/acctest/bootstrap_test_utils.go +++ b/google/acctest/bootstrap_test_utils.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "log" + "maps" "os" "strings" "testing" @@ -912,7 +913,25 @@ func BootstrapSharedCaPoolInLocation(t *testing.T, location string) string { return poolName } +func BootstrapSubnetForDataprocBatches(t *testing.T, subnetName string, networkName string) string { + subnetOptions := map[string]interface{}{ + "privateIpGoogleAccess": true, + } + return BootstrapSubnetWithOverrides(t, subnetName, networkName, subnetOptions) +} + func BootstrapSubnet(t *testing.T, subnetName string, networkName string) string { + return BootstrapSubnetWithOverrides(t, subnetName, networkName, make(map[string]interface{})) +} + +func BootstrapSubnetWithFirewallForDataprocBatches(t *testing.T, testId string, subnetName string) string { + networkName := BootstrapSharedTestNetwork(t, testId) + subnetworkName := BootstrapSubnetForDataprocBatches(t, subnetName, networkName) + BootstrapFirewallForDataprocSharedNetwork(t, subnetName, networkName) + return subnetworkName +} + +func BootstrapSubnetWithOverrides(t *testing.T, subnetName string, networkName string, subnetOptions map[string]interface{}) string { projectID := envvar.GetTestProjectFromEnv() region := envvar.GetTestRegionFromEnv() @@ -934,20 +953,24 @@ func BootstrapSubnet(t *testing.T, subnetName string, networkName string) string networkUrl := fmt.Sprintf("%sprojects/%s/global/networks/%s", config.ComputeBasePath, projectID, networkName) url := fmt.Sprintf("%sprojects/%s/regions/%s/subnetworks", config.ComputeBasePath, projectID, region) - subnetObj := map[string]interface{}{ + defaultSubnetObj := map[string]interface{}{ "name": subnetName, "region ": region, "network": networkUrl, "ipCidrRange": 
"10.77.0.0/20", } + if len(subnetOptions) != 0 { + maps.Copy(defaultSubnetObj, subnetOptions) + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, Method: "POST", Project: projectID, RawURL: url, UserAgent: config.UserAgent, - Body: subnetObj, + Body: defaultSubnetObj, Timeout: 4 * time.Minute, }) diff --git a/google/provider/provider_mmv1_resources.go b/google/provider/provider_mmv1_resources.go index 2252f164d3d..a297bb2f81e 100644 --- a/google/provider/provider_mmv1_resources.go +++ b/google/provider/provider_mmv1_resources.go @@ -437,9 +437,9 @@ var handwrittenIAMDatasources = map[string]*schema.Resource{ } // Resources -// Generated resources: 466 +// Generated resources: 467 // Generated IAM resources: 261 -// Total generated resources: 727 +// Total generated resources: 728 var generatedResources = map[string]*schema.Resource{ "google_folder_access_approval_settings": accessapproval.ResourceAccessApprovalFolderSettings(), "google_organization_access_approval_settings": accessapproval.ResourceAccessApprovalOrganizationSettings(), @@ -769,6 +769,7 @@ var generatedResources = map[string]*schema.Resource{ "google_dataproc_autoscaling_policy_iam_binding": tpgiamresource.ResourceIamBinding(dataproc.DataprocAutoscalingPolicyIamSchema, dataproc.DataprocAutoscalingPolicyIamUpdaterProducer, dataproc.DataprocAutoscalingPolicyIdParseFunc), "google_dataproc_autoscaling_policy_iam_member": tpgiamresource.ResourceIamMember(dataproc.DataprocAutoscalingPolicyIamSchema, dataproc.DataprocAutoscalingPolicyIamUpdaterProducer, dataproc.DataprocAutoscalingPolicyIdParseFunc), "google_dataproc_autoscaling_policy_iam_policy": tpgiamresource.ResourceIamPolicy(dataproc.DataprocAutoscalingPolicyIamSchema, dataproc.DataprocAutoscalingPolicyIamUpdaterProducer, dataproc.DataprocAutoscalingPolicyIdParseFunc), + "google_dataproc_batch": dataproc.ResourceDataprocBatch(), "google_dataproc_metastore_federation": dataprocmetastore.ResourceDataprocMetastoreFederation(), "google_dataproc_metastore_federation_iam_binding": tpgiamresource.ResourceIamBinding(dataprocmetastore.DataprocMetastoreFederationIamSchema, dataprocmetastore.DataprocMetastoreFederationIamUpdaterProducer, dataprocmetastore.DataprocMetastoreFederationIdParseFunc), "google_dataproc_metastore_federation_iam_member": tpgiamresource.ResourceIamMember(dataprocmetastore.DataprocMetastoreFederationIamSchema, dataprocmetastore.DataprocMetastoreFederationIamUpdaterProducer, dataprocmetastore.DataprocMetastoreFederationIdParseFunc), diff --git a/google/services/dataproc/dataproc_operation.go b/google/services/dataproc/dataproc_operation.go new file mode 100644 index 00000000000..3a19d4e9820 --- /dev/null +++ b/google/services/dataproc/dataproc_operation.go @@ -0,0 +1,92 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package dataproc + +import ( + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type DataprocOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *DataprocOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. + url := fmt.Sprintf("%s%s", w.Config.DataprocBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createDataprocWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*DataprocOperationWaiter, error) { + w := &DataprocOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func DataprocOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createDataprocWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + rawResponse := []byte(w.CommonOperationWaiter.Op.Response) + if len(rawResponse) == 0 { + return errors.New("`resource` not set in operation response") + } + return json.Unmarshal(rawResponse, response) +} + +func DataprocOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createDataprocWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/google/services/dataproc/resource_dataproc_batch.go b/google/services/dataproc/resource_dataproc_batch.go new file mode 100644 index 00000000000..40f751f175c --- /dev/null +++ b/google/services/dataproc/resource_dataproc_batch.go @@ -0,0 +1,2011 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package dataproc + +import ( + "fmt" + "log" + "net/http" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +/* + * Dataproc Batch api apends subminor version to the provided + * version. We are suppressing this server generated subminor. + */ +func CloudDataprocBatchRuntimeConfigVersionDiffSuppressFunc(old, new string) bool { + if old != "" && strings.HasPrefix(new, old) || (new != "" && strings.HasPrefix(old, new)) { + return true + } + + return old == new +} + +func CloudDataprocBatchRuntimeConfigVersionDiffSuppress(_, old, new string, d *schema.ResourceData) bool { + return CloudDataprocBatchRuntimeConfigVersionDiffSuppressFunc(old, new) +} + +func ResourceDataprocBatch() *schema.Resource { + return &schema.Resource{ + Create: resourceDataprocBatchCreate, + Read: resourceDataprocBatchRead, + Update: resourceDataprocBatchUpdate, + Delete: resourceDataprocBatchDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataprocBatchImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(5 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "batch_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The ID to use for the batch, which will become the final component of the batch's resource name. +This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.`, + }, + "environment_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Environment configuration for the batch execution.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "execution_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Execution configuration for a workload.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Cloud KMS key to use for encryption.`, + }, + "network_tags": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Tags used for network traffic control.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "network_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Network configuration for workload execution.`, + ConflictsWith: []string{"environment_config.0.execution_config.0.subnetwork_uri"}, + }, + "service_account": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Service account that used to execute workload.`, + }, + "staging_bucket": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A Cloud Storage bucket used to stage workload dependencies, config files, and store +workload output and other ephemeral data, such as Spark history files. 
If you do not specify a staging bucket, +Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, +and then create and manage project-level, per-location staging and temporary buckets. +This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.`, + }, + "subnetwork_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Subnetwork configuration for workload execution.`, + ConflictsWith: []string{"environment_config.0.execution_config.0.network_uri"}, + }, + "ttl": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The duration after which the workload will be terminated. +When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing +work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it +exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, +it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. +Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idleTtl are specified (for an interactive session), +the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or +when ttl has been exceeded, whichever occurs first.`, + }, + }, + }, + }, + "peripherals_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Peripherals configuration that workload has access to.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metastore_service": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Resource name of an existing Dataproc Metastore service.`, + }, + "spark_history_server_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The Spark History Server configuration for the workload.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataproc_cluster": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The labels to associate with this batch. + + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The location in which the batch will be created in.`, + }, + "pyspark_batch": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `PySpark batch config.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of archives to be extracted into the working directory of each executor. 
+Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The arguments to pass to the driver. Do not include arguments that can be set as batch +properties, such as --conf, since a collision can occur that causes an incorrect batch submission.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of files to be placed in the working directory of each executor.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "main_python_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.`, + }, + "python_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS file URIs of Python files to pass to the PySpark framework. +Supported file types: .py, .egg, and .zip.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + ExactlyOneOf: []string{"pyspark_batch", "spark_batch", "spark_sql_batch", "spark_r_batch"}, + }, + "runtime_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Runtime configuration for the batch execution.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "container_image": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Optional custom container image for the job runtime environment. If not specified, a default container image will be used.`, + }, + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `A mapping of property names to values, which are used to configure workload execution.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "version": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: CloudDataprocBatchRuntimeConfigVersionDiffSuppress, + Description: `Version of the batch runtime.`, + }, + "effective_properties": { + Type: schema.TypeMap, + Computed: true, + Description: `A mapping of property names to values, which are used to configure workload execution.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "spark_batch": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Spark batch config.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of archives to be extracted into the working directory of each executor. +Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The arguments to pass to the driver. 
Do not include arguments that can be set as batch +properties, such as --conf, since a collision can occur that causes an incorrect batch submission.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of files to be placed in the working directory of each executor.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "main_class": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of the driver main class. The jar file that contains the class must be in the +classpath or specified in jarFileUris.`, + ExactlyOneOf: []string{"spark_batch.0.main_jar_file_uri"}, + }, + "main_jar_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The HCFS URI of the jar file that contains the main class.`, + ExactlyOneOf: []string{"spark_batch.0.main_class"}, + }, + }, + }, + ExactlyOneOf: []string{"pyspark_batch", "spark_batch", "spark_sql_batch", "spark_r_batch"}, + }, + "spark_r_batch": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `SparkR batch config.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of archives to be extracted into the working directory of each executor. +Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The arguments to pass to the driver. Do not include arguments that can be set as batch +properties, such as --conf, since a collision can occur that causes an incorrect batch submission.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of files to be placed in the working directory of each executor.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "main_r_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The HCFS URI of the main R file to use as the driver. 
Must be a .R or .r file.`, + }, + }, + }, + ExactlyOneOf: []string{"pyspark_batch", "spark_batch", "spark_sql_batch", "spark_r_batch"}, + }, + "spark_sql_batch": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Spark SQL batch config.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of jar files to be added to the Spark CLASSPATH.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The HCFS URI of the script that contains Spark SQL queries to execute.`, + }, + "query_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + ExactlyOneOf: []string{"pyspark_batch", "spark_batch", "spark_sql_batch", "spark_r_batch"}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the batch was created.`, + }, + "creator": { + Type: schema.TypeString, + Computed: true, + Description: `The email address of the user who created the batch.`, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the batch.`, + }, + "operation": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the operation associated with this batch.`, + }, + "runtime_info": { + Type: schema.TypeList, + Computed: true, + Description: `Runtime information about batch execution.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "approximate_usage": { + Type: schema.TypeList, + Computed: true, + Description: `Approximate workload resource usage, calculated when the workload completes(see [Dataproc Serverless pricing](https://cloud.google.com/dataproc-serverless/pricing))`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_type": { + Type: schema.TypeString, + Computed: true, + Description: `Accelerator type being used, if any`, + }, + "milli_accelerator_seconds": { + Type: schema.TypeString, + Computed: true, + Description: `Accelerator usage in (milliAccelerator x seconds)`, + }, + "milli_dcu_seconds": { + Type: schema.TypeString, + Computed: true, + Description: `DCU (Dataproc Compute Units) usage in (milliDCU x seconds)`, + }, + "shuffle_storage_gb_seconds": { + Type: schema.TypeString, + Computed: true, + Description: `Shuffle storage usage in (GB x seconds)`, + }, + }, + }, + }, + "current_usage": { + Type: schema.TypeList, + Computed: true, + Description: `Snapshot of current workload resource usage(see [Dataproc Serverless pricing](https://cloud.google.com/dataproc-serverless/pricing))`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_type": { + Type: schema.TypeString, + Computed: true, + Description: `Accelerator type being used, if any.`, + }, + "milli_accelerator": { + Type: schema.TypeString, + Computed: true, + Description: `Milli (one-thousandth) accelerator..`, + }, + 
"milli_dcu": { + Type: schema.TypeString, + Computed: true, + Description: `Milli (one-thousandth) Dataproc Compute Units (DCUs).`, + }, + "milli_dcu_premium": { + Type: schema.TypeString, + Computed: true, + Description: `Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier.`, + }, + "shuffle_storage_gb": { + Type: schema.TypeString, + Computed: true, + Description: `Shuffle Storage in gigabytes (GB).`, + }, + "shuffle_storage_gb_premium": { + Type: schema.TypeString, + Computed: true, + Description: `Shuffle Storage in gigabytes (GB) charged at premium tier.`, + }, + "snapshot_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of the usage snapshot.`, + }, + }, + }, + }, + "diagnostic_output_uri": { + Type: schema.TypeString, + Computed: true, + Description: `A URI pointing to the location of the diagnostics tarball.`, + }, + "endpoints": { + Type: schema.TypeMap, + Computed: true, + Description: `Map of remote access endpoints (such as web interfaces and APIs) to their URIs.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "output_uri": { + Type: schema.TypeString, + Computed: true, + Description: `A URI pointing to the location of the stdout and stderr of the workload.`, + }, + }, + }, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The state of the batch. For possible values, see the [API documentation](https://cloud.google.com/dataproc-serverless/docs/reference/rest/v1/projects.locations.batches#State).`, + }, + "state_history": { + Type: schema.TypeList, + Computed: true, + Description: `Historical state information for the batch.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The state of the batch at this point in history. For possible values, see the [API documentation](https://cloud.google.com/dataproc-serverless/docs/reference/rest/v1/projects.locations.batches#State).`, + }, + "state_message": { + Type: schema.TypeString, + Computed: true, + Description: `Details about the state at this point in history.`, + }, + "state_start_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the batch entered the historical state.`, + }, + }, + }, + }, + "state_message": { + Type: schema.TypeString, + Computed: true, + Description: `Batch state details, such as a failure description if the state is FAILED.`, + }, + "state_time": { + Type: schema.TypeString, + Computed: true, + Description: `Batch state details, such as a failure description if the state is FAILED.`, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource + and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "uuid": { + Type: schema.TypeString, + Computed: true, + Description: `A batch UUID (Unique Universal Identifier). 
The service generates this value when it creates the batch.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDataprocBatchCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + runtimeConfigProp, err := expandDataprocBatchRuntimeConfig(d.Get("runtime_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("runtime_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(runtimeConfigProp)) && (ok || !reflect.DeepEqual(v, runtimeConfigProp)) { + obj["runtimeConfig"] = runtimeConfigProp + } + environmentConfigProp, err := expandDataprocBatchEnvironmentConfig(d.Get("environment_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("environment_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(environmentConfigProp)) && (ok || !reflect.DeepEqual(v, environmentConfigProp)) { + obj["environmentConfig"] = environmentConfigProp + } + pysparkBatchProp, err := expandDataprocBatchPysparkBatch(d.Get("pyspark_batch"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("pyspark_batch"); !tpgresource.IsEmptyValue(reflect.ValueOf(pysparkBatchProp)) && (ok || !reflect.DeepEqual(v, pysparkBatchProp)) { + obj["pysparkBatch"] = pysparkBatchProp + } + sparkBatchProp, err := expandDataprocBatchSparkBatch(d.Get("spark_batch"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("spark_batch"); !tpgresource.IsEmptyValue(reflect.ValueOf(sparkBatchProp)) && (ok || !reflect.DeepEqual(v, sparkBatchProp)) { + obj["sparkBatch"] = sparkBatchProp + } + sparkRBatchProp, err := expandDataprocBatchSparkRBatch(d.Get("spark_r_batch"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("spark_r_batch"); !tpgresource.IsEmptyValue(reflect.ValueOf(sparkRBatchProp)) && (ok || !reflect.DeepEqual(v, sparkRBatchProp)) { + obj["sparkRBatch"] = sparkRBatchProp + } + sparkSqlBatchProp, err := expandDataprocBatchSparkSqlBatch(d.Get("spark_sql_batch"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("spark_sql_batch"); !tpgresource.IsEmptyValue(reflect.ValueOf(sparkSqlBatchProp)) && (ok || !reflect.DeepEqual(v, sparkSqlBatchProp)) { + obj["sparkSqlBatch"] = sparkSqlBatchProp + } + labelsProp, err := expandDataprocBatchEffectiveLabels(d.Get("effective_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/batches?batchId={{batch_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Batch: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Batch: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating Batch: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/batches/{{batch_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = DataprocOperationWaitTime( + config, res, project, "Creating Batch", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Batch: %s", err) + } + + log.Printf("[DEBUG] Finished creating Batch %q: %#v", d.Id(), res) + + return resourceDataprocBatchRead(d, meta) +} + +func resourceDataprocBatchRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/batches/{{batch_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Batch: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DataprocBatch %q", d.Id())) + } + + res, err = resourceDataprocBatchDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing DataprocBatch because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + + if err := d.Set("name", flattenDataprocBatchName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("uuid", flattenDataprocBatchUuid(res["uuid"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("create_time", flattenDataprocBatchCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("runtime_info", flattenDataprocBatchRuntimeInfo(res["runtimeInfo"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("state", flattenDataprocBatchState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("state_message", flattenDataprocBatchStateMessage(res["stateMessage"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("state_time", flattenDataprocBatchStateTime(res["stateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("creator", flattenDataprocBatchCreator(res["creator"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("labels", flattenDataprocBatchLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("runtime_config", flattenDataprocBatchRuntimeConfig(res["runtimeConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("environment_config", flattenDataprocBatchEnvironmentConfig(res["environmentConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("operation", flattenDataprocBatchOperation(res["operation"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("state_history", flattenDataprocBatchStateHistory(res["stateHistory"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("pyspark_batch", flattenDataprocBatchPysparkBatch(res["pysparkBatch"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("spark_batch", flattenDataprocBatchSparkBatch(res["sparkBatch"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("spark_r_batch", flattenDataprocBatchSparkRBatch(res["sparkRBatch"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("spark_sql_batch", flattenDataprocBatchSparkSqlBatch(res["sparkSqlBatch"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("terraform_labels", flattenDataprocBatchTerraformLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + if err := d.Set("effective_labels", flattenDataprocBatchEffectiveLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Batch: %s", err) + } + + return nil +} + +func resourceDataprocBatchUpdate(d *schema.ResourceData, meta interface{}) error { + // Only the root field "labels" and "terraform_labels" are mutable + return 
resourceDataprocBatchRead(d, meta) +} + +func resourceDataprocBatchDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Batch: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/batches/{{batch_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting Batch %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Batch") + } + + err = DataprocOperationWaitTime( + config, res, project, "Deleting Batch", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Batch %q: %#v", d.Id(), res) + return nil +} + +func resourceDataprocBatchImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P[^/]+)/locations/(?P[^/]+)/batches/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/batches/{{batch_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDataprocBatchName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchUuid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["output_uri"] = + flattenDataprocBatchRuntimeInfoOutputUri(original["outputUri"], d, config) + transformed["diagnostic_output_uri"] = + flattenDataprocBatchRuntimeInfoDiagnosticOutputUri(original["diagnosticOutputUri"], d, config) + transformed["endpoints"] = + flattenDataprocBatchRuntimeInfoEndpoints(original["endpoints"], d, config) + transformed["approximate_usage"] = + flattenDataprocBatchRuntimeInfoApproximateUsage(original["approximateUsage"], d, config) + transformed["current_usage"] = + flattenDataprocBatchRuntimeInfoCurrentUsage(original["currentUsage"], d, config) + return []interface{}{transformed} +} +func 
flattenDataprocBatchRuntimeInfoOutputUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoDiagnosticOutputUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoEndpoints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoApproximateUsage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["milli_dcu_seconds"] = + flattenDataprocBatchRuntimeInfoApproximateUsageMilliDcuSeconds(original["milliDcuSeconds"], d, config) + transformed["shuffle_storage_gb_seconds"] = + flattenDataprocBatchRuntimeInfoApproximateUsageShuffleStorageGbSeconds(original["shuffleStorageGbSeconds"], d, config) + transformed["milli_accelerator_seconds"] = + flattenDataprocBatchRuntimeInfoApproximateUsageMilliAcceleratorSeconds(original["milliAcceleratorSeconds"], d, config) + transformed["accelerator_type"] = + flattenDataprocBatchRuntimeInfoApproximateUsageAcceleratorType(original["acceleratorType"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchRuntimeInfoApproximateUsageMilliDcuSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoApproximateUsageShuffleStorageGbSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoApproximateUsageMilliAcceleratorSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoApproximateUsageAcceleratorType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoCurrentUsage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["milli_dcu"] = + flattenDataprocBatchRuntimeInfoCurrentUsageMilliDcu(original["milliDcu"], d, config) + transformed["shuffle_storage_gb"] = + flattenDataprocBatchRuntimeInfoCurrentUsageShuffleStorageGb(original["shuffleStorageGb"], d, config) + transformed["milli_dcu_premium"] = + flattenDataprocBatchRuntimeInfoCurrentUsageMilliDcuPremium(original["milliDcuPremium"], d, config) + transformed["shuffle_storage_gb_premium"] = + flattenDataprocBatchRuntimeInfoCurrentUsageShuffleStorageGbPremium(original["shuffleStorageGbPremium"], d, config) + transformed["milli_accelerator"] = + flattenDataprocBatchRuntimeInfoCurrentUsageMilliAccelerator(original["milliAccelerator"], d, config) + transformed["accelerator_type"] = + flattenDataprocBatchRuntimeInfoCurrentUsageAcceleratorType(original["acceleratorType"], d, config) + transformed["snapshot_time"] = + flattenDataprocBatchRuntimeInfoCurrentUsageSnapshotTime(original["snapshotTime"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchRuntimeInfoCurrentUsageMilliDcu(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataprocBatchRuntimeInfoCurrentUsageShuffleStorageGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoCurrentUsageMilliDcuPremium(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoCurrentUsageShuffleStorageGbPremium(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoCurrentUsageMilliAccelerator(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoCurrentUsageAcceleratorType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeInfoCurrentUsageSnapshotTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchStateMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchStateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchCreator(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenDataprocBatchRuntimeConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["version"] = + flattenDataprocBatchRuntimeConfigVersion(original["version"], d, config) + transformed["container_image"] = + flattenDataprocBatchRuntimeConfigContainerImage(original["containerImage"], d, config) + transformed["properties"] = + flattenDataprocBatchRuntimeConfigProperties(original["properties"], d, config) + transformed["effective_properties"] = + flattenDataprocBatchRuntimeConfigEffectiveProperties(original["effective_properties"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchRuntimeConfigVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeConfigContainerImage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeConfigProperties(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchRuntimeConfigEffectiveProperties(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchEnvironmentConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + 
transformed := make(map[string]interface{}) + transformed["execution_config"] = + flattenDataprocBatchEnvironmentConfigExecutionConfig(original["executionConfig"], d, config) + transformed["peripherals_config"] = + flattenDataprocBatchEnvironmentConfigPeripheralsConfig(original["peripheralsConfig"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchEnvironmentConfigExecutionConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["service_account"] = + flattenDataprocBatchEnvironmentConfigExecutionConfigServiceAccount(original["serviceAccount"], d, config) + transformed["network_tags"] = + flattenDataprocBatchEnvironmentConfigExecutionConfigNetworkTags(original["networkTags"], d, config) + transformed["kms_key"] = + flattenDataprocBatchEnvironmentConfigExecutionConfigKmsKey(original["kmsKey"], d, config) + transformed["ttl"] = + flattenDataprocBatchEnvironmentConfigExecutionConfigTtl(original["ttl"], d, config) + transformed["staging_bucket"] = + flattenDataprocBatchEnvironmentConfigExecutionConfigStagingBucket(original["stagingBucket"], d, config) + transformed["network_uri"] = + flattenDataprocBatchEnvironmentConfigExecutionConfigNetworkUri(original["networkUri"], d, config) + transformed["subnetwork_uri"] = + flattenDataprocBatchEnvironmentConfigExecutionConfigSubnetworkUri(original["subnetworkUri"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchEnvironmentConfigExecutionConfigServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchEnvironmentConfigExecutionConfigNetworkTags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchEnvironmentConfigExecutionConfigKmsKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchEnvironmentConfigExecutionConfigTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchEnvironmentConfigExecutionConfigStagingBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchEnvironmentConfigExecutionConfigNetworkUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchEnvironmentConfigExecutionConfigSubnetworkUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchEnvironmentConfigPeripheralsConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["metastore_service"] = + flattenDataprocBatchEnvironmentConfigPeripheralsConfigMetastoreService(original["metastoreService"], d, config) + transformed["spark_history_server_config"] = + flattenDataprocBatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig(original["sparkHistoryServerConfig"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchEnvironmentConfigPeripheralsConfigMetastoreService(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["dataproc_cluster"] = + flattenDataprocBatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigDataprocCluster(original["dataprocCluster"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigDataprocCluster(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchOperation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchStateHistory(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "state": flattenDataprocBatchStateHistoryState(original["state"], d, config), + "state_message": flattenDataprocBatchStateHistoryStateMessage(original["stateMessage"], d, config), + "state_start_time": flattenDataprocBatchStateHistoryStateStartTime(original["stateStartTime"], d, config), + }) + } + return transformed +} +func flattenDataprocBatchStateHistoryState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchStateHistoryStateMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchStateHistoryStateStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchPysparkBatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["main_python_file_uri"] = + flattenDataprocBatchPysparkBatchMainPythonFileUri(original["mainPythonFileUri"], d, config) + transformed["args"] = + flattenDataprocBatchPysparkBatchArgs(original["args"], d, config) + transformed["python_file_uris"] = + flattenDataprocBatchPysparkBatchPythonFileUris(original["pythonFileUris"], d, config) + transformed["jar_file_uris"] = + flattenDataprocBatchPysparkBatchJarFileUris(original["jarFileUris"], d, config) + transformed["file_uris"] = + flattenDataprocBatchPysparkBatchFileUris(original["fileUris"], d, config) + transformed["archive_uris"] = + flattenDataprocBatchPysparkBatchArchiveUris(original["archiveUris"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchPysparkBatchMainPythonFileUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchPysparkBatchArgs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchPysparkBatchPythonFileUris(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchPysparkBatchJarFileUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchPysparkBatchFileUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchPysparkBatchArchiveUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkBatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["args"] = + flattenDataprocBatchSparkBatchArgs(original["args"], d, config) + transformed["jar_file_uris"] = + flattenDataprocBatchSparkBatchJarFileUris(original["jarFileUris"], d, config) + transformed["file_uris"] = + flattenDataprocBatchSparkBatchFileUris(original["fileUris"], d, config) + transformed["archive_uris"] = + flattenDataprocBatchSparkBatchArchiveUris(original["archiveUris"], d, config) + transformed["main_jar_file_uri"] = + flattenDataprocBatchSparkBatchMainJarFileUri(original["mainJarFileUri"], d, config) + transformed["main_class"] = + flattenDataprocBatchSparkBatchMainClass(original["mainClass"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchSparkBatchArgs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkBatchJarFileUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkBatchFileUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkBatchArchiveUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkBatchMainJarFileUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkBatchMainClass(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkRBatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["main_r_file_uri"] = + flattenDataprocBatchSparkRBatchMainRFileUri(original["mainRFileUri"], d, config) + transformed["args"] = + flattenDataprocBatchSparkRBatchArgs(original["args"], d, config) + transformed["file_uris"] = + flattenDataprocBatchSparkRBatchFileUris(original["fileUris"], d, config) + transformed["archive_uris"] = + flattenDataprocBatchSparkRBatchArchiveUris(original["archiveUris"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchSparkRBatchMainRFileUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkRBatchArgs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkRBatchFileUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataprocBatchSparkRBatchArchiveUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkSqlBatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["query_file_uri"] = + flattenDataprocBatchSparkSqlBatchQueryFileUri(original["queryFileUri"], d, config) + transformed["jar_file_uris"] = + flattenDataprocBatchSparkSqlBatchJarFileUris(original["jarFileUris"], d, config) + transformed["query_variables"] = + flattenDataprocBatchSparkSqlBatchQueryVariables(original["queryVariables"], d, config) + return []interface{}{transformed} +} +func flattenDataprocBatchSparkSqlBatchQueryFileUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkSqlBatchJarFileUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchSparkSqlBatchQueryVariables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocBatchTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + transformed := make(map[string]interface{}) + if l, ok := d.GetOkExists("terraform_labels"); ok { + for k := range l.(map[string]interface{}) { + transformed[k] = v.(map[string]interface{})[k] + } + } + + return transformed +} + +func flattenDataprocBatchEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDataprocBatchRuntimeConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedVersion, err := expandDataprocBatchRuntimeConfigVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedContainerImage, err := expandDataprocBatchRuntimeConfigContainerImage(original["container_image"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContainerImage); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["containerImage"] = transformedContainerImage + } + + transformedProperties, err := expandDataprocBatchRuntimeConfigProperties(original["properties"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProperties); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["properties"] = transformedProperties + } + + transformedEffectiveProperties, err := expandDataprocBatchRuntimeConfigEffectiveProperties(original["effective_properties"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEffectiveProperties); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["effective_properties"] = transformedEffectiveProperties + } + + return transformed, nil +} + +func expandDataprocBatchRuntimeConfigVersion(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchRuntimeConfigContainerImage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchRuntimeConfigProperties(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDataprocBatchRuntimeConfigEffectiveProperties(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDataprocBatchEnvironmentConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedExecutionConfig, err := expandDataprocBatchEnvironmentConfigExecutionConfig(original["execution_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExecutionConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["executionConfig"] = transformedExecutionConfig + } + + transformedPeripheralsConfig, err := expandDataprocBatchEnvironmentConfigPeripheralsConfig(original["peripherals_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPeripheralsConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["peripheralsConfig"] = transformedPeripheralsConfig + } + + return transformed, nil +} + +func expandDataprocBatchEnvironmentConfigExecutionConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedServiceAccount, err := expandDataprocBatchEnvironmentConfigExecutionConfigServiceAccount(original["service_account"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceAccount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceAccount"] = transformedServiceAccount + } + + transformedNetworkTags, err := expandDataprocBatchEnvironmentConfigExecutionConfigNetworkTags(original["network_tags"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNetworkTags); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["networkTags"] = transformedNetworkTags + } + + transformedKmsKey, err := expandDataprocBatchEnvironmentConfigExecutionConfigKmsKey(original["kms_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKey"] = transformedKmsKey + } + + transformedTtl, err := expandDataprocBatchEnvironmentConfigExecutionConfigTtl(original["ttl"], d, config) + if err != nil { + return nil, err 
+ } else if val := reflect.ValueOf(transformedTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ttl"] = transformedTtl + } + + transformedStagingBucket, err := expandDataprocBatchEnvironmentConfigExecutionConfigStagingBucket(original["staging_bucket"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStagingBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["stagingBucket"] = transformedStagingBucket + } + + transformedNetworkUri, err := expandDataprocBatchEnvironmentConfigExecutionConfigNetworkUri(original["network_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNetworkUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["networkUri"] = transformedNetworkUri + } + + transformedSubnetworkUri, err := expandDataprocBatchEnvironmentConfigExecutionConfigSubnetworkUri(original["subnetwork_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubnetworkUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["subnetworkUri"] = transformedSubnetworkUri + } + + return transformed, nil +} + +func expandDataprocBatchEnvironmentConfigExecutionConfigServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchEnvironmentConfigExecutionConfigNetworkTags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchEnvironmentConfigExecutionConfigKmsKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchEnvironmentConfigExecutionConfigTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchEnvironmentConfigExecutionConfigStagingBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchEnvironmentConfigExecutionConfigNetworkUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchEnvironmentConfigExecutionConfigSubnetworkUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchEnvironmentConfigPeripheralsConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMetastoreService, err := expandDataprocBatchEnvironmentConfigPeripheralsConfigMetastoreService(original["metastore_service"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMetastoreService); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["metastoreService"] = transformedMetastoreService + } + + transformedSparkHistoryServerConfig, err := expandDataprocBatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig(original["spark_history_server_config"], d, config) + if 
err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSparkHistoryServerConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sparkHistoryServerConfig"] = transformedSparkHistoryServerConfig + } + + return transformed, nil +} + +func expandDataprocBatchEnvironmentConfigPeripheralsConfigMetastoreService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDataprocCluster, err := expandDataprocBatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigDataprocCluster(original["dataproc_cluster"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataprocCluster); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dataprocCluster"] = transformedDataprocCluster + } + + return transformed, nil +} + +func expandDataprocBatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigDataprocCluster(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchPysparkBatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMainPythonFileUri, err := expandDataprocBatchPysparkBatchMainPythonFileUri(original["main_python_file_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMainPythonFileUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mainPythonFileUri"] = transformedMainPythonFileUri + } + + transformedArgs, err := expandDataprocBatchPysparkBatchArgs(original["args"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArgs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["args"] = transformedArgs + } + + transformedPythonFileUris, err := expandDataprocBatchPysparkBatchPythonFileUris(original["python_file_uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPythonFileUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pythonFileUris"] = transformedPythonFileUris + } + + transformedJarFileUris, err := expandDataprocBatchPysparkBatchJarFileUris(original["jar_file_uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedJarFileUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["jarFileUris"] = transformedJarFileUris + } + + transformedFileUris, err := expandDataprocBatchPysparkBatchFileUris(original["file_uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFileUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fileUris"] = transformedFileUris + } + + transformedArchiveUris, err := expandDataprocBatchPysparkBatchArchiveUris(original["archive_uris"], d, config) + if err != nil { + return 
nil, err + } else if val := reflect.ValueOf(transformedArchiveUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["archiveUris"] = transformedArchiveUris + } + + return transformed, nil +} + +func expandDataprocBatchPysparkBatchMainPythonFileUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchPysparkBatchArgs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchPysparkBatchPythonFileUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchPysparkBatchJarFileUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchPysparkBatchFileUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchPysparkBatchArchiveUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkBatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedArgs, err := expandDataprocBatchSparkBatchArgs(original["args"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArgs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["args"] = transformedArgs + } + + transformedJarFileUris, err := expandDataprocBatchSparkBatchJarFileUris(original["jar_file_uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedJarFileUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["jarFileUris"] = transformedJarFileUris + } + + transformedFileUris, err := expandDataprocBatchSparkBatchFileUris(original["file_uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFileUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fileUris"] = transformedFileUris + } + + transformedArchiveUris, err := expandDataprocBatchSparkBatchArchiveUris(original["archive_uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArchiveUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["archiveUris"] = transformedArchiveUris + } + + transformedMainJarFileUri, err := expandDataprocBatchSparkBatchMainJarFileUri(original["main_jar_file_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMainJarFileUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mainJarFileUri"] = transformedMainJarFileUri + } + + transformedMainClass, err := expandDataprocBatchSparkBatchMainClass(original["main_class"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMainClass); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mainClass"] = transformedMainClass + } + + return transformed, nil +} + +func expandDataprocBatchSparkBatchArgs(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkBatchJarFileUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkBatchFileUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkBatchArchiveUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkBatchMainJarFileUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkBatchMainClass(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkRBatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMainRFileUri, err := expandDataprocBatchSparkRBatchMainRFileUri(original["main_r_file_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMainRFileUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mainRFileUri"] = transformedMainRFileUri + } + + transformedArgs, err := expandDataprocBatchSparkRBatchArgs(original["args"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArgs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["args"] = transformedArgs + } + + transformedFileUris, err := expandDataprocBatchSparkRBatchFileUris(original["file_uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFileUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fileUris"] = transformedFileUris + } + + transformedArchiveUris, err := expandDataprocBatchSparkRBatchArchiveUris(original["archive_uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArchiveUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["archiveUris"] = transformedArchiveUris + } + + return transformed, nil +} + +func expandDataprocBatchSparkRBatchMainRFileUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkRBatchArgs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkRBatchFileUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkRBatchArchiveUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkSqlBatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := 
make(map[string]interface{}) + + transformedQueryFileUri, err := expandDataprocBatchSparkSqlBatchQueryFileUri(original["query_file_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedQueryFileUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["queryFileUri"] = transformedQueryFileUri + } + + transformedJarFileUris, err := expandDataprocBatchSparkSqlBatchJarFileUris(original["jar_file_uris"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedJarFileUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["jarFileUris"] = transformedJarFileUris + } + + transformedQueryVariables, err := expandDataprocBatchSparkSqlBatchQueryVariables(original["query_variables"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedQueryVariables); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["queryVariables"] = transformedQueryVariables + } + + return transformed, nil +} + +func expandDataprocBatchSparkSqlBatchQueryFileUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkSqlBatchJarFileUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocBatchSparkSqlBatchQueryVariables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDataprocBatchEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func resourceDataprocBatchDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + if obj1, ok := res["runtimeConfig"]; ok { + if rconfig, ok := obj1.(map[string]interface{}); ok { + if obj2, ok := rconfig["properties"]; ok { + if properties, ok := obj2.(map[string]interface{}); ok { + // Update effective_properties to include both server set and client set properties + propertiesCopy := make(map[string]interface{}) + for k, v := range properties { + propertiesCopy[k] = v + } + rconfig["effectiveProperties"] = propertiesCopy + + // Update properties back to original client set properties + originalPropertiesCopy := make(map[string]interface{}) + originalProperties := d.Get("runtime_config.0.properties").(interface{}).(map[string]interface{}) + for k, v := range originalProperties { + originalPropertiesCopy[k] = v + } + rconfig["properties"] = originalPropertiesCopy + return res, nil + } + } + } + } + + return res, nil +} diff --git a/google/services/dataproc/resource_dataproc_batch_generated_test.go b/google/services/dataproc/resource_dataproc_batch_generated_test.go new file mode 100644 index 00000000000..12596d6804a --- /dev/null +++ b/google/services/dataproc/resource_dataproc_batch_generated_test.go @@ -0,0 +1,446 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package dataproc_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccDataprocBatch_dataprocBatchSparkExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "prevent_destroy": false, + "subnetwork_name": acctest.BootstrapSubnetWithFirewallForDataprocBatches(t, "dataproc-spark-test-network", "dataproc-spark-test-subnetwork"), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocBatchDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocBatch_dataprocBatchSparkExample(context), + }, + { + ResourceName: "google_dataproc_batch.example_batch_spark", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"batch_id", "labels", "location", "runtime_config.0.properties", "terraform_labels"}, + }, + }, + }) +} + +func testAccDataprocBatch_dataprocBatchSparkExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_dataproc_batch" "example_batch_spark" { + + batch_id = "tf-test-batch%{random_suffix}" + location = "us-central1" + labels = {"batch_test": "terraform"} + + runtime_config { + properties = { "spark.dynamicAllocation.enabled": "false", "spark.executor.instances": "2" } + } + + environment_config { + execution_config { + subnetwork_uri = "%{subnetwork_name}" + ttl = "3600s" + network_tags = ["tag1"] + } + } + + spark_batch { + main_class = "org.apache.spark.examples.SparkPi" + args = ["10"] + jar_file_uris = ["file:///usr/lib/spark/examples/jars/spark-examples.jar"] + } +} +`, context) +} + +func TestAccDataprocBatch_dataprocBatchSparkFullExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "prevent_destroy": false, + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocBatchDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocBatch_dataprocBatchSparkFullExample(context), + }, + { + ResourceName: "google_dataproc_batch.example_batch_spark", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"batch_id", 
"labels", "location", "runtime_config.0.properties", "terraform_labels"}, + }, + }, + }) +} + +func testAccDataprocBatch_dataprocBatchSparkFullExample(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" { +} + +data "google_storage_project_service_account" "gcs_account" { +} + +resource "google_dataproc_batch" "example_batch_spark" { + batch_id = "tf-test-dataproc-batch%{random_suffix}" + location = "us-central1" + labels = {"batch_test": "terraform"} + + runtime_config { + properties = { "spark.dynamicAllocation.enabled": "false", "spark.executor.instances": "2" } + version = "2.2" + } + + environment_config { + execution_config { + ttl = "3600s" + network_tags = ["tag1"] + kms_key = google_kms_crypto_key.crypto_key.id + network_uri = "default" + service_account = "${data.google_project.project.number}-compute@developer.gserviceaccount.com" + staging_bucket = google_storage_bucket.bucket.name + } + peripherals_config { + metastore_service = google_dataproc_metastore_service.ms.name + spark_history_server_config { + dataproc_cluster = google_dataproc_cluster.basic.id + } + } + } + + spark_batch { + main_class = "org.apache.spark.examples.SparkPi" + args = ["10"] + jar_file_uris = ["file:///usr/lib/spark/examples/jars/spark-examples.jar"] + } + + depends_on = [ + google_kms_crypto_key_iam_member.crypto_key_member_1, + ] +} + +resource "google_storage_bucket" "bucket" { + uniform_bucket_level_access = true + name = "tf-test-dataproc-bucket%{random_suffix}" + location = "US" + force_destroy = true +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "tf-test-example-key%{random_suffix}" + key_ring = google_kms_key_ring.key_ring.id + purpose = "ENCRYPT_DECRYPT" +} + +resource "google_kms_key_ring" "key_ring" { + name = "tf-test-example-keyring%{random_suffix}" + location = "us-central1" +} + +resource "google_kms_crypto_key_iam_member" "crypto_key_member_1" { + crypto_key_id = google_kms_crypto_key.crypto_key.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@dataproc-accounts.iam.gserviceaccount.com" +} + +resource "google_dataproc_cluster" "basic" { + name = "tf-test-dataproc-batch%{random_suffix}" + region = "us-central1" + + cluster_config { + # Keep the costs down with smallest config we can get away with + software_config { + override_properties = { + "dataproc:dataproc.allow.zero.workers" = "true" + "spark:spark.history.fs.logDirectory" = "gs://${google_storage_bucket.bucket.name}/*/spark-job-history" + } + } + + endpoint_config { + enable_http_port_access = true + } + + master_config { + num_instances = 1 + machine_type = "e2-standard-2" + disk_config { + boot_disk_size_gb = 35 + } + } + + metastore_config { + dataproc_metastore_service = google_dataproc_metastore_service.ms.name + } + } +} + + resource "google_dataproc_metastore_service" "ms" { + service_id = "tf-test-dataproc-batch%{random_suffix}" + location = "us-central1" + port = 9080 + tier = "DEVELOPER" + + maintenance_window { + hour_of_day = 2 + day_of_week = "SUNDAY" + } + + hive_metastore_config { + version = "3.1.2" + } +} +`, context) +} + +func TestAccDataprocBatch_dataprocBatchSparksqlExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "prevent_destroy": false, + "subnetwork_name": acctest.BootstrapSubnetWithFirewallForDataprocBatches(t, "dataproc-sparksql-test-network", "dataproc-sparksql-test-subnetwork"), 
+ "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocBatchDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocBatch_dataprocBatchSparksqlExample(context), + }, + { + ResourceName: "google_dataproc_batch.example_batch_sparsql", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"batch_id", "labels", "location", "runtime_config.0.properties", "terraform_labels"}, + }, + }, + }) +} + +func testAccDataprocBatch_dataprocBatchSparksqlExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_dataproc_batch" "example_batch_sparsql" { + + batch_id = "tf-test-batch%{random_suffix}" + location = "us-central1" + + runtime_config { + properties = { "spark.dynamicAllocation.enabled": "false", "spark.executor.instances": "2" } + } + + environment_config { + execution_config { + subnetwork_uri = "%{subnetwork_name}" + } + } + + spark_sql_batch { + query_file_uri = "gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql" + jar_file_uris = ["file:///usr/lib/spark/examples/jars/spark-examples.jar"] + query_variables = { + name = "value" + } + } +} +`, context) +} + +func TestAccDataprocBatch_dataprocBatchPysparkExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "prevent_destroy": false, + "subnetwork_name": acctest.BootstrapSubnetWithFirewallForDataprocBatches(t, "dataproc-pyspark-test-network", "dataproc-pyspark-test-subnetwork"), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocBatchDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocBatch_dataprocBatchPysparkExample(context), + }, + { + ResourceName: "google_dataproc_batch.example_batch_pyspark", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"batch_id", "labels", "location", "runtime_config.0.properties", "terraform_labels"}, + }, + }, + }) +} + +func testAccDataprocBatch_dataprocBatchPysparkExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_dataproc_batch" "example_batch_pyspark" { + batch_id = "tf-test-batch%{random_suffix}" + location = "us-central1" + + runtime_config { + properties = { "spark.dynamicAllocation.enabled": "false", "spark.executor.instances": "2" } + } + + environment_config { + execution_config { + subnetwork_uri = "%{subnetwork_name}" + } + } + + pyspark_batch { + main_python_file_uri = "https://storage.googleapis.com/terraform-batches/test_util.py" + args = ["10"] + jar_file_uris = ["file:///usr/lib/spark/examples/jars/spark-examples.jar"] + python_file_uris = ["gs://dataproc-examples/pyspark/hello-world/hello-world.py"] + archive_uris = [ + "https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked", + "https://storage.googleapis.com/terraform-batches/animals.txt.jar", + "https://storage.googleapis.com/terraform-batches/animals.txt" + ] + file_uris = ["https://storage.googleapis.com/terraform-batches/people.txt"] + } +} +`, context) +} + +func TestAccDataprocBatch_dataprocBatchSparkrExample(t *testing.T) { + t.Parallel() + + context := 
map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "prevent_destroy": false, + "subnetwork_name": acctest.BootstrapSubnetWithFirewallForDataprocBatches(t, "dataproc-pyspark-test-network", "dataproc-pyspark-test-subnetwork"), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocBatchDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocBatch_dataprocBatchSparkrExample(context), + }, + { + ResourceName: "google_dataproc_batch.example_batch_sparkr", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"batch_id", "labels", "location", "runtime_config.0.properties", "terraform_labels"}, + }, + }, + }) +} + +func testAccDataprocBatch_dataprocBatchSparkrExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_dataproc_batch" "example_batch_sparkr" { + + batch_id = "tf-test-batch%{random_suffix}" + location = "us-central1" + labels = {"batch_test": "terraform"} + + runtime_config { + properties = { "spark.dynamicAllocation.enabled": "false", "spark.executor.instances": "2" } + } + + environment_config { + execution_config { + subnetwork_uri = "%{subnetwork_name}" + ttl = "3600s" + network_tags = ["tag1"] + } + } + + spark_r_batch { + main_r_file_uri = "https://storage.googleapis.com/terraform-batches/spark-r-flights.r" + args = ["https://storage.googleapis.com/terraform-batches/flights.csv"] + } +} +`, context) +} + +func testAccCheckDataprocBatchDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_dataproc_batch" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/batches/{{batch_id}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("DataprocBatch still exists at %s", url) + } + } + + return nil + } +} diff --git a/google/services/dataproc/resource_dataproc_batch_sweeper.go b/google/services/dataproc/resource_dataproc_batch_sweeper.go new file mode 100644 index 00000000000..93ca47e2ccc --- /dev/null +++ b/google/services/dataproc/resource_dataproc_batch_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package dataproc + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DataprocBatch", testSweepDataprocBatch) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDataprocBatch(region string) error { + resourceName := "DataprocBatch" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://dataproc.googleapis.com/v1/projects/{{project}}/locations/{{location}}/batches", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["batches"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://dataproc.googleapis.com/v1/projects/{{project}}/locations/{{location}}/batches/{{batch_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/google/services/dataproc/resource_dataproc_batch_test.go b/google/services/dataproc/resource_dataproc_batch_test.go new file mode 100644 index 00000000000..c760075de30 --- /dev/null +++ b/google/services/dataproc/resource_dataproc_batch_test.go @@ -0,0 +1,61 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package dataproc + +import ( + "testing" +) + +func TestCloudDataprocBatchRuntimeConfigVersionDiffSuppress(t *testing.T) { + cases := map[string]struct { + Old, New string + ExpectDiffSuppress bool + }{ + "old version is empty, new version has a value": { + Old: "", + New: "2.2.100", + ExpectDiffSuppress: false, + }, + "old version is the prefix of the new version": { + Old: "2.2", + New: "2.2.100", + ExpectDiffSuppress: true, + }, + "old version is not the prefix of the new version": { + Old: "2.1", + New: "2.2.100", + ExpectDiffSuppress: false, + }, + "new version is empty, old version has a value": { + Old: "2.2.100", + New: "", + ExpectDiffSuppress: false, + }, + "new version is the prefix of the old version": { + Old: "2.2.100", + New: "2.2", + ExpectDiffSuppress: true, + }, + "new version is not the prefix of the old version": { + Old: "2.2.100", + New: "2.1", + ExpectDiffSuppress: false, + }, + "old version is the same with the new version": { + Old: "2.2.100", + New: "2.2.100", + ExpectDiffSuppress: true, + }, + "both new version and old version are empty string": { + Old: "", + New: "", + ExpectDiffSuppress: true, + }, + } + + for tn, tc := range cases { + if CloudDataprocBatchRuntimeConfigVersionDiffSuppressFunc(tc.Old, tc.New) != tc.ExpectDiffSuppress { + t.Errorf("bad: %s, %q => %q expect DiffSuppress to return %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} diff --git a/website/docs/r/dataproc_batch.html.markdown b/website/docs/r/dataproc_batch.html.markdown new file mode 100644 index 00000000000..caa04208f9d --- /dev/null +++ b/website/docs/r/dataproc_batch.html.markdown @@ -0,0 +1,682 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# 
---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "Dataproc" +description: |- + Dataproc Serverless Batches lets you run Spark workloads without requiring you to + provision and manage your own Dataproc cluster. +--- + +# google_dataproc_batch + +Dataproc Serverless Batches lets you run Spark workloads without requiring you to +provision and manage your own Dataproc cluster. + + +To get more information about Batch, see: + +* [API documentation](https://cloud.google.com/dataproc-serverless/docs/reference/rest/v1/projects.locations.batches) +* How-to Guides + * [Dataproc Serverless Batches Intro](https://cloud.google.com/dataproc-serverless/docs/overview) + +## Example Usage - Dataproc Batch Spark + + +```hcl +resource "google_dataproc_batch" "example_batch_spark" { + + batch_id = "tf-test-batch%{random_suffix}" + location = "us-central1" + labels = {"batch_test": "terraform"} + + runtime_config { + properties = { "spark.dynamicAllocation.enabled": "false", "spark.executor.instances": "2" } + } + + environment_config { + execution_config { + subnetwork_uri = "default" + ttl = "3600s" + network_tags = ["tag1"] + } + } + + spark_batch { + main_class = "org.apache.spark.examples.SparkPi" + args = ["10"] + jar_file_uris = ["file:///usr/lib/spark/examples/jars/spark-examples.jar"] + } +} +``` +## Example Usage - Dataproc Batch Spark Full + + +```hcl +data "google_project" "project" { +} + +data "google_storage_project_service_account" "gcs_account" { +} + +resource "google_dataproc_batch" "example_batch_spark" { + batch_id = "dataproc-batch" + location = "us-central1" + labels = {"batch_test": "terraform"} + + runtime_config { + properties = { "spark.dynamicAllocation.enabled": "false", "spark.executor.instances": "2" } + version = "2.2" + } + + environment_config { + execution_config { + ttl = "3600s" + network_tags = ["tag1"] + kms_key = google_kms_crypto_key.crypto_key.id + network_uri = "default" + service_account = "${data.google_project.project.number}-compute@developer.gserviceaccount.com" + staging_bucket = google_storage_bucket.bucket.name + } + peripherals_config { + metastore_service = google_dataproc_metastore_service.ms.name + spark_history_server_config { + dataproc_cluster = google_dataproc_cluster.basic.id + } + } + } + + spark_batch { + main_class = "org.apache.spark.examples.SparkPi" + args = ["10"] + jar_file_uris = ["file:///usr/lib/spark/examples/jars/spark-examples.jar"] + } + + depends_on = [ + google_kms_crypto_key_iam_member.crypto_key_member_1, + ] +} + +resource "google_storage_bucket" "bucket" { + uniform_bucket_level_access = true + name = "dataproc-bucket" + location = "US" + force_destroy = true +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "example-key" + key_ring = google_kms_key_ring.key_ring.id + purpose = "ENCRYPT_DECRYPT" +} + +resource "google_kms_key_ring" "key_ring" { + name = "example-keyring" + location = "us-central1" +} + +resource "google_kms_crypto_key_iam_member" "crypto_key_member_1" { + crypto_key_id = google_kms_crypto_key.crypto_key.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = 
"serviceAccount:service-${data.google_project.project.number}@dataproc-accounts.iam.gserviceaccount.com" +} + +resource "google_dataproc_cluster" "basic" { + name = "dataproc-batch" + region = "us-central1" + + cluster_config { + # Keep the costs down with smallest config we can get away with + software_config { + override_properties = { + "dataproc:dataproc.allow.zero.workers" = "true" + "spark:spark.history.fs.logDirectory" = "gs://${google_storage_bucket.bucket.name}/*/spark-job-history" + } + } + + endpoint_config { + enable_http_port_access = true + } + + master_config { + num_instances = 1 + machine_type = "e2-standard-2" + disk_config { + boot_disk_size_gb = 35 + } + } + + metastore_config { + dataproc_metastore_service = google_dataproc_metastore_service.ms.name + } + } +} + + resource "google_dataproc_metastore_service" "ms" { + service_id = "dataproc-batch" + location = "us-central1" + port = 9080 + tier = "DEVELOPER" + + maintenance_window { + hour_of_day = 2 + day_of_week = "SUNDAY" + } + + hive_metastore_config { + version = "3.1.2" + } +} +``` +## Example Usage - Dataproc Batch Sparksql + + +```hcl +resource "google_dataproc_batch" "example_batch_sparsql" { + + batch_id = "tf-test-batch%{random_suffix}" + location = "us-central1" + + runtime_config { + properties = { "spark.dynamicAllocation.enabled": "false", "spark.executor.instances": "2" } + } + + environment_config { + execution_config { + subnetwork_uri = "default" + } + } + + spark_sql_batch { + query_file_uri = "gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql" + jar_file_uris = ["file:///usr/lib/spark/examples/jars/spark-examples.jar"] + query_variables = { + name = "value" + } + } +} +``` +## Example Usage - Dataproc Batch Pyspark + + +```hcl +resource "google_dataproc_batch" "example_batch_pyspark" { + batch_id = "tf-test-batch%{random_suffix}" + location = "us-central1" + + runtime_config { + properties = { "spark.dynamicAllocation.enabled": "false", "spark.executor.instances": "2" } + } + + environment_config { + execution_config { + subnetwork_uri = "default" + } + } + + pyspark_batch { + main_python_file_uri = "https://storage.googleapis.com/terraform-batches/test_util.py" + args = ["10"] + jar_file_uris = ["file:///usr/lib/spark/examples/jars/spark-examples.jar"] + python_file_uris = ["gs://dataproc-examples/pyspark/hello-world/hello-world.py"] + archive_uris = [ + "https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked", + "https://storage.googleapis.com/terraform-batches/animals.txt.jar", + "https://storage.googleapis.com/terraform-batches/animals.txt" + ] + file_uris = ["https://storage.googleapis.com/terraform-batches/people.txt"] + } +} +``` +## Example Usage - Dataproc Batch Sparkr + + +```hcl +resource "google_dataproc_batch" "example_batch_sparkr" { + + batch_id = "tf-test-batch%{random_suffix}" + location = "us-central1" + labels = {"batch_test": "terraform"} + + runtime_config { + properties = { "spark.dynamicAllocation.enabled": "false", "spark.executor.instances": "2" } + } + + environment_config { + execution_config { + subnetwork_uri = "default" + ttl = "3600s" + network_tags = ["tag1"] + } + } + + spark_r_batch { + main_r_file_uri = "https://storage.googleapis.com/terraform-batches/spark-r-flights.r" + args = ["https://storage.googleapis.com/terraform-batches/flights.csv"] + } +} +``` + +## Argument Reference + +The following arguments are supported: + + + +- - - + + +* `labels` - + (Optional) + The labels to associate with this batch. 
+ + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field `effective_labels` for all of the labels present on the resource. + +* `runtime_config` - + (Optional) + Runtime configuration for the batch execution. + Structure is [documented below](#nested_runtime_config). + +* `environment_config` - + (Optional) + Environment configuration for the batch execution. + Structure is [documented below](#nested_environment_config). + +* `pyspark_batch` - + (Optional) + PySpark batch config. + Structure is [documented below](#nested_pyspark_batch). + +* `spark_batch` - + (Optional) + Spark batch config. + Structure is [documented below](#nested_spark_batch). + +* `spark_r_batch` - + (Optional) + SparkR batch config. + Structure is [documented below](#nested_spark_r_batch). + +* `spark_sql_batch` - + (Optional) + Spark SQL batch config. + Structure is [documented below](#nested_spark_sql_batch). + +* `location` - + (Optional) + The location in which the batch will be created. + +* `batch_id` - + (Optional) + The ID to use for the batch, which will become the final component of the batch's resource name. + This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/. + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + + +The `runtime_config` block supports: + +* `version` - + (Optional) + Version of the batch runtime. + +* `container_image` - + (Optional) + Optional custom container image for the job runtime environment. If not specified, a default container image will be used. + +* `properties` - + (Optional) + A mapping of property names to values, which are used to configure workload execution. + +* `effective_properties` - + (Output) + A mapping of property names to values, which are used to configure workload execution. + +The `environment_config` block supports: + +* `execution_config` - + (Optional) + Execution configuration for a workload. + Structure is [documented below](#nested_execution_config). + +* `peripherals_config` - + (Optional) + Peripherals configuration that the workload has access to. + Structure is [documented below](#nested_peripherals_config). + + +The `execution_config` block supports: + +* `service_account` - + (Optional) + Service account used to execute the workload. + +* `network_tags` - + (Optional) + Tags used for network traffic control. + +* `kms_key` - + (Optional) + The Cloud KMS key to use for encryption. + +* `ttl` - + (Optional) + The duration after which the workload will be terminated. + When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing + work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it + exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, + it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. + Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idleTtl are specified (for an interactive session), + the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idleTtl or + when ttl has been exceeded, whichever occurs first. 
+ +* `staging_bucket` - + (Optional) + A Cloud Storage bucket used to stage workload dependencies, config files, and store + workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, + Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, + and then create and manage project-level, per-location staging and temporary buckets. + This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket. + +* `network_uri` - + (Optional) + Network configuration for workload execution. + +* `subnetwork_uri` - + (Optional) + Subnetwork configuration for workload execution. + +The `peripherals_config` block supports: + +* `metastore_service` - + (Optional) + Resource name of an existing Dataproc Metastore service. + +* `spark_history_server_config` - + (Optional) + The Spark History Server configuration for the workload. + Structure is [documented below](#nested_spark_history_server_config). + + +The `spark_history_server_config` block supports: + +* `dataproc_cluster` - + (Optional) + Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload. + +The `pyspark_batch` block supports: + +* `main_python_file_uri` - + (Optional) + The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file. + +* `args` - + (Optional) + The arguments to pass to the driver. Do not include arguments that can be set as batch + properties, such as --conf, since a collision can occur that causes an incorrect batch submission. + +* `python_file_uris` - + (Optional) + HCFS file URIs of Python files to pass to the PySpark framework. + Supported file types: .py, .egg, and .zip. + +* `jar_file_uris` - + (Optional) + HCFS URIs of jar files to add to the classpath of the Spark driver and tasks. + +* `file_uris` - + (Optional) + HCFS URIs of files to be placed in the working directory of each executor. + +* `archive_uris` - + (Optional) + HCFS URIs of archives to be extracted into the working directory of each executor. + Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + +The `spark_batch` block supports: + +* `args` - + (Optional) + The arguments to pass to the driver. Do not include arguments that can be set as batch + properties, such as --conf, since a collision can occur that causes an incorrect batch submission. + +* `jar_file_uris` - + (Optional) + HCFS URIs of jar files to add to the classpath of the Spark driver and tasks. + +* `file_uris` - + (Optional) + HCFS URIs of files to be placed in the working directory of each executor. + +* `archive_uris` - + (Optional) + HCFS URIs of archives to be extracted into the working directory of each executor. + Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + +* `main_jar_file_uri` - + (Optional) + The HCFS URI of the jar file that contains the main class. + +* `main_class` - + (Optional) + The name of the driver main class. The jar file that contains the class must be in the + classpath or specified in jarFileUris. + +The `spark_r_batch` block supports: + +* `main_r_file_uri` - + (Optional) + The HCFS URI of the main R file to use as the driver. Must be a .R or .r file. + +* `args` - + (Optional) + The arguments to pass to the driver. Do not include arguments that can be set as batch + properties, such as --conf, since a collision can occur that causes an incorrect batch submission. 
+
+* `file_uris` -
+  (Optional)
+  HCFS URIs of files to be placed in the working directory of each executor.
+
+* `archive_uris` -
+  (Optional)
+  HCFS URIs of archives to be extracted into the working directory of each executor.
+  Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+
+The `spark_sql_batch` block supports:
+
+* `query_file_uri` -
+  (Optional)
+  The HCFS URI of the script that contains Spark SQL queries to execute.
+
+* `jar_file_uris` -
+  (Optional)
+  HCFS URIs of jar files to be added to the Spark CLASSPATH.
+
+* `query_variables` -
+  (Optional)
+  Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).
+
+## Attributes Reference
+
+In addition to the arguments listed above, the following computed attributes are exported:
+
+* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/batches/{{batch_id}}`
+
+* `name` -
+  The resource name of the batch.
+
+* `uuid` -
+  A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
+
+* `create_time` -
+  The time when the batch was created.
+
+* `runtime_info` -
+  Runtime information about batch execution.
+  Structure is [documented below](#nested_runtime_info).
+
+* `state` -
+  The state of the batch. For possible values, see the [API documentation](https://cloud.google.com/dataproc-serverless/docs/reference/rest/v1/projects.locations.batches#State).
+
+* `state_message` -
+  Batch state details, such as a failure description if the state is FAILED.
+
+* `state_time` -
+  The time when the batch entered its current state.
+
+* `creator` -
+  The email address of the user who created the batch.
+
+* `operation` -
+  The resource name of the operation associated with this batch.
+
+* `state_history` -
+  Historical state information for the batch.
+  Structure is [documented below](#nested_state_history).
+
+* `terraform_labels` -
+  The combination of labels configured directly on the resource
+  and default labels configured on the provider.
+
+* `effective_labels` -
+  All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.
+
+
+The `runtime_info` block contains:
+
+* `output_uri` -
+  (Output)
+  A URI pointing to the location of the stdout and stderr of the workload.
+
+* `diagnostic_output_uri` -
+  (Output)
+  A URI pointing to the location of the diagnostics tarball.
+
+* `endpoints` -
+  (Output)
+  Map of remote access endpoints (such as web interfaces and APIs) to their URIs.
+
+* `approximate_usage` -
+  (Output)
+  Approximate workload resource usage, calculated when the workload completes (see [Dataproc Serverless pricing](https://cloud.google.com/dataproc-serverless/pricing)).
+  Structure is [documented below](#nested_approximate_usage).
+
+* `current_usage` -
+  (Output)
+  Snapshot of current workload resource usage (see [Dataproc Serverless pricing](https://cloud.google.com/dataproc-serverless/pricing)).
+  Structure is [documented below](#nested_current_usage).
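+
+As an illustration of consuming these computed attributes, the minimal sketch below (assuming a `google_dataproc_batch` resource named `example` is declared elsewhere; the resource and output names are placeholders) exposes the batch state and the driver output location as Terraform outputs:
+
+```tf
+# Sketch only: assumes a google_dataproc_batch.example resource exists in the configuration.
+output "batch_state" {
+  value = google_dataproc_batch.example.state
+}
+
+output "batch_output_uri" {
+  # Nested computed blocks are exposed as lists, so index into runtime_info first.
+  value = google_dataproc_batch.example.runtime_info[0].output_uri
+}
+```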
+
+
+The `approximate_usage` block contains:
+
+* `milli_dcu_seconds` -
+  (Output)
+  DCU (Dataproc Compute Units) usage in (milliDCU x seconds).
+
+* `shuffle_storage_gb_seconds` -
+  (Output)
+  Shuffle storage usage in (GB x seconds).
+
+* `milli_accelerator_seconds` -
+  (Output)
+  Accelerator usage in (milliAccelerator x seconds).
+
+* `accelerator_type` -
+  (Output)
+  Accelerator type being used, if any.
+
+The `current_usage` block contains:
+
+* `milli_dcu` -
+  (Output)
+  Milli (one-thousandth) Dataproc Compute Units (DCUs).
+
+* `shuffle_storage_gb` -
+  (Output)
+  Shuffle Storage in gigabytes (GB).
+
+* `milli_dcu_premium` -
+  (Output)
+  Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier.
+
+* `shuffle_storage_gb_premium` -
+  (Output)
+  Shuffle Storage in gigabytes (GB) charged at premium tier.
+
+* `milli_accelerator` -
+  (Output)
+  Milli (one-thousandth) accelerator.
+
+* `accelerator_type` -
+  (Output)
+  Accelerator type being used, if any.
+
+* `snapshot_time` -
+  (Output)
+  The timestamp of the usage snapshot.
+
+The `state_history` block contains:
+
+* `state` -
+  (Output)
+  The state of the batch at this point in history. For possible values, see the [API documentation](https://cloud.google.com/dataproc-serverless/docs/reference/rest/v1/projects.locations.batches#State).
+
+* `state_message` -
+  (Output)
+  Details about the state at this point in history.
+
+* `state_start_time` -
+  (Output)
+  The time when the batch entered the historical state.
+
+## Timeouts
+
+This resource provides the following
+[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options:
+
+- `create` - Default is 10 minutes.
+- `update` - Default is 20 minutes.
+- `delete` - Default is 5 minutes.
+
+## Import
+
+Batch can be imported using any of these accepted formats:
+
+* `projects/{{project}}/locations/{{location}}/batches/{{batch_id}}`
+* `{{project}}/{{location}}/{{batch_id}}`
+* `{{location}}/{{batch_id}}`
+
+In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Batch using one of the formats above. For example:
+
+```tf
+import {
+  id = "projects/{{project}}/locations/{{location}}/batches/{{batch_id}}"
+  to = google_dataproc_batch.default
+}
+```
+
+When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Batch can be imported using one of the formats above. For example:
+
+```
+$ terraform import google_dataproc_batch.default projects/{{project}}/locations/{{location}}/batches/{{batch_id}}
+$ terraform import google_dataproc_batch.default {{project}}/{{location}}/{{batch_id}}
+$ terraform import google_dataproc_batch.default {{location}}/{{batch_id}}
+```
+
+## User Project Overrides
+
+This resource supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override).
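+
+For example, a minimal provider configuration sketch enabling the override (the billing project shown is a placeholder):
+
+```tf
+provider "google" {
+  # When enabled, supported requests attribute quota and billing to the
+  # project set in billing_project instead of the resource's project.
+  user_project_override = true
+  billing_project       = "my-billing-project" # placeholder project ID
+}
+```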