From e751da4844754e6f762c89242677c073448001ae Mon Sep 17 00:00:00 2001 From: rawmind0 Date: Thu, 2 Sep 2021 12:47:37 +0200 Subject: [PATCH 1/6] Added rancher2_cluster_v2 resource to support rke2 and k3s (tech preview) --- docs/data-sources/cluster_v2.md | 40 ++ docs/resources/cluster_v2.md | 425 ++++++++++++++++++ rancher2/0_provider_upgrade_test.go | 1 + rancher2/data_source_rancher2_cluster_v2.go | 104 +++++ .../data_source_rancher2_cluster_v2_test.go | 33 ++ rancher2/import_rancher2_cluster_v2.go | 14 + rancher2/provider.go | 2 + rancher2/resource_rancher2_cluster.go | 60 ++- rancher2/resource_rancher2_cluster_v2.go | 286 ++++++++++++ rancher2/resource_rancher2_cluster_v2_test.go | 183 ++++++++ rancher2/schema_cluster_v2.go | 95 ++++ rancher2/schema_cluster_v2_rke_config.go | 127 ++++++ rancher2/schema_cluster_v2_rke_config_etcd.go | 82 ++++ ...uster_v2_rke_config_local_auth_endpoint.go | 27 ++ ...hema_cluster_v2_rke_config_machine_pool.go | 116 +++++ .../schema_cluster_v2_rke_config_registry.go | 87 ++++ ...ema_cluster_v2_rke_config_system_config.go | 73 +++ ..._cluster_v2_rke_config_upgrade_strategy.go | 103 +++++ rancher2/schema_taint_v2.go | 39 ++ rancher2/structure_cluster_v2.go | 123 +++++ rancher2/structure_cluster_v2_rke_config.go | 90 ++++ .../structure_cluster_v2_rke_config_etcd.go | 117 +++++ ...ructure_cluster_v2_rke_config_etcd_test.go | 138 ++++++ ...uster_v2_rke_config_local_auth_endpoint.go | 50 +++ ..._v2_rke_config_local_auth_endpoint_test.go | 71 +++ ...ture_cluster_v2_rke_config_machine_pool.go | 176 ++++++++ ...cluster_v2_rke_config_machine_pool_test.go | 229 ++++++++++ ...tructure_cluster_v2_rke_config_registry.go | 146 ++++++ ...ure_cluster_v2_rke_config_registry_test.go | 193 ++++++++ ...ure_cluster_v2_rke_config_system_config.go | 136 ++++++ ...luster_v2_rke_config_system_config_test.go | 198 ++++++++ ..._cluster_v2_rke_config_upgrade_strategy.go | 123 +++++ ...ter_v2_rke_config_upgrade_strategy_test.go | 142 ++++++ 
.../structure_cluster_v2_rke_config_z_test.go | 94 ++++ rancher2/structure_cluster_v2_test.go | 132 ++++++ rancher2/structure_env_var_v2.go | 54 +++ rancher2/structure_env_var_v2_test.go | 78 ++++ rancher2/structure_taint_v2.go | 58 +++ rancher2/structure_taint_v2_test.go | 72 +++ 39 files changed, 4305 insertions(+), 12 deletions(-) create mode 100644 docs/data-sources/cluster_v2.md create mode 100644 docs/resources/cluster_v2.md create mode 100644 rancher2/data_source_rancher2_cluster_v2.go create mode 100644 rancher2/data_source_rancher2_cluster_v2_test.go create mode 100644 rancher2/import_rancher2_cluster_v2.go create mode 100644 rancher2/resource_rancher2_cluster_v2.go create mode 100644 rancher2/resource_rancher2_cluster_v2_test.go create mode 100644 rancher2/schema_cluster_v2.go create mode 100644 rancher2/schema_cluster_v2_rke_config.go create mode 100644 rancher2/schema_cluster_v2_rke_config_etcd.go create mode 100644 rancher2/schema_cluster_v2_rke_config_local_auth_endpoint.go create mode 100644 rancher2/schema_cluster_v2_rke_config_machine_pool.go create mode 100644 rancher2/schema_cluster_v2_rke_config_registry.go create mode 100644 rancher2/schema_cluster_v2_rke_config_system_config.go create mode 100644 rancher2/schema_cluster_v2_rke_config_upgrade_strategy.go create mode 100644 rancher2/schema_taint_v2.go create mode 100644 rancher2/structure_cluster_v2.go create mode 100644 rancher2/structure_cluster_v2_rke_config.go create mode 100644 rancher2/structure_cluster_v2_rke_config_etcd.go create mode 100644 rancher2/structure_cluster_v2_rke_config_etcd_test.go create mode 100644 rancher2/structure_cluster_v2_rke_config_local_auth_endpoint.go create mode 100644 rancher2/structure_cluster_v2_rke_config_local_auth_endpoint_test.go create mode 100644 rancher2/structure_cluster_v2_rke_config_machine_pool.go create mode 100644 rancher2/structure_cluster_v2_rke_config_machine_pool_test.go create mode 100644 rancher2/structure_cluster_v2_rke_config_registry.go 
create mode 100644 rancher2/structure_cluster_v2_rke_config_registry_test.go create mode 100644 rancher2/structure_cluster_v2_rke_config_system_config.go create mode 100644 rancher2/structure_cluster_v2_rke_config_system_config_test.go create mode 100644 rancher2/structure_cluster_v2_rke_config_upgrade_strategy.go create mode 100644 rancher2/structure_cluster_v2_rke_config_upgrade_strategy_test.go create mode 100644 rancher2/structure_cluster_v2_rke_config_z_test.go create mode 100644 rancher2/structure_cluster_v2_test.go create mode 100644 rancher2/structure_env_var_v2.go create mode 100644 rancher2/structure_env_var_v2_test.go create mode 100644 rancher2/structure_taint_v2.go create mode 100644 rancher2/structure_taint_v2_test.go diff --git a/docs/data-sources/cluster_v2.md b/docs/data-sources/cluster_v2.md new file mode 100644 index 000000000..389fe4b0f --- /dev/null +++ b/docs/data-sources/cluster_v2.md @@ -0,0 +1,40 @@ +--- +page_title: "rancher2_cluster_v2 Data Source" +--- + +# rancher2\_cluster\_v2 Data Source + +Use this data source to retrieve information about a Rancher v2 cluster. + +## Example Usage + +```hcl +data "rancher2_cluster_v2" "foo" { + name = "foo" + fleet_namespace = "fleet-ns" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the Cluster v2 (string) +* `fleet_namespace` - (Optional) The fleet namespace of the Cluster v2. 
Default: `\"fleet-default\"` (string) + +## Attributes Reference + +The following attributes are exported: + +* `id` - (Computed) The ID of the resource (string) +* `cluster_registration_token` - (Computed/Sensitive) Cluster Registration Token generated for the cluster v2 (list maxitems:1) +* `kube_config` - (Computed/Sensitive) Kube Config generated for the cluster v2 (string) +* `cluster_v1_id` - (Computed) Cluster v1 id for cluster v2 (string) +* `resource_version` - (Computed) Cluster v2 k8s resource version (string) +* `kubernetes_version` - (Computed) The kubernetes version of the Cluster v2 (list maxitems:1) +* `agent_env_vars` - (Computed) Optional Agent Env Vars for Rancher agent (list) +* `rke_config` - (Computed) The RKE configuration for `k3s` and `rke2` Clusters v2. (list maxitems:1) +* `cloud_credential_secret_name` - (Computed) Cluster V2 cloud credential secret name (string) +* `default_pod_security_policy_template_name` - (Computed) Cluster V2 default pod security policy template name (string) +* `default_cluster_role_for_project_members` - (Computed) Cluster V2 default cluster role for project members (string) +* `enable_network_policy` - (Computed) Enable k8s network policy at Cluster V2 (bool) diff --git a/docs/resources/cluster_v2.md b/docs/resources/cluster_v2.md new file mode 100644 index 000000000..7de485a93 --- /dev/null +++ b/docs/resources/cluster_v2.md @@ -0,0 +1,425 @@ +--- +page_title: "rancher2_cluster_v2 Resource" +--- + +# rancher2\_cluster\_v2 Resource + +Provides a Rancher v2 Cluster v2 resource. This can be used to create RKE2 and K3S Clusters for Rancher v2 environments and retrieve their information. This resource is supported as tech preview from Rancher v2.6.0 and above. 
+ +## Example Usage + + +### Creating Rancher v2 custom cluster v2 + +```hcl +# Create a new rancher v2 RKE2 custom Cluster v2 +resource "rancher2_cluster_v2" "foo" { + name = "foo" + fleet_namespace = "fleet-ns" + kubernetes_version = "v1.21.4+rke2r2" + enable_network_policy = false + default_cluster_role_for_project_members = "user" +} + +# Create a new rancher v2 K3S custom Cluster v2 +resource "rancher2_cluster_v2" "foo" { + name = "foo" + fleet_namespace = "fleet-ns" + kubernetes_version = "v1.21.4+k3s1" + enable_network_policy = false + default_cluster_role_for_project_members = "user" +} +``` + +**Note** Once created, get the node command from `rancher2_cluster_v2.foo.cluster_registration_token` + +### Creating Rancher v2 amazonec2 cluster v2 + +```hcl +# Create amazonec2 cloud credential +resource "rancher2_cloud_credential" "foo" { + name = "foo" + amazonec2_credential_config { + access_key = "" + secret_key = "" + } +} + +# Create amazonec2 machine config v2 +resource "rancher2_machine_config_v2" "foo" { + generate_name = "test-foo" + amazonec2_config { + ami = "" + region = "" + security_group = [] + subnet_id = "" + vpc_id = "" + zone = "" + } +} + +# Create a new rancher v2 amazonec2 RKE2 Cluster v2 +resource "rancher2_cluster_v2" "foo-rke2" { + name = "foo-rke2" + kubernetes_version = "v1.21.4+rke2r2" + enable_network_policy = false + default_cluster_role_for_project_members = "user" + rke_config { + machine_pools { + name = "pool1" + cloud_credential_secret_name = rancher2_cloud_credential.foo.id + control_plane_role = true + etcd_role = true + worker_role = true + quantity = 1 + machine_config { + kind = rancher2_machine_config_v2.foo.kind + name = rancher2_machine_config_v2.foo.name + } + } + } +} + +# Create a new rancher v2 amazonec2 K3S Cluster v2 +resource "rancher2_cluster_v2" "foo-k3s" { + name = "foo-k3s" + kubernetes_version = "v1.21.4+k3s1" + enable_network_policy = false + default_cluster_role_for_project_members = "user" + rke_config { + 
machine_pools { + name = "pool1" + cloud_credential_secret_name = rancher2_cloud_credential.foo.id + control_plane_role = true + etcd_role = true + worker_role = true + quantity = 1 + machine_config { + kind = rancher2_machine_config_v2.foo.kind + name = rancher2_machine_config_v2.foo.name + } + } + } +} +``` + +```hcl +# Create amazonec2 cloud credential +resource "rancher2_cloud_credential" "foo" { + name = "foo" + amazonec2_credential_config { + access_key = "" + secret_key = "" + } +} + +# Create amazonec2 machine config v2 +resource "rancher2_machine_config_v2" "foo" { + generate_name = "test-foo" + amazonec2_config { + ami = "" + region = "" + security_group = [] + subnet_id = "" + vpc_id = "" + zone = "" + } +} + +resource "rancher2_cluster_v2" "foo" { + name = "foo" + kubernetes_version = "v1.21.4+k3s1" + enable_network_policy = false + rke_config { + machine_pools { + name = "pool1" + cloud_credential_secret_name = rancher2_cloud_credential.foo.id + control_plane_role = true + etcd_role = true + worker_role = true + quantity = 1 + machine_config { + kind = rancher2_machine_config_v2.foo.kind + name = rancher2_machine_config_v2.foo.name + } + } + machine_global_config = < 0 { - err = client.APIBaseClient.Action(managementClient.ClusterType, "generateKubeconfig", clusterResource, nil, kubeConfig) - if err != nil { - return err - } + kubeConfig, err := getClusterKubeconfig(meta.(*Config), cluster.ID) + if err != nil && !IsForbidden(err) { + return err } var monitoringInput *managementClient.MonitoringInput @@ -582,6 +573,51 @@ func createClusterRegistrationToken(client *managementClient.Client, clusterID s return newRegToken, nil } +func getClusterKubeconfig(c *Config, id string) (*managementClient.GenerateKubeConfigOutput, error) { + action := "generateKubeconfig" + cluster := &Cluster{} + + client, err := c.ManagementClient() + if err != nil { + return nil, fmt.Errorf("Getting cluster Kubeconfig: %v", err) + } + + ctx, cancel := 
context.WithTimeout(context.Background(), c.Timeout) + defer cancel() + for { + err = client.APIBaseClient.ByID(managementClient.ClusterType, id, cluster) + if err != nil { + if !IsNotFound(err) && !IsForbidden(err) && !IsServiceUnavailableError(err) { + return nil, fmt.Errorf("Getting cluster Kubeconfig: %v", err) + } + } else if len(cluster.Actions[action]) > 0 { + clusterResource := &norman.Resource{ + ID: cluster.ID, + Type: cluster.Type, + Links: cluster.Links, + Actions: cluster.Actions, + } + kubeConfig := &managementClient.GenerateKubeConfigOutput{} + err = client.APIBaseClient.Action(managementClient.ClusterType, action, clusterResource, nil, kubeConfig) + if err == nil { + return kubeConfig, nil + } + if cluster.LocalClusterAuthEndpoint != nil && cluster.LocalClusterAuthEndpoint.Enabled && IsServiceUnavailableError(err) { + log.Printf("[WARN] Getting cluster Kubeconfig: kubeconfig is not yet available for cluster %s", cluster.Name) + return kubeConfig, nil + } + if !IsNotFound(err) && !IsForbidden(err) && !IsServiceUnavailableError(err) { + return nil, fmt.Errorf("Getting cluster Kubeconfig: %v", err) + } + } + select { + case <-time.After(rancher2RetriesWait * time.Second): + case <-ctx.Done(): + return nil, fmt.Errorf("Timeout getting cluster Kubeconfig: %v", err) + } + } +} + func updateClusterMonitoringApps(meta interface{}, systemProjectID, version string) error { cliProject, err := meta.(*Config).ProjectClient(systemProjectID) if err != nil { diff --git a/rancher2/resource_rancher2_cluster_v2.go b/rancher2/resource_rancher2_cluster_v2.go new file mode 100644 index 000000000..f877d31ec --- /dev/null +++ b/rancher2/resource_rancher2_cluster_v2.go @@ -0,0 +1,286 @@ +package rancher2 + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + norman "github.com/rancher/norman/types" + managementClient 
"github.com/rancher/rancher/pkg/client/generated/management/v3" +) + +func resourceRancher2ClusterV2() *schema.Resource { + return &schema.Resource{ + Create: resourceRancher2ClusterV2Create, + Read: resourceRancher2ClusterV2Read, + Update: resourceRancher2ClusterV2Update, + Delete: resourceRancher2ClusterV2Delete, + Importer: &schema.ResourceImporter{ + State: resourceRancher2ClusterV2Import, + }, + Schema: clusterV2Fields(), + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + } +} + +func resourceRancher2ClusterV2Create(d *schema.ResourceData, meta interface{}) error { + name := d.Get("name").(string) + cluster := expandClusterV2(d) + + log.Printf("[INFO] Creating Cluster V2 %s", name) + + newCluster, err := createClusterV2(meta.(*Config), cluster) + if err != nil { + return err + } + d.SetId(newCluster.ID) + newCluster, err = waitForClusterV2State(meta.(*Config), newCluster.ID, clusterV2CreatedCondition, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + + // Waiting for cluster v2 active if it has machine pools defined + if newCluster.Spec.RKEConfig != nil && newCluster.Spec.RKEConfig.MachinePools != nil && len(newCluster.Spec.RKEConfig.MachinePools) > 0 { + newCluster, err = waitForClusterV2State(meta.(*Config), newCluster.ID, clusterV2ActiveCondition, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + } + + return resourceRancher2ClusterV2Read(d, meta) +} + +func resourceRancher2ClusterV2Read(d *schema.ResourceData, meta interface{}) error { + log.Printf("[INFO] Refreshing Cluster V2 %s", d.Id()) + + cluster, err := getClusterV2ByID(meta.(*Config), d.Id()) + if err != nil { + if IsNotFound(err) || IsForbidden(err) { + log.Printf("[INFO] Cluster V2 %s not found", d.Id()) + d.SetId("") + return nil + } + return err + } + d.Set("cluster_v1_id", cluster.Status.ClusterName) + 
err = setClusterV2LegacyData(d, meta.(*Config)) + if err != nil { + return err + } + return flattenClusterV2(d, cluster) +} + +func resourceRancher2ClusterV2Update(d *schema.ResourceData, meta interface{}) error { + cluster := expandClusterV2(d) + log.Printf("[INFO] Updating Cluster V2 %s", d.Id()) + + newCluster, err := updateClusterV2(meta.(*Config), d.Id(), cluster) + if err != nil { + return err + } + // Waiting for cluster v2 active if it has machine pools defined + if newCluster.Spec.RKEConfig != nil && newCluster.Spec.RKEConfig.MachinePools != nil && len(newCluster.Spec.RKEConfig.MachinePools) > 0 { + newCluster, err = waitForClusterV2State(meta.(*Config), newCluster.ID, clusterV2ActiveCondition, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + } + return resourceRancher2ClusterV2Read(d, meta) +} + +func resourceRancher2ClusterV2Delete(d *schema.ResourceData, meta interface{}) error { + name := d.Get("name").(string) + log.Printf("[INFO] Deleting Cluster V2 %s", name) + + cluster, err := getClusterV2ByID(meta.(*Config), d.Id()) + if err != nil { + if IsNotFound(err) || IsForbidden(err) { + d.SetId("") + return nil + } + } + err = deleteClusterV2(meta.(*Config), cluster) + if err != nil { + return err + } + stateConf := &resource.StateChangeConf{ + Pending: []string{}, + Target: []string{"removed"}, + Refresh: clusterV2StateRefreshFunc(meta, cluster.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 1 * time.Second, + MinTimeout: 3 * time.Second, + } + _, waitErr := stateConf.WaitForState() + if waitErr != nil { + return fmt.Errorf("[ERROR] waiting for cluster (%s) to be removed: %s", cluster.ID, waitErr) + } + d.SetId("") + return nil +} + +// clusterV2StateRefreshFunc returns a resource.StateRefreshFunc, used to watch a Rancher Cluster v2. 
+func clusterV2StateRefreshFunc(meta interface{}, objID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + obj, err := getClusterV2ByID(meta.(*Config), objID) + if err != nil { + if IsNotFound(err) || IsForbidden(err) { + return obj, "removed", nil + } + return nil, "", err + } + return obj, "active", nil + } +} + +// Rancher2 Cluster V2 API CRUD functions +func createClusterV2(c *Config, obj *ClusterV2) (*ClusterV2, error) { + if c == nil { + return nil, fmt.Errorf("Creating cluster V2: Provider config is nil") + } + if obj == nil { + return nil, fmt.Errorf("Creating cluster V2: Cluster V2 is nil") + } + resp := &ClusterV2{} + err := c.createObjectV2(rancher2DefaultLocalClusterID, clusterV2APIType, obj, resp) + if err != nil { + return nil, fmt.Errorf("Creating cluster V2: %s", err) + } + return resp, nil +} + +func deleteClusterV2(c *Config, obj *ClusterV2) error { + if c == nil { + return fmt.Errorf("Deleting cluster V2: Provider config is nil") + } + if obj == nil { + return fmt.Errorf("Deleting cluster V2: Cluster V2 is nil") + } + resource := &norman.Resource{ + ID: obj.ID, + Type: clusterV2APIType, + Links: obj.Links, + Actions: obj.Actions, + } + return c.deleteObjectV2(rancher2DefaultLocalClusterID, resource) +} + +func getClusterV2ByID(c *Config, id string) (*ClusterV2, error) { + if c == nil { + return nil, fmt.Errorf("Getting cluster V2: Provider config is nil") + } + if len(id) == 0 { + return nil, fmt.Errorf("Getting cluster V2: Cluster V2 ID is empty") + } + resp := &ClusterV2{} + err := c.getObjectV2ByID(rancher2DefaultLocalClusterID, id, clusterV2APIType, resp) + if err != nil { + if !IsServerError(err) && !IsNotFound(err) && !IsForbidden(err) { + return nil, fmt.Errorf("Getting cluster V2: %s", err) + } + return nil, err + } + return resp, nil +} + +func updateClusterV2(c *Config, id string, obj *ClusterV2) (*ClusterV2, error) { + if c == nil { + return nil, fmt.Errorf("Updating cluster V2: Provider config is 
nil") + } + if len(id) == 0 { + return nil, fmt.Errorf("Updating cluster V2: Cluster V2 ID is empty") + } + if obj == nil { + return nil, fmt.Errorf("Updating cluster V2: Cluster V2 is nil") + } + resp := &ClusterV2{} + err := c.updateObjectV2(rancher2DefaultLocalClusterID, id, clusterV2APIType, obj, resp) + return resp, err +} + +func waitForClusterV2State(c *Config, id, state string, interval time.Duration) (*ClusterV2, error) { + if id == "" || state == "" { + return nil, fmt.Errorf("Cluster V2 ID and/or condition is nil") + } + + ctx, cancel := context.WithTimeout(context.Background(), interval) + defer cancel() + for { + obj, err := getClusterV2ByID(c, id) + if err != nil { + return nil, fmt.Errorf("Getting cluster V2 ID (%s): %v", id, err) + } + for i := range obj.Status.Conditions { + if obj.Status.Conditions[i].Type == state { + // Status of the condition, one of True, False, Unknown. + if obj.Status.Conditions[i].Status == "Unknown" { + break + } + if obj.Status.Conditions[i].Status == "True" { + return obj, nil + } + // When cluster condition is false, retrying if it has been updated for last rancher2WaitFalseCond seconds + lastUpdate, err := time.Parse(time.RFC3339, obj.Status.Conditions[i].LastUpdateTime) + if err == nil && time.Since(lastUpdate) < rancher2WaitFalseCond*time.Second { + break + } + return nil, fmt.Errorf("Cluster V2 ID %s: %s", id, obj.Status.Conditions[i].Message) + } + } + select { + case <-time.After(rancher2RetriesWait * time.Second): + case <-ctx.Done(): + return nil, fmt.Errorf("Timeout waiting for cluster V2 ID %s", id) + } + } +} + +func setClusterV2LegacyData(d *schema.ResourceData, c *Config) error { + if c == nil { + return fmt.Errorf("Setting cluster V2 legacy data: Provider config is nil") + } + clusterV1ID := d.Get("cluster_v1_id").(string) + if len(clusterV1ID) == 0 { + return fmt.Errorf("Setting cluster V2 legacy data: cluster_v1_id is empty") + } + + client, err := c.ManagementClient() + if err != nil { + return 
fmt.Errorf("Setting cluster V2 legacy data: %v", err) + } + + cluster := &Cluster{} + err = client.APIBaseClient.ByID(managementClient.ClusterType, clusterV1ID, cluster) + if err != nil { + if IsNotFound(err) || IsForbidden(err) { + log.Printf("[INFO] Cluster ID %s not found.", cluster.ID) + return nil + } + return fmt.Errorf("Setting cluster V2 legacy data: %v", err) + } + + clusterRegistrationToken, err := findClusterRegistrationToken(client, cluster.ID) + if err != nil && !IsForbidden(err) { + return fmt.Errorf("Setting cluster V2 legacy data: %v", err) + } + regToken, _ := flattenClusterRegistationToken(clusterRegistrationToken) + err = d.Set("cluster_registration_token", regToken) + if err != nil { + return fmt.Errorf("Setting cluster V2 legacy data: %v", err) + } + + kubeConfig, err := getClusterKubeconfig(c, cluster.ID) + if err != nil { + return fmt.Errorf("Setting cluster V2 legacy data: %v", err) + } + d.Set("kube_config", kubeConfig.Config) + + return nil +} diff --git a/rancher2/resource_rancher2_cluster_v2_test.go b/rancher2/resource_rancher2_cluster_v2_test.go new file mode 100644 index 000000000..c934920aa --- /dev/null +++ b/rancher2/resource_rancher2_cluster_v2_test.go @@ -0,0 +1,183 @@ +package rancher2 + +import ( + "fmt" + "testing" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +const testAccRancher2ClusterV2Type = "rancher2_cluster_v2" + +var ( + testAccRancher2ClusterV2 string + testAccRancher2ClusterV2Update string + testAccRancher2ClusterV2Config string + testAccRancher2ClusterV2UpdateConfig string +) + +func init() { + testAccRancher2ClusterV2 = ` +resource "` + testAccRancher2ClusterV2Type + `" "foo" { + name = "foo" + kubernetes_version = "v1.21.4+k3s1" + enable_network_policy = true + default_cluster_role_for_project_members = "user" +} +` + testAccRancher2ClusterV2Update = ` +resource "` + testAccRancher2ClusterV2Type + `" "foo" { + 
name = "foo" + kubernetes_version = "v1.21.4+k3s1" + enable_network_policy = false + default_cluster_role_for_project_members = "user2" +} + ` + testAccRancher2ClusterV2Config = testAccCheckRancher2ClusterSyncTestacc + testAccRancher2ClusterV2 + testAccRancher2ClusterV2UpdateConfig = testAccCheckRancher2ClusterSyncTestacc + testAccRancher2ClusterV2Update +} + +func TestAccRancher2ClusterV2_basic(t *testing.T) { + var cluster *ClusterV2 + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckRancher2ClusterV2Destroy, + Steps: []resource.TestStep{ + { + Config: testAccRancher2ClusterV2Config, + Check: resource.ComposeTestCheckFunc( + testAccCheckRancher2ClusterV2Exists(testAccRancher2ClusterV2Type+".foo", cluster), + resource.TestCheckResourceAttr(testAccRancher2ClusterV2Type+".foo", "name", "foo"), + resource.TestCheckResourceAttr(testAccRancher2ClusterV2Type+".foo", "fleet_namespace", "fleet-default"), + resource.TestCheckResourceAttr(testAccRancher2ClusterV2Type+".foo", "kubernetes_version", "v1.21.4+k3s1"), + resource.TestCheckResourceAttr(testAccRancher2ClusterV2Type+".foo", "enable_network_policy", "true"), + resource.TestCheckResourceAttr(testAccRancher2ClusterV2Type+".foo", "default_cluster_role_for_project_members", "user"), + ), + }, + { + Config: testAccRancher2ClusterV2UpdateConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckRancher2ClusterV2Exists(testAccRancher2ClusterV2Type+".foo", cluster), + resource.TestCheckResourceAttr(testAccRancher2ClusterV2Type+".foo", "name", "foo"), + resource.TestCheckResourceAttr(testAccRancher2ClusterV2Type+".foo", "fleet_namespace", "fleet-default"), + resource.TestCheckResourceAttr(testAccRancher2ClusterV2Type+".foo", "kubernetes_version", "v1.21.4+k3s1"), + resource.TestCheckResourceAttr(testAccRancher2ClusterV2Type+".foo", "enable_network_policy", "false"), + 
resource.TestCheckResourceAttr(testAccRancher2ClusterV2Type+".foo", "default_cluster_role_for_project_members", "user2"), + ), + }, + { + Config: testAccRancher2ClusterV2Config, + Check: resource.ComposeTestCheckFunc( + testAccCheckRancher2ClusterV2Exists(testAccRancher2ClusterV2Type+".foo", cluster), + resource.TestCheckResourceAttr(testAccRancher2ClusterV2Type+".foo", "name", "foo"), + resource.TestCheckResourceAttr(testAccRancher2ClusterV2Type+".foo", "fleet_namespace", "fleet-default"), + resource.TestCheckResourceAttr(testAccRancher2ClusterV2Type+".foo", "kubernetes_version", "v1.21.4+k3s1"), + resource.TestCheckResourceAttr(testAccRancher2ClusterV2Type+".foo", "enable_network_policy", "true"), + resource.TestCheckResourceAttr(testAccRancher2ClusterV2Type+".foo", "default_cluster_role_for_project_members", "user"), + ), + }, + }, + }) +} + +func TestAccRancher2ClusterV2_disappears(t *testing.T) { + var cluster *ClusterV2 + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckRancher2ClusterV2Destroy, + Steps: []resource.TestStep{ + { + Config: testAccRancher2ClusterV2Config, + Check: resource.ComposeTestCheckFunc( + testAccCheckRancher2ClusterV2Exists(testAccRancher2ClusterV2Type+".foo", cluster), + testAccRancher2ClusterV2Disappears(cluster), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccRancher2ClusterV2Disappears(cat *ClusterV2) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != testAccRancher2ClusterV2Type { + continue + } + cluster, err := getClusterV2ByID(testAccProvider.Meta().(*Config), rs.Primary.ID) + if err != nil { + if IsNotFound(err) || IsForbidden(err) { + return nil + } + return fmt.Errorf("testAccRancher2ClusterV2Disappears-get: %v", err) + } + err = deleteClusterV2(testAccProvider.Meta().(*Config), cluster) + if err != nil { + return 
fmt.Errorf("testAccRancher2ClusterV2Disappears-delete: %v", err) + } + stateConf := &resource.StateChangeConf{ + Pending: []string{}, + Target: []string{"removed"}, + Refresh: clusterV2StateRefreshFunc(testAccProvider.Meta(), cluster.ID), + Timeout: 120 * time.Second, + Delay: 1 * time.Second, + MinTimeout: 3 * time.Second, + } + _, waitErr := stateConf.WaitForState() + if waitErr != nil { + return fmt.Errorf("[ERROR] waiting for cluster (%s) to be deleted: %s", cluster.ID, waitErr) + } + } + return nil + + } +} + +func testAccCheckRancher2ClusterV2Exists(n string, cat *ClusterV2) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No cluster ID is set") + } + + foundReg, err := getClusterV2ByID(testAccProvider.Meta().(*Config), rs.Primary.ID) + if err != nil { + if IsNotFound(err) || IsForbidden(err) { + return nil + } + return fmt.Errorf("testAccCheckRancher2ClusterV2Exists: %v", err) + } + + cat = foundReg + + return nil + } +} + +func testAccCheckRancher2ClusterV2Destroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != testAccRancher2ClusterV2Type { + continue + } + _, err := getClusterV2ByID(testAccProvider.Meta().(*Config), rs.Primary.ID) + if err != nil { + if IsNotFound(err) { + return nil + } + return fmt.Errorf("testAccCheckRancher2ClusterV2Destroy: %v", err) + } + return fmt.Errorf("ClusterV2 still exists") + } + return nil +} diff --git a/rancher2/schema_cluster_v2.go b/rancher2/schema_cluster_v2.go new file mode 100644 index 000000000..6b19ed03f --- /dev/null +++ b/rancher2/schema_cluster_v2.go @@ -0,0 +1,95 @@ +package rancher2 + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func clusterV2Fields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: 
true, + ForceNew: true, + Description: "Cluster V2 name", + }, + "fleet_namespace": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "fleet-default", + }, + "kubernetes_version": { + Type: schema.TypeString, + Required: true, + Description: "Cluster V2 kubernetes version", + }, + "rke_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: "Cluster V2 rke config", + Elem: &schema.Resource{ + Schema: clusterV2RKEConfigFields(), + }, + }, + "agent_env_vars": { + Type: schema.TypeList, + Optional: true, + Description: "Cluster V2 default agent env vars", + Elem: &schema.Resource{ + Schema: envVarFields(), + }, + }, + "cloud_credential_secret_name": { + Type: schema.TypeString, + Optional: true, + Description: "Cluster V2 cloud credential secret name", + }, + "default_pod_security_policy_template_name": { + Type: schema.TypeString, + Optional: true, + Description: "Cluster V2 default pod security policy template name", + }, + "default_cluster_role_for_project_members": { + Type: schema.TypeString, + Optional: true, + Description: "Cluster V2 default cluster role for project members", + }, + "enable_network_policy": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: "Enable k8s network policy", + }, + // Computed attributes + "cluster_registration_token": { + Type: schema.TypeList, + MaxItems: 1, + Computed: true, + Sensitive: true, + Elem: &schema.Resource{ + Schema: clusterRegistationTokenFields(), + }, + }, + "kube_config": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "cluster_v1_id": { + Type: schema.TypeString, + Computed: true, + }, + "resource_version": { + Type: schema.TypeString, + Computed: true, + }, + } + + for k, v := range commonAnnotationLabelFields() { + s[k] = v + } + + return s +} diff --git a/rancher2/schema_cluster_v2_rke_config.go b/rancher2/schema_cluster_v2_rke_config.go new file mode 100644 index 000000000..d2a9b8b7c --- 
/dev/null +++ b/rancher2/schema_cluster_v2_rke_config.go @@ -0,0 +1,127 @@ +package rancher2 + +import ( + "fmt" + "reflect" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +//Types + +func clusterV2RKEConfigFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "additional_manifest": { + Type: schema.TypeString, + Optional: true, + Description: "Cluster V2 additional manifest", + }, + "local_auth_endpoint": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: "Cluster V2 local auth endpoint", + Elem: &schema.Resource{ + Schema: clusterV2RKEConfigLocalAuthEndpointFields(), + }, + }, + "upgrade_strategy": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: "Cluster V2 upgrade strategy", + Elem: &schema.Resource{ + Schema: clusterV2RKEConfigUpgradeStrategyFields(), + }, + }, + "chart_values": { + Type: schema.TypeString, + Optional: true, + Description: "Cluster V2 chart values. It should be in YAML format", + ValidateFunc: func(val interface{}, key string) (warns []string, errs []error) { + v, ok := val.(string) + if !ok || len(v) == 0 { + return + } + _, err := ghodssyamlToMapInterface(v) + if err != nil { + errs = append(errs, fmt.Errorf("%q must be in yaml format, error: %v", key, err)) + return + } + return + }, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if old == "" || new == "" { + return false + } + oldMap, _ := ghodssyamlToMapInterface(old) + newMap, _ := ghodssyamlToMapInterface(new) + return reflect.DeepEqual(oldMap, newMap) + }, + }, + "machine_global_config": { + Type: schema.TypeString, + Optional: true, + Description: "Cluster V2 machine global config", + ValidateFunc: func(val interface{}, key string) (warns []string, errs []error) { + v, ok := val.(string) + if !ok || len(v) == 0 { + return + } + _, err := ghodssyamlToMapInterface(v) + if err != nil { + errs = append(errs, fmt.Errorf("%q must be in yaml format, error: %v", key, 
err)) + return + } + return + }, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if old == "" || new == "" { + return false + } + oldMap, _ := ghodssyamlToMapInterface(old) + newMap, _ := ghodssyamlToMapInterface(new) + return reflect.DeepEqual(oldMap, newMap) + }, + }, + "machine_pools": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: "Cluster V2 machine pools", + Elem: &schema.Resource{ + Schema: clusterV2RKEConfigMachinePoolFields(), + }, + }, + "machine_selector_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: "Cluster V2 machine selector config", + Elem: &schema.Resource{ + Schema: clusterV2RKEConfigSystemConfigFields(), + }, + }, + "registries": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: "Cluster V2 registries", + Elem: &schema.Resource{ + Schema: clusterV2RKEConfigRegistryFields(), + }, + }, + "etcd": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: "Cluster V2 etcd", + Elem: &schema.Resource{ + Schema: clusterV2RKEConfigETCDFields(), + }, + }, + } + + return s +} diff --git a/rancher2/schema_cluster_v2_rke_config_etcd.go b/rancher2/schema_cluster_v2_rke_config_etcd.go new file mode 100644 index 000000000..368664d29 --- /dev/null +++ b/rancher2/schema_cluster_v2_rke_config_etcd.go @@ -0,0 +1,82 @@ +package rancher2 + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +//Types + +func clusterV2RKEConfigETCDSnapshotS3Fields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: "ETCD snapshot S3 bucket", + }, + "cloud_credential_name": { + Type: schema.TypeString, + Optional: true, + Description: "ETCD snapshot S3 cloud credential name", + }, + "endpoint": { + Type: schema.TypeString, + Required: true, + Description: "ETCD snapshot S3 endpoint", + }, + "endpoint_ca": { + Type: 
schema.TypeString, + Optional: true, + Description: "ETCD snapshot S3 endpoint CA", + }, + "folder": { + Type: schema.TypeString, + Optional: true, + Description: "ETCD snapshot S3 folder", + }, + "region": { + Type: schema.TypeString, + Optional: true, + Description: "ETCD snapshot S3 region", + }, + "skip_ssl_verify": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Disable ETCD skip ssl verify", + }, + } + + return s +} + +func clusterV2RKEConfigETCDFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "disable_snapshots": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Disable ETCD snapshots", + }, + "snapshot_schedule_cron": { + Type: schema.TypeString, + Optional: true, + Description: "ETCD snapshot schedule cron (e.g `\"0 */5 * * *\"`)", + }, + "snapshot_retention": { + Type: schema.TypeInt, + Optional: true, + Description: "ETCD snapshot retention", + }, + "s3_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: "ETCD snapshot S3 config", + Elem: &schema.Resource{ + Schema: clusterV2RKEConfigETCDSnapshotS3Fields(), + }, + }, + } + + return s +} diff --git a/rancher2/schema_cluster_v2_rke_config_local_auth_endpoint.go b/rancher2/schema_cluster_v2_rke_config_local_auth_endpoint.go new file mode 100644 index 000000000..b2fdc9e08 --- /dev/null +++ b/rancher2/schema_cluster_v2_rke_config_local_auth_endpoint.go @@ -0,0 +1,27 @@ +package rancher2 + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +//Types + +func clusterV2RKEConfigLocalAuthEndpointFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "ca_certs": { + Type: schema.TypeString, + Optional: true, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "fqdn": { + Type: schema.TypeString, + Optional: true, + }, + } + + return s +} diff --git a/rancher2/schema_cluster_v2_rke_config_machine_pool.go 
b/rancher2/schema_cluster_v2_rke_config_machine_pool.go new file mode 100644 index 000000000..27dcbfb59 --- /dev/null +++ b/rancher2/schema_cluster_v2_rke_config_machine_pool.go @@ -0,0 +1,116 @@ +package rancher2 + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +//Types + +func clusterV2RKEConfigMachinePoolMachineConfigFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "kind": { + Type: schema.TypeString, + Required: true, + Description: "Machine config kind", + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: "Machine config name", + }, + } + + return s +} + +func clusterV2RKEConfigMachinePoolRollingUpdateFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "max_unavailable": { + Type: schema.TypeString, + Optional: true, + Description: "Rolling update max unavailable", + }, + "max_surge": { + Type: schema.TypeString, + Optional: true, + Description: "Rolling update max surge", + }, + } + + return s +} + +func clusterV2RKEConfigMachinePoolFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Machine pool name", + }, + "cloud_credential_secret_name": { + Type: schema.TypeString, + Required: true, + Description: "Machine pool cloud credential secret name", + }, + "machine_config": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Description: "Machine config data", + Elem: &schema.Resource{ + Schema: clusterV2RKEConfigMachinePoolMachineConfigFields(), + }, + }, + "control_plane_role": { + Type: schema.TypeBool, + Optional: true, + Description: "Machine pool control plane role", + }, + "etcd_role": { + Type: schema.TypeBool, + Optional: true, + Description: "Machine pool etcd role", + }, + "paused": { + Type: schema.TypeBool, + Optional: true, + Description: "Machine pool paused", + }, + 
"quantity": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + ValidateFunc: validation.IntAtLeast(1), + Description: "Machine pool quantity", + }, + "rolling_update": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Machine pool rolling update", + Elem: &schema.Resource{ + Schema: clusterV2RKEConfigMachinePoolRollingUpdateFields(), + }, + }, + "taints": { + Type: schema.TypeList, + Optional: true, + Description: "Machine pool taints", + Elem: &schema.Resource{ + Schema: taintV2Fields(), + }, + }, + "worker_role": { + Type: schema.TypeBool, + Optional: true, + Description: "Machine pool worker role", + }, + } + + for k, v := range commonAnnotationLabelFields() { + s[k] = v + } + + return s +} diff --git a/rancher2/schema_cluster_v2_rke_config_registry.go b/rancher2/schema_cluster_v2_rke_config_registry.go new file mode 100644 index 000000000..6b0235f93 --- /dev/null +++ b/rancher2/schema_cluster_v2_rke_config_registry.go @@ -0,0 +1,87 @@ +package rancher2 + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +//Types + +func clusterV2RKEConfigRegistryConfigFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "hostname": { + Type: schema.TypeString, + Required: true, + Description: "Registry hostname", + }, + "auth_config_secret_name": { + Type: schema.TypeString, + Optional: true, + Description: "Registry auth config secret name", + }, + "tls_secret_name": { + Type: schema.TypeString, + Optional: true, + Description: "Registry TLS secret name. 
TLS is a pair of Cert/Key", + }, + "ca_bundle": { + Type: schema.TypeString, + Optional: true, + Description: "Registry CA bundle", + }, + "insecure": { + Type: schema.TypeBool, + Optional: true, + Description: "Registry insecure connectivity", + }, + } + + return s +} + +func clusterV2RKEConfigRegistryMirrorFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "endpoints": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "Registry mirror endpoints", + }, + "hostname": { + Type: schema.TypeString, + Required: true, + Description: "Registry hostname", + }, + "rewrites": { + Type: schema.TypeMap, + Optional: true, + Description: "Registry mirror rewrites", + }, + } + + return s +} + +func clusterV2RKEConfigRegistryFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "configs": { + Type: schema.TypeList, + Optional: true, + Description: "Registry config", + Elem: &schema.Resource{ + Schema: clusterV2RKEConfigRegistryConfigFields(), + }, + }, + "mirrors": { + Type: schema.TypeList, + Optional: true, + Description: "Registry mirrors", + Elem: &schema.Resource{ + Schema: clusterV2RKEConfigRegistryMirrorFields(), + }, + }, + } + + return s +} diff --git a/rancher2/schema_cluster_v2_rke_config_system_config.go b/rancher2/schema_cluster_v2_rke_config_system_config.go new file mode 100644 index 000000000..0c74177ca --- /dev/null +++ b/rancher2/schema_cluster_v2_rke_config_system_config.go @@ -0,0 +1,73 @@ +package rancher2 + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +//Types + +func clusterV2RKEConfigSystemConfigLabelSelectorExpressionFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Optional: true, + Description: "Label selector requirement key", + }, + "operator": { + Type: schema.TypeString, + Optional: true, + Description: "Label selector operator", + }, + "values": 
{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "Label selector requirement values", + }, + } + + return s +} + +func clusterV2RKEConfigSystemConfigLabelSelectorFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "match_labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Label selector match labels", + }, + "match_expressions": { + Type: schema.TypeList, + Optional: true, + Description: "Label selector match expressions", + Elem: &schema.Resource{ + Schema: clusterV2RKEConfigSystemConfigLabelSelectorExpressionFields(), + }, + }, + } + + return s +} + +func clusterV2RKEConfigSystemConfigFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "machine_label_selector": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: "Machine label selector", + Elem: &schema.Resource{ + Schema: clusterV2RKEConfigSystemConfigLabelSelectorFields(), + }, + }, + "config": { + Type: schema.TypeMap, + Optional: true, + Description: "Machine selector config", + }, + } + + return s +} diff --git a/rancher2/schema_cluster_v2_rke_config_upgrade_strategy.go b/rancher2/schema_cluster_v2_rke_config_upgrade_strategy.go new file mode 100644 index 000000000..98ab3fe3d --- /dev/null +++ b/rancher2/schema_cluster_v2_rke_config_upgrade_strategy.go @@ -0,0 +1,103 @@ +package rancher2 + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +//Types + +func clusterV2RKEConfigUpgradeStrategyDrainOptionsFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "Drain options enabled?", + }, + "force": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Drain options force", + }, + "ignore_daemon_sets": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "Drain options ignore daemon 
sets", + }, + "ignore_errors": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Drain options ignore errors", + }, + "delete_empty_dir_data": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Drain options delete empty dir data", + }, + "disable_eviction": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Drain options disable eviction", + }, + "grace_period": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Drain options grace period", + }, + "timeout": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Drain options timeout", + }, + "skip_wait_for_delete_timeout_seconds": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Drain options skip wait for delete timeout seconds", + }, + } + + return s +} + +func clusterV2RKEConfigUpgradeStrategyFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "control_plane_concurrency": { + Type: schema.TypeString, + Optional: true, + Description: "How many controlplane nodes should be upgrade at time, 0 is infinite. 
Percentages are also accepted", + }, + "control_plane_drain_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Controlplane nodes drain options", + Elem: &schema.Resource{ + Schema: clusterV2RKEConfigUpgradeStrategyDrainOptionsFields(), + }, + }, + "worker_concurrency": { + Type: schema.TypeString, + Optional: true, + Description: "How many worker nodes should be upgrade at time", + }, + "worker_drain_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Worker nodes drain options", + Elem: &schema.Resource{ + Schema: clusterV2RKEConfigUpgradeStrategyDrainOptionsFields(), + }, + }, + } + + return s +} diff --git a/rancher2/schema_taint_v2.go b/rancher2/schema_taint_v2.go new file mode 100644 index 000000000..8ef89bdfc --- /dev/null +++ b/rancher2/schema_taint_v2.go @@ -0,0 +1,39 @@ +package rancher2 + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +const ( + taintV2EffectNoExecute = "NoExecute" + taintV2EffectNoSchedule = "NoSchedule" + taintV2EffectPreferNoSchedule = "PreferNoSchedule" +) + +var ( + taintV2EffectTypes = []string{taintV2EffectNoExecute, taintV2EffectNoSchedule, taintV2EffectPreferNoSchedule} +) + +//Schemas + +func taintV2Fields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + "effect": { + Type: schema.TypeString, + Optional: true, + Default: taintEffectNoSchedule, + ValidateFunc: validation.StringInSlice(taintV2EffectTypes, true), + }, + } + + return s +} diff --git a/rancher2/structure_cluster_v2.go b/rancher2/structure_cluster_v2.go new file mode 100644 index 000000000..cda42adef --- /dev/null +++ b/rancher2/structure_cluster_v2.go @@ -0,0 +1,123 @@ +package rancher2 + +import ( + 
"github.com/hashicorp/terraform-plugin-sdk/helper/schema" + norman "github.com/rancher/norman/types" + provisioningV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" +) + +const ( + clusterV2Kind = "Cluster" + clusterV2APIVersion = "provisioning.cattle.io/v1" + clusterV2APIType = "provisioning.cattle.io.cluster" + clusterV2ClusterIDsep = "/" + clusterV2ActiveCondition = "Updated" + clusterV2CreatedCondition = "Created" +) + +//Types + +type ClusterV2 struct { + norman.Resource + provisioningV1.Cluster +} + +// Flatteners + +func flattenClusterV2(d *schema.ResourceData, in *ClusterV2) error { + if in == nil { + return nil + } + + if len(in.ID) > 0 { + d.SetId(in.ID) + } + d.Set("name", in.ObjectMeta.Name) + d.Set("fleet_namespace", in.ObjectMeta.Namespace) + err := d.Set("annotations", toMapInterface(in.ObjectMeta.Annotations)) + if err != nil { + return err + } + err = d.Set("labels", toMapInterface(in.ObjectMeta.Labels)) + if err != nil { + return err + } + d.Set("resource_version", in.ObjectMeta.ResourceVersion) + + if len(in.Spec.KubernetesVersion) > 0 { + d.Set("kubernetes_version", in.Spec.KubernetesVersion) + } + if in.Spec.RKEConfig != nil { + d.Set("rke_config", flattenClusterV2RKEConfig(in.Spec.RKEConfig)) + } + if in.Spec.AgentEnvVars != nil && len(in.Spec.AgentEnvVars) > 0 { + d.Set("agent_env_vars", flattenEnvVarsV2(in.Spec.AgentEnvVars)) + } + if len(in.Spec.CloudCredentialSecretName) > 0 { + d.Set("cloud_credential_secret_name", in.Spec.CloudCredentialSecretName) + } + if len(in.Spec.DefaultPodSecurityPolicyTemplateName) > 0 { + d.Set("default_pod_security_policy_template_name", in.Spec.DefaultPodSecurityPolicyTemplateName) + } + if len(in.Spec.DefaultClusterRoleForProjectMembers) > 0 { + d.Set("default_cluster_role_for_project_members", in.Spec.DefaultClusterRoleForProjectMembers) + } + if in.Spec.EnableNetworkPolicy != nil { + d.Set("enable_network_policy", *in.Spec.EnableNetworkPolicy) + } + if 
len(in.Status.ClusterName) > 0 { + d.Set("cluster_v1_id", in.Status.ClusterName) + } + + return nil +} + +// Expanders + +func expandClusterV2(in *schema.ResourceData) *ClusterV2 { + if in == nil { + return nil + } + obj := &ClusterV2{} + + if len(in.Id()) > 0 { + obj.ID = in.Id() + } + obj.TypeMeta.Kind = clusterV2Kind + obj.TypeMeta.APIVersion = clusterV2APIVersion + + obj.ObjectMeta.Name = in.Get("name").(string) + obj.ObjectMeta.Namespace = in.Get("fleet_namespace").(string) + if v, ok := in.Get("annotations").(map[string]interface{}); ok && len(v) > 0 { + obj.ObjectMeta.Annotations = toMapString(v) + } + if v, ok := in.Get("labels").(map[string]interface{}); ok && len(v) > 0 { + obj.ObjectMeta.Labels = toMapString(v) + } + if v, ok := in.Get("resource_version").(string); ok { + obj.ObjectMeta.ResourceVersion = v + } + if v, ok := in.Get("kubernetes_version").(string); ok && len(v) > 0 { + obj.Spec.KubernetesVersion = v + } + if v, ok := in.Get("rke_config").([]interface{}); ok { + obj.Spec.RKEConfig = expandClusterV2RKEConfig(v) + } + if v, ok := in.Get("agent_env_vars").([]interface{}); ok { + obj.Spec.AgentEnvVars = expandEnvVarsV2(v) + } + if v, ok := in.Get("cloud_credential_secret_name").(string); ok && len(v) > 0 { + obj.Spec.CloudCredentialSecretName = v + } + if v, ok := in.Get("default_pod_security_policy_template_name").(string); ok && len(v) > 0 { + obj.Spec.DefaultPodSecurityPolicyTemplateName = v + } + if v, ok := in.Get("default_cluster_role_for_project_members").(string); ok && len(v) > 0 { + obj.Spec.DefaultClusterRoleForProjectMembers = v + } + if v, ok := in.Get("enable_network_policy").(bool); ok { + obj.Spec.EnableNetworkPolicy = &v + } + + return obj +} diff --git a/rancher2/structure_cluster_v2_rke_config.go b/rancher2/structure_cluster_v2_rke_config.go new file mode 100644 index 000000000..94d4925bf --- /dev/null +++ b/rancher2/structure_cluster_v2_rke_config.go @@ -0,0 +1,90 @@ +package rancher2 + +import ( + provisionv1 
"github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" +) + +// Flatteners + +func flattenClusterV2RKEConfig(in *provisionv1.RKEConfig) []interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + + if len(in.AdditionalManifest) > 0 { + obj["additional_manifest"] = in.AdditionalManifest + } + + obj["local_auth_endpoint"] = flattenClusterV2RKEConfigLocalAuthEndpoint(in.LocalClusterAuthEndpoint) + obj["upgrade_strategy"] = flattenClusterV2RKEConfigUpgradeStrategy(in.UpgradeStrategy) + + if in.ChartValues.Data != nil && len(in.ChartValues.Data) > 0 { + yamlData, _ := interfaceToGhodssyaml(in.ChartValues.Data) + obj["chart_values"] = yamlData + } + if in.MachineGlobalConfig.Data != nil && len(in.MachineGlobalConfig.Data) > 0 { + yamlData, _ := interfaceToGhodssyaml(in.MachineGlobalConfig.Data) + obj["machine_global_config"] = yamlData + } + if in.MachinePools != nil && len(in.MachinePools) > 0 { + obj["machine_pools"] = flattenClusterV2RKEConfigMachinePools(in.MachinePools) + } + if in.MachineSelectorConfig != nil && len(in.MachineSelectorConfig) > 0 { + obj["machine_selector_config"] = flattenClusterV2RKEConfigSystemConfig(in.MachineSelectorConfig) + } + if in.Registries != nil { + obj["registries"] = flattenClusterV2RKEConfigRegistry(in.Registries) + } + if in.ETCD != nil { + obj["etcd"] = flattenClusterV2RKEConfigETCD(in.ETCD) + } + + return []interface{}{obj} +} + +// Expanders + +func expandClusterV2RKEConfig(p []interface{}) *provisionv1.RKEConfig { + obj := &provisionv1.RKEConfig{} + if p == nil || len(p) == 0 || p[0] == nil { + return obj + } + + in := p[0].(map[string]interface{}) + + if v, ok := in["additional_manifest"].(string); ok && len(v) > 0 { + obj.AdditionalManifest = v + } + + if v, ok := in["local_auth_endpoint"].([]interface{}); ok && len(v) > 0 { + obj.LocalClusterAuthEndpoint = expandClusterV2RKEConfigLocalAuthEndpoint(v) + } + if v, ok := in["upgrade_strategy"].([]interface{}); ok && len(v) > 0 { + 
obj.UpgradeStrategy = expandClusterV2RKEConfigUpgradeStrategy(v) + } + + if v, ok := in["chart_values"].(string); ok && len(v) > 0 { + values, _ := ghodssyamlToMapInterface(v) + obj.ChartValues.Data = values + } + if v, ok := in["machine_global_config"].(string); ok && len(v) > 0 { + values, _ := ghodssyamlToMapInterface(v) + obj.MachineGlobalConfig.Data = values + } + if v, ok := in["machine_pools"].([]interface{}); ok && len(v) > 0 { + obj.MachinePools = expandClusterV2RKEConfigMachinePools(v) + } + if v, ok := in["machine_selector_config"].([]interface{}); ok && len(v) > 0 { + obj.MachineSelectorConfig = expandClusterV2RKEConfigSystemConfig(v) + } + if v, ok := in["registries"].([]interface{}); ok && len(v) > 0 { + obj.Registries = expandClusterV2RKEConfigRegistry(v) + } + if v, ok := in["etcd"].([]interface{}); ok && len(v) > 0 { + obj.ETCD = expandClusterV2RKEConfigETCD(v) + } + + return obj +} diff --git a/rancher2/structure_cluster_v2_rke_config_etcd.go b/rancher2/structure_cluster_v2_rke_config_etcd.go new file mode 100644 index 000000000..d9c07c272 --- /dev/null +++ b/rancher2/structure_cluster_v2_rke_config_etcd.go @@ -0,0 +1,117 @@ +package rancher2 + +import ( + rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" +) + +// Flatteners + +func flattenClusterV2RKEConfigETCDSnapshotS3(in *rkev1.ETCDSnapshotS3) []interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + + if len(in.Bucket) > 0 { + obj["bucket"] = in.Bucket + } + if len(in.CloudCredentialName) > 0 { + obj["cloud_credential_name"] = in.CloudCredentialName + } + if len(in.Endpoint) > 0 { + obj["endpoint"] = in.Endpoint + } + if len(in.EndpointCA) > 0 { + obj["endpoint_ca"] = in.EndpointCA + } + if len(in.Folder) > 0 { + obj["folder"] = in.Folder + } + if len(in.Region) > 0 { + obj["region"] = in.Region + } + obj["skip_ssl_verify"] = in.SkipSSLVerify + + return []interface{}{obj} +} + +func flattenClusterV2RKEConfigETCD(in *rkev1.ETCD) 
[]interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + + obj["disable_snapshots"] = in.DisableSnapshots + + if len(in.SnapshotScheduleCron) > 0 { + obj["snapshot_schedule_cron"] = in.SnapshotScheduleCron + } + if in.SnapshotRetention > 0 { + obj["snapshot_retention"] = in.SnapshotRetention + } + if in.S3 != nil { + obj["s3_config"] = flattenClusterV2RKEConfigETCDSnapshotS3(in.S3) + } + + return []interface{}{obj} +} + +// Expanders + +func expandClusterV2RKEConfigETCDSnapshotS3(p []interface{}) *rkev1.ETCDSnapshotS3 { + if p == nil || len(p) == 0 || p[0] == nil { + return nil + } + + obj := &rkev1.ETCDSnapshotS3{} + + in := p[0].(map[string]interface{}) + + if v, ok := in["bucket"].(string); ok && len(v) > 0 { + obj.Bucket = v + } + if v, ok := in["cloud_credential_name"].(string); ok && len(v) > 0 { + obj.CloudCredentialName = v + } + if v, ok := in["endpoint"].(string); ok && len(v) > 0 { + obj.Endpoint = v + } + if v, ok := in["endpoint_ca"].(string); ok && len(v) > 0 { + obj.EndpointCA = v + } + if v, ok := in["folder"].(string); ok && len(v) > 0 { + obj.Folder = v + } + if v, ok := in["region"].(string); ok && len(v) > 0 { + obj.Region = v + } + obj.SkipSSLVerify = in["skip_ssl_verify"].(bool) + + return obj +} + +func expandClusterV2RKEConfigETCD(p []interface{}) *rkev1.ETCD { + if p == nil || len(p) == 0 || p[0] == nil { + return nil + } + + obj := &rkev1.ETCD{} + + in := p[0].(map[string]interface{}) + + obj.DisableSnapshots = in["disable_snapshots"].(bool) + + if v, ok := in["snapshot_schedule_cron"].(string); ok && len(v) > 0 { + obj.SnapshotScheduleCron = v + } + if v, ok := in["snapshot_retention"].(int); ok && v > 0 { + obj.SnapshotRetention = v + } + if v, ok := in["s3_config"].([]interface{}); ok && len(v) > 0 { + obj.S3 = expandClusterV2RKEConfigETCDSnapshotS3(v) + } + + return obj +} diff --git a/rancher2/structure_cluster_v2_rke_config_etcd_test.go b/rancher2/structure_cluster_v2_rke_config_etcd_test.go new 
file mode 100644 index 000000000..446beb830 --- /dev/null +++ b/rancher2/structure_cluster_v2_rke_config_etcd_test.go @@ -0,0 +1,138 @@ +package rancher2 + +import ( + "reflect" + "testing" + + rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" +) + +var ( + testClusterV2RKEConfigETCDSnapshotS3Conf *rkev1.ETCDSnapshotS3 + testClusterV2RKEConfigETCDSnapshotS3Interface []interface{} + testClusterV2RKEConfigETCDConf *rkev1.ETCD + testClusterV2RKEConfigETCDInterface []interface{} +) + +func init() { + testClusterV2RKEConfigETCDSnapshotS3Conf = &rkev1.ETCDSnapshotS3{ + Bucket: "bucket", + CloudCredentialName: "cloud_credential_name", + Endpoint: "endpoint", + EndpointCA: "endpoint_ca", + Folder: "folder", + Region: "region", + SkipSSLVerify: true, + } + + testClusterV2RKEConfigETCDSnapshotS3Interface = []interface{}{ + map[string]interface{}{ + "bucket": "bucket", + "cloud_credential_name": "cloud_credential_name", + "endpoint": "endpoint", + "endpoint_ca": "endpoint_ca", + "folder": "folder", + "region": "region", + "skip_ssl_verify": true, + }, + } + testClusterV2RKEConfigETCDConf = &rkev1.ETCD{ + DisableSnapshots: true, + SnapshotScheduleCron: "snapshot_schedule_cron", + SnapshotRetention: 10, + S3: testClusterV2RKEConfigETCDSnapshotS3Conf, + } + + testClusterV2RKEConfigETCDInterface = []interface{}{ + map[string]interface{}{ + "disable_snapshots": true, + "snapshot_schedule_cron": "snapshot_schedule_cron", + "snapshot_retention": 10, + "s3_config": testClusterV2RKEConfigETCDSnapshotS3Interface, + }, + } +} + +func TestFlattenClusterV2RKEConfigETCDSnapshotS3(t *testing.T) { + + cases := []struct { + Input *rkev1.ETCDSnapshotS3 + ExpectedOutput []interface{} + }{ + { + testClusterV2RKEConfigETCDSnapshotS3Conf, + testClusterV2RKEConfigETCDSnapshotS3Interface, + }, + } + + for _, tc := range cases { + output := flattenClusterV2RKEConfigETCDSnapshotS3(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from 
flattener.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestFlattenClusterV2RKEConfigETCD(t *testing.T) { + + cases := []struct { + Input *rkev1.ETCD + ExpectedOutput []interface{} + }{ + { + testClusterV2RKEConfigETCDConf, + testClusterV2RKEConfigETCDInterface, + }, + } + + for _, tc := range cases { + output := flattenClusterV2RKEConfigETCD(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestExpandClusterV2RKEConfigETCDSnapshotS3(t *testing.T) { + + cases := []struct { + Input []interface{} + ExpectedOutput *rkev1.ETCDSnapshotS3 + }{ + { + testClusterV2RKEConfigETCDSnapshotS3Interface, + testClusterV2RKEConfigETCDSnapshotS3Conf, + }, + } + + for _, tc := range cases { + output := expandClusterV2RKEConfigETCDSnapshotS3(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestExpandClusterV2RKEConfigETCD(t *testing.T) { + + cases := []struct { + Input []interface{} + ExpectedOutput *rkev1.ETCD + }{ + { + testClusterV2RKEConfigETCDInterface, + testClusterV2RKEConfigETCDConf, + }, + } + + for _, tc := range cases { + output := expandClusterV2RKEConfigETCD(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} diff --git a/rancher2/structure_cluster_v2_rke_config_local_auth_endpoint.go b/rancher2/structure_cluster_v2_rke_config_local_auth_endpoint.go new file mode 100644 index 000000000..458cd461f --- /dev/null +++ b/rancher2/structure_cluster_v2_rke_config_local_auth_endpoint.go @@ -0,0 +1,50 @@ +package rancher2 + +import ( + rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" +) + +// Flatteners + +func 
flattenClusterV2RKEConfigLocalAuthEndpoint(in rkev1.LocalClusterAuthEndpoint) []interface{} { + empty := rkev1.LocalClusterAuthEndpoint{} + if in == empty { + return nil + } + + obj := make(map[string]interface{}) + + if len(in.CACerts) > 0 { + obj["ca_certs"] = in.CACerts + } + obj["enabled"] = in.Enabled + if len(in.FQDN) > 0 { + obj["fqdn"] = in.FQDN + } + + return []interface{}{obj} +} + +// Expanders + +func expandClusterV2RKEConfigLocalAuthEndpoint(p []interface{}) rkev1.LocalClusterAuthEndpoint { + if p == nil || len(p) == 0 || p[0] == nil { + return rkev1.LocalClusterAuthEndpoint{} + } + + obj := rkev1.LocalClusterAuthEndpoint{} + + in := p[0].(map[string]interface{}) + + if v, ok := in["ca_certs"].(string); ok && len(v) > 0 { + obj.CACerts = v + } + if v, ok := in["enabled"].(bool); ok { + obj.Enabled = v + } + if v, ok := in["fqdn"].(string); ok && len(v) > 0 { + obj.FQDN = v + } + + return obj +} diff --git a/rancher2/structure_cluster_v2_rke_config_local_auth_endpoint_test.go b/rancher2/structure_cluster_v2_rke_config_local_auth_endpoint_test.go new file mode 100644 index 000000000..b15f47e51 --- /dev/null +++ b/rancher2/structure_cluster_v2_rke_config_local_auth_endpoint_test.go @@ -0,0 +1,71 @@ +package rancher2 + +import ( + "reflect" + "testing" + + rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" +) + +var ( + testClusterV2RKEConfigLocalAuthEndpointConf rkev1.LocalClusterAuthEndpoint + testClusterV2RKEConfigLocalAuthEndpointInterface []interface{} +) + +func init() { + testClusterV2RKEConfigLocalAuthEndpointConf = rkev1.LocalClusterAuthEndpoint{ + CACerts: "ca_certs", + Enabled: true, + FQDN: "fqdn", + } + + testClusterV2RKEConfigLocalAuthEndpointInterface = []interface{}{ + map[string]interface{}{ + "ca_certs": "ca_certs", + "enabled": true, + "fqdn": "fqdn", + }, + } +} + +func TestFlattenClusterV2RKEConfigLocalAuthEndpoint(t *testing.T) { + + cases := []struct { + Input rkev1.LocalClusterAuthEndpoint + ExpectedOutput 
[]interface{} + }{ + { + testClusterV2RKEConfigLocalAuthEndpointConf, + testClusterV2RKEConfigLocalAuthEndpointInterface, + }, + } + + for _, tc := range cases { + output := flattenClusterV2RKEConfigLocalAuthEndpoint(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestExpandClusterV2RKEConfigLocalAuthEndpoint(t *testing.T) { + + cases := []struct { + Input []interface{} + ExpectedOutput rkev1.LocalClusterAuthEndpoint + }{ + { + testClusterV2RKEConfigLocalAuthEndpointInterface, + testClusterV2RKEConfigLocalAuthEndpointConf, + }, + } + + for _, tc := range cases { + output := expandClusterV2RKEConfigLocalAuthEndpoint(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} diff --git a/rancher2/structure_cluster_v2_rke_config_machine_pool.go b/rancher2/structure_cluster_v2_rke_config_machine_pool.go new file mode 100644 index 000000000..6eac846e0 --- /dev/null +++ b/rancher2/structure_cluster_v2_rke_config_machine_pool.go @@ -0,0 +1,176 @@ +package rancher2 + +import ( + provisionv1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// Flatteners + +func flattenClusterV2RKEConfigMachinePoolMachineConfig(in *corev1.ObjectReference) []interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + + obj["kind"] = in.Kind + obj["name"] = in.Name + + return []interface{}{obj} +} + +func flattenClusterV2RKEConfigMachinePoolRollingUpdate(in *provisionv1.RKEMachinePoolRollingUpdate) []interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + + if in.MaxSurge != nil { + obj["max_surge"] = in.MaxSurge.String() + } + if in.MaxUnavailable != nil { + obj["max_unavailable"] 
= in.MaxUnavailable.String() + } + + return []interface{}{obj} +} + +func flattenClusterV2RKEConfigMachinePools(p []provisionv1.RKEMachinePool) []interface{} { + if p == nil { + return nil + } + out := make([]interface{}, len(p)) + for i, in := range p { + obj := map[string]interface{}{} + + obj["name"] = in.Name + if len(in.CloudCredentialSecretName) > 0 { + obj["cloud_credential_secret_name"] = in.CloudCredentialSecretName + } + if in.NodeConfig != nil { + obj["machine_config"] = flattenClusterV2RKEConfigMachinePoolMachineConfig(in.NodeConfig) + } + obj["control_plane_role"] = in.ControlPlaneRole + obj["etcd_role"] = in.EtcdRole + + if len(in.MachineDeploymentAnnotations) > 0 { + obj["annotations"] = toMapInterface(in.MachineDeploymentAnnotations) + } + if len(in.MachineDeploymentLabels) > 0 { + obj["labels"] = toMapInterface(in.MachineDeploymentLabels) + } + obj["paused"] = in.Paused + if in.Quantity != nil { + obj["quantity"] = int(*in.Quantity) + } + if in.RollingUpdate != nil { + obj["rolling_update"] = flattenClusterV2RKEConfigMachinePoolRollingUpdate(in.RollingUpdate) + } + if len(in.Taints) > 0 { + obj["taints"] = flattenTaintsV2(in.Taints) + } + obj["worker_role"] = in.WorkerRole + out[i] = obj + } + + return out +} + +// Expanders + +func expandClusterV2RKEConfigMachinePoolMachineConfig(p []interface{}) *corev1.ObjectReference { + if p == nil || len(p) == 0 || p[0] == nil { + return nil + } + + obj := &corev1.ObjectReference{} + + in := p[0].(map[string]interface{}) + + if v, ok := in["kind"].(string); ok { + obj.Kind = v + } + if v, ok := in["name"].(string); ok { + obj.Name = v + } + + return obj +} + +func expandClusterV2RKEConfigMachinePoolRollingUpdate(p []interface{}) *provisionv1.RKEMachinePoolRollingUpdate { + if p == nil || len(p) == 0 || p[0] == nil { + return nil + } + + obj := &provisionv1.RKEMachinePoolRollingUpdate{} + + in := p[0].(map[string]interface{}) + + if v, ok := in["max_surge"].(string); ok && len(v) > 0 { + maxSurge := 
intstr.FromString(v) + obj.MaxSurge = &maxSurge + } + if v, ok := in["max_unavailable"].(string); ok && len(v) > 0 { + maxUnavailable := intstr.FromString(v) + obj.MaxUnavailable = &maxUnavailable + } + + return obj +} + +func expandClusterV2RKEConfigMachinePools(p []interface{}) []provisionv1.RKEMachinePool { + if p == nil || len(p) == 0 || p[0] == nil { + return nil + } + + out := make([]provisionv1.RKEMachinePool, len(p)) + for i := range p { + in := p[i].(map[string]interface{}) + obj := provisionv1.RKEMachinePool{} + + if v, ok := in["name"].(string); ok { + obj.Name = v + obj.DisplayName = v + } + if v, ok := in["cloud_credential_secret_name"].(string); ok && len(v) > 0 { + obj.CloudCredentialSecretName = v + } + if v, ok := in["machine_config"].([]interface{}); ok && len(v) > 0 { + obj.NodeConfig = expandClusterV2RKEConfigMachinePoolMachineConfig(v) + } + if v, ok := in["control_plane_role"].(bool); ok { + obj.ControlPlaneRole = v + } + if v, ok := in["etcd_role"].(bool); ok { + obj.EtcdRole = v + } + if v, ok := in["annotations"].(map[string]interface{}); ok && len(v) > 0 { + obj.MachineDeploymentAnnotations = toMapString(v) + } + if v, ok := in["labels"].(map[string]interface{}); ok && len(v) > 0 { + obj.MachineDeploymentLabels = toMapString(v) + } + if v, ok := in["paused"].(bool); ok { + obj.Paused = v + } + if v, ok := in["quantity"].(int); ok { + quantity := int32(v) + obj.Quantity = &quantity + } + if v, ok := in["rolling_update"].([]interface{}); ok && len(v) > 0 { + obj.RollingUpdate = expandClusterV2RKEConfigMachinePoolRollingUpdate(v) + } + if v, ok := in["taints"].([]interface{}); ok && len(v) > 0 { + obj.Taints = expandTaintsV2(v) + } + if v, ok := in["worker_role"].(bool); ok { + obj.WorkerRole = v + } + out[i] = obj + } + + return out +} diff --git a/rancher2/structure_cluster_v2_rke_config_machine_pool_test.go b/rancher2/structure_cluster_v2_rke_config_machine_pool_test.go new file mode 100644 index 000000000..cd04c78d2 --- /dev/null +++ 
b/rancher2/structure_cluster_v2_rke_config_machine_pool_test.go @@ -0,0 +1,229 @@ +package rancher2 + +import ( + "reflect" + "testing" + + provisionv1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +var ( + testClusterV2RKEConfigMachinePoolMachineConfigConf *corev1.ObjectReference + testClusterV2RKEConfigMachinePoolMachineConfigInterface []interface{} + testClusterV2RKEConfigMachinePoolRollingUpdateConf *provisionv1.RKEMachinePoolRollingUpdate + testClusterV2RKEConfigMachinePoolRollingUpdateInterface []interface{} + testClusterV2RKEConfigMachinePoolsConf []provisionv1.RKEMachinePool + testClusterV2RKEConfigMachinePoolsInterface []interface{} +) + +func init() { + testClusterV2RKEConfigMachinePoolMachineConfigConf = &corev1.ObjectReference{ + Kind: "kind", + Name: "name", + } + testClusterV2RKEConfigMachinePoolMachineConfigInterface = []interface{}{ + map[string]interface{}{ + "kind": "kind", + "name": "name", + }, + } + maxSurge := intstr.FromString("max_surge") + maxUnavailable := intstr.FromString("max_unavailable") + testClusterV2RKEConfigMachinePoolRollingUpdateConf = &provisionv1.RKEMachinePoolRollingUpdate{ + MaxSurge: &maxSurge, + MaxUnavailable: &maxUnavailable, + } + + testClusterV2RKEConfigMachinePoolRollingUpdateInterface = []interface{}{ + map[string]interface{}{ + "max_surge": "max_surge", + "max_unavailable": "max_unavailable", + }, + } + quantity := int32(10) + testClusterV2RKEConfigMachinePoolsConf = []provisionv1.RKEMachinePool{ + { + Name: "test", + DisplayName: "test", + NodeConfig: testClusterV2RKEConfigMachinePoolMachineConfigConf, + ControlPlaneRole: true, + EtcdRole: true, + MachineDeploymentAnnotations: map[string]string{ + "anno_one": "one", + "anno_two": "two", + }, + MachineDeploymentLabels: map[string]string{ + "label_one": "one", + "label_two": "two", + }, + Quantity: &quantity, + Paused: true, + RollingUpdate: 
testClusterV2RKEConfigMachinePoolRollingUpdateConf, + WorkerRole: true, + }, + } + testClusterV2RKEConfigMachinePoolsConf[0].CloudCredentialSecretName = "cloud_credential_secret_name" + testClusterV2RKEConfigMachinePoolsConf[0].Taints = []corev1.Taint{ + { + Key: "key", + Value: "value", + Effect: "recipient", + }, + } + testClusterV2RKEConfigMachinePoolsInterface = []interface{}{ + map[string]interface{}{ + "name": "test", + "cloud_credential_secret_name": "cloud_credential_secret_name", + "machine_config": testClusterV2RKEConfigMachinePoolMachineConfigInterface, + "control_plane_role": true, + "etcd_role": true, + "annotations": map[string]interface{}{ + "anno_one": "one", + "anno_two": "two", + }, + "labels": map[string]interface{}{ + "label_one": "one", + "label_two": "two", + }, + "quantity": 10, + "paused": true, + "rolling_update": testClusterV2RKEConfigMachinePoolRollingUpdateInterface, + "taints": []interface{}{ + map[string]interface{}{ + "key": "key", + "value": "value", + "effect": "recipient", + }, + }, + "worker_role": true, + }, + } +} + +func TestFlattenClusterV2RKEConfigMachinePoolMachineConfig(t *testing.T) { + + cases := []struct { + Input *corev1.ObjectReference + ExpectedOutput []interface{} + }{ + { + testClusterV2RKEConfigMachinePoolMachineConfigConf, + testClusterV2RKEConfigMachinePoolMachineConfigInterface, + }, + } + + for _, tc := range cases { + output := flattenClusterV2RKEConfigMachinePoolMachineConfig(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestFlattenClusterV2RKEConfigMachinePoolRollingUpdate(t *testing.T) { + + cases := []struct { + Input *provisionv1.RKEMachinePoolRollingUpdate + ExpectedOutput []interface{} + }{ + { + testClusterV2RKEConfigMachinePoolRollingUpdateConf, + testClusterV2RKEConfigMachinePoolRollingUpdateInterface, + }, + } + + for _, tc := range cases { + output := 
flattenClusterV2RKEConfigMachinePoolRollingUpdate(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestFlattenClusterV2RKEConfigMachinePools(t *testing.T) { + + cases := []struct { + Input []provisionv1.RKEMachinePool + ExpectedOutput []interface{} + }{ + { + testClusterV2RKEConfigMachinePoolsConf, + testClusterV2RKEConfigMachinePoolsInterface, + }, + } + + for _, tc := range cases { + output := flattenClusterV2RKEConfigMachinePools(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestExpandClusterV2RKEConfigMachinePoolMachineConfig(t *testing.T) { + + cases := []struct { + Input []interface{} + ExpectedOutput *corev1.ObjectReference + }{ + { + testClusterV2RKEConfigMachinePoolMachineConfigInterface, + testClusterV2RKEConfigMachinePoolMachineConfigConf, + }, + } + + for _, tc := range cases { + output := expandClusterV2RKEConfigMachinePoolMachineConfig(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestExpandClusterV2RKEConfigMachinePoolRollingUpdate(t *testing.T) { + + cases := []struct { + Input []interface{} + ExpectedOutput *provisionv1.RKEMachinePoolRollingUpdate + }{ + { + testClusterV2RKEConfigMachinePoolRollingUpdateInterface, + testClusterV2RKEConfigMachinePoolRollingUpdateConf, + }, + } + + for _, tc := range cases { + output := expandClusterV2RKEConfigMachinePoolRollingUpdate(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestExpandClusterV2RKEConfigMachinePools(t *testing.T) { + + cases := []struct { + Input 
[]interface{} + ExpectedOutput []provisionv1.RKEMachinePool + }{ + { + testClusterV2RKEConfigMachinePoolsInterface, + testClusterV2RKEConfigMachinePoolsConf, + }, + } + + for _, tc := range cases { + output := expandClusterV2RKEConfigMachinePools(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} diff --git a/rancher2/structure_cluster_v2_rke_config_registry.go b/rancher2/structure_cluster_v2_rke_config_registry.go new file mode 100644 index 000000000..5ea6a5149 --- /dev/null +++ b/rancher2/structure_cluster_v2_rke_config_registry.go @@ -0,0 +1,146 @@ +package rancher2 + +import ( + rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" +) + +// Flatteners + +func flattenClusterV2RKEConfigRegistryConfigs(p map[string]rkev1.RegistryConfig) []interface{} { + if p == nil { + return nil + } + out := make([]interface{}, len(p)) + i := 0 + for k, in := range p { + obj := map[string]interface{}{} + + obj["hostname"] = k + + if len(in.AuthConfigSecretName) > 0 { + obj["auth_config_secret_name"] = in.AuthConfigSecretName + } + if len(in.TLSSecretName) > 0 { + obj["tls_secret_name"] = in.TLSSecretName + } + if len(in.CABundle) > 0 { + obj["ca_bundle"] = string(in.CABundle) + } + obj["insecure"] = in.InsecureSkipVerify + out[i] = obj + i++ + } + + return out +} + +func flattenClusterV2RKEConfigRegistryMirrors(p map[string]rkev1.Mirror) []interface{} { + if p == nil { + return nil + } + out := make([]interface{}, len(p)) + i := 0 + for k, in := range p { + obj := map[string]interface{}{} + + obj["hostname"] = k + + if len(in.Endpoints) > 0 { + obj["endpoints"] = toArrayInterfaceSorted(in.Endpoints) + } + if len(in.Rewrites) > 0 { + obj["rewrites"] = toMapInterface(in.Rewrites) + } + out[i] = obj + i++ + } + + return out +} + +func flattenClusterV2RKEConfigRegistry(in *rkev1.Registry) []interface{} { + if in == nil { + return nil + } + + 
obj := make(map[string]interface{}) + if len(in.Configs) > 0 { + obj["configs"] = flattenClusterV2RKEConfigRegistryConfigs(in.Configs) + } + if len(in.Mirrors) > 0 { + obj["mirrors"] = flattenClusterV2RKEConfigRegistryMirrors(in.Mirrors) + } + + return []interface{}{obj} +} + +// Expanders + +func expandClusterV2RKEConfigRegistryConfigs(p []interface{}) map[string]rkev1.RegistryConfig { + if p == nil || len(p) == 0 || p[0] == nil { + return nil + } + + out := map[string]rkev1.RegistryConfig{} + for i := range p { + in := p[i].(map[string]interface{}) + obj := rkev1.RegistryConfig{} + + if v, ok := in["auth_config_secret_name"].(string); ok && len(v) > 0 { + obj.AuthConfigSecretName = v + } + if v, ok := in["tls_secret_name"].(string); ok && len(v) > 0 { + obj.TLSSecretName = v + } + if v, ok := in["ca_bundle"].(string); ok && len(v) > 0 { + obj.CABundle = []byte(v) + } + if v, ok := in["insecure"].(bool); ok { + obj.InsecureSkipVerify = v + } + out[in["hostname"].(string)] = obj + } + + return out +} + +func expandClusterV2RKEConfigRegistryMirrors(p []interface{}) map[string]rkev1.Mirror { + if p == nil || len(p) == 0 || p[0] == nil { + return nil + } + + out := map[string]rkev1.Mirror{} + for i := range p { + in := p[i].(map[string]interface{}) + obj := rkev1.Mirror{} + + if v, ok := in["endpoints"].([]interface{}); ok && len(v) > 0 { + obj.Endpoints = toArrayStringSorted(v) + } + if v, ok := in["rewrites"].(map[string]interface{}); ok && len(v) > 0 { + obj.Rewrites = toMapString(v) + } + out[in["hostname"].(string)] = obj + } + + return out +} + +func expandClusterV2RKEConfigRegistry(p []interface{}) *rkev1.Registry { + if p == nil || len(p) == 0 || p[0] == nil { + return nil + } + + obj := &rkev1.Registry{} + + in := p[0].(map[string]interface{}) + + if v, ok := in["configs"].([]interface{}); ok && len(v) > 0 { + obj.Configs = expandClusterV2RKEConfigRegistryConfigs(v) + } + if v, ok := in["mirrors"].([]interface{}); ok && len(v) > 0 { + obj.Mirrors = 
expandClusterV2RKEConfigRegistryMirrors(v) + } + + return obj +} diff --git a/rancher2/structure_cluster_v2_rke_config_registry_test.go b/rancher2/structure_cluster_v2_rke_config_registry_test.go new file mode 100644 index 000000000..4464a9dcc --- /dev/null +++ b/rancher2/structure_cluster_v2_rke_config_registry_test.go @@ -0,0 +1,193 @@ +package rancher2 + +import ( + "reflect" + "testing" + + rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" +) + +var ( + testClusterV2RKEConfigRegistryConfigsConf map[string]rkev1.RegistryConfig + testClusterV2RKEConfigRegistryConfigsInterface []interface{} + testClusterV2RKEConfigRegistryMirrorsConf map[string]rkev1.Mirror + testClusterV2RKEConfigRegistryMirrorsInterface []interface{} + testClusterV2RKEConfigRegistryConf *rkev1.Registry + testClusterV2RKEConfigRegistryInterface []interface{} +) + +func init() { + testClusterV2RKEConfigRegistryConfigsConf = map[string]rkev1.RegistryConfig{ + "test": { + AuthConfigSecretName: "auth_config_secret_name", + TLSSecretName: "tls_secret_name", + CABundle: []byte("ca_bundle"), + InsecureSkipVerify: true, + }, + } + + testClusterV2RKEConfigRegistryConfigsInterface = []interface{}{ + map[string]interface{}{ + "hostname": "test", + "auth_config_secret_name": "auth_config_secret_name", + "tls_secret_name": "tls_secret_name", + "ca_bundle": "ca_bundle", + "insecure": true, + }, + } + testClusterV2RKEConfigRegistryMirrorsConf = map[string]rkev1.Mirror{ + "test": { + Endpoints: []string{"value1", "value2"}, + Rewrites: map[string]string{ + "rw_one": "one", + "rw_two": "two", + }, + }, + } + testClusterV2RKEConfigRegistryMirrorsInterface = []interface{}{ + map[string]interface{}{ + "hostname": "test", + "endpoints": []interface{}{"value1", "value2"}, + "rewrites": map[string]interface{}{ + "rw_one": "one", + "rw_two": "two", + }, + }, + } + testClusterV2RKEConfigRegistryConf = &rkev1.Registry{ + Configs: testClusterV2RKEConfigRegistryConfigsConf, + Mirrors: 
testClusterV2RKEConfigRegistryMirrorsConf, + } + testClusterV2RKEConfigRegistryInterface = []interface{}{ + map[string]interface{}{ + "configs": testClusterV2RKEConfigRegistryConfigsInterface, + "mirrors": testClusterV2RKEConfigRegistryMirrorsInterface, + }, + } +} + +func TestFlattenClusterV2RKEConfigRegistryConfigs(t *testing.T) { + + cases := []struct { + Input map[string]rkev1.RegistryConfig + ExpectedOutput []interface{} + }{ + { + testClusterV2RKEConfigRegistryConfigsConf, + testClusterV2RKEConfigRegistryConfigsInterface, + }, + } + + for _, tc := range cases { + output := flattenClusterV2RKEConfigRegistryConfigs(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestFlattenClusterV2RKEConfigRegistryMirrors(t *testing.T) { + + cases := []struct { + Input map[string]rkev1.Mirror + ExpectedOutput []interface{} + }{ + { + testClusterV2RKEConfigRegistryMirrorsConf, + testClusterV2RKEConfigRegistryMirrorsInterface, + }, + } + + for _, tc := range cases { + output := flattenClusterV2RKEConfigRegistryMirrors(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestFlattenClusterV2RKEConfigRegistry(t *testing.T) { + + cases := []struct { + Input *rkev1.Registry + ExpectedOutput []interface{} + }{ + { + testClusterV2RKEConfigRegistryConf, + testClusterV2RKEConfigRegistryInterface, + }, + } + + for _, tc := range cases { + output := flattenClusterV2RKEConfigRegistry(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestExpandClusterV2RKEConfigRegistryConfigs(t *testing.T) { + + cases := []struct { + Input []interface{} + ExpectedOutput map[string]rkev1.RegistryConfig + }{ 
+ { + testClusterV2RKEConfigRegistryConfigsInterface, + testClusterV2RKEConfigRegistryConfigsConf, + }, + } + + for _, tc := range cases { + output := expandClusterV2RKEConfigRegistryConfigs(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestExpandClusterV2RKEConfigRegistryMirrors(t *testing.T) { + + cases := []struct { + Input []interface{} + ExpectedOutput map[string]rkev1.Mirror + }{ + { + testClusterV2RKEConfigRegistryMirrorsInterface, + testClusterV2RKEConfigRegistryMirrorsConf, + }, + } + + for _, tc := range cases { + output := expandClusterV2RKEConfigRegistryMirrors(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestExpandClusterV2RKEConfigRegistry(t *testing.T) { + + cases := []struct { + Input []interface{} + ExpectedOutput *rkev1.Registry + }{ + { + testClusterV2RKEConfigRegistryInterface, + testClusterV2RKEConfigRegistryConf, + }, + } + + for _, tc := range cases { + output := expandClusterV2RKEConfigRegistry(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} diff --git a/rancher2/structure_cluster_v2_rke_config_system_config.go b/rancher2/structure_cluster_v2_rke_config_system_config.go new file mode 100644 index 000000000..494f0e603 --- /dev/null +++ b/rancher2/structure_cluster_v2_rke_config_system_config.go @@ -0,0 +1,136 @@ +package rancher2 + +import ( + rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Flatteners + +func flattenClusterV2RKEConfigSystemConfigLabelSelectorExpression(p []metav1.LabelSelectorRequirement) []interface{} { + if p == nil { + return nil + } + out := 
make([]interface{}, len(p)) + for i, in := range p { + obj := map[string]interface{}{} + + if len(in.Key) > 0 { + obj["key"] = in.Key + } + if len(in.Operator) > 0 { + obj["operator"] = string(in.Operator) + } + if len(in.Values) > 0 { + obj["values"] = toArrayInterfaceSorted(in.Values) + } + out[i] = obj + } + + return out +} + +func flattenClusterV2RKEConfigSystemConfigLabelSelector(in *metav1.LabelSelector) []interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + + if len(in.MatchLabels) > 0 { + obj["match_labels"] = toMapInterface(in.MatchLabels) + } + if len(in.MatchExpressions) > 0 { + obj["match_expressions"] = flattenClusterV2RKEConfigSystemConfigLabelSelectorExpression(in.MatchExpressions) + } + + return []interface{}{obj} +} + +func flattenClusterV2RKEConfigSystemConfig(p []rkev1.RKESystemConfig) []interface{} { + if p == nil { + return nil + } + out := make([]interface{}, len(p)) + for i, in := range p { + obj := map[string]interface{}{} + + if in.MachineLabelSelector != nil { + obj["machine_label_selector"] = flattenClusterV2RKEConfigSystemConfigLabelSelector(in.MachineLabelSelector) + } + if len(in.Config.Data) > 0 { + obj["config"] = in.Config.Data + } + out[i] = obj + } + + return out +} + +// Expanders + +func expandClusterV2RKEConfigSystemConfigLabelSelectorExpression(p []interface{}) []metav1.LabelSelectorRequirement { + if p == nil || len(p) == 0 || p[0] == nil { + return nil + } + + out := make([]metav1.LabelSelectorRequirement, len(p)) + for i := range p { + in := p[i].(map[string]interface{}) + obj := metav1.LabelSelectorRequirement{} + + if v, ok := in["key"].(string); ok && len(v) > 0 { + obj.Key = v + } + if v, ok := in["operator"].(string); ok && len(v) > 0 { + obj.Operator = metav1.LabelSelectorOperator(v) + } + if v, ok := in["values"].([]interface{}); ok && len(v) > 0 { + obj.Values = toArrayStringSorted(v) + } + out[i] = obj + } + + return out +} + +func 
expandClusterV2RKEConfigSystemConfigLabelSelector(p []interface{}) *metav1.LabelSelector { + if p == nil || len(p) == 0 || p[0] == nil { + return nil + } + + obj := &metav1.LabelSelector{} + + in := p[0].(map[string]interface{}) + + if v, ok := in["match_labels"].(map[string]interface{}); ok && len(v) > 0 { + obj.MatchLabels = toMapString(v) + } + if v, ok := in["match_expressions"].([]interface{}); ok && len(v) > 0 { + obj.MatchExpressions = expandClusterV2RKEConfigSystemConfigLabelSelectorExpression(v) + } + + return obj +} + +func expandClusterV2RKEConfigSystemConfig(p []interface{}) []rkev1.RKESystemConfig { + if p == nil || len(p) == 0 || p[0] == nil { + return nil + } + + out := make([]rkev1.RKESystemConfig, len(p)) + for i := range p { + in := p[i].(map[string]interface{}) + obj := rkev1.RKESystemConfig{} + + if v, ok := in["machine_label_selector"].([]interface{}); ok && len(v) > 0 { + obj.MachineLabelSelector = expandClusterV2RKEConfigSystemConfigLabelSelector(v) + } + if v, ok := in["config"].(map[string]interface{}); ok && len(v) > 0 { + obj.Config.Data = v + } + out[i] = obj + } + + return out +} diff --git a/rancher2/structure_cluster_v2_rke_config_system_config_test.go b/rancher2/structure_cluster_v2_rke_config_system_config_test.go new file mode 100644 index 000000000..721bd8755 --- /dev/null +++ b/rancher2/structure_cluster_v2_rke_config_system_config_test.go @@ -0,0 +1,198 @@ +package rancher2 + +import ( + "reflect" + "testing" + + rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + testClusterV2RKEConfigSystemConfigLabelSelectorExpressionConf []metav1.LabelSelectorRequirement + testClusterV2RKEConfigSystemConfigLabelSelectorExpressionInterface []interface{} + testClusterV2RKEConfigSystemConfigLabelSelectorConf *metav1.LabelSelector + testClusterV2RKEConfigSystemConfigLabelSelectorInterface []interface{} + testClusterV2RKEConfigSystemConfigConf []rkev1.RKESystemConfig + 
testClusterV2RKEConfigSystemConfigInterface []interface{} +) + +func init() { + testClusterV2RKEConfigSystemConfigLabelSelectorExpressionConf = []metav1.LabelSelectorRequirement{ + { + Key: "key", + Operator: metav1.LabelSelectorOperator("operator"), + Values: []string{"value1", "value2"}, + }, + } + + testClusterV2RKEConfigSystemConfigLabelSelectorExpressionInterface = []interface{}{ + map[string]interface{}{ + "key": "key", + "operator": "operator", + "values": []interface{}{"value1", "value2"}, + }, + } + testClusterV2RKEConfigSystemConfigLabelSelectorConf = &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "label_one": "one", + "label_two": "two", + }, + MatchExpressions: testClusterV2RKEConfigSystemConfigLabelSelectorExpressionConf, + } + testClusterV2RKEConfigSystemConfigLabelSelectorInterface = []interface{}{ + map[string]interface{}{ + "match_labels": map[string]interface{}{ + "label_one": "one", + "label_two": "two", + }, + "match_expressions": testClusterV2RKEConfigSystemConfigLabelSelectorExpressionInterface, + }, + } + testClusterV2RKEConfigSystemConfigConf = []rkev1.RKESystemConfig{ + { + MachineLabelSelector: testClusterV2RKEConfigSystemConfigLabelSelectorConf, + Config: rkev1.GenericMap{ + Data: map[string]interface{}{ + "config_one": "one", + "config_two": "two", + }, + }, + }, + } + testClusterV2RKEConfigSystemConfigInterface = []interface{}{ + map[string]interface{}{ + "machine_label_selector": testClusterV2RKEConfigSystemConfigLabelSelectorInterface, + "config": map[string]interface{}{ + "config_one": "one", + "config_two": "two", + }, + }, + } +} + +func TestFlattenClusterV2RKEConfigSystemConfigLabelSelectorExpression(t *testing.T) { + + cases := []struct { + Input []metav1.LabelSelectorRequirement + ExpectedOutput []interface{} + }{ + { + testClusterV2RKEConfigSystemConfigLabelSelectorExpressionConf, + testClusterV2RKEConfigSystemConfigLabelSelectorExpressionInterface, + }, + } + + for _, tc := range cases { + output := 
flattenClusterV2RKEConfigSystemConfigLabelSelectorExpression(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestFlattenClusterV2RKEConfigSystemConfigLabelSelector(t *testing.T) { + + cases := []struct { + Input *metav1.LabelSelector + ExpectedOutput []interface{} + }{ + { + testClusterV2RKEConfigSystemConfigLabelSelectorConf, + testClusterV2RKEConfigSystemConfigLabelSelectorInterface, + }, + } + + for _, tc := range cases { + output := flattenClusterV2RKEConfigSystemConfigLabelSelector(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestFlattenClusterV2RKEConfigSystemConfig(t *testing.T) { + + cases := []struct { + Input []rkev1.RKESystemConfig + ExpectedOutput []interface{} + }{ + { + testClusterV2RKEConfigSystemConfigConf, + testClusterV2RKEConfigSystemConfigInterface, + }, + } + + for _, tc := range cases { + output := flattenClusterV2RKEConfigSystemConfig(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestExpandClusterV2RKEConfigSystemConfigLabelSelectorExpression(t *testing.T) { + + cases := []struct { + Input []interface{} + ExpectedOutput []metav1.LabelSelectorRequirement + }{ + { + testClusterV2RKEConfigSystemConfigLabelSelectorExpressionInterface, + testClusterV2RKEConfigSystemConfigLabelSelectorExpressionConf, + }, + } + + for _, tc := range cases { + output := expandClusterV2RKEConfigSystemConfigLabelSelectorExpression(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func 
TestExpandClusterV2RKEConfigSystemConfigLabelSelector(t *testing.T) { + + cases := []struct { + Input []interface{} + ExpectedOutput *metav1.LabelSelector + }{ + { + testClusterV2RKEConfigSystemConfigLabelSelectorInterface, + testClusterV2RKEConfigSystemConfigLabelSelectorConf, + }, + } + + for _, tc := range cases { + output := expandClusterV2RKEConfigSystemConfigLabelSelector(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestExpandClusterV2RKEConfigSystemConfig(t *testing.T) { + + cases := []struct { + Input []interface{} + ExpectedOutput []rkev1.RKESystemConfig + }{ + { + testClusterV2RKEConfigSystemConfigInterface, + testClusterV2RKEConfigSystemConfigConf, + }, + } + + for _, tc := range cases { + output := expandClusterV2RKEConfigSystemConfig(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} diff --git a/rancher2/structure_cluster_v2_rke_config_upgrade_strategy.go b/rancher2/structure_cluster_v2_rke_config_upgrade_strategy.go new file mode 100644 index 000000000..7e83ffa3f --- /dev/null +++ b/rancher2/structure_cluster_v2_rke_config_upgrade_strategy.go @@ -0,0 +1,123 @@ +package rancher2 + +import ( + rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" +) + +// Flatteners + +func flattenClusterV2RKEConfigUpgradeStrategyDrainOptions(in rkev1.DrainOptions) []interface{} { + empty := rkev1.DrainOptions{} + if in == empty { + return nil + } + + obj := make(map[string]interface{}) + + obj["enabled"] = in.Enabled + obj["force"] = in.Force + if in.IgnoreDaemonSets != nil { + obj["ignore_daemon_sets"] = *in.IgnoreDaemonSets + } + obj["ignore_errors"] = in.IgnoreErrors + obj["delete_empty_dir_data"] = in.DeleteEmptyDirData + obj["disable_eviction"] = in.DisableEviction + if in.GracePeriod > 
0 { + obj["grace_period"] = in.GracePeriod + } + if in.Timeout > 0 { + obj["timeout"] = in.Timeout + } + if in.SkipWaitForDeleteTimeoutSeconds > 0 { + obj["skip_wait_for_delete_timeout_seconds"] = in.SkipWaitForDeleteTimeoutSeconds + } + + return []interface{}{obj} +} + +func flattenClusterV2RKEConfigUpgradeStrategy(in rkev1.ClusterUpgradeStrategy) []interface{} { + empty := rkev1.ClusterUpgradeStrategy{} + if in == empty { + return nil + } + + obj := make(map[string]interface{}) + + if len(in.ControlPlaneConcurrency) > 0 { + obj["control_plane_concurrency"] = in.ControlPlaneConcurrency + } + obj["control_plane_drain_options"] = flattenClusterV2RKEConfigUpgradeStrategyDrainOptions(in.ControlPlaneDrainOptions) + if len(in.WorkerConcurrency) > 0 { + obj["worker_concurrency"] = in.WorkerConcurrency + } + obj["worker_drain_options"] = flattenClusterV2RKEConfigUpgradeStrategyDrainOptions(in.WorkerDrainOptions) + + return []interface{}{obj} +} + +// Expanders + +func expandClusterV2RKEConfigUpgradeStrategyDrainOptions(p []interface{}) rkev1.DrainOptions { + if p == nil || len(p) == 0 || p[0] == nil { + return rkev1.DrainOptions{} + } + + obj := rkev1.DrainOptions{} + + in := p[0].(map[string]interface{}) + + if v, ok := in["enabled"].(bool); ok { + obj.Enabled = v + } + if v, ok := in["force"].(bool); ok { + obj.Force = v + } + if v, ok := in["ignore_daemon_sets"].(bool); ok { + obj.IgnoreDaemonSets = &v + } + if v, ok := in["ignore_errors"].(bool); ok { + obj.IgnoreErrors = v + } + if v, ok := in["delete_empty_dir_data"].(bool); ok { + obj.DeleteEmptyDirData = v + } + if v, ok := in["disable_eviction"].(bool); ok { + obj.DisableEviction = v + } + if v, ok := in["grace_period"].(int); ok && v > 0 { + obj.GracePeriod = v + } + if v, ok := in["timeout"].(int); ok && v > 0 { + obj.Timeout = v + } + if v, ok := in["skip_wait_for_delete_timeout_seconds"].(int); ok && v > 0 { + obj.SkipWaitForDeleteTimeoutSeconds = v + } + + return obj +} + +func 
expandClusterV2RKEConfigUpgradeStrategy(p []interface{}) rkev1.ClusterUpgradeStrategy { + if p == nil || len(p) == 0 || p[0] == nil { + return rkev1.ClusterUpgradeStrategy{} + } + + obj := rkev1.ClusterUpgradeStrategy{} + + in := p[0].(map[string]interface{}) + + if v, ok := in["control_plane_concurrency"].(string); ok && len(v) > 0 { + obj.ControlPlaneConcurrency = v + } + if v, ok := in["control_plane_drain_options"].([]interface{}); ok && len(v) > 0 { + obj.ControlPlaneDrainOptions = expandClusterV2RKEConfigUpgradeStrategyDrainOptions(v) + } + if v, ok := in["worker_concurrency"].(string); ok && len(v) > 0 { + obj.WorkerConcurrency = v + } + if v, ok := in["worker_drain_options"].([]interface{}); ok && len(v) > 0 { + obj.WorkerDrainOptions = expandClusterV2RKEConfigUpgradeStrategyDrainOptions(v) + } + + return obj +} diff --git a/rancher2/structure_cluster_v2_rke_config_upgrade_strategy_test.go b/rancher2/structure_cluster_v2_rke_config_upgrade_strategy_test.go new file mode 100644 index 000000000..540f420fa --- /dev/null +++ b/rancher2/structure_cluster_v2_rke_config_upgrade_strategy_test.go @@ -0,0 +1,142 @@ +package rancher2 + +import ( + "reflect" + "testing" + + rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" +) + +var ( + testClusterV2RKEConfigUpgradeStrategyDrainOptionsConf rkev1.DrainOptions + testClusterV2RKEConfigUpgradeStrategyDrainOptionsInterface []interface{} + testClusterV2RKEConfigUpgradeStrategyConf rkev1.ClusterUpgradeStrategy + testClusterV2RKEConfigUpgradeStrategyInterface []interface{} +) + +func init() { + testClusterV2RKEConfigUpgradeStrategyDrainOptionsConf = rkev1.DrainOptions{ + Enabled: false, + Force: true, + IgnoreDaemonSets: newTrue(), + IgnoreErrors: true, + DeleteEmptyDirData: true, + DisableEviction: true, + GracePeriod: 30, + Timeout: 20, + SkipWaitForDeleteTimeoutSeconds: 10, + } + + testClusterV2RKEConfigUpgradeStrategyDrainOptionsInterface = []interface{}{ + map[string]interface{}{ + "enabled": false, + 
"force": true, + "ignore_daemon_sets": true, + "ignore_errors": true, + "delete_empty_dir_data": true, + "disable_eviction": true, + "grace_period": 30, + "timeout": 20, + "skip_wait_for_delete_timeout_seconds": 10, + }, + } + testClusterV2RKEConfigUpgradeStrategyConf = rkev1.ClusterUpgradeStrategy{ + ControlPlaneConcurrency: "control_plane_concurrency", + ControlPlaneDrainOptions: testClusterV2RKEConfigUpgradeStrategyDrainOptionsConf, + WorkerConcurrency: "worker_concurrency", + WorkerDrainOptions: testClusterV2RKEConfigUpgradeStrategyDrainOptionsConf, + } + + testClusterV2RKEConfigUpgradeStrategyInterface = []interface{}{ + map[string]interface{}{ + "control_plane_concurrency": "control_plane_concurrency", + "control_plane_drain_options": testClusterV2RKEConfigUpgradeStrategyDrainOptionsInterface, + "worker_concurrency": "worker_concurrency", + "worker_drain_options": testClusterV2RKEConfigUpgradeStrategyDrainOptionsInterface, + }, + } +} + +func TestFlattenClusterV2RKEConfigUpgradeStrategyDrainOptions(t *testing.T) { + + cases := []struct { + Input rkev1.DrainOptions + ExpectedOutput []interface{} + }{ + { + testClusterV2RKEConfigUpgradeStrategyDrainOptionsConf, + testClusterV2RKEConfigUpgradeStrategyDrainOptionsInterface, + }, + } + + for _, tc := range cases { + output := flattenClusterV2RKEConfigUpgradeStrategyDrainOptions(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestFlattenClusterV2RKEConfigUpgradeStrategy(t *testing.T) { + + cases := []struct { + Input rkev1.ClusterUpgradeStrategy + ExpectedOutput []interface{} + }{ + { + testClusterV2RKEConfigUpgradeStrategyConf, + testClusterV2RKEConfigUpgradeStrategyInterface, + }, + } + + for _, tc := range cases { + output := flattenClusterV2RKEConfigUpgradeStrategy(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from 
flattener.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestExpandClusterV2RKEConfigUpgradeStrategyDrainOptions(t *testing.T) { + + cases := []struct { + Input []interface{} + ExpectedOutput rkev1.DrainOptions + }{ + { + testClusterV2RKEConfigUpgradeStrategyDrainOptionsInterface, + testClusterV2RKEConfigUpgradeStrategyDrainOptionsConf, + }, + } + + for _, tc := range cases { + output := expandClusterV2RKEConfigUpgradeStrategyDrainOptions(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestExpandClusterV2RKEConfigUpgradeStrategy(t *testing.T) { + + cases := []struct { + Input []interface{} + ExpectedOutput rkev1.ClusterUpgradeStrategy + }{ + { + testClusterV2RKEConfigUpgradeStrategyInterface, + testClusterV2RKEConfigUpgradeStrategyConf, + }, + } + + for _, tc := range cases { + output := expandClusterV2RKEConfigUpgradeStrategy(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} diff --git a/rancher2/structure_cluster_v2_rke_config_z_test.go b/rancher2/structure_cluster_v2_rke_config_z_test.go new file mode 100644 index 000000000..422809080 --- /dev/null +++ b/rancher2/structure_cluster_v2_rke_config_z_test.go @@ -0,0 +1,94 @@ +package rancher2 + +import ( + "reflect" + "testing" + + provisionv1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" + rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" +) + +var ( + testClusterV2RKEConfigConf *provisionv1.RKEConfig + testClusterV2RKEConfigInterface []interface{} +) + +func init() { + testClusterV2RKEConfigConf = &provisionv1.RKEConfig{ + MachinePools: testClusterV2RKEConfigMachinePoolsConf, + } + testClusterV2RKEConfigConf.AdditionalManifest = "additional_manifest" + 
testClusterV2RKEConfigConf.LocalClusterAuthEndpoint = testClusterV2RKEConfigLocalAuthEndpointConf + testClusterV2RKEConfigConf.UpgradeStrategy = testClusterV2RKEConfigUpgradeStrategyConf + testClusterV2RKEConfigConf.ChartValues = rkev1.GenericMap{ + Data: map[string]interface{}{ + "chart_one": "one", + "chart_two": "two", + }, + } + testClusterV2RKEConfigConf.MachineGlobalConfig = rkev1.GenericMap{ + Data: map[string]interface{}{ + "config_one": "one", + "config_two": "two", + }, + } + testClusterV2RKEConfigConf.MachineSelectorConfig = testClusterV2RKEConfigSystemConfigConf + testClusterV2RKEConfigConf.Registries = testClusterV2RKEConfigRegistryConf + testClusterV2RKEConfigConf.ETCD = testClusterV2RKEConfigETCDConf + + testClusterV2RKEConfigInterface = []interface{}{ + map[string]interface{}{ + "additional_manifest": "additional_manifest", + "local_auth_endpoint": testClusterV2RKEConfigLocalAuthEndpointInterface, + "upgrade_strategy": testClusterV2RKEConfigUpgradeStrategyInterface, + "chart_values": "chart_one: one\nchart_two: two\n", + "machine_global_config": "config_one: one\nconfig_two: two\n", + "machine_pools": testClusterV2RKEConfigMachinePoolsInterface, + "machine_selector_config": testClusterV2RKEConfigSystemConfigInterface, + "registries": testClusterV2RKEConfigRegistryInterface, + "etcd": testClusterV2RKEConfigETCDInterface, + }, + } +} + +func TestFlattenClusterV2RKEConfig(t *testing.T) { + + cases := []struct { + Input *provisionv1.RKEConfig + ExpectedOutput []interface{} + }{ + { + testClusterV2RKEConfigConf, + testClusterV2RKEConfigInterface, + }, + } + + for _, tc := range cases { + output := flattenClusterV2RKEConfig(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestExpandClusterV2RKEConfig(t *testing.T) { + + cases := []struct { + Input []interface{} + ExpectedOutput *provisionv1.RKEConfig + }{ + { + 
testClusterV2RKEConfigInterface, + testClusterV2RKEConfigConf, + }, + } + + for _, tc := range cases { + output := expandClusterV2RKEConfig(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} diff --git a/rancher2/structure_cluster_v2_test.go b/rancher2/structure_cluster_v2_test.go new file mode 100644 index 000000000..ccd55bc31 --- /dev/null +++ b/rancher2/structure_cluster_v2_test.go @@ -0,0 +1,132 @@ +package rancher2 + +import ( + "reflect" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" +) + +var ( + testClusterV2EnvVarConf []rkev1.EnvVar + testClusterV2EnvVarInterface []interface{} + testClusterV2Conf *ClusterV2 + testClusterV2Interface map[string]interface{} +) + +func init() { + testClusterV2EnvVarConf = []rkev1.EnvVar{ + { + Name: "name1", + Value: "value1", + }, + { + Name: "name2", + Value: "value2", + }, + } + testClusterV2EnvVarInterface = []interface{}{ + map[string]interface{}{ + "name": "name1", + "value": "value1", + }, + map[string]interface{}{ + "name": "name2", + "value": "value2", + }, + } + testClusterV2Conf = &ClusterV2{} + + testClusterV2Conf.TypeMeta.Kind = clusterV2Kind + testClusterV2Conf.TypeMeta.APIVersion = clusterV2APIVersion + + testClusterV2Conf.ObjectMeta.Name = "name" + testClusterV2Conf.ObjectMeta.Namespace = "fleet_namespace" + testClusterV2Conf.ObjectMeta.Annotations = map[string]string{ + "value1": "one", + "value2": "two", + } + testClusterV2Conf.ObjectMeta.Labels = map[string]string{ + "label1": "one", + "label2": "two", + } + testClusterV2Conf.Spec.KubernetesVersion = "kubernetes_version" + testClusterV2Conf.Spec.RKEConfig = testClusterV2RKEConfigConf + testClusterV2Conf.Spec.AgentEnvVars = testClusterV2EnvVarConf + testClusterV2Conf.Spec.CloudCredentialSecretName = "cloud_credential_secret_name" + 
testClusterV2Conf.Spec.DefaultPodSecurityPolicyTemplateName = "default_pod_security_policy_template_name" + testClusterV2Conf.Spec.DefaultClusterRoleForProjectMembers = "default_cluster_role_for_project_members" + testClusterV2Conf.Spec.EnableNetworkPolicy = newTrue() + + testClusterV2Interface = map[string]interface{}{ + "name": "name", + "fleet_namespace": "fleet_namespace", + "kubernetes_version": "kubernetes_version", + "rke_config": testClusterV2RKEConfigInterface, + "agent_env_vars": testClusterV2EnvVarInterface, + "cloud_credential_secret_name": "cloud_credential_secret_name", + "default_pod_security_policy_template_name": "default_pod_security_policy_template_name", + "default_cluster_role_for_project_members": "default_cluster_role_for_project_members", + "enable_network_policy": true, + "annotations": map[string]interface{}{ + "value1": "one", + "value2": "two", + }, + "labels": map[string]interface{}{ + "label1": "one", + "label2": "two", + }, + } +} + +func TestFlattenClusterV2(t *testing.T) { + + cases := []struct { + Input *ClusterV2 + ExpectedOutput map[string]interface{} + }{ + { + testClusterV2Conf, + testClusterV2Interface, + }, + } + + for _, tc := range cases { + output := schema.TestResourceDataRaw(t, clusterV2Fields(), tc.ExpectedOutput) + err := flattenClusterV2(output, tc.Input) + if err != nil { + t.Fatalf("[ERROR] on flattener: %#v", err) + } + expectedOutput := map[string]interface{}{} + for k := range tc.ExpectedOutput { + expectedOutput[k] = output.Get(k) + } + if !reflect.DeepEqual(expectedOutput, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, expectedOutput) + } + } +} + +func TestExpandClusterV2(t *testing.T) { + + cases := []struct { + Input map[string]interface{} + ExpectedOutput *ClusterV2 + }{ + { + testClusterV2Interface, + testClusterV2Conf, + }, + } + + for _, tc := range cases { + inputResourceData := schema.TestResourceDataRaw(t, clusterV2Fields(), 
tc.Input) + output := expandClusterV2(inputResourceData) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} diff --git a/rancher2/structure_env_var_v2.go b/rancher2/structure_env_var_v2.go new file mode 100644 index 000000000..131c261d8 --- /dev/null +++ b/rancher2/structure_env_var_v2.go @@ -0,0 +1,54 @@ +package rancher2 + +import ( + rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" +) + +// Flatteners + +func flattenEnvVarsV2(p []rkev1.EnvVar) []interface{} { + if p == nil || len(p) == 0 { + return nil + } + + out := make([]interface{}, len(p)) + for i, in := range p { + obj := make(map[string]interface{}) + + if len(in.Name) > 0 { + obj["name"] = in.Name + } + + if len(in.Value) > 0 { + obj["value"] = in.Value + } + + out[i] = obj + } + + return out +} + +// Expanders + +func expandEnvVarsV2(p []interface{}) []rkev1.EnvVar { + if p == nil || len(p) == 0 || p[0] == nil { + return nil + } + + obj := make([]rkev1.EnvVar, len(p)) + + for i := range p { + in := p[i].(map[string]interface{}) + + if v, ok := in["name"].(string); ok && len(v) > 0 { + obj[i].Name = v + } + + if v, ok := in["value"].(string); ok && len(v) > 0 { + obj[i].Value = v + } + } + + return obj +} diff --git a/rancher2/structure_env_var_v2_test.go b/rancher2/structure_env_var_v2_test.go new file mode 100644 index 000000000..4a94cca1e --- /dev/null +++ b/rancher2/structure_env_var_v2_test.go @@ -0,0 +1,78 @@ +package rancher2 + +import ( + "reflect" + "testing" + + rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" +) + +var ( + testEnvVarV2Conf []rkev1.EnvVar + testEnvVarV2Interface []interface{} +) + +func init() { + testEnvVarV2Conf = []rkev1.EnvVar{ + { + Name: "name1", + Value: "value1", + }, + { + Name: "name2", + Value: "value2", + }, + } + testEnvVarV2Interface = []interface{}{ + map[string]interface{}{ + "name": "name1", + "value": 
"value1", + }, + map[string]interface{}{ + "name": "name2", + "value": "value2", + }, + } +} + +func TestFlattenEnvVarsV2(t *testing.T) { + + cases := []struct { + Input []rkev1.EnvVar + ExpectedOutput []interface{} + }{ + { + testEnvVarV2Conf, + testEnvVarV2Interface, + }, + } + + for _, tc := range cases { + output := flattenEnvVarsV2(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestExpandEnvVarsV2(t *testing.T) { + + cases := []struct { + Input []interface{} + ExpectedOutput []rkev1.EnvVar + }{ + { + testEnvVarV2Interface, + testEnvVarV2Conf, + }, + } + + for _, tc := range cases { + output := expandEnvVarsV2(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} diff --git a/rancher2/structure_taint_v2.go b/rancher2/structure_taint_v2.go new file mode 100644 index 000000000..413c25726 --- /dev/null +++ b/rancher2/structure_taint_v2.go @@ -0,0 +1,58 @@ +package rancher2 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// Flatteners + +func flattenTaintsV2(p []corev1.Taint) []interface{} { + if len(p) == 0 { + return []interface{}{} + } + + out := make([]interface{}, len(p)) + for i, in := range p { + obj := make(map[string]interface{}) + + if len(in.Key) > 0 { + obj["key"] = in.Key + } + if len(in.Value) > 0 { + obj["value"] = in.Value + } + if len(in.Effect) > 0 { + obj["effect"] = string(in.Effect) + } + + out[i] = obj + } + + return out +} + +// Expanders + +func expandTaintsV2(p []interface{}) []corev1.Taint { + if len(p) == 0 || p[0] == nil { + return []corev1.Taint{} + } + + obj := make([]corev1.Taint, len(p)) + + for i := range p { + in := p[i].(map[string]interface{}) + + if v, ok := in["key"].(string); ok && len(v) > 0 { + obj[i].Key = v + } + if v, ok := in["value"].(string); ok && len(v) > 0 { 
+ obj[i].Value = v + } + if v, ok := in["effect"].(string); ok && len(v) > 0 { + obj[i].Effect = corev1.TaintEffect(v) + } + } + + return obj +} diff --git a/rancher2/structure_taint_v2_test.go b/rancher2/structure_taint_v2_test.go new file mode 100644 index 000000000..019049b9e --- /dev/null +++ b/rancher2/structure_taint_v2_test.go @@ -0,0 +1,72 @@ +package rancher2 + +import ( + "reflect" + "testing" + + corev1 "k8s.io/api/core/v1" +) + +var ( + testTaintsV2Conf []corev1.Taint + testTaintsV2Interface []interface{} +) + +func init() { + testTaintsV2Conf = []corev1.Taint{ + { + Key: "key", + Value: "value", + Effect: "recipient", + }, + } + testTaintsV2Interface = []interface{}{ + map[string]interface{}{ + "key": "key", + "value": "value", + "effect": "recipient", + }, + } +} + +func TestFlattenTaintsV2(t *testing.T) { + + cases := []struct { + Input []corev1.Taint + ExpectedOutput []interface{} + }{ + { + testTaintsV2Conf, + testTaintsV2Interface, + }, + } + + for _, tc := range cases { + output := flattenTaintsV2(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} + +func TestExpandTaintsV2(t *testing.T) { + + cases := []struct { + Input []interface{} + ExpectedOutput []corev1.Taint + }{ + { + testTaintsV2Interface, + testTaintsV2Conf, + }, + } + + for _, tc := range cases { + output := expandTaintsV2(tc.Input) + if !reflect.DeepEqual(output, tc.ExpectedOutput) { + t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", + tc.ExpectedOutput, output) + } + } +} From ee38457e3423a3b8d4a25849825d11ef90533ae5 Mon Sep 17 00:00:00 2001 From: rawmind0 Date: Fri, 10 Sep 2021 12:04:10 +0200 Subject: [PATCH 2/6] Updated cluster_registration_token attribute, added insecure_node_command and insecure_windows_node_command --- docs/resources/cluster.md | 2 ++ rancher2/schema_cluster.go | 8 +++++++ rancher2/structure_cluster.go | 2 ++ 
rancher2/structure_cluster_test.go | 38 +++++++++++++++++------------- 4 files changed, 33 insertions(+), 17 deletions(-) diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md index abb7e23b6..8d3ef3089 100644 --- a/docs/resources/cluster.md +++ b/docs/resources/cluster.md @@ -1749,6 +1749,8 @@ The following arguments are supported: * `name` - (Computed) Name of cluster registration token (string) * `command` - (Computed) Command to execute in a imported k8s cluster (string) * `insecure_command` - (Computed) Insecure command to execute in a imported k8s cluster (string) +* `insecure_node_command` - (Computed) Insecure node command to execute in a imported k8s cluster (string) +* `insecure_windows_node_command` - (Computed) Insecure windows command to execute in a imported k8s cluster (string) * `manifest_url` - (Computed) K8s manifest url to execute with `kubectl` to import an existing k8s cluster (string) * `node_command` - (Computed) Node command to execute in linux nodes for custom k8s cluster (string) * `token` - (Computed) Token for cluster registration token object (string) diff --git a/rancher2/schema_cluster.go b/rancher2/schema_cluster.go index dfa20825e..333a78824 100644 --- a/rancher2/schema_cluster.go +++ b/rancher2/schema_cluster.go @@ -54,6 +54,14 @@ func clusterRegistationTokenFields() map[string]*schema.Schema { Type: schema.TypeString, Computed: true, }, + "insecure_node_command": { + Type: schema.TypeString, + Computed: true, + }, + "insecure_windows_node_command": { + Type: schema.TypeString, + Computed: true, + }, "manifest_url": { Type: schema.TypeString, Computed: true, diff --git a/rancher2/structure_cluster.go b/rancher2/structure_cluster.go index 3288e0e32..b0fc99a4a 100644 --- a/rancher2/structure_cluster.go +++ b/rancher2/structure_cluster.go @@ -20,6 +20,8 @@ func flattenClusterRegistationToken(in *managementClient.ClusterRegistrationToke obj["name"] = in.Name obj["command"] = in.Command obj["insecure_command"] = 
in.InsecureCommand + obj["insecure_node_command"] = in.InsecureNodeCommand + obj["insecure_windows_node_command"] = in.InsecureWindowsNodeCommand obj["manifest_url"] = in.ManifestURL obj["node_command"] = in.NodeCommand obj["token"] = in.Token diff --git a/rancher2/structure_cluster_test.go b/rancher2/structure_cluster_test.go index 44ab12030..763dd0e02 100644 --- a/rancher2/structure_cluster_test.go +++ b/rancher2/structure_cluster_test.go @@ -140,14 +140,16 @@ func testCluster() { }, } testClusterRegistrationTokenConf = &managementClient.ClusterRegistrationToken{ - ClusterID: "cluster_test", - Name: clusterRegistrationTokenName, - Command: "command", - InsecureCommand: "insecure_command", - ManifestURL: "manifest", - NodeCommand: "node_command", - Token: "token", - WindowsNodeCommand: "win_node_command", + ClusterID: "cluster_test", + Name: clusterRegistrationTokenName, + Command: "command", + InsecureCommand: "insecure_command", + InsecureNodeCommand: "insecure_node_command", + InsecureWindowsNodeCommand: "insecure_windows_node_command", + ManifestURL: "manifest", + NodeCommand: "node_command", + Token: "token", + WindowsNodeCommand: "win_node_command", Annotations: map[string]string{ "node_one": "one", "node_two": "two", @@ -171,15 +173,17 @@ func testCluster() { } testClusterRegistrationTokenInterface = []interface{}{ map[string]interface{}{ - "id": "id", - "cluster_id": "cluster_test", - "name": clusterRegistrationTokenName, - "command": "command", - "insecure_command": "insecure_command", - "manifest_url": "manifest", - "node_command": "node_command", - "token": "token", - "windows_node_command": "win_node_command", + "id": "id", + "cluster_id": "cluster_test", + "name": clusterRegistrationTokenName, + "command": "command", + "insecure_command": "insecure_command", + "insecure_node_command": "insecure_node_command", + "insecure_windows_node_command": "insecure_windows_node_command", + "manifest_url": "manifest", + "node_command": "node_command", + "token": 
"token", + "windows_node_command": "win_node_command", "annotations": map[string]interface{}{ "node_one": "one", "node_two": "two", From efce302a3fefd620a603115572c3bbb9164c654f Mon Sep 17 00:00:00 2001 From: rawmind0 Date: Mon, 13 Sep 2021 16:34:05 +0200 Subject: [PATCH 3/6] Added note to bootstrap Rancher v2.6.0 and above --- docs/resources/bootstrap.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/resources/bootstrap.md b/docs/resources/bootstrap.md index 5e405850c..82285d9b3 100644 --- a/docs/resources/bootstrap.md +++ b/docs/resources/bootstrap.md @@ -18,6 +18,8 @@ Rancher2 admin `token` can also be regenerated if `token_update` is set to true. Login to Rancher2 is done by trying to use `token` first. If it fails, it uses admin `current_password`. If admin password has been changed outside of terraform and the terraform `token` is expired, `current_password` field can be specified to allow terraform to manage admin password and token again. +**Note** Starting from Rancher v2.6.0, the Rancher installation is setting a random admin password by default. To be able to still use the `rancher2_bootstrap` resource, the Rancher admin password should be set on installation time, using helm chart [`bootstrapPassword`](https://github.com/rancher/rancher/blob/release/v2.6/chart/values.yaml#L157) value for HA installation or docker env variable [`CATTLE_BOOTSTRAP_PASSWORD`](https://github.com/rancher/rancher/blob/release/v2.6/chart/templates/deployment.yaml#L135) for single node installation. If the Rancher admin password is set to something distinct than `admin` (previous default admin password), the `rancher2_bootstrap.current_password` argument should also be set with same value at tf file. 
+ ## Example Usage ```hcl From a45268cdf7cd75fa63a628b9947edeb553d8c5a8 Mon Sep 17 00:00:00 2001 From: rawmind0 Date: Mon, 13 Sep 2021 17:03:45 +0200 Subject: [PATCH 4/6] Added new optional argument rancher2_cloud_credential.amazonec2_credential_config.default_region --- docs/resources/cloud_credential.md | 1 + rancher2/schema_cloud_credential_amazonec2.go | 10 ++++++++-- rancher2/structure_cloud_credential_amazonec2.go | 8 ++++++++ rancher2/structure_cloud_credential_amazonec2_test.go | 10 ++++++---- 4 files changed, 23 insertions(+), 6 deletions(-) diff --git a/docs/resources/cloud_credential.md b/docs/resources/cloud_credential.md index 0ca3e5b1e..2cbbcc29d 100644 --- a/docs/resources/cloud_credential.md +++ b/docs/resources/cloud_credential.md @@ -53,6 +53,7 @@ The following attributes are exported: * `access_key` - (Required/Sensitive) AWS access key (string) * `secret_key` - (Required/Sensitive) AWS secret key (string) +* `default_region` - (Optional) AWS default region (string) ### `azure_credential_config` diff --git a/rancher2/schema_cloud_credential_amazonec2.go b/rancher2/schema_cloud_credential_amazonec2.go index 5530a4f4d..9085d64ff 100644 --- a/rancher2/schema_cloud_credential_amazonec2.go +++ b/rancher2/schema_cloud_credential_amazonec2.go @@ -7,8 +7,9 @@ import ( //Types type amazonec2CredentialConfig struct { - AccessKey string `json:"accessKey,omitempty" yaml:"accessKey,omitempty"` - SecretKey string `json:"secretKey,omitempty" yaml:"secretKey,omitempty"` + AccessKey string `json:"accessKey,omitempty" yaml:"accessKey,omitempty"` + SecretKey string `json:"secretKey,omitempty" yaml:"secretKey,omitempty"` + DefaultRegion string `json:"defaultRegion,omitempty" yaml:"defaultRegion,omitempty"` } //Schemas @@ -27,6 +28,11 @@ func cloudCredentialAmazonec2Fields() map[string]*schema.Schema { Sensitive: true, Description: "AWS Secret Key", }, + "default_region": { + Type: schema.TypeString, + Optional: true, + Description: "AWS default region", + }, } 
return s diff --git a/rancher2/structure_cloud_credential_amazonec2.go b/rancher2/structure_cloud_credential_amazonec2.go index 7c09cf61b..d211eca9a 100644 --- a/rancher2/structure_cloud_credential_amazonec2.go +++ b/rancher2/structure_cloud_credential_amazonec2.go @@ -22,6 +22,10 @@ func flattenCloudCredentialAmazonec2(in *amazonec2CredentialConfig, p []interfac obj["secret_key"] = in.SecretKey } + if len(in.DefaultRegion) > 0 { + obj["default_region"] = in.DefaultRegion + } + return []interface{}{obj} } @@ -42,5 +46,9 @@ func expandCloudCredentialAmazonec2(p []interface{}) *amazonec2CredentialConfig obj.SecretKey = v } + if v, ok := in["default_region"].(string); ok && len(v) > 0 { + obj.DefaultRegion = v + } + return obj } diff --git a/rancher2/structure_cloud_credential_amazonec2_test.go b/rancher2/structure_cloud_credential_amazonec2_test.go index d43b22a1f..3783146d0 100644 --- a/rancher2/structure_cloud_credential_amazonec2_test.go +++ b/rancher2/structure_cloud_credential_amazonec2_test.go @@ -12,13 +12,15 @@ var ( func init() { testCloudCredentialAmazonec2Conf = &amazonec2CredentialConfig{ - AccessKey: "access_key", - SecretKey: "secret_key", + AccessKey: "access_key", + SecretKey: "secret_key", + DefaultRegion: "default_region", } testCloudCredentialAmazonec2Interface = []interface{}{ map[string]interface{}{ - "access_key": "access_key", - "secret_key": "secret_key", + "access_key": "access_key", + "secret_key": "secret_key", + "default_region": "default_region", }, } } From 3e327f61ef8632b052db0404247f49117bfa7a26 Mon Sep 17 00:00:00 2001 From: rawmind0 Date: Tue, 14 Sep 2021 04:39:51 +0200 Subject: [PATCH 5/6] Added resource rancher2_machine_config_v2. 
Go and docs files --- docs/resources/machine_config_v2.md | 259 +++++++++++++ rancher2/provider.go | 1 + .../resource_rancher2_machine_config_v2.go | 364 ++++++++++++++++++ rancher2/schema_machine_config_v2.go | 92 +++++ .../schema_machine_config_v2_amazonec2.go | 214 ++++++++++ rancher2/schema_machine_config_v2_azure.go | 181 +++++++++ .../schema_machine_config_v2_digitalocean.go | 97 +++++ rancher2/schema_machine_config_v2_linode.go | 104 +++++ .../schema_machine_config_v2_openstack.go | 193 ++++++++++ rancher2/schema_machine_config_v2_vsphere.go | 212 ++++++++++ rancher2/structure_machine_config_v2.go | 138 +++++++ .../structure_machine_config_v2_amazonec2.go | 337 ++++++++++++++++ rancher2/structure_machine_config_v2_azure.go | 299 ++++++++++++++ ...tructure_machine_config_v2_digitalocean.go | 173 +++++++++ .../structure_machine_config_v2_linode.go | 200 ++++++++++ .../structure_machine_config_v2_openstack.go | 273 +++++++++++++ .../structure_machine_config_v2_vsphere.go | 277 +++++++++++++ 17 files changed, 3414 insertions(+) create mode 100644 docs/resources/machine_config_v2.md create mode 100644 rancher2/resource_rancher2_machine_config_v2.go create mode 100644 rancher2/schema_machine_config_v2.go create mode 100644 rancher2/schema_machine_config_v2_amazonec2.go create mode 100644 rancher2/schema_machine_config_v2_azure.go create mode 100644 rancher2/schema_machine_config_v2_digitalocean.go create mode 100644 rancher2/schema_machine_config_v2_linode.go create mode 100644 rancher2/schema_machine_config_v2_openstack.go create mode 100644 rancher2/schema_machine_config_v2_vsphere.go create mode 100644 rancher2/structure_machine_config_v2.go create mode 100644 rancher2/structure_machine_config_v2_amazonec2.go create mode 100644 rancher2/structure_machine_config_v2_azure.go create mode 100644 rancher2/structure_machine_config_v2_digitalocean.go create mode 100644 rancher2/structure_machine_config_v2_linode.go create mode 100644 
rancher2/structure_machine_config_v2_openstack.go create mode 100644 rancher2/structure_machine_config_v2_vsphere.go diff --git a/docs/resources/machine_config_v2.md b/docs/resources/machine_config_v2.md new file mode 100644 index 000000000..1bf72cf1a --- /dev/null +++ b/docs/resources/machine_config_v2.md @@ -0,0 +1,259 @@ +--- +page_title: "rancher2_machine_config_v2 Resource" +--- + +# rancher2\_machine\_config\_v2 Resource + +Provides a Rancher v2 Machine config v2 resource. This can be used to create Machine Config v2 for Rancher v2 and retrieve their information. This resource is supported as tech preview from Rancher v2.6.0 and above. + +`amazonec2`, `azure`, `digitalocean`, `linode`, `openstack`, and `vsphere` cloud providers are supported for machine config V2 + +**Note** This resource is used by + +## Example Usage + +```hcl +# Create amazonec2 machine config v2 +resource "rancher2_machine_config_v2" "foo" { + generate_name = "test-foo" + amazonec2_config { + ami = "" + region = "" + security_group = [] + subnet_id = "" + vpc_id = "" + zone = "" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `generate_name` - (Required) Cluster V2 generate name. The pattern to generate machine config name. e.g generate_name=\"prod-pool1\" will generate \"nc-prod-pool1-?????\" name computed at `name` attribute (string) +* `fleet_namespace` - (Optional) Cluster V2 fleet namespace +* `amazonec2_config` - (Optional) AWS config for the Machine Config V2. Conflicts with `azure_config`, `digitalocean_config`, `linode_config`, `openstack_config` and `vsphere_config` (list maxitems:1) +* `azure_config` - (Optional) Azure config for the Machine Config V2. Conflicts with `amazonec2_config`, `digitalocean_config`, `linode_config`, `openstack_config` and `vsphere_config` (list maxitems:1) +* `digitalocean_config` - (Optional) Digitalocean config for the Machine Config V2. 
Conflicts with `amazonec2_config`, `azure_config`, `linode_config`, `openstack_config` and `vsphere_config` (list maxitems:1) +* `linode_config` - (Optional) Linode config for the Machine Config V2. Conflicts with `amazonec2_config`, `azure_config`, `digitalocean_config`, `openstack_config` and `vsphere_config` (list maxitems:1) +* `openstack_config` - (Optional) Openstack config for the Machine Config V2. Conflicts with `amazonec2_config`, `azure_config`, `digitalocean_config`, `linode_config` and `vsphere_config` (list maxitems:1) +* `vsphere_config` - (Optional) vSphere config for the Machine Config V2. Conflicts with `amazonec2_config`, `azure_config`, `digitalocean_config`, `linode_config` and `openstack_config` (list maxitems:1) +* `annotations` - (Optional) Annotations for Machine Config V2 object (map) +* `labels` - (Optional/Computed) Labels for Machine Config V2 object (map) + +**Note** `labels` and `node_taints` will be applied to nodes deployed using the Machine Config V2 + +## Attributes Reference + +The following attributes are exported: + +* `id` - (Computed) The ID of the resource (string) +* `kind` - (Computed) The machine config kind (string) +* `name` - (Computed) The machine config name (string) + +## Nested blocks + +### `amazonec2_config` + +#### Arguments + +* `ami` - (Required) AWS machine image (string) +* `region` - (Required) AWS region. (string) +* `security_group` - (Required) AWS VPC security group. (list) +* `subnet_id` - (Required) AWS VPC subnet id (string) +* `vpc_id` - (Required) AWS VPC id. (string) +* `zone` - (Required) AWS zone for instance (i.e. a,b,c,d,e) (string) +* `block_duration_minutes` - (Optional) AWS spot instance duration in minutes (60, 120, 180, 240, 300, or 360). Default `0` (string) +* `device_name` - (Optional) AWS root device name. Default `/dev/sda1` (string) +* `encrypt_ebs_volume` - (Optional) Encrypt EBS volume. 
Default `false` (bool) +* `endpoint` - (Optional) Optional endpoint URL (hostname only or fully qualified URI) (string) +* `http_endpoint` - (Optional) Enables or disables the HTTP metadata endpoint on your instances (string) +* `http_tokens` - (Optional) The state of token usage for your instance metadata requests (string) +* `iam_instance_profile` - (Optional) AWS IAM Instance Profile (string) +* `insecure_transport` - (Optional) Disable SSL when sending requests (bool) +* `instance_type` - (Optional) AWS instance type. Default `t3a.medium` (string) +* `keypair_name` - (Optional) AWS keypair to use; requires --amazonec2-ssh-keypath (string) +* `kms_key` - (Optional) Custom KMS key ID using the AWS Managed CMK (string) +* `monitoring` - (Optional) Set this flag to enable CloudWatch monitoring. Deafult `false` (bool) +* `open_port` - (Optional) Make the specified port number accessible from the Internet. (list) +* `private_address_only` - (Optional) Only use a private IP address. Default `false` (bool) +* `request_spot_instance` - (Optional) Set this flag to request spot instance. Default `false` (bool) +* `retries` - (Optional) Set retry count for recoverable failures (use -1 to disable). Default `5` (string) +* `root_size` - (Optional) AWS root disk size (in GB). Default `16` (string) +* `security_group_readonly` - (Optional) Skip adding default rules to security groups (bool) +* `session_token` - (Optional/Sensitive) AWS Session Token (string) +* `spot_price` - (Optional) AWS spot instance bid price (in dollar). Default `0.50` (string) +* `ssh_key_contents` - (Optional/Sensitive) SSH Key for Instance (string) +* `ssh_user` - (Optional) Set the name of the ssh user (string) +* `tags` - (Optional) AWS Tags (e.g. key1,value1,key2,value2) (string) +* `use_ebs_optimized_instance` - (Optional) Create an EBS optimized instance. Default `false` (bool) +* `use_private_address` - (Optional) Force the usage of private IP address. 
Default `false` (bool) +* `userdata` - (Optional) Path to file with cloud-init user data (string) +* `volume_type` - (Optional) Amazon EBS volume type. Default `gp2` (string) + +### `azure_config` + +#### Arguments + +* `client_id` - (Optional/Sensitive) Azure Service Principal Account ID. Mandatory on Rancher v2.0.x and v2.1.x. Use `rancher2_cloud_credential` from Rancher v2.2.x (string) +* `client_secret` - (Optional/Sensitive) Azure Service Principal Account password. Mandatory on Rancher v2.0.x and v2.1.x. Use `rancher2_cloud_credential` from Rancher v2.2.x (string) +* `subscription_id` - (Optional/Sensitive) Azure Subscription ID. Mandatory on Rancher v2.0.x and v2.1.x. Use `rancher2_cloud_credential` from Rancher v2.2.x (string) +* `availability_set` - (Optional) Azure Availability Set to place the virtual machine into. Default `docker-machine` (string) +* `custom_data` - (Optional) Path to file with custom-data (string) +* `disk_size` - (Optional) Disk size if using managed disk. Just for Rancher v2.3.x and above. Default `30` (string) +* `dns` - (Optional) A unique DNS label for the public IP address (string) +* `docker_port` - (Optional) Port number for Docker engine. Default `2376` (string) +* `environment` - (Optional) Azure environment (e.g. AzurePublicCloud, AzureChinaCloud). Default `AzurePublicCloud` (string) +* `fault_domain_count` - (Optional) Fault domain count to use for availability set. Default `3` (string) +* `image` - (Optional) Azure virtual machine OS image. Default `canonical:UbuntuServer:18.04-LTS:latest` (string) +* `location` - (Optional) Azure region to create the virtual machine. Default `westus` (string) +* `managed_disks` - (Optional) Configures VM and availability set for managed disks. Just for Rancher v2.3.x and above. Default `false` (bool) +* `no_public_ip` - (Optional) Do not create a public IP address for the machine. 
Default `false` (bool) +* `nsg` - (Optional) Azure Network Security Group to assign this node to (accepts either a name or resource ID, default is to create a new NSG for each machine). Default `docker-machine-nsg` (string) +* `open_port` - (Optional) Make the specified port number accessible from the Internet. (list) +* `private_ip_address` - (Optional) Specify a static private IP address for the machine. (string) +* `resource_group` - (Optional) Azure Resource Group name (will be created if missing). Default `docker-machine` (string) +* `size` - (Optional) Size for Azure Virtual Machine. Default `Standard_A2` (string) +* `ssh_user` - (Optional) Username for SSH login (string) +* `static_public_ip` - (Optional) Assign a static public IP address to the machine. Default `false` (bool) +* `storage_type` - (Optional) Type of Storage Account to host the OS Disk for the machine. Default `Standard_LRS` (string) +* `subnet` - (Optional) Azure Subnet Name to be used within the Virtual Network. Default `docker-machine` (string) +* `subnet_prefix` - (Optional) Private CIDR block to be used for the new subnet, should comply RFC 1918. Default `192.168.0.0/16` (string) +* `subscription_id` - (Optional) Azure Subscription ID (string) +* `tenant_id` - (Optional) Azure Tenant ID (string) +* `update_domain_count` - (Optional) Update domain count to use for availability set. Default `5` (string) +* `use_private_ip` - (Optional) Use private IP address of the machine to connect. Default `false` (bool) +* `vnet` - (Optional) Azure Virtual Network name to connect the virtual machine (in [resourcegroup:]name format). Default `docker-machine-vnet` (string) + +### `digitalocean_config` + +#### Arguments + +* `access_token` - (Optional/Sensitive) Digital Ocean access token. Mandatory on Rancher v2.0.x and v2.1.x. Use `rancher2_cloud_credential` from Rancher v2.2.x (string) +* `backups` - (Optional) Enable backups for droplet. 
Default `false` (bool) +* `image` - (Optional) Digital Ocean Image. Default `ubuntu-16-04-x64` (string) +* `ipv6` - (Optional) Enable ipv6 for droplet. Default `false` (bool) +* `monitoring` - (Optional) Enable monitoring for droplet. Default `false` (bool) +* `private_networking` - (Optional) Enable private networking for droplet. Default `false` (bool) +* `region` - (Optional) Digital Ocean region. Default `nyc3` (string) +* `size` - (Optional) Digital Ocean size. Default `s-1vcpu-1gb` (string) +* `ssh_key_contents` - (Optional/Sensitive) SSH private key contents (string) +* `ssh_key_fingerprint` - (Optional/Sensitive) SSH key fingerprint (string) +* `ssh_port` - (Optional) SSH port. Default `22` (string) +* `ssh_user` - (Optional) SSH username. Default `root` (string) +* `tags` - (Optional) Comma-separated list of tags to apply to the Droplet (string) +* `userdata` - (Optional) Path to file with cloud-init user-data (string) + +### `linode_config` + +#### Arguments + +* `authorized_users` - (Optional) Linode user accounts (separated by commas) whose Linode SSH keys will be permitted root access to the created node. (string) +* `create_private_ip` - (Optional) Create private IP for the instance. Default `false` (bool) +* `docker_port` - (Optional) Docker Port. Default `2376` (string) +* `image` - (Optional) Specifies the Linode Instance image which determines the OS distribution and base files. Default `linode/ubuntu18.04` (string) +* `instance_type` - (Optional) Specifies the Linode Instance type which determines CPU, memory, disk size, etc. Default `g6-standard-4` (string) +* `label` - (Optional) Linode Instance Label. (string) +* `region` - (Optional) Specifies the region (location) of the Linode instance. Default `us-east` (string) +* `root_pass` - (Optional/Sensitive) Root Password (string) +* `ssh_port` - (Optional) SSH port. Default `22` (string) +* `ssh_user` - (Optional) SSH username. 
Default `root` (string) +* `stackscript` - (Optional) Specifies the Linode StackScript to use to create the instance. (string) +* `stackscript_data` - (Optional) A JSON string specifying data for the selected StackScript. (string) +* `swap_size` - (Optional) Linode Instance Swap Size (MB). Default `512` (string) +* `tags` - (Optional) A comma separated list of tags to apply to the Linode resource (string) +* `token` - (Optional/Sensitive) Linode API token. Mandatory on Rancher v2.0.x and v2.1.x. Use `rancher2_cloud_credential` from Rancher v2.2.x (string) +* `ua_prefix` - (Optional) Prefix the User-Agent in Linode API calls with some 'product/version' (string) + +### `openstack_config` + +#### Arguments + +* `auth_url` - (Required) OpenStack authentication URL (string) +* `availability_zone` - (Required) OpenStack availability zone (string) +* `region` - (Required) OpenStack region name (string) +* `username` - (Required++) OpenStack username (string) +* `active_timeout` - (Optional) OpenStack active timeout Default `200` (string) +* `cacert` - (Optional) CA certificate bundle to verify against (string) +* `config_drive` - (Optional) Enables the OpenStack config drive for the instance. Default `false` (bool) +* `domain_id` - (Required++) OpenStack domain ID. Identity v3 only. Conflicts with `domain_name` (string) +* `domain_name` - (Required++) OpenStack domain name. Identity v3 only. Conflicts with `domain_id` (string) +* `endpoint_type` - (Optional) OpenStack endpoint type. adminURL, internalURL or publicURL (string) +* `flavor_id` - (Required+) OpenStack flavor id to use for the instance. Conflicts with `flavor_name` (string) +* `flavor_name` - (Required+) OpenStack flavor name to use for the instance. Conflicts with `flavor_id` (string) +* `floating_ip_pool` - (Optional) OpenStack floating IP pool to get an IP from to assign to the instance (string) +* `image_id` - (Required+) OpenStack image id to use for the instance. 
Conflicts with `image_name` (string) +* `image_name` - (Required+) OpenStack image name to use for the instance. Conflicts with `image_id` (string) +* `insecure` - (Optional) Disable TLS credential checking. Default `false` (bool) +* `ip_version` - (Optional) OpenStack version of IP address assigned for the machine Default `4` (string) +* `keypair_name` - (Optional) OpenStack keypair to use to SSH to the instance (string) +* `net_id` - (Required+) OpenStack network id the machine will be connected on. Conflicts with `net_name` (string) +* `net_name` - (Required+) OpenStack network name the machine will be connected on. Conflicts with `net_id` (string) +* `nova_network` - (Optional) Use the nova networking services instead of neutron (string) +* `password` - (Optional/Sensitive) OpenStack password. Mandatory on Rancher v2.0.x and v2.1.x. Use `rancher2_cloud_credential` from Rancher v2.2.x (string) +* `private_key_file` - (Optional/Sensitive) Private key content to use for SSH (string) +* `sec_groups` - (Optional) OpenStack comma separated security groups for the machine (string) +* `ssh_port` - (Optional) OpenStack SSH port * Default `22` (string) +* `ssh_user` - (Optional) OpenStack SSH user * Default: `root` (string) +* `tenant_id` - (Required++) OpenStack tenant id. Conflicts with `tenant_name` (string) +* `tenant_name` - (Required++) OpenStack tenant name. Conflicts with `tenant_id` (string) +* `tenant_domain_id` - (Required++) OpenStack tenant domain id. Conflicts with `tenant_domain_name` (string) +* `tenant_domain_name` - (Required++) OpenStack tenant domain name. Conflicts with `tenant_domain_id` (string) +* `user_data_file` - (Optional) File containing an openstack userdata script (string) +* `user_domain_id` - (Required++) OpenStack user domain id. Conflicts with `user_domain_name` (string) +* `user_domain_name` - (Required++) OpenStack user domain name. 
Conflicts with `user_domain_id` (string) +* `application_credential_id` - (Optional) OpenStack application credential id. Conflicts with `application_credential_name` (string) +* `application_credential_name` - (Optional) OpenStack application credential name. Conflicts with `application_credential_id` (string) +* `application_credential_secret` - (Optional) OpenStack application credential secret (string) +* `boot_from_volume` - (Optional) Enable booting from volume. Default is `false` (bool) +* `volume_size` - (Optional) OpenStack volume size (GiB). Required when `boot_from_volume` is `true` (string) +* `volume_type` - (Optional) OpenStack volume type. Required when `boot_from_volume` is `true` and openstack cloud does not have a default volume type (string) +* `volume_id` - (Optional) OpenStack volume id of existing volume. Applicable only when `boot_from_volume` is `true` (string) +* `volume_name` - (Optional) OpenStack volume name of existing volume. Applicable only when `boot_from_volume` is `true` (string) +* `volume_device_path` - (Optional) OpenStack volume device path (attaching). Applicable only when `boot_from_volume` is `true`. Omit for auto `/dev/vdb`. (string) +> **Note**: `Required+` denotes that either the _name or _id is required but you cannot use both. +> **Note**: `Required++` denotes that either the _name or _id is required unless `application_credential_id` is defined. + +### `vsphere_config` + +#### Arguments + +* `boot2docker_url` - (Optional) vSphere URL for boot2docker iso image. 
Default `https://releases.rancher.com/os/latest/rancheros-vmware.iso` (string) +* `cfgparam` - (Optional) vSphere vm configuration parameters (used for guestinfo) (list) +* `clone_from` - (Optional) If you choose creation type vm (clone vm) a name of what vm you want to clone is required (string) +* `cloud_config` - (Optional) Filepath to a cloud-config yaml file to put into the ISO user-data (string) +* `cloudinit` - (Optional) vSphere cloud-init file or url to set in the guestinfo (string) +* `content_library` - (Optional) If you choose to clone from a content library template specify the name of the library (string) +* `cpu_count` - (Optional) vSphere CPU number for docker VM. Default `2` (string) +* `creation_type` - (Optional) Creation type when creating a new virtual machine. Supported values: vm, template, library, legacy. Default `legacy` (string) +* `custom_attributes` - (Optional) vSphere custom attributes, format key/value e.g. `200=my custom value` (List) +* `datacenter` - (Optional) vSphere datacenter for docker VM (string) +* `datastore` - (Optional) vSphere datastore for docker VM (string) +* `datastore_cluster` - (Optional) vSphere datastore cluster for virtual machine (string) +* `disk_size` - (Optional) vSphere size of disk for docker VM (in MB). Default `20480` (string) +* `folder` - (Optional) vSphere folder for the docker VM. This folder must already exist in the datacenter (string) +* `hostsystem` - (Optional) vSphere compute resource where the docker VM will be instantiated. This can be omitted if using a cluster with DRS (string) +* `memory_size` - (Optional) vSphere size of memory for docker VM (in MB). Default `2048` (string) +* `network` - (Optional) vSphere network where the docker VM will be attached (list) +* `pool` - (Optional) vSphere resource pool for docker VM (string) +* `ssh_password` - (Optional) If using a non-B2D image you can specify the ssh password. 
Default `tcuser` (string) +* `ssh_port` - (Optional) If using a non-B2D image you can specify the ssh port. Default `22` (string) +* `ssh_user` - (Optional) If using a non-B2D image you can specify the ssh user. Default `docker`. (string) +* `ssh_user_group` - (Optional) If using a non-B2D image the uploaded keys will need chown'ed. Default `staff` (string) +* `tags` - (Optional) vSphere tags id e.g. `urn:xxx` (list) +* `vapp_ip_allocation_policy` - (Optional) vSphere vApp IP allocation policy. Supported values are: `dhcp`, `fixed`, `transient` and `fixedAllocated` (string) +* `vapp_ip_protocol` - (Optional) vSphere vApp IP protocol for this deployment. Supported values are: `IPv4` and `IPv6` (string) +* `vapp_property` - (Optional) vSphere vApp properties (list) +* `vapp_transport` - (Optional) vSphere OVF environment transports to use for properties. Supported values are: `iso` and `com.vmware.guestInfo` (string) +* `vcenter` - (Optional/Sensitive) vSphere IP/hostname for vCenter (string) +* `vcenter_port` - (Optional/Sensitive) vSphere Port for vCenter Default `443` (string) + +## Timeouts + +`rancher2_machine_config_v2` provides the following +[Timeouts](https://www.terraform.io/docs/configuration/resources.html#operation-timeouts) configuration options: + +- `create` - (Default `10 minutes`) Used for creating machine configs. +- `update` - (Default `10 minutes`) Used for machine config modifications. +- `delete` - (Default `10 minutes`) Used for deleting machine configs. 
diff --git a/rancher2/provider.go b/rancher2/provider.go index f1a1bde24..5c26303c0 100644 --- a/rancher2/provider.go +++ b/rancher2/provider.go @@ -138,6 +138,7 @@ func Provider() terraform.ResourceProvider { "rancher2_global_dns_provider": resourceRancher2GlobalDNSProvider(), "rancher2_global_role": resourceRancher2GlobalRole(), "rancher2_global_role_binding": resourceRancher2GlobalRoleBinding(), + "rancher2_machine_config_v2": resourceRancher2MachineConfigV2(), "rancher2_multi_cluster_app": resourceRancher2MultiClusterApp(), "rancher2_namespace": resourceRancher2Namespace(), "rancher2_node_driver": resourceRancher2NodeDriver(), diff --git a/rancher2/resource_rancher2_machine_config_v2.go b/rancher2/resource_rancher2_machine_config_v2.go new file mode 100644 index 000000000..1e54d5c9d --- /dev/null +++ b/rancher2/resource_rancher2_machine_config_v2.go @@ -0,0 +1,364 @@ +package rancher2 + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + norman "github.com/rancher/norman/types" +) + +func resourceRancher2MachineConfigV2() *schema.Resource { + return &schema.Resource{ + Create: resourceRancher2MachineConfigV2Create, + Read: resourceRancher2MachineConfigV2Read, + Update: resourceRancher2MachineConfigV2Update, + Delete: resourceRancher2MachineConfigV2Delete, + Schema: machineConfigV2Fields(), + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + } +} + +func resourceRancher2MachineConfigV2Create(d *schema.ResourceData, meta interface{}) error { + name := d.Get("name").(string) + obj := expandMachineConfigV2(d) + + log.Printf("[INFO] Creating Machine Config V2 %s kind %s", name, obj.TypeMeta.Kind) + + newObj, err := createMachineConfigV2(meta.(*Config), obj) + if err != nil { + return err + } + 
+ d.SetId(newObj.ID) + d.Set("kind", newObj.TypeMeta.Kind) + stateConf := &resource.StateChangeConf{ + Pending: []string{}, + Target: []string{"active"}, + Refresh: machineConfigV2StateRefreshFunc(meta, newObj.ID, newObj.TypeMeta.Kind), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 1 * time.Second, + MinTimeout: 3 * time.Second, + } + _, waitErr := stateConf.WaitForState() + if waitErr != nil { + return fmt.Errorf("[ERROR] waiting for machine config (%s) to be active: %s", newObj.ID, waitErr) + } + + return resourceRancher2MachineConfigV2Read(d, meta) +} + +func resourceRancher2MachineConfigV2Read(d *schema.ResourceData, meta interface{}) error { + log.Printf("[INFO] Refreshing Machine Config V2 %s", d.Id()) + + kind := d.Get("kind").(string) + obj, err := getMachineConfigV2ByID(meta.(*Config), d.Id(), kind) + if err != nil { + if IsNotFound(err) || IsForbidden(err) { + log.Printf("[INFO] Machine Config V2 %s not found", d.Id()) + d.SetId("") + return nil + } + return err + } + return flattenMachineConfigV2(d, obj) +} + +func resourceRancher2MachineConfigV2Update(d *schema.ResourceData, meta interface{}) error { + obj := expandMachineConfigV2(d) + log.Printf("[INFO] Updating Machine Config V2 %s", d.Id()) + + newObj, err := updateMachineConfigV2(meta.(*Config), obj) + if err != nil { + return err + } + d.SetId(newObj.ID) + stateConf := &resource.StateChangeConf{ + Pending: []string{}, + Target: []string{"active"}, + Refresh: machineConfigV2StateRefreshFunc(meta, newObj.ID, newObj.TypeMeta.Kind), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 1 * time.Second, + MinTimeout: 3 * time.Second, + } + _, waitErr := stateConf.WaitForState() + if waitErr != nil { + return fmt.Errorf("[ERROR] waiting for machine config (%s) to be active: %s", newObj.ID, waitErr) + } + return resourceRancher2MachineConfigV2Read(d, meta) +} + +func resourceRancher2MachineConfigV2Delete(d *schema.ResourceData, meta interface{}) error { + name := d.Get("name").(string) + kind := 
d.Get("kind").(string) + log.Printf("[INFO] Deleting Machine Config V2 %s", name) + + obj, err := getMachineConfigV2ByID(meta.(*Config), d.Id(), kind) + if err != nil { + if IsNotFound(err) || IsForbidden(err) { + d.SetId("") + return nil + } + } + err = deleteMachineConfigV2(meta.(*Config), obj) + if err != nil { + return err + } + stateConf := &resource.StateChangeConf{ + Pending: []string{}, + Target: []string{"removed"}, + Refresh: machineConfigV2StateRefreshFunc(meta, obj.ID, obj.TypeMeta.Kind), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 1 * time.Second, + MinTimeout: 3 * time.Second, + } + _, waitErr := stateConf.WaitForState() + if waitErr != nil { + return fmt.Errorf("[ERROR] waiting for machine config v2 (%s) to be removed: %s", obj.ID, waitErr) + } + d.SetId("") + return nil +} + +// machineConfigV2StateRefreshFunc returns a resource.StateRefreshFunc, used to watch a Rancher Machine Config v2. +func machineConfigV2StateRefreshFunc(meta interface{}, objID, kind string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + obj, err := getMachineConfigV2ByID(meta.(*Config), objID, kind) + if err != nil { + if IsNotFound(err) || IsForbidden(err) { + return obj, "removed", nil + } + return nil, "", err + } + return obj, "active", nil + } +} + +// Rancher2 Machine Config V2 API CRUD functions +func createMachineConfigV2(c *Config, obj *MachineConfigV2) (*MachineConfigV2, error) { + if c == nil { + return nil, fmt.Errorf("Creating Machine Config V2: Provider config is nil") + } + if obj == nil { + return nil, fmt.Errorf("Creating Machine Config V2: Machine Config V2 is nil") + } + var err error + out := &MachineConfigV2{} + kind := obj.TypeMeta.Kind + switch kind { + case machineConfigV2Amazonec2Kind: + resp := &MachineConfigV2Amazonec2{} + err = c.createObjectV2(rancher2DefaultLocalClusterID, machineConfigV2Amazonec2APIType, obj.Amazonec2Config, resp) + out.Amazonec2Config = resp + out.ID = resp.ID + out.TypeMeta = resp.TypeMeta 
+ out.ObjectMeta = resp.ObjectMeta + case machineConfigV2AzureKind: + resp := &MachineConfigV2Azure{} + err = c.createObjectV2(rancher2DefaultLocalClusterID, machineConfigV2AzureAPIType, obj.AzureConfig, resp) + out.AzureConfig = resp + out.ID = resp.ID + out.TypeMeta = resp.TypeMeta + out.ObjectMeta = resp.ObjectMeta + case machineConfigV2DigitaloceanKind: + resp := &MachineConfigV2Digitalocean{} + err = c.createObjectV2(rancher2DefaultLocalClusterID, machineConfigV2DigitaloceanAPIType, obj.DigitaloceanConfig, resp) + out.DigitaloceanConfig = resp + out.ID = resp.ID + out.TypeMeta = resp.TypeMeta + out.ObjectMeta = resp.ObjectMeta + case machineConfigV2LinodeKind: + resp := &MachineConfigV2Linode{} + err = c.createObjectV2(rancher2DefaultLocalClusterID, machineConfigV2LinodeAPIType, obj.LinodeConfig, resp) + out.LinodeConfig = resp + out.ID = resp.ID + out.TypeMeta = resp.TypeMeta + out.ObjectMeta = resp.ObjectMeta + case machineConfigV2OpenstackKind: + resp := &MachineConfigV2Openstack{} + err = c.createObjectV2(rancher2DefaultLocalClusterID, machineConfigV2OpenstackAPIType, obj.OpenstackConfig, resp) + out.OpenstackConfig = resp + out.ID = resp.ID + out.TypeMeta = resp.TypeMeta + out.ObjectMeta = resp.ObjectMeta + case machineConfigV2VmwarevsphereKind: + resp := &MachineConfigV2Vmwarevsphere{} + err = c.createObjectV2(rancher2DefaultLocalClusterID, machineConfigV2VmwarevsphereAPIType, obj.VmwarevsphereConfig, resp) + out.VmwarevsphereConfig = resp + out.ID = resp.ID + out.TypeMeta = resp.TypeMeta + out.ObjectMeta = resp.ObjectMeta + default: + return nil, fmt.Errorf("[ERROR] Unsupported driver on node template: %s", kind) + } + if err != nil { + return nil, fmt.Errorf("Creating Machine Config V2: %s", err) + } + return out, nil +} + +func deleteMachineConfigV2(c *Config, obj *MachineConfigV2) error { + if c == nil { + return fmt.Errorf("Deleting Machine Config V2: Provider config is nil") + } + if obj == nil { + return fmt.Errorf("Deleting Machine Config V2: 
Machine Config V2 is nil") + } + resource := &norman.Resource{ + ID: obj.ID, + Links: obj.Links, + Type: obj.Type, + Actions: obj.Actions, + } + return c.deleteObjectV2(rancher2DefaultLocalClusterID, resource) +} + +func getMachineConfigV2ByID(c *Config, id, kind string) (*MachineConfigV2, error) { + if c == nil { + return nil, fmt.Errorf("Getting Machine Config V2: Provider config is nil") + } + if len(id) == 0 { + return nil, fmt.Errorf("Getting Machine Config V2: Machine Config V2 ID is empty") + } + var err error + out := &MachineConfigV2{} + switch kind { + case machineConfigV2Amazonec2Kind: + resp := &MachineConfigV2Amazonec2{} + err = c.getObjectV2ByID(rancher2DefaultLocalClusterID, id, machineConfigV2Amazonec2APIType, resp) + out.Amazonec2Config = resp + out.ID = resp.ID + out.Links = resp.Links + out.Actions = resp.Actions + out.Type = resp.Type + out.TypeMeta = resp.TypeMeta + out.ObjectMeta = resp.ObjectMeta + case machineConfigV2AzureKind: + resp := &MachineConfigV2Azure{} + err = c.getObjectV2ByID(rancher2DefaultLocalClusterID, id, machineConfigV2AzureAPIType, resp) + out.AzureConfig = resp + out.ID = resp.ID + out.Links = resp.Links + out.Actions = resp.Actions + out.Type = resp.Type + out.TypeMeta = resp.TypeMeta + out.ObjectMeta = resp.ObjectMeta + case machineConfigV2DigitaloceanKind: + resp := &MachineConfigV2Digitalocean{} + err = c.getObjectV2ByID(rancher2DefaultLocalClusterID, id, machineConfigV2DigitaloceanAPIType, resp) + out.DigitaloceanConfig = resp + out.ID = resp.ID + out.Links = resp.Links + out.Actions = resp.Actions + out.Type = resp.Type + out.TypeMeta = resp.TypeMeta + out.ObjectMeta = resp.ObjectMeta + case machineConfigV2LinodeKind: + resp := &MachineConfigV2Linode{} + err = c.getObjectV2ByID(rancher2DefaultLocalClusterID, id, machineConfigV2LinodeAPIType, resp) + out.LinodeConfig = resp + out.ID = resp.ID + out.Links = resp.Links + out.Actions = resp.Actions + out.Type = resp.Type + out.TypeMeta = resp.TypeMeta + out.ObjectMeta = 
resp.ObjectMeta + case machineConfigV2OpenstackKind: + resp := &MachineConfigV2Openstack{} + err = c.getObjectV2ByID(rancher2DefaultLocalClusterID, id, machineConfigV2OpenstackAPIType, resp) + out.OpenstackConfig = resp + out.ID = resp.ID + out.Links = resp.Links + out.Actions = resp.Actions + out.Type = resp.Type + out.TypeMeta = resp.TypeMeta + out.ObjectMeta = resp.ObjectMeta + case machineConfigV2VmwarevsphereKind: + resp := &MachineConfigV2Vmwarevsphere{} + err = c.getObjectV2ByID(rancher2DefaultLocalClusterID, id, machineConfigV2VmwarevsphereAPIType, resp) + out.VmwarevsphereConfig = resp + out.ID = resp.ID + out.Links = resp.Links + out.Actions = resp.Actions + out.Type = resp.Type + out.TypeMeta = resp.TypeMeta + out.ObjectMeta = resp.ObjectMeta + default: + return nil, fmt.Errorf("[ERROR] Unsupported driver on node template: %s", kind) + } + if err != nil { + if !IsServerError(err) && !IsNotFound(err) && !IsForbidden(err) { + return nil, fmt.Errorf("Getting Machine Config V2: %s", err) + } + return nil, err + } + return out, nil +} + +func updateMachineConfigV2(c *Config, obj *MachineConfigV2) (*MachineConfigV2, error) { + if c == nil { + return nil, fmt.Errorf("Updating Machine Config V2: Provider config is nil") + } + if obj == nil { + return nil, fmt.Errorf("Updating Machine Config V2: Machine Config V2 is nil") + } + var err error + out := &MachineConfigV2{} + kind := obj.TypeMeta.Kind + switch kind { + case machineConfigV2Amazonec2Kind: + resp := &MachineConfigV2Amazonec2{} + err = c.updateObjectV2(rancher2DefaultLocalClusterID, obj.ID, machineConfigV2Amazonec2APIType, obj.Amazonec2Config, resp) + out.Amazonec2Config = resp + out.ID = resp.ID + out.TypeMeta = resp.TypeMeta + out.ObjectMeta = resp.ObjectMeta + case machineConfigV2AzureKind: + resp := &MachineConfigV2Azure{} + err = c.updateObjectV2(rancher2DefaultLocalClusterID, obj.ID, machineConfigV2AzureAPIType, obj.AzureConfig, resp) + out.AzureConfig = resp + out.ID = resp.ID + out.TypeMeta = 
resp.TypeMeta + out.ObjectMeta = resp.ObjectMeta + case machineConfigV2DigitaloceanKind: + resp := &MachineConfigV2Digitalocean{} + err = c.updateObjectV2(rancher2DefaultLocalClusterID, obj.ID, machineConfigV2DigitaloceanAPIType, obj.DigitaloceanConfig, resp) + out.DigitaloceanConfig = resp + out.ID = resp.ID + out.TypeMeta = resp.TypeMeta + out.ObjectMeta = resp.ObjectMeta + case machineConfigV2LinodeKind: + resp := &MachineConfigV2Linode{} + err = c.updateObjectV2(rancher2DefaultLocalClusterID, obj.ID, machineConfigV2LinodeAPIType, obj.LinodeConfig, resp) + out.LinodeConfig = resp + out.ID = resp.ID + out.TypeMeta = resp.TypeMeta + out.ObjectMeta = resp.ObjectMeta + case machineConfigV2OpenstackKind: + resp := &MachineConfigV2Openstack{} + err = c.updateObjectV2(rancher2DefaultLocalClusterID, obj.ID, machineConfigV2OpenstackAPIType, obj.OpenstackConfig, resp) + out.OpenstackConfig = resp + out.ID = resp.ID + out.TypeMeta = resp.TypeMeta + out.ObjectMeta = resp.ObjectMeta + case machineConfigV2VmwarevsphereKind: + resp := &MachineConfigV2Vmwarevsphere{} + err = c.updateObjectV2(rancher2DefaultLocalClusterID, obj.ID, machineConfigV2VmwarevsphereAPIType, obj.VmwarevsphereConfig, resp) + out.VmwarevsphereConfig = resp + out.ID = resp.ID + out.TypeMeta = resp.TypeMeta + out.ObjectMeta = resp.ObjectMeta + default: + return nil, fmt.Errorf("[ERROR] Unsupported driver on node template: %s", kind) + } + if err != nil { + return nil, fmt.Errorf("Creating Machine Config V2: %s", err) + } + return out, err +} diff --git a/rancher2/schema_machine_config_v2.go b/rancher2/schema_machine_config_v2.go new file mode 100644 index 000000000..a24e6997d --- /dev/null +++ b/rancher2/schema_machine_config_v2.go @@ -0,0 +1,92 @@ +package rancher2 + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +//Schemas + +func machineConfigV2Fields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "generate_name": { + Type: schema.TypeString, + 
Required: true, + ForceNew: true, + Description: "Cluster V2 generate name. The pattern to generate machine config name. e.g generate_name=\"prod-pool1\" will generate \"nc-prod-pool1-?????\" names", + }, + "fleet_namespace": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "fleet-default", + }, + "amazonec2_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ConflictsWith: []string{"azure_config", "digitalocean_config", "linode_config", "openstack_config", "vsphere_config"}, + Elem: &schema.Resource{ + Schema: machineConfigV2Amazonec2Fields(), + }, + }, + "azure_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ConflictsWith: []string{"amazonec2_config", "digitalocean_config", "linode_config", "openstack_config", "vsphere_config"}, + Elem: &schema.Resource{ + Schema: machineConfigV2AzureFields(), + }, + }, + "digitalocean_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ConflictsWith: []string{"amazonec2_config", "azure_config", "linode_config", "openstack_config", "vsphere_config"}, + Elem: &schema.Resource{ + Schema: machineConfigV2DigitaloceanFields(), + }, + }, + "kind": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "linode_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ConflictsWith: []string{"amazonec2_config", "azure_config", "digitalocean_config", "openstack_config", "vsphere_config"}, + Elem: &schema.Resource{ + Schema: machineConfigV2LinodeFields(), + }, + }, + "openstack_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ConflictsWith: []string{"amazonec2_config", "azure_config", "digitalocean_config", "linode_config", "vsphere_config"}, + Elem: &schema.Resource{ + Schema: machineConfigV2OpenstackFields(), + }, + }, + "vsphere_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ConflictsWith: []string{"amazonec2_config", 
"azure_config", "digitalocean_config", "linode_config", "openstack_config"}, + Elem: &schema.Resource{ + Schema: machineConfigV2VmwarevsphereFields(), + }, + }, + } + + for k, v := range commonAnnotationLabelFields() { + s[k] = v + } + + return s +} diff --git a/rancher2/schema_machine_config_v2_amazonec2.go b/rancher2/schema_machine_config_v2_amazonec2.go new file mode 100644 index 000000000..e3f016f30 --- /dev/null +++ b/rancher2/schema_machine_config_v2_amazonec2.go @@ -0,0 +1,214 @@ +package rancher2 + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +//Schemas + +func machineConfigV2Amazonec2Fields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "ami": { + Type: schema.TypeString, + Required: true, + Description: "AWS machine image", + }, + "region": { + Type: schema.TypeString, + Required: true, + Description: "AWS Region", + }, + "security_group": { + Type: schema.TypeList, + Required: true, + Description: "AWS VPC security group", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "subnet_id": { + Type: schema.TypeString, + Required: true, + Description: "AWS VPC subnet id", + }, + "vpc_id": { + Type: schema.TypeString, + Required: true, + Description: "AWS VPC id", + }, + "zone": { + Type: schema.TypeString, + Required: true, + Description: "AWS zone for instance (i.e. 
a,b,c,d,e)", + }, + "access_key": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: "AWS Access Key", + }, + "block_duration_minutes": { + Type: schema.TypeString, + Optional: true, + Default: "0", + Description: "AWS spot instance duration in minutes (60, 120, 180, 240, 300, or 360)", + }, + "device_name": { + Type: schema.TypeString, + Optional: true, + Description: "AWS root device name", + }, + "encrypt_ebs_volume": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Encrypt EBS volume", + }, + "endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "Optional endpoint URL (hostname only or fully qualified URI)", + }, + "http_endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "Enables or disables the HTTP metadata endpoint on your instances", + }, + "http_tokens": { + Type: schema.TypeString, + Optional: true, + Description: "The state of token usage for your instance metadata requests", + }, + "iam_instance_profile": { + Type: schema.TypeString, + Optional: true, + Description: "AWS IAM Instance Profile", + }, + "insecure_transport": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Disable SSL when sending requests", + }, + "instance_type": { + Type: schema.TypeString, + Optional: true, + Default: "t2.micro", + Description: "AWS instance type", + }, + "keypair_name": { + Type: schema.TypeString, + Optional: true, + Description: "AWS keypair to use; requires --amazonec2-ssh-keypath", + }, + "kms_key": { + Type: schema.TypeString, + Optional: true, + Description: "Custom KMS key ID using the AWS Managed CMK", + }, + "monitoring": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Set this flag to enable CloudWatch monitoring", + }, + "open_port": { + Type: schema.TypeList, + Optional: true, + Description: "Make the specified port number accessible from the Internet", + Elem: &schema.Schema{ + Type: 
schema.TypeString, + }, + }, + "private_address_only": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Only use a private IP address", + }, + "request_spot_instance": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Set this flag to request spot instance", + }, + "retries": { + Type: schema.TypeString, + Optional: true, + Default: "5", + Description: "Set retry count for recoverable failures (use -1 to disable)", + }, + "root_size": { + Type: schema.TypeString, + Optional: true, + Default: "16", + Description: "AWS root disk size (in GB)", + }, + "secret_key": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: "AWS Secret Key", + }, + "security_group_readonly": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Skip adding default rules to security groups", + }, + "session_token": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: "AWS Session Token", + }, + "spot_price": { + Type: schema.TypeString, + Optional: true, + Default: "0.50", + Description: "AWS spot instance bid price (in dollar)", + }, + "ssh_key_contents": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: "SSH Key file contents for sshKeyContents", + }, + "ssh_user": { + Type: schema.TypeString, + Optional: true, + Default: "ubuntu", + Description: "Set the name of the ssh user", + }, + "tags": { + Type: schema.TypeString, + Optional: true, + Description: "AWS Tags (e.g. 
key1,value1,key2,value2)", + }, + "use_ebs_optimized_instance": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Create an EBS optimized instance", + }, + "use_private_address": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Force the usage of private IP address", + }, + "userdata": { + Type: schema.TypeString, + Optional: true, + Description: "Path to file with cloud-init user data", + }, + "volume_type": { + Type: schema.TypeString, + Optional: true, + Default: "gp2", + Description: "Amazon EBS volume type", + }, + } + + return s +} diff --git a/rancher2/schema_machine_config_v2_azure.go b/rancher2/schema_machine_config_v2_azure.go new file mode 100644 index 000000000..79333cc62 --- /dev/null +++ b/rancher2/schema_machine_config_v2_azure.go @@ -0,0 +1,181 @@ +package rancher2 + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +//Schemas + +func machineConfigV2AzureFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "availability_set": { + Type: schema.TypeString, + Optional: true, + Default: "docker-machine", + Description: "Azure Availability Set to place the virtual machine into", + }, + "client_id": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: "Azure Service Principal Account ID (optional, browser auth is used if not specified)", + }, + "client_secret": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: "Azure Service Principal Account password (optional, browser auth is used if not specified)", + }, + "custom_data": { + Type: schema.TypeString, + Optional: true, + Description: "Path to file with custom-data", + }, + "disk_size": { + Type: schema.TypeString, + Optional: true, + Default: "30", + Description: "Disk size if using managed disk", + }, + "dns": { + Type: schema.TypeString, + Optional: true, + Description: "A unique DNS label for the public IP adddress", + }, + 
"docker_port": { + Type: schema.TypeString, + Optional: true, + Default: "2376", + Description: "Port number for Docker engine", + }, + "environment": { + Type: schema.TypeString, + Optional: true, + Default: "AzurePublicCloud", + Description: "Azure environment (e.g. AzurePublicCloud, AzureChinaCloud)", + }, + "fault_domain_count": { + Type: schema.TypeString, + Optional: true, + Default: "3", + Description: "Fault domain count to use for availability set", + }, + "image": { + Type: schema.TypeString, + Optional: true, + Default: "canonical:UbuntuServer:18.04-LTS:latest", + Description: "Azure virtual machine OS image", + }, + "location": { + Type: schema.TypeString, + Optional: true, + Default: "westus", + Description: "Azure region to create the virtual machine", + }, + "managed_disks": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Configures VM and availability set for managed disks", + }, + "no_public_ip": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Do not create a public IP address for the machine", + }, + "nsg": { + Type: schema.TypeString, + Optional: true, + Default: "docker-machine-nsg", + Description: "Azure Network Security Group to assign this node to (accepts either a name or resource ID, default is to create a new NSG for each machine)", + }, + "open_port": { + Type: schema.TypeList, + Optional: true, + Description: "Make the specified port number accessible from the Internet", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "private_ip_address": { + Type: schema.TypeString, + Optional: true, + Description: "Specify a static private IP address for the machine", + }, + "resource_group": { + Type: schema.TypeString, + Optional: true, + Default: "docker-machine", + Description: "Azure Resource Group name (will be created if missing)", + }, + "size": { + Type: schema.TypeString, + Optional: true, + Default: "Standard_D2_v2", + Description: "Size for Azure Virtual Machine", 
+ }, + "ssh_user": { + Type: schema.TypeString, + Optional: true, + Default: "docker-user", + Description: "Username for SSH login", + }, + "static_public_ip": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Assign a static public IP address to the machine", + }, + "storage_type": { + Type: schema.TypeString, + Optional: true, + Default: "Standard_LRS", + Description: "Type of Storage Account to host the OS Disk for the machine", + }, + "subnet": { + Type: schema.TypeString, + Optional: true, + Default: "docker-machine", + Description: "Azure Subnet Name to be used within the Virtual Network", + }, + "subnet_prefix": { + Type: schema.TypeString, + Optional: true, + Default: "192.168.0.0/16", + Description: "Private CIDR block to be used for the new subnet, should comply RFC 1918", + }, + "subscription_id": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: "Azure Subscription ID", + }, + "tenant_id": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: "Azure Tenant ID", + }, + "update_domain_count": { + Type: schema.TypeString, + Optional: true, + Default: "5", + Description: "Update domain count to use for availability set", + }, + "use_private_ip": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Use private IP address of the machine to connect", + }, + "vnet": { + Type: schema.TypeString, + Optional: true, + Default: "docker-machine-vnet", + Description: "Azure Virtual Network name to connect the virtual machine (in [resourcegroup:]name format)", + }, + } + + return s +} diff --git a/rancher2/schema_machine_config_v2_digitalocean.go b/rancher2/schema_machine_config_v2_digitalocean.go new file mode 100644 index 000000000..285c6b236 --- /dev/null +++ b/rancher2/schema_machine_config_v2_digitalocean.go @@ -0,0 +1,97 @@ +package rancher2 + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +//Schemas + +func 
machineConfigV2DigitaloceanFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "access_token": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: "Digital Ocean access token", + }, + "backups": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Enable backups for droplet", + }, + "image": { + Type: schema.TypeString, + Optional: true, + Default: "ubuntu-16-04-x64", + Description: "Digital Ocean Image", + }, + "ipv6": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Enable ipv6 for droplet", + }, + "monitoring": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Enable monitoring for droplet", + }, + "private_networking": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Enable private networking for droplet", + }, + "region": { + Type: schema.TypeString, + Optional: true, + Default: "nyc3", + Description: "Digital Ocean region", + }, + "size": { + Type: schema.TypeString, + Optional: true, + Default: "s-1vcpu-1gb", + Description: "Digital Ocean size", + }, + "ssh_key_contents": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: "SSH private key contents", + }, + "ssh_key_fingerprint": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: "SSH key fingerprint", + }, + "ssh_port": { + Type: schema.TypeString, + Optional: true, + Default: "22", + Description: "SSH port", + }, + "ssh_user": { + Type: schema.TypeString, + Optional: true, + Default: "root", + Description: "SSH username", + }, + "tags": { + Type: schema.TypeString, + Optional: true, + Description: "Comma-separated list of tags to apply to the Droplet", + }, + "userdata": { + Type: schema.TypeString, + Optional: true, + Default: "docker-user", + Description: "Path to file with cloud-init user-data", + }, + } + + return s +} diff --git a/rancher2/schema_machine_config_v2_linode.go 
b/rancher2/schema_machine_config_v2_linode.go new file mode 100644 index 000000000..ea07c6334 --- /dev/null +++ b/rancher2/schema_machine_config_v2_linode.go @@ -0,0 +1,104 @@ +package rancher2 + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +//Schemas + +func machineConfigV2LinodeFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "authorized_users": { + Type: schema.TypeString, + Optional: true, + Sensitive: false, + Description: "Linode user accounts (seperated by commas) whose Linode SSH keys will be permitted root access to the created node", + }, + "create_private_ip": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Create private IP for the instance", + }, + "docker_port": { + Type: schema.TypeString, + Optional: true, + Default: "2376", + Description: "Docker Port", + }, + "image": { + Type: schema.TypeString, + Optional: true, + Default: "linode/ubuntu18.04", + Description: "Specifies the Linode Instance image which determines the OS distribution and base files", + }, + "instance_type": { + Type: schema.TypeString, + Optional: true, + Default: "g6-standard-4", + Description: "Specifies the Linode Instance type which determines CPU, memory, disk size, etc.", + }, + "label": { + Type: schema.TypeString, + Optional: true, + Description: "Linode Instance Label", + }, + "region": { + Type: schema.TypeString, + Optional: true, + Default: "us-east", + Description: "Specifies the region (location) of the Linode instance", + }, + "root_pass": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: "Root Password", + }, + "ssh_port": { + Type: schema.TypeString, + Optional: true, + Default: "22", + Description: "Linode Instance SSH Port", + }, + "ssh_user": { + Type: schema.TypeString, + Optional: true, + Description: "Specifies the user as which docker-machine should log in to the Linode instance to install Docker.", + }, + "stackscript": { + Type: 
schema.TypeString, + Optional: true, + Description: "Specifies the Linode StackScript to use to create the instance", + }, + "stackscript_data": { + Type: schema.TypeString, + Optional: true, + Description: "A JSON string specifying data for the selected StackScript", + }, + "swap_size": { + Type: schema.TypeString, + Optional: true, + Default: "512", + Description: "Linode Instance Swap Size (MB)", + }, + "tags": { + Type: schema.TypeString, + Optional: true, + Description: "A comma separated list of tags to apply to the the Linode resource", + }, + "token": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: "Linode API Token", + }, + "ua_prefix": { + Type: schema.TypeString, + Optional: true, + Description: "Prefix the User-Agent in Linode API calls with some 'product/version'", + }, + } + + return s +} diff --git a/rancher2/schema_machine_config_v2_openstack.go b/rancher2/schema_machine_config_v2_openstack.go new file mode 100644 index 000000000..f01be1ad6 --- /dev/null +++ b/rancher2/schema_machine_config_v2_openstack.go @@ -0,0 +1,193 @@ +package rancher2 + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +//Schemas + +func machineConfigV2OpenstackFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "auth_url": { + Type: schema.TypeString, + Required: true, + }, + "availability_zone": { + Type: schema.TypeString, + Required: true, + }, + "region": { + Type: schema.TypeString, + Required: true, + }, + "username": { + Type: schema.TypeString, + Optional: true, + }, + "active_timeout": { + Type: schema.TypeString, + Optional: true, + Default: "200", + }, + "cacert": { + Type: schema.TypeString, + Optional: true, + }, + "config_drive": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "domain_id": { + Type: schema.TypeString, + Optional: true, + }, + "domain_name": { + Type: schema.TypeString, + Optional: true, + }, + "endpoint_type": { + Type: 
schema.TypeString, + Optional: true, + }, + "flavor_id": { + Type: schema.TypeString, + Optional: true, + }, + "flavor_name": { + Type: schema.TypeString, + Optional: true, + }, + "floating_ip_pool": { + Type: schema.TypeString, + Optional: true, + }, + "image_id": { + Type: schema.TypeString, + Optional: true, + }, + "image_name": { + Type: schema.TypeString, + Optional: true, + }, + "insecure": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "ip_version": { + Type: schema.TypeString, + Optional: true, + Default: "4", + }, + "keypair_name": { + Type: schema.TypeString, + Optional: true, + }, + "net_id": { + Type: schema.TypeString, + Optional: true, + }, + "net_name": { + Type: schema.TypeString, + Optional: true, + }, + "nova_network": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, + "private_key_file": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, + "sec_groups": { + Type: schema.TypeString, + Optional: true, + }, + "ssh_port": { + Type: schema.TypeString, + Optional: true, + Default: "22", + }, + "ssh_user": { + Type: schema.TypeString, + Optional: true, + Default: "root", + }, + "tenant_id": { + Type: schema.TypeString, + Optional: true, + }, + "tenant_name": { + Type: schema.TypeString, + Optional: true, + }, + "tenant_domain_id": { + Type: schema.TypeString, + Optional: true, + }, + "tenant_domain_name": { + Type: schema.TypeString, + Optional: true, + }, + "user_domain_id": { + Type: schema.TypeString, + Optional: true, + }, + "user_domain_name": { + Type: schema.TypeString, + Optional: true, + }, + "user_data_file": { + Type: schema.TypeString, + Optional: true, + }, + "application_credential_id": { + Type: schema.TypeString, + Optional: true, + }, + "application_credential_name": { + Type: schema.TypeString, + Optional: true, + }, + "application_credential_secret": { + Type: schema.TypeString, + Optional: 
true, + Sensitive: true, + }, + "boot_from_volume": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "volume_size": { + Type: schema.TypeString, + Optional: true, + }, + "volume_type": { + Type: schema.TypeString, + Optional: true, + }, + "volume_id": { + Type: schema.TypeString, + Optional: true, + }, + "volume_name": { + Type: schema.TypeString, + Optional: true, + }, + "volume_device_path": { + Type: schema.TypeString, + Optional: true, + }, + } + + return s +} diff --git a/rancher2/schema_machine_config_v2_vsphere.go b/rancher2/schema_machine_config_v2_vsphere.go new file mode 100644 index 000000000..b1730f27f --- /dev/null +++ b/rancher2/schema_machine_config_v2_vsphere.go @@ -0,0 +1,212 @@ +package rancher2 + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +const ( + machineConfigV2VmwarevsphereCreationTypeDefault = "legacy" +) + +var ( + machineConfigV2VmwarevsphereCreationType = []string{"vm", "template", "library", "legacy"} + machineConfigV2VmwarevsphereVappIpallocationpolicies = []string{"dhcp", "fixed", "transient", "fixedAllocated"} + machineConfigV2VmwarevsphereVappIpprotocols = []string{"IPv4", "IPv6"} + machineConfigV2VmwarevsphereVappTransports = []string{"iso", "com.vmware.guestInfo"} +) + +//Schemas + +func machineConfigV2VmwarevsphereFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "boot2docker_url": { + Type: schema.TypeString, + Optional: true, + Default: "https://releases.rancher.com/os/latest/rancheros-vmware.iso", + Description: "vSphere URL for boot2docker image", + }, + "cfgparam": { + Type: schema.TypeList, + Optional: true, + Description: "vSphere vm configuration parameters (used for guestinfo)", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "clone_from": { + Type: schema.TypeString, + Optional: true, + Description: "If you choose creation type clone a name of what you 
want to clone is required", + }, + "cloud_config": { + Type: schema.TypeString, + Optional: true, + Description: "Filepath to a cloud-config yaml file to put into the ISO user-data", + }, + "cloudinit": { + Type: schema.TypeString, + Optional: true, + Description: "vSphere cloud-init filepath or url to add to guestinfo", + }, + "content_library": { + Type: schema.TypeString, + Optional: true, + Description: "If you choose to clone from a content library template specify the name of the library", + }, + "cpu_count": { + Type: schema.TypeString, + Optional: true, + Default: "2", + Description: "vSphere CPU number for docker VM", + }, + "creation_type": { + Type: schema.TypeString, + Optional: true, + Default: machineConfigV2VmwarevsphereCreationTypeDefault, + ValidateFunc: validation.StringInSlice(machineConfigV2VmwarevsphereCreationType, true), + Description: "Creation type when creating a new virtual machine. Supported values: vm, template, library, legacy", + }, + "custom_attributes": { + Type: schema.TypeList, + Optional: true, + Description: "vSphere custom attributes, format key/value e.g. '200=my custom value'", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "datacenter": { + Type: schema.TypeString, + Optional: true, + Description: "vSphere datacenter for virtual machine", + }, + "datastore": { + Type: schema.TypeString, + Optional: true, + Description: "vSphere datastore for virtual machine", + }, + "datastore_cluster": { + Type: schema.TypeString, + Optional: true, + Description: "vSphere datastore cluster for virtual machine", + }, + "disk_size": { + Type: schema.TypeString, + Optional: true, + Default: "20480", + Description: "vSphere size of disk for docker VM (in MB)", + }, + "folder": { + Type: schema.TypeString, + Optional: true, + Description: "vSphere folder for the docker VM. 
This folder must already exist in the datacenter", + }, + "hostsystem": { + Type: schema.TypeString, + Optional: true, + Description: "vSphere compute resource where the docker VM will be instantiated. This can be omitted if using a cluster with DRS", + }, + "memory_size": { + Type: schema.TypeString, + Optional: true, + Default: "2048", + Description: "vSphere size of memory for docker VM (in MB)", + }, + "network": { + Type: schema.TypeList, + Optional: true, + Description: "vSphere network where the virtual machine will be attached", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: "vSphere password", + }, + "pool": { + Type: schema.TypeString, + Optional: true, + Description: "vSphere resource pool for docker VM", + }, + "ssh_password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Default: "tcuser", + Description: "If using a non-B2D image you can specify the ssh password", + }, + "ssh_port": { + Type: schema.TypeString, + Optional: true, + Default: "22", + Description: "If using a non-B2D image you can specify the ssh port", + }, + "ssh_user": { + Type: schema.TypeString, + Optional: true, + Default: "docker", + Description: "If using a non-B2D image you can specify the ssh user", + }, + "ssh_user_group": { + Type: schema.TypeString, + Optional: true, + Default: "staff", + Description: "If using a non-B2D image the uploaded keys will need chown'ed, defaults to staff e.g. docker:staff", + }, + "tags": { + Type: schema.TypeList, + Optional: true, + Description: "vSphere tags id e.g. urn:xxx", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "username": { + Type: schema.TypeString, + Optional: true, + Description: "vSphere username", + }, + "vapp_ip_allocation_policy": { + Type: schema.TypeString, + Optional: true, + Description: "vSphere vApp IP allocation policy. 
Supported values are: dhcp, fixed, transient and fixedAllocated", + ValidateFunc: validation.StringInSlice(machineConfigV2VmwarevsphereVappIpallocationpolicies, true), + }, + "vapp_ip_protocol": { + Type: schema.TypeString, + Optional: true, + Description: "vSphere vApp IP protocol for this deployment. Supported values are: IPv4 and IPv6", + ValidateFunc: validation.StringInSlice(machineConfigV2VmwarevsphereVappIpprotocols, true), + }, + "vapp_property": { + Type: schema.TypeList, + Optional: true, + Description: "vSphere vApp properties", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "vapp_transport": { + Type: schema.TypeString, + Optional: true, + Description: "vSphere OVF environment transports to use for properties. Supported values are: iso and com.vmware.guestInfo", + ValidateFunc: validation.StringInSlice(machineConfigV2VmwarevsphereVappTransports, true), + }, + "vcenter": { + Type: schema.TypeString, + Optional: true, + Description: "vSphere IP/hostname for vCenter", + }, + "vcenter_port": { + Type: schema.TypeString, + Optional: true, + Default: "443", + Description: "vSphere Port for vCenter", + }, + } + + return s +} diff --git a/rancher2/structure_machine_config_v2.go b/rancher2/structure_machine_config_v2.go new file mode 100644 index 000000000..e9d93e72c --- /dev/null +++ b/rancher2/structure_machine_config_v2.go @@ -0,0 +1,138 @@ +package rancher2 + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + norman "github.com/rancher/norman/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + machineConfigV2Kind = "MachineConfig" + machineConfigV2APIVersion = "rke-machine-config.cattle.io/v1" + machineConfigV2APIType = "rke-machine-config.cattle.io" +) + +//Types + +type machineConfigV2 struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Amazonec2Config *MachineConfigV2Amazonec2 `json:"amazonec2Config,omitempty" 
yaml:"amazonec2Config,omitempty"` + AzureConfig *MachineConfigV2Azure `json:"azureConfig,omitempty" yaml:"azureConfig,omitempty"` + DigitaloceanConfig *MachineConfigV2Digitalocean `json:"digitaloceanConfig,omitempty" yaml:"digitaloceanConfig,omitempty"` + LinodeConfig *MachineConfigV2Linode `json:"linodeConfig,omitempty" yaml:"linodeConfig,omitempty"` + OpenstackConfig *MachineConfigV2Openstack `json:"openstackConfig,omitempty" yaml:"openstackConfig,omitempty"` + VmwarevsphereConfig *MachineConfigV2Vmwarevsphere `json:"vmwarevsphereConfig,omitempty" yaml:"vmwarevsphereConfig,omitempty"` +} + +type MachineConfigV2 struct { + norman.Resource + machineConfigV2 +} + +// Flatteners + +func flattenMachineConfigV2(d *schema.ResourceData, in *MachineConfigV2) error { + if in == nil { + return nil + } + kind := in.TypeMeta.Kind + d.Set("kind", kind) + switch kind { + case machineConfigV2Amazonec2Kind: + err := d.Set("amazonec2_config", flattenMachineConfigV2Amazonec2(in.Amazonec2Config)) + if err != nil { + return err + } + case machineConfigV2AzureKind: + err := d.Set("azure_config", flattenMachineConfigV2Azure(in.AzureConfig)) + if err != nil { + return err + } + case machineConfigV2DigitaloceanKind: + err := d.Set("digitalocean_config", flattenMachineConfigV2Digitalocean(in.DigitaloceanConfig)) + if err != nil { + return err + } + case machineConfigV2LinodeKind: + err := d.Set("linode_config", flattenMachineConfigV2Linode(in.LinodeConfig)) + if err != nil { + return err + } + case machineConfigV2OpenstackKind: + err := d.Set("openstack_config", flattenMachineConfigV2Openstack(in.OpenstackConfig)) + if err != nil { + return err + } + case machineConfigV2VmwarevsphereKind: + err := d.Set("vsphere_config", flattenMachineConfigV2Vmwarevsphere(in.VmwarevsphereConfig)) + if err != nil { + return err + } + default: + return fmt.Errorf("[ERROR] Unsupported driver on node template: %s", kind) + } + + if len(in.ID) > 0 { + d.SetId(in.ID) + } + d.Set("name", in.ObjectMeta.Name) + 
d.Set("fleet_namespace", in.ObjectMeta.Namespace) + err := d.Set("annotations", toMapInterface(in.ObjectMeta.Annotations)) + if err != nil { + return err + } + err = d.Set("labels", toMapInterface(in.ObjectMeta.Labels)) + if err != nil { + return err + } + d.Set("resource_version", in.ObjectMeta.ResourceVersion) + + return nil +} + +// Expanders + +func expandMachineConfigV2(in *schema.ResourceData) *MachineConfigV2 { + if in == nil { + return nil + } + + obj := &MachineConfigV2{} + if len(in.Id()) > 0 { + obj.ID = in.Id() + } + obj.ObjectMeta.GenerateName = "nc-" + in.Get("generate_name").(string) + "-" + obj.ObjectMeta.Namespace = in.Get("fleet_namespace").(string) + if v, ok := in.Get("annotations").(map[string]interface{}); ok && len(v) > 0 { + obj.ObjectMeta.Annotations = toMapString(v) + } + if v, ok := in.Get("labels").(map[string]interface{}); ok && len(v) > 0 { + obj.ObjectMeta.Labels = toMapString(v) + } + if v, ok := in.Get("resource_version").(string); ok { + obj.ObjectMeta.ResourceVersion = v + } + if v, ok := in.Get("amazonec2_config").([]interface{}); ok && len(v) > 0 { + obj.Amazonec2Config = expandMachineConfigV2Amazonec2(v, obj) + } + if v, ok := in.Get("azure_config").([]interface{}); ok && len(v) > 0 { + obj.AzureConfig = expandMachineConfigV2Azure(v, obj) + } + if v, ok := in.Get("digitalocean_config").([]interface{}); ok && len(v) > 0 { + obj.DigitaloceanConfig = expandMachineConfigV2Digitalocean(v, obj) + } + if v, ok := in.Get("linode_config").([]interface{}); ok && len(v) > 0 { + obj.LinodeConfig = expandMachineConfigV2Linode(v, obj) + } + if v, ok := in.Get("openstack_config").([]interface{}); ok && len(v) > 0 { + obj.OpenstackConfig = expandMachineConfigV2Openstack(v, obj) + } + if v, ok := in.Get("vsphere_config").([]interface{}); ok && len(v) > 0 { + obj.VmwarevsphereConfig = expandMachineConfigV2Vmwarevsphere(v, obj) + } + + return obj +} diff --git a/rancher2/structure_machine_config_v2_amazonec2.go 
b/rancher2/structure_machine_config_v2_amazonec2.go new file mode 100644 index 000000000..8a3a8195f --- /dev/null +++ b/rancher2/structure_machine_config_v2_amazonec2.go @@ -0,0 +1,337 @@ +package rancher2 + +import ( + norman "github.com/rancher/norman/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + machineConfigV2Amazonec2Kind = "Amazonec2Config" + machineConfigV2Amazonec2APIVersion = "rke-machine-config.cattle.io/v1" + machineConfigV2Amazonec2APIType = "rke-machine-config.cattle.io.amazonec2config" + machineConfigV2Amazonec2ClusterIDsep = "." +) + +//Types + +type machineConfigV2Amazonec2 struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + AccessKey string `json:"accessKey,omitempty" yaml:"accessKey,omitempty"` + Ami string `json:"ami,omitempty" yaml:"ami,omitempty"` + BlockDurationMinutes string `json:"blockDurationMinutes,omitempty" yaml:"blockDurationMinutes,omitempty"` + DeviceName string `json:"deviceName,omitempty" yaml:"deviceName,omitempty"` + EncryptEbsVolume bool `json:"encryptEbsVolume,omitempty" yaml:"encryptEbsVolume,omitempty"` + Endpoint string `json:"endpoint,omitempty" yaml:"endpoint,omitempty"` + HTTPEndpoint string `json:"httpEndpoint,omitempty" yaml:"httpEndpoint,omitempty"` + HTTPTokens string `json:"httpTokens,omitempty" yaml:"httpTokens,omitempty"` + IamInstanceProfile string `json:"iamInstanceProfile,omitempty" yaml:"iamInstanceProfile,omitempty"` + InsecureTransport bool `json:"insecureTransport,omitempty" yaml:"insecureTransport,omitempty"` + InstanceType string `json:"instanceType,omitempty" yaml:"instanceType,omitempty"` + KeypairName string `json:"keypairName,omitempty" yaml:"keypairName,omitempty"` + KmsKey string `json:"kmsKey,omitempty" yaml:"kmsKey,omitempty"` + Monitoring bool `json:"monitoring,omitempty" yaml:"monitoring,omitempty"` + OpenPort []string `json:"openPort,omitempty" yaml:"openPort,omitempty"` + PrivateAddressOnly bool 
`json:"privateAddressOnly,omitempty" yaml:"privateAddressOnly,omitempty"` + Region string `json:"region,omitempty" yaml:"region,omitempty"` + RequestSpotInstance bool `json:"requestSpotInstance,omitempty" yaml:"requestSpotInstance,omitempty"` + Retries string `json:"retries,omitempty" yaml:"retries,omitempty"` + RootSize string `json:"rootSize,omitempty" yaml:"rootSize,omitempty"` + SecretKey string `json:"secretKey,omitempty" yaml:"secretKey,omitempty"` + SecurityGroup []string `json:"securityGroup,omitempty" yaml:"securityGroup,omitempty"` + SecurityGroupReadonly bool `json:"securityGroupReadonly,omitempty" yaml:"securityGroupReadonly,omitempty"` + SessionToken string `json:"sessionToken,omitempty" yaml:"sessionToken,omitempty"` + SpotPrice string `json:"spotPrice,omitempty" yaml:"spotPrice,omitempty"` + SSHKeyContents string `json:"sshKeyContents,omitempty" yaml:"sshKeyContents,omitempty"` + SSHUser string `json:"sshUser,omitempty" yaml:"sshUser,omitempty"` + SubnetID string `json:"subnetId,omitempty" yaml:"subnetId,omitempty"` + Tags string `json:"tags,omitempty" yaml:"tags,omitempty"` + UseEbsOptimizedInstance bool `json:"useEbsOptimizedInstance,omitempty" yaml:"useEbsOptimizedInstance,omitempty"` + UsePrivateAddress bool `json:"usePrivateAddress,omitempty" yaml:"usePrivateAddress,omitempty"` + Userdata string `json:"userdata,omitempty" yaml:"userdata,omitempty"` + VolumeType string `json:"volumeType,omitempty" yaml:"volumeType,omitempty"` + VpcID string `json:"vpcId,omitempty" yaml:"vpcId,omitempty"` + Zone string `json:"zone,omitempty" yaml:"zone,omitempty"` +} + +type MachineConfigV2Amazonec2 struct { + norman.Resource + machineConfigV2Amazonec2 +} + +// Flatteners + +func flattenMachineConfigV2Amazonec2(in *MachineConfigV2Amazonec2) []interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + + if len(in.Ami) > 0 { + obj["ami"] = in.Ami + } + + if len(in.BlockDurationMinutes) > 0 { + obj["block_duration_minutes"] = 
in.BlockDurationMinutes + } + + if len(in.DeviceName) > 0 { + obj["device_name"] = in.DeviceName + } + + obj["encrypt_ebs_volume"] = in.EncryptEbsVolume + + if len(in.Endpoint) > 0 { + obj["endpoint"] = in.Endpoint + } + if len(in.HTTPEndpoint) > 0 { + obj["http_endpoint"] = in.HTTPEndpoint + } + if len(in.HTTPTokens) > 0 { + obj["http_tokens"] = in.HTTPTokens + } + + if len(in.IamInstanceProfile) > 0 { + obj["iam_instance_profile"] = in.IamInstanceProfile + } + + obj["insecure_transport"] = in.InsecureTransport + + if len(in.InstanceType) > 0 { + obj["instance_type"] = in.InstanceType + } + + if len(in.KeypairName) > 0 { + obj["keypair_name"] = in.KeypairName + } + + if len(in.KmsKey) > 0 { + obj["kms_key"] = in.KmsKey + } + + obj["monitoring"] = in.Monitoring + + if len(in.OpenPort) > 0 { + obj["open_port"] = toArrayInterface(in.OpenPort) + } + + obj["private_address_only"] = in.PrivateAddressOnly + + if len(in.Region) > 0 { + obj["region"] = in.Region + } + + obj["request_spot_instance"] = in.RequestSpotInstance + + if len(in.Retries) > 0 { + obj["retries"] = in.Retries + } + + if len(in.RootSize) > 0 { + obj["root_size"] = in.RootSize + } + + if len(in.SecurityGroup) > 0 { + obj["security_group"] = toArrayInterface(in.SecurityGroup) + } + + obj["security_group_readonly"] = in.SecurityGroupReadonly + + if len(in.SessionToken) > 0 { + obj["session_token"] = in.SessionToken + } + + if len(in.SpotPrice) > 0 { + obj["spot_price"] = in.SpotPrice + } + + if len(in.SSHKeyContents) > 0 { + obj["ssh_key_contents"] = in.SSHKeyContents + } + + if len(in.SSHUser) > 0 { + obj["ssh_user"] = in.SSHUser + } + + if len(in.SubnetID) > 0 { + obj["subnet_id"] = in.SubnetID + } + + if len(in.Tags) > 0 { + obj["tags"] = in.Tags + } + + obj["use_ebs_optimized_instance"] = in.UseEbsOptimizedInstance + + obj["use_private_address"] = in.UsePrivateAddress + + if len(in.Userdata) > 0 { + obj["userdata"] = in.Userdata + } + + if len(in.VolumeType) > 0 { + obj["volume_type"] = in.VolumeType 
+ } + + if len(in.VpcID) > 0 { + obj["vpc_id"] = in.VpcID + } + + if len(in.Zone) > 0 { + obj["zone"] = in.Zone + } + + return []interface{}{obj} +} + +// Expanders + +func expandMachineConfigV2Amazonec2(p []interface{}, source *MachineConfigV2) *MachineConfigV2Amazonec2 { + if p == nil || len(p) == 0 || p[0] == nil { + return nil + } + obj := &MachineConfigV2Amazonec2{} + + if len(source.ID) > 0 { + obj.ID = source.ID + } + in := p[0].(map[string]interface{}) + + obj.TypeMeta.Kind = machineConfigV2Amazonec2Kind + obj.TypeMeta.APIVersion = machineConfigV2Amazonec2APIVersion + source.TypeMeta = obj.TypeMeta + obj.ObjectMeta = source.ObjectMeta + + if v, ok := in["ami"].(string); ok && len(v) > 0 { + obj.Ami = v + } + + if v, ok := in["block_duration_minutes"].(string); ok && len(v) > 0 { + obj.BlockDurationMinutes = v + } + + if v, ok := in["device_name"].(string); ok && len(v) > 0 { + obj.DeviceName = v + } + + if v, ok := in["encrypt_ebs_volume"].(bool); ok { + obj.EncryptEbsVolume = v + } + + if v, ok := in["endpoint"].(string); ok && len(v) > 0 { + obj.Endpoint = v + } + if v, ok := in["http_endpoint"].(string); ok && len(v) > 0 { + obj.HTTPEndpoint = v + } + if v, ok := in["http_tokens"].(string); ok && len(v) > 0 { + obj.HTTPTokens = v + } + + if v, ok := in["iam_instance_profile"].(string); ok && len(v) > 0 { + obj.IamInstanceProfile = v + } + + if v, ok := in["insecure_transport"].(bool); ok { + obj.InsecureTransport = v + } + + if v, ok := in["instance_type"].(string); ok && len(v) > 0 { + obj.InstanceType = v + } + + if v, ok := in["keypair_name"].(string); ok && len(v) > 0 { + obj.KeypairName = v + } + + if v, ok := in["kms_key"].(string); ok && len(v) > 0 { + obj.KmsKey = v + } + + if v, ok := in["monitoring"].(bool); ok { + obj.Monitoring = v + } + + if v, ok := in["open_port"].([]interface{}); ok && len(v) > 0 { + obj.OpenPort = toArrayString(v) + } + + if v, ok := in["private_address_only"].(bool); ok { + obj.PrivateAddressOnly = v + } + + if v, ok := 
in["region"].(string); ok && len(v) > 0 { + obj.Region = v + } + + if v, ok := in["request_spot_instance"].(bool); ok { + obj.RequestSpotInstance = v + } + + if v, ok := in["retries"].(string); ok && len(v) > 0 { + obj.Retries = v + } + + if v, ok := in["root_size"].(string); ok && len(v) > 0 { + obj.RootSize = v + } + + if v, ok := in["security_group"].([]interface{}); ok && len(v) > 0 { + obj.SecurityGroup = toArrayString(v) + } + + if v, ok := in["security_group_readonly"].(bool); ok { + obj.SecurityGroupReadonly = v + } + + if v, ok := in["session_token"].(string); ok && len(v) > 0 { + obj.SessionToken = v + } + + if v, ok := in["spot_price"].(string); ok && len(v) > 0 { + obj.SpotPrice = v + } + + if v, ok := in["ssh_key_contents"].(string); ok && len(v) > 0 { + obj.SSHKeyContents = v + } + + if v, ok := in["ssh_user"].(string); ok && len(v) > 0 { + obj.SSHUser = v + } + + if v, ok := in["subnet_id"].(string); ok && len(v) > 0 { + obj.SubnetID = v + } + + if v, ok := in["tags"].(string); ok && len(v) > 0 { + obj.Tags = v + } + + if v, ok := in["use_ebs_optimized_instance"].(bool); ok { + obj.UseEbsOptimizedInstance = v + } + + if v, ok := in["use_private_address"].(bool); ok { + obj.UsePrivateAddress = v + } + + if v, ok := in["userdata"].(string); ok && len(v) > 0 { + obj.Userdata = v + } + + if v, ok := in["volume_type"].(string); ok && len(v) > 0 { + obj.VolumeType = v + } + + if v, ok := in["vpc_id"].(string); ok && len(v) > 0 { + obj.VpcID = v + } + + if v, ok := in["zone"].(string); ok && len(v) > 0 { + obj.Zone = v + } + + return obj +} diff --git a/rancher2/structure_machine_config_v2_azure.go b/rancher2/structure_machine_config_v2_azure.go new file mode 100644 index 000000000..4d0977ce7 --- /dev/null +++ b/rancher2/structure_machine_config_v2_azure.go @@ -0,0 +1,299 @@ +package rancher2 + +import ( + norman "github.com/rancher/norman/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + machineConfigV2AzureKind = "AzureConfig" 
+ machineConfigV2AzureAPIVersion = "rke-machine-config.cattle.io/v1" + machineConfigV2AzureAPIType = "rke-machine-config.cattle.io.azureconfig" + machineConfigV2AzureClusterIDsep = "." +) + +//Types + +type machineConfigV2Azure struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + AvailabilitySet string `json:"availabilitySet,omitempty" yaml:"availabilitySet,omitempty"` + ClientID string `json:"clientId,omitempty" yaml:"clientId,omitempty"` + ClientSecret string `json:"clientSecret,omitempty" yaml:"clientSecret,omitempty"` + CustomData string `json:"customData,omitempty" yaml:"customData,omitempty"` + DiskSize string `json:"diskSize,omitempty" yaml:"diskSize,omitempty"` + DNS string `json:"dns,omitempty" yaml:"dns,omitempty"` + Environment string `json:"environment,omitempty" yaml:"environment,omitempty"` + FaultDomainCount string `json:"faultDomainCount,omitempty" yaml:"faultDomainCount,omitempty"` + Image string `json:"image,omitempty" yaml:"image,omitempty"` + Location string `json:"location,omitempty" yaml:"location,omitempty"` + ManagedDisks bool `json:"managedDisks,omitempty" yaml:"managedDisks,omitempty"` + NoPublicIP bool `json:"noPublicIp,omitempty" yaml:"noPublicIp,omitempty"` + NSG string `json:"nsg,omitempty" yaml:"nsg,omitempty"` + OpenPort []string `json:"openPort,omitempty" yaml:"openPort,omitempty"` + PrivateAddressOnly bool `json:"privateAddressOnly,omitempty" yaml:"privateAddressOnly,omitempty"` + PrivateIPAddress string `json:"privateIpAddress,omitempty" yaml:"privateIpAddress,omitempty"` + ResourceGroup string `json:"resourceGroup,omitempty" yaml:"resourceGroup,omitempty"` + Size string `json:"size,omitempty" yaml:"size,omitempty"` + SSHUser string `json:"sshUser,omitempty" yaml:"sshUser,omitempty"` + StaticPublicIP bool `json:"staticPublicIp,omitempty" yaml:"staticPublicIp,omitempty"` + StorageType string `json:"storageType,omitempty" yaml:"storageType,omitempty"` + Subnet string `json:"subnet,omitempty" 
yaml:"subnet,omitempty"` + SubnetPrefix string `json:"subnetPrefix,omitempty" yaml:"subnetPrefix,omitempty"` + SubscriptionID string `json:"subscriptionId,omitempty" yaml:"subscriptionId,omitempty"` + TenantID string `json:"tenantId,omitempty" yaml:"tenantId,omitempty"` + UpdateDomainCount string `json:"updateDomainCount,omitempty" yaml:"updateDomainCount,omitempty"` + UsePrivateIP bool `json:"usePrivateIp,omitempty" yaml:"usePrivateIp,omitempty"` + Vnet string `json:"vnet,omitempty" yaml:"vnet,omitempty"` +} + +type MachineConfigV2Azure struct { + norman.Resource + machineConfigV2Azure +} + +// Flatteners + +func flattenMachineConfigV2Azure(in *MachineConfigV2Azure) []interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + + if len(in.AvailabilitySet) > 0 { + obj["availability_set"] = in.AvailabilitySet + } + + if len(in.ClientID) > 0 { + obj["client_id"] = in.ClientID + } + + if len(in.ClientSecret) > 0 { + obj["client_secret"] = in.ClientSecret + } + + if len(in.CustomData) > 0 { + obj["custom_data"] = in.CustomData + } + + if len(in.DiskSize) > 0 { + obj["disk_size"] = in.DiskSize + } + + if len(in.DNS) > 0 { + obj["dns"] = in.DNS + } + + if len(in.Environment) > 0 { + obj["environment"] = in.Environment + } + + if len(in.FaultDomainCount) > 0 { + obj["fault_domain_count"] = in.FaultDomainCount + } + + if len(in.Image) > 0 { + obj["image"] = in.Image + } + + if len(in.Location) > 0 { + obj["location"] = in.Location + } + + obj["managed_disks"] = in.ManagedDisks + obj["no_public_ip"] = in.NoPublicIP + + if len(in.NSG) > 0 { + obj["nsg"] = in.NSG + } + + if len(in.OpenPort) > 0 { + obj["open_port"] = toArrayInterface(in.OpenPort) + } + + obj["private_address_only"] = in.PrivateAddressOnly + + if len(in.PrivateIPAddress) > 0 { + obj["private_ip_address"] = in.PrivateIPAddress + } + + if len(in.ResourceGroup) > 0 { + obj["resource_group"] = in.ResourceGroup + } + + if len(in.Size) > 0 { + obj["size"] = in.Size + } + + if 
len(in.SSHUser) > 0 { + obj["ssh_user"] = in.SSHUser + } + + obj["static_public_ip"] = in.StaticPublicIP + + if len(in.StorageType) > 0 { + obj["storage_type"] = in.StorageType + } + + if len(in.Subnet) > 0 { + obj["subnet"] = in.Subnet + } + + if len(in.SubnetPrefix) > 0 { + obj["subnet_prefix"] = in.SubnetPrefix + } + + if len(in.SubscriptionID) > 0 { + obj["subscription_id"] = in.SubscriptionID + } + + if len(in.TenantID) > 0 { + obj["tenant_id"] = in.TenantID + } + + if len(in.UpdateDomainCount) > 0 { + obj["update_domain_count"] = in.UpdateDomainCount + } + + obj["use_private_ip"] = in.UsePrivateIP + + if len(in.Vnet) > 0 { + obj["vnet"] = in.Vnet + } + + return []interface{}{obj} +} + +// Expanders + +func expandMachineConfigV2Azure(p []interface{}, source *MachineConfigV2) *MachineConfigV2Azure { + if p == nil || len(p) == 0 || p[0] == nil { + return nil + } + obj := &MachineConfigV2Azure{} + + if len(source.ID) > 0 { + obj.ID = source.ID + } + in := p[0].(map[string]interface{}) + + obj.TypeMeta.Kind = machineConfigV2AzureKind + obj.TypeMeta.APIVersion = machineConfigV2AzureAPIVersion + source.TypeMeta = obj.TypeMeta + obj.ObjectMeta = source.ObjectMeta + + if v, ok := in["availability_set"].(string); ok && len(v) > 0 { + obj.AvailabilitySet = v + } + + if v, ok := in["client_id"].(string); ok && len(v) > 0 { + obj.ClientID = v + } + + if v, ok := in["client_secret"].(string); ok && len(v) > 0 { + obj.ClientSecret = v + } + + if v, ok := in["custom_data"].(string); ok && len(v) > 0 { + obj.CustomData = v + } + + if v, ok := in["disk_size"].(string); ok && len(v) > 0 { + obj.DiskSize = v + } + + if v, ok := in["dns"].(string); ok && len(v) > 0 { + obj.DNS = v + } + + if v, ok := in["environment"].(string); ok && len(v) > 0 { + obj.Environment = v + } + + if v, ok := in["fault_domain_count"].(string); ok && len(v) > 0 { + obj.FaultDomainCount = v + } + + if v, ok := in["image"].(string); ok && len(v) > 0 { + obj.Image = v + } + + if v, ok := 
in["location"].(string); ok && len(v) > 0 { + obj.Location = v + } + + if v, ok := in["managed_disks"].(bool); ok { + obj.ManagedDisks = v + } + + if v, ok := in["no_public_ip"].(bool); ok { + obj.NoPublicIP = v + } + + if v, ok := in["nsg"].(string); ok && len(v) > 0 { + obj.NSG = v + } + + if v, ok := in["open_port"].([]interface{}); ok && len(v) > 0 { + obj.OpenPort = toArrayString(v) + } + + if v, ok := in["private_address_only"].(bool); ok { + obj.PrivateAddressOnly = v + } + + if v, ok := in["private_ip_address"].(string); ok && len(v) > 0 { + obj.PrivateIPAddress = v + } + + if v, ok := in["resource_group"].(string); ok && len(v) > 0 { + obj.ResourceGroup = v + } + + if v, ok := in["size"].(string); ok && len(v) > 0 { + obj.Size = v + } + + if v, ok := in["ssh_user"].(string); ok && len(v) > 0 { + obj.SSHUser = v + } + + if v, ok := in["static_public_ip"].(bool); ok { + obj.StaticPublicIP = v + } + + if v, ok := in["storage_type"].(string); ok && len(v) > 0 { + obj.StorageType = v + } + + if v, ok := in["subnet"].(string); ok && len(v) > 0 { + obj.Subnet = v + } + + if v, ok := in["subnet_prefix"].(string); ok && len(v) > 0 { + obj.SubnetPrefix = v + } + + if v, ok := in["subscription_id"].(string); ok && len(v) > 0 { + obj.SubscriptionID = v + } + + if v, ok := in["tenant_id"].(string); ok && len(v) > 0 { + obj.TenantID = v + } + + if v, ok := in["update_domain_count"].(string); ok && len(v) > 0 { + obj.UpdateDomainCount = v + } + + if v, ok := in["use_private_ip"].(bool); ok { + obj.UsePrivateIP = v + } + + if v, ok := in["vnet"].(string); ok && len(v) > 0 { + obj.Vnet = v + } + + return obj +} diff --git a/rancher2/structure_machine_config_v2_digitalocean.go b/rancher2/structure_machine_config_v2_digitalocean.go new file mode 100644 index 000000000..40f9cb918 --- /dev/null +++ b/rancher2/structure_machine_config_v2_digitalocean.go @@ -0,0 +1,173 @@ +package rancher2 + +import ( + norman "github.com/rancher/norman/types" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + machineConfigV2DigitaloceanKind = "DigitaloceanConfig" + machineConfigV2DigitaloceanAPIVersion = "rke-machine-config.cattle.io/v1" + machineConfigV2DigitaloceanAPIType = "rke-machine-config.cattle.io.digitaloceanconfig" + machineConfigV2DigitaloceanClusterIDsep = "." +) + +//Types + +type machineConfigV2Digitalocean struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + AccessToken string `json:"accessToken,omitempty" yaml:"accessToken,omitempty"` + Backups bool `json:"backups,omitempty" yaml:"backups,omitempty"` + Image string `json:"image,omitempty" yaml:"image,omitempty"` + IPV6 bool `json:"ipv6,omitempty" yaml:"ipv6,omitempty"` + Monitoring bool `json:"monitoring,omitempty" yaml:"monitoring,omitempty"` + PrivateNetworking bool `json:"privateNetworking,omitempty" yaml:"privateNetworking,omitempty"` + Region string `json:"region,omitempty" yaml:"region,omitempty"` + Size string `json:"size,omitempty" yaml:"size,omitempty"` + SSHKeyContents string `json:"sshKeyContents,omitempty" yaml:"sshKeyContents,omitempty"` + SSHKeyFingerprint string `json:"sshKeyFingerprint,omitempty" yaml:"sshKeyFingerprint,omitempty"` + SSHPort string `json:"sshPort,omitempty" yaml:"sshPort,omitempty"` + SSHUser string `json:"sshUser,omitempty" yaml:"sshUser,omitempty"` + Tags string `json:"tags,omitempty" yaml:"tags,omitempty"` + Userdata string `json:"userdata,omitempty" yaml:"userdata,omitempty"` +} + +type MachineConfigV2Digitalocean struct { + norman.Resource + machineConfigV2Digitalocean +} + +// Flatteners + +func flattenMachineConfigV2Digitalocean(in *MachineConfigV2Digitalocean) []interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + + if len(in.AccessToken) > 0 { + obj["access_token"] = in.AccessToken + } + + obj["backups"] = in.Backups + + if len(in.Image) > 0 { + obj["image"] = in.Image + } + + obj["ipv6"] = in.IPV6 + obj["monitoring"] = 
in.Monitoring + obj["private_networking"] = in.PrivateNetworking + + if len(in.Region) > 0 { + obj["region"] = in.Region + } + + if len(in.Size) > 0 { + obj["size"] = in.Size + } + + if len(in.SSHKeyContents) > 0 { + obj["ssh_key_contents"] = in.SSHKeyContents + } + + if len(in.SSHKeyFingerprint) > 0 { + obj["ssh_key_fingerprint"] = in.SSHKeyFingerprint + } + + if len(in.SSHPort) > 0 { + obj["ssh_port"] = in.SSHPort + } + + if len(in.SSHUser) > 0 { + obj["ssh_user"] = in.SSHUser + } + + if len(in.Tags) > 0 { + obj["tags"] = in.Tags + } + + if len(in.Userdata) > 0 { + obj["userdata"] = in.Userdata + } + + return []interface{}{obj} +} + +// Expanders + +func expandMachineConfigV2Digitalocean(p []interface{}, source *MachineConfigV2) *MachineConfigV2Digitalocean { + if p == nil || len(p) == 0 || p[0] == nil { + return nil + } + obj := &MachineConfigV2Digitalocean{} + + if len(source.ID) > 0 { + obj.ID = source.ID + } + in := p[0].(map[string]interface{}) + + obj.TypeMeta.Kind = machineConfigV2DigitaloceanKind + obj.TypeMeta.APIVersion = machineConfigV2DigitaloceanAPIVersion + source.TypeMeta = obj.TypeMeta + obj.ObjectMeta = source.ObjectMeta + + if v, ok := in["access_token"].(string); ok && len(v) > 0 { + obj.AccessToken = v + } + + if v, ok := in["backups"].(bool); ok { + obj.Backups = v + } + + if v, ok := in["image"].(string); ok && len(v) > 0 { + obj.Image = v + } + + if v, ok := in["ipv6"].(bool); ok { + obj.IPV6 = v + } + + if v, ok := in["monitoring"].(bool); ok { + obj.Monitoring = v + } + if v, ok := in["private_networking"].(bool); ok { + obj.PrivateNetworking = v + } + + if v, ok := in["region"].(string); ok && len(v) > 0 { + obj.Region = v + } + + if v, ok := in["size"].(string); ok && len(v) > 0 { + obj.Size = v + } + + if v, ok := in["ssh_key_contents"].(string); ok && len(v) > 0 { + obj.SSHKeyContents = v + } + + if v, ok := in["ssh_key_fingerprint"].(string); ok && len(v) > 0 { + obj.SSHKeyFingerprint = v + } + + if v, ok := in["ssh_port"].(string); 
ok && len(v) > 0 { + obj.SSHPort = v + } + + if v, ok := in["ssh_user"].(string); ok && len(v) > 0 { + obj.SSHUser = v + } + + if v, ok := in["tags"].(string); ok && len(v) > 0 { + obj.Tags = v + } + + if v, ok := in["userdata"].(string); ok && len(v) > 0 { + obj.Userdata = v + } + + return obj +} diff --git a/rancher2/structure_machine_config_v2_linode.go b/rancher2/structure_machine_config_v2_linode.go new file mode 100644 index 000000000..7c9ca6b3d --- /dev/null +++ b/rancher2/structure_machine_config_v2_linode.go @@ -0,0 +1,200 @@ +package rancher2 + +import ( + norman "github.com/rancher/norman/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + machineConfigV2LinodeKind = "LinodeConfig" + machineConfigV2LinodeAPIVersion = "rke-machine-config.cattle.io/v1" + machineConfigV2LinodeAPIType = "rke-machine-config.cattle.io.linodeconfig" + machineConfigV2LinodeClusterIDsep = "." +) + +//Types + +type machineConfigV2Linode struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + AuthorizedUsers string `json:"authorizedUsers,omitempty" yaml:"authorizedUsers,omitempty"` + CreatePrivateIP bool `json:"createPrivateIp,omitempty" yaml:"createPrivateIp,omitempty"` + DockerPort string `json:"dockerPort,omitempty" yaml:"dockerPort,omitempty"` + Image string `json:"image,omitempty" yaml:"image,omitempty"` + InstanceType string `json:"instanceType,omitempty" yaml:"instanceType,omitempty"` + Label string `json:"label,omitempty" yaml:"label,omitempty"` + Region string `json:"region,omitempty" yaml:"region,omitempty"` + RootPass string `json:"rootPass,omitempty" yaml:"rootPass,omitempty"` + SSHPort string `json:"sshPort,omitempty" yaml:"sshPort,omitempty"` + SSHUser string `json:"sshUser,omitempty" yaml:"sshUser,omitempty"` + StackScript string `json:"stackscript,omitempty" yaml:"stackscript,omitempty"` + StackscriptData string `json:"stackscriptData,omitempty" yaml:"stackscriptData,omitempty"` + SwapSize string 
`json:"swapSize,omitempty" yaml:"swapSize,omitempty"` + Tags string `json:"tags,omitempty" yaml:"tags,omitempty"` + Token string `json:"token,omitempty" yaml:"token,omitempty"` + UAPrefix string `json:"uaPrefix,omitempty" yaml:"uaPrefix,omitempty"` +} + +type MachineConfigV2Linode struct { + norman.Resource + machineConfigV2Linode +} + +// Flatteners + +func flattenMachineConfigV2Linode(in *MachineConfigV2Linode) []interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + + if len(in.AuthorizedUsers) > 0 { + obj["authorized_users"] = in.AuthorizedUsers + } + + obj["create_private_ip"] = in.CreatePrivateIP + + if len(in.DockerPort) > 0 { + obj["docker_port"] = in.DockerPort + } + + if len(in.Image) > 0 { + obj["image"] = in.Image + } + + if len(in.InstanceType) > 0 { + obj["instance_type"] = in.InstanceType + } + + if len(in.Label) > 0 { + obj["label"] = in.Label + } + + if len(in.Region) > 0 { + obj["region"] = in.Region + } + + if len(in.RootPass) > 0 { + obj["root_pass"] = in.RootPass + } + + if len(in.SSHPort) > 0 { + obj["ssh_port"] = in.SSHPort + } + + if len(in.SSHUser) > 0 { + obj["ssh_user"] = in.SSHUser + } + + if len(in.StackScript) > 0 { + obj["stackscript"] = in.StackScript + } + + if len(in.StackscriptData) > 0 { + obj["stackscript_data"] = in.StackscriptData + } + + if len(in.SwapSize) > 0 { + obj["swap_size"] = in.SwapSize + } + + if len(in.Tags) > 0 { + obj["tags"] = in.Tags + } + + if len(in.Token) > 0 { + obj["token"] = in.Token + } + + if len(in.UAPrefix) > 0 { + obj["ua_prefix"] = in.UAPrefix + } + + return []interface{}{obj} +} + +// Expanders + +func expandMachineConfigV2Linode(p []interface{}, source *MachineConfigV2) *MachineConfigV2Linode { + if p == nil || len(p) == 0 || p[0] == nil { + return nil + } + obj := &MachineConfigV2Linode{} + + if len(source.ID) > 0 { + obj.ID = source.ID + } + in := p[0].(map[string]interface{}) + + obj.TypeMeta.Kind = machineConfigV2LinodeKind + obj.TypeMeta.APIVersion = 
machineConfigV2LinodeAPIVersion + source.TypeMeta = obj.TypeMeta + obj.ObjectMeta = source.ObjectMeta + + if v, ok := in["authorized_users"].(string); ok && len(v) > 0 { + obj.AuthorizedUsers = v + } + + if v, ok := in["create_private_ip"].(bool); ok { + obj.CreatePrivateIP = v + } + + if v, ok := in["docker_port"].(string); ok && len(v) > 0 { + obj.DockerPort = v + } + + if v, ok := in["image"].(string); ok && len(v) > 0 { + obj.Image = v + } + + if v, ok := in["instance_type"].(string); ok && len(v) > 0 { + obj.InstanceType = v + } + + if v, ok := in["label"].(string); ok && len(v) > 0 { + obj.Label = v + } + + if v, ok := in["region"].(string); ok && len(v) > 0 { + obj.Region = v + } + + if v, ok := in["root_pass"].(string); ok && len(v) > 0 { + obj.RootPass = v + } + + if v, ok := in["ssh_port"].(string); ok && len(v) > 0 { + obj.SSHPort = v + } + + if v, ok := in["ssh_user"].(string); ok && len(v) > 0 { + obj.SSHUser = v + } + + if v, ok := in["stackscript"].(string); ok && len(v) > 0 { + obj.StackScript = v + } + + if v, ok := in["stackscript_data"].(string); ok && len(v) > 0 { + obj.StackscriptData = v + } + + if v, ok := in["swap_size"].(string); ok && len(v) > 0 { + obj.SwapSize = v + } + + if v, ok := in["tags"].(string); ok && len(v) > 0 { + obj.Tags = v + } + + if v, ok := in["token"].(string); ok && len(v) > 0 { + obj.Token = v + } + + if v, ok := in["ua_prefix"].(string); ok && len(v) > 0 { + obj.UAPrefix = v + } + + return obj +} diff --git a/rancher2/structure_machine_config_v2_openstack.go b/rancher2/structure_machine_config_v2_openstack.go new file mode 100644 index 000000000..e528d7a44 --- /dev/null +++ b/rancher2/structure_machine_config_v2_openstack.go @@ -0,0 +1,273 @@ +package rancher2 + +import ( + norman "github.com/rancher/norman/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + machineConfigV2OpenstackKind = "OpenstackConfig" + machineConfigV2OpenstackAPIVersion = "rke-machine-config.cattle.io/v1" + 
machineConfigV2OpenstackAPIType = "rke-machine-config.cattle.io.openstackconfig" + machineConfigV2OpenstackClusterIDsep = "." +) + +//Types + +type machineConfigV2Openstack struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + ActiveTimeout string `json:"activeTimeout,omitempty" yaml:"activeTimeout,omitempty"` + AuthURL string `json:"authUrl,omitempty" yaml:"authUrl,omitempty"` + AvailabilityZone string `json:"availabilityZone,omitempty" yaml:"availabilityZone,omitempty"` + CaCert string `json:"cacert,omitempty" yaml:"cacert,omitempty"` + ConfigDrive bool `json:"configDrive,omitempty" yaml:"configDrive,omitempty"` + DomainID string `json:"domainId,omitempty" yaml:"domainId,omitempty"` + DomainName string `json:"domainName,omitempty" yaml:"domainName,omitempty"` + EndpointType string `json:"endpointType,omitempty" yaml:"endpointType,omitempty"` + FlavorID string `json:"flavorId,omitempty" yaml:"flavorId,omitempty"` + FlavorName string `json:"flavorName,omitempty" yaml:"flavorName,omitempty"` + FloatingIPPool string `json:"floatingipPool,omitempty" yaml:"floatingipPool,omitempty"` + ImageID string `json:"imageId,omitempty" yaml:"imageId,omitempty"` + ImageName string `json:"imageName,omitempty" yaml:"imageName,omitempty"` + Insecure bool `json:"insecure,omitempty" yaml:"insecure,omitempty"` + IPVersion string `json:"ipVersion,omitempty" yaml:"ipVersion,omitempty"` + KeypairName string `json:"keypairName,omitempty" yaml:"keypairName,omitempty"` + NetID string `json:"netId,omitempty" yaml:"netId,omitempty"` + NetName string `json:"netName,omitempty" yaml:"netName,omitempty"` + NovaNetwork bool `json:"novaNetwork,omitempty" yaml:"novaNetwork,omitempty"` + Password string `json:"password,omitempty" yaml:"password,omitempty"` + PrivateKeyFile string `json:"privateKeyFile,omitempty" yaml:"privateKeyFile,omitempty"` + Region string `json:"region,omitempty" yaml:"region,omitempty"` + SecGroups string `json:"secGroups,omitempty" 
yaml:"secGroups,omitempty"` + SSHPort string `json:"sshPort,omitempty" yaml:"sshPort,omitempty"` + SSHUser string `json:"sshUser,omitempty" yaml:"sshUser,omitempty"` + TenantID string `json:"tenantId,omitempty" yaml:"tenantId,omitempty"` + TenantName string `json:"tenantName,omitempty" yaml:"tenantName,omitempty"` + TenantDomainID string `json:"tenantDomainId,omitempty" yaml:"tenantDomainId,omitempty"` + TenantDomainName string `json:"tenantDomainName,omitempty" yaml:"tenantDomainName,omitempty"` + UserDataFile string `json:"userDataFile,omitempty" yaml:"userDataFile,omitempty"` + Username string `json:"username,omitempty" yaml:"username,omitempty"` + UserDomainID string `json:"userDomainId,omitempty" yaml:"userDomainId,omitempty"` + UserDomainName string `json:"userDomainName,omitempty" yaml:"userDomainName,omitempty"` + ApplicationCredentialID string `json:"applicationCredentialId,omitempty" yaml:"applicationCredentialId,omitempty"` + ApplicationCredentialName string `json:"applicationCredentialName,omitempty" yaml:"applicationCredentialName,omitempty"` + ApplicationCredentialSecret string `json:"applicationCredentialSecret,omitempty" yaml:"applicationCredentialSecret,omitempty"` + BootFromVolume bool `json:"bootFromVolume,omitempty" yaml:"bootFromVolume,omitempty"` + VolumeType string `json:"volumeType,omitempty" yaml:"volumeType,omitempty"` + VolumeSize string `json:"volumeSize,omitempty" yaml:"volumeSize,omitempty"` + VolumeID string `json:"volumeId,omitempty" yaml:"volumeId,omitempty"` + VolumeName string `json:"volumeName,omitempty" yaml:"volumeName,omitempty"` + VolumeDevicePath string `json:"volumeDevicePath,omitempty" yaml:"volumeDevicePath,omitempty"` +} + +type MachineConfigV2Openstack struct { + norman.Resource + machineConfigV2Openstack +} + +// Flatteners + +func flattenMachineConfigV2Openstack(in *MachineConfigV2Openstack) []interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + + obj["active_timeout"] = 
in.ActiveTimeout + obj["auth_url"] = in.AuthURL + obj["availability_zone"] = in.AvailabilityZone + obj["cacert"] = in.CaCert + obj["config_drive"] = in.ConfigDrive + obj["domain_id"] = in.DomainID + obj["domain_name"] = in.DomainName + obj["endpoint_type"] = in.EndpointType + obj["flavor_id"] = in.FlavorID + obj["flavor_name"] = in.FlavorName + obj["floating_ip_pool"] = in.FloatingIPPool + obj["image_id"] = in.ImageID + obj["image_name"] = in.ImageName + obj["insecure"] = in.Insecure + obj["ip_version"] = in.IPVersion + obj["keypair_name"] = in.KeypairName + obj["net_id"] = in.NetID + obj["net_name"] = in.NetName + obj["nova_network"] = in.NovaNetwork + obj["password"] = in.Password + obj["private_key_file"] = in.PrivateKeyFile + obj["region"] = in.Region + obj["sec_groups"] = in.SecGroups + obj["ssh_port"] = in.SSHPort + obj["ssh_user"] = in.SSHUser + obj["tenant_id"] = in.TenantID + obj["tenant_name"] = in.TenantName + obj["tenant_domain_id"] = in.TenantDomainID + obj["tenant_domain_name"] = in.TenantDomainName + obj["user_data_file"] = in.UserDataFile + obj["username"] = in.Username + obj["user_domain_id"] = in.UserDomainID + obj["user_domain_name"] = in.UserDomainName + obj["application_credential_id"] = in.ApplicationCredentialID + obj["application_credential_name"] = in.ApplicationCredentialName + obj["application_credential_secret"] = in.ApplicationCredentialSecret + obj["boot_from_volume"] = in.BootFromVolume + obj["volume_size"] = in.VolumeSize + obj["volume_type"] = in.VolumeType + obj["volume_id"] = in.VolumeID + obj["volume_name"] = in.VolumeName + obj["volume_device_path"] = in.VolumeDevicePath + + return []interface{}{obj} +} + +// Expanders + +func expandMachineConfigV2Openstack(p []interface{}, source *MachineConfigV2) *MachineConfigV2Openstack { + if p == nil || len(p) == 0 || p[0] == nil { + return nil + } + obj := &MachineConfigV2Openstack{} + + if len(source.ID) > 0 { + obj.ID = source.ID + } + in := p[0].(map[string]interface{}) + + 
obj.TypeMeta.Kind = machineConfigV2OpenstackKind + obj.TypeMeta.APIVersion = machineConfigV2OpenstackAPIVersion + source.TypeMeta = obj.TypeMeta + obj.ObjectMeta = source.ObjectMeta + + if v, ok := in["active_timeout"].(string); ok && len(v) > 0 { + obj.ActiveTimeout = v + } + if v, ok := in["auth_url"].(string); ok && len(v) > 0 { + obj.AuthURL = v + } + if v, ok := in["availability_zone"].(string); ok && len(v) > 0 { + obj.AvailabilityZone = v + } + if v, ok := in["cacert"].(string); ok && len(v) > 0 { + obj.CaCert = v + } + if v, ok := in["config_drive"].(bool); ok { + obj.ConfigDrive = v + } + if v, ok := in["domain_id"].(string); ok && len(v) > 0 { + obj.DomainID = v + } + if v, ok := in["domain_name"].(string); ok && len(v) > 0 { + obj.DomainName = v + } + if v, ok := in["endpoint_type"].(string); ok && len(v) > 0 { + obj.EndpointType = v + } + if v, ok := in["flavor_id"].(string); ok && len(v) > 0 { + obj.FlavorID = v + } + if v, ok := in["flavor_name"].(string); ok && len(v) > 0 { + obj.FlavorName = v + } + if v, ok := in["floating_ip_pool"].(string); ok && len(v) > 0 { + obj.FloatingIPPool = v + } + if v, ok := in["ip_version"].(string); ok && len(v) > 0 { + obj.IPVersion = v + } + if v, ok := in["image_id"].(string); ok && len(v) > 0 { + obj.ImageID = v + } + if v, ok := in["image_name"].(string); ok && len(v) > 0 { + obj.ImageName = v + } + if v, ok := in["insecure"].(bool); ok { + obj.Insecure = v + } + if v, ok := in["ip_version"].(string); ok && len(v) > 0 { + obj.IPVersion = v + } + if v, ok := in["keypair_name"].(string); ok && len(v) > 0 { + obj.KeypairName = v + } + if v, ok := in["net_id"].(string); ok && len(v) > 0 { + obj.NetID = v + } + if v, ok := in["net_name"].(string); ok && len(v) > 0 { + obj.NetName = v + } + if v, ok := in["nova_network"].(bool); ok { + obj.NovaNetwork = v + } + if v, ok := in["password"].(string); ok && len(v) > 0 { + obj.Password = v + } + if v, ok := in["private_key_file"].(string); ok && len(v) > 0 { + 
obj.PrivateKeyFile = v + } + if v, ok := in["region"].(string); ok && len(v) > 0 { + obj.Region = v + } + if v, ok := in["sec_groups"].(string); ok && len(v) > 0 { + obj.SecGroups = v + } + if v, ok := in["ssh_port"].(string); ok && len(v) > 0 { + obj.SSHPort = v + } + if v, ok := in["ssh_user"].(string); ok && len(v) > 0 { + obj.SSHUser = v + } + if v, ok := in["tenant_id"].(string); ok && len(v) > 0 { + obj.TenantID = v + } + if v, ok := in["tenant_name"].(string); ok && len(v) > 0 { + obj.TenantName = v + } + if v, ok := in["tenant_domain_id"].(string); ok && len(v) > 0 { + obj.TenantDomainID = v + } + if v, ok := in["tenant_domain_name"].(string); ok && len(v) > 0 { + obj.TenantDomainName = v + } + if v, ok := in["user_data_file"].(string); ok && len(v) > 0 { + obj.UserDataFile = v + } + if v, ok := in["username"].(string); ok && len(v) > 0 { + obj.Username = v + } + if v, ok := in["user_domain_id"].(string); ok && len(v) > 0 { + obj.UserDomainID = v + } + if v, ok := in["user_domain_name"].(string); ok && len(v) > 0 { + obj.UserDomainName = v + } + if v, ok := in["application_credential_id"].(string); ok && len(v) > 0 { + obj.ApplicationCredentialID = v + } + if v, ok := in["application_credential_name"].(string); ok && len(v) > 0 { + obj.ApplicationCredentialName = v + } + if v, ok := in["application_credential_secret"].(string); ok && len(v) > 0 { + obj.ApplicationCredentialSecret = v + } + if v, ok := in["boot_from_volume"].(bool); ok { + obj.BootFromVolume = v + } + if v, ok := in["volume_size"].(string); ok && len(v) > 0 { + obj.VolumeSize = v + } + if v, ok := in["volume_type"].(string); ok && len(v) > 0 { + obj.VolumeType = v + } + if v, ok := in["volume_id"].(string); ok && len(v) > 0 { + obj.VolumeID = v + } + if v, ok := in["volume_name"].(string); ok && len(v) > 0 { + obj.VolumeName = v + } + if v, ok := in["volume_device_path"].(string); ok && len(v) > 0 { + obj.VolumeDevicePath = v + } + + return obj +} diff --git 
a/rancher2/structure_machine_config_v2_vsphere.go b/rancher2/structure_machine_config_v2_vsphere.go new file mode 100644 index 000000000..900a7b0ee --- /dev/null +++ b/rancher2/structure_machine_config_v2_vsphere.go @@ -0,0 +1,277 @@ +package rancher2 + +import ( + norman "github.com/rancher/norman/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + machineConfigV2VmwarevsphereKind = "VmwarevsphereConfig" + machineConfigV2VmwarevsphereAPIVersion = "rke-machine-config.cattle.io/v1" + machineConfigV2VmwarevsphereAPIType = "rke-machine-config.cattle.io.vmwarevsphereconfig" + machineConfigV2VmwarevsphereClusterIDsep = "." +) + +//Types + +type machineConfigV2Vmwarevsphere struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Boot2dockerURL string `json:"boot2dockerUrl,omitempty" yaml:"boot2dockerUrl,omitempty"` + Cfgparam []string `json:"cfgparam,omitempty" yaml:"cfgparam,omitempty"` + CloneFrom string `json:"cloneFrom,omitempty" yaml:"cloneFrom,omitempty"` + CloudConfig string `json:"cloudConfig,omitempty" yaml:"cloudConfig,omitempty"` + Cloudinit string `json:"cloudinit,omitempty" yaml:"cloudinit,omitempty"` + ContentLibrary string `json:"contentLibrary,omitempty" yaml:"contentLibrary,omitempty"` + CPUCount string `json:"cpuCount,omitempty" yaml:"cpuCount,omitempty"` + CreationType string `json:"creationType,omitempty" yaml:"creationType,omitempty"` + CustomAttributes []string `json:"customAttribute,omitempty" yaml:"customAttribute,omitempty"` + Datacenter string `json:"datacenter,omitempty" yaml:"datacenter,omitempty"` + Datastore string `json:"datastore,omitempty" yaml:"datastore,omitempty"` + DatastoreCluster string `json:"datastoreCluster,omitempty" yaml:"datastoreCluster,omitempty"` + DiskSize string `json:"diskSize,omitempty" yaml:"diskSize,omitempty"` + Folder string `json:"folder,omitempty" yaml:"folder,omitempty"` + Hostsystem string `json:"hostsystem,omitempty" yaml:"hostsystem,omitempty"` + 
MemorySize string `json:"memorySize,omitempty" yaml:"memorySize,omitempty"` + Network []string `json:"network,omitempty" yaml:"network,omitempty"` + Password string `json:"password,omitempty" yaml:"password,omitempty"` + Pool string `json:"pool,omitempty" yaml:"pool,omitempty"` + SSHPassword string `json:"sshPassword,omitempty" yaml:"sshPassword,omitempty"` + SSHPort string `json:"sshPort,omitempty" yaml:"sshPort,omitempty"` + SSHUser string `json:"sshUser,omitempty" yaml:"sshUser,omitempty"` + SSHUserGroup string `json:"sshUserGroup,omitempty" yaml:"sshUserGroup,omitempty"` + Tags []string `json:"tag,omitempty" yaml:"tag,omitempty"` + Username string `json:"username,omitempty" yaml:"username,omitempty"` + VappIpallocationpolicy string `json:"vappIpallocationpolicy,omitempty" yaml:"vappIpallocationpolicy,omitempty"` + VappIpprotocol string `json:"vappIpprotocol,omitempty" yaml:"vappIpprotocol,omitempty"` + VappProperty []string `json:"vappProperty,omitempty" yaml:"vappProperty,omitempty"` + VappTransport string `json:"vappTransport,omitempty" yaml:"vappTransport,omitempty"` + Vcenter string `json:"vcenter,omitempty" yaml:"vcenter,omitempty"` + VcenterPort string `json:"vcenterPort,omitempty" yaml:"vcenterPort,omitempty"` +} + +type MachineConfigV2Vmwarevsphere struct { + norman.Resource + machineConfigV2Vmwarevsphere +} + +// Flatteners + +func flattenMachineConfigV2Vmwarevsphere(in *MachineConfigV2Vmwarevsphere) []interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + + if len(in.Boot2dockerURL) > 0 { + obj["boot2docker_url"] = in.Boot2dockerURL + } + if len(in.Cfgparam) > 0 { + obj["cfgparam"] = toArrayInterface(in.Cfgparam) + } + if len(in.CloneFrom) > 0 { + obj["clone_from"] = in.CloneFrom + } + if len(in.CloudConfig) > 0 { + obj["cloud_config"] = in.CloudConfig + } + if len(in.Cloudinit) > 0 { + obj["cloudinit"] = in.Cloudinit + } + if len(in.ContentLibrary) > 0 { + obj["content_library"] = in.ContentLibrary + } + if 
len(in.CPUCount) > 0 { + obj["cpu_count"] = in.CPUCount + } + if len(in.CreationType) > 0 { + obj["creation_type"] = in.CreationType + } + if len(in.CustomAttributes) > 0 { + obj["custom_attributes"] = toArrayInterface(in.CustomAttributes) + } + if len(in.Datacenter) > 0 { + obj["datacenter"] = in.Datacenter + } + if len(in.Datastore) > 0 { + obj["datastore"] = in.Datastore + } + if len(in.DatastoreCluster) > 0 { + obj["datastore_cluster"] = in.DatastoreCluster + } + if len(in.DiskSize) > 0 { + obj["disk_size"] = in.DiskSize + } + if len(in.Folder) > 0 { + obj["folder"] = in.Folder + } + if len(in.Hostsystem) > 0 { + obj["hostsystem"] = in.Hostsystem + } + if len(in.MemorySize) > 0 { + obj["memory_size"] = in.MemorySize + } + if len(in.Network) > 0 { + obj["network"] = toArrayInterface(in.Network) + } + if len(in.Password) > 0 { + obj["password"] = in.Password + } + if len(in.Pool) > 0 { + obj["pool"] = in.Pool + } + if len(in.SSHPassword) > 0 { + obj["ssh_password"] = in.SSHPassword + } + if len(in.SSHPort) > 0 { + obj["ssh_port"] = in.SSHPort + } + if len(in.SSHUser) > 0 { + obj["ssh_user"] = in.SSHUser + } + if len(in.SSHUserGroup) > 0 { + obj["ssh_user_group"] = in.SSHUserGroup + } + if len(in.Tags) > 0 { + obj["tags"] = toArrayInterface(in.Tags) + } + if len(in.Username) > 0 { + obj["username"] = in.Username + } + if len(in.VappIpallocationpolicy) > 0 { + obj["vapp_ip_allocation_policy"] = in.VappIpallocationpolicy + } + if len(in.VappIpprotocol) > 0 { + obj["vapp_ip_protocol"] = in.VappIpprotocol + } + if len(in.VappProperty) > 0 { + obj["vapp_property"] = toArrayInterface(in.VappProperty) + } + if len(in.VappTransport) > 0 { + obj["vapp_transport"] = in.VappTransport + } + if len(in.Vcenter) > 0 { + obj["vcenter"] = in.Vcenter + } + if len(in.VcenterPort) > 0 { + obj["vcenter_port"] = in.VcenterPort + } + + return []interface{}{obj} +} + +// Expanders + +func expandMachineConfigV2Vmwarevsphere(p []interface{}, source *MachineConfigV2) 
*MachineConfigV2Vmwarevsphere { + if p == nil || len(p) == 0 || p[0] == nil { + return nil + } + obj := &MachineConfigV2Vmwarevsphere{} + + if len(source.ID) > 0 { + obj.ID = source.ID + } + in := p[0].(map[string]interface{}) + + obj.TypeMeta.Kind = machineConfigV2VmwarevsphereKind + obj.TypeMeta.APIVersion = machineConfigV2VmwarevsphereAPIVersion + source.TypeMeta = obj.TypeMeta + obj.ObjectMeta = source.ObjectMeta + + if v, ok := in["boot2docker_url"].(string); ok && len(v) > 0 { + obj.Boot2dockerURL = v + } + if v, ok := in["cfgparam"].([]interface{}); ok && len(v) > 0 { + obj.Cfgparam = toArrayString(v) + } + if v, ok := in["clone_from"].(string); ok && len(v) > 0 { + obj.CloneFrom = v + } + if v, ok := in["cloud_config"].(string); ok && len(v) > 0 { + obj.CloudConfig = v + } + if v, ok := in["cloudinit"].(string); ok && len(v) > 0 { + obj.Cloudinit = v + } + if v, ok := in["content_library"].(string); ok && len(v) > 0 { + obj.ContentLibrary = v + } + if v, ok := in["cpu_count"].(string); ok && len(v) > 0 { + obj.CPUCount = v + } + if v, ok := in["creation_type"].(string); ok && len(v) > 0 { + obj.CreationType = v + } + if v, ok := in["custom_attributes"].([]interface{}); ok && len(v) > 0 { + obj.CustomAttributes = toArrayString(v) + } + if v, ok := in["datacenter"].(string); ok && len(v) > 0 { + obj.Datacenter = v + } + if v, ok := in["datastore"].(string); ok && len(v) > 0 { + obj.Datastore = v + } + if v, ok := in["datastore_cluster"].(string); ok && len(v) > 0 { + obj.DatastoreCluster = v + } + if v, ok := in["disk_size"].(string); ok && len(v) > 0 { + obj.DiskSize = v + } + if v, ok := in["folder"].(string); ok && len(v) > 0 { + obj.Folder = v + } + if v, ok := in["hostsystem"].(string); ok && len(v) > 0 { + obj.Hostsystem = v + } + if v, ok := in["memory_size"].(string); ok && len(v) > 0 { + obj.MemorySize = v + } + if v, ok := in["network"].([]interface{}); ok && len(v) > 0 { + obj.Network = toArrayString(v) + } + if v, ok := in["password"].(string); ok 
&& len(v) > 0 { + obj.Password = v + } + if v, ok := in["pool"].(string); ok && len(v) > 0 { + obj.Pool = v + } + if v, ok := in["ssh_password"].(string); ok && len(v) > 0 { + obj.SSHPassword = v + } + if v, ok := in["ssh_port"].(string); ok && len(v) > 0 { + obj.SSHPort = v + } + if v, ok := in["ssh_user"].(string); ok && len(v) > 0 { + obj.SSHUser = v + } + if v, ok := in["ssh_user_group"].(string); ok && len(v) > 0 { + obj.SSHUserGroup = v + } + if v, ok := in["tags"].([]interface{}); ok && len(v) > 0 { + obj.Tags = toArrayString(v) + } + if v, ok := in["username"].(string); ok && len(v) > 0 { + obj.Username = v + } + if v, ok := in["vapp_ip_allocation_policy"].(string); ok && len(v) > 0 { + obj.VappIpallocationpolicy = v + } + if v, ok := in["vapp_ip_protocol"].(string); ok && len(v) > 0 { + obj.VappIpprotocol = v + } + if v, ok := in["vapp_property"].([]interface{}); ok && len(v) > 0 { + obj.VappProperty = toArrayString(v) + } + if v, ok := in["vapp_transport"].(string); ok && len(v) > 0 { + obj.VappTransport = v + } + if v, ok := in["vcenter"].(string); ok && len(v) > 0 { + obj.Vcenter = v + } + if v, ok := in["vcenter_port"].(string); ok && len(v) > 0 { + obj.VcenterPort = v + } + + return obj +} From 4da9910571b24fa545ad4a067dfe8b6d3e3e274a Mon Sep 17 00:00:00 2001 From: rawmind0 Date: Fri, 10 Sep 2021 13:23:06 +0200 Subject: [PATCH 6/6] Updated CHANGELOG.md --- CHANGELOG.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ab25da5ff..3fc71e554 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,16 @@ -## 1.20.0 (Unreleased) +## 1.20.0 (September 17, 2021) FEATURES: * **New Argument:** `rancher2_cluster.aks_config_v2` - (Optional) The Azure AKS v2 configuration for creating/import `aks` Clusters. Conflicts with `aks_config`, `eks_config`, `eks_config_v2`, `gke_config`, `gke_config_v2`, `oke_config` `k3s_config` and `rke_config`. 
Just for Rancher v2.6.0 and above (list maxitems:1) * **New Argument:** `rancher2_cloud_credential.azure_credential_config.environment` - (Optional/Computed) Azure environment (e.g. AzurePublicCloud, AzureChinaCloud) (string) * **New Argument:** `rancher2_cloud_credential.azure_credential_config.tenant_id` - (Optional/Computed) Azure Tenant ID (string) +* **New Attribute:** `rancher2_cluster.cluster_registration_token.insecure_node_command` - (Computed) Insecure node command to execute in an imported k8s cluster (string) +* **New Attribute:** `rancher2_cluster.cluster_registration_token.insecure_windows_node_command` - (Computed) Insecure Windows node command to execute in an imported k8s cluster (string) +* **New Attribute:** `rancher2_cloud_credential.amazonec2_credential_config.default_region` - (Optional) AWS default region (string) +* **New Resource:** `rancher2_machine_config_v2` - Provides a Rancher v2 Machine config v2 resource. Available as tech preview at Rancher v2.6.0 and above. +* **New Resource:** `rancher2_cluster_v2` - Provides a Rancher cluster v2 resource to manage RKE2 and K3S clusters. Available as tech preview at Rancher v2.6.0 and above. +* **New Data Source:** `rancher2_cluster_v2` - Provides a Rancher cluster v2 data source to retrieve RKE2 and K3S clusters. Available at Rancher v2.6.0 and above. ENHANCEMENTS: @@ -13,6 +19,7 @@ ENHANCEMENTS: * Updated `rancher2_cluster.gke_config_v2` schema to support rancher v2.6.0 https://github.com/rancher/gke-operator/pull/49 * Updated `rancher2_cluster.eks_config_v2` schema to support rancher v2.6.0 https://github.com/rancher/eks-operator/pull/38 * Updated `rancher2_cluster.gke_config_v2` schema to support rancher v2.6.0 https://github.com/rancher/rancher/issues/34291 +* Updated docs, adding note to use `rancher2_bootstrap` resource on Rancher v2.6.0 and above BUG FIXES: