From 87243a77776623418103ddd0238b33f0785616fd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Moritz=20R=C3=B6hrich?=
Date: Tue, 3 Sep 2024 16:22:17 +0200
Subject: [PATCH] Add VM tags test, LoadBalancer test
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add acceptance tests for VirtualMachine labels (tags) and LoadBalancer
resources.

The labeling/tagging mechanism is crucial for these tests: the
admission webhook requires a LoadBalancer to have at least one VM whose
labels match its selectors.

Also improve documentation.

Signed-off-by: Moritz Röhrich
---
 docs/resources/virtualmachine.md              |   9 +-
 .../resources/harvester_ippool/resources.tf   |   2 +-
 .../harvester_loadbalancer/resources.tf       |  47 ++++++++
 .../resource_loadbalancer_constructor.go      |  32 ++++++
 .../loadbalancer/schema_loadbalancer.go       |  47 +++++++-
 .../virtualmachine/resource_virtualmachine.go |  27 +++--
 internal/tests/provider_test.go               |   7 +-
 internal/tests/resource_loadbalancer_test.go  |  36 ++++++
 .../tests/resource_virtualmachine_test.go     | 107 +++++++++++++++---
 pkg/constants/constants_loadbalancer.go       |  16 ++-
 10 files changed, 295 insertions(+), 35 deletions(-)
 create mode 100644 examples/resources/harvester_loadbalancer/resources.tf

diff --git a/docs/resources/virtualmachine.md b/docs/resources/virtualmachine.md
index 7f68930a..9256c054 100644
--- a/docs/resources/virtualmachine.md
+++ b/docs/resources/virtualmachine.md
@@ -211,7 +211,7 @@ resource "harvester_virtualmachine" "opensuse154" {
 - **secure_boot** (Boolean) EFI must be enabled to use this feature
 - **ssh_keys** (List of String)
 - **start** (Boolean, Deprecated)
-- **tags** (Map of String)
+- **tags** (Map of String) (also known as labels, see [below](#tags--labels))
 - **tpm** (Block List, Max: 1) (see [below for nested schema](#nestedblock--tpm))
 
 ### Read-Only
@@ -299,6 +299,13 @@ Optional:
 
 - **name** (String) just add this field for doc generation
 
+
+### Tags / Labels
+
+Optional, map of strings. The keys of the map appear on the virtual
+machine as labels `tag.harvesterhci.io/${KEY}`; the values of the map
+become the corresponding label values.
+
 ## Import
 
 Import is supported using the following syntax:
diff --git a/examples/resources/harvester_ippool/resources.tf b/examples/resources/harvester_ippool/resources.tf
index 1b82f710..62b16a80 100644
--- a/examples/resources/harvester_ippool/resources.tf
+++ b/examples/resources/harvester_ippool/resources.tf
@@ -1,5 +1,5 @@
 resource "harvester_ippool" "service_ips" {
-  name = "service_ips"
+  name = "service-ips"
 
   range {
     start = "10.11.0.1"
diff --git a/examples/resources/harvester_loadbalancer/resources.tf b/examples/resources/harvester_loadbalancer/resources.tf
new file mode 100644
index 00000000..62e5fb5f
--- /dev/null
+++ b/examples/resources/harvester_loadbalancer/resources.tf
@@ -0,0 +1,47 @@
+resource "harvester_loadbalancer" "service_loadbalancer" {
+  name = "service-loadbalancer"
+
+  # This ensures correct ordering for the creation of the resources.
+  # The admission webhook rejects a loadbalancer unless at least one virtual
+  # machine with labels matching its backend_selector(s) already exists, so
+  # this dependency ordering can be used to create that virtual machine in
+  # the same Terraform file.
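+  #
+  # Note: as the acceptance tests in this patch show, keys set through a
+  # virtual machine's `tags` attribute appear on the VM as labels prefixed
+  # with "tag.harvesterhci.io/", so selectors that are meant to match
+  # tag-derived labels must use that prefixed key form.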
+  depends_on = [
+    harvester_virtualmachine.name
+  ]
+
+  listener {
+    port = 443
+    protocol = "tcp"
+    backend_port = 8080
+  }
+
+  listener {
+    port = 80
+    protocol = "tcp"
+    backend_port = 8080
+  }
+
+  ipam = "ippool"
+  ippool = "service-ips"
+
+  workload_type = "vm"
+
+  backend_selector {
+    key = "app"
+    values = [ "test" ]
+  }
+
+  backend_selector {
+    key = "component"
+    values = [ "frontend", "ui" ]
+  }
+
+  healthcheck {
+    port = 443
+    success_threshold = 1
+    failure_threshold = 3
+    period_seconds = 10
+    timeout_seconds = 5
+  }
+}
diff --git a/internal/provider/loadbalancer/resource_loadbalancer_constructor.go b/internal/provider/loadbalancer/resource_loadbalancer_constructor.go
index d23b21cf..616322cc 100644
--- a/internal/provider/loadbalancer/resource_loadbalancer_constructor.go
+++ b/internal/provider/loadbalancer/resource_loadbalancer_constructor.go
@@ -6,6 +6,8 @@ import (
 	loadbalancerv1 "github.com/harvester/harvester-load-balancer/pkg/apis/loadbalancer.harvesterhci.io/v1beta1"
 	corev1 "k8s.io/api/core/v1"
 
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+
 	"github.com/harvester/terraform-provider-harvester/internal/util"
 	"github.com/harvester/terraform-provider-harvester/pkg/constants"
 )
@@ -40,6 +42,11 @@ func (c *Constructor) Setup() util.Processors {
 			Parser:   c.subresourceLoadBalancerListenerParser,
 			Required: true,
 		},
+		{
+			Field:    constants.SubresourceTypeLoadBalancerBackendSelector,
+			Parser:   c.subresourceLoadBalancerBackendSelectorParser,
+			Required: false,
+		},
 		{
 			Field:    constants.SubresourceTypeLoadBalancerHealthCheck,
 			Parser:   c.subresourceLoadBalancerHealthCheckParser,
@@ -119,6 +126,31 @@ func (c *Constructor) subresourceLoadBalancerListenerParser(data interface{}) er
 	return nil
 }
 
+func (c *Constructor) subresourceLoadBalancerBackendSelectorParser(data interface{}) error {
+	backendServerSelector := make(map[string][]string)
+
+	selectorSet := data.(*schema.Set)
+
+	selectors := selectorSet.List()
+
+	for _, selectorData := range selectors {
+		selector := selectorData.(map[string]interface{})
+
+		key := selector[constants.FieldBackendSelectorKey].(string)
+		valuesData := selector[constants.FieldBackendSelectorValues].([]interface{})
+
+		values := make([]string, 0)
+
+		for _, valueData := range valuesData {
+			values = append(values, valueData.(string))
+		}
+
+		backendServerSelector[key] = values
+	}
+	c.LoadBalancer.Spec.BackendServerSelector = backendServerSelector
+	return nil
+}
+
 func (c *Constructor) subresourceLoadBalancerHealthCheckParser(data interface{}) error {
 	healthcheck := data.(map[string]interface{})
 
diff --git a/internal/provider/loadbalancer/schema_loadbalancer.go b/internal/provider/loadbalancer/schema_loadbalancer.go
index 041175c3..cdf4efca 100644
--- a/internal/provider/loadbalancer/schema_loadbalancer.go
+++ b/internal/provider/loadbalancer/schema_loadbalancer.go
@@ -46,10 +46,13 @@ func Schema() map[string]*schema.Schema {
 				Schema: subresourceSchemaLoadBalancerListener(),
 			},
 		},
-		constants.FieldLoadBalancerBackendServerSelector: {
-			Type:        schema.TypeMap,
+		constants.SubresourceTypeLoadBalancerBackendSelector: {
+			Type:        schema.TypeSet,
 			Optional:    true,
 			Description: "",
+			Elem: &schema.Resource{
+				Schema: subresourceSchemaLoadBalancerBackendSelector(),
+			},
 		},
 		constants.SubresourceTypeLoadBalancerHealthCheck: {
 			Type:     schema.TypeList,
@@ -94,7 +97,45 @@ func subresourceSchemaLoadBalancerListener() map[string]*schema.Schema {
 	return s
 }
 
+func subresourceSchemaLoadBalancerBackendSelector() 
map[string]*schema.Schema { + s := map[string]*schema.Schema{ + constants.FieldBackendSelectorKey: { + Type: schema.TypeString, + Required: true, + }, + constants.FieldBackendSelectorValues: { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + } + return s +} + func subresourceSchemaLoadBalancerHealthCheck() map[string]*schema.Schema { - s := map[string]*schema.Schema{} + s := map[string]*schema.Schema{ + constants.FieldHealthCheckPort: { + Type: schema.TypeInt, + Required: true, + }, + constants.FieldHealthCheckSuccessThreshold: { + Type: schema.TypeInt, + Optional: true, + }, + constants.FieldHealthCheckFailureThreshold: { + Type: schema.TypeInt, + Optional: true, + }, + constants.FieldHealthCheckPeriodSeconds: { + Type: schema.TypeInt, + Optional: true, + }, + constants.FieldHealthCheckTimeoutSeconds: { + Type: schema.TypeInt, + Optional: true, + }, + } return s } diff --git a/internal/provider/virtualmachine/resource_virtualmachine.go b/internal/provider/virtualmachine/resource_virtualmachine.go index 5020d5ae..173e3366 100644 --- a/internal/provider/virtualmachine/resource_virtualmachine.go +++ b/internal/provider/virtualmachine/resource_virtualmachine.go @@ -2,7 +2,6 @@ package virtualmachine import ( "context" - "fmt" "strings" "time" @@ -177,17 +176,27 @@ func resourceVirtualMachineDelete(ctx context.Context, d *schema.ResourceData, m return diag.FromErr(err) } - ctxDeadline, _ := ctx.Deadline() - events, err := c.HarvesterClient. - KubevirtV1(). - VirtualMachines(namespace). - Watch(ctx, util.WatchOptions(name, time.Until(ctxDeadline))) + stateConf := &resource.StateChangeConf{ + Pending: []string{ + constants.StateCommonReady, + constants.StateCommonFailed, + constants.StateCommonUnknown, + constants.StateVirtualMachineRunning, + constants.StateVirtualMachineStarting, + constants.StateVirtualMachineStopping, + constants.StateVirtualMachineStopped, + }, + Target: []string{constants.StateCommonRemoved}, + Refresh: resourceVirtualMachineRefresh(ctx, d, meta, namespace, name, ""), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + _, err = stateConf.WaitForStateContext(ctx) if err != nil { return diag.FromErr(err) } - if !util.HasDeleted(events) { - return diag.FromErr(fmt.Errorf("timeout waiting for virtualmachine %s to be deleted", d.Id())) - } + d.SetId("") return nil } diff --git a/internal/tests/provider_test.go b/internal/tests/provider_test.go index d2d63c11..22a06944 100644 --- a/internal/tests/provider_test.go +++ b/internal/tests/provider_test.go @@ -26,6 +26,7 @@ const ( testAccResourceStateRemoved = "removed" testAccResourceStateExist = "exist" + testAccResourceStateError = "error" ) func init() { @@ -54,8 +55,8 @@ func getStateChangeConf(refresh resource.StateRefreshFunc) *resource.StateChange Pending: []string{testAccResourceStateExist}, Target: []string{testAccResourceStateRemoved}, Refresh: refresh, - Timeout: 1 * time.Minute, - Delay: 1 * time.Second, + Timeout: 2 * time.Minute, + Delay: 10 * time.Second, MinTimeout: 3 * time.Second, } } @@ -67,7 +68,7 @@ func getResourceStateRefreshFunc(getResourceFunc func() (interface{}, error)) re if apierrors.IsNotFound(err) { return obj, testAccResourceStateRemoved, nil } - return nil, "", err + return nil, testAccResourceStateError, err } return obj, testAccResourceStateExist, nil } diff --git a/internal/tests/resource_loadbalancer_test.go b/internal/tests/resource_loadbalancer_test.go index 3128c186..d874f8d6 100644 
--- a/internal/tests/resource_loadbalancer_test.go +++ b/internal/tests/resource_loadbalancer_test.go @@ -38,14 +38,50 @@ func TestAccLoadBalancer_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: ` +resource "harvester_virtualmachine" "test_vm" { + name = "test-vm" + namespace = "default" + + tags = { + app = "testlb" + } + + cpu = 1 + memory = "1Gi" + machine_type = "q35" + run_strategy = "RerunOnFailure" + + network_interface { + name = "default" + } + + disk { + name = "rootdisk" + type = "disk" + bus = "virtio" + boot_order = 1 + + container_image_name = "kubevirt/fedora-cloud-container-disk-demo:v0.35.0" + } +} + resource "harvester_loadbalancer" "test_loadbalancer" { name = "test-loadbalancer" + depends_on = [ + harvester_virtualmachine.test_vm + ] + listener { port = 443 protocol = "tcp" backend_port = 8080 } + + backend_selector { + key = "tag.harvesterhci.io/app" + values = [ "testlb" ] + } } `, Check: resource.ComposeAggregateTestCheckFunc( diff --git a/internal/tests/resource_virtualmachine_test.go b/internal/tests/resource_virtualmachine_test.go index 8dfb6c18..5f9d13a8 100644 --- a/internal/tests/resource_virtualmachine_test.go +++ b/internal/tests/resource_virtualmachine_test.go @@ -89,33 +89,114 @@ func TestAccVirtualMachine_basic(t *testing.T) { }) } -func testAccVirtualMachineExists(ctx context.Context, n string, vm *kubevirtv1.VirtualMachine) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Resource %s not found. ", n) +func TestAccVirtualMachine_labels(t *testing.T) { + var ( + vm *kubevirtv1.VirtualMachine + ctx = context.Background() + expectedLabels = map[string]string{ + "tag.harvesterhci.io/Foobar": "barfoo", } + ) - if rs.Primary.ID == "" { - return fmt.Errorf("Resource %s ID not set. 
", n) - } + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckVirtualMachineDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: ` +resource "harvester_virtualmachine" "test-acc-labels" { + name = "test-vm" + namespace = "default" - id := rs.Primary.ID - c := testAccProvider.Meta().(*client.Client) + tags = { + Foobar = "barfoo" + } - namespace, name, err := helper.IDParts(id) + cpu = 1 + memory = "1Gi" + + run_strategy = "RerunOnFailure" + machine_type = "q35" + + network_interface { + name = "default" + } + + disk { + name = "rootdisk" + type = "disk" + bus = "virtio" + boot_order = 1 + + container_image_name = "kubevirt/fedora-cloud-container-disk-demo:v0.35.0" + } +} +`, + Check: resource.ComposeAggregateTestCheckFunc( + testAccVirtualMachineExists(ctx, "harvester_virtualmachine.test-acc-labels", vm), + testAccVirtualMachineLabels(ctx, "harvester_virtualmachine.test-acc-labels", expectedLabels), + ), + }, + }, + }) +} + +func testAccVirtualMachineExists(ctx context.Context, n string, vm *kubevirtv1.VirtualMachine) resource.TestCheckFunc { + return func(s *terraform.State) error { + foundVM, err := testAccGetVirtualMachine(ctx, s, n) if err != nil { return err } - foundVM, err := c.HarvesterClient.KubevirtV1().VirtualMachines(namespace).Get(ctx, name, metav1.GetOptions{}) + vm = foundVM + return nil + } +} + +func testAccVirtualMachineLabels(ctx context.Context, n string, labels map[string]string) resource.TestCheckFunc { + return func(s *terraform.State) error { + vm, err := testAccGetVirtualMachine(ctx, s, n) if err != nil { return err } - vm = foundVM + + for key := range labels { + val, ok := vm.Labels[key] + if !ok { + return fmt.Errorf("Label %s not found", key) + } + + if val != labels[key] { + return fmt.Errorf("Label %s contains unexpected value: %s", key, val) + } + } return nil } } +func testAccGetVirtualMachine(ctx context.Context, state *terraform.State, resourceName string) (*kubevirtv1.VirtualMachine, error) { + resource, ok := state.RootModule().Resources[resourceName] + if !ok { + return nil, fmt.Errorf("Resource not found: %s", resourceName) + } + if resource.Primary.ID == "" { + return nil, fmt.Errorf("Resource ID not set: %s", resourceName) + } + + id := resource.Primary.ID + c := testAccProvider.Meta().(*client.Client) + + namespace, name, err := helper.IDParts(id) + if err != nil { + return nil, err + } + + return c.HarvesterClient. + KubevirtV1(). + VirtualMachines(namespace). 
+ Get(ctx, name, metav1.GetOptions{}) +} + func testAccCheckVirtualMachineDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { for _, rs := range s.RootModule().Resources { diff --git a/pkg/constants/constants_loadbalancer.go b/pkg/constants/constants_loadbalancer.go index 51fe19e4..2be49bc5 100644 --- a/pkg/constants/constants_loadbalancer.go +++ b/pkg/constants/constants_loadbalancer.go @@ -3,11 +3,10 @@ package constants const ( ResourceTypeLoadBalancer = "harvester_loadbalancer" - FieldLoadBalancerDescription = "description" - FieldLoadBalancerWorkloadType = "workload_type" - FieldLoadBalancerIPAM = "ipam" - FieldLoadBalancerIPPool = "ippool" - FieldLoadBalancerBackendServerSelector = "backend_server_selector" + FieldLoadBalancerDescription = "description" + FieldLoadBalancerWorkloadType = "workload_type" + FieldLoadBalancerIPAM = "ipam" + FieldLoadBalancerIPPool = "ippool" ) const ( @@ -29,6 +28,13 @@ const ( FieldHealthCheckTimeoutSeconds = "timeout_seconds" ) +const ( + SubresourceTypeLoadBalancerBackendSelector = "backend_selector" + + FieldBackendSelectorKey = "key" + FieldBackendSelectorValues = "values" +) + const ( LoadBalancerWorkloadTypeVM = "vm" LoadBalancerWorkloadTypeCluster = "cluster"