Add VM tags test, LoadBalancer test
Add acceptance tests for VirtualMachine labels (tags) and LoadBalancer
resources. The labeling/tagging mechanism is crucial for these tests,
since the admission webhook rejects a LoadBalancer unless at least one
VM matches its backend selectors.

Also improve documentation.

Signed-off-by: Moritz Röhrich <[email protected]>
m-ildefons committed Sep 3, 2024
1 parent e8d42ae commit e029365
Showing 10 changed files with 296 additions and 35 deletions.
9 changes: 8 additions & 1 deletion docs/resources/virtualmachine.md
@@ -211,7 +211,7 @@ resource "harvester_virtualmachine" "opensuse154" {
- **secure_boot** (Boolean) EFI must be enabled to use this feature
- **ssh_keys** (List of String)
- **start** (Boolean, Deprecated)
- **tags** (Map of String)
- **tags** (Map of String) (otherwise known as labels, see [below](#tags))
- **tpm** (Block List, Max: 1) (see [below for nested schema](#nestedblock--tpm))

### Read-Only
@@ -299,6 +299,13 @@ Optional:

- **name** (String) just add this field for doc generation

<a id="tags"></a>
### Tags / Labels

Optional, map of strings. Each key of the map appears on the virtual
machine as a label named `tags.harvesterhci.io/${KEY}`, and the
corresponding map value becomes the value of that label.
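
For example (a minimal sketch; the resource and tag names are illustrative):

```hcl
resource "harvester_virtualmachine" "example" {
  # ... other required fields elided ...

  tags = {
    app  = "frontend"
    team = "platform"
  }
}
```

Following the mapping described above, this would put the labels
`tags.harvesterhci.io/app: frontend` and `tags.harvesterhci.io/team: platform`
on the virtual machine object.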

## Import

Import is supported using the following syntax:
2 changes: 1 addition & 1 deletion examples/resources/harvester_ippool/resources.tf
@@ -1,5 +1,5 @@
resource "harvester_ippool" "service_ips" {
name = "service_ips"
name = "service-ips"

range {
start = "10.11.0.1"
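
The rename is presumably because Kubernetes object names must be valid
DNS-1123 names, which do not allow underscores; the LoadBalancer example
below also references this pool as `service-ips`.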
47 changes: 47 additions & 0 deletions examples/resources/harvester_loadbalancer/resources.tf
@@ -0,0 +1,47 @@
resource "harvester_loadbalancer" "service_loadbalancer" {
name = "service-loadbalancer"

# This ensures correct ordering for the creation of the resources.
# The loadbalancer resource will be rejected by the admission webhook, if not
# at least one virtual machine with labels matching the backend_selector(s)
# already exists. This dependency ordering can be used to create that virtual
# machine with the same Terraform file.
depends_on = [
harvester_virtualmachine.name
]

listener {
port = 443
protocol = "tcp"
backend_port = 8080
}

listener {
port = 80
protocol = "tcp"
backend_port = 8080
}

ipam = "ippool"
ippool = "service-ips"

workload_type = "vm"

backend_selector {
key = "app"
values = [ "test" ]
}

backend_selector {
key = "component"
values = [ "frontend", "ui" ]
}

healthcheck {
port = 443
success_threshold = 1
failure_threshold = 3
period_seconds = 10
timeout_seconds = 5
}
}
@@ -6,6 +6,8 @@ import (
loadbalancerv1 "github.com/harvester/harvester-load-balancer/pkg/apis/loadbalancer.harvesterhci.io/v1beta1"
corev1 "k8s.io/api/core/v1"

"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

"github.com/harvester/terraform-provider-harvester/internal/util"
"github.com/harvester/terraform-provider-harvester/pkg/constants"
)
@@ -40,6 +42,11 @@ func (c *Constructor) Setup() util.Processors {
Parser: c.subresourceLoadBalancerListenerParser,
Required: true,
},
{
Field: constants.SubresourceTypeLoadBalancerBackendSelector,
Parser: c.subresourceLoadBalancerBackendSelectorParser,
Required: false,
},
{
Field: constants.SubresourceTypeLoadBalancerHealthCheck,
Parser: c.subresourceLoadBalancerHealthCheckParser,
@@ -119,6 +126,32 @@ func (c *Constructor) subresourceLoadBalancerListenerParser(data interface{}) er
return nil
}

// subresourceLoadBalancerBackendSelectorParser converts the backend_selector
// blocks from the resource data into the load balancer's backend server
// selector map (selector key -> list of accepted values).
func (c *Constructor) subresourceLoadBalancerBackendSelectorParser(data interface{}) error {
backendServerSelector := make(map[string][]string)

selectorSet := data.(*schema.Set)

selectors := selectorSet.List()

for _, selectorData := range selectors {
selector := selectorData.(map[string]interface{})

key := selector[constants.FieldBackendSelectorKey].(string)
valuesData := selector[constants.FieldBackendSelectorValues].([]interface{})

values := make([]string, 0)

for _, valueData := range valuesData {
values = append(values, valueData.(string))
}

backendServerSelector[key] = values
}
c.LoadBalancer.Spec.BackendServerSelector = backendServerSelector
return nil
}
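
To illustrate what this parser consumes (a sketch mirroring the example
file above): a configuration block such as

```hcl
backend_selector {
  key    = "component"
  values = ["frontend", "ui"]
}
```

ends up as a single entry in `BackendServerSelector`, with key `component`
and values `["frontend", "ui"]`.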

func (c *Constructor) subresourceLoadBalancerHealthCheckParser(data interface{}) error {
healthcheck := data.(map[string]interface{})

47 changes: 44 additions & 3 deletions internal/provider/loadbalancer/schema_loadbalancer.go
@@ -46,10 +46,13 @@ func Schema() map[string]*schema.Schema {
Schema: subresourceSchemaLoadBalancerListener(),
},
},
constants.FieldLoadBalancerBackendServerSelector: {
Type: schema.TypeMap,
constants.SubresourceTypeLoadBalancerBackendSelector: {
Type: schema.TypeSet,
Optional: true,
Description: "",
Elem: &schema.Resource{
Schema: subresourceSchemaLoadBalancerBackendSelector(),
},
},
constants.SubresourceTypeLoadBalancerHealthCheck: {
Type: schema.TypeList,
@@ -94,7 +97,45 @@ func subresourceSchemaLoadBalancerListener() map[string]*schema.Schema {
return s
}

func subresourceSchemaLoadBalancerBackendSelector() map[string]*schema.Schema {
s := map[string]*schema.Schema{
constants.FieldBackendSelectorKey: {
Type: schema.TypeString,
Required: true,
},
constants.FieldBackendSelectorValues: {
Type: schema.TypeList,
Required: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
}
return s
}

func subresourceSchemaLoadBalancerHealthCheck() map[string]*schema.Schema {
s := map[string]*schema.Schema{}
s := map[string]*schema.Schema{
constants.FieldHealthCheckPort: {
Type: schema.TypeInt,
Required: true,
},
constants.FieldHealthCheckSuccessThreshold: {
Type: schema.TypeInt,
Optional: true,
},
constants.FieldHealthCheckFailureThreshold: {
Type: schema.TypeInt,
Optional: true,
},
constants.FieldHealthCheckPeriodSeconds: {
Type: schema.TypeInt,
Optional: true,
},
constants.FieldHealthCheckTimeoutSeconds: {
Type: schema.TypeInt,
Optional: true,
},
}
return s
}
27 changes: 18 additions & 9 deletions internal/provider/virtualmachine/resource_virtualmachine.go
@@ -2,7 +2,6 @@ package virtualmachine

import (
"context"
"fmt"
"strings"
"time"

@@ -177,17 +176,27 @@ func resourceVirtualMachineDelete(ctx context.Context, d *schema.ResourceData, m
return diag.FromErr(err)
}

ctxDeadline, _ := ctx.Deadline()
events, err := c.HarvesterClient.
KubevirtV1().
VirtualMachines(namespace).
Watch(ctx, util.WatchOptions(name, time.Until(ctxDeadline)))
stateConf := &resource.StateChangeConf{
Pending: []string{
constants.StateCommonReady,
constants.StateCommonFailed,
constants.StateCommonUnknown,
constants.StateVirtualMachineRunning,
constants.StateVirtualMachineStarting,
constants.StateVirtualMachineStopping,
constants.StateVirtualMachineStopped,
},
Target: []string{constants.StateCommonRemoved},
Refresh: resourceVirtualMachineRefresh(ctx, d, meta, namespace, name, ""),
Timeout: d.Timeout(schema.TimeoutDelete),
Delay: 10 * time.Second,
MinTimeout: 3 * time.Second,
}
_, err = stateConf.WaitForStateContext(ctx)
if err != nil {
return diag.FromErr(err)
}
if !util.HasDeleted(events) {
return diag.FromErr(fmt.Errorf("timeout waiting for virtualmachine %s to be deleted", d.Id()))
}

d.SetId("")
return nil
}
7 changes: 4 additions & 3 deletions internal/tests/provider_test.go
@@ -26,6 +26,7 @@ const (

testAccResourceStateRemoved = "removed"
testAccResourceStateExist = "exist"
testAccResourceStateError = "error"
)

func init() {
@@ -54,8 +55,8 @@ func getStateChangeConf(refresh resource.StateRefreshFunc) *resource.StateChange
Pending: []string{testAccResourceStateExist},
Target: []string{testAccResourceStateRemoved},
Refresh: refresh,
Timeout: 1 * time.Minute,
Delay: 1 * time.Second,
Timeout: 2 * time.Minute,
Delay: 10 * time.Second,
MinTimeout: 3 * time.Second,
}
}
@@ -67,7 +68,7 @@ func getResourceStateRefreshFunc(getResourceFunc func() (interface{}, error)) re
if apierrors.IsNotFound(err) {
return obj, testAccResourceStateRemoved, nil
}
return nil, "", err
return nil, testAccResourceStateError, err
}
return obj, testAccResourceStateExist, nil
}
36 changes: 36 additions & 0 deletions internal/tests/resource_loadbalancer_test.go
@@ -38,14 +38,50 @@ func TestAccLoadBalancer_basic(t *testing.T) {
Steps: []resource.TestStep{
{
Config: `
resource "harvester_virtualmachine" "test_vm" {
name = "test-vm"
namespace = "default"
tags = {
app = "testlb"
}
cpu = 1
memory = "1Gi"
machine_type = "q35"
run_strategy = "RerunOnFailure"
network_interface {
name = "default"
}
disk {
name = "rootdisk"
type = "disk"
bus = "virtio"
boot_order = 1
container_image_name = "kubevirt/fedora-cloud-container-disk-demo:v0.35.0"
}
}
resource "harvester_loadbalancer" "test_loadbalancer" {
name = "test-loadbalancer"
depends_on = [
harvester_virtualmachine.test_vm
]
listener {
port = 443
protocol = "tcp"
backend_port = 8080
}
backend_selector {
key = "tag.harvesterhci.io/app"
values = [ "testlb" ]
}
}
`,
Check: resource.ComposeAggregateTestCheckFunc(