Skip to content

Commit

Permalink
Support VM CPU pinning
Browse files Browse the repository at this point in the history
Signed-off-by: Cooper Tseng <[email protected]>
  • Loading branch information
brandboat committed Sep 26, 2024
1 parent 41bb2f4 commit 2d3d359
Show file tree
Hide file tree
Showing 5 changed files with 193 additions and 8 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,14 @@ import (
"errors"
"fmt"

"github.com/harvester/harvester/pkg/builder"
harvesterutil "github.com/harvester/harvester/pkg/util"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
kubevirtv1 "kubevirt.io/api/core/v1"

"github.com/harvester/harvester/pkg/builder"
harvesterutil "github.com/harvester/harvester/pkg/util"

"github.com/harvester/terraform-provider-harvester/internal/util"
"github.com/harvester/terraform-provider-harvester/pkg/client"
"github.com/harvester/terraform-provider-harvester/pkg/constants"
Expand Down Expand Up @@ -306,6 +307,13 @@ func (c *Constructor) Setup() util.Processors {
return nil
},
},
{
Field: constants.FieldVirtualMachineCPUPinning,
Parser: func(i interface{}) error {
vmBuilder.VirtualMachine.Spec.Template.Spec.Domain.CPU.DedicatedCPUPlacement = i.(bool)
return nil
},
},
}
return append(processors, customProcessors...)
}
Expand Down
6 changes: 6 additions & 0 deletions internal/provider/virtualmachine/schema_virtualmachine.go
Original file line number Diff line number Diff line change
Expand Up @@ -127,6 +127,12 @@ please use %s instead of this deprecated field:
Optional: true,
Default: false,
},
constants.FieldVirtualMachineCPUPinning: {
Type: schema.TypeBool,
Description: "To enable VM CPU pinning, ensure that at least one node has the CPU manager enabled",
Optional: true,
Default: false,
},
}
util.NamespacedSchemaWrap(s, false)
return s
Expand Down
172 changes: 168 additions & 4 deletions internal/tests/resource_virtualmachine_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,12 +2,20 @@ package tests

import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"testing"
"time"

"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"
kubevirtv1 "kubevirt.io/api/core/v1"

"github.com/harvester/terraform-provider-harvester/pkg/client"
Expand All @@ -30,6 +38,7 @@ resource %s "%s" {
cpu = 1
%s = "%s"
%s = %s
run_strategy = "RerunOnFailure"
machine_type = "q35"
Expand All @@ -50,11 +59,12 @@ resource %s "%s" {
`
)

func buildVirtualMachineConfig(name, description, memory string) string {
func buildVirtualMachineConfig(name, description, memory string, cpuPinning bool) string {
return fmt.Sprintf(testAccVirtualMachineConfigTemplate, constants.ResourceTypeVirtualMachine, name,
constants.FieldCommonName, name,
constants.FieldCommonDescription, description,
constants.FieldVirtualMachineMemory, memory)
constants.FieldVirtualMachineMemory, memory,
constants.FieldVirtualMachineCPUPinning, strconv.FormatBool(cpuPinning))
}

func TestAccVirtualMachine_basic(t *testing.T) {
Expand All @@ -68,21 +78,71 @@ func TestAccVirtualMachine_basic(t *testing.T) {
CheckDestroy: testAccCheckVirtualMachineDestroy(ctx),
Steps: []resource.TestStep{
{
Config: buildVirtualMachineConfig(testAccVirtualMachineName, testAccVirtualMachineDescription, testAccVirtualMachineMemory),
Config: buildVirtualMachineConfig(testAccVirtualMachineName, testAccVirtualMachineDescription, testAccVirtualMachineMemory, false),
Check: resource.ComposeTestCheckFunc(
testAccVirtualMachineExists(ctx, testAccVirtualMachineResourceName, vm),
resource.TestCheckResourceAttr(testAccVirtualMachineResourceName, constants.FieldCommonName, testAccVirtualMachineName),
resource.TestCheckResourceAttr(testAccVirtualMachineResourceName, constants.FieldCommonDescription, testAccVirtualMachineDescription),
resource.TestCheckResourceAttr(testAccVirtualMachineResourceName, constants.FieldVirtualMachineMemory, testAccVirtualMachineMemory),
resource.TestCheckResourceAttr(testAccVirtualMachineResourceName, constants.FieldVirtualMachineCPUPinning, "false"),
),
},
{
Config: buildVirtualMachineConfig(testAccVirtualMachineName, testAccVirtualMachineDescription, testAccVirtualMachineMemoryUpdate),
Config: buildVirtualMachineConfig(testAccVirtualMachineName, testAccVirtualMachineDescription, testAccVirtualMachineMemoryUpdate, false),
Check: resource.ComposeTestCheckFunc(
testAccVirtualMachineExists(ctx, testAccVirtualMachineResourceName, vm),
resource.TestCheckResourceAttr(testAccVirtualMachineResourceName, constants.FieldCommonName, testAccVirtualMachineName),
resource.TestCheckResourceAttr(testAccVirtualMachineResourceName, constants.FieldCommonDescription, testAccVirtualMachineDescription),
resource.TestCheckResourceAttr(testAccVirtualMachineResourceName, constants.FieldVirtualMachineMemory, testAccVirtualMachineMemoryUpdate),
resource.TestCheckResourceAttr(testAccVirtualMachineResourceName, constants.FieldVirtualMachineCPUPinning, "false"),
),
},
},
})
}

func TestAccVirtualMachine_cpu_pinning(t *testing.T) {
testAccPreCheck(t)
var (
vm = &kubevirtv1.VirtualMachine{}
ctx = context.Background()
client = testAccProvider.Meta().(*client.Client)
)

nodes, err := client.KubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
t.Fatal(err)
}
if len(nodes.Items) == 0 {
t.Fatal("Failed to find any node")
}

nodeName := nodes.Items[0].Name
t.Log("Enable cpu manager on node " + nodeName)
enableCPUManager(t, ctx, nodeName)
defer func() {
t.Log("Disable CPU manager on node " + nodeName)
disableCPUManager(t, ctx, nodeName)
}()

resource.Test(t, resource.TestCase{
Providers: testAccProviders,
CheckDestroy: testAccCheckVirtualMachineDestroy(ctx),
Steps: []resource.TestStep{
{
Config: buildVirtualMachineConfig(testAccVirtualMachineName, testAccVirtualMachineDescription, testAccVirtualMachineMemory, true),
Check: resource.ComposeTestCheckFunc(
testAccVirtualMachineExists(ctx, testAccVirtualMachineResourceName, vm),
func(s *terraform.State) error {
if vm.Spec.Template == nil || vm.Spec.Template.Spec.Domain.CPU == nil || !vm.Spec.Template.Spec.Domain.CPU.DedicatedCPUPlacement {
return errors.New("DedicatedCPUPlacement should be true")
}
return nil
},
resource.TestCheckResourceAttr(testAccVirtualMachineResourceName, constants.FieldCommonName, testAccVirtualMachineName),
resource.TestCheckResourceAttr(testAccVirtualMachineResourceName, constants.FieldCommonDescription, testAccVirtualMachineDescription),
resource.TestCheckResourceAttr(testAccVirtualMachineResourceName, constants.FieldVirtualMachineMemory, testAccVirtualMachineMemory),
resource.TestCheckResourceAttr(testAccVirtualMachineResourceName, constants.FieldVirtualMachineCPUPinning, "true"),
),
},
},
Expand Down Expand Up @@ -141,3 +201,107 @@ func testAccCheckVirtualMachineDestroy(ctx context.Context) resource.TestCheckFu
return nil
}
}

// enableCPUManager turns on the CPU manager on the given node by invoking
// the node's "enableCPUManager" action through updateCPUManagerPolicy.
func enableCPUManager(t *testing.T, ctx context.Context, nodeName string) {
	updateCPUManagerPolicy(t, ctx, nodeName, true)
}

// disableCPUManager turns off the CPU manager on the given node by invoking
// the node's "disableCPUManager" action through updateCPUManagerPolicy.
func disableCPUManager(t *testing.T, ctx context.Context, nodeName string) {
	updateCPUManagerPolicy(t, ctx, nodeName, false)
}

// updateCPUManagerPolicy toggles the CPU manager on a node by calling the
// Harvester node action API ("enableCPUManager" / "disableCPUManager"),
// then blocks until the node's "cpumanager" label reflects the requested
// state. Any failure aborts the test.
func updateCPUManagerPolicy(t *testing.T, ctx context.Context, nodeName string, enableCPUManager bool) {
	c := testAccProvider.Meta().(*client.Client)
	action := "disableCPUManager"
	if enableCPUManager {
		action = "enableCPUManager"
	}
	apiURL := buildNodeActionURL(t, c.RestConfig, nodeName, action)
	req := createRequest(t, apiURL, c.RestConfig.BearerToken)
	// Named httpClient (not "client") so it does not shadow the imported
	// "client" package used above for *client.Client.
	httpClient := &http.Client{
		// Bound the request so a hung API server fails fast instead of
		// stalling the whole test run.
		Timeout: time.Minute,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true, // #nosec G402 -- test talks to a self-signed Harvester endpoint
			},
		},
	}

	resp, err := httpClient.Do(req)
	if err != nil {
		t.Fatalf("Failed to send request: %v\n", err)
	}
	defer resp.Body.Close()

	// The node action endpoint answers 204 No Content on success; anything
	// else is reported together with the response body for diagnosis.
	if resp.StatusCode != http.StatusNoContent {
		bodyBytes, err := io.ReadAll(resp.Body)
		if err != nil {
			t.Fatalf("Failed to read update cpu manager policy response, status code: %d, err: %v", resp.StatusCode, err)
		}
		t.Fatalf("Failed to update cpu manager policy, status code: %d, err: %s", resp.StatusCode, string(bodyBytes))
	}

	waitForCPUMangerLabel(t, ctx, c, nodeName, enableCPUManager)
}

func buildNodeActionURL(t *testing.T, config *rest.Config, nodeName, action string) string {
parsedURL, err := url.Parse(config.Host)
if err != nil {
t.Fatalf("Failed to parse restconfig host to url: %v\n", err)
}

parsedURL.Path = "/v1/harvester/nodes/" + nodeName
query := parsedURL.Query()
query.Set("action", action)
parsedURL.RawQuery = query.Encode()

return parsedURL.String()
}

// createRequest builds an authenticated POST request for the given API URL,
// attaching the bearer token in the Authorization header. Request
// construction failures abort the test.
func createRequest(t *testing.T, apiURL, bearerToken string) *http.Request {
	// Mark as a helper so a Fatalf here is attributed to the caller's line.
	t.Helper()
	req, err := http.NewRequest(http.MethodPost, apiURL, nil)
	if err != nil {
		t.Fatalf("Failed to create request: %v", err)
	}
	req.Header.Set("Authorization", "Bearer "+bearerToken)
	return req
}

// waitForCPUMangerLabel polls the node until its "cpumanager" label equals
// the expected enable/disable state, failing the test if waitUntil times out.
// NOTE(review): "Manger" in the name looks like a typo for "Manager";
// renaming would require updating callers, so it is only flagged here.
func waitForCPUMangerLabel(t *testing.T, ctx context.Context, c *client.Client, nodeName string, enableCPUManager bool) {
	waitUntil(t, func() bool {
		node, err := c.KubeClient.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
		if err != nil {
			// A transient API error must not fail the test outright:
			// t.Errorf would mark the test failed even if a later poll
			// succeeds, so only log and let waitUntil retry.
			t.Logf("Failed to get node: %v\n", err)
			return false
		}

		expectedValue := strconv.FormatBool(enableCPUManager)
		if value, exists := node.Labels["cpumanager"]; exists && value == expectedValue {
			t.Logf("Node %s has the label cpumanager=%s\n", nodeName, expectedValue)
			return true
		}

		t.Logf("Waiting for label cpumanager=%s on node %s...\n", expectedValue, nodeName)
		return false
	})
}

// waitUntil evaluates fun every 5 seconds until it returns true, failing the
// test if the condition is not met within 5 minutes. The condition is also
// checked once immediately, so an already-true condition returns without
// paying the first 5-second tick.
func waitUntil(t *testing.T, fun func() bool) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	// Fast path: don't wait for the first tick if the condition already holds.
	if fun() {
		return
	}

	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			t.Fatal("Timeout reached. The condition was not met within 5 minutes.")
			return
		case <-ticker.C:
			if fun() {
				return
			}
		}
	}
}
1 change: 1 addition & 0 deletions pkg/constants/constants_virtualmachine.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ const (
FieldVirtualMachineInstanceNodeName = "node_name"
FieldVirtualMachineEFI = "efi"
FieldVirtualMachineSecureBoot = "secure_boot"
FieldVirtualMachineCPUPinning = "cpu_pinning"

StateVirtualMachineStarting = "Starting"
StateVirtualMachineRunning = "Running"
Expand Down
10 changes: 8 additions & 2 deletions pkg/importer/resource_virtualmachine_importer.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,12 @@ import (
"encoding/json"
"fmt"

"github.com/harvester/harvester/pkg/builder"
harvesterutil "github.com/harvester/harvester/pkg/util"
corev1 "k8s.io/api/core/v1"
kubevirtv1 "kubevirt.io/api/core/v1"

"github.com/harvester/harvester/pkg/builder"
harvesterutil "github.com/harvester/harvester/pkg/util"

"github.com/harvester/terraform-provider-harvester/pkg/constants"
"github.com/harvester/terraform-provider-harvester/pkg/helper"
)
Expand Down Expand Up @@ -50,6 +51,10 @@ func (v *VMImporter) CPU() int {
return int(v.VirtualMachine.Spec.Template.Spec.Domain.CPU.Cores)
}

// DedicatedCPUPlacement reports whether the imported virtual machine has CPU
// pinning enabled (spec.template.spec.domain.cpu.dedicatedCpuPlacement).
// Returns false instead of panicking when the CPU spec is absent; the field
// is already a bool, so no conversion is needed.
func (v *VMImporter) DedicatedCPUPlacement() bool {
	cpu := v.VirtualMachine.Spec.Template.Spec.Domain.CPU
	return cpu != nil && cpu.DedicatedCPUPlacement
}

func (v *VMImporter) EFI() bool {
firmware := v.VirtualMachine.Spec.Template.Spec.Domain.Firmware
return firmware != nil && firmware.Bootloader != nil && firmware.Bootloader.EFI != nil
Expand Down Expand Up @@ -380,6 +385,7 @@ func ResourceVirtualMachineStateGetter(vm *kubevirtv1.VirtualMachine, vmi *kubev
constants.FieldVirtualMachineInstanceNodeName: vmImporter.NodeName(),
constants.FieldVirtualMachineEFI: vmImporter.EFI(),
constants.FieldVirtualMachineSecureBoot: vmImporter.SecureBoot(),
constants.FieldVirtualMachineCPUPinning: vmImporter.DedicatedCPUPlacement(),
},
}, nil
}

0 comments on commit 2d3d359

Please sign in to comment.