diff --git a/pkg/providers/instancetype/suite_test.go b/pkg/providers/instancetype/suite_test.go
index 828881308e5d..0820c67c3f22 100644
--- a/pkg/providers/instancetype/suite_test.go
+++ b/pkg/providers/instancetype/suite_test.go
@@ -19,6 +19,7 @@ import (
 	"fmt"
 	"math"
 	"net"
+	"reflect"
 	"sort"
 	"strings"
 	"testing"
@@ -26,6 +27,7 @@ import (
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/imdario/mergo"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	"github.com/samber/lo"
@@ -1716,6 +1718,159 @@ var _ = Describe("InstanceTypes", func() {
 			})
 		})
 	})
+	Context("Provider Cache", func() {
+		It("changes to kubelet configuration fields should result in a different set of instances types", func() {
+			// We should expect these kubelet configuration fields to change the result of the instance type call
+			// kubelet.kubeReserved
+			// kubelet.systemReserved
+			// kubelet.evictionHard
+			// kubelet.evictionSoft
+			// kubelet.maxPods
+			nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
+				KubeReserved:   v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
+				SystemReserved: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
+				EvictionHard:   map[string]string{"memory.available": "5%"},
+				EvictionSoft:   map[string]string{"nodefs.available": "10%"},
+				MaxPods:        aws.Int32(10),
+			}
+			kubeletChanges := []*corev1beta1.KubeletConfiguration{
+				{KubeReserved: v1.ResourceList{v1.ResourceCPU: resource.MustParse("20")}},
+				{SystemReserved: v1.ResourceList{v1.ResourceMemory: resource.MustParse("10Gi")}},
+				{EvictionHard: map[string]string{"memory.available": "52%"}},
+				{EvictionSoft: map[string]string{"nodefs.available": "132%"}},
+				{MaxPods: aws.Int32(20)},
+			}
+			var instanceTypeResult [][]*corecloudprovider.InstanceType
+			ExpectApplied(ctx, env.Client, nodeClass)
+			// Add the general set of instance types to the instance type provider cache
+			fullInstanceTypeList, err := cloudProvider.GetInstanceTypes(ctx, nodePool)
+			Expect(err).To(BeNil())
+			sort.Slice(fullInstanceTypeList, func(x int, y int) bool {
+				return fullInstanceTypeList[x].Name < fullInstanceTypeList[y].Name
+			})
+
+			sorted := nodePool.DeepCopy()
+			for _, change := range kubeletChanges {
+				nodePool = sorted.DeepCopy()
+				Expect(mergo.Merge(nodePool.Spec.Template.Spec.Kubelet, change, mergo.WithOverride, mergo.WithSliceDeepCopy)).To(BeNil())
+				// Calling the provider and storing the instance type list to the instancetype provider cache
+				_, err := awsEnv.InstanceTypesProvider.List(ctx, nodePool.Spec.Template.Spec.Kubelet, nodeClass)
+				Expect(err).To(BeNil())
+				// We are making sure to pull from the cache
+				instanceTypes, err := awsEnv.InstanceTypesProvider.List(ctx, nodePool.Spec.Template.Spec.Kubelet, nodeClass)
+				Expect(err).To(BeNil())
+				sort.Slice(instanceTypes, func(x int, y int) bool {
+					return instanceTypes[x].Name < instanceTypes[y].Name
+				})
+				instanceTypeResult = append(instanceTypeResult, instanceTypes)
+			}
+
+			// We expect that the updated kubelet configuration will result in a subset of the full set of instance types that's offered
+			// The goal is to make sure that we don't have different configurations collapsing to the same cache key, yielding a smaller set of instance types than expected
+			for _, it := range instanceTypeResult {
+				Expect(reflect.DeepEqual(fullInstanceTypeList, it)).To(BeFalse())
+			}
+			// Based on the kubelet configuration, we expect to have 5 unique sets of instance types
+			for x := range instanceTypeResult {
+				for y := range instanceTypeResult {
+					if x == y {
+						continue
+					}
+					equal := reflect.DeepEqual(instanceTypeResult[x], instanceTypeResult[y])
+					if equal {
+						fmt.Printf("failed x: %d, y: %d\n", x, y)
+					}
+					Expect(equal).To(BeFalse())
+				}
+			}
+		})
+		It("changes to nodeclass fields should result in a different set of instances types", func() {
+			// We should expect these nodeclass fields to change the result of the instance type call
+			// nodeClass.instanceStorePolicy
+			// nodeClass.amiFamily
+			// nodeClass.blockDeviceMapping.rootVolume
+			// nodeClass.blockDeviceMapping.volumeSize
+			// nodeClass.blockDeviceMapping.deviceName
+			nodeClass.Spec.BlockDeviceMappings = []*v1beta1.BlockDeviceMapping{
+				{
+					DeviceName: lo.ToPtr("/dev/xvda"),
+					EBS:        &v1beta1.BlockDevice{VolumeSize: resource.NewScaledQuantity(10, resource.Giga)},
+					RootVolume: false,
+				},
+			}
+			nodeClassChanges := []*v1beta1.EC2NodeClass{
+				{Spec: v1beta1.EC2NodeClassSpec{InstanceStorePolicy: lo.ToPtr(v1beta1.InstanceStorePolicyRAID0)}},
+				{Spec: v1beta1.EC2NodeClassSpec{AMIFamily: &v1beta1.AMIFamilyUbuntu}},
+				{
+					Spec: v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{
+						{
+							DeviceName: lo.ToPtr("/dev/sda1"),
+							EBS:        &v1beta1.BlockDevice{VolumeSize: resource.NewScaledQuantity(10, resource.Giga)},
+							RootVolume: true,
+						},
+					},
+					}},
+				{
+					Spec: v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{
+						{
+							DeviceName: lo.ToPtr("/dev/xvda"),
+							EBS:        &v1beta1.BlockDevice{VolumeSize: resource.NewScaledQuantity(10, resource.Giga)},
+							RootVolume: true,
+						},
+					},
+					}},
+				{
+					Spec: v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{
+						{
+							DeviceName: lo.ToPtr("/dev/xvda"),
+							EBS:        &v1beta1.BlockDevice{VolumeSize: resource.NewScaledQuantity(20, resource.Giga)},
+							RootVolume: false,
+						},
+					},
+					}},
+			}
+			var instanceTypeResult [][]*corecloudprovider.InstanceType
+			ExpectApplied(ctx, env.Client, nodeClass)
+			nodePool.Spec.Template.Spec.NodeClassRef.Name = nodeClass.Name
+			// Add the general set of instance types to the instance type provider cache
+			fullInstanceTypeList, err := cloudProvider.GetInstanceTypes(ctx, nodePool)
+			Expect(err).To(BeNil())
+			sort.Slice(fullInstanceTypeList, func(x int, y int) bool {
+				return fullInstanceTypeList[x].Name < fullInstanceTypeList[y].Name
+			})
+
+			sorted := nodeClass.DeepCopy()
+			for _, change := range nodeClassChanges {
+				nodeClass = sorted.DeepCopy()
+				Expect(mergo.Merge(nodeClass, change, mergo.WithOverride)).To(BeNil())
+				// Calling the provider and storing the instance type list to the instancetype provider cache
+				_, err := awsEnv.InstanceTypesProvider.List(ctx, nodePool.Spec.Template.Spec.Kubelet, nodeClass)
+				Expect(err).To(BeNil())
+				// We are making sure to pull from the cache
+				instanceTypes, err := awsEnv.InstanceTypesProvider.List(ctx, nodePool.Spec.Template.Spec.Kubelet, nodeClass)
+				Expect(err).To(BeNil())
+				sort.Slice(instanceTypes, func(x int, y int) bool {
+					return instanceTypes[x].Name < instanceTypes[y].Name
+				})
+				instanceTypeResult = append(instanceTypeResult, instanceTypes)
+			}
+
+			// We expect that the updated nodeclass configuration will result in a subset of the full set of instance types that's offered
+			// The goal is to make sure that we don't have different configurations collapsing to the same cache key, yielding a smaller set of instance types than expected
+			for _, it := range instanceTypeResult {
+				Expect(reflect.DeepEqual(fullInstanceTypeList, it)).To(BeFalse())
+			}
+			// Based on the nodeclass configuration, we expect to have 5 unique sets of instance types
+			for x := range instanceTypeResult {
+				for y := range instanceTypeResult {
+					if x == y {
+						continue
+					}
+					Expect(reflect.DeepEqual(instanceTypeResult[x], instanceTypeResult[y])).To(BeFalse())
+				}
+			}
+		})
+	})
 })
 
 // generateSpotPricing creates a spot price history output for use in a mock that has all spot offerings discounted by 50%