Skip to content

Commit

Permalink
Add instance type cache test
Browse files Browse the repository at this point in the history
  • Loading branch information
engedaam committed Mar 22, 2024
1 parent 69afba1 commit 7cdc5ac
Show file tree
Hide file tree
Showing 9 changed files with 174 additions and 18 deletions.
1 change: 1 addition & 0 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ module github.com/aws/karpenter-provider-aws
go 1.22

require (
dario.cat/mergo v1.0.0
github.com/Pallinder/go-randomdata v1.2.0
github.com/PuerkitoBio/goquery v1.9.1
github.com/aws/aws-sdk-go v1.51.1
Expand Down
2 changes: 2 additions & 0 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,8 @@ contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d h
contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d/go.mod h1:IshRmMJBhDfFj5Y67nVhMYTTIze91RUeT73ipWKs/GY=
contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg=
contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
Expand Down
153 changes: 153 additions & 0 deletions pkg/providers/instancetype/suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,11 +19,13 @@ import (
"fmt"
"math"
"net"
"reflect"
"sort"
"strings"
"testing"
"time"

"dario.cat/mergo"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
. "github.com/onsi/ginkgo/v2"
Expand Down Expand Up @@ -1716,6 +1718,157 @@ var _ = Describe("InstanceTypes", func() {
})
})
})
Context("Provider Cache", func() {
	It("changes to kubelet configuration fields should result in a different set of instances types", func() {
		// We should expect these kubelet configuration fields to change the result of the instance type call
		// kubelet.kubeReserved
		// kubelet.systemReserved
		// kubelet.evictionHard
		// kubelet.evictionSoft
		// kubelet.maxPods
		nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
			KubeReserved:   v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
			SystemReserved: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
			EvictionHard:   map[string]string{"memory.available": "5%"},
			EvictionSoft:   map[string]string{"nodefs.available": "10%"},
			MaxPods:        aws.Int32(10),
		}
		// Each entry overrides exactly one kubelet field relative to the base configuration above,
		// so each merged configuration should hit a distinct cache key.
		kubeletChanges := []*corev1beta1.KubeletConfiguration{
			{KubeReserved: v1.ResourceList{v1.ResourceCPU: resource.MustParse("20")}},
			{SystemReserved: v1.ResourceList{v1.ResourceMemory: resource.MustParse("10Gi")}},
			{EvictionHard: map[string]string{"memory.available": "52%"}},
			{EvictionSoft: map[string]string{"nodefs.available": "132%"}},
			{MaxPods: aws.Int32(20)},
		}
		var instanceTypeResult [][]*corecloudprovider.InstanceType
		ExpectApplied(ctx, env.Client, nodeClass)
		// Seed the instance type provider cache with the result for the base configuration
		fullInstanceTypeList, err := cloudProvider.GetInstanceTypes(ctx, nodePool)
		Expect(err).To(BeNil())
		sort.Slice(fullInstanceTypeList, func(x int, y int) bool {
			return fullInstanceTypeList[x].Name < fullInstanceTypeList[y].Name
		})

		base := nodePool.DeepCopy()
		for _, change := range kubeletChanges {
			// Use a fresh copy with a distinct name so we don't shadow the suite-level nodePool (govet shadow)
			np := base.DeepCopy()
			Expect(mergo.Merge(np.Spec.Template.Spec.Kubelet, change, mergo.WithOverride, mergo.WithSliceDeepCopy)).To(Succeed())
			// Calling the provider and storing the instance type list to the instancetype provider cache
			_, err := awsEnv.InstanceTypesProvider.List(ctx, np.Spec.Template.Spec.Kubelet, nodeClass)
			Expect(err).To(BeNil())
			// We are making sure to pull from the cache
			instanceTypes, err := awsEnv.InstanceTypesProvider.List(ctx, np.Spec.Template.Spec.Kubelet, nodeClass)
			Expect(err).To(BeNil())
			sort.Slice(instanceTypes, func(x int, y int) bool {
				return instanceTypes[x].Name < instanceTypes[y].Name
			})
			instanceTypeResult = append(instanceTypeResult, instanceTypes)
		}

		// We expect that each updated kubelet configuration will result in a subset of the full set of
		// instance types that's offered. The goal is to make sure that we don't have a cache clash and
		// end up with a smaller set of instance types than expected.
		for _, it := range instanceTypeResult {
			Expect(reflect.DeepEqual(fullInstanceTypeList, it)).To(BeFalse())
		}
		// Based on the kubelet configuration, we expect to have 5 unique sets of instance types
		for x := range instanceTypeResult {
			for y := range instanceTypeResult {
				if x == y {
					continue
				}
				Expect(reflect.DeepEqual(instanceTypeResult[x], instanceTypeResult[y])).To(BeFalse(), "expected unique instance type sets, found duplicate at x: %d, y: %d", x, y)
			}
		}
	})
	It("changes to nodeclass fields should result in a different set of instances types", func() {
		// We should expect these nodeclass fields to change the result of the instance type
		// nodeClass.instanceStorePolicy
		// nodeClass.amiFamily
		// nodeClass.blockDeviceMapping.rootVolume
		// nodeClass.blockDeviceMapping.volumeSize
		// nodeClass.blockDeviceMapping.deviceName
		nodeClass.Spec.BlockDeviceMappings = []*v1beta1.BlockDeviceMapping{
			{
				DeviceName: lo.ToPtr("/dev/xvda"),
				EBS:        &v1beta1.BlockDevice{VolumeSize: resource.NewScaledQuantity(10, resource.Giga)},
				RootVolume: false,
			},
		}
		// Each entry overrides one nodeclass field relative to the base nodeclass above,
		// so each merged nodeclass should hit a distinct cache key.
		nodeClassChanges := []*v1beta1.EC2NodeClass{
			{Spec: v1beta1.EC2NodeClassSpec{InstanceStorePolicy: lo.ToPtr(v1beta1.InstanceStorePolicyRAID0)}},
			{Spec: v1beta1.EC2NodeClassSpec{AMIFamily: &v1beta1.AMIFamilyUbuntu}},
			{
				Spec: v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{
					{
						DeviceName: lo.ToPtr("/dev/sda1"),
						EBS:        &v1beta1.BlockDevice{VolumeSize: resource.NewScaledQuantity(10, resource.Giga)},
						RootVolume: true,
					},
				},
				}},
			{
				Spec: v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{
					{
						DeviceName: lo.ToPtr("/dev/xvda"),
						EBS:        &v1beta1.BlockDevice{VolumeSize: resource.NewScaledQuantity(10, resource.Giga)},
						RootVolume: true,
					},
				},
				}},
			{
				Spec: v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{
					{
						DeviceName: lo.ToPtr("/dev/xvda"),
						EBS:        &v1beta1.BlockDevice{VolumeSize: resource.NewScaledQuantity(20, resource.Giga)},
						RootVolume: false,
					},
				},
				}},
		}
		var instanceTypeResult [][]*corecloudprovider.InstanceType
		ExpectApplied(ctx, env.Client, nodeClass)
		nodePool.Spec.Template.Spec.NodeClassRef.Name = nodeClass.Name
		// Seed the instance type provider cache with the result for the base nodeclass
		fullInstanceTypeList, err := cloudProvider.GetInstanceTypes(ctx, nodePool)
		Expect(err).To(BeNil())
		sort.Slice(fullInstanceTypeList, func(x int, y int) bool {
			return fullInstanceTypeList[x].Name < fullInstanceTypeList[y].Name
		})

		base := nodeClass.DeepCopy()
		for _, change := range nodeClassChanges {
			// Use a fresh copy with a distinct name so we don't shadow the suite-level nodeClass (govet shadow)
			nc := base.DeepCopy()
			Expect(mergo.Merge(nc, change, mergo.WithOverride)).To(Succeed())
			// Calling the provider and storing the instance type list to the instancetype provider cache
			_, err := awsEnv.InstanceTypesProvider.List(ctx, nodePool.Spec.Template.Spec.Kubelet, nc)
			Expect(err).To(BeNil())
			// We are making sure to pull from the cache
			instanceTypes, err := awsEnv.InstanceTypesProvider.List(ctx, nodePool.Spec.Template.Spec.Kubelet, nc)
			Expect(err).To(BeNil())
			sort.Slice(instanceTypes, func(x int, y int) bool {
				return instanceTypes[x].Name < instanceTypes[y].Name
			})
			instanceTypeResult = append(instanceTypeResult, instanceTypes)
		}

		// We expect that each updated nodeclass configuration will result in a subset of the full set of
		// instance types that's offered. The goal is to make sure that we don't have a cache clash and
		// end up with a smaller set of instance types than expected.
		for _, it := range instanceTypeResult {
			Expect(reflect.DeepEqual(fullInstanceTypeList, it)).To(BeFalse())
		}
		// Based on the nodeclass configuration, we expect to have 5 unique sets of instance types
		for x := range instanceTypeResult {
			for y := range instanceTypeResult {
				if x == y {
					continue
				}
				Expect(reflect.DeepEqual(instanceTypeResult[x], instanceTypeResult[y])).To(BeFalse(), "expected unique instance type sets, found duplicate at x: %d, y: %d", x, y)
			}
		}
	})
})
})

// generateSpotPricing creates a spot price history output for use in a mock that has all spot offerings discounted by 50%
Expand Down
6 changes: 3 additions & 3 deletions website/content/en/docs/troubleshooting.md
Original file line number Diff line number Diff line change
Expand Up @@ -117,14 +117,14 @@ Karpenter `0.26.1` introduced the `karpenter-crd` Helm chart. When installing th
- In the case of `invalid ownership metadata; label validation error: missing key "app.kubernetes.io/managed-by": must be set to "Helm"` run:

```shell
kubectl label crd ec2nodeclasses.karpenter.k8s.aws nodepools.karpenter.sh nodeclaims.karpenter.sh app.kubernetes.io/managed-by=Helm --overwrite
kubectl label crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh machines.karpenter.sh app.kubernetes.io/managed-by=Helm --overwrite
```

- In the case of `annotation validation error: missing key "meta.helm.sh/release-namespace": must be set to "karpenter"` run:

```shell
kubectl annotate crd ec2nodeclasses.karpenter.k8s.aws nodepools.karpenter.sh nodeclaims.karpenter.sh meta.helm.sh/release-name=karpenter-crd --overwrite
kubectl annotate crd ec2nodeclasses.karpenter.k8s.aws nodepools.karpenter.sh nodeclaims.karpenter.sh meta.helm.sh/release-namespace=karpenter --overwrite
kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh machines.karpenter.sh meta.helm.sh/release-name=karpenter-crd --overwrite
kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh machines.karpenter.sh meta.helm.sh/release-namespace=karpenter --overwrite
```

## Uninstallation
Expand Down
6 changes: 3 additions & 3 deletions website/content/en/preview/troubleshooting.md
Original file line number Diff line number Diff line change
Expand Up @@ -117,14 +117,14 @@ Karpenter `0.26.1` introduced the `karpenter-crd` Helm chart. When installing th
- In the case of `invalid ownership metadata; label validation error: missing key "app.kubernetes.io/managed-by": must be set to "Helm"` run:

```shell
kubectl label crd ec2nodeclasses.karpenter.k8s.aws nodepools.karpenter.sh nodeclaims.karpenter.sh app.kubernetes.io/managed-by=Helm --overwrite
kubectl label crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh machines.karpenter.sh app.kubernetes.io/managed-by=Helm --overwrite
```

- In the case of `annotation validation error: missing key "meta.helm.sh/release-namespace": must be set to "karpenter"` run:

```shell
kubectl annotate crd ec2nodeclasses.karpenter.k8s.aws nodepools.karpenter.sh nodeclaims.karpenter.sh meta.helm.sh/release-name=karpenter-crd --overwrite
kubectl annotate crd ec2nodeclasses.karpenter.k8s.aws nodepools.karpenter.sh nodeclaims.karpenter.sh meta.helm.sh/release-namespace=karpenter --overwrite
kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh machines.karpenter.sh meta.helm.sh/release-name=karpenter-crd --overwrite
kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh machines.karpenter.sh meta.helm.sh/release-namespace=karpenter --overwrite
```

## Uninstallation
Expand Down
6 changes: 3 additions & 3 deletions website/content/en/v0.32/troubleshooting.md
Original file line number Diff line number Diff line change
Expand Up @@ -105,14 +105,14 @@ Karpenter v0.26.1+ introduced the `karpenter-crd` helm chart. When installing th
- In the case of `invalid ownership metadata; label validation error: missing key "app.kubernetes.io/managed-by": must be set to "Helm"` run:

```shell
kubectl label crd ec2nodeclasses.karpenter.k8s.aws nodepools.karpenter.sh nodeclaims.karpenter.sh app.kubernetes.io/managed-by=Helm --overwrite
kubectl label crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh machines.karpenter.sh app.kubernetes.io/managed-by=Helm --overwrite
```

- In the case of `annotation validation error: missing key "meta.helm.sh/release-namespace": must be set to "karpenter"` run:

```shell
kubectl annotate crd ec2nodeclasses.karpenter.k8s.aws nodepools.karpenter.sh nodeclaims.karpenter.sh meta.helm.sh/release-name=karpenter-crd --overwrite
kubectl annotate crd ec2nodeclasses.karpenter.k8s.aws nodepools.karpenter.sh nodeclaims.karpenter.sh meta.helm.sh/release-namespace=karpenter --overwrite
kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh machines.karpenter.sh meta.helm.sh/release-name=karpenter-crd --overwrite
kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh machines.karpenter.sh meta.helm.sh/release-namespace=karpenter --overwrite
```

## Uninstallation
Expand Down
6 changes: 3 additions & 3 deletions website/content/en/v0.33/troubleshooting.md
Original file line number Diff line number Diff line change
Expand Up @@ -105,14 +105,14 @@ Karpenter v0.26.1+ introduced the `karpenter-crd` helm chart. When installing th
- In the case of `invalid ownership metadata; label validation error: missing key "app.kubernetes.io/managed-by": must be set to "Helm"` run:

```shell
kubectl label crd ec2nodeclasses.karpenter.k8s.aws nodepools.karpenter.sh nodeclaims.karpenter.sh app.kubernetes.io/managed-by=Helm --overwrite
kubectl label crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh machines.karpenter.sh app.kubernetes.io/managed-by=Helm --overwrite
```

- In the case of `annotation validation error: missing key "meta.helm.sh/release-namespace": must be set to "karpenter"` run:

```shell
kubectl annotate crd ec2nodeclasses.karpenter.k8s.aws nodepools.karpenter.sh nodeclaims.karpenter.sh meta.helm.sh/release-name=karpenter-crd --overwrite
kubectl annotate crd ec2nodeclasses.karpenter.k8s.aws nodepools.karpenter.sh nodeclaims.karpenter.sh meta.helm.sh/release-namespace=karpenter --overwrite
kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh machines.karpenter.sh meta.helm.sh/release-name=karpenter-crd --overwrite
kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh machines.karpenter.sh meta.helm.sh/release-namespace=karpenter --overwrite
```

## Uninstallation
Expand Down
6 changes: 3 additions & 3 deletions website/content/en/v0.34/troubleshooting.md
Original file line number Diff line number Diff line change
Expand Up @@ -117,14 +117,14 @@ Karpenter v0.26.1+ introduced the `karpenter-crd` helm chart. When installing th
- In the case of `invalid ownership metadata; label validation error: missing key "app.kubernetes.io/managed-by": must be set to "Helm"` run:

```shell
kubectl label crd ec2nodeclasses.karpenter.k8s.aws nodepools.karpenter.sh nodeclaims.karpenter.sh app.kubernetes.io/managed-by=Helm --overwrite
kubectl label crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh machines.karpenter.sh app.kubernetes.io/managed-by=Helm --overwrite
```

- In the case of `annotation validation error: missing key "meta.helm.sh/release-namespace": must be set to "karpenter"` run:

```shell
kubectl annotate crd ec2nodeclasses.karpenter.k8s.aws nodepools.karpenter.sh nodeclaims.karpenter.sh meta.helm.sh/release-name=karpenter-crd --overwrite
kubectl annotate crd ec2nodeclasses.karpenter.k8s.aws nodepools.karpenter.sh nodeclaims.karpenter.sh meta.helm.sh/release-namespace=karpenter --overwrite
kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh machines.karpenter.sh meta.helm.sh/release-name=karpenter-crd --overwrite
kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh machines.karpenter.sh meta.helm.sh/release-namespace=karpenter --overwrite
```

## Uninstallation
Expand Down
6 changes: 3 additions & 3 deletions website/content/en/v0.35/troubleshooting.md
Original file line number Diff line number Diff line change
Expand Up @@ -117,14 +117,14 @@ Karpenter `0.26.1` introduced the `karpenter-crd` Helm chart. When installing th
- In the case of `invalid ownership metadata; label validation error: missing key "app.kubernetes.io/managed-by": must be set to "Helm"` run:

```shell
kubectl label crd ec2nodeclasses.karpenter.k8s.aws nodepools.karpenter.sh nodeclaims.karpenter.sh app.kubernetes.io/managed-by=Helm --overwrite
kubectl label crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh machines.karpenter.sh app.kubernetes.io/managed-by=Helm --overwrite
```

- In the case of `annotation validation error: missing key "meta.helm.sh/release-namespace": must be set to "karpenter"` run:

```shell
kubectl annotate crd ec2nodeclasses.karpenter.k8s.aws nodepools.karpenter.sh nodeclaims.karpenter.sh meta.helm.sh/release-name=karpenter-crd --overwrite
kubectl annotate crd ec2nodeclasses.karpenter.k8s.aws nodepools.karpenter.sh nodeclaims.karpenter.sh meta.helm.sh/release-namespace=karpenter --overwrite
kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh machines.karpenter.sh meta.helm.sh/release-name=karpenter-crd --overwrite
kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh machines.karpenter.sh meta.helm.sh/release-namespace=karpenter --overwrite
```

## Uninstallation
Expand Down

0 comments on commit 7cdc5ac

Please sign in to comment.