From 7786b5d39b7f9d613ec0d84807277ce1c918e593 Mon Sep 17 00:00:00 2001
From: Jason Deal <jmdeal@amazon.com>
Date: Fri, 2 Feb 2024 14:34:19 -0800
Subject: [PATCH] test: init container e2e tests (#5516)

---
 go.mod                                     |   2 +-
 go.sum                                     |   4 +-
 test/pkg/environment/aws/expectations.go   |   8 +
 test/suites/integration/scheduling_test.go | 743 ++++++++++++---------
 4 files changed, 439 insertions(+), 318 deletions(-)

diff --git a/go.mod b/go.mod
index 03f90655dce5..f6d94dfb49c3 100644
--- a/go.mod
+++ b/go.mod
@@ -27,7 +27,7 @@ require (
 	k8s.io/utils v0.0.0-20230726121419-3b25d923346b
 	knative.dev/pkg v0.0.0-20231010144348-ca8c009405dd
 	sigs.k8s.io/controller-runtime v0.17.0
-	sigs.k8s.io/karpenter v0.33.1-0.20240201005121-3d56ae365c82
+	sigs.k8s.io/karpenter v0.33.1-0.20240202175636-0e77b7842c28
 )
 
 require (
diff --git a/go.sum b/go.sum
index 719cbc451325..7c0a495be559 100644
--- a/go.sum
+++ b/go.sum
@@ -762,8 +762,8 @@ sigs.k8s.io/controller-runtime v0.17.0 h1:fjJQf8Ukya+VjogLO6/bNX9HE6Y2xpsO5+fyS2
 sigs.k8s.io/controller-runtime v0.17.0/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/karpenter v0.33.1-0.20240201005121-3d56ae365c82 h1:98LrXlzhJ6gbO9G2Jjd//FOgrFPYHLfafUJ8ZmyEe74=
-sigs.k8s.io/karpenter v0.33.1-0.20240201005121-3d56ae365c82/go.mod h1:V5s2Bq+41ybF38Bd75k+v+416PWpPU9xz7UBG+sXtCk=
+sigs.k8s.io/karpenter v0.33.1-0.20240202175636-0e77b7842c28 h1:xdyaljnj4ehDZ8W2vicvbanamijcOkVzt/oRZM6780I=
+sigs.k8s.io/karpenter v0.33.1-0.20240202175636-0e77b7842c28/go.mod h1:V5s2Bq+41ybF38Bd75k+v+416PWpPU9xz7UBG+sXtCk=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
 sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
diff --git a/test/pkg/environment/aws/expectations.go b/test/pkg/environment/aws/expectations.go
index 6703a651c17d..2b69ccd47d64 100644
--- a/test/pkg/environment/aws/expectations.go
+++ b/test/pkg/environment/aws/expectations.go
@@ -338,6 +338,14 @@ func (env *Environment) GetK8sVersion(offset int) string {
 	return fmt.Sprintf("%s.%d", serverVersion.Major, minorVersion-offset)
 }
 
+func (env *Environment) GetK8sMinorVersion(offset int) (int, error) {
+	version, err := strconv.Atoi(strings.Split(env.GetK8sVersion(offset), ".")[1])
+	if err != nil {
+		return 0, err
+	}
+	return version, nil
+}
+
 func (env *Environment) GetCustomAMI(amiPath string, versionOffset int) string {
 	version := env.GetK8sVersion(versionOffset)
 	parameter, err := env.SSMAPI.GetParameter(&ssm.GetParameterInput{
diff --git a/test/suites/integration/scheduling_test.go b/test/suites/integration/scheduling_test.go
index ff6b16dbca88..b569cced2d74 100644
--- a/test/suites/integration/scheduling_test.go
+++ b/test/suites/integration/scheduling_test.go
@@ -20,6 +20,7 @@ import (
 
 	"github.com/samber/lo"
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -59,6 +60,7 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 		// Ensure that we're exercising all well known labels
 		Expect(lo.Keys(selectors)).To(ContainElements(append(corev1beta1.WellKnownLabels.UnsortedList(), lo.Keys(corev1beta1.NormalizedLabels)...)))
 	})
+
 	It("should apply annotations to the node", func() {
 		nodePool.Spec.Template.Annotations = map[string]string{
 			"foo":                                 "bar",
@@ -70,353 +72,464 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 		env.ExpectCreatedNodeCount("==", 1)
 		Expect(env.GetNode(pod.Spec.NodeName).Annotations).To(And(HaveKeyWithValue("foo", "bar"), HaveKeyWithValue(corev1beta1.DoNotDisruptAnnotationKey, "true")))
 	})
-	It("should support well-known labels for instance type selection", func() {
-		nodeSelector := map[string]string{
-			// Well Known
-			corev1beta1.NodePoolLabelKey: nodePool.Name,
-			v1.LabelInstanceTypeStable:   "c5.large",
-			// Well Known to AWS
-			v1beta1.LabelInstanceHypervisor:       "nitro",
-			v1beta1.LabelInstanceCategory:         "c",
-			v1beta1.LabelInstanceGeneration:       "5",
-			v1beta1.LabelInstanceFamily:           "c5",
-			v1beta1.LabelInstanceSize:             "large",
-			v1beta1.LabelInstanceCPU:              "2",
-			v1beta1.LabelInstanceMemory:           "4096",
-			v1beta1.LabelInstanceNetworkBandwidth: "750",
-		}
-		selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
-		requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
-			return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+
+	Context("Labels", func() {
+		It("should support well-known labels for instance type selection", func() {
+			nodeSelector := map[string]string{
+				// Well Known
+				corev1beta1.NodePoolLabelKey: nodePool.Name,
+				v1.LabelInstanceTypeStable:   "c5.large",
+				// Well Known to AWS
+				v1beta1.LabelInstanceHypervisor:       "nitro",
+				v1beta1.LabelInstanceCategory:         "c",
+				v1beta1.LabelInstanceGeneration:       "5",
+				v1beta1.LabelInstanceFamily:           "c5",
+				v1beta1.LabelInstanceSize:             "large",
+				v1beta1.LabelInstanceCPU:              "2",
+				v1beta1.LabelInstanceMemory:           "4096",
+				v1beta1.LabelInstanceNetworkBandwidth: "750",
+			}
+			selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
+			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
+				return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+			})
+			deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
+				NodeSelector:     nodeSelector,
+				NodePreferences:  requirements,
+				NodeRequirements: requirements,
+			}})
+			env.ExpectCreated(nodeClass, nodePool, deployment)
+			env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
+			env.ExpectCreatedNodeCount("==", 1)
 		})
-		deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
-			NodeSelector:     nodeSelector,
-			NodePreferences:  requirements,
-			NodeRequirements: requirements,
-		}})
-		env.ExpectCreated(nodeClass, nodePool, deployment)
-		env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
-		env.ExpectCreatedNodeCount("==", 1)
-	})
-	It("should support well-known labels for local NVME storage", func() {
-		selectors.Insert(v1beta1.LabelInstanceLocalNVME) // Add node selector keys to selectors used in testing to ensure we test all labels
-		deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
-			NodePreferences: []v1.NodeSelectorRequirement{
-				{
-					Key:      v1beta1.LabelInstanceLocalNVME,
-					Operator: v1.NodeSelectorOpGt,
-					Values:   []string{"0"},
+		It("should support well-known labels for local NVME storage", func() {
+			selectors.Insert(v1beta1.LabelInstanceLocalNVME) // Add node selector keys to selectors used in testing to ensure we test all labels
+			deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
+				NodePreferences: []v1.NodeSelectorRequirement{
+					{
+						Key:      v1beta1.LabelInstanceLocalNVME,
+						Operator: v1.NodeSelectorOpGt,
+						Values:   []string{"0"},
+					},
 				},
-			},
-			NodeRequirements: []v1.NodeSelectorRequirement{
-				{
-					Key:      v1beta1.LabelInstanceLocalNVME,
-					Operator: v1.NodeSelectorOpGt,
-					Values:   []string{"0"},
+				NodeRequirements: []v1.NodeSelectorRequirement{
+					{
+						Key:      v1beta1.LabelInstanceLocalNVME,
+						Operator: v1.NodeSelectorOpGt,
+						Values:   []string{"0"},
+					},
 				},
-			},
-		}})
-		env.ExpectCreated(nodeClass, nodePool, deployment)
-		env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
-		env.ExpectCreatedNodeCount("==", 1)
-	})
-	It("should support well-known labels for encryption in transit", func() {
-		selectors.Insert(v1beta1.LabelInstanceEncryptionInTransitSupported) // Add node selector keys to selectors used in testing to ensure we test all labels
-		deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
-			NodePreferences: []v1.NodeSelectorRequirement{
-				{
-					Key:      v1beta1.LabelInstanceEncryptionInTransitSupported,
-					Operator: v1.NodeSelectorOpIn,
-					Values:   []string{"true"},
+			}})
+			env.ExpectCreated(nodeClass, nodePool, deployment)
+			env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
+			env.ExpectCreatedNodeCount("==", 1)
+		})
+		It("should support well-known labels for encryption in transit", func() {
+			selectors.Insert(v1beta1.LabelInstanceEncryptionInTransitSupported) // Add node selector keys to selectors used in testing to ensure we test all labels
+			deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
+				NodePreferences: []v1.NodeSelectorRequirement{
+					{
+						Key:      v1beta1.LabelInstanceEncryptionInTransitSupported,
+						Operator: v1.NodeSelectorOpIn,
+						Values:   []string{"true"},
+					},
 				},
-			},
-			NodeRequirements: []v1.NodeSelectorRequirement{
-				{
-					Key:      v1beta1.LabelInstanceEncryptionInTransitSupported,
-					Operator: v1.NodeSelectorOpIn,
-					Values:   []string{"true"},
+				NodeRequirements: []v1.NodeSelectorRequirement{
+					{
+						Key:      v1beta1.LabelInstanceEncryptionInTransitSupported,
+						Operator: v1.NodeSelectorOpIn,
+						Values:   []string{"true"},
+					},
 				},
-			},
-		}})
-		env.ExpectCreated(nodeClass, nodePool, deployment)
-		env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
-		env.ExpectCreatedNodeCount("==", 1)
-	})
-	It("should support well-known deprecated labels", func() {
-		nodeSelector := map[string]string{
-			// Deprecated Labels
-			v1.LabelFailureDomainBetaRegion: env.Region,
-			v1.LabelFailureDomainBetaZone:   fmt.Sprintf("%sa", env.Region),
-			"topology.ebs.csi.aws.com/zone": fmt.Sprintf("%sa", env.Region),
-
-			"beta.kubernetes.io/arch": "amd64",
-			"beta.kubernetes.io/os":   "linux",
-			v1.LabelInstanceType:      "c5.large",
-		}
-		selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
-		requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
-			return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+			}})
+			env.ExpectCreated(nodeClass, nodePool, deployment)
+			env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
+			env.ExpectCreatedNodeCount("==", 1)
 		})
-		deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
-			NodeSelector:     nodeSelector,
-			NodePreferences:  requirements,
-			NodeRequirements: requirements,
-		}})
-		env.ExpectCreated(nodeClass, nodePool, deployment)
-		env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
-		env.ExpectCreatedNodeCount("==", 1)
-	})
-	It("should support well-known labels for topology and architecture", func() {
-		nodeSelector := map[string]string{
-			// Well Known
-			corev1beta1.NodePoolLabelKey:     nodePool.Name,
-			v1.LabelTopologyRegion:           env.Region,
-			v1.LabelTopologyZone:             fmt.Sprintf("%sa", env.Region),
-			v1.LabelOSStable:                 "linux",
-			v1.LabelArchStable:               "amd64",
-			corev1beta1.CapacityTypeLabelKey: corev1beta1.CapacityTypeOnDemand,
-		}
-		selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
-		requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
-			return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+		It("should support well-known deprecated labels", func() {
+			nodeSelector := map[string]string{
+				// Deprecated Labels
+				v1.LabelFailureDomainBetaRegion: env.Region,
+				v1.LabelFailureDomainBetaZone:   fmt.Sprintf("%sa", env.Region),
+				"topology.ebs.csi.aws.com/zone": fmt.Sprintf("%sa", env.Region),
+
+				"beta.kubernetes.io/arch": "amd64",
+				"beta.kubernetes.io/os":   "linux",
+				v1.LabelInstanceType:      "c5.large",
+			}
+			selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
+			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
+				return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+			})
+			deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
+				NodeSelector:     nodeSelector,
+				NodePreferences:  requirements,
+				NodeRequirements: requirements,
+			}})
+			env.ExpectCreated(nodeClass, nodePool, deployment)
+			env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
+			env.ExpectCreatedNodeCount("==", 1)
 		})
-		deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
-			NodeSelector:     nodeSelector,
-			NodePreferences:  requirements,
-			NodeRequirements: requirements,
-		}})
-		env.ExpectCreated(nodeClass, nodePool, deployment)
-		env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
-		env.ExpectCreatedNodeCount("==", 1)
-	})
-	It("should support well-known labels for a gpu (nvidia)", func() {
-		nodeSelector := map[string]string{
-			v1beta1.LabelInstanceGPUName:         "t4",
-			v1beta1.LabelInstanceGPUMemory:       "16384",
-			v1beta1.LabelInstanceGPUManufacturer: "nvidia",
-			v1beta1.LabelInstanceGPUCount:        "1",
-		}
-		selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
-		requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
-			return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+		It("should support well-known labels for topology and architecture", func() {
+			nodeSelector := map[string]string{
+				// Well Known
+				corev1beta1.NodePoolLabelKey:     nodePool.Name,
+				v1.LabelTopologyRegion:           env.Region,
+				v1.LabelTopologyZone:             fmt.Sprintf("%sa", env.Region),
+				v1.LabelOSStable:                 "linux",
+				v1.LabelArchStable:               "amd64",
+				corev1beta1.CapacityTypeLabelKey: corev1beta1.CapacityTypeOnDemand,
+			}
+			selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
+			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
+				return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+			})
+			deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
+				NodeSelector:     nodeSelector,
+				NodePreferences:  requirements,
+				NodeRequirements: requirements,
+			}})
+			env.ExpectCreated(nodeClass, nodePool, deployment)
+			env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
+			env.ExpectCreatedNodeCount("==", 1)
 		})
-		deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
-			NodeSelector:     nodeSelector,
-			NodePreferences:  requirements,
-			NodeRequirements: requirements,
-		}})
-		env.ExpectCreated(nodeClass, nodePool, deployment)
-		env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
-		env.ExpectCreatedNodeCount("==", 1)
-	})
-	It("should support well-known labels for an accelerator (inferentia)", func() {
-		nodeSelector := map[string]string{
-			v1beta1.LabelInstanceAcceleratorName:         "inferentia",
-			v1beta1.LabelInstanceAcceleratorManufacturer: "aws",
-			v1beta1.LabelInstanceAcceleratorCount:        "1",
-		}
-		selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
-		requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
-			return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+		It("should support well-known labels for a gpu (nvidia)", func() {
+			nodeSelector := map[string]string{
+				v1beta1.LabelInstanceGPUName:         "t4",
+				v1beta1.LabelInstanceGPUMemory:       "16384",
+				v1beta1.LabelInstanceGPUManufacturer: "nvidia",
+				v1beta1.LabelInstanceGPUCount:        "1",
+			}
+			selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
+			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
+				return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+			})
+			deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
+				NodeSelector:     nodeSelector,
+				NodePreferences:  requirements,
+				NodeRequirements: requirements,
+			}})
+			env.ExpectCreated(nodeClass, nodePool, deployment)
+			env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
+			env.ExpectCreatedNodeCount("==", 1)
 		})
-		deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
-			NodeSelector:     nodeSelector,
-			NodePreferences:  requirements,
-			NodeRequirements: requirements,
-		}})
-		env.ExpectCreated(nodeClass, nodePool, deployment)
-		env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
-		env.ExpectCreatedNodeCount("==", 1)
-	})
-	It("should support well-known labels for windows-build version", func() {
-		env.ExpectWindowsIPAMEnabled()
-		DeferCleanup(func() {
-			env.ExpectWindowsIPAMDisabled()
+		It("should support well-known labels for an accelerator (inferentia)", func() {
+			nodeSelector := map[string]string{
+				v1beta1.LabelInstanceAcceleratorName:         "inferentia",
+				v1beta1.LabelInstanceAcceleratorManufacturer: "aws",
+				v1beta1.LabelInstanceAcceleratorCount:        "1",
+			}
+			selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
+			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
+				return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+			})
+			deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
+				NodeSelector:     nodeSelector,
+				NodePreferences:  requirements,
+				NodeRequirements: requirements,
+			}})
+			env.ExpectCreated(nodeClass, nodePool, deployment)
+			env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
+			env.ExpectCreatedNodeCount("==", 1)
 		})
+		It("should support well-known labels for windows-build version", func() {
+			env.ExpectWindowsIPAMEnabled()
+			DeferCleanup(func() {
+				env.ExpectWindowsIPAMDisabled()
+			})
 
-		nodeSelector := map[string]string{
-			// Well Known
-			v1.LabelWindowsBuild: v1beta1.Windows2022Build,
-			v1.LabelOSStable:     string(v1.Windows), // Specify the OS to enable vpc-resource-controller to inject the PrivateIPv4Address resource
-		}
-		selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
-		requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
-			return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+			nodeSelector := map[string]string{
+				// Well Known
+				v1.LabelWindowsBuild: v1beta1.Windows2022Build,
+				v1.LabelOSStable:     string(v1.Windows), // Specify the OS to enable vpc-resource-controller to inject the PrivateIPv4Address resource
+			}
+			selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
+			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
+				return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+			})
+			deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
+				NodeSelector:     nodeSelector,
+				NodePreferences:  requirements,
+				NodeRequirements: requirements,
+				Image:            aws.WindowsDefaultImage,
+			}})
+			nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyWindows2022
+			// TODO: remove this requirement once VPC RC rolls out m7a.*, r7a.* ENI data (https://github.com/aws/karpenter-provider-aws/issues/4472)
+			test.ReplaceRequirements(nodePool,
+				v1.NodeSelectorRequirement{
+					Key:      v1beta1.LabelInstanceFamily,
+					Operator: v1.NodeSelectorOpNotIn,
+					Values:   aws.ExcludedInstanceFamilies,
+				},
+				v1.NodeSelectorRequirement{
+					Key:      v1.LabelOSStable,
+					Operator: v1.NodeSelectorOpIn,
+					Values:   []string{string(v1.Windows)},
+				},
+			)
+			env.ExpectCreated(nodeClass, nodePool, deployment)
+			env.EventuallyExpectHealthyPodCountWithTimeout(time.Minute*15, labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
+			env.ExpectCreatedNodeCount("==", 1)
 		})
-		deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
-			NodeSelector:     nodeSelector,
-			NodePreferences:  requirements,
-			NodeRequirements: requirements,
-			Image:            aws.WindowsDefaultImage,
-		}})
-		nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyWindows2022
-		// TODO: remove this requirement once VPC RC rolls out m7a.*, r7a.* ENI data (https://github.com/aws/karpenter-provider-aws/issues/4472)
-		test.ReplaceRequirements(nodePool,
-			v1.NodeSelectorRequirement{
-				Key:      v1beta1.LabelInstanceFamily,
-				Operator: v1.NodeSelectorOpNotIn,
-				Values:   aws.ExcludedInstanceFamilies,
-			},
-			v1.NodeSelectorRequirement{
-				Key:      v1.LabelOSStable,
-				Operator: v1.NodeSelectorOpIn,
-				Values:   []string{string(v1.Windows)},
-			},
+		DescribeTable("should support restricted label domain exceptions", func(domain string) {
+			// Assign labels to the nodepool so that it has known values
+			test.ReplaceRequirements(nodePool,
+				v1.NodeSelectorRequirement{Key: domain + "/team", Operator: v1.NodeSelectorOpExists},
+				v1.NodeSelectorRequirement{Key: domain + "/custom-label", Operator: v1.NodeSelectorOpExists},
+				v1.NodeSelectorRequirement{Key: "subdomain." + domain + "/custom-label", Operator: v1.NodeSelectorOpExists},
+			)
+			nodeSelector := map[string]string{
+				domain + "/team":                        "team-1",
+				domain + "/custom-label":                "custom-value",
+				"subdomain." + domain + "/custom-label": "custom-value",
+			}
+			selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
+			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
+				return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+			})
+			deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
+				NodeSelector:     nodeSelector,
+				NodePreferences:  requirements,
+				NodeRequirements: requirements,
+			}})
+			env.ExpectCreated(nodeClass, nodePool, deployment)
+			env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
+			node := env.ExpectCreatedNodeCount("==", 1)[0]
+			// Ensure that the requirements/labels specified above are propagated onto the node
+			for k, v := range nodeSelector {
+				Expect(node.Labels).To(HaveKeyWithValue(k, v))
+			}
+		},
+			Entry("node-restriction.kubernetes.io", "node-restriction.kubernetes.io"),
+			Entry("node.kubernetes.io", "node.kubernetes.io"),
+			Entry("kops.k8s.io", "kops.k8s.io"),
 		)
-		env.ExpectCreated(nodeClass, nodePool, deployment)
-		env.EventuallyExpectHealthyPodCountWithTimeout(time.Minute*15, labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
-		env.ExpectCreatedNodeCount("==", 1)
 	})
-	DescribeTable("should support restricted label domain exceptions", func(domain string) {
-		// Assign labels to the nodepool so that it has known values
-		test.ReplaceRequirements(nodePool,
-			v1.NodeSelectorRequirement{Key: domain + "/team", Operator: v1.NodeSelectorOpExists},
-			v1.NodeSelectorRequirement{Key: domain + "/custom-label", Operator: v1.NodeSelectorOpExists},
-			v1.NodeSelectorRequirement{Key: "subdomain." + domain + "/custom-label", Operator: v1.NodeSelectorOpExists},
-		)
-		nodeSelector := map[string]string{
-			domain + "/team":                        "team-1",
-			domain + "/custom-label":                "custom-value",
-			"subdomain." + domain + "/custom-label": "custom-value",
-		}
-		selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
-		requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
-			return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
-		})
-		deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
-			NodeSelector:     nodeSelector,
-			NodePreferences:  requirements,
-			NodeRequirements: requirements,
-		}})
-		env.ExpectCreated(nodeClass, nodePool, deployment)
-		env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
-		node := env.ExpectCreatedNodeCount("==", 1)[0]
-		// Ensure that the requirements/labels specified above are propagated onto the node
-		for k, v := range nodeSelector {
-			Expect(node.Labels).To(HaveKeyWithValue(k, v))
-		}
-	},
-		Entry("node-restriction.kuberentes.io", "node-restriction.kuberentes.io"),
-		Entry("node.kubernetes.io", "node.kubernetes.io"),
-		Entry("kops.k8s.io", "kops.k8s.io"),
-	)
-	It("should provision a node for naked pods", func() {
-		pod := test.Pod()
 
-		env.ExpectCreated(nodeClass, nodePool, pod)
-		env.EventuallyExpectHealthy(pod)
-		env.ExpectCreatedNodeCount("==", 1)
-	})
-	It("should provision a node for a deployment", Label(debug.NoWatch), Label(debug.NoEvents), func() {
-		deployment := test.Deployment(test.DeploymentOptions{Replicas: 50})
-		env.ExpectCreated(nodeClass, nodePool, deployment)
-		env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
-		env.ExpectCreatedNodeCount("<=", 2) // should probably all land on a single node, but at worst two depending on batching
-	})
-	It("should provision a node for a self-affinity deployment", func() {
-		// just two pods as they all need to land on the same node
-		podLabels := map[string]string{"test": "self-affinity"}
-		deployment := test.Deployment(test.DeploymentOptions{
-			Replicas: 2,
-			PodOptions: test.PodOptions{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: podLabels,
-				},
-				PodRequirements: []v1.PodAffinityTerm{
-					{
-						LabelSelector: &metav1.LabelSelector{MatchLabels: podLabels},
-						TopologyKey:   v1.LabelHostname,
+	Context("Provisioning", func() {
+		It("should provision a node for naked pods", func() {
+			pod := test.Pod()
+
+			env.ExpectCreated(nodeClass, nodePool, pod)
+			env.EventuallyExpectHealthy(pod)
+			env.ExpectCreatedNodeCount("==", 1)
+		})
+		It("should provision a node for a deployment", Label(debug.NoWatch), Label(debug.NoEvents), func() {
+			deployment := test.Deployment(test.DeploymentOptions{Replicas: 50})
+			env.ExpectCreated(nodeClass, nodePool, deployment)
+			env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), int(*deployment.Spec.Replicas))
+			env.ExpectCreatedNodeCount("<=", 2) // should probably all land on a single node, but at worst two depending on batching
+		})
+		It("should provision a node for a self-affinity deployment", func() {
+			// just two pods as they all need to land on the same node
+			podLabels := map[string]string{"test": "self-affinity"}
+			deployment := test.Deployment(test.DeploymentOptions{
+				Replicas: 2,
+				PodOptions: test.PodOptions{
+					ObjectMeta: metav1.ObjectMeta{
+						Labels: podLabels,
+					},
+					PodRequirements: []v1.PodAffinityTerm{
+						{
+							LabelSelector: &metav1.LabelSelector{MatchLabels: podLabels},
+							TopologyKey:   v1.LabelHostname,
+						},
 					},
 				},
-			},
-		})
+			})
 
-		env.ExpectCreated(nodeClass, nodePool, deployment)
-		env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), 2)
-		env.ExpectCreatedNodeCount("==", 1)
-	})
-	It("should provision three nodes for a zonal topology spread", func() {
-		// one pod per zone
-		podLabels := map[string]string{"test": "zonal-spread"}
-		deployment := test.Deployment(test.DeploymentOptions{
-			Replicas: 3,
-			PodOptions: test.PodOptions{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: podLabels,
-				},
-				TopologySpreadConstraints: []v1.TopologySpreadConstraint{
-					{
-						MaxSkew:           1,
-						TopologyKey:       v1.LabelTopologyZone,
-						WhenUnsatisfiable: v1.DoNotSchedule,
-						LabelSelector:     &metav1.LabelSelector{MatchLabels: podLabels},
-						MinDomains:        lo.ToPtr(int32(3)),
+			env.ExpectCreated(nodeClass, nodePool, deployment)
+			env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels), 2)
+			env.ExpectCreatedNodeCount("==", 1)
+		})
+		It("should provision three nodes for a zonal topology spread", func() {
+			// one pod per zone
+			podLabels := map[string]string{"test": "zonal-spread"}
+			deployment := test.Deployment(test.DeploymentOptions{
+				Replicas: 3,
+				PodOptions: test.PodOptions{
+					ObjectMeta: metav1.ObjectMeta{
+						Labels: podLabels,
+					},
+					TopologySpreadConstraints: []v1.TopologySpreadConstraint{
+						{
+							MaxSkew:           1,
+							TopologyKey:       v1.LabelTopologyZone,
+							WhenUnsatisfiable: v1.DoNotSchedule,
+							LabelSelector:     &metav1.LabelSelector{MatchLabels: podLabels},
+							MinDomains:        lo.ToPtr(int32(3)),
+						},
 					},
 				},
-			},
-		})
+			})
 
-		env.ExpectCreated(nodeClass, nodePool, deployment)
-		env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(podLabels), 3)
-		// Karpenter will launch three nodes, however if all three nodes don't get register with the cluster at the same time, two pods will be placed on one node.
-		// This can result in a case where all 3 pods are healthy, while there are only two created nodes.
-		// In that case, we still expect to eventually have three nodes.
-		env.EventuallyExpectNodeCount("==", 3)
-	})
-	It("should provision a node using a NodePool with higher priority", func() {
-		nodePoolLowPri := test.NodePool(corev1beta1.NodePool{
-			Spec: corev1beta1.NodePoolSpec{
-				Weight: ptr.Int32(10),
-				Template: corev1beta1.NodeClaimTemplate{
-					Spec: corev1beta1.NodeClaimSpec{
-						NodeClassRef: &corev1beta1.NodeClassReference{
-							Name: nodeClass.Name,
-						},
-						Requirements: []v1.NodeSelectorRequirement{
-							{
-								Key:      v1.LabelOSStable,
-								Operator: v1.NodeSelectorOpIn,
-								Values:   []string{string(v1.Linux)},
+			env.ExpectCreated(nodeClass, nodePool, deployment)
+			env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(podLabels), 3)
+			// Karpenter will launch three nodes; however, if all three nodes don't register with the cluster at the same time, two pods will be placed on one node.
+			// This can result in a case where all 3 pods are healthy, while there are only two created nodes.
+			// In that case, we still expect to eventually have three nodes.
+			env.EventuallyExpectNodeCount("==", 3)
+		})
+		It("should provision a node using a NodePool with higher priority", func() {
+			nodePoolLowPri := test.NodePool(corev1beta1.NodePool{
+				Spec: corev1beta1.NodePoolSpec{
+					Weight: ptr.Int32(10),
+					Template: corev1beta1.NodeClaimTemplate{
+						Spec: corev1beta1.NodeClaimSpec{
+							NodeClassRef: &corev1beta1.NodeClassReference{
+								Name: nodeClass.Name,
 							},
-							{
-								Key:      v1.LabelInstanceTypeStable,
-								Operator: v1.NodeSelectorOpIn,
-								Values:   []string{"t3.nano"},
+							Requirements: []v1.NodeSelectorRequirement{
+								{
+									Key:      v1.LabelOSStable,
+									Operator: v1.NodeSelectorOpIn,
+									Values:   []string{string(v1.Linux)},
+								},
+								{
+									Key:      v1.LabelInstanceTypeStable,
+									Operator: v1.NodeSelectorOpIn,
+									Values:   []string{"t3.nano"},
+								},
 							},
 						},
 					},
 				},
-			},
-		})
-		nodePoolHighPri := test.NodePool(corev1beta1.NodePool{
-			Spec: corev1beta1.NodePoolSpec{
-				Weight: ptr.Int32(100),
-				Template: corev1beta1.NodeClaimTemplate{
-					Spec: corev1beta1.NodeClaimSpec{
-						NodeClassRef: &corev1beta1.NodeClassReference{
-							Name: nodeClass.Name,
-						},
-						Requirements: []v1.NodeSelectorRequirement{
-							{
-								Key:      v1.LabelOSStable,
-								Operator: v1.NodeSelectorOpIn,
-								Values:   []string{string(v1.Linux)},
+			})
+			nodePoolHighPri := test.NodePool(corev1beta1.NodePool{
+				Spec: corev1beta1.NodePoolSpec{
+					Weight: ptr.Int32(100),
+					Template: corev1beta1.NodeClaimTemplate{
+						Spec: corev1beta1.NodeClaimSpec{
+							NodeClassRef: &corev1beta1.NodeClassReference{
+								Name: nodeClass.Name,
 							},
-							{
-								Key:      v1.LabelInstanceTypeStable,
-								Operator: v1.NodeSelectorOpIn,
-								Values:   []string{"c5.large"},
+							Requirements: []v1.NodeSelectorRequirement{
+								{
+									Key:      v1.LabelOSStable,
+									Operator: v1.NodeSelectorOpIn,
+									Values:   []string{string(v1.Linux)},
+								},
+								{
+									Key:      v1.LabelInstanceTypeStable,
+									Operator: v1.NodeSelectorOpIn,
+									Values:   []string{"c5.large"},
+								},
 							},
 						},
 					},
 				},
+			})
+			pod := test.Pod()
+			env.ExpectCreated(pod, nodeClass, nodePoolLowPri, nodePoolHighPri)
+			env.EventuallyExpectHealthy(pod)
+			env.ExpectCreatedNodeCount("==", 1)
+			Expect(ptr.StringValue(env.GetInstance(pod.Spec.NodeName).InstanceType)).To(Equal("c5.large"))
+			Expect(env.GetNode(pod.Spec.NodeName).Labels[corev1beta1.NodePoolLabelKey]).To(Equal(nodePoolHighPri.Name))
+		})
+
+		DescribeTable(
+			"should provision a right-sized node when a pod has InitContainers (cpu)",
+			func(expectedNodeCPU string, containerRequirements v1.ResourceRequirements, initContainers ...v1.Container) {
+				if version, err := env.GetK8sMinorVersion(0); err != nil || version < 29 {
+					Skip("native sidecar containers are only enabled on EKS 1.29+")
+				}
+				test.ReplaceRequirements(nodePool, v1.NodeSelectorRequirement{
+					Key:      v1beta1.LabelInstanceCPU,
+					Operator: v1.NodeSelectorOpIn,
+					Values:   []string{"1", "2"},
+				}, v1.NodeSelectorRequirement{
+					Key:      v1beta1.LabelInstanceCategory,
+					Operator: v1.NodeSelectorOpNotIn,
+					Values:   []string{"t"},
+				})
+				pod := test.Pod(test.PodOptions{
+					InitContainers:       initContainers,
+					ResourceRequirements: containerRequirements,
+				})
+				env.ExpectCreated(nodePool, nodeClass, pod)
+				env.EventuallyExpectHealthy(pod)
+				node := env.ExpectCreatedNodeCount("==", 1)[0]
+				Expect(node.ObjectMeta.GetLabels()[v1beta1.LabelInstanceCPU]).To(Equal(expectedNodeCPU))
 			},
+			Entry("sidecar requirements + later init requirements do exceed container requirements", "2", v1.ResourceRequirements{
+				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("400m")},
+			}, ephemeralInitContainer(v1.ResourceRequirements{
+				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("300m")},
+			}), v1.Container{
+				RestartPolicy: lo.ToPtr(v1.ContainerRestartPolicyAlways),
+				Resources: v1.ResourceRequirements{
+					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("350m")},
+				},
+			}, ephemeralInitContainer(v1.ResourceRequirements{
+				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
+			})),
+			Entry("sidecar requirements + later init requirements do not exceed container requirements", "1", v1.ResourceRequirements{
+				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("400m")},
+			}, ephemeralInitContainer(v1.ResourceRequirements{
+				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("300m")},
+			}), v1.Container{
+				RestartPolicy: lo.ToPtr(v1.ContainerRestartPolicyAlways),
+				Resources: v1.ResourceRequirements{
+					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("350m")},
+				},
+			}, ephemeralInitContainer(v1.ResourceRequirements{
+				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("300m")},
+			})),
+			Entry("init container requirements exceed all later requests", "2", v1.ResourceRequirements{
+				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("400m")},
+			}, v1.Container{
+				RestartPolicy: lo.ToPtr(v1.ContainerRestartPolicyAlways),
+				Resources: v1.ResourceRequirements{
+					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
+				},
+			}, ephemeralInitContainer(v1.ResourceRequirements{
+				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1500m")},
+			}), v1.Container{
+				RestartPolicy: lo.ToPtr(v1.ContainerRestartPolicyAlways),
+				Resources: v1.ResourceRequirements{
+					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
+				},
+			}),
+		)
+		It("should provision a right-sized node when a pod has InitContainers (mixed resources)", func() {
+			if version, err := env.GetK8sMinorVersion(0); err != nil || version < 29 {
+				Skip("native sidecar containers are only enabled on EKS 1.29+")
+			}
+			test.ReplaceRequirements(nodePool, v1.NodeSelectorRequirement{
+				Key:      v1beta1.LabelInstanceCategory,
+				Operator: v1.NodeSelectorOpNotIn,
+				Values:   []string{"t"},
+			})
+			pod := test.Pod(test.PodOptions{
+				InitContainers: []v1.Container{
+					{
+						RestartPolicy: lo.ToPtr(v1.ContainerRestartPolicyAlways),
+						Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
+							v1.ResourceCPU:    resource.MustParse("100m"),
+							v1.ResourceMemory: resource.MustParse("128Mi"),
+						}},
+					},
+					ephemeralInitContainer(v1.ResourceRequirements{Requests: v1.ResourceList{
+						v1.ResourceCPU:    resource.MustParse("50m"),
+						v1.ResourceMemory: resource.MustParse("4Gi"),
+					}}),
+				},
+				ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("128Mi"),
+				}},
+			})
+			env.ExpectCreated(nodePool, nodeClass, pod)
+			env.EventuallyExpectHealthy(pod)
 		})
-		pod := test.Pod()
-		env.ExpectCreated(pod, nodeClass, nodePoolLowPri, nodePoolHighPri)
-		env.EventuallyExpectHealthy(pod)
-		env.ExpectCreatedNodeCount("==", 1)
-		Expect(ptr.StringValue(env.GetInstance(pod.Spec.NodeName).InstanceType)).To(Equal("c5.large"))
-		Expect(env.GetNode(pod.Spec.NodeName).Labels[corev1beta1.NodePoolLabelKey]).To(Equal(nodePoolHighPri.Name))
 	})
 })
+
+// ephemeralInitContainer returns a short-lived (non-sidecar) init container
+// with the given resource requirements. It runs "sleep 5" and then exits, so
+// its requests only apply during pod initialization — used by the scheduling
+// tests to verify Karpenter right-sizes nodes for init container requests.
+func ephemeralInitContainer(requirements v1.ResourceRequirements) v1.Container {
+	return v1.Container{
+		Image:     "alpine",
+		Command:   []string{"/bin/sh"},
+		Args:      []string{"-c", "sleep 5"},
+		Resources: requirements,
+	}
+}