From 4991afbb9d64932824d4297c5b7a2c913b459120 Mon Sep 17 00:00:00 2001 From: Marques Johansson Date: Mon, 31 Aug 2020 13:06:53 -0400 Subject: [PATCH 1/4] add Packet cloudprovider owners Signed-off-by: Marques Johansson --- cluster-autoscaler/cloudprovider/packet/OWNERS | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 cluster-autoscaler/cloudprovider/packet/OWNERS diff --git a/cluster-autoscaler/cloudprovider/packet/OWNERS b/cluster-autoscaler/cloudprovider/packet/OWNERS new file mode 100644 index 000000000000..5bdd0a2b951c --- /dev/null +++ b/cluster-autoscaler/cloudprovider/packet/OWNERS @@ -0,0 +1,14 @@ +approvers: +- d-mo +- detiber +- deitch +- displague +- gianarb +reviewers: +- d-mo +- deitch +- detiber +- displague +- gianarb +- v-pap +- rawkode From 3ce6ab87ebce82e02ea523fef097bf65df294f14 Mon Sep 17 00:00:00 2001 From: v-pap Date: Fri, 6 Mar 2020 21:39:16 +0000 Subject: [PATCH 2/4] Add support for scaling up/down from/to 0 nodes in Packet --- cluster-autoscaler/README.md | 3 + .../cluster-autoscaler-deployment.yaml | 14 +- .../examples/cluster-autoscaler-secret.yaml | 5 +- .../cluster-autoscaler-svcaccount.yaml | 9 +- .../packet/packet_manager_rest.go | 153 +++++++++++++++++- .../packet/packet_manager_rest_test.go | 1 + .../cloudprovider/packet/packet_node_group.go | 2 +- 7 files changed, 177 insertions(+), 10 deletions(-) diff --git a/cluster-autoscaler/README.md b/cluster-autoscaler/README.md index ec912b08e5f9..8746b97f9d3f 100644 --- a/cluster-autoscaler/README.md +++ b/cluster-autoscaler/README.md @@ -16,6 +16,8 @@ You should also take a look at the notes and "gotchas" for your specific cloud p * [Azure](./cloudprovider/azure/README.md) * [AWS](./cloudprovider/aws/README.md) * [BaiduCloud](./cloudprovider/baiducloud/README.md) +* [HuaweiCloud](./cloudprovider/huaweicloud/README.md) +* [Packet](./cloudprovider/packet/README.md#notes) # Releases @@ -142,3 +144,4 @@ Supported cloud providers: * Alibaba Cloud 
https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/alicloud/README.md * OpenStack Magnum https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/magnum/README.md * DigitalOcean https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/digitalocean/README.md +* Packet https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/packet/README.md diff --git a/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-deployment.yaml b/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-deployment.yaml index 247acb142cf9..df84a45c1f24 100644 --- a/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-deployment.yaml +++ b/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-deployment.yaml @@ -128,6 +128,18 @@ spec: labels: app: cluster-autoscaler spec: + # Node affinity is used to force cluster-autoscaler to stick + # to the master node. This allows the cluster to reliably downscale + # to zero nodes when needed. 
+ affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: dedicated + operator: In + values: + - master serviceAccountName: cluster-autoscaler containers: - name: cluster-autoscaler @@ -155,7 +167,7 @@ spec: - --cluster-name=cluster1 - --cloud-config=/config/cloud-config - --cloud-provider=packet - - --nodes=1:10:pool1 + - --nodes=0:10:pool1 - --scale-down-unneeded-time=1m0s - --scale-down-delay-after-add=1m0s - --scale-down-unready-time=1m0s diff --git a/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-secret.yaml b/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-secret.yaml index 2be877f9c971..1412e415c978 100644 --- a/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-secret.yaml +++ b/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-secret.yaml @@ -15,6 +15,9 @@ metadata: namespace: kube-system type: Opaque stringData: + # kubeadm, kubelet, kubectl are pinned to version 1.17.4 + # The version can be altered by decoding the cloudinit and updating it to + # the desired version cloud-config: |- [Global] project-id=YOUR_PACKET_PROJECT_ID @@ -23,7 +26,7 @@ stringData: os=ubuntu_18_04 plan=t1.small.x86 billing=hourly - 
cloudinit=IyEvYmluL2Jhc2gKZXhwb3J0IERFQklBTl9GUk9OVEVORD1ub25pbnRlcmFjdGl2ZQphcHQtZ2V0IHVwZGF0ZSAmJiBhcHQtZ2V0IGluc3RhbGwgLXkgYXB0LXRyYW5zcG9ydC1odHRwcyBjYS1jZXJ0aWZpY2F0ZXMgY3VybCBzb2Z0d2FyZS1wcm9wZXJ0aWVzLWNvbW1vbgpjdXJsIC1mc1NMIGh0dHBzOi8vZG93bmxvYWQuZG9ja2VyLmNvbS9saW51eC91YnVudHUvZ3BnIHwgYXB0LWtleSBhZGQgLQpjdXJsIC1zIGh0dHBzOi8vcGFja2FnZXMuY2xvdWQuZ29vZ2xlLmNvbS9hcHQvZG9jL2FwdC1rZXkuZ3BnIHwgYXB0LWtleSBhZGQgLQpjYXQgPDxFT0YgPi9ldGMvYXB0L3NvdXJjZXMubGlzdC5kL2t1YmVybmV0ZXMubGlzdApkZWIgaHR0cHM6Ly9hcHQua3ViZXJuZXRlcy5pby8ga3ViZXJuZXRlcy14ZW5pYWwgbWFpbgpFT0YKYWRkLWFwdC1yZXBvc2l0b3J5ICAgImRlYiBbYXJjaD1hbWQ2NF0gaHR0cHM6Ly9kb3dubG9hZC5kb2NrZXIuY29tL2xpbnV4L3VidW50dSAgICQobHNiX3JlbGVhc2UgLWNzKSAgIHN0YWJsZSIKYXB0LWdldCB1cGRhdGUKYXB0LWdldCB1cGdyYWRlIC15CmFwdC1nZXQgaW5zdGFsbCAteSBrdWJlbGV0IGt1YmVhZG0ga3ViZWN0bAphcHQtbWFyayBob2xkIGt1YmVsZXQga3ViZWFkbSBrdWJlY3RsCmN1cmwgLWZzU0wgaHR0cHM6Ly9kb3dubG9hZC5kb2NrZXIuY29tL2xpbnV4L3VidW50dS9ncGcgfCBhcHQta2V5IGFkZCAtCmFkZC1hcHQtcmVwb3NpdG9yeSAiZGViIFthcmNoPWFtZDY0XSBodHRwczovL2Rvd25sb2FkLmRvY2tlci5jb20vbGludXgvdWJ1bnR1IGJpb25pYyBzdGFibGUiCmFwdCB1cGRhdGUKYXB0IGluc3RhbGwgLXkgZG9ja2VyLWNlPTE4LjA2LjJ+Y2V+My0wfnVidW50dQpjYXQgPiAvZXRjL2RvY2tlci9kYWVtb24uanNvbiA8PEVPRgp7CiAgImV4ZWMtb3B0cyI6IFsibmF0aXZlLmNncm91cGRyaXZlcj1zeXN0ZW1kIl0sCiAgImxvZy1kcml2ZXIiOiAianNvbi1maWxlIiwKICAibG9nLW9wdHMiOiB7CiAgICAibWF4LXNpemUiOiAiMTAwbSIKICB9LAogICJzdG9yYWdlLWRyaXZlciI6ICJvdmVybGF5MiIKfQpFT0YKbWtkaXIgLXAgL2V0Yy9zeXN0ZW1kL3N5c3RlbS9kb2NrZXIuc2VydmljZS5kCnN5c3RlbWN0bCBkYWVtb24tcmVsb2FkCnN5c3RlbWN0bCByZXN0YXJ0IGRvY2tlcgpzd2Fwb2ZmIC1hCm12IC9ldGMvZnN0YWIgL2V0Yy9mc3RhYi5vbGQgJiYgZ3JlcCAtdiBzd2FwIC9ldGMvZnN0YWIub2xkID4gL2V0Yy9mc3RhYgprdWJlYWRtIGpvaW4gLS1kaXNjb3ZlcnktdG9rZW4tdW5zYWZlLXNraXAtY2EtdmVyaWZpY2F0aW9uIC0tdG9rZW4ge3suQm9vdHN0cmFwVG9rZW5JRH19Lnt7LkJvb3RzdHJhcFRva2VuU2VjcmV0fX0ge3suQVBJU2VydmVyRW5kcG9pbnR9fQo= + 
cloudinit=IyEvYmluL2Jhc2gKZXhwb3J0IERFQklBTl9GUk9OVEVORD1ub25pbnRlcmFjdGl2ZQphcHQtZ2V0IHVwZGF0ZSAmJiBhcHQtZ2V0IGluc3RhbGwgLXkgYXB0LXRyYW5zcG9ydC1odHRwcyBjYS1jZXJ0aWZpY2F0ZXMgY3VybCBzb2Z0d2FyZS1wcm9wZXJ0aWVzLWNvbW1vbgpjdXJsIC1mc1NMIGh0dHBzOi8vZG93bmxvYWQuZG9ja2VyLmNvbS9saW51eC91YnVudHUvZ3BnIHwgYXB0LWtleSBhZGQgLQpjdXJsIC1zIGh0dHBzOi8vcGFja2FnZXMuY2xvdWQuZ29vZ2xlLmNvbS9hcHQvZG9jL2FwdC1rZXkuZ3BnIHwgYXB0LWtleSBhZGQgLQpjYXQgPDxFT0YgPi9ldGMvYXB0L3NvdXJjZXMubGlzdC5kL2t1YmVybmV0ZXMubGlzdApkZWIgaHR0cHM6Ly9hcHQua3ViZXJuZXRlcy5pby8ga3ViZXJuZXRlcy14ZW5pYWwgbWFpbgpFT0YKYWRkLWFwdC1yZXBvc2l0b3J5ICAgImRlYiBbYXJjaD1hbWQ2NF0gaHR0cHM6Ly9kb3dubG9hZC5kb2NrZXIuY29tL2xpbnV4L3VidW50dSAgICQobHNiX3JlbGVhc2UgLWNzKSAgIHN0YWJsZSIKYXB0LWdldCB1cGRhdGUKYXB0LWdldCB1cGdyYWRlIC15CmFwdC1nZXQgaW5zdGFsbCAteSBrdWJlbGV0PTEuMTcuNC0wMCBrdWJlYWRtPTEuMTcuNC0wMCBrdWJlY3RsPTEuMTcuNC0wMAphcHQtbWFyayBob2xkIGt1YmVsZXQga3ViZWFkbSBrdWJlY3RsCmN1cmwgLWZzU0wgaHR0cHM6Ly9kb3dubG9hZC5kb2NrZXIuY29tL2xpbnV4L3VidW50dS9ncGcgfCBhcHQta2V5IGFkZCAtCmFkZC1hcHQtcmVwb3NpdG9yeSAiZGViIFthcmNoPWFtZDY0XSBodHRwczovL2Rvd25sb2FkLmRvY2tlci5jb20vbGludXgvdWJ1bnR1IGJpb25pYyBzdGFibGUiCmFwdCB1cGRhdGUKYXB0IGluc3RhbGwgLXkgZG9ja2VyLWNlPTE4LjA2LjJ+Y2V+My0wfnVidW50dQpjYXQgPiAvZXRjL2RvY2tlci9kYWVtb24uanNvbiA8PEVPRgp7CiAgImV4ZWMtb3B0cyI6IFsibmF0aXZlLmNncm91cGRyaXZlcj1zeXN0ZW1kIl0sCiAgImxvZy1kcml2ZXIiOiAianNvbi1maWxlIiwKICAibG9nLW9wdHMiOiB7CiAgICAibWF4LXNpemUiOiAiMTAwbSIKICB9LAogICJzdG9yYWdlLWRyaXZlciI6ICJvdmVybGF5MiIKfQpFT0YKbWtkaXIgLXAgL2V0Yy9zeXN0ZW1kL3N5c3RlbS9kb2NrZXIuc2VydmljZS5kCnN5c3RlbWN0bCBkYWVtb24tcmVsb2FkCnN5c3RlbWN0bCByZXN0YXJ0IGRvY2tlcgpzd2Fwb2ZmIC1hCm12IC9ldGMvZnN0YWIgL2V0Yy9mc3RhYi5vbGQgJiYgZ3JlcCAtdiBzd2FwIC9ldGMvZnN0YWIub2xkID4gL2V0Yy9mc3RhYgpjYXQgPDxFT0YgfCB0ZWUgL2V0Yy9kZWZhdWx0L2t1YmVsZXQKS1VCRUxFVF9FWFRSQV9BUkdTPS0tY2xvdWQtcHJvdmlkZXI9ZXh0ZXJuYWwKRU9GCmt1YmVhZG0gam9pbiAtLWRpc2NvdmVyeS10b2tlbi11bnNhZmUtc2tpcC1jYS12ZXJpZmljYXRpb24gLS10b2tlbiB7ey5Cb290c3RyYXBUb2tlbklEfX0ue3suQm9vdHN0cmFwVG9rZW5TZWNyZXR9fSB7ey5BUElTZXJ2ZXJFbmRwb2ludH19Cg
== reservation=prefer hostname-pattern=k8s-{{.ClusterName}}-{{.NodeGroup}}-{{.RandString8}} --- diff --git a/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-svcaccount.yaml b/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-svcaccount.yaml index 190bf9372ecf..bb5672a53a00 100644 --- a/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-svcaccount.yaml +++ b/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-svcaccount.yaml @@ -38,7 +38,7 @@ rules: resources: ["daemonsets", "replicasets", "statefulsets"] verbs: ["watch", "list", "get"] - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] + resources: ["storageclasses", "csinodes"] verbs: ["watch", "list", "get"] - apiGroups: [""] resources: ["configmaps"] @@ -47,6 +47,13 @@ rules: resources: ["configmaps"] resourceNames: ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"] verbs: ["delete", "get", "update"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + resourceNames: ["cluster-autoscaler"] + verbs: ["get", "update"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go index 2edda3d848cb..1db3ca9c6722 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go @@ -31,12 +31,128 @@ import ( "time" "gopkg.in/gcfg.v1" + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" klog "k8s.io/klog/v2" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" ) +type 
instanceType struct { + InstanceName string + CPU int64 + MemoryMb int64 + GPU int64 +} + +// InstanceTypes is a map of packet resources +var InstanceTypes = map[string]*instanceType{ + "c1.large.arm": { + InstanceName: "c1.large.arm", + CPU: 96, + MemoryMb: 131072, + GPU: 0, + }, + "c1.small.x86": { + InstanceName: "c1.small.x86", + CPU: 4, + MemoryMb: 32768, + GPU: 0, + }, + "c1.xlarge.x86": { + InstanceName: "c1.xlarge.x86", + CPU: 16, + MemoryMb: 131072, + GPU: 0, + }, + "c2.large.arm": { + InstanceName: "c2.large.arm", + CPU: 32, + MemoryMb: 131072, + GPU: 0, + }, + "c2.medium.x86": { + InstanceName: "c2.medium.x86", + CPU: 24, + MemoryMb: 65536, + GPU: 0, + }, + "c3.medium.x86": { + InstanceName: "c3.medium.x86", + CPU: 24, + MemoryMb: 65536, + GPU: 0, + }, + "c3.small.x86": { + InstanceName: "c3.small.x86", + CPU: 8, + MemoryMb: 32768, + GPU: 1, + }, + "g2.large.x86": { + InstanceName: "g2.large.x86", + CPU: 24, + MemoryMb: 196608, + GPU: 0, + }, + "m1.xlarge.x86": { + InstanceName: "m1.xlarge.x86", + CPU: 24, + MemoryMb: 262144, + GPU: 0, + }, + "m2.xlarge.x86": { + InstanceName: "m2.xlarge.x86", + CPU: 28, + MemoryMb: 393216, + GPU: 0, + }, + "n2.xlarge.x86": { + InstanceName: "n2.xlarge.x86", + CPU: 28, + MemoryMb: 393216, + GPU: 0, + }, + "s1.large.x86": { + InstanceName: "s1.large.x86", + CPU: 8, + MemoryMb: 65536, + GPU: 0, + }, + "s3.xlarge.x86": { + InstanceName: "s3.xlarge.x86", + CPU: 24, + MemoryMb: 196608, + GPU: 0, + }, + "t1.small.x86": { + InstanceName: "t1.small.x86", + CPU: 4, + MemoryMb: 8192, + GPU: 0, + }, + "t3.small.x86": { + InstanceName: "t3.small.x86", + CPU: 4, + MemoryMb: 16384, + GPU: 0, + }, + "x1.small.x86": { + InstanceName: "x1.small.x86", + CPU: 4, + MemoryMb: 32768, + GPU: 0, + }, + "x2.xlarge.x86": { + InstanceName: "x2.xlarge.x86", + CPU: 28, + MemoryMb: 393216, + GPU: 1, + }, +} + type packetManagerRest struct { baseURL string clusterName string @@ -338,7 +454,7 @@ func (mgr *packetManagerRest) getNodes(nodegroup string) 
([]string, error) { nodes := []string{} for _, d := range devices.Devices { if Contains(d.Tags, "k8s-cluster-"+mgr.clusterName) && Contains(d.Tags, "k8s-nodepool-"+nodegroup) { - nodes = append(nodes, d.ID) + nodes = append(nodes, fmt.Sprintf("packet://%s", d.ID)) } } return nodes, err @@ -392,10 +508,35 @@ func (mgr *packetManagerRest) deleteNodes(nodegroup string, nodes []NodeRef, upd return nil } -// templateNodeInfo returns a NodeInfo with a node template based on the VM flavor -// that is used to created minions in a given node group. -func (mgr *packetManagerRest) templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error) { - return nil, cloudprovider.ErrNotImplemented +// templateNodeInfo returns a NodeInfo with a node template based on the packet plan +// that is used to create nodes in a given node group. +func (mgr *packetManagerRest) templateNodeInfo(nodegroup string) (*schedulernodeinfo.NodeInfo, error) { + node := apiv1.Node{} + nodeName := fmt.Sprintf("%s-asg-%d", nodegroup, rand.Int63()) + node.ObjectMeta = metav1.ObjectMeta{ + Name: nodeName, + SelfLink: fmt.Sprintf("/api/v1/nodes/%s", nodeName), + Labels: map[string]string{}, + } + node.Status = apiv1.NodeStatus{ + Capacity: apiv1.ResourceList{}, + } + + packetPlan := InstanceTypes[mgr.plan] + if packetPlan == nil { + return nil, fmt.Errorf("packet plan %q not supported", mgr.plan) + } + node.Status.Capacity[apiv1.ResourcePods] = *resource.NewQuantity(110, resource.DecimalSI) + node.Status.Capacity[apiv1.ResourceCPU] = *resource.NewQuantity(packetPlan.CPU, resource.DecimalSI) + node.Status.Capacity[gpu.ResourceNvidiaGPU] = *resource.NewQuantity(packetPlan.GPU, resource.DecimalSI) + node.Status.Capacity[apiv1.ResourceMemory] = *resource.NewQuantity(packetPlan.MemoryMb*1024*1024, resource.DecimalSI) + + node.Status.Allocatable = node.Status.Capacity + node.Status.Conditions = cloudprovider.BuildReadyConditions() + + nodeInfo := 
schedulernodeinfo.NewNodeInfo(cloudprovider.BuildKubeProxy(nodegroup)) + nodeInfo.SetNode(&node) + return nodeInfo, nil } func renderTemplate(str string, vars interface{}) string { diff --git a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go index ea9eb19625f0..b9fa0eed32ec 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go @@ -30,6 +30,7 @@ import ( "github.com/stretchr/testify/mock" ) +// Sensitive information used in tests, such as root passwords are fake const listPacketDevicesResponse = ` {"devices":[{"id":"4d47a322-47e6-40cc-8402-e22b9933fb8f","short_id":"4d47a322","hostname":"k8s-worker-1","description":null,"state":"active","tags":["k8s-nodepool-pool1","k8s-cluster-cluster1"],"image_url":null,"billing_cycle":"hourly","user":"root","iqn":"iqn.2019-05.net.packet:device.4d47a322","locked":false,"bonding_mode":5,"created_at":"2019-05-24T11:59:52Z","updated_at":"2019-08-22T14:45:27Z","ipxe_script_url":null,"always_pxe":false,"storage":{},"customdata":{},"operating_system":{"id":"201bc259-982b-41a1-a4c1-bba01ce71f51","slug":"ubuntu_18_04","name":"Ubuntu 18.04 
LTS","distro":"ubuntu","version":"18.04","provisionable_on":["baremetal_2a4","baremetal_2a5","c1.bloomberg.x86","c1.large.arm","baremetal_2a","c1.large.arm.xda","baremetal_2a2","c1.small.x86","baremetal_1","c1.xlarge.x86","baremetal_3","c2.large.anbox","c2.large.arm","c2.medium.x86","c2.small.x86","c3.medium.x86","c3.medium.x86","cpe1.c1.r720xd","cpe1.c1.r720xd","cpe1.g1.4028gr","cpe1.g1.4028gr","cpe1.m2.r640","cpe1.m2.r640","cpe1.s1.r730","cpe1.s1.r730","d1f.optane.x86","d1p.optane.x86","g2.large.x86","m1.xlarge.x86","baremetal_2","m2.xlarge.x86","n2.xlarge.x86","s1.large.x86","baremetal_s","t1.small.x86","baremetal_0","x1.small.x86","baremetal_1e","x2.graphcore.x86","x2.xlarge.x86"],"preinstallable":false,"pricing":{},"licensed":false},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"project":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df"},"ssh_keys":[{"href":"/ssh-keys/1fd6e5a9-966d-4937-89b2-e269ff1d447a"},{"href":"/ssh-keys/22f9fd4c-ab3d-47f5-92ac-7d5703084d3d"},{"href":"/ssh-keys/23640808-0983-4b5c-b251-2f7715f5450a"},{"href":"/ssh-keys/2af51313-c514-4145-907f-f7445ca2e5ad"}],"project_lite":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df"},"volumes":[],"ip_addresses":[{"id":"d06ebdf8-57d1-4b07-974d-c23c013901b7","address_family":4,"netmask":"255.255.255.254","created_at":"2019-05-24T11:59:55Z","details":null,"tags":[],"public":true,"cidr":31,"management":true,"manageable":true,"enabled":true,"global_ip":null,"customdata":{},"project":{},"project_lite":{},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, 
NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"assigned_to":{"href":"/devices/4d47a322-47e6-40cc-8402-e22b9933fb8f"},"interface":{"href":"/ports/283fb58f-eacc-4f21-818e-619029b859aa"},"network":"147.75.85.136","address":"147.75.85.137","gateway":"147.75.85.136","href":"/ips/d06ebdf8-57d1-4b07-974d-c23c013901b7"},{"id":"31466841-6877-4197-966d-80125b8bf2a0","address_family":4,"netmask":"255.255.255.240","created_at":"2019-05-24T11:59:55Z","details":null,"tags":[],"public":false,"cidr":28,"management":true,"manageable":true,"enabled":true,"global_ip":null,"customdata":{},"project":{},"project_lite":{},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"assigned_to":{"href":"/devices/4d47a322-47e6-40cc-8402-e22b9933fb8f"},"interface":{"href":"/ports/283fb58f-eacc-4f21-818e-619029b859aa"},"network":"10.80.125.144","address":"10.80.125.146","gateway":"10.80.125.145","href":"/ips/31466841-6877-4197-966d-80125b8bf2a0"}],"created_by":{"id":"bd4f24f3-33f0-46a6-9528-4e31e2ba6074","full_name":"Dimitris Moraitis","avatar_thumb_url":"https://www.gravatar.com/avatar/702119decab6288093449009ab5af843?d=mm","email":"dimo@mist.io"},"plan":{"id":"e69c0169-4726-46ea-98f1-939c9e8a3607","slug":"baremetal_0","name":"t1.small.x86","description":"Our Type 0 configuration is a general use \"cloud killer\" server, with a Intel Atom 2.4Ghz processor and 8GB of RAM.","line":"baremetal","specs":{"cpus":[{"count":1,"type":"Intel Atom C2550 @ 
2.4Ghz"}],"memory":{"total":"8GB"},"drives":[{"count":1,"size":"80GB","type":"SSD"}],"nics":[{"count":2,"type":"1Gbps"}],"features":{"raid":false,"txt":true}},"legacy":false,"deployment_types":["on_demand","spot_market"],"available_in":[{"href":"/facilities/8e6470b3-b75e-47d1-bb93-45b225750975"},{"href":"/facilities/2b70eb8f-fa18-47c0-aba7-222a842362fd"},{"href":"/facilities/8ea03255-89f9-4e62-9d3f-8817db82ceed"},{"href":"/facilities/e1e9c52e-a0bc-4117-b996-0fc94843ea09"}],"class":"t1.small.x86","pricing":{"hour":0.07}},"userdata":"","switch_uuid":"a7994efc","network_ports":[{"id":"283fb58f-eacc-4f21-818e-619029b859aa","type":"NetworkBondPort","name":"bond0","data":{"bonded":true},"network_type":"layer3","native_virtual_network":null,"hardware":{"href":"/hardware/6c6046ef-df41-4c04-8522-69e4ec798024"},"virtual_networks":[],"connected_port":null,"href":"/ports/283fb58f-eacc-4f21-818e-619029b859aa"},{"id":"c0dcccb1-0c57-4be6-970a-3589cbb34355","type":"NetworkPort","name":"eth0","data":{"mac":"0c:c4:7a:e5:42:ce","bonded":true},"bond":{"id":"283fb58f-eacc-4f21-818e-619029b859aa","name":"bond0"},"native_virtual_network":null,"hardware":{"href":"/hardware/6c6046ef-df41-4c04-8522-69e4ec798024"},"virtual_networks":[],"connected_port":{"href":"/ports/57412b1f-f1fd-4071-8db2-5bc6494b9438"},"href":"/ports/c0dcccb1-0c57-4be6-970a-3589cbb34355"},{"id":"8fb7496b-32c1-45f4-8120-4b8faeaa57c3","type":"NetworkPort","name":"eth1","data":{"mac":"0c:c4:7a:e5:42:cf","bonded":true},"bond":{"id":"283fb58f-eacc-4f21-818e-619029b859aa","name":"bond0"},"native_virtual_network":null,"hardware":{"href":"/hardware/6c6046ef-df41-4c04-8522-69e4ec798024"},"virtual_networks":[],"connected_port":{"href":"/ports/d0803efe-50e7-492b-9044-b3ce9af609cc"},"href":"/ports/8fb7496b-32c1-45f4-8120-4b8faeaa57c3"}],"href":"/devices/4d47a322-47e6-40cc-8402-e22b9933fb8f"},{"id":"8a56bcad-e26f-4b0d-8d46-f490917ab2a3","short_id":"8a56bcad","hostname":"k8s-master-1","description":null,"state":"active","tags":["k8s-cl
uster-cluster1"],"image_url":null,"billing_cycle":"hourly","user":"root","iqn":"iqn.2019-05.net.packet:device.8a56bcad","locked":false,"bonding_mode":5,"created_at":"2019-05-24T11:59:51Z","updated_at":"2019-08-22T14:33:00Z","ipxe_script_url":null,"always_pxe":false,"storage":{},"customdata":{},"operating_system":{"id":"201bc259-982b-41a1-a4c1-bba01ce71f51","slug":"ubuntu_18_04","name":"Ubuntu 18.04 LTS","distro":"ubuntu","version":"18.04","provisionable_on":["baremetal_2a4","baremetal_2a5","c1.bloomberg.x86","c1.large.arm","baremetal_2a","c1.large.arm.xda","baremetal_2a2","c1.small.x86","baremetal_1","c1.xlarge.x86","baremetal_3","c2.large.anbox","c2.large.arm","c2.medium.x86","c2.small.x86","c3.medium.x86","c3.medium.x86","cpe1.c1.r720xd","cpe1.c1.r720xd","cpe1.g1.4028gr","cpe1.g1.4028gr","cpe1.m2.r640","cpe1.m2.r640","cpe1.s1.r730","cpe1.s1.r730","d1f.optane.x86","d1p.optane.x86","g2.large.x86","m1.xlarge.x86","baremetal_2","m2.xlarge.x86","n2.xlarge.x86","s1.large.x86","baremetal_s","t1.small.x86","baremetal_0","x1.small.x86","baremetal_1e","x2.graphcore.x86","x2.xlarge.x86"],"preinstallable":false,"pricing":{},"licensed":false},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, 
NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"project":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df"},"ssh_keys":[{"href":"/ssh-keys/1fd6e5a9-966d-4937-89b2-e269ff1d447a"}],"project_lite":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df"},"volumes":[],"ip_addresses":[{"id":"f77aa56c-a781-441d-bb40-c639db16a3cc","address_family":4,"netmask":"255.255.255.254","created_at":"2019-05-24T11:59:54Z","details":null,"tags":[],"public":true,"cidr":31,"management":true,"manageable":true,"enabled":true,"global_ip":null,"customdata":{},"project":{},"project_lite":{},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"assigned_to":{"href":"/devices/8a56bcad-e26f-4b0d-8d46-f490917ab2a3"},"interface":{"href":"/ports/51a3ad77-4eb5-4f81-ab6f-8de3e1db15e1"},"network":"147.75.102.14","address":"147.75.102.15","gateway":"147.75.102.14","href":"/ips/f77aa56c-a781-441d-bb40-c639db16a3cc"},{"id":"24502d6d-a633-4650-9650-6c9d3de50b72","address_family":4,"netmask":"255.255.255.240","created_at":"2019-05-24T11:59:54Z","details":null,"tags":[],"public":false,"cidr":28,"management":true,"manageable":true,"enabled":true,"global_ip":null,"customdata":{},"project":{},"project_lite":{},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, 
NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"assigned_to":{"href":"/devices/8a56bcad-e26f-4b0d-8d46-f490917ab2a3"},"interface":{"href":"/ports/51a3ad77-4eb5-4f81-ab6f-8de3e1db15e1"},"network":"10.80.125.128","address":"10.80.125.130","gateway":"10.80.125.129","href":"/ips/24502d6d-a633-4650-9650-6c9d3de50b72"}],"created_by":{"id":"bd4f24f3-33f0-46a6-9528-4e31e2ba6074","full_name":"Dimitris Moraitis","avatar_thumb_url":"https://www.gravatar.com/avatar/702119decab6288093449009ab5af843?d=mm","email":"dimo@mist.io"},"plan":{"id":"e69c0169-4726-46ea-98f1-939c9e8a3607","slug":"baremetal_0","name":"t1.small.x86","description":"Our Type 0 configuration is a general use \"cloud killer\" server, with a Intel Atom 2.4Ghz processor and 8GB of RAM.","line":"baremetal","specs":{"cpus":[{"count":1,"type":"Intel Atom C2550 @ 
2.4Ghz"}],"memory":{"total":"8GB"},"drives":[{"count":1,"size":"80GB","type":"SSD"}],"nics":[{"count":2,"type":"1Gbps"}],"features":{"raid":false,"txt":true}},"legacy":false,"deployment_types":["on_demand","spot_market"],"available_in":[{"href":"/facilities/8e6470b3-b75e-47d1-bb93-45b225750975"},{"href":"/facilities/2b70eb8f-fa18-47c0-aba7-222a842362fd"},{"href":"/facilities/8ea03255-89f9-4e62-9d3f-8817db82ceed"},{"href":"/facilities/e1e9c52e-a0bc-4117-b996-0fc94843ea09"}],"class":"t1.small.x86","pricing":{"hour":0.07}},"userdata":"","switch_uuid":"ddb086ff","network_ports":[{"id":"51a3ad77-4eb5-4f81-ab6f-8de3e1db15e1","type":"NetworkBondPort","name":"bond0","data":{"bonded":true},"network_type":"layer3","native_virtual_network":null,"hardware":{"href":"/hardware/7f262628-7db2-4b4b-90b1-41529818c7c0"},"virtual_networks":[],"connected_port":null,"href":"/ports/51a3ad77-4eb5-4f81-ab6f-8de3e1db15e1"},{"id":"2281afe5-c934-407a-abe0-b1f315291d3d","type":"NetworkPort","name":"eth0","data":{"mac":"0c:c4:7a:e5:43:04","bonded":true},"bond":{"id":"51a3ad77-4eb5-4f81-ab6f-8de3e1db15e1","name":"bond0"},"native_virtual_network":null,"hardware":{"href":"/hardware/7f262628-7db2-4b4b-90b1-41529818c7c0"},"virtual_networks":[],"connected_port":{"href":"/ports/cba6a9dd-550d-4e11-a93c-3a7b83bfaa65"},"href":"/ports/2281afe5-c934-407a-abe0-b1f315291d3d"},{"id":"1f351695-103b-4d92-9c7e-a6ce03904b12","type":"NetworkPort","name":"eth1","data":{"mac":"0c:c4:7a:e5:43:05","bonded":true},"bond":{"id":"51a3ad77-4eb5-4f81-ab6f-8de3e1db15e1","name":"bond0"},"native_virtual_network":null,"hardware":{"href":"/hardware/7f262628-7db2-4b4b-90b1-41529818c7c0"},"virtual_networks":[],"connected_port":{"href":"/ports/c7466539-f5c6-41b9-9bb2-97490d6b7c10"},"href":"/ports/1f351695-103b-4d92-9c7e-a6ce03904b12"}],"href":"/devices/8a56bcad-e26f-4b0d-8d46-f490917ab2a3"}],"meta":{"first":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df/devices?page=1"},"previous":null,"self":{"href":"/projects/3d27fd13-046
6-4878-be22-9a4b5595a3df/devices?page=1"},"next":null,"last":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df/devices?page=1"},"current_page":1,"last_page":1,"total":2}}` diff --git a/cluster-autoscaler/cloudprovider/packet/packet_node_group.go b/cluster-autoscaler/cloudprovider/packet/packet_node_group.go index 1ef2f311f072..7ef005b5aae9 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_node_group.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_node_group.go @@ -58,7 +58,7 @@ const ( waitForStatusTimeStep = 30 * time.Second waitForUpdateStatusTimeout = 2 * time.Minute waitForCompleteStatusTimout = 10 * time.Minute - scaleToZeroSupported = false + scaleToZeroSupported = true // Time that the goroutine that first acquires clusterUpdateMutex // in deleteNodes should wait for other synchronous calls to deleteNodes. From a86274ce0c77e7e19577e1e90e3e6ea12abbaa07 Mon Sep 17 00:00:00 2001 From: v-pap Date: Thu, 19 Mar 2020 19:27:52 +0000 Subject: [PATCH 3/4] Add support for multiple nodepools in Packet --- .../cloudprovider/packet/README.md | 2 +- .../cluster-autoscaler-deployment.yaml | 7 +- .../examples/cluster-autoscaler-secret.yaml | 13 +- .../packet/packet_cloud_provider.go | 29 ++- .../cloudprovider/packet/packet_manager.go | 3 +- .../packet/packet_manager_rest.go | 205 ++++++++++++++---- .../packet/packet_manager_rest_test.go | 62 ++++-- .../packet/packet_node_group_test.go | 114 ++++++++-- 8 files changed, 335 insertions(+), 100 deletions(-) diff --git a/cluster-autoscaler/cloudprovider/packet/README.md b/cluster-autoscaler/cloudprovider/packet/README.md index 5200e9c3e12c..44dbcd6b1f47 100644 --- a/cluster-autoscaler/cloudprovider/packet/README.md +++ b/cluster-autoscaler/cloudprovider/packet/README.md @@ -44,7 +44,7 @@ to match your cluster. 
| Argument | Usage | |------------------|------------------------------------------------------------------------------------------------------------| | --cluster-name | The name of your Kubernetes cluster. It should correspond to the tags that have been applied to the nodes. | -| --nodes | Of the form `min:max:NodepoolName`. Only a single node pool is currently supported. | +| --nodes | Of the form `min:max:NodepoolName`. For multiple nodepools you can add the same argument multiple times. E.g. for pool1, pool2 you would add `--nodes=0:10:pool1` and `--nodes=0:10:pool2` | ## Notes diff --git a/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-deployment.yaml b/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-deployment.yaml index df84a45c1f24..69de583db995 100644 --- a/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-deployment.yaml +++ b/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-deployment.yaml @@ -130,7 +130,7 @@ spec: spec: # Node affinity is used to force cluster-autoscaler to stick # to the master node. This allows the cluster to reliably downscale - # to zero nodes when needed. + # to zero worker nodes when needed. affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -161,6 +161,11 @@ spec: secretKeyRef: name: cluster-autoscaler-packet key: authtoken + # You can take advantage of multiple nodepools by adding + # extra arguments on the cluster-autoscaler command. + # e.g. 
for pool1, pool2 + # --nodes=0:10:pool1 + # --nodes=0:10:pool2 command: - ./cluster-autoscaler - --alsologtostderr diff --git a/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-secret.yaml b/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-secret.yaml index 1412e415c978..bf4fa57049b9 100644 --- a/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-secret.yaml +++ b/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-secret.yaml @@ -19,7 +19,18 @@ stringData: # The version can be altered by decoding the cloudinit and updating it to # the desired version cloud-config: |- - [Global] + [nodegroupdef "default"] + project-id=YOUR_PACKET_PROJECT_ID + api-server-endpoint=YOUR_KUBERNETES_API_IP_ADDRESS:YOUR_KUBERNETES_API_PORT + facility=ams1 + os=ubuntu_18_04 + plan=t1.small.x86 + billing=hourly + cloudinit=IyEvYmluL2Jhc2gKZXhwb3J0IERFQklBTl9GUk9OVEVORD1ub25pbnRlcmFjdGl2ZQphcHQtZ2V0IHVwZGF0ZSAmJiBhcHQtZ2V0IGluc3RhbGwgLXkgYXB0LXRyYW5zcG9ydC1odHRwcyBjYS1jZXJ0aWZpY2F0ZXMgY3VybCBzb2Z0d2FyZS1wcm9wZXJ0aWVzLWNvbW1vbgpjdXJsIC1mc1NMIGh0dHBzOi8vZG93bmxvYWQuZG9ja2VyLmNvbS9saW51eC91YnVudHUvZ3BnIHwgYXB0LWtleSBhZGQgLQpjdXJsIC1zIGh0dHBzOi8vcGFja2FnZXMuY2xvdWQuZ29vZ2xlLmNvbS9hcHQvZG9jL2FwdC1rZXkuZ3BnIHwgYXB0LWtleSBhZGQgLQpjYXQgPDxFT0YgPi9ldGMvYXB0L3NvdXJjZXMubGlzdC5kL2t1YmVybmV0ZXMubGlzdApkZWIgaHR0cHM6Ly9hcHQua3ViZXJuZXRlcy5pby8ga3ViZXJuZXRlcy14ZW5pYWwgbWFpbgpFT0YKYWRkLWFwdC1yZXBvc2l0b3J5ICAgImRlYiBbYXJjaD1hbWQ2NF0gaHR0cHM6Ly9kb3dubG9hZC5kb2NrZXIuY29tL2xpbnV4L3VidW50dSAgICQobHNiX3JlbGVhc2UgLWNzKSAgIHN0YWJsZSIKYXB0LWdldCB1cGRhdGUKYXB0LWdldCB1cGdyYWRlIC15CmFwdC1nZXQgaW5zdGFsbCAteSBrdWJlbGV0PTEuMTcuNC0wMCBrdWJlYWRtPTEuMTcuNC0wMCBrdWJlY3RsPTEuMTcuNC0wMAphcHQtbWFyayBob2xkIGt1YmVsZXQga3ViZWFkbSBrdWJlY3RsCmN1cmwgLWZzU0wgaHR0cHM6Ly9kb3dubG9hZC5kb2NrZXIuY29tL2xpbnV4L3VidW50dS9ncGcgfCBhcHQta2V5IGFkZCAtCmFkZC1hcHQtcmVwb3NpdG9yeSAiZGViIFthcmNoPWFtZDY0XSBodHRwczovL2Rvd25sb2FkLmRvY2tlci5jb20vbGludXgvdWJ1bnR1IGJpb25pYyBzdGFibGUiCmFwdCB
1cGRhdGUKYXB0IGluc3RhbGwgLXkgZG9ja2VyLWNlPTE4LjA2LjJ+Y2V+My0wfnVidW50dQpjYXQgPiAvZXRjL2RvY2tlci9kYWVtb24uanNvbiA8PEVPRgp7CiAgImV4ZWMtb3B0cyI6IFsibmF0aXZlLmNncm91cGRyaXZlcj1zeXN0ZW1kIl0sCiAgImxvZy1kcml2ZXIiOiAianNvbi1maWxlIiwKICAibG9nLW9wdHMiOiB7CiAgICAibWF4LXNpemUiOiAiMTAwbSIKICB9LAogICJzdG9yYWdlLWRyaXZlciI6ICJvdmVybGF5MiIKfQpFT0YKbWtkaXIgLXAgL2V0Yy9zeXN0ZW1kL3N5c3RlbS9kb2NrZXIuc2VydmljZS5kCnN5c3RlbWN0bCBkYWVtb24tcmVsb2FkCnN5c3RlbWN0bCByZXN0YXJ0IGRvY2tlcgpzd2Fwb2ZmIC1hCm12IC9ldGMvZnN0YWIgL2V0Yy9mc3RhYi5vbGQgJiYgZ3JlcCAtdiBzd2FwIC9ldGMvZnN0YWIub2xkID4gL2V0Yy9mc3RhYgpjYXQgPDxFT0YgfCB0ZWUgL2V0Yy9kZWZhdWx0L2t1YmVsZXQKS1VCRUxFVF9FWFRSQV9BUkdTPS0tY2xvdWQtcHJvdmlkZXI9ZXh0ZXJuYWwKRU9GCmt1YmVhZG0gam9pbiAtLWRpc2NvdmVyeS10b2tlbi11bnNhZmUtc2tpcC1jYS12ZXJpZmljYXRpb24gLS10b2tlbiB7ey5Cb290c3RyYXBUb2tlbklEfX0ue3suQm9vdHN0cmFwVG9rZW5TZWNyZXR9fSB7ey5BUElTZXJ2ZXJFbmRwb2ludH19Cg== + reservation=prefer + hostname-pattern=k8s-{{.ClusterName}}-{{.NodeGroup}}-{{.RandString8}} + + [nodegroupdef "pool2"] project-id=YOUR_PACKET_PROJECT_ID api-server-endpoint=YOUR_KUBERNETES_API_IP_ADDRESS:YOUR_KUBERNETES_API_PORT facility=ams1 diff --git a/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go b/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go index de57adf6c560..e783be90d6b8 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go @@ -17,8 +17,10 @@ limitations under the License. package packet import ( + "fmt" "io" "os" + "regexp" "sync" apiv1 "k8s.io/api/core/v1" @@ -45,14 +47,14 @@ var ( // packetCloudProvider implements CloudProvider interface from cluster-autoscaler/cloudprovider module. 
type packetCloudProvider struct { - packetManager *packetManager + packetManager packetManager resourceLimiter *cloudprovider.ResourceLimiter nodeGroups []packetNodeGroup } func buildPacketCloudProvider(packetManager packetManager, resourceLimiter *cloudprovider.ResourceLimiter) (cloudprovider.CloudProvider, error) { pcp := &packetCloudProvider{ - packetManager: &packetManager, + packetManager: packetManager, resourceLimiter: resourceLimiter, nodeGroups: []packetNodeGroup{}, } @@ -77,8 +79,8 @@ func (pcp *packetCloudProvider) GetAvailableGPUTypes() map[string]struct{} { // NodeGroups returns all node groups managed by this cloud provider. func (pcp *packetCloudProvider) NodeGroups() []cloudprovider.NodeGroup { groups := make([]cloudprovider.NodeGroup, len(pcp.nodeGroups)) - for i, group := range pcp.nodeGroups { - groups[i] = &group + for i := range pcp.nodeGroups { + groups[i] = &pcp.nodeGroups[i] } return groups } @@ -95,7 +97,16 @@ func (pcp *packetCloudProvider) NodeGroupForNode(node *apiv1.Node) (cloudprovide if _, found := node.ObjectMeta.Labels["node-role.kubernetes.io/master"]; found { return nil, nil } - return &(pcp.nodeGroups[0]), nil + nodeGroupId, err := pcp.packetManager.NodeGroupForNode(node.ObjectMeta.Labels, node.Spec.ProviderID) + if err != nil { + return nil, err + } + for i, nodeGroup := range pcp.nodeGroups { + if nodeGroup.Id() == nodeGroupId { + return &(pcp.nodeGroups[i]), nil + } + } + return nil, fmt.Errorf("Could not find group for node: %s", node.Spec.ProviderID) } // Pricing is not implemented. 
@@ -164,9 +175,7 @@ func BuildPacket(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDisco klog.Fatalf("Must specify at least one node group with --nodes=::,...") } - if len(do.NodeGroupSpecs) > 1 { - klog.Fatalf("Packet autoscaler only supports a single nodegroup for now") - } + validNodepoolName := regexp.MustCompile(`^[a-z0-9A-Z]+[a-z0-9A-Z\-\.\_]*[a-z0-9A-Z]+$|^[a-z0-9A-Z]{1}$`) clusterUpdateLock := sync.Mutex{} @@ -176,6 +185,10 @@ func BuildPacket(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDisco klog.Fatalf("Could not parse node group spec %s: %v", nodegroupSpec, err) } + if !validNodepoolName.MatchString(spec.Name) || len(spec.Name) > 63 { + klog.Fatalf("Invalid nodepool name: %s\nMust be a valid kubernetes label value", spec.Name) + } + ng := packetNodeGroup{ packetManager: manager, id: spec.Name, diff --git a/cluster-autoscaler/cloudprovider/packet/packet_manager.go b/cluster-autoscaler/cloudprovider/packet/packet_manager.go index e2caac093e8e..f10671c21705 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_manager.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_manager.go @@ -45,7 +45,8 @@ type packetManager interface { getNodes(nodegroup string) ([]string, error) getNodeNames(nodegroup string) ([]string, error) deleteNodes(nodegroup string, nodes []NodeRef, updatedNodeCount int) error - templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error) + templateNodeInfo(nodegroup string) (*schedulernodeinfo.NodeInfo, error) + NodeGroupForNode(labels map[string]string, nodeId string) (string, error) } // createPacketManager creates the desired implementation of packetManager. 
diff --git a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go index 1db3ca9c6722..67437d641e61 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go @@ -27,6 +27,7 @@ import ( "math/rand" "net/http" "os" + "strings" "text/template" "time" @@ -38,6 +39,8 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" klog "k8s.io/klog/v2" + kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" + schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) type instanceType struct { @@ -47,6 +50,11 @@ type instanceType struct { GPU int64 } +var ( + maxHttpRetries int = 3 + httpRetryDelay time.Duration = 100 * time.Millisecond +) + // InstanceTypes is a map of packet resources var InstanceTypes = map[string]*instanceType{ "c1.large.arm": { @@ -153,7 +161,7 @@ var InstanceTypes = map[string]*instanceType{ }, } -type packetManagerRest struct { +type packetManagerNodePool struct { baseURL string clusterName string projectID string @@ -168,8 +176,12 @@ type packetManagerRest struct { waitTimeStep time.Duration } -// ConfigGlobal options only include the project-id for now -type ConfigGlobal struct { +type packetManagerRest struct { + packetManagerNodePools map[string]*packetManagerNodePool +} + +// ConfigNodepool options only include the project-id for now +type ConfigNodepool struct { ClusterName string `gcfg:"cluster-name"` ProjectID string `gcfg:"project-id"` APIServerEndpoint string `gcfg:"api-server-endpoint"` @@ -184,7 +196,8 @@ type ConfigGlobal struct { // ConfigFile is used to read and store information from the cloud configuration file type ConfigFile struct { - Global ConfigGlobal `gcfg:"global"` + DefaultNodegroupdef ConfigNodepool `gcfg:"global"` + Nodegroupdef map[string]*ConfigNodepool `gcfg:"nodegroupdef"` } // Device represents a Packet device @@ 
-229,6 +242,7 @@ type CloudInitTemplateData struct { BootstrapTokenID string BootstrapTokenSecret string APIServerEndpoint string + NodeGroup string } // HostnameTemplateData represents the template variables used to construct host names for new nodes @@ -270,37 +284,59 @@ func createPacketManagerRest(configReader io.Reader, discoverOpts cloudprovider. } } - if opts.ClusterName == "" && cfg.Global.ClusterName == "" { - klog.Fatalf("The cluster-name parameter must be set") - } else if opts.ClusterName != "" && cfg.Global.ClusterName == "" { - cfg.Global.ClusterName = opts.ClusterName + var manager packetManagerRest + manager.packetManagerNodePools = make(map[string]*packetManagerNodePool) + + if _, ok := cfg.Nodegroupdef["default"]; !ok { + cfg.Nodegroupdef["default"] = &cfg.DefaultNodegroupdef + } + + if *cfg.Nodegroupdef["default"] == (ConfigNodepool{}) { + klog.Fatalf("No \"default\" or [Global] nodepool definition was found") } - manager := packetManagerRest{ - baseURL: "https://api.packet.net", - clusterName: cfg.Global.ClusterName, - projectID: cfg.Global.ProjectID, - apiServerEndpoint: cfg.Global.APIServerEndpoint, - facility: cfg.Global.Facility, - plan: cfg.Global.Plan, - os: cfg.Global.OS, - billing: cfg.Global.Billing, - cloudinit: cfg.Global.CloudInit, - reservation: cfg.Global.Reservation, - hostnamePattern: cfg.Global.HostnamePattern, + for nodepool := range cfg.Nodegroupdef { + if opts.ClusterName == "" && cfg.Nodegroupdef[nodepool].ClusterName == "" { + klog.Fatalf("The cluster-name parameter must be set") + } else if opts.ClusterName != "" && cfg.Nodegroupdef[nodepool].ClusterName == "" { + cfg.Nodegroupdef[nodepool].ClusterName = opts.ClusterName + } + + manager.packetManagerNodePools[nodepool] = &packetManagerNodePool{ + baseURL: "https://api.packet.net", + clusterName: cfg.Nodegroupdef[nodepool].ClusterName, + projectID: cfg.Nodegroupdef["default"].ProjectID, + apiServerEndpoint: cfg.Nodegroupdef["default"].APIServerEndpoint, + facility: 
cfg.Nodegroupdef[nodepool].Facility, + plan: cfg.Nodegroupdef[nodepool].Plan, + os: cfg.Nodegroupdef[nodepool].OS, + billing: cfg.Nodegroupdef[nodepool].Billing, + cloudinit: cfg.Nodegroupdef[nodepool].CloudInit, + reservation: cfg.Nodegroupdef[nodepool].Reservation, + hostnamePattern: cfg.Nodegroupdef[nodepool].HostnamePattern, + } } + return &manager, nil } func (mgr *packetManagerRest) listPacketDevices() (*Devices, error) { var jsonStr = []byte(``) packetAuthToken := os.Getenv("PACKET_AUTH_TOKEN") - url := mgr.baseURL + "/projects/" + mgr.projectID + "/devices" + url := mgr.getNodePoolDefinition("default").baseURL + "/projects/" + mgr.getNodePoolDefinition("default").projectID + "/devices" req, _ := http.NewRequest("GET", url, bytes.NewBuffer(jsonStr)) req.Header.Set("X-Auth-Token", packetAuthToken) req.Header.Set("Content-Type", "application/json") client := &http.Client{} - resp, err := client.Do(req) + var err error + var resp *http.Response + for i := 0; i < maxHttpRetries; i++ { + resp, err = client.Do(req) + if err == nil || (resp != nil && resp.StatusCode < 500 && resp.StatusCode != 0) { + break + } + time.Sleep(httpRetryDelay) + } if err != nil { panic(err) // klog.Fatalf("Error listing nodes: %v", err) @@ -320,13 +356,65 @@ func (mgr *packetManagerRest) listPacketDevices() (*Devices, error) { return &devices, fmt.Errorf(resp.Status, resp.Body) } +func (mgr *packetManagerRest) getPacketDevice(id string) (*Device, error) { + var jsonStr = []byte(``) + packetAuthToken := os.Getenv("PACKET_AUTH_TOKEN") + url := mgr.getNodePoolDefinition("default").baseURL + "/devices/" + id + req, _ := http.NewRequest("GET", url, bytes.NewBuffer(jsonStr)) + req.Header.Set("X-Auth-Token", packetAuthToken) + req.Header.Set("Content-Type", "application/json") + client := &http.Client{} + var err error + var resp *http.Response + for i := 0; i < maxHttpRetries; i++ { + resp, err = client.Do(req) + if err == nil || (resp != nil && resp.StatusCode < 500 && resp.StatusCode != 0) 
{ + break + } + time.Sleep(httpRetryDelay) + } + if err != nil { + panic(err) + // klog.Fatalf("Error listing nodes: %v", err) + } + defer resp.Body.Close() + + klog.Infof("response Status: %s", resp.Status) + + var device Device + + if "200 OK" == resp.Status { + body, _ := ioutil.ReadAll(resp.Body) + json.Unmarshal([]byte(body), &device) + return &device, nil + } + + return &device, fmt.Errorf(resp.Status, resp.Body) +} + +func (mgr *packetManagerRest) NodeGroupForNode(labels map[string]string, nodeId string) (string, error) { + if nodegroup, ok := labels["pool"]; ok { + return nodegroup, nil + } + device, err := mgr.getPacketDevice(strings.TrimPrefix(nodeId, "packet://")) + if err != nil { + return "", fmt.Errorf("Could not find group for node: %s %s", nodeId, err) + } + for _, t := range device.Tags { + if strings.HasPrefix(t, "k8s-nodepool-") { + return strings.TrimPrefix(t, "k8s-nodepool-"), nil + } + } + return "", fmt.Errorf("Could not find group for node: %s", nodeId) +} + // nodeGroupSize gets the current size of the nodegroup as reported by packet tags. 
func (mgr *packetManagerRest) nodeGroupSize(nodegroup string) (int, error) { devices, _ := mgr.listPacketDevices() // Get the count of devices tagged as nodegroup members count := 0 for _, d := range devices.Devices { - if Contains(d.Tags, "k8s-cluster-"+mgr.clusterName) && Contains(d.Tags, "k8s-nodepool-"+nodegroup) { + if Contains(d.Tags, "k8s-cluster-"+mgr.getNodePoolDefinition(nodegroup).clusterName) && Contains(d.Tags, "k8s-nodepool-"+nodegroup) { count++ } } @@ -349,40 +437,41 @@ func (mgr *packetManagerRest) createNode(cloudinit, nodegroup string) { udvars := CloudInitTemplateData{ BootstrapTokenID: os.Getenv("BOOTSTRAP_TOKEN_ID"), BootstrapTokenSecret: os.Getenv("BOOTSTRAP_TOKEN_SECRET"), - APIServerEndpoint: mgr.apiServerEndpoint, + APIServerEndpoint: mgr.getNodePoolDefinition(nodegroup).apiServerEndpoint, + NodeGroup: nodegroup, } ud := renderTemplate(cloudinit, udvars) hnvars := HostnameTemplateData{ - ClusterName: mgr.clusterName, + ClusterName: mgr.getNodePoolDefinition(nodegroup).clusterName, NodeGroup: nodegroup, RandString8: randString8(), } - hn := renderTemplate(mgr.hostnamePattern, hnvars) + hn := renderTemplate(mgr.getNodePoolDefinition(nodegroup).hostnamePattern, hnvars) reservation := "" - if mgr.reservation == "require" || mgr.reservation == "prefer" { + if mgr.getNodePoolDefinition(nodegroup).reservation == "require" || mgr.getNodePoolDefinition(nodegroup).reservation == "prefer" { reservation = "next-available" } cr := DeviceCreateRequest{ Hostname: hn, - Facility: []string{mgr.facility}, - Plan: mgr.plan, - OS: mgr.os, - ProjectID: mgr.projectID, - BillingCycle: mgr.billing, + Facility: []string{mgr.getNodePoolDefinition(nodegroup).facility}, + Plan: mgr.getNodePoolDefinition(nodegroup).plan, + OS: mgr.getNodePoolDefinition(nodegroup).os, + ProjectID: mgr.getNodePoolDefinition(nodegroup).projectID, + BillingCycle: mgr.getNodePoolDefinition(nodegroup).billing, UserData: ud, - Tags: []string{"k8s-cluster-" + mgr.clusterName, "k8s-nodepool-" 
+ nodegroup}, + Tags: []string{"k8s-cluster-" + mgr.getNodePoolDefinition(nodegroup).clusterName, "k8s-nodepool-" + nodegroup}, HardwareReservationID: reservation, } - resp, err := createDevice(&cr, mgr.baseURL) + resp, err := createDevice(&cr, mgr.getNodePoolDefinition(nodegroup).baseURL) if err != nil || resp.StatusCode > 299 { // If reservation is preferred but not available, retry provisioning as on-demand - if reservation != "" && mgr.reservation == "prefer" { + if reservation != "" && mgr.getNodePoolDefinition(nodegroup).reservation == "prefer" { klog.Infof("Reservation preferred but not available. Provisioning on-demand node.") cr.HardwareReservationID = "" - resp, err = createDevice(&cr, mgr.baseURL) + resp, err = createDevice(&cr, mgr.getNodePoolDefinition(nodegroup).baseURL) if err != nil { klog.Errorf("Failed to create device using Packet API: %v", err) panic(err) @@ -415,7 +504,7 @@ func (mgr *packetManagerRest) createNode(cloudinit, nodegroup string) { // createNodes provisions new nodes on packet and bootstraps them in the cluster. 
func (mgr *packetManagerRest) createNodes(nodegroup string, nodes int) error { klog.Infof("Updating node count to %d for nodegroup %s", nodes, nodegroup) - cloudinit, err := base64.StdEncoding.DecodeString(mgr.cloudinit) + cloudinit, err := base64.StdEncoding.DecodeString(mgr.getNodePoolDefinition(nodegroup).cloudinit) if err != nil { log.Fatal(err) return fmt.Errorf("Could not decode cloudinit script: %v", err) @@ -453,7 +542,7 @@ func (mgr *packetManagerRest) getNodes(nodegroup string) ([]string, error) { devices, err := mgr.listPacketDevices() nodes := []string{} for _, d := range devices.Devices { - if Contains(d.Tags, "k8s-cluster-"+mgr.clusterName) && Contains(d.Tags, "k8s-nodepool-"+nodegroup) { + if Contains(d.Tags, "k8s-cluster-"+mgr.getNodePoolDefinition(nodegroup).clusterName) && Contains(d.Tags, "k8s-nodepool-"+nodegroup) { nodes = append(nodes, fmt.Sprintf("packet://%s", d.ID)) } } @@ -466,7 +555,7 @@ func (mgr *packetManagerRest) getNodeNames(nodegroup string) ([]string, error) { devices, err := mgr.listPacketDevices() nodes := []string{} for _, d := range devices.Devices { - if Contains(d.Tags, "k8s-cluster-"+mgr.clusterName) && Contains(d.Tags, "k8s-nodepool-"+nodegroup) { + if Contains(d.Tags, "k8s-cluster-"+mgr.getNodePoolDefinition(nodegroup).clusterName) && Contains(d.Tags, "k8s-nodepool-"+nodegroup) { nodes = append(nodes, d.Hostname) } } @@ -484,11 +573,11 @@ func (mgr *packetManagerRest) deleteNodes(nodegroup string, nodes []NodeRef, upd // Get the count of devices tagged as nodegroup for _, d := range dl.Devices { klog.Infof("Checking device %v", d) - if Contains(d.Tags, "k8s-cluster-"+mgr.clusterName) && Contains(d.Tags, "k8s-nodepool-"+nodegroup) { + if Contains(d.Tags, "k8s-cluster-"+mgr.getNodePoolDefinition(nodegroup).clusterName) && Contains(d.Tags, "k8s-nodepool-"+nodegroup) { klog.Infof("nodegroup match %s %s", d.Hostname, n.Name) if d.Hostname == n.Name { klog.V(1).Infof("Matching Packet Device %s - %s", d.Hostname, d.ID) - req, _ 
:= http.NewRequest("DELETE", mgr.baseURL+"/devices/"+d.ID, bytes.NewBuffer([]byte(""))) + req, _ := http.NewRequest("DELETE", mgr.getNodePoolDefinition(nodegroup).baseURL+"/devices/"+d.ID, bytes.NewBuffer([]byte(""))) req.Header.Set("X-Auth-Token", packetAuthToken) req.Header.Set("Content-Type", "application/json") @@ -508,6 +597,20 @@ func (mgr *packetManagerRest) deleteNodes(nodegroup string, nodes []NodeRef, upd return nil } +func buildGenericLabels(nodegroup string, instanceType string) map[string]string { + result := make(map[string]string) + + result[kubeletapis.LabelArch] = "amd64" + result[kubeletapis.LabelOS] = "linux" + result[apiv1.LabelInstanceType] = instanceType + result[apiv1.LabelZoneRegion] = "" + result[apiv1.LabelZoneFailureDomain] = "0" + result[apiv1.LabelHostname] = "" + result["pool"] = nodegroup + + return result +} + // templateNodeInfo returns a NodeInfo with a node template based on the packet plan // that is used to create nodes in a given node group. func (mgr *packetManagerRest) templateNodeInfo(nodegroup string) (*schedulernodeinfo.NodeInfo, error) { @@ -522,9 +625,9 @@ func (mgr *packetManagerRest) templateNodeInfo(nodegroup string) (*schedulernode Capacity: apiv1.ResourceList{}, } - packetPlan := InstanceTypes[mgr.plan] + packetPlan := InstanceTypes[mgr.getNodePoolDefinition(nodegroup).plan] if packetPlan == nil { - return nil, fmt.Errorf("packet plan %q not supported", mgr.plan) + return nil, fmt.Errorf("packet plan %q not supported", mgr.getNodePoolDefinition(nodegroup).plan) } node.Status.Capacity[apiv1.ResourcePods] = *resource.NewQuantity(110, resource.DecimalSI) node.Status.Capacity[apiv1.ResourceCPU] = *resource.NewQuantity(packetPlan.CPU, resource.DecimalSI) @@ -534,11 +637,27 @@ func (mgr *packetManagerRest) templateNodeInfo(nodegroup string) (*schedulernode node.Status.Allocatable = node.Status.Capacity node.Status.Conditions = cloudprovider.BuildReadyConditions() + // GenericLabels + node.Labels = 
cloudprovider.JoinStringMaps(node.Labels, buildGenericLabels(nodegroup, mgr.getNodePoolDefinition(nodegroup).plan)) + nodeInfo := schedulernodeinfo.NewNodeInfo(cloudprovider.BuildKubeProxy(nodegroup)) nodeInfo.SetNode(&node) return nodeInfo, nil } +func (mgr *packetManagerRest) getNodePoolDefinition(nodegroup string) *packetManagerNodePool { + NodePoolDefinition, ok := mgr.packetManagerNodePools[nodegroup] + if !ok { + NodePoolDefinition, ok = mgr.packetManagerNodePools["default"] + if !ok { + klog.Fatalf("No default cloud-config was found") + } + klog.Infof("No cloud-config was found for %s, using default", nodegroup) + } + + return NodePoolDefinition +} + func renderTemplate(str string, vars interface{}) string { tmpl, err := template.New("tmpl").Parse(str) diff --git a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go index b9fa0eed32ec..3cf2232e290f 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest_test.go @@ -17,10 +17,6 @@ limitations under the License. 
package packet import ( - // "fmt" - - // "strings" - "os" "testing" @@ -30,29 +26,51 @@ import ( "github.com/stretchr/testify/mock" ) -// Sensitive information used in tests, such as root passwords are fake +// API call responses contain only the minimum information required by the cluster-autoscaler const listPacketDevicesResponse = ` -{"devices":[{"id":"4d47a322-47e6-40cc-8402-e22b9933fb8f","short_id":"4d47a322","hostname":"k8s-worker-1","description":null,"state":"active","tags":["k8s-nodepool-pool1","k8s-cluster-cluster1"],"image_url":null,"billing_cycle":"hourly","user":"root","iqn":"iqn.2019-05.net.packet:device.4d47a322","locked":false,"bonding_mode":5,"created_at":"2019-05-24T11:59:52Z","updated_at":"2019-08-22T14:45:27Z","ipxe_script_url":null,"always_pxe":false,"storage":{},"customdata":{},"operating_system":{"id":"201bc259-982b-41a1-a4c1-bba01ce71f51","slug":"ubuntu_18_04","name":"Ubuntu 18.04 LTS","distro":"ubuntu","version":"18.04","provisionable_on":["baremetal_2a4","baremetal_2a5","c1.bloomberg.x86","c1.large.arm","baremetal_2a","c1.large.arm.xda","baremetal_2a2","c1.small.x86","baremetal_1","c1.xlarge.x86","baremetal_3","c2.large.anbox","c2.large.arm","c2.medium.x86","c2.small.x86","c3.medium.x86","c3.medium.x86","cpe1.c1.r720xd","cpe1.c1.r720xd","cpe1.g1.4028gr","cpe1.g1.4028gr","cpe1.m2.r640","cpe1.m2.r640","cpe1.s1.r730","cpe1.s1.r730","d1f.optane.x86","d1p.optane.x86","g2.large.x86","m1.xlarge.x86","baremetal_2","m2.xlarge.x86","n2.xlarge.x86","s1.large.x86","baremetal_s","t1.small.x86","baremetal_0","x1.small.x86","baremetal_1e","x2.graphcore.x86","x2.xlarge.x86"],"preinstallable":false,"pricing":{},"licensed":false},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, 
NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"project":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df"},"ssh_keys":[{"href":"/ssh-keys/1fd6e5a9-966d-4937-89b2-e269ff1d447a"},{"href":"/ssh-keys/22f9fd4c-ab3d-47f5-92ac-7d5703084d3d"},{"href":"/ssh-keys/23640808-0983-4b5c-b251-2f7715f5450a"},{"href":"/ssh-keys/2af51313-c514-4145-907f-f7445ca2e5ad"}],"project_lite":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df"},"volumes":[],"ip_addresses":[{"id":"d06ebdf8-57d1-4b07-974d-c23c013901b7","address_family":4,"netmask":"255.255.255.254","created_at":"2019-05-24T11:59:55Z","details":null,"tags":[],"public":true,"cidr":31,"management":true,"manageable":true,"enabled":true,"global_ip":null,"customdata":{},"project":{},"project_lite":{},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"assigned_to":{"href":"/devices/4d47a322-47e6-40cc-8402-e22b9933fb8f"},"interface":{"href":"/ports/283fb58f-eacc-4f21-818e-619029b859aa"},"network":"147.75.85.136","address":"147.75.85.137","gateway":"147.75.85.136","href":"/ips/d06ebdf8-57d1-4b07-974d-c23c013901b7"},{"id":"31466841-6877-4197-966d-80125b8bf2a0","address_family":4,"netmask":"255.255.255.240","created_at":"2019-05-24T11:59:55Z","details":null,"tags":[],"public":false,"cidr":28,"management":true,"manageable":true,"enabled":true,"global_ip":null,"customdata":{},"project":{},"project_lite":{},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, 
NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"assigned_to":{"href":"/devices/4d47a322-47e6-40cc-8402-e22b9933fb8f"},"interface":{"href":"/ports/283fb58f-eacc-4f21-818e-619029b859aa"},"network":"10.80.125.144","address":"10.80.125.146","gateway":"10.80.125.145","href":"/ips/31466841-6877-4197-966d-80125b8bf2a0"}],"created_by":{"id":"bd4f24f3-33f0-46a6-9528-4e31e2ba6074","full_name":"Dimitris Moraitis","avatar_thumb_url":"https://www.gravatar.com/avatar/702119decab6288093449009ab5af843?d=mm","email":"dimo@mist.io"},"plan":{"id":"e69c0169-4726-46ea-98f1-939c9e8a3607","slug":"baremetal_0","name":"t1.small.x86","description":"Our Type 0 configuration is a general use \"cloud killer\" server, with a Intel Atom 2.4Ghz processor and 8GB of RAM.","line":"baremetal","specs":{"cpus":[{"count":1,"type":"Intel Atom C2550 @ 
2.4Ghz"}],"memory":{"total":"8GB"},"drives":[{"count":1,"size":"80GB","type":"SSD"}],"nics":[{"count":2,"type":"1Gbps"}],"features":{"raid":false,"txt":true}},"legacy":false,"deployment_types":["on_demand","spot_market"],"available_in":[{"href":"/facilities/8e6470b3-b75e-47d1-bb93-45b225750975"},{"href":"/facilities/2b70eb8f-fa18-47c0-aba7-222a842362fd"},{"href":"/facilities/8ea03255-89f9-4e62-9d3f-8817db82ceed"},{"href":"/facilities/e1e9c52e-a0bc-4117-b996-0fc94843ea09"}],"class":"t1.small.x86","pricing":{"hour":0.07}},"userdata":"","switch_uuid":"a7994efc","network_ports":[{"id":"283fb58f-eacc-4f21-818e-619029b859aa","type":"NetworkBondPort","name":"bond0","data":{"bonded":true},"network_type":"layer3","native_virtual_network":null,"hardware":{"href":"/hardware/6c6046ef-df41-4c04-8522-69e4ec798024"},"virtual_networks":[],"connected_port":null,"href":"/ports/283fb58f-eacc-4f21-818e-619029b859aa"},{"id":"c0dcccb1-0c57-4be6-970a-3589cbb34355","type":"NetworkPort","name":"eth0","data":{"mac":"0c:c4:7a:e5:42:ce","bonded":true},"bond":{"id":"283fb58f-eacc-4f21-818e-619029b859aa","name":"bond0"},"native_virtual_network":null,"hardware":{"href":"/hardware/6c6046ef-df41-4c04-8522-69e4ec798024"},"virtual_networks":[],"connected_port":{"href":"/ports/57412b1f-f1fd-4071-8db2-5bc6494b9438"},"href":"/ports/c0dcccb1-0c57-4be6-970a-3589cbb34355"},{"id":"8fb7496b-32c1-45f4-8120-4b8faeaa57c3","type":"NetworkPort","name":"eth1","data":{"mac":"0c:c4:7a:e5:42:cf","bonded":true},"bond":{"id":"283fb58f-eacc-4f21-818e-619029b859aa","name":"bond0"},"native_virtual_network":null,"hardware":{"href":"/hardware/6c6046ef-df41-4c04-8522-69e4ec798024"},"virtual_networks":[],"connected_port":{"href":"/ports/d0803efe-50e7-492b-9044-b3ce9af609cc"},"href":"/ports/8fb7496b-32c1-45f4-8120-4b8faeaa57c3"}],"href":"/devices/4d47a322-47e6-40cc-8402-e22b9933fb8f"},{"id":"8a56bcad-e26f-4b0d-8d46-f490917ab2a3","short_id":"8a56bcad","hostname":"k8s-master-1","description":null,"state":"active","tags":["k8s-cl
uster-cluster1"],"image_url":null,"billing_cycle":"hourly","user":"root","iqn":"iqn.2019-05.net.packet:device.8a56bcad","locked":false,"bonding_mode":5,"created_at":"2019-05-24T11:59:51Z","updated_at":"2019-08-22T14:33:00Z","ipxe_script_url":null,"always_pxe":false,"storage":{},"customdata":{},"operating_system":{"id":"201bc259-982b-41a1-a4c1-bba01ce71f51","slug":"ubuntu_18_04","name":"Ubuntu 18.04 LTS","distro":"ubuntu","version":"18.04","provisionable_on":["baremetal_2a4","baremetal_2a5","c1.bloomberg.x86","c1.large.arm","baremetal_2a","c1.large.arm.xda","baremetal_2a2","c1.small.x86","baremetal_1","c1.xlarge.x86","baremetal_3","c2.large.anbox","c2.large.arm","c2.medium.x86","c2.small.x86","c3.medium.x86","c3.medium.x86","cpe1.c1.r720xd","cpe1.c1.r720xd","cpe1.g1.4028gr","cpe1.g1.4028gr","cpe1.m2.r640","cpe1.m2.r640","cpe1.s1.r730","cpe1.s1.r730","d1f.optane.x86","d1p.optane.x86","g2.large.x86","m1.xlarge.x86","baremetal_2","m2.xlarge.x86","n2.xlarge.x86","s1.large.x86","baremetal_s","t1.small.x86","baremetal_0","x1.small.x86","baremetal_1e","x2.graphcore.x86","x2.xlarge.x86"],"preinstallable":false,"pricing":{},"licensed":false},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, 
NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"project":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df"},"ssh_keys":[{"href":"/ssh-keys/1fd6e5a9-966d-4937-89b2-e269ff1d447a"}],"project_lite":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df"},"volumes":[],"ip_addresses":[{"id":"f77aa56c-a781-441d-bb40-c639db16a3cc","address_family":4,"netmask":"255.255.255.254","created_at":"2019-05-24T11:59:54Z","details":null,"tags":[],"public":true,"cidr":31,"management":true,"manageable":true,"enabled":true,"global_ip":null,"customdata":{},"project":{},"project_lite":{},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"assigned_to":{"href":"/devices/8a56bcad-e26f-4b0d-8d46-f490917ab2a3"},"interface":{"href":"/ports/51a3ad77-4eb5-4f81-ab6f-8de3e1db15e1"},"network":"147.75.102.14","address":"147.75.102.15","gateway":"147.75.102.14","href":"/ips/f77aa56c-a781-441d-bb40-c639db16a3cc"},{"id":"24502d6d-a633-4650-9650-6c9d3de50b72","address_family":4,"netmask":"255.255.255.240","created_at":"2019-05-24T11:59:54Z","details":null,"tags":[],"public":false,"cidr":28,"management":true,"manageable":true,"enabled":true,"global_ip":null,"customdata":{},"project":{},"project_lite":{},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, 
NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"assigned_to":{"href":"/devices/8a56bcad-e26f-4b0d-8d46-f490917ab2a3"},"interface":{"href":"/ports/51a3ad77-4eb5-4f81-ab6f-8de3e1db15e1"},"network":"10.80.125.128","address":"10.80.125.130","gateway":"10.80.125.129","href":"/ips/24502d6d-a633-4650-9650-6c9d3de50b72"}],"created_by":{"id":"bd4f24f3-33f0-46a6-9528-4e31e2ba6074","full_name":"Dimitris Moraitis","avatar_thumb_url":"https://www.gravatar.com/avatar/702119decab6288093449009ab5af843?d=mm","email":"dimo@mist.io"},"plan":{"id":"e69c0169-4726-46ea-98f1-939c9e8a3607","slug":"baremetal_0","name":"t1.small.x86","description":"Our Type 0 configuration is a general use \"cloud killer\" server, with a Intel Atom 2.4Ghz processor and 8GB of RAM.","line":"baremetal","specs":{"cpus":[{"count":1,"type":"Intel Atom C2550 @ 
2.4Ghz"}],"memory":{"total":"8GB"},"drives":[{"count":1,"size":"80GB","type":"SSD"}],"nics":[{"count":2,"type":"1Gbps"}],"features":{"raid":false,"txt":true}},"legacy":false,"deployment_types":["on_demand","spot_market"],"available_in":[{"href":"/facilities/8e6470b3-b75e-47d1-bb93-45b225750975"},{"href":"/facilities/2b70eb8f-fa18-47c0-aba7-222a842362fd"},{"href":"/facilities/8ea03255-89f9-4e62-9d3f-8817db82ceed"},{"href":"/facilities/e1e9c52e-a0bc-4117-b996-0fc94843ea09"}],"class":"t1.small.x86","pricing":{"hour":0.07}},"userdata":"","switch_uuid":"ddb086ff","network_ports":[{"id":"51a3ad77-4eb5-4f81-ab6f-8de3e1db15e1","type":"NetworkBondPort","name":"bond0","data":{"bonded":true},"network_type":"layer3","native_virtual_network":null,"hardware":{"href":"/hardware/7f262628-7db2-4b4b-90b1-41529818c7c0"},"virtual_networks":[],"connected_port":null,"href":"/ports/51a3ad77-4eb5-4f81-ab6f-8de3e1db15e1"},{"id":"2281afe5-c934-407a-abe0-b1f315291d3d","type":"NetworkPort","name":"eth0","data":{"mac":"0c:c4:7a:e5:43:04","bonded":true},"bond":{"id":"51a3ad77-4eb5-4f81-ab6f-8de3e1db15e1","name":"bond0"},"native_virtual_network":null,"hardware":{"href":"/hardware/7f262628-7db2-4b4b-90b1-41529818c7c0"},"virtual_networks":[],"connected_port":{"href":"/ports/cba6a9dd-550d-4e11-a93c-3a7b83bfaa65"},"href":"/ports/2281afe5-c934-407a-abe0-b1f315291d3d"},{"id":"1f351695-103b-4d92-9c7e-a6ce03904b12","type":"NetworkPort","name":"eth1","data":{"mac":"0c:c4:7a:e5:43:05","bonded":true},"bond":{"id":"51a3ad77-4eb5-4f81-ab6f-8de3e1db15e1","name":"bond0"},"native_virtual_network":null,"hardware":{"href":"/hardware/7f262628-7db2-4b4b-90b1-41529818c7c0"},"virtual_networks":[],"connected_port":{"href":"/ports/c7466539-f5c6-41b9-9bb2-97490d6b7c10"},"href":"/ports/1f351695-103b-4d92-9c7e-a6ce03904b12"}],"href":"/devices/8a56bcad-e26f-4b0d-8d46-f490917ab2a3"}],"meta":{"first":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df/devices?page=1"},"previous":null,"self":{"href":"/projects/3d27fd13-046
6-4878-be22-9a4b5595a3df/devices?page=1"},"next":null,"last":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df/devices?page=1"},"current_page":1,"last_page":1,"total":2}}` +{"devices":[{"id":"cace3b27-dff8-4930-943d-b2a63a775f03","short_id":"cace3b27","hostname":"k8s-cluster2-pool3-gndxdmmw","description":null,"state":"active","tags":["k8s-cluster-cluster2","k8s-nodepool-pool3"]},{"id":"efc985f6-ba6a-4bc3-8ef4-9643b0e950a9","short_id":"efc985f6","hostname":"k8s-cluster2-master","description":null,"state":"active","tags":["k8s-cluster-cluster2"]}]} +` + +const listPacketDevicesResponseAfterIncreasePool3 = ` +{"devices":[{"id":"8fa90049-e715-4794-ba31-81c1c78cee84","short_id":"8fa90049","hostname":"k8s-cluster2-pool3-xpnrwgdf","description":null,"state":"active","tags":["k8s-cluster-cluster2","k8s-nodepool-pool3"]},{"id":"cace3b27-dff8-4930-943d-b2a63a775f03","short_id":"cace3b27","hostname":"k8s-cluster2-pool3-gndxdmmw","description":null,"state":"active","tags":["k8s-cluster-cluster2","k8s-nodepool-pool3"]},{"id":"efc985f6-ba6a-4bc3-8ef4-9643b0e950a9","short_id":"efc985f6","hostname":"k8s-cluster2-master","description":null,"state":"active","tags":["k8s-cluster-cluster2"]}]} +` -const listPacketDevicesResponseAfterCreate = ` -{"devices":[{"id":"55de9631-b5a2-4e2b-82e5-3c8eff5af12a","short_id":"55de9631","hostname":"k8s-cluster1-pool1-vaxicfgl","description":null,"state":"active","tags":["k8s-cluster-cluster1","k8s-nodepool-pool1"],"image_url":null,"billing_cycle":"hourly","user":"root","iqn":"iqn.2019-10.net.packet:device.55de9631","locked":false,"bonding_mode":5,"created_at":"2019-10-18T23:58:13Z","updated_at":"2019-10-19T00:01:40Z","ipxe_script_url":null,"always_pxe":false,"storage":null,"customdata":{},"operating_system":{"id":"201bc259-982b-41a1-a4c1-bba01ce71f51","slug":"ubuntu_18_04","name":"Ubuntu 18.04 
LTS","distro":"ubuntu","version":"18.04","provisionable_on":["baremetal_2a4","baremetal_2a5","c1.bloomberg.x86","c1.large.arm","baremetal_2a","c1.large.arm.xda","baremetal_2a2","c1.small.x86","baremetal_1","c1.xlarge.x86","baremetal_3","c2.large.anbox","c2.large.arm","c2.medium.x86","c2.small.x86","c3.medium.x86","c3.medium.x86","cpe1.c1.r720xd","cpe1.c1.r720xd","cpe1.g1.4028gr","cpe1.g1.4028gr","cpe1.m2.r640","cpe1.m2.r640","cpe1.s1.r730","cpe1.s1.r730","d1f.optane.x86","d1p.optane.x86","g2.large.x86","m1.xlarge.x86","baremetal_2","m2.xlarge.x86","n2.xlarge.x86","s1.large.x86","baremetal_s","t1.small.x86","baremetal_0","x1.small.x86","baremetal_1e","x2.graphcore.x86","x2.xlarge.x86"],"preinstallable":false,"pricing":{},"licensed":false},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"project":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df"},"ssh_keys":[{"href":"/ssh-keys/cf8abbdd-a484-43c3-ba62-627a16594594"},{"href":"/ssh-keys/502178f5-b985-466b-980e-f01ba028e5fc"},{"href":"/ssh-keys/2b38c95c-02dc-4ce8-961c-9d2ce8763647"}],"project_lite":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df"},"volumes":[],"ip_addresses":[{"id":"ba847802-bfaf-49e2-b9f5-c4389fd41512","address_family":4,"netmask":"255.255.255.254","created_at":"2019-10-18T23:58:16Z","details":null,"tags":[],"public":true,"cidr":31,"management":true,"manageable":true,"enabled":true,"global_ip":null,"customdata":{},"project":{},"project_lite":{},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, 
NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"assigned_to":{"href":"/devices/55de9631-b5a2-4e2b-82e5-3c8eff5af12a"},"interface":{"href":"/ports/5df5872e-7e29-43d7-9903-cba7bdca5d68"},"network":"147.75.101.4","address":"147.75.101.5","gateway":"147.75.101.4","href":"/ips/ba847802-bfaf-49e2-b9f5-c4389fd41512"},{"id":"14a3b5ab-9755-4290-8b09-63d8a5004078","address_family":6,"netmask":"ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffe","created_at":"2019-10-18T23:58:16Z","details":null,"tags":[],"public":true,"cidr":127,"management":true,"manageable":true,"enabled":true,"global_ip":null,"customdata":{},"project":{},"project_lite":{},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"assigned_to":{"href":"/devices/55de9631-b5a2-4e2b-82e5-3c8eff5af12a"},"interface":{"href":"/ports/5df5872e-7e29-43d7-9903-cba7bdca5d68"},"network":"2604:1380:2000:ec00::2","address":"2604:1380:2000:ec00::3","gateway":"2604:1380:2000:ec00::2","href":"/ips/14a3b5ab-9755-4290-8b09-63d8a5004078"},{"id":"906cf79a-4138-4321-853a-7bf7b92c0c00","address_family":4,"netmask":"255.255.255.254","created_at":"2019-10-18T23:58:16Z","details":null,"tags":[],"public":false,"cidr":31,"management":true,"manageable":true,"enabled":true,"global_ip":null,"customdata":{},"project":{},"project_lite":{},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, 
NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"assigned_to":{"href":"/devices/55de9631-b5a2-4e2b-82e5-3c8eff5af12a"},"interface":{"href":"/ports/5df5872e-7e29-43d7-9903-cba7bdca5d68"},"network":"10.80.84.138","address":"10.80.84.139","gateway":"10.80.84.138","href":"/ips/906cf79a-4138-4321-853a-7bf7b92c0c00"}],"created_by":{"id":"446476b8-a835-414a-8e9c-84925cc72705","full_name":"Markos Gogoulos","avatar_thumb_url":"https://www.gravatar.com/avatar/fafa2cd3f9d378981e87a993661859b4?d=mm","email":"mgogoulos@mist.io"},"plan":{"id":"e69c0169-4726-46ea-98f1-939c9e8a3607","slug":"t1.small.x86","name":"t1.small.x86","description":"Our Type 0 configuration is a general use \"cloud killer\" server, with a Intel Atom 2.4Ghz processor and 8GB of RAM.","line":"baremetal","specs":{"cpus":[{"count":1,"type":"Intel Atom C2550 @ 2.4Ghz"}],"memory":{"total":"8GB"},"drives":[{"count":1,"size":"80GB","type":"SSD"}],"nics":[{"count":2,"type":"1Gbps"}],"features":{"raid":false,"txt":true}},"legacy":false,"deployment_types":["on_demand","spot_market"],"available_in":[{"href":"/facilities/8e6470b3-b75e-47d1-bb93-45b225750975"},{"href":"/facilities/2b70eb8f-fa18-47c0-aba7-222a842362fd"},{"href":"/facilities/8ea03255-89f9-4e62-9d3f-8817db82ceed"},{"href":"/facilities/e1e9c52e-a0bc-4117-b996-0fc94843ea09"}],"class":"t1.small.x86","pricing":{"hour":0.07}},"userdata":"#!/bin/bash\nexport DEBIAN_FRONTEND=noninteractive\napt-get update && apt-get install -y apt-transport-https ca-certificates curl software-properties-common\ncurl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -\ncurl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -\ncat </etc/apt/sources.list.d/kubernetes.list\ndeb https://apt.kubernetes.io/ kubernetes-xenial 
main\nEOF\nadd-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\"\napt-get update\napt-get upgrade -y\napt-get install -y kubelet kubeadm kubectl\napt-mark hold kubelet kubeadm kubectl\ncurl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -\nadd-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable\"\napt update\napt install -y docker-ce=18.06.2~ce~3-0~ubuntu\ncat > /etc/docker/daemon.json < /etc/fstab\nkubeadm join --discovery-token-unsafe-skip-ca-verification --token 07401b.f395accd246ae52d 147.75.102.15:6443\n","root_password":"NCp5)m0%7-","switch_uuid":"a7994efc","network_ports":[{"id":"5df5872e-7e29-43d7-9903-cba7bdca5d68","type":"NetworkBondPort","name":"bond0","data":{"bonded":true},"network_type":"layer3","native_virtual_network":null,"hardware":{"href":"/hardware/9e88d670-8a26-43e2-aef7-7963924832ed"},"virtual_networks":[],"connected_port":null,"href":"/ports/5df5872e-7e29-43d7-9903-cba7bdca5d68"},{"id":"926abcab-4bb1-42a9-9f8a-5dc1536a2c26","type":"NetworkPort","name":"eth0","data":{"mac":"0c:c4:7a:e5:44:ca","bonded":true},"bond":{"id":"5df5872e-7e29-43d7-9903-cba7bdca5d68","name":"bond0"},"native_virtual_network":null,"hardware":{"href":"/hardware/9e88d670-8a26-43e2-aef7-7963924832ed"},"virtual_networks":[],"connected_port":{"href":"/ports/083133e8-62a7-4434-a91d-dea9f33b2569"},"href":"/ports/926abcab-4bb1-42a9-9f8a-5dc1536a2c26"},{"id":"8d5fe47b-1e11-4fee-a661-b559c21b3c0a","type":"NetworkPort","name":"eth1","data":{"mac":"0c:c4:7a:e5:44:cb","bonded":true},"bond":{"id":"5df5872e-7e29-43d7-9903-cba7bdca5d68","name":"bond0"},"native_virtual_network":null,"hardware":{"href":"/hardware/9e88d670-8a26-43e2-aef7-7963924832ed"},"virtual_networks":[],"connected_port":{"href":"/ports/12dc62ac-96a8-487e-b5a2-eb45038797fc"},"href":"/ports/8d5fe47b-1e11-4fee-a661-b559c21b3c0a"}],"href":"/devices/55de9631-b5a2-4e2b-82e5-3c8eff5af12a"},{"id":"a40d063e-2513-4c1e-a48e-
1cafbab436e3","short_id":"a40d063e","hostname":"k8s-cluster1-pool1-ztrxjzao","description":null,"state":"active","tags":["k8s-cluster-cluster1","k8s-nodepool-pool1"],"image_url":null,"billing_cycle":"hourly","user":"root","iqn":"iqn.2019-10.net.packet:device.a40d063e","locked":false,"bonding_mode":5,"created_at":"2019-10-18T23:58:09Z","updated_at":"2019-10-19T00:04:27Z","ipxe_script_url":null,"always_pxe":false,"storage":null,"customdata":{},"hardware_reservation":{"href":"/hardware-reservations/28d695ad-243e-4cba-9e90-cc55b880bc69"},"operating_system":{"id":"201bc259-982b-41a1-a4c1-bba01ce71f51","slug":"ubuntu_18_04","name":"Ubuntu 18.04 LTS","distro":"ubuntu","version":"18.04","provisionable_on":["baremetal_2a4","baremetal_2a5","c1.bloomberg.x86","c1.large.arm","baremetal_2a","c1.large.arm.xda","baremetal_2a2","c1.small.x86","baremetal_1","c1.xlarge.x86","baremetal_3","c2.large.anbox","c2.large.arm","c2.medium.x86","c2.small.x86","c3.medium.x86","c3.medium.x86","cpe1.c1.r720xd","cpe1.c1.r720xd","cpe1.g1.4028gr","cpe1.g1.4028gr","cpe1.m2.r640","cpe1.m2.r640","cpe1.s1.r730","cpe1.s1.r730","d1f.optane.x86","d1p.optane.x86","g2.large.x86","m1.xlarge.x86","baremetal_2","m2.xlarge.x86","n2.xlarge.x86","s1.large.x86","baremetal_s","t1.small.x86","baremetal_0","x1.small.x86","baremetal_1e","x2.graphcore.x86","x2.xlarge.x86"],"preinstallable":false,"pricing":{},"licensed":false},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, 
NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"project":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df"},"ssh_keys":[{"href":"/ssh-keys/cf8abbdd-a484-43c3-ba62-627a16594594"},{"href":"/ssh-keys/502178f5-b985-466b-980e-f01ba028e5fc"},{"href":"/ssh-keys/2b38c95c-02dc-4ce8-961c-9d2ce8763647"},{"href":"/ssh-keys/343b1178-2dcb-40d9-a03b-6d5b58437e86"},{"href":"/ssh-keys/9af94154-e38f-4a77-8cd2-8e6455fc8129"},{"href":"/ssh-keys/22f9fd4c-ab3d-47f5-92ac-7d5703084d3d"},{"href":"/ssh-keys/56b9d292-8ed8-43c5-8687-a436979d1ac0"}],"project_lite":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df"},"volumes":[],"ip_addresses":[{"id":"229ec20e-ec56-4a5b-8e61-c4eb15f60bdb","address_family":4,"netmask":"255.255.255.254","created_at":"2019-10-18T23:58:12Z","details":null,"tags":[],"public":true,"cidr":31,"management":true,"manageable":true,"enabled":true,"global_ip":null,"customdata":{},"project":{},"project_lite":{},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, 
NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"assigned_to":{"href":"/devices/a40d063e-2513-4c1e-a48e-1cafbab436e3"},"interface":{"href":"/ports/2a7610af-3631-499d-9cfa-cdd7e8547575"},"network":"147.75.100.108","address":"147.75.100.109","gateway":"147.75.100.108","href":"/ips/229ec20e-ec56-4a5b-8e61-c4eb15f60bdb"},{"id":"818ce056-d740-485e-b8ba-3558d53798ed","address_family":6,"netmask":"ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffe","created_at":"2019-10-18T23:58:12Z","details":null,"tags":[],"public":true,"cidr":127,"management":true,"manageable":true,"enabled":true,"global_ip":null,"customdata":{},"project":{},"project_lite":{},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"assigned_to":{"href":"/devices/a40d063e-2513-4c1e-a48e-1cafbab436e3"},"interface":{"href":"/ports/2a7610af-3631-499d-9cfa-cdd7e8547575"},"network":"2604:1380:2000:ec00::","address":"2604:1380:2000:ec00::1","gateway":"2604:1380:2000:ec00::","href":"/ips/818ce056-d740-485e-b8ba-3558d53798ed"},{"id":"8701af8d-1d0a-47b9-8782-9439a7d2038c","address_family":4,"netmask":"255.255.255.254","created_at":"2019-10-18T23:58:12Z","details":null,"tags":[],"public":false,"cidr":31,"management":true,"manageable":true,"enabled":true,"global_ip":null,"customdata":{},"project":{},"project_lite":{},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, 
NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"assigned_to":{"href":"/devices/a40d063e-2513-4c1e-a48e-1cafbab436e3"},"interface":{"href":"/ports/2a7610af-3631-499d-9cfa-cdd7e8547575"},"network":"10.80.84.134","address":"10.80.84.135","gateway":"10.80.84.134","href":"/ips/8701af8d-1d0a-47b9-8782-9439a7d2038c"}],"created_by":{"id":"446476b8-a835-414a-8e9c-84925cc72705","full_name":"Markos Gogoulos","avatar_thumb_url":"https://www.gravatar.com/avatar/fafa2cd3f9d378981e87a993661859b4?d=mm","email":"mgogoulos@mist.io"},"plan":{"id":"e69c0169-4726-46ea-98f1-939c9e8a3607","slug":"t1.small.x86","name":"t1.small.x86","description":"Our Type 0 configuration is a general use \"cloud killer\" server, with a Intel Atom 2.4Ghz processor and 8GB of RAM.","line":"baremetal","specs":{"cpus":[{"count":1,"type":"Intel Atom C2550 @ 2.4Ghz"}],"memory":{"total":"8GB"},"drives":[{"count":1,"size":"80GB","type":"SSD"}],"nics":[{"count":2,"type":"1Gbps"}],"features":{"raid":false,"txt":true}},"legacy":false,"deployment_types":["on_demand","spot_market"],"available_in":[{"href":"/facilities/8e6470b3-b75e-47d1-bb93-45b225750975"},{"href":"/facilities/2b70eb8f-fa18-47c0-aba7-222a842362fd"},{"href":"/facilities/8ea03255-89f9-4e62-9d3f-8817db82ceed"},{"href":"/facilities/e1e9c52e-a0bc-4117-b996-0fc94843ea09"}],"class":"t1.small.x86","pricing":{"hour":0.07}},"userdata":"#!/bin/bash\nexport DEBIAN_FRONTEND=noninteractive\napt-get update && apt-get install -y apt-transport-https ca-certificates curl software-properties-common\ncurl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -\ncurl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -\ncat </etc/apt/sources.list.d/kubernetes.list\ndeb https://apt.kubernetes.io/ kubernetes-xenial 
main\nEOF\nadd-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\"\napt-get update\napt-get upgrade -y\napt-get install -y kubelet kubeadm kubectl\napt-mark hold kubelet kubeadm kubectl\ncurl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -\nadd-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable\"\napt update\napt install -y docker-ce=18.06.2~ce~3-0~ubuntu\ncat > /etc/docker/daemon.json < /etc/fstab\nkubeadm join --discovery-token-unsafe-skip-ca-verification --token 07401b.f395accd246ae52d 147.75.102.15:6443\n","root_password":"t3%#d/J72D","switch_uuid":"a7994efc","network_ports":[{"id":"2a7610af-3631-499d-9cfa-cdd7e8547575","type":"NetworkBondPort","name":"bond0","data":{"bonded":true},"network_type":"layer3","native_virtual_network":null,"hardware":{"href":"/hardware/feea2550-963d-49cd-8ac2-3efa5cdc2883"},"virtual_networks":[],"connected_port":null,"href":"/ports/2a7610af-3631-499d-9cfa-cdd7e8547575"},{"id":"c33c170c-bdcd-49aa-862f-7e80b7a5b644","type":"NetworkPort","name":"eth0","data":{"mac":"0c:c4:7a:e5:43:c8","bonded":true},"bond":{"id":"2a7610af-3631-499d-9cfa-cdd7e8547575","name":"bond0"},"native_virtual_network":null,"hardware":{"href":"/hardware/feea2550-963d-49cd-8ac2-3efa5cdc2883"},"virtual_networks":[],"connected_port":{"href":"/ports/e93ce17a-e3ab-4208-a32c-d0a00b876869"},"href":"/ports/c33c170c-bdcd-49aa-862f-7e80b7a5b644"},{"id":"ea414f0a-f509-4b73-80f0-029f0163989d","type":"NetworkPort","name":"eth1","data":{"mac":"0c:c4:7a:e5:43:c9","bonded":true},"bond":{"id":"2a7610af-3631-499d-9cfa-cdd7e8547575","name":"bond0"},"native_virtual_network":null,"hardware":{"href":"/hardware/feea2550-963d-49cd-8ac2-3efa5cdc2883"},"virtual_networks":[],"connected_port":{"href":"/ports/aad81c51-bb88-41a4-a937-8db2e49e807d"},"href":"/ports/ea414f0a-f509-4b73-80f0-029f0163989d"}],"href":"/devices/a40d063e-2513-4c1e-a48e-1cafbab436e3"},{"id":"4d47a322-47e6-40cc-8402-
e22b9933fb8f","short_id":"4d47a322","hostname":"k8s-worker-1","description":null,"state":"active","tags":["k8s-nodepool-pool1","k8s-cluster-cluster1"],"image_url":null,"billing_cycle":"hourly","user":"root","iqn":"iqn.2019-05.net.packet:device.4d47a322","locked":false,"bonding_mode":5,"created_at":"2019-05-24T11:59:52Z","updated_at":"2019-08-22T14:45:27Z","ipxe_script_url":null,"always_pxe":false,"storage":{},"customdata":{},"operating_system":{"id":"201bc259-982b-41a1-a4c1-bba01ce71f51","slug":"ubuntu_18_04","name":"Ubuntu 18.04 LTS","distro":"ubuntu","version":"18.04","provisionable_on":["baremetal_2a4","baremetal_2a5","c1.bloomberg.x86","c1.large.arm","baremetal_2a","c1.large.arm.xda","baremetal_2a2","c1.small.x86","baremetal_1","c1.xlarge.x86","baremetal_3","c2.large.anbox","c2.large.arm","c2.medium.x86","c2.small.x86","c3.medium.x86","c3.medium.x86","cpe1.c1.r720xd","cpe1.c1.r720xd","cpe1.g1.4028gr","cpe1.g1.4028gr","cpe1.m2.r640","cpe1.m2.r640","cpe1.s1.r730","cpe1.s1.r730","d1f.optane.x86","d1p.optane.x86","g2.large.x86","m1.xlarge.x86","baremetal_2","m2.xlarge.x86","n2.xlarge.x86","s1.large.x86","baremetal_s","t1.small.x86","baremetal_0","x1.small.x86","baremetal_1e","x2.graphcore.x86","x2.xlarge.x86"],"preinstallable":false,"pricing":{},"licensed":false},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, 
NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"project":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df"},"ssh_keys":[{"href":"/ssh-keys/cf8abbdd-a484-43c3-ba62-627a16594594"},{"href":"/ssh-keys/502178f5-b985-466b-980e-f01ba028e5fc"},{"href":"/ssh-keys/2b38c95c-02dc-4ce8-961c-9d2ce8763647"},{"href":"/ssh-keys/343b1178-2dcb-40d9-a03b-6d5b58437e86"},{"href":"/ssh-keys/9af94154-e38f-4a77-8cd2-8e6455fc8129"},{"href":"/ssh-keys/22f9fd4c-ab3d-47f5-92ac-7d5703084d3d"},{"href":"/ssh-keys/56b9d292-8ed8-43c5-8687-a436979d1ac0"}],"project_lite":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df"},"volumes":[],"ip_addresses":[{"id":"d06ebdf8-57d1-4b07-974d-c23c013901b7","address_family":4,"netmask":"255.255.255.254","created_at":"2019-05-24T11:59:55Z","details":null,"tags":[],"public":true,"cidr":31,"management":true,"manageable":true,"enabled":true,"global_ip":null,"customdata":{},"project":{},"project_lite":{},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, 
NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"assigned_to":{"href":"/devices/4d47a322-47e6-40cc-8402-e22b9933fb8f"},"interface":{"href":"/ports/283fb58f-eacc-4f21-818e-619029b859aa"},"network":"147.75.85.136","address":"147.75.85.137","gateway":"147.75.85.136","href":"/ips/d06ebdf8-57d1-4b07-974d-c23c013901b7"},{"id":"31466841-6877-4197-966d-80125b8bf2a0","address_family":4,"netmask":"255.255.255.240","created_at":"2019-05-24T11:59:55Z","details":null,"tags":[],"public":false,"cidr":28,"management":true,"manageable":true,"enabled":true,"global_ip":null,"customdata":{},"project":{},"project_lite":{},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"assigned_to":{"href":"/devices/4d47a322-47e6-40cc-8402-e22b9933fb8f"},"interface":{"href":"/ports/283fb58f-eacc-4f21-818e-619029b859aa"},"network":"10.80.125.144","address":"10.80.125.146","gateway":"10.80.125.145","href":"/ips/31466841-6877-4197-966d-80125b8bf2a0"}],"created_by":{"id":"bd4f24f3-33f0-46a6-9528-4e31e2ba6074","full_name":"Dimitris Moraitis","avatar_thumb_url":"https://www.gravatar.com/avatar/702119decab6288093449009ab5af843?d=mm","email":"dimo@mist.io"},"plan":{"id":"e69c0169-4726-46ea-98f1-939c9e8a3607","slug":"baremetal_0","name":"t1.small.x86","description":"Our Type 0 configuration is a general use \"cloud killer\" server, with a Intel Atom 2.4Ghz processor and 8GB of RAM.","line":"baremetal","specs":{"cpus":[{"count":1,"type":"Intel Atom C2550 @ 
2.4Ghz"}],"memory":{"total":"8GB"},"drives":[{"count":1,"size":"80GB","type":"SSD"}],"nics":[{"count":2,"type":"1Gbps"}],"features":{"raid":false,"txt":true}},"legacy":false,"deployment_types":["on_demand","spot_market"],"available_in":[{"href":"/facilities/8e6470b3-b75e-47d1-bb93-45b225750975"},{"href":"/facilities/2b70eb8f-fa18-47c0-aba7-222a842362fd"},{"href":"/facilities/8ea03255-89f9-4e62-9d3f-8817db82ceed"},{"href":"/facilities/e1e9c52e-a0bc-4117-b996-0fc94843ea09"}],"class":"t1.small.x86","pricing":{"hour":0.07}},"userdata":"","switch_uuid":"a7994efc","network_ports":[{"id":"283fb58f-eacc-4f21-818e-619029b859aa","type":"NetworkBondPort","name":"bond0","data":{"bonded":true},"network_type":"layer3","native_virtual_network":null,"hardware":{"href":"/hardware/6c6046ef-df41-4c04-8522-69e4ec798024"},"virtual_networks":[],"connected_port":null,"href":"/ports/283fb58f-eacc-4f21-818e-619029b859aa"},{"id":"c0dcccb1-0c57-4be6-970a-3589cbb34355","type":"NetworkPort","name":"eth0","data":{"mac":"0c:c4:7a:e5:42:ce","bonded":true},"bond":{"id":"283fb58f-eacc-4f21-818e-619029b859aa","name":"bond0"},"native_virtual_network":null,"hardware":{"href":"/hardware/6c6046ef-df41-4c04-8522-69e4ec798024"},"virtual_networks":[],"connected_port":{"href":"/ports/57412b1f-f1fd-4071-8db2-5bc6494b9438"},"href":"/ports/c0dcccb1-0c57-4be6-970a-3589cbb34355"},{"id":"8fb7496b-32c1-45f4-8120-4b8faeaa57c3","type":"NetworkPort","name":"eth1","data":{"mac":"0c:c4:7a:e5:42:cf","bonded":true},"bond":{"id":"283fb58f-eacc-4f21-818e-619029b859aa","name":"bond0"},"native_virtual_network":null,"hardware":{"href":"/hardware/6c6046ef-df41-4c04-8522-69e4ec798024"},"virtual_networks":[],"connected_port":{"href":"/ports/d0803efe-50e7-492b-9044-b3ce9af609cc"},"href":"/ports/8fb7496b-32c1-45f4-8120-4b8faeaa57c3"}],"href":"/devices/4d47a322-47e6-40cc-8402-e22b9933fb8f"},{"id":"8a56bcad-e26f-4b0d-8d46-f490917ab2a3","short_id":"8a56bcad","hostname":"k8s-master-1","description":null,"state":"active","tags":["k8s-cl
uster-cluster1"],"image_url":null,"billing_cycle":"hourly","user":"root","iqn":"iqn.2019-05.net.packet:device.8a56bcad","locked":false,"bonding_mode":5,"created_at":"2019-05-24T11:59:51Z","updated_at":"2019-08-22T14:33:00Z","ipxe_script_url":null,"always_pxe":false,"storage":{},"customdata":{},"operating_system":{"id":"201bc259-982b-41a1-a4c1-bba01ce71f51","slug":"ubuntu_18_04","name":"Ubuntu 18.04 LTS","distro":"ubuntu","version":"18.04","provisionable_on":["baremetal_2a4","baremetal_2a5","c1.bloomberg.x86","c1.large.arm","baremetal_2a","c1.large.arm.xda","baremetal_2a2","c1.small.x86","baremetal_1","c1.xlarge.x86","baremetal_3","c2.large.anbox","c2.large.arm","c2.medium.x86","c2.small.x86","c3.medium.x86","c3.medium.x86","cpe1.c1.r720xd","cpe1.c1.r720xd","cpe1.g1.4028gr","cpe1.g1.4028gr","cpe1.m2.r640","cpe1.m2.r640","cpe1.s1.r730","cpe1.s1.r730","d1f.optane.x86","d1p.optane.x86","g2.large.x86","m1.xlarge.x86","baremetal_2","m2.xlarge.x86","n2.xlarge.x86","s1.large.x86","baremetal_s","t1.small.x86","baremetal_0","x1.small.x86","baremetal_1e","x2.graphcore.x86","x2.xlarge.x86"],"preinstallable":false,"pricing":{},"licensed":false},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, 
NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"project":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df"},"ssh_keys":[{"href":"/ssh-keys/cf8abbdd-a484-43c3-ba62-627a16594594"},{"href":"/ssh-keys/502178f5-b985-466b-980e-f01ba028e5fc"},{"href":"/ssh-keys/2b38c95c-02dc-4ce8-961c-9d2ce8763647"},{"href":"/ssh-keys/343b1178-2dcb-40d9-a03b-6d5b58437e86"},{"href":"/ssh-keys/9af94154-e38f-4a77-8cd2-8e6455fc8129"}],"project_lite":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df"},"volumes":[],"ip_addresses":[{"id":"f77aa56c-a781-441d-bb40-c639db16a3cc","address_family":4,"netmask":"255.255.255.254","created_at":"2019-05-24T11:59:54Z","details":null,"tags":[],"public":true,"cidr":31,"management":true,"manageable":true,"enabled":true,"global_ip":null,"customdata":{},"project":{},"project_lite":{},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"assigned_to":{"href":"/devices/8a56bcad-e26f-4b0d-8d46-f490917ab2a3"},"interface":{"href":"/ports/51a3ad77-4eb5-4f81-ab6f-8de3e1db15e1"},"network":"147.75.102.14","address":"147.75.102.15","gateway":"147.75.102.14","href":"/ips/f77aa56c-a781-441d-bb40-c639db16a3cc"},{"id":"24502d6d-a633-4650-9650-6c9d3de50b72","address_family":4,"netmask":"255.255.255.240","created_at":"2019-05-24T11:59:54Z","details":null,"tags":[],"public":false,"cidr":28,"management":true,"manageable":true,"enabled":true,"global_ip":null,"customdata":{},"project":{},"project_lite":{},"facility":{"id":"8e6470b3-b75e-47d1-bb93-45b225750975","name":"Amsterdam, 
NL","code":"ams1","features":["baremetal","storage","global_ipv4","backend_transfer","layer_2"],"address":{"href":"#0688e909-647e-4b21-bdf2-fc056d993fc5"},"ip_ranges":["2604:1380:2000::/36","147.75.204.0/23","147.75.100.0/22","147.75.80.0/22","147.75.32.0/23"]},"assigned_to":{"href":"/devices/8a56bcad-e26f-4b0d-8d46-f490917ab2a3"},"interface":{"href":"/ports/51a3ad77-4eb5-4f81-ab6f-8de3e1db15e1"},"network":"10.80.125.128","address":"10.80.125.130","gateway":"10.80.125.129","href":"/ips/24502d6d-a633-4650-9650-6c9d3de50b72"}],"created_by":{"id":"bd4f24f3-33f0-46a6-9528-4e31e2ba6074","full_name":"Dimitris Moraitis","avatar_thumb_url":"https://www.gravatar.com/avatar/702119decab6288093449009ab5af843?d=mm","email":"dimo@mist.io"},"plan":{"id":"e69c0169-4726-46ea-98f1-939c9e8a3607","slug":"baremetal_0","name":"t1.small.x86","description":"Our Type 0 configuration is a general use \"cloud killer\" server, with a Intel Atom 2.4Ghz processor and 8GB of RAM.","line":"baremetal","specs":{"cpus":[{"count":1,"type":"Intel Atom C2550 @ 
2.4Ghz"}],"memory":{"total":"8GB"},"drives":[{"count":1,"size":"80GB","type":"SSD"}],"nics":[{"count":2,"type":"1Gbps"}],"features":{"raid":false,"txt":true}},"legacy":false,"deployment_types":["on_demand","spot_market"],"available_in":[{"href":"/facilities/8e6470b3-b75e-47d1-bb93-45b225750975"},{"href":"/facilities/2b70eb8f-fa18-47c0-aba7-222a842362fd"},{"href":"/facilities/8ea03255-89f9-4e62-9d3f-8817db82ceed"},{"href":"/facilities/e1e9c52e-a0bc-4117-b996-0fc94843ea09"}],"class":"t1.small.x86","pricing":{"hour":0.07}},"userdata":"","switch_uuid":"ddb086ff","network_ports":[{"id":"51a3ad77-4eb5-4f81-ab6f-8de3e1db15e1","type":"NetworkBondPort","name":"bond0","data":{"bonded":true},"network_type":"layer3","native_virtual_network":null,"hardware":{"href":"/hardware/7f262628-7db2-4b4b-90b1-41529818c7c0"},"virtual_networks":[],"connected_port":null,"href":"/ports/51a3ad77-4eb5-4f81-ab6f-8de3e1db15e1"},{"id":"2281afe5-c934-407a-abe0-b1f315291d3d","type":"NetworkPort","name":"eth0","data":{"mac":"0c:c4:7a:e5:43:04","bonded":true},"bond":{"id":"51a3ad77-4eb5-4f81-ab6f-8de3e1db15e1","name":"bond0"},"native_virtual_network":null,"hardware":{"href":"/hardware/7f262628-7db2-4b4b-90b1-41529818c7c0"},"virtual_networks":[],"connected_port":{"href":"/ports/cba6a9dd-550d-4e11-a93c-3a7b83bfaa65"},"href":"/ports/2281afe5-c934-407a-abe0-b1f315291d3d"},{"id":"1f351695-103b-4d92-9c7e-a6ce03904b12","type":"NetworkPort","name":"eth1","data":{"mac":"0c:c4:7a:e5:43:05","bonded":true},"bond":{"id":"51a3ad77-4eb5-4f81-ab6f-8de3e1db15e1","name":"bond0"},"native_virtual_network":null,"hardware":{"href":"/hardware/7f262628-7db2-4b4b-90b1-41529818c7c0"},"virtual_networks":[],"connected_port":{"href":"/ports/c7466539-f5c6-41b9-9bb2-97490d6b7c10"},"href":"/ports/1f351695-103b-4d92-9c7e-a6ce03904b12"}],"href":"/devices/8a56bcad-e26f-4b0d-8d46-f490917ab2a3"}],"meta":{"first":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df/devices?page=1"},"previous":null,"self":{"href":"/projects/3d27fd13-046
6-4878-be22-9a4b5595a3df/devices?page=1"},"next":null,"last":{"href":"/projects/3d27fd13-0466-4878-be22-9a4b5595a3df/devices?page=1"},"current_page":1,"last_page":1,"total":4}} +const listPacketDevicesResponseAfterIncreasePool2 = ` +{"devices":[{"id":"0f5609af-1c27-451b-8edd-a1283f2c9440","short_id":"0f5609af","hostname":"k8s-cluster2-pool2-jssxcyzz","description":null,"state":"active","tags":["k8s-cluster-cluster2","k8s-nodepool-pool2"]},{"id":"8fa90049-e715-4794-ba31-81c1c78cee84","short_id":"8fa90049","hostname":"k8s-cluster2-pool3-xpnrwgdf","description":null,"state":"active","tags":["k8s-cluster-cluster2","k8s-nodepool-pool3"]},{"id":"cace3b27-dff8-4930-943d-b2a63a775f03","short_id":"cace3b27","hostname":"k8s-cluster2-pool3-gndxdmmw","description":null,"state":"active","tags":["k8s-cluster-cluster2","k8s-nodepool-pool3"]},{"id":"efc985f6-ba6a-4bc3-8ef4-9643b0e950a9","short_id":"efc985f6","hostname":"k8s-cluster2-master","description":null,"state":"active","tags":["k8s-cluster-cluster2"]}]} ` -const cloudinitDefault = 
"IyEvYmluL2Jhc2gKZXhwb3J0IERFQklBTl9GUk9OVEVORD1ub25pbnRlcmFjdGl2ZQphcHQtZ2V0IHVwZGF0ZSAmJiBhcHQtZ2V0IGluc3RhbGwgLXkgYXB0LXRyYW5zcG9ydC1odHRwcyBjYS1jZXJ0aWZpY2F0ZXMgY3VybCBzb2Z0d2FyZS1wcm9wZXJ0aWVzLWNvbW1vbgpjdXJsIC1mc1NMIGh0dHBzOi8vZG93bmxvYWQuZG9ja2VyLmNvbS9saW51eC91YnVudHUvZ3BnIHwgYXB0LWtleSBhZGQgLQpjdXJsIC1zIGh0dHBzOi8vcGFja2FnZXMuY2xvdWQuZ29vZ2xlLmNvbS9hcHQvZG9jL2FwdC1rZXkuZ3BnIHwgYXB0LWtleSBhZGQgLQpjYXQgPDxFT0YgPi9ldGMvYXB0L3NvdXJjZXMubGlzdC5kL2t1YmVybmV0ZXMubGlzdApkZWIgaHR0cHM6Ly9hcHQua3ViZXJuZXRlcy5pby8ga3ViZXJuZXRlcy14ZW5pYWwgbWFpbgpFT0YKYWRkLWFwdC1yZXBvc2l0b3J5ICAgImRlYiBbYXJjaD1hbWQ2NF0gaHR0cHM6Ly9kb3dubG9hZC5kb2NrZXIuY29tL2xpbnV4L3VidW50dSAgICQobHNiX3JlbGVhc2UgLWNzKSAgIHN0YWJsZSIKYXB0LWdldCB1cGRhdGUKYXB0LWdldCB1cGdyYWRlIC15CmFwdC1nZXQgaW5zdGFsbCAteSBrdWJlbGV0IGt1YmVhZG0ga3ViZWN0bAphcHQtbWFyayBob2xkIGt1YmVsZXQga3ViZWFkbSBrdWJlY3RsCmN1cmwgLWZzU0wgaHR0cHM6Ly9kb3dubG9hZC5kb2NrZXIuY29tL2xpbnV4L3VidW50dS9ncGcgfCBhcHQta2V5IGFkZCAtCmFkZC1hcHQtcmVwb3NpdG9yeSAiZGViIFthcmNoPWFtZDY0XSBodHRwczovL2Rvd25sb2FkLmRvY2tlci5jb20vbGludXgvdWJ1bnR1IGJpb25pYyBzdGFibGUiCmFwdCB1cGRhdGUKYXB0IGluc3RhbGwgLXkgZG9ja2VyLWNlPTE4LjA2LjJ+Y2V+My0wfnVidW50dQpjYXQgPiAvZXRjL2RvY2tlci9kYWVtb24uanNvbiA8PEVPRgp7CiAgImV4ZWMtb3B0cyI6IFsibmF0aXZlLmNncm91cGRyaXZlcj1zeXN0ZW1kIl0sCiAgImxvZy1kcml2ZXIiOiAianNvbi1maWxlIiwKICAibG9nLW9wdHMiOiB7CiAgICAibWF4LXNpemUiOiAiMTAwbSIKICB9LAogICJzdG9yYWdlLWRyaXZlciI6ICJvdmVybGF5MiIKfQpFT0YKbWtkaXIgLXAgL2V0Yy9zeXN0ZW1kL3N5c3RlbS9kb2NrZXIuc2VydmljZS5kCnN5c3RlbWN0bCBkYWVtb24tcmVsb2FkCnN5c3RlbWN0bCByZXN0YXJ0IGRvY2tlcgpzd2Fwb2ZmIC1hCm12IC9ldGMvZnN0YWIgL2V0Yy9mc3RhYi5vbGQgJiYgZ3JlcCAtdiBzd2FwIC9ldGMvZnN0YWIub2xkID4gL2V0Yy9mc3RhYgprdWJlYWRtIGpvaW4gLS1kaXNjb3ZlcnktdG9rZW4tdW5zYWZlLXNraXAtY2EtdmVyaWZpY2F0aW9uIC0tdG9rZW4ge3suQm9vdHN0cmFwVG9rZW5JRH19Lnt7LkJvb3RzdHJhcFRva2VuU2VjcmV0fX0ge3suQVBJU2VydmVyRW5kcG9pbnR9fQo=" +const cloudinitDefault = 
"IyEvYmluL2Jhc2gKZXhwb3J0IERFQklBTl9GUk9OVEVORD1ub25pbnRlcmFjdGl2ZQphcHQtZ2V0IHVwZGF0ZSAmJiBhcHQtZ2V0IGluc3RhbGwgLXkgYXB0LXRyYW5zcG9ydC1odHRwcyBjYS1jZXJ0aWZpY2F0ZXMgY3VybCBzb2Z0d2FyZS1wcm9wZXJ0aWVzLWNvbW1vbgpjdXJsIC1mc1NMIGh0dHBzOi8vZG93bmxvYWQuZG9ja2VyLmNvbS9saW51eC91YnVudHUvZ3BnIHwgYXB0LWtleSBhZGQgLQpjdXJsIC1zIGh0dHBzOi8vcGFja2FnZXMuY2xvdWQuZ29vZ2xlLmNvbS9hcHQvZG9jL2FwdC1rZXkuZ3BnIHwgYXB0LWtleSBhZGQgLQpjYXQgPDxFT0YgPi9ldGMvYXB0L3NvdXJjZXMubGlzdC5kL2t1YmVybmV0ZXMubGlzdApkZWIgaHR0cHM6Ly9hcHQua3ViZXJuZXRlcy5pby8ga3ViZXJuZXRlcy14ZW5pYWwgbWFpbgpFT0YKYWRkLWFwdC1yZXBvc2l0b3J5ICAgImRlYiBbYXJjaD1hbWQ2NF0gaHR0cHM6Ly9kb3dubG9hZC5kb2NrZXIuY29tL2xpbnV4L3VidW50dSAgICQobHNiX3JlbGVhc2UgLWNzKSAgIHN0YWJsZSIKYXB0LWdldCB1cGRhdGUKYXB0LWdldCB1cGdyYWRlIC15CmFwdC1nZXQgaW5zdGFsbCAteSBrdWJlbGV0PTEuMTcuNC0wMCBrdWJlYWRtPTEuMTcuNC0wMCBrdWJlY3RsPTEuMTcuNC0wMAphcHQtbWFyayBob2xkIGt1YmVsZXQga3ViZWFkbSBrdWJlY3RsCmN1cmwgLWZzU0wgaHR0cHM6Ly9kb3dubG9hZC5kb2NrZXIuY29tL2xpbnV4L3VidW50dS9ncGcgfCBhcHQta2V5IGFkZCAtCmFkZC1hcHQtcmVwb3NpdG9yeSAiZGViIFthcmNoPWFtZDY0XSBodHRwczovL2Rvd25sb2FkLmRvY2tlci5jb20vbGludXgvdWJ1bnR1IGJpb25pYyBzdGFibGUiCmFwdCB1cGRhdGUKYXB0IGluc3RhbGwgLXkgZG9ja2VyLWNlPTE4LjA2LjJ+Y2V+My0wfnVidW50dQpjYXQgPiAvZXRjL2RvY2tlci9kYWVtb24uanNvbiA8PEVPRgp7CiAgImV4ZWMtb3B0cyI6IFsibmF0aXZlLmNncm91cGRyaXZlcj1zeXN0ZW1kIl0sCiAgImxvZy1kcml2ZXIiOiAianNvbi1maWxlIiwKICAibG9nLW9wdHMiOiB7CiAgICAibWF4LXNpemUiOiAiMTAwbSIKICB9LAogICJzdG9yYWdlLWRyaXZlciI6ICJvdmVybGF5MiIKfQpFT0YKbWtkaXIgLXAgL2V0Yy9zeXN0ZW1kL3N5c3RlbS9kb2NrZXIuc2VydmljZS5kCnN5c3RlbWN0bCBkYWVtb24tcmVsb2FkCnN5c3RlbWN0bCByZXN0YXJ0IGRvY2tlcgpzd2Fwb2ZmIC1hCm12IC9ldGMvZnN0YWIgL2V0Yy9mc3RhYi5vbGQgJiYgZ3JlcCAtdiBzd2FwIC9ldGMvZnN0YWIub2xkID4gL2V0Yy9mc3RhYgpjYXQgPDxFT0YgfCB0ZWUgL2V0Yy9kZWZhdWx0L2t1YmVsZXQKS1VCRUxFVF9FWFRSQV9BUkdTPS0tY2xvdWQtcHJvdmlkZXI9ZXh0ZXJuYWwgLS1ub2RlLWxhYmVscz1wb29sPXt7Lk5vZGVHcm91cH19CkVPRgprdWJlYWRtIGpvaW4gLS1kaXNjb3ZlcnktdG9rZW4tdW5zYWZlLXNraXAtY2EtdmVyaWZpY2F0aW9uIC0tdG9rZW4ge3suQm9vdHN0cmFwVG9rZW5JRH19Lnt7LkJvb3RzdHJhcFRva2VuU2VjcmV
0fX0ge3suQVBJU2VydmVyRW5kcG9pbnR9fQo=" func newTestPacketManagerRest(t *testing.T, url string) *packetManagerRest { manager := &packetManagerRest{ - baseURL: url, - clusterName: "cluster1", - projectID: "3d27fd13-0466-4878-be22-9a4b5595a3df", - apiServerEndpoint: "147.75.102.15:6443", - facility: "ams1", - os: "ubuntu_18_04", - plan: "t1.small.x86", - billing: "hourly", - cloudinit: cloudinitDefault, - reservation: "prefer", - hostnamePattern: "k8s-{{.ClusterName}}-{{.NodeGroup}}-{{.RandString8}}", + packetManagerNodePools: map[string]*packetManagerNodePool{ + "default": { + baseURL: url, + clusterName: "cluster2", + projectID: "3d27fd13-0466-4878-be22-9a4b5595a3df", + apiServerEndpoint: "147.75.102.15:6443", + facility: "ams1", + plan: "t1.small.x86", + os: "ubuntu_18_04", + billing: "hourly", + cloudinit: cloudinitDefault, + reservation: "prefer", + hostnamePattern: "k8s-{{.ClusterName}}-{{.NodeGroup}}-{{.RandString8}}", + }, + "pool2": { + baseURL: url, + clusterName: "cluster2", + projectID: "3d27fd13-0466-4878-be22-9a4b5595a3df", + apiServerEndpoint: "147.75.102.15:6443", + facility: "ams1", + plan: "c1.small.x86", + os: "ubuntu_18_04", + billing: "hourly", + cloudinit: cloudinitDefault, + reservation: "prefer", + hostnamePattern: "k8s-{{.ClusterName}}-{{.NodeGroup}}-{{.RandString8}}", + }, + }, } return manager } @@ -66,13 +84,13 @@ func TestListPacketDevices(t *testing.T) { } else { // Set up a mock Packet API m = newTestPacketManagerRest(t, server.URL) - server.On("handle", "/projects/"+m.projectID+"/devices").Return(listPacketDevicesResponse).Times(2) + server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return(listPacketDevicesResponse).Times(2) } _, err := m.listPacketDevices() assert.NoError(t, err) - c, err := m.nodeGroupSize("pool1") + c, err := m.nodeGroupSize("pool3") assert.NoError(t, err) assert.Equal(t, int(1), c) // One device in nodepool diff --git 
a/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go b/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go index c05f5de14d0c..dba5a5647611 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_node_group_test.go @@ -29,6 +29,13 @@ import ( "github.com/stretchr/testify/mock" ) +const getPacketDeviceResponsePool2 = ` +{"id":"0f5609af-1c27-451b-8edd-a1283f2c9440","short_id":"0f5609af","hostname":"k8s-cluster2-pool2-jssxcyzz","description":null,"state":"active","tags":["k8s-cluster-cluster2","k8s-nodepool-pool2"]} +` +const getPacketDeviceResponsePool3 = ` +{"id":"8fa90049-e715-4794-ba31-81c1c78cee84","short_id":"8fa90049","hostname":"k8s-cluster2-pool3-xpnrwgdf","description":null,"state":"active","tags":["k8s-cluster-cluster2","k8s-nodepool-pool3"]} +` + func TestIncreaseDecreaseSize(t *testing.T) { var m *packetManagerRest server := NewHttpServerMock() @@ -40,51 +47,107 @@ func TestIncreaseDecreaseSize(t *testing.T) { } else { // Set up a mock Packet API m = newTestPacketManagerRest(t, server.URL) - server.On("handle", "/projects/"+m.projectID+"/devices").Return(listPacketDevicesResponse).Times(4) - server.On("handle", "/projects/"+m.projectID+"/devices").Return(listPacketDevicesResponseAfterCreate).Times(2) - server.On("handle", "/projects/"+m.projectID+"/devices").Return(listPacketDevicesResponse) + server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return(listPacketDevicesResponse).Times(4) + server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return(listPacketDevicesResponseAfterIncreasePool3).Times(3) + server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return(listPacketDevicesResponseAfterIncreasePool2).Times(1) + server.On("handle", "/devices/0f5609af-1c27-451b-8edd-a1283f2c9440").Return(getPacketDeviceResponsePool2).Times(1) + 
server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return(listPacketDevicesResponseAfterIncreasePool2).Times(4) + server.On("handle", "/devices/8fa90049-e715-4794-ba31-81c1c78cee84").Return(getPacketDeviceResponsePool3).Times(1) + server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return(listPacketDevicesResponseAfterIncreasePool2).Times(2) + server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return(listPacketDevicesResponse).Times(2) } clusterUpdateLock := sync.Mutex{} - ng := &packetNodeGroup{ + ngPool2 := &packetNodeGroup{ + packetManager: m, + id: "pool2", + clusterUpdateMutex: &clusterUpdateLock, + minSize: 0, + maxSize: 10, + targetSize: new(int), + waitTimeStep: 30 * time.Second, + deleteBatchingDelay: 2 * time.Second, + } + + ngPool3 := &packetNodeGroup{ packetManager: m, - id: "pool1", + id: "pool3", clusterUpdateMutex: &clusterUpdateLock, - minSize: 1, + minSize: 0, maxSize: 10, targetSize: new(int), waitTimeStep: 30 * time.Second, deleteBatchingDelay: 2 * time.Second, } - n1, err := ng.Nodes() + n1Pool2, err := ngPool2.packetManager.getNodeNames(ngPool2.id) assert.NoError(t, err) - assert.Equal(t, int(1), len(n1)) + assert.Equal(t, int(0), len(n1Pool2)) - // Try to increase pool with negative size, this should return an error - err = ng.IncreaseSize(-1) + n1Pool3, err := ngPool3.packetManager.getNodeNames(ngPool3.id) + assert.NoError(t, err) + assert.Equal(t, int(1), len(n1Pool3)) + + existingNodesPool2 := make(map[string]bool) + existingNodesPool3 := make(map[string]bool) + + for _, node := range n1Pool2 { + existingNodesPool2[node] = true + } + + for _, node := range n1Pool3 { + existingNodesPool3[node] = true + } + + // Try to increase pool3 with negative size, this should return an error + err = ngPool3.IncreaseSize(-1) assert.Error(t, err) - // Now try to increase the pool size by 2, that should work - err = ng.IncreaseSize(2) + // 
Now try to increase the pool3 size by 1, that should work + err = ngPool3.IncreaseSize(1) assert.NoError(t, err) if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 { // If testing with actual API give it some time until the nodes bootstrap time.Sleep(420 * time.Second) } - n2, err := ng.packetManager.getNodeNames(ng.id) + + n2Pool3, err := ngPool3.packetManager.getNodeNames(ngPool3.id) assert.NoError(t, err) - // Assert that the nodepool size is now 3 - assert.Equal(t, int(3), len(n2)) + // Assert that the nodepool3 size is now 2 + assert.Equal(t, int(2), len(n2Pool3)) + + // Now try to increase the pool2 size by 1, that should work + err = ngPool2.IncreaseSize(1) + assert.NoError(t, err) + + if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 { + // If testing with actual API give it some time until the nodes bootstrap + time.Sleep(420 * time.Second) + } + + n2Pool2, err := ngPool2.packetManager.getNodeNames(ngPool2.id) + assert.NoError(t, err) + // Assert that the nodepool2 size is now 1 + assert.Equal(t, int(1), len(n2Pool2)) // Let's try to delete the new nodes - nodes := []*apiv1.Node{} - for _, node := range n2 { - if node != "k8s-worker-1" { - nodes = append(nodes, BuildTestNode(node, 1000, 1000)) + nodesPool2 := []*apiv1.Node{} + nodesPool3 := []*apiv1.Node{} + for _, node := range n2Pool2 { + if _, ok := existingNodesPool2[node]; !ok { + nodesPool2 = append(nodesPool2, BuildTestNode(node, 1000, 1000)) + } + } + for _, node := range n2Pool3 { + if _, ok := existingNodesPool3[node]; !ok { + nodesPool3 = append(nodesPool3, BuildTestNode(node, 1000, 1000)) } } - err = ng.DeleteNodes(nodes) + + err = ngPool2.DeleteNodes(nodesPool2) + assert.NoError(t, err) + + err = ngPool3.DeleteNodes(nodesPool3) assert.NoError(t, err) // Wait a few seconds if talking to the actual Packet API @@ -92,9 +155,14 @@ func TestIncreaseDecreaseSize(t *testing.T) { time.Sleep(10 * time.Second) } - // Make sure that there were no errors and the nodepool size is once again 1 - n3, err := ng.Nodes() + // 
Make sure that there were no errors and the nodepool2 size is once again 0 + n3Pool2, err := ngPool2.packetManager.getNodeNames(ngPool2.id) + assert.NoError(t, err) + assert.Equal(t, int(0), len(n3Pool2)) + + // Make sure that there were no errors and the nodepool3 size is once again 1 + n3Pool3, err := ngPool3.packetManager.getNodeNames(ngPool3.id) assert.NoError(t, err) - assert.Equal(t, int(1), len(n3)) + assert.Equal(t, int(1), len(n3Pool3)) mock.AssertExpectationsForObjects(t, server) } From 1e862661243e042ddcc15a0b78a7dc85755f8eac Mon Sep 17 00:00:00 2001 From: v-pap Date: Tue, 21 Apr 2020 20:21:41 +0000 Subject: [PATCH 4/4] Add price support in Packet --- .../cloudprovider/packet/README.md | 62 ++++++++++-- .../cluster-autoscaler-deployment.yaml | 11 ++- .../examples/cluster-autoscaler-secret.yaml | 1 + .../packet/packet_cloud_provider.go | 4 +- .../cloudprovider/packet/packet_manager.go | 2 +- .../packet/packet_manager_rest.go | 22 ++--- .../packet/packet_price_model.go | 97 +++++++++++++++++++ .../packet/packet_price_model_test.go | 67 +++++++++++++ 8 files changed, 241 insertions(+), 25 deletions(-) create mode 100644 cluster-autoscaler/cloudprovider/packet/packet_price_model.go create mode 100644 cluster-autoscaler/cloudprovider/packet/packet_price_model_test.go diff --git a/cluster-autoscaler/cloudprovider/packet/README.md b/cluster-autoscaler/cloudprovider/packet/README.md index 44dbcd6b1f47..d78ada775d75 100644 --- a/cluster-autoscaler/cloudprovider/packet/README.md +++ b/cluster-autoscaler/cloudprovider/packet/README.md @@ -1,8 +1,8 @@ # Cluster Autoscaler for Packet The cluster autoscaler for [Packet](https://packet.com) worker nodes performs -autoscaling within any specified nodepool. It will run as a `Deployment` in -your cluster. The nodepool is specified using tags on Packet. +autoscaling within any specified nodepools. It will run as a `Deployment` in +your cluster. The nodepools are specified using tags on Packet. 
This README will go over some of the necessary steps required to get the cluster autoscaler up and running. @@ -27,10 +27,12 @@ In the above file you can modify the following fields: | cluster-autoscaler-cloud-config | Global/plan | The Packet plan (aka size/flavor) for new nodes in the nodepool (eg: t1.small.x86) | | cluster-autoscaler-cloud-config | Global/billing | The billing interval for new nodes (default: hourly) | | cluster-autoscaler-cloud-config | Global/os | The OS image to use for new nodes (default: ubuntu_18_04). If you change this also update cloudinit. | -| cluster-autoscaler-cloud-config | Global/cloudinit | The base64 encoded [user data](https://support.packet.com/kb/articles/user-data) submitted when provisioning devices. In the example file, the default value has been tested with Ubuntu 18.04 to install Docker & kubelet and then to bootstrap the node into the cluster using kubeadm. For a different base OS or bootstrap method, this needs to be customized accordingly. | +| cluster-autoscaler-cloud-config | Global/cloudinit | The base64 encoded [user data](https://support.packet.com/kb/articles/user-data) submitted when provisioning devices. In the example file, the default value has been tested with Ubuntu 18.04 to install Docker & kubelet and then to bootstrap the node into the cluster using kubeadm. kubeadm, kubelet, and kubectl are pinned to version 1.17.4. For a different base OS or bootstrap method, this needs to be customized accordingly. | | cluster-autoscaler-cloud-config | Global/reservation | The values "require" or "prefer" will request the next available hardware reservation for new devices in selected facility & plan. 
If no hardware reservations match, "require" will trigger a failure, while "prefer" will launch on-demand devices instead (default: none) | | cluster-autoscaler-cloud-config | Global/hostname-pattern | The pattern for the names of new Packet devices (default: "k8s-{{.ClusterName}}-{{.NodeGroup}}-{{.RandString8}}" ) | +You can always update the secret with more nodepool definitions (with different plans etc.) as shown in the example, but you should always provide a default nodepool configuration. + ## Configure nodepool and cluster names using Packet tags The Packet API does not yet have native support for groups or pools of devices. So we use tags to specify them. Each Packet device that's a member of the "cluster1" cluster should have the tag k8s-cluster-cluster1. The devices that are members of the "pool1" nodepool should also have the tag k8s-nodepool-pool1. Once you have a Kubernetes cluster running on Packet, use the Packet Portal or API to tag the nodes accordingly. @@ -41,10 +43,40 @@ The deployment in `examples/cluster-autoscaler-deployment.yaml` can be used, but the arguments passed to the autoscaler will need to be changed to match your cluster. -| Argument | Usage | -|------------------|------------------------------------------------------------------------------------------------------------| -| --cluster-name | The name of your Kubernetes cluster. It should correspond to the tags that have been applied to the nodes. | -| --nodes | Of the form `min:max:NodepoolName`. For multiple nodepools you can add the same argument multiple times. E.g. for pool1, pool2 you would add `--nodes=0:10:pool1` and `--nodes=0:10:pool2` | +| Argument | Usage | +|-----------------------|------------------------------------------------------------------------------------------------------------| +| --cluster-name | The name of your Kubernetes cluster. It should correspond to the tags that have been applied to the nodes. | +| --nodes | Of the form `min:max:NodepoolName`. 
For multiple nodepools you can add the same argument multiple times. E.g. for pool1, pool2 you would add `--nodes=0:10:pool1` and `--nodes=0:10:pool2`. In addition, each node provisioned by the autoscaler will have a label with key: `pool` and with value: `NodepoolName`. These labels can be useful when there is a need to target specific nodepools. | +| --expander=price | This is an optional argument which allows the cluster-autoscaler to take into account the pricing of the Packet nodes when scaling with multiple nodepools. | + +## Target Specific Nodepools (New!) + +In case you want to target a specific nodepool(s) for e.g. a deployment, you can add a `nodeAffinity` with the key `pool` and with value the nodepool name that you want to target. This functionality is not backwards compatible, which means that nodes provisioned with older cluster-autoscaler images won't have the key `pool`. But you can overcome this limitation by manually adding the correct labels. Here are some examples: + +Target a nodepool with a specific name: +``` +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: pool + operator: In + values: + - pool3 +``` +Target a nodepool with a specific Packet instance: +``` +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/instance-type + operator: In + values: + - t1.small.x86 +``` ## Notes @@ -52,4 +84,18 @@ The autoscaler will not remove nodes which have non-default kube-system pods. This prevents the node that the autoscaler is running on from being scaled down. If you are deploying the autoscaler into a cluster which already has more than one node, it is best to deploy it onto any node which already has non-default kube-system pods, -to minimise the number of nodes which cannot be removed when scaling. +to minimise the number of nodes which cannot be removed when scaling. 
For this reason in +the provided example the autoscaler pod has a node affinity which forces it to deploy on +the master node. + +### Changes + +1. It is now possible to use multiple nodepools, scale nodepools to 0 nodes and prioritize scaling of specific nodepools by taking into account the pricing of the Packet instances. + +2. In order to take advantage of the new features mentioned above, you might need to update the cloud-config and the autoscaler deployment as shown in the examples. For example, the default/global cloud-config is applied to all the nodepools and if you want to override it for a specific nodepool you have to modify the cloud-config according to the examples. + +3. You can target specific nodepools, as described above. + +4. Cloud inits in the examples have pinned versions for Kubernetes in order to minimize potential incompatibilities as a result of nodes provisioned with different Kubernetes versions. + +5. In the provided cluster-autoscaler deployment example, the autoscaler pod has a node affinity which forces it to deploy on the master node, so that the cluster-autoscaler can scale down all of the worker nodes. Without this change there was a possibility for the cluster-autoscaler to be deployed on a worker node that could not be downscaled. 
\ No newline at end of file diff --git a/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-deployment.yaml b/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-deployment.yaml index 69de583db995..ba447ed86659 100644 --- a/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-deployment.yaml +++ b/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-deployment.yaml @@ -50,11 +50,14 @@ rules: resources: ["statefulsets", "replicasets", "daemonsets"] verbs: ["watch", "list", "get"] - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] + resources: ["storageclasses", "csinodes"] verbs: ["watch", "list", "get"] - apiGroups: ["batch", "extensions"] resources: ["jobs"] verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["*"] --- apiVersion: rbac.authorization.k8s.io/v1 @@ -136,10 +139,10 @@ spec: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - - key: dedicated + - key: node-role.kubernetes.io/master operator: In values: - - master + - true serviceAccountName: cluster-autoscaler containers: - name: cluster-autoscaler @@ -172,7 +175,9 @@ spec: - --cluster-name=cluster1 - --cloud-config=/config/cloud-config - --cloud-provider=packet + - --expander=price - --nodes=0:10:pool1 + - --nodes=0:10:pool2 - --scale-down-unneeded-time=1m0s - --scale-down-delay-after-add=1m0s - --scale-down-unready-time=1m0s diff --git a/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-secret.yaml b/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-secret.yaml index bf4fa57049b9..4cb68c49c32f 100644 --- a/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-secret.yaml +++ b/cluster-autoscaler/cloudprovider/packet/examples/cluster-autoscaler-secret.yaml @@ -18,6 +18,7 @@ stringData: # kubeadm, kubelet, kubectl are pinned to version 1.17.4 # The version can be altered by decoding 
the cloudinit and updating it to # the desired version + # In the cloud-config you must always have a valid default nodegroup cloud-config: |- [nodegroupdef "default"] project-id=YOUR_PACKET_PROJECT_ID diff --git a/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go b/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go index e783be90d6b8..32e28e7e6ad8 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_cloud_provider.go @@ -109,9 +109,9 @@ func (pcp *packetCloudProvider) NodeGroupForNode(node *apiv1.Node) (cloudprovide return nil, fmt.Errorf("Could not find group for node: %s", node.Spec.ProviderID) } -// Pricing is not implemented. +// Pricing returns pricing model for this cloud provider or error if not available. func (pcp *packetCloudProvider) Pricing() (cloudprovider.PricingModel, errors.AutoscalerError) { - return nil, cloudprovider.ErrNotImplemented + return &PacketPriceModel{}, nil } // GetAvailableMachineTypes is not implemented. 
diff --git a/cluster-autoscaler/cloudprovider/packet/packet_manager.go b/cluster-autoscaler/cloudprovider/packet/packet_manager.go index f10671c21705..edb17d1a9eca 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_manager.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_manager.go @@ -45,7 +45,7 @@ type packetManager interface { getNodes(nodegroup string) ([]string, error) getNodeNames(nodegroup string) ([]string, error) deleteNodes(nodegroup string, nodes []NodeRef, updatedNodeCount int) error - templateNodeInfo(nodegroup string) (*schedulernodeinfo.NodeInfo, error) + templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error) NodeGroupForNode(labels map[string]string, nodeId string) (string, error) } diff --git a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go index 67437d641e61..9963b2142dd1 100644 --- a/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go +++ b/cluster-autoscaler/cloudprovider/packet/packet_manager_rest.go @@ -39,8 +39,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" klog "k8s.io/klog/v2" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" - schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" + schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" ) type instanceType struct { @@ -597,15 +596,16 @@ func (mgr *packetManagerRest) deleteNodes(nodegroup string, nodes []NodeRef, upd return nil } -func buildGenericLabels(nodegroup string, instanceType string) map[string]string { +// BuildGenericLabels builds basic labels for Packet nodes +func BuildGenericLabels(nodegroup string, instanceType string) map[string]string { result := make(map[string]string) - result[kubeletapis.LabelArch] = "amd64" - result[kubeletapis.LabelOS] = "linux" + //result[kubeletapis.LabelArch] = "amd64" + //result[kubeletapis.LabelOS] = "linux" result[apiv1.LabelInstanceType] 
= instanceType - result[apiv1.LabelZoneRegion] = "" - result[apiv1.LabelZoneFailureDomain] = "0" - result[apiv1.LabelHostname] = "" + //result[apiv1.LabelZoneRegion] = "" + //result[apiv1.LabelZoneFailureDomain] = "0" + //result[apiv1.LabelHostname] = "" result["pool"] = nodegroup return result @@ -613,7 +613,7 @@ func buildGenericLabels(nodegroup string, instanceType string) map[string]string // templateNodeInfo returns a NodeInfo with a node template based on the packet plan // that is used to create nodes in a given node group. -func (mgr *packetManagerRest) templateNodeInfo(nodegroup string) (*schedulernodeinfo.NodeInfo, error) { +func (mgr *packetManagerRest) templateNodeInfo(nodegroup string) (*schedulerframework.NodeInfo, error) { node := apiv1.Node{} nodeName := fmt.Sprintf("%s-asg-%d", nodegroup, rand.Int63()) node.ObjectMeta = metav1.ObjectMeta{ @@ -638,9 +638,9 @@ func (mgr *packetManagerRest) templateNodeInfo(nodegroup string) (*schedulernode node.Status.Conditions = cloudprovider.BuildReadyConditions() // GenericLabels - node.Labels = cloudprovider.JoinStringMaps(node.Labels, buildGenericLabels(nodegroup, mgr.getNodePoolDefinition(nodegroup).plan)) + node.Labels = cloudprovider.JoinStringMaps(node.Labels, BuildGenericLabels(nodegroup, mgr.getNodePoolDefinition(nodegroup).plan)) - nodeInfo := schedulernodeinfo.NewNodeInfo(cloudprovider.BuildKubeProxy(nodegroup)) + nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(nodegroup)) nodeInfo.SetNode(&node) return nodeInfo, nil } diff --git a/cluster-autoscaler/cloudprovider/packet/packet_price_model.go b/cluster-autoscaler/cloudprovider/packet/packet_price_model.go new file mode 100644 index 000000000000..753b0b26f26c --- /dev/null +++ b/cluster-autoscaler/cloudprovider/packet/packet_price_model.go @@ -0,0 +1,97 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package packet + +import ( + "math" + "time" + + apiv1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/utils/units" +) + +// PacketPriceModel implements PriceModel interface for Packet. +type PacketPriceModel struct { +} + +const ( + cpuPricePerHour = 0.005208 + memoryPricePerHourPerGb = 0.003815 +) + +var instancePrices = map[string]float64{ + "c1.large.arm": 0.5000, + "c1.small.x86": 0.4000, + "c1.xlarge.x86": 1.7500, + "c2.large.arm": 1.0000, + "c2.medium.x86": 1.0000, + "c3.medium.x86": 1.1000, + "c3.small.x86": 0.5000, + "g2.large.x86": 5.0000, + "m1.xlarge.x86": 1.7000, + "m2.xlarge.x86": 2.0000, + "n2.xlarge.x86": 2.2500, + "s1.large.x86": 1.5000, + "s3.xlarge.x86": 1.8500, + "t1.small.x86": 0.0700, + "t3.small.x86": 0.3500, + "x1.small.x86": 0.4000, + "x2.xlarge.x86": 2.5000, +} + +// NodePrice returns a price of running the given node for a given period of time. +// All prices are in USD. 
+func (model *PacketPriceModel) NodePrice(node *apiv1.Node, startTime time.Time, endTime time.Time) (float64, error) { + price := 0.0 + if node.Labels != nil { + if machineType, found := node.Labels[apiv1.LabelInstanceType]; found { + if pricePerHour, found := instancePrices[machineType]; found { + price = pricePerHour * getHours(startTime, endTime) + } + } + } + return price, nil +} + +func getHours(startTime time.Time, endTime time.Time) float64 { + minutes := math.Ceil(float64(endTime.Sub(startTime)) / float64(time.Minute)) + hours := minutes / 60.0 + return hours +} + +// PodPrice returns a theoretical minimum price of running a pod for a given +// period of time on a perfectly matching machine. +func (model *PacketPriceModel) PodPrice(pod *apiv1.Pod, startTime time.Time, endTime time.Time) (float64, error) { + price := 0.0 + for _, container := range pod.Spec.Containers { + price += getBasePrice(container.Resources.Requests, startTime, endTime) + } + return price, nil +} + +func getBasePrice(resources apiv1.ResourceList, startTime time.Time, endTime time.Time) float64 { + if len(resources) == 0 { + return 0 + } + hours := getHours(startTime, endTime) + price := 0.0 + cpu := resources[apiv1.ResourceCPU] + mem := resources[apiv1.ResourceMemory] + price += float64(cpu.MilliValue()) / 1000.0 * cpuPricePerHour * hours + price += float64(mem.Value()) / float64(units.GiB) * memoryPricePerHourPerGb * hours + return price +} diff --git a/cluster-autoscaler/cloudprovider/packet/packet_price_model_test.go b/cluster-autoscaler/cloudprovider/packet/packet_price_model_test.go new file mode 100644 index 000000000000..50dd8fa80cb0 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/packet/packet_price_model_test.go @@ -0,0 +1,67 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package packet + +import ( + "math" + "testing" + "time" + + . "k8s.io/autoscaler/cluster-autoscaler/utils/test" + "k8s.io/autoscaler/cluster-autoscaler/utils/units" + + "github.com/stretchr/testify/assert" +) + +func TestGetNodePrice(t *testing.T) { + labelsPool1 := BuildGenericLabels("pool1", "t1.small.x86") + plan1 := InstanceTypes["t1.small.x86"] + + labelsPool2 := BuildGenericLabels("pool2", "c1.xlarge.x86") + plan2 := InstanceTypes["c1.xlarge.x86"] + + model := &PacketPriceModel{} + now := time.Now() + + node1 := BuildTestNode("node1", plan1.CPU*1000, plan1.MemoryMb*1024*1024) + node1.Labels = labelsPool1 + price1, err := model.NodePrice(node1, now, now.Add(time.Hour)) + assert.NoError(t, err) + + node2 := BuildTestNode("node2", plan2.CPU*1000, plan2.MemoryMb*1024*1024) + node2.Labels = labelsPool2 + price2, err := model.NodePrice(node2, now, now.Add(time.Hour)) + assert.NoError(t, err) + + assert.True(t, price1 == 0.07) + assert.True(t, price2 == 1.75) +} + +func TestGetPodPrice(t *testing.T) { + pod1 := BuildTestPod("pod1", 100, 500*units.MiB) + pod2 := BuildTestPod("pod2", 2*100, 2*500*units.MiB) + + model := &PacketPriceModel{} + now := time.Now() + + price1, err := model.PodPrice(pod1, now, now.Add(time.Hour)) + assert.NoError(t, err) + price2, err := model.PodPrice(pod2, now, now.Add(time.Hour)) + assert.NoError(t, err) + // 2 times bigger pod should cost twice as much. + assert.True(t, math.Abs(price1*2-price2) < 0.001) +}