From a14c569b0a9472218eac8f994756df240ea8fb8a Mon Sep 17 00:00:00 2001 From: JunYang Date: Thu, 27 May 2021 19:31:49 +0800 Subject: [PATCH] Implement using /metrics/resource Kubelet endpoint Signed-off-by: JunYang --- Makefile | 5 +- go.mod | 2 - go.sum | 5 +- manifests/base/rbac.yaml | 2 +- pkg/scraper/client/interface.go | 2 +- .../client/{summary => resource}/client.go | 111 +-- pkg/scraper/client/resource/decode.go | 158 ++++ pkg/scraper/client/resource/decode_test.go | 151 ++++ pkg/scraper/client/summary/decode.go | 113 --- pkg/scraper/client/summary/decode_test.go | 146 ---- pkg/scraper/client/summary/types.go | 90 --- pkg/scraper/client/summary/types_easyjson.go | 678 ------------------ pkg/scraper/client/summary/types_test.go | 487 ------------- pkg/server/config.go | 4 +- pkg/storage/node_test.go | 30 + pkg/storage/pod_test.go | 29 + 16 files changed, 434 insertions(+), 1579 deletions(-) rename pkg/scraper/client/{summary => resource}/client.go (71%) create mode 100644 pkg/scraper/client/resource/decode.go create mode 100644 pkg/scraper/client/resource/decode_test.go delete mode 100644 pkg/scraper/client/summary/decode.go delete mode 100644 pkg/scraper/client/summary/decode_test.go delete mode 100644 pkg/scraper/client/summary/types.go delete mode 100644 pkg/scraper/client/summary/types_easyjson.go delete mode 100644 pkg/scraper/client/summary/types_test.go diff --git a/Makefile b/Makefile index 3ecd6e8630..728b7d0dfd 100644 --- a/Makefile +++ b/Makefile @@ -245,7 +245,7 @@ verify-deps: # Generated # --------- -generated_files=pkg/scraper/client/summary/types_easyjson.go pkg/api/generated/openapi/zz_generated.openapi.go +generated_files=pkg/api/generated/openapi/zz_generated.openapi.go .PHONY: verify-generated verify-generated: update-generated @@ -253,9 +253,6 @@ verify-generated: update-generated .PHONY: update-generated update-generated: - # pkg/scraper/client/summary/types_easyjson.go: - go install -mod=readonly github.com/mailru/easyjson/easyjson - $(GOPATH)/bin/easyjson -all pkg/scraper/client/summary/types.go # pkg/api/generated/openapi/zz_generated.openapi.go go install -mod=readonly k8s.io/kube-openapi/cmd/openapi-gen $(GOPATH)/bin/openapi-gen --logtostderr -i k8s.io/metrics/pkg/apis/metrics/v1beta1,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/api/resource,k8s.io/apimachinery/pkg/version -p pkg/api/generated/openapi/ -O zz_generated.openapi -o $(REPO_DIR) -h $(REPO_DIR)/scripts/boilerplate.go.txt -r /dev/null diff --git a/go.mod b/go.mod index bb2b135805..fa3d544822 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,6 @@ require ( github.com/go-openapi/spec v0.20.3 github.com/google/addlicense v0.0.0-20210428195630-6d92264d7170 github.com/google/go-cmp v0.5.5 - github.com/mailru/easyjson v0.7.7 github.com/onsi/ginkgo v1.13.0 github.com/onsi/gomega v1.11.0 github.com/prometheus/common v0.25.0 @@ -21,7 +20,6 @@ require ( k8s.io/klog/hack/tools v0.0.0-20210512110738-02ca14bed863 k8s.io/klog/v2 v2.8.0 k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 - k8s.io/kubelet v0.21.1 k8s.io/metrics v0.21.1 sigs.k8s.io/mdtoc v1.0.1 ) diff --git a/go.sum b/go.sum index a7bfaa6ff8..497d4ac584 100644 --- a/go.sum +++ b/go.sum @@ -329,9 +329,8 @@ github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson 
v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -878,8 +877,6 @@ k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= -k8s.io/kubelet v0.21.1 h1:JeZsCr3GN2Kjg3gn21jLU10RFu0APUK/vdpFWa8P8Nw= -k8s.io/kubelet v0.21.1/go.mod h1:poOR6Iaa5WqytFOp0egXFV8c2XTLFxaXTdj5njUlnVY= k8s.io/metrics v0.21.1 h1:Xlfrjdda/WWHxG6/h6ACykxb1RByy5EIT862Vc81IYQ= k8s.io/metrics v0.21.1/go.mod h1:pyDVLsLe++FIGDBFU80NcW4xMFsuiVTWL8Zfi7+PpNo= k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= diff --git a/manifests/base/rbac.yaml b/manifests/base/rbac.yaml index 04c675fcec..7de1646147 100644 --- a/manifests/base/rbac.yaml +++ b/manifests/base/rbac.yaml @@ -55,7 +55,7 @@ rules: resources: - pods - nodes - - nodes/stats + - nodes/metrics - namespaces - configmaps verbs: diff --git a/pkg/scraper/client/interface.go b/pkg/scraper/client/interface.go index 7a7d564dac..65048a8734 100644 --- a/pkg/scraper/client/interface.go +++ b/pkg/scraper/client/interface.go @@ -23,6 +23,6 @@ import ( // KubeletMetricsInterface knows how to fetch metrics from the Kubelet type KubeletMetricsInterface interface { - // GetSummary fetches summary metrics from the given Kubelet + // GetMetrics fetches Resource metrics from the given Kubelet GetMetrics(ctx context.Context, node *v1.Node) (*storage.MetricsBatch, error) } diff --git a/pkg/scraper/client/summary/client.go b/pkg/scraper/client/resource/client.go similarity index 71% rename from pkg/scraper/client/summary/client.go rename to pkg/scraper/client/resource/client.go index b63ca8a3f5..ef3d0149cd 100644 --- a/pkg/scraper/client/summary/client.go +++ b/pkg/scraper/client/resource/client.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Kubernetes Authors. +// Copyright 2021 The Kubernetes Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,13 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package summary
+package resource
 
 import (
 	"bytes"
 	"context"
 	"fmt"
 	"io"
+
+	"github.com/prometheus/common/expfmt"
+	"github.com/prometheus/common/model"
+
 	"net"
 	"net/http"
 	"net/url"
@@ -27,16 +31,21 @@ import (
 	"k8s.io/client-go/rest"
 
 	"sigs.k8s.io/metrics-server/pkg/scraper/client"
-	"sigs.k8s.io/metrics-server/pkg/storage"
-
-	"github.com/mailru/easyjson"
+	"sigs.k8s.io/metrics-server/pkg/utils"
 
 	corev1 "k8s.io/api/core/v1"
-
-	"sigs.k8s.io/metrics-server/pkg/utils"
 )
 
+type kubeletClient struct {
+	defaultPort       int
+	useNodeStatusPort bool
+	client            *http.Client
+	scheme            string
+	addrResolver      utils.NodeAddressResolver
+	buffers           sync.Pool
+}
+
 func NewClient(config client.KubeletClientConfig) (*kubeletClient, error) {
 	transport, err := rest.TransportFor(&config.Client)
 	if err != nil {
@@ -60,42 +69,9 @@ func NewClient(config client.KubeletClientConfig) (*kubeletClient, error) {
 	}, nil
 }
 
-type kubeletClient struct {
-	defaultPort       int
-	useNodeStatusPort bool
-	client            *http.Client
-	scheme            string
-	addrResolver      utils.NodeAddressResolver
-	buffers           sync.Pool
-}
-
 var _ client.KubeletMetricsInterface = (*kubeletClient)(nil)
 
-func (kc *kubeletClient) makeRequestAndGetValue(client *http.Client, req *http.Request, value easyjson.Unmarshaler) error {
-	// TODO(directxman12): support validating certs by hostname
-	response, err := client.Do(req)
-	if err != nil {
-		return err
-	}
-	defer response.Body.Close()
-	b := kc.getBuffer()
-	defer kc.returnBuffer(b)
-	_, err = io.Copy(b, response.Body)
-	if err != nil {
-		return err
-	}
-	body := b.Bytes()
-	if response.StatusCode != http.StatusOK {
-		return fmt.Errorf("GET %q: bad status code %q", req.URL, response.Status)
-	}
-
-	err = easyjson.Unmarshal(body, value)
-	if err != nil {
-		return fmt.Errorf("GET %q: failed to parse output: %w", req.URL, err)
-	}
-	return nil
-}
-
+// GetMetrics gets metrics from the Kubelet /metrics/resource endpoint.
 func (kc *kubeletClient) GetMetrics(ctx context.Context, node *corev1.Node) (*storage.MetricsBatch, error) {
 	port := kc.defaultPort
 	nodeStatusPort := int(node.Status.DaemonEndpoints.KubeletEndpoint.Port)
@@ -107,23 +83,56 @@ func (kc *kubeletClient) GetMetrics(ctx context.Context, node *corev1.Node) (*st
 		return nil, err
 	}
 	url := url.URL{
-		Scheme:   kc.scheme,
-		Host:     net.JoinHostPort(addr, strconv.Itoa(port)),
-		Path:     "/stats/summary",
-		RawQuery: "only_cpu_and_memory=true",
+		Scheme: kc.scheme,
+		Host:   net.JoinHostPort(addr, strconv.Itoa(port)),
+		Path:   "/metrics/resource",
 	}
 	req, err := http.NewRequest("GET", url.String(), nil)
 	if err != nil {
 		return nil, err
 	}
-	summary := &Summary{}
-	client := kc.client
-	if client == nil {
-		client = http.DefaultClient
+	samples, err := kc.sendRequestDecode(kc.client, req.WithContext(ctx))
+	if err != nil {
+		return nil, err
+	}
+	return decodeBatch(samples, node.Name), err
+}
+
+func (kc *kubeletClient) sendRequestDecode(client *http.Client, req *http.Request) ([]*model.Sample, error) {
+	response, err := client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer response.Body.Close()
+	if response.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("request failed - %q", response.Status)
+	}
+	b := kc.getBuffer()
+	defer kc.returnBuffer(b)
+	_, err = io.Copy(b, response.Body)
+	if err != nil {
+		return nil, err
+	}
+	dec := expfmt.NewDecoder(b, expfmt.FmtText)
+	decoder := expfmt.SampleDecoder{
+		Dec:  dec,
+		Opts: &expfmt.DecodeOptions{},
+	}
+
+	var samples []*model.Sample
+	for {
+		var v model.Vector
+		if err := decoder.Decode(&v); err != nil {
+			if err == io.EOF {
+				// Expected loop termination condition.
+				break
+			}
+			return nil, err
+		}
+		samples = append(samples, v...)
 	}
-	err = kc.makeRequestAndGetValue(client, req.WithContext(ctx), summary)
-	return decodeBatch(summary), err
+	return samples, nil
 }
 
 func (kc *kubeletClient) getBuffer() *bytes.Buffer {
diff --git a/pkg/scraper/client/resource/decode.go b/pkg/scraper/client/resource/decode.go
new file mode 100644
index 0000000000..d39512cb8d
--- /dev/null
+++ b/pkg/scraper/client/resource/decode.go
@@ -0,0 +1,158 @@
+// Copyright 2021 The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource
+
+import (
+	"time"
+
+	"github.com/prometheus/common/model"
+	apitypes "k8s.io/apimachinery/pkg/types"
+	"k8s.io/klog/v2"
+
+	"sigs.k8s.io/metrics-server/pkg/storage"
+)
+
+const (
+	nameSpaceMetricName         = "namespace"
+	podNameMetricName           = "pod"
+	containerNameMetricName     = "container"
+	nodeCpuUsageMetricName      = model.LabelValue("node_cpu_usage_seconds_total")
+	nodeMemUsageMetricName      = model.LabelValue("node_memory_working_set_bytes")
+	containerCpuUsageMetricName = model.LabelValue("container_cpu_usage_seconds_total")
+	containerMemUsageMetricName = model.LabelValue("container_memory_working_set_bytes")
+)
+
+func decodeBatch(samples []*model.Sample, nodeName string) *storage.MetricsBatch {
+	if len(samples) == 0 {
+		return nil
+	}
+	res := &storage.MetricsBatch{
+		Nodes: make(map[string]storage.MetricsPoint),
+		Pods:  make(map[apitypes.NamespacedName]storage.PodMetricsPoint),
+	}
+	node := &storage.MetricsPoint{}
+	pods := make(map[apitypes.NamespacedName]storage.PodMetricsPoint)
+	for _, sample := range samples {
+		// Parse the sample according to its metric name.
+		switch sample.Metric[model.MetricNameLabel] {
+		case nodeCpuUsageMetricName:
+			parseNodeCpuUsageMetrics(sample, node)
+		case nodeMemUsageMetricName:
+			parseNodeMemUsageMetrics(sample, node)
+		case containerCpuUsageMetricName:
+			parseContainerCpuMetrics(sample, pods)
+		case containerMemUsageMetricName:
+			parseContainerMemMetrics(sample, pods)
+		}
+	}
+
+	if node.Timestamp.IsZero() || node.CumulativeCpuUsed == 0 || node.MemoryUsage == 0 {
+		klog.V(1).InfoS("Failed getting complete node metric", "node", nodeName, "metric", node)
+		node = nil
+	} else {
+		res.Nodes[nodeName] = *node
+	}
+
+	for podRef, podMetric := range pods {
+		if len(podMetric.Containers) != 0 {
+			// Drop the pod if any of its container metrics is incomplete.
+
+			pm := storage.PodMetricsPoint{
+				Containers: checkContainerMetricsTimestamp(podMetric),
+			}
+			if pm.Containers == nil {
+				klog.V(1).InfoS("Failed getting complete Pod metric", "pod", klog.KRef(podRef.Namespace, podRef.Name))
+			} else {
+				res.Pods[podRef] = pm
+			}
+		}
+	}
+	return res
+}
+
+func getNamespaceName(sample *model.Sample) apitypes.NamespacedName {
+	return apitypes.NamespacedName{Namespace: string(sample.Metric[nameSpaceMetricName]), Name: string(sample.Metric[podNameMetricName])}
+}
+
+func parseNodeCpuUsageMetrics(sample *model.Sample, node *storage.MetricsPoint) {
+	// node_cpu_usage_seconds_total is reported in seconds; convert to nanoseconds.
+	node.CumulativeCpuUsed = uint64(sample.Value * 1e9)
+	if sample.Timestamp != 0 {
+		// The sample timestamp is in milliseconds; convert to nanoseconds.
+		node.Timestamp = time.Unix(0, int64(sample.Timestamp*1e6))
+	}
+}
+
+func parseNodeMemUsageMetrics(sample *model.Sample, node *storage.MetricsPoint) {
+	node.MemoryUsage = uint64(sample.Value)
+	if node.Timestamp.IsZero() && sample.Timestamp != 0 {
+		// The sample timestamp is in milliseconds; convert to nanoseconds.
+		node.Timestamp = time.Unix(0, int64(sample.Timestamp*1e6))
+	}
+}
+
+func parseContainerCpuMetrics(sample *model.Sample, pods map[apitypes.NamespacedName]storage.PodMetricsPoint) {
+	namespaceName := getNamespaceName(sample)
+	containerName := string(sample.Metric[containerNameMetricName])
+	if _, findPod := pods[namespaceName]; !findPod {
+		pods[namespaceName] = storage.PodMetricsPoint{Containers: make(map[string]storage.MetricsPoint)}
+	}
+	if _, findContainer := pods[namespaceName].Containers[containerName]; !findContainer {
+		pods[namespaceName].Containers[containerName] = storage.MetricsPoint{}
+	}
+	// container_cpu_usage_seconds_total is reported in seconds; convert to nanoseconds.
+	containerMetrics := pods[namespaceName].Containers[containerName]
+	containerMetrics.CumulativeCpuUsed = uint64(sample.Value * 1e9)
+	if sample.Timestamp != 0 {
+		// The sample timestamp is in milliseconds; convert to nanoseconds.
+		containerMetrics.Timestamp = time.Unix(0, int64(sample.Timestamp*1e6))
+	}
+	pods[namespaceName].Containers[containerName] = containerMetrics
+}
+
+func parseContainerMemMetrics(sample *model.Sample, pods map[apitypes.NamespacedName]storage.PodMetricsPoint) {
+	namespaceName := getNamespaceName(sample)
+	containerName := string(sample.Metric[containerNameMetricName])
+
+	if _, findPod := pods[namespaceName]; !findPod {
+		pods[namespaceName] = storage.PodMetricsPoint{Containers: make(map[string]storage.MetricsPoint)}
+	}
+	if _, findContainer := pods[namespaceName].Containers[containerName]; !findContainer {
+		pods[namespaceName].Containers[containerName] = storage.MetricsPoint{}
+	}
+	containerMetrics := pods[namespaceName].Containers[containerName]
+	containerMetrics.MemoryUsage = uint64(sample.Value)
+	if containerMetrics.Timestamp.IsZero() && sample.Timestamp != 0 {
+		// The sample timestamp is in milliseconds; convert to nanoseconds.
+		containerMetrics.Timestamp = time.Unix(0, int64(sample.Timestamp*1e6))
+	}
+	pods[namespaceName].Containers[containerName] = containerMetrics
+}
+
+func checkContainerMetricsTimestamp(podMetric storage.PodMetricsPoint) map[string]storage.MetricsPoint {
+	podMetrics := make(map[string]storage.MetricsPoint)
+	for containerName, containerMetric := range podMetric.Containers {
+		if containerMetric != (storage.MetricsPoint{}) {
+			// Drop the whole pod when a container metric is missing its timestamp, CPU, or memory value.
+			if containerMetric.Timestamp.IsZero() || containerMetric.CumulativeCpuUsed == 0 || containerMetric.MemoryUsage == 0 {
+				klog.V(1).InfoS("Failed getting complete container metric", "containerName", containerName, "containerMetric", containerMetric)
+				return nil
+			} else {
+				podMetrics[containerName] = containerMetric
+			}
+		}
+	}
+	return podMetrics
+}
diff --git a/pkg/scraper/client/resource/decode_test.go b/pkg/scraper/client/resource/decode_test.go
new file mode 100644
index 0000000000..0f2b2bc3fd
--- /dev/null
+++ b/pkg/scraper/client/resource/decode_test.go
@@ -0,0 +1,151 @@
+// Copyright 2021 The Kubernetes Authors.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource + +import ( + "testing" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/prometheus/common/model" + + apitypes "k8s.io/apimachinery/pkg/types" +) + +func TestDecode(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Decode Suite") +} + +var _ = Describe("Decode", func() { + var ( + samples []*model.Sample + ) + BeforeEach(func() { + scrapeTime := time.Now() + + sample1 := model.Sample{Metric: model.Metric{model.MetricNameLabel: "node_cpu_usage_seconds_total"}, + Value: 100, + Timestamp: model.Time(scrapeTime.Add(100*time.Millisecond).UnixNano() / 1e6), + } + sample2 := model.Sample{Metric: model.Metric{model.MetricNameLabel: "node_memory_working_set_bytes"}, + Value: 200, + Timestamp: model.Time(scrapeTime.Add(100*time.Millisecond).UnixNano() / 1e6), + } + sample3 := model.Sample{Metric: model.Metric{model.MetricNameLabel: "container_cpu_usage_seconds_total", "container": "container1", "namespace": "ns1", "pod": "pod1"}, + Value: 300, + Timestamp: model.Time(scrapeTime.Add(10*time.Millisecond).Unix() / 1e6), + } + sample4 := model.Sample{Metric: model.Metric{model.MetricNameLabel: "container_memory_working_set_bytes", "container": "container1", "namespace": "ns1", "pod": "pod1"}, + Value: 400, + Timestamp: model.Time(scrapeTime.Add(10*time.Millisecond).Unix() / 1e6), + } + sample5 := model.Sample{Metric: model.Metric{model.MetricNameLabel: "container_cpu_usage_seconds_total", "container": "container2", "namespace": "ns1", "pod": "pod1"}, + Value: 500, + Timestamp: model.Time(scrapeTime.Add(20*time.Millisecond).Unix() / 1e6), + } + sample6 := model.Sample{Metric: model.Metric{model.MetricNameLabel: "container_memory_working_set_bytes", "container": "container2", "namespace": "ns1", "pod": "pod1"}, + Value: 600, + Timestamp: model.Time(scrapeTime.Add(20*time.Millisecond).Unix() / 1e6), + } + sample7 := model.Sample{Metric: model.Metric{model.MetricNameLabel: "container_cpu_usage_seconds_total", "container": "container1", "namespace": "ns1", "pod": "pod2"}, + Value: 700, + Timestamp: model.Time(scrapeTime.Add(30*time.Millisecond).Unix() / 1e6), + } + sample8 := model.Sample{Metric: model.Metric{model.MetricNameLabel: "container_memory_working_set_bytes", "container": "container1", "namespace": "ns1", "pod": "pod2"}, + Value: 800, + Timestamp: model.Time(scrapeTime.Add(30*time.Millisecond).Unix() / 1e6), + } + sample9 := model.Sample{Metric: model.Metric{model.MetricNameLabel: "container_cpu_usage_seconds_total", "container": "container1", "namespace": "ns2", "pod": "pod1"}, + Value: 900, + Timestamp: model.Time(scrapeTime.Add(40*time.Millisecond).Unix() / 1e6), + } + sample10 := model.Sample{Metric: model.Metric{model.MetricNameLabel: "container_memory_working_set_bytes", "container": "container1", "namespace": "ns2", "pod": "pod1"}, + Value: 1000, + Timestamp: model.Time(scrapeTime.Add(40*time.Millisecond).Unix() / 1e6), + } + sample11 := 
model.Sample{Metric: model.Metric{model.MetricNameLabel: "container_cpu_usage_seconds_total", "container": "container1", "namespace": "ns3", "pod": "pod1"}, + Value: 1100, + Timestamp: model.Time(scrapeTime.Add(50*time.Millisecond).Unix() / 1e6), + } + sample12 := model.Sample{Metric: model.Metric{model.MetricNameLabel: "container_memory_working_set_bytes", "container": "container1", "namespace": "ns3", "pod": "pod1"}, + Value: 1200, + Timestamp: model.Time(scrapeTime.Add(50*time.Millisecond).Unix() / 1e6), + } + samples = []*model.Sample{} + samples = append(samples, &sample1, &sample2, &sample3, &sample4, &sample5, &sample6, &sample7, &sample8, &sample9, &sample10, &sample11, &sample12) + }) + + It("should use the decode time from the CPU", func() { + By("removing some times from the data") + + By("decoding") + batch := decodeBatch(samples, "node1") + + By("verifying that the scrape time is as expected") + Expect(batch.Nodes["node1"].Timestamp).To(Equal(time.Unix(0, int64(samples[0].Timestamp*1e6)))) + Expect(batch.Pods[apitypes.NamespacedName{Namespace: "ns1", Name: "pod1"}].Containers["container1"].Timestamp).To(Equal(time.Unix(0, int64(samples[2].Timestamp*1e6)))) + Expect(batch.Pods[apitypes.NamespacedName{Namespace: "ns1", Name: "pod2"}].Containers["container1"].Timestamp).To(Equal(time.Unix(0, int64(samples[6].Timestamp*1e6)))) + }) + + It("should use the decode CumulativeCpuUsed MemoryUsage and Timestamp when StartTime is zero", func() { + + By("decoding") + batch := decodeBatch(samples, "node1") + + By("verifying that the CumulativeCpuUsed MemoryUsage and Timestamp are as expected") + Expect(batch.Nodes["node1"].CumulativeCpuUsed).To(Equal(uint64(100 * 1e9))) + Expect(batch.Nodes["node1"].MemoryUsage).To(Equal(uint64(200))) + Expect(batch.Nodes["node1"].StartTime).To(Equal(time.Time{})) + Expect(batch.Nodes["node1"].Timestamp).To(Equal(time.Unix(0, int64(samples[0].Timestamp*1e6)))) + Expect(batch.Pods[apitypes.NamespacedName{Namespace: "ns1", Name: "pod1"}].Containers["container1"].CumulativeCpuUsed).To(Equal(uint64(300 * 1e9))) + Expect(batch.Pods[apitypes.NamespacedName{Namespace: "ns1", Name: "pod1"}].Containers["container1"].MemoryUsage).To(Equal(uint64(400))) + Expect(batch.Pods[apitypes.NamespacedName{Namespace: "ns1", Name: "pod1"}].Containers["container1"].StartTime).To(Equal(time.Time{})) + Expect(batch.Pods[apitypes.NamespacedName{Namespace: "ns1", Name: "pod1"}].Containers["container1"].Timestamp).To(Equal(time.Unix(0, int64(samples[2].Timestamp*1e6)))) + Expect(batch.Pods[apitypes.NamespacedName{Namespace: "ns1", Name: "pod2"}].Containers["container1"].CumulativeCpuUsed).To(Equal(uint64(700 * 1e9))) + Expect(batch.Pods[apitypes.NamespacedName{Namespace: "ns1", Name: "pod2"}].Containers["container1"].MemoryUsage).To(Equal(uint64(800))) + Expect(batch.Pods[apitypes.NamespacedName{Namespace: "ns1", Name: "pod2"}].Containers["container1"].StartTime).To(Equal(time.Time{})) + Expect(batch.Pods[apitypes.NamespacedName{Namespace: "ns1", Name: "pod2"}].Containers["container1"].Timestamp).To(Equal(time.Unix(0, int64(samples[6].Timestamp*1e6)))) + }) + + It("should continue on missing CPU or memory metrics", func() { + By("removing some data from the raw samples") + samples[6].Value = 0 + samples[11].Value = 0 + samples2 := []*model.Sample{} + samples2 = append(samples2, samples[0], samples[2], samples[3], samples[5], samples[6], samples[7], samples[8], samples[10], samples[11]) + By("decoding") + batch := decodeBatch(samples2, "node1") + + By("verifying that the batch has all 
the data, save for what was missing") + Expect(batch.Pods).To(HaveLen(0)) + Expect(batch.Nodes).To(HaveLen(0)) + }) + + It("should skip on cumulative CPU equal zero", func() { + By("setting CPU cumulative value to zero") + samples[0].Value = 0 + samples[2].Value = 0 + + By("decoding") + batch := decodeBatch(samples, "node1") + + By("verifying that zero records were deleted") + Expect(batch.Pods).To(HaveLen(3)) + Expect(batch.Nodes).To(HaveLen(0)) + }) +}) diff --git a/pkg/scraper/client/summary/decode.go b/pkg/scraper/client/summary/decode.go deleted file mode 100644 index 2acbe97f45..0000000000 --- a/pkg/scraper/client/summary/decode.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package summary - -import ( - "fmt" - - "k8s.io/klog/v2" - - apitypes "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/metrics-server/pkg/storage" -) - -func decodeBatch(summary *Summary) *storage.MetricsBatch { - res := &storage.MetricsBatch{ - Nodes: make(map[string]storage.MetricsPoint, 1), - Pods: make(map[apitypes.NamespacedName]storage.PodMetricsPoint, len(summary.Pods)), - } - decodeNodeStats(&summary.Node, res) - for _, pod := range summary.Pods { - decodePodStats(&pod, res) - } - return res -} - -func decodeNodeStats(nodeStats *NodeStats, batch *storage.MetricsBatch) { - if nodeStats.StartTime.IsZero() || nodeStats.CPU == nil || nodeStats.CPU.Time.IsZero() { - // if we can't get a timestamp, assume bad data in general - klog.V(1).InfoS("Failed getting node metric timestamp", "node", klog.KRef("", nodeStats.NodeName)) - return - } - point := storage.MetricsPoint{ - StartTime: nodeStats.StartTime.Time, - Timestamp: nodeStats.CPU.Time.Time, - } - if err := decodeCPU(&point.CumulativeCpuUsed, nodeStats.CPU); err != nil { - klog.V(1).InfoS("Skipped node CPU metric", "node", klog.KRef("", nodeStats.NodeName), "err", err) - return - } - if err := decodeMemory(&point.MemoryUsage, nodeStats.Memory); err != nil { - klog.V(1).InfoS("Skipped node memory metric", "node", klog.KRef("", nodeStats.NodeName), "err", err) - return - } - batch.Nodes[nodeStats.NodeName] = point -} - -// NB: we explicitly want to discard pods with partial results, since -// the horizontal pod autoscaler takes special action when a pod is missing -// metrics (and zero CPU or memory does not count as "missing metrics") -func decodePodStats(podStats *PodStats, batch *storage.MetricsBatch) { - // completely overwrite data in the target - pod := storage.PodMetricsPoint{ - Containers: make(map[string]storage.MetricsPoint, len(podStats.Containers)), - } - for _, container := range podStats.Containers { - if container.StartTime.IsZero() || container.CPU == nil || container.CPU.Time.IsZero() { - // if we can't get a timestamp, assume bad data in general - klog.V(1).InfoS("Failed getting container metric timestamp", "containerName", container.Name, "pod", klog.KRef(podStats.PodRef.Namespace, podStats.PodRef.Name)) - return - - } - point := storage.MetricsPoint{ - 
StartTime: container.StartTime.Time, - Timestamp: container.CPU.Time.Time, - } - if err := decodeCPU(&point.CumulativeCpuUsed, container.CPU); err != nil { - klog.V(1).InfoS("Skipped container CPU metric", "containerName", container.Name, "pod", klog.KRef(podStats.PodRef.Namespace, podStats.PodRef.Name), "err", err) - return - } - if err := decodeMemory(&point.MemoryUsage, container.Memory); err != nil { - klog.V(1).InfoS("Skipped container memory metric", "containerName", container.Name, "pod", klog.KRef(podStats.PodRef.Namespace, podStats.PodRef.Name), "err", err) - return - } - pod.Containers[container.Name] = point - } - batch.Pods[apitypes.NamespacedName{Name: podStats.PodRef.Name, Namespace: podStats.PodRef.Namespace}] = pod -} - -func decodeCPU(target *uint64, cpuStats *CPUStats) error { - if cpuStats == nil || cpuStats.UsageCoreNanoSeconds == nil { - return fmt.Errorf("missing usageCoreNanoSeconds value") - } - - if *cpuStats.UsageCoreNanoSeconds == 0 { - return fmt.Errorf("Got UsageCoreNanoSeconds equal zero") - } - *target = *cpuStats.UsageCoreNanoSeconds - return nil -} - -func decodeMemory(target *uint64, memStats *MemoryStats) error { - if memStats == nil || memStats.WorkingSetBytes == nil { - return fmt.Errorf("missing workingSetBytes value") - } - if *memStats.WorkingSetBytes == 0 { - return fmt.Errorf("Got WorkingSetBytes equal zero") - } - - *target = *memStats.WorkingSetBytes - return nil -} diff --git a/pkg/scraper/client/summary/decode_test.go b/pkg/scraper/client/summary/decode_test.go deleted file mode 100644 index 28f868acc1..0000000000 --- a/pkg/scraper/client/summary/decode_test.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package summary - -import ( - "testing" - "time" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - apitypes "k8s.io/apimachinery/pkg/types" -) - -func TestDecode(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Decode Suite") -} - -var _ = Describe("Decode", func() { - var ( - summary *Summary - ) - BeforeEach(func() { - scrapeTime := time.Now() - summary = &Summary{ - Node: NodeStats{ - NodeName: "node1", - CPU: cpuStats(100, scrapeTime.Add(100*time.Millisecond)), - Memory: memStats(200, scrapeTime.Add(200*time.Millisecond)), - StartTime: metav1.Time{Time: scrapeTime.Add(-100 * time.Millisecond)}, - }, - Pods: []PodStats{ - podStats("ns1", "pod1", - containerStats("container1", 300, 400, scrapeTime.Add(10*time.Millisecond)), - containerStats("container2", 500, 600, scrapeTime.Add(20*time.Millisecond))), - podStats("ns1", "pod2", - containerStats("container1", 700, 800, scrapeTime.Add(30*time.Millisecond))), - podStats("ns2", "pod1", - containerStats("container1", 900, 1000, scrapeTime.Add(40*time.Millisecond))), - podStats("ns3", "pod1", - containerStats("container1", 1100, 1200, scrapeTime.Add(50*time.Millisecond))), - }, - } - }) - - It("should use the decode time from the CPU", func() { - By("removing some times from the data") - - By("decoding") - batch := decodeBatch(summary) - - By("verifying that the scrape time is as expected") - Expect(batch.Nodes["node1"].Timestamp).To(Equal(summary.Node.CPU.Time.Time)) - Expect(batch.Pods[apitypes.NamespacedName{Namespace: "ns1", Name: "pod1"}].Containers["container1"].Timestamp).To(Equal(summary.Pods[0].Containers[0].CPU.Time.Time)) - Expect(batch.Pods[apitypes.NamespacedName{Namespace: "ns1", Name: "pod2"}].Containers["container1"].Timestamp).To(Equal(summary.Pods[1].Containers[0].CPU.Time.Time)) - }) - - It("should use the decode start time", func() { - By("removing some times from the data") - - By("decoding") - batch := decodeBatch(summary) - - By("verifying that the scrape time is as expected") - Expect(batch.Nodes["node1"].StartTime).To(Equal(summary.Node.StartTime.Time)) - Expect(batch.Pods[apitypes.NamespacedName{Namespace: "ns1", Name: "pod1"}].Containers["container1"].StartTime).To(Equal(summary.Pods[0].Containers[0].StartTime.Time)) - Expect(batch.Pods[apitypes.NamespacedName{Namespace: "ns1", Name: "pod2"}].Containers["container1"].StartTime).To(Equal(summary.Pods[1].Containers[0].StartTime.Time)) - }) - - It("should continue on missing CPU or memory metrics", func() { - By("removing some data from the raw summary") - summary.Node.Memory = nil - summary.Pods[0].Containers[1].CPU = nil - summary.Pods[1].Containers[0].CPU.UsageCoreNanoSeconds = nil - summary.Pods[2].Containers[0].Memory = nil - summary.Pods[3].Containers[0].Memory.WorkingSetBytes = nil - - By("decoding") - batch := decodeBatch(summary) - - By("verifying that the batch has all the data, save for what was missing") - Expect(batch.Pods).To(HaveLen(0)) - Expect(batch.Nodes).To(HaveLen(0)) - }) - - It("should skip on cumulative CPU equal zero", func() { - By("setting CPU cumulative value to zero") - var zero uint64 = 0 - summary.Node.CPU.UsageCoreNanoSeconds = &zero - summary.Pods[0].Containers[0].CPU.UsageCoreNanoSeconds = &zero - - By("decoding") - batch := decodeBatch(summary) - - By("verifying that zero records were deleted") - Expect(batch.Pods).To(HaveLen(3)) - Expect(batch.Nodes).To(HaveLen(0)) - }) -}) - -func cpuStats(usageCoreNanoSeconds uint64, ts time.Time) *CPUStats { - return &CPUStats{ - Time: metav1.Time{Time: ts}, - UsageCoreNanoSeconds: 
&usageCoreNanoSeconds, - } -} - -func memStats(wssBytes uint64, ts time.Time) *MemoryStats { - return &MemoryStats{ - Time: metav1.Time{Time: ts}, - WorkingSetBytes: &wssBytes, - } -} - -func podStats(namespace, name string, containers ...ContainerStats) PodStats { - return PodStats{ - PodRef: PodReference{ - Name: name, - Namespace: namespace, - }, - Containers: containers, - } -} - -func containerStats(name string, cpu, mem uint64, startTime time.Time) ContainerStats { - return ContainerStats{ - Name: name, - CPU: cpuStats(cpu, startTime.Add(2*time.Millisecond)), - Memory: memStats(mem, startTime.Add(4*time.Millisecond)), - StartTime: metav1.Time{Time: startTime}, - } -} diff --git a/pkg/scraper/client/summary/types.go b/pkg/scraper/client/summary/types.go deleted file mode 100644 index 2be3eaf4bc..0000000000 --- a/pkg/scraper/client/summary/types.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package summary - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Summary is a top-level container for holding NodeStats and PodStats. -type Summary struct { - // Overall node stats. - Node NodeStats `json:"node"` - // Per-pod stats. - Pods []PodStats `json:"pods"` -} - -// NodeStats holds node-level unprocessed sample stats. -type NodeStats struct { - // Reference to the measured Node. - NodeName string `json:"nodeName"` - // Start time of system - StartTime metav1.Time `json:"startTime"` - // Stats pertaining to CPU resources. - // +optional - CPU *CPUStats `json:"cpu,omitempty"` - // Stats pertaining to memory (RAM) resources. - // +optional - Memory *MemoryStats `json:"memory,omitempty"` -} - -// PodStats holds pod-level unprocessed sample stats. -type PodStats struct { - // Reference to the measured Pod. - PodRef PodReference `json:"podRef"` - // Stats of containers in the measured pod. - // +patchMergeKey=name - // +patchStrategy=merge - Containers []ContainerStats `json:"containers" patchStrategy:"merge" patchMergeKey:"name"` -} - -// ContainerStats holds container-level unprocessed sample stats. -type ContainerStats struct { - // Reference to the measured container. - Name string `json:"name"` - // Start time of container - StartTime metav1.Time `json:"startTime"` - // Stats pertaining to CPU resources. - // +optional - CPU *CPUStats `json:"cpu,omitempty"` - // Stats pertaining to memory (RAM) resources. - // +optional - Memory *MemoryStats `json:"memory,omitempty"` -} - -// PodReference contains enough information to locate the referenced pod. -type PodReference struct { - Name string `json:"name"` - Namespace string `json:"namespace"` -} - -// CPUStats contains data about CPU usage. -type CPUStats struct { - // The time at which these stats were updated. - Time metav1.Time `json:"time"` - // Cumulative CPU usage (sum of all cores) since object creation. 
- // +optional - UsageCoreNanoSeconds *uint64 `json:"usageCoreNanoSeconds,omitempty"` -} - -// MemoryStats contains data about memory usage. -type MemoryStats struct { - // The time at which these stats were updated. - Time metav1.Time `json:"time"` - // The amount of working set memory. This includes recently accessed memory, - // dirty memory, and kernel memory. WorkingSetBytes is <= UsageBytes - // +optional - WorkingSetBytes *uint64 `json:"workingSetBytes,omitempty"` -} diff --git a/pkg/scraper/client/summary/types_easyjson.go b/pkg/scraper/client/summary/types_easyjson.go deleted file mode 100644 index 64ce97ac56..0000000000 --- a/pkg/scraper/client/summary/types_easyjson.go +++ /dev/null @@ -1,678 +0,0 @@ -// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT. - -package summary - -import ( - json "encoding/json" - easyjson "github.com/mailru/easyjson" - jlexer "github.com/mailru/easyjson/jlexer" - jwriter "github.com/mailru/easyjson/jwriter" -) - -// suppress unused package warning -var ( - _ *json.RawMessage - _ *jlexer.Lexer - _ *jwriter.Writer - _ easyjson.Marshaler -) - -func easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary(in *jlexer.Lexer, out *Summary) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "node": - (out.Node).UnmarshalEasyJSON(in) - case "pods": - if in.IsNull() { - in.Skip() - out.Pods = nil - } else { - in.Delim('[') - if out.Pods == nil { - if !in.IsDelim(']') { - out.Pods = make([]PodStats, 0, 1) - } else { - out.Pods = []PodStats{} - } - } else { - out.Pods = (out.Pods)[:0] - } - for !in.IsDelim(']') { - var v1 PodStats - (v1).UnmarshalEasyJSON(in) - out.Pods = append(out.Pods, v1) - in.WantComma() - } - in.Delim(']') - } - default: - in.SkipRecursive() - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} -func easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary(out *jwriter.Writer, in Summary) { - out.RawByte('{') - first := true - _ = first - { - const prefix string = ",\"node\":" - out.RawString(prefix[1:]) - (in.Node).MarshalEasyJSON(out) - } - { - const prefix string = ",\"pods\":" - out.RawString(prefix) - if in.Pods == nil && (out.Flags&jwriter.NilSliceAsEmpty) == 0 { - out.RawString("null") - } else { - out.RawByte('[') - for v2, v3 := range in.Pods { - if v2 > 0 { - out.RawByte(',') - } - (v3).MarshalEasyJSON(out) - } - out.RawByte(']') - } - } - out.RawByte('}') -} - -// MarshalJSON supports json.Marshaler interface -func (v Summary) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary(&w, v) - return w.Buffer.BuildBytes(), w.Error -} - -// MarshalEasyJSON supports easyjson.Marshaler interface -func (v Summary) MarshalEasyJSON(w *jwriter.Writer) { - easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary(w, v) -} - -// UnmarshalJSON supports json.Unmarshaler interface -func (v *Summary) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary(&r, v) - return r.Error() -} - -// UnmarshalEasyJSON supports easyjson.Unmarshaler interface -func (v *Summary) UnmarshalEasyJSON(l *jlexer.Lexer) { - 
easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary(l, v) -} -func easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary1(in *jlexer.Lexer, out *PodStats) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "podRef": - (out.PodRef).UnmarshalEasyJSON(in) - case "containers": - if in.IsNull() { - in.Skip() - out.Containers = nil - } else { - in.Delim('[') - if out.Containers == nil { - if !in.IsDelim(']') { - out.Containers = make([]ContainerStats, 0, 1) - } else { - out.Containers = []ContainerStats{} - } - } else { - out.Containers = (out.Containers)[:0] - } - for !in.IsDelim(']') { - var v4 ContainerStats - (v4).UnmarshalEasyJSON(in) - out.Containers = append(out.Containers, v4) - in.WantComma() - } - in.Delim(']') - } - default: - in.SkipRecursive() - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} -func easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary1(out *jwriter.Writer, in PodStats) { - out.RawByte('{') - first := true - _ = first - { - const prefix string = ",\"podRef\":" - out.RawString(prefix[1:]) - (in.PodRef).MarshalEasyJSON(out) - } - { - const prefix string = ",\"containers\":" - out.RawString(prefix) - if in.Containers == nil && (out.Flags&jwriter.NilSliceAsEmpty) == 0 { - out.RawString("null") - } else { - out.RawByte('[') - for v5, v6 := range in.Containers { - if v5 > 0 { - out.RawByte(',') - } - (v6).MarshalEasyJSON(out) - } - out.RawByte(']') - } - } - out.RawByte('}') -} - -// MarshalJSON supports json.Marshaler interface -func (v PodStats) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary1(&w, v) - return w.Buffer.BuildBytes(), w.Error -} - -// MarshalEasyJSON supports easyjson.Marshaler interface -func (v PodStats) MarshalEasyJSON(w *jwriter.Writer) { - easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary1(w, v) -} - -// UnmarshalJSON supports json.Unmarshaler interface -func (v *PodStats) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary1(&r, v) - return r.Error() -} - -// UnmarshalEasyJSON supports easyjson.Unmarshaler interface -func (v *PodStats) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary1(l, v) -} -func easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary2(in *jlexer.Lexer, out *PodReference) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "name": - out.Name = string(in.String()) - case "namespace": - out.Namespace = string(in.String()) - default: - in.SkipRecursive() - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} -func easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary2(out *jwriter.Writer, in PodReference) { - out.RawByte('{') - first := true - _ = first - { - const prefix string = ",\"name\":" - out.RawString(prefix[1:]) - out.String(string(in.Name)) - } - { - const prefix string = ",\"namespace\":" - 
out.RawString(prefix) - out.String(string(in.Namespace)) - } - out.RawByte('}') -} - -// MarshalJSON supports json.Marshaler interface -func (v PodReference) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary2(&w, v) - return w.Buffer.BuildBytes(), w.Error -} - -// MarshalEasyJSON supports easyjson.Marshaler interface -func (v PodReference) MarshalEasyJSON(w *jwriter.Writer) { - easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary2(w, v) -} - -// UnmarshalJSON supports json.Unmarshaler interface -func (v *PodReference) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary2(&r, v) - return r.Error() -} - -// UnmarshalEasyJSON supports easyjson.Unmarshaler interface -func (v *PodReference) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary2(l, v) -} -func easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary3(in *jlexer.Lexer, out *NodeStats) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "nodeName": - out.NodeName = string(in.String()) - case "startTime": - if data := in.Raw(); in.Ok() { - in.AddError((out.StartTime).UnmarshalJSON(data)) - } - case "cpu": - if in.IsNull() { - in.Skip() - out.CPU = nil - } else { - if out.CPU == nil { - out.CPU = new(CPUStats) - } - (*out.CPU).UnmarshalEasyJSON(in) - } - case "memory": - if in.IsNull() { - in.Skip() - out.Memory = nil - } else { - if out.Memory == nil { - out.Memory = new(MemoryStats) - } - (*out.Memory).UnmarshalEasyJSON(in) - } - default: - in.SkipRecursive() - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} -func easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary3(out *jwriter.Writer, in NodeStats) { - out.RawByte('{') - first := true - _ = first - { - const prefix string = ",\"nodeName\":" - out.RawString(prefix[1:]) - out.String(string(in.NodeName)) - } - { - const prefix string = ",\"startTime\":" - out.RawString(prefix) - out.Raw((in.StartTime).MarshalJSON()) - } - if in.CPU != nil { - const prefix string = ",\"cpu\":" - out.RawString(prefix) - (*in.CPU).MarshalEasyJSON(out) - } - if in.Memory != nil { - const prefix string = ",\"memory\":" - out.RawString(prefix) - (*in.Memory).MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// MarshalJSON supports json.Marshaler interface -func (v NodeStats) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary3(&w, v) - return w.Buffer.BuildBytes(), w.Error -} - -// MarshalEasyJSON supports easyjson.Marshaler interface -func (v NodeStats) MarshalEasyJSON(w *jwriter.Writer) { - easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary3(w, v) -} - -// UnmarshalJSON supports json.Unmarshaler interface -func (v *NodeStats) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary3(&r, v) - return r.Error() -} - -// UnmarshalEasyJSON supports easyjson.Unmarshaler interface -func (v *NodeStats) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary3(l, v) -} -func 
easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary4(in *jlexer.Lexer, out *MemoryStats) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "time": - if data := in.Raw(); in.Ok() { - in.AddError((out.Time).UnmarshalJSON(data)) - } - case "workingSetBytes": - if in.IsNull() { - in.Skip() - out.WorkingSetBytes = nil - } else { - if out.WorkingSetBytes == nil { - out.WorkingSetBytes = new(uint64) - } - *out.WorkingSetBytes = uint64(in.Uint64()) - } - default: - in.SkipRecursive() - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} -func easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary4(out *jwriter.Writer, in MemoryStats) { - out.RawByte('{') - first := true - _ = first - { - const prefix string = ",\"time\":" - out.RawString(prefix[1:]) - out.Raw((in.Time).MarshalJSON()) - } - if in.WorkingSetBytes != nil { - const prefix string = ",\"workingSetBytes\":" - out.RawString(prefix) - out.Uint64(uint64(*in.WorkingSetBytes)) - } - out.RawByte('}') -} - -// MarshalJSON supports json.Marshaler interface -func (v MemoryStats) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary4(&w, v) - return w.Buffer.BuildBytes(), w.Error -} - -// MarshalEasyJSON supports easyjson.Marshaler interface -func (v MemoryStats) MarshalEasyJSON(w *jwriter.Writer) { - easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary4(w, v) -} - -// UnmarshalJSON supports json.Unmarshaler interface -func (v *MemoryStats) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary4(&r, v) - return r.Error() -} - -// UnmarshalEasyJSON supports easyjson.Unmarshaler interface -func (v *MemoryStats) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary4(l, v) -} -func easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary5(in *jlexer.Lexer, out *ContainerStats) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "name": - out.Name = string(in.String()) - case "startTime": - if data := in.Raw(); in.Ok() { - in.AddError((out.StartTime).UnmarshalJSON(data)) - } - case "cpu": - if in.IsNull() { - in.Skip() - out.CPU = nil - } else { - if out.CPU == nil { - out.CPU = new(CPUStats) - } - (*out.CPU).UnmarshalEasyJSON(in) - } - case "memory": - if in.IsNull() { - in.Skip() - out.Memory = nil - } else { - if out.Memory == nil { - out.Memory = new(MemoryStats) - } - (*out.Memory).UnmarshalEasyJSON(in) - } - default: - in.SkipRecursive() - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} -func easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary5(out *jwriter.Writer, in ContainerStats) { - out.RawByte('{') - first := true - _ = first - { - const prefix string = ",\"name\":" - out.RawString(prefix[1:]) - out.String(string(in.Name)) - } - { - const prefix string = ",\"startTime\":" - out.RawString(prefix) - out.Raw((in.StartTime).MarshalJSON()) - } 
- if in.CPU != nil { - const prefix string = ",\"cpu\":" - out.RawString(prefix) - (*in.CPU).MarshalEasyJSON(out) - } - if in.Memory != nil { - const prefix string = ",\"memory\":" - out.RawString(prefix) - (*in.Memory).MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// MarshalJSON supports json.Marshaler interface -func (v ContainerStats) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary5(&w, v) - return w.Buffer.BuildBytes(), w.Error -} - -// MarshalEasyJSON supports easyjson.Marshaler interface -func (v ContainerStats) MarshalEasyJSON(w *jwriter.Writer) { - easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary5(w, v) -} - -// UnmarshalJSON supports json.Unmarshaler interface -func (v *ContainerStats) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary5(&r, v) - return r.Error() -} - -// UnmarshalEasyJSON supports easyjson.Unmarshaler interface -func (v *ContainerStats) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary5(l, v) -} -func easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary6(in *jlexer.Lexer, out *CPUStats) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "time": - if data := in.Raw(); in.Ok() { - in.AddError((out.Time).UnmarshalJSON(data)) - } - case "usageCoreNanoSeconds": - if in.IsNull() { - in.Skip() - out.UsageCoreNanoSeconds = nil - } else { - if out.UsageCoreNanoSeconds == nil { - out.UsageCoreNanoSeconds = new(uint64) - } - *out.UsageCoreNanoSeconds = uint64(in.Uint64()) - } - default: - in.SkipRecursive() - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} -func easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary6(out *jwriter.Writer, in CPUStats) { - out.RawByte('{') - first := true - _ = first - { - const prefix string = ",\"time\":" - out.RawString(prefix[1:]) - out.Raw((in.Time).MarshalJSON()) - } - if in.UsageCoreNanoSeconds != nil { - const prefix string = ",\"usageCoreNanoSeconds\":" - out.RawString(prefix) - out.Uint64(uint64(*in.UsageCoreNanoSeconds)) - } - out.RawByte('}') -} - -// MarshalJSON supports json.Marshaler interface -func (v CPUStats) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary6(&w, v) - return w.Buffer.BuildBytes(), w.Error -} - -// MarshalEasyJSON supports easyjson.Marshaler interface -func (v CPUStats) MarshalEasyJSON(w *jwriter.Writer) { - easyjson6601e8cdEncodeSigsK8sIoMetricsServerPkgScraperClientSummary6(w, v) -} - -// UnmarshalJSON supports json.Unmarshaler interface -func (v *CPUStats) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary6(&r, v) - return r.Error() -} - -// UnmarshalEasyJSON supports easyjson.Unmarshaler interface -func (v *CPUStats) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson6601e8cdDecodeSigsK8sIoMetricsServerPkgScraperClientSummary6(l, v) -} diff --git a/pkg/scraper/client/summary/types_test.go b/pkg/scraper/client/summary/types_test.go deleted file mode 100644 index 2ba7459e8b..0000000000 --- 
a/pkg/scraper/client/summary/types_test.go +++ /dev/null @@ -1,487 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package summary - -import ( - "encoding/json" - "fmt" - "testing" - "time" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/google/go-cmp/cmp" - "github.com/mailru/easyjson" - - apitypes "k8s.io/apimachinery/pkg/types" - "k8s.io/kubelet/pkg/apis/stats/v1alpha1" - - "sigs.k8s.io/metrics-server/pkg/storage" -) - -func TestTypes(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Decode Suite") -} - -var _ = Describe("Types", func() { - It("internal Summary should be compatible with stats.Summary", func() { - By("Unmarshaling json into stats.Summary") - stats := &v1alpha1.Summary{} - err := json.Unmarshal([]byte(data), stats) - Expect(err).NotTo(HaveOccurred()) - - By("Unmarshaling json into internal Summary") - internal := &Summary{} - err = json.Unmarshal([]byte(data), internal) - Expect(err).NotTo(HaveOccurred()) - - By("Comparing values") - err = compare(stats, internal) - Expect(err).NotTo(HaveOccurred()) - }) - - It("internal summary should include all values needed", func() { - By("Unmarshaling json into internal Summary") - internal := &Summary{} - err := easyjson.Unmarshal([]byte(data), internal) - Expect(err).NotTo(HaveOccurred()) - - By("checking decoded metrics match expected") - got := decodeBatch(internal) - diff := cmp.Diff(got, expected) - Expect(diff).To(BeEmpty()) - }) -}) - -func compare(stats *v1alpha1.Summary, internal *Summary) error { - if len(internal.Pods) != len(internal.Pods) { - return fmt.Errorf("diff: len(.Pods)") - } - for i := range stats.Pods { - if internal.Pods[i].PodRef.Name != stats.Pods[i].PodRef.Name { - return fmt.Errorf("diff: .Pods[%d].PodRef.Name ", i) - } - if internal.Pods[i].PodRef.Namespace != stats.Pods[i].PodRef.Namespace { - return fmt.Errorf("diff: stats.Pods[%d].Namespace", i) - } - if len(internal.Pods[i].Containers) != len(stats.Pods[i].Containers) { - return fmt.Errorf("diff: len(stats.Pods[%d].Containers)", i) - } - for j := range internal.Pods[i].Containers { - if internal.Pods[i].Containers[j].Name != stats.Pods[i].Containers[j].Name { - return fmt.Errorf("diff: stats.Pods[%d].Containers[%d].Name", i, j) - } - err := compareCPU(stats.Pods[i].Containers[j].CPU, internal.Pods[i].Containers[j].CPU) - if err != nil { - return fmt.Errorf("diff: stats.Pods[%d].Containers[%d].CPU%v", i, j, err) - } - err = compareMemory(stats.Pods[i].Containers[j].Memory, internal.Pods[i].Containers[j].Memory) - if err != nil { - return fmt.Errorf("diff: stats.Pods[%d].Containers[%d].Memory%v", i, j, err) - } - } - } - if internal.Node.NodeName != stats.Node.NodeName { - return fmt.Errorf("diff: .Node.NodeName") - } - err := compareCPU(stats.Node.CPU, internal.Node.CPU) - if err != nil { - return fmt.Errorf("diff: .Node.CPU%v", err) - } - err = compareMemory(stats.Node.Memory, internal.Node.Memory) - if err != nil { - 
return fmt.Errorf("diff: .Node.Memory%v", err) - } - return nil -} - -func compareCPU(stats *v1alpha1.CPUStats, internal *CPUStats) error { - if (stats == nil) != (internal == nil) { - return fmt.Errorf("== nil") - } - if stats == nil || internal == nil { - return nil - } - if internal.Time != stats.Time { - return fmt.Errorf(".Time") - } - if (internal.UsageCoreNanoSeconds == nil) != (stats.UsageCoreNanoSeconds == nil) { - return fmt.Errorf(".UsageCoreNanoSeconds") - } - if (internal.UsageCoreNanoSeconds == nil) || (stats.UsageCoreNanoSeconds == nil) { - return nil - } - if *internal.UsageCoreNanoSeconds != *stats.UsageCoreNanoSeconds { - return fmt.Errorf(".UsageCoreNanoSeconds") - } - return nil -} - -func compareMemory(stats *v1alpha1.MemoryStats, internal *MemoryStats) error { - if (stats == nil) != (internal == nil) { - return fmt.Errorf("== nil") - } - if stats == nil || internal == nil { - return nil - } - if internal.Time != stats.Time { - return fmt.Errorf(".Time") - } - if (internal.WorkingSetBytes == nil) != (stats.WorkingSetBytes == nil) { - return fmt.Errorf(".WorkingSetBytes") - } - if (internal.WorkingSetBytes == nil) || (stats.WorkingSetBytes == nil) { - return nil - } - if *internal.WorkingSetBytes != *stats.WorkingSetBytes { - return fmt.Errorf(".WorkingSetBytes") - } - return nil -} - -func BenchmarkJSONUnmarshal(b *testing.B) { - value := &Summary{} - for i := 0; i < b.N; i++ { - err := easyjson.Unmarshal([]byte(data), value) - if err != nil { - b.Error(err) - } - } -} - -var data = ` -{ - "node": { - "nodeName": "e2e-v1.17.0-control-plane", - "systemContainers": [ - { - "name": "kubelet", - "startTime": "2020-04-16T20:05:46Z", - "cpu": { - "time": "2020-04-16T20:25:30Z", - "usageNanoCores": 287620424, - "usageCoreNanoSeconds": 183912297212 - }, - "memory": { - "time": "2020-04-16T20:25:30Z", - "usageBytes": 146317312, - "workingSetBytes": 122638336, - "rssBytes": 85635072, - "pageFaults": 1757976, - "majorPageFaults": 528 - } - }, - { - "name": "pods", - "startTime": "2020-04-16T20:21:41Z", - "cpu": { - "time": "2020-04-16T20:25:28Z", - "usageNanoCores": 165934426, - "usageCoreNanoSeconds": 231508341412 - }, - "memory": { - "time": "2020-04-16T20:25:28Z", - "availableBytes": 15915753472, - "usageBytes": 752480256, - "workingSetBytes": 713609216, - "rssBytes": 381231104, - "pageFaults": 0, - "majorPageFaults": 0 - } - } - ], - "startTime": "2020-03-31T18:00:54Z", - "cpu": { - "time": "2020-04-16T20:25:28Z", - "usageNanoCores": 476553087, - "usageCoreNanoSeconds": 519978197128 - }, - "memory": { - "time": "2020-04-16T20:25:28Z", - "availableBytes": 15211810816, - "usageBytes": 1719095296, - "workingSetBytes": 1417551872, - "rssBytes": 848789504, - "pageFaults": 73326, - "majorPageFaults": 726 - }, - "network": { - "time": "2020-04-16T20:25:28Z", - "name": "eth0", - "rxBytes": 9848384, - "rxErrors": 0, - "txBytes": 72810891, - "txErrors": 0, - "interfaces": [ - { - "name": "eth0", - "rxBytes": 9848384, - "rxErrors": 0, - "txBytes": 72810891, - "txErrors": 0 - } - ] - }, - "fs": { - "time": "2020-04-16T20:25:28Z", - "availableBytes": 366430162944, - "capacityBytes": 500684595200, - "usedBytes": 108749709312, - "inodesFree": 29713960, - "inodes": 31113216, - "inodesUsed": 1399256 - }, - "runtime": { - "imageFs": { - "time": "2020-04-16T20:25:26Z", - "availableBytes": 366430162944, - "capacityBytes": 500684595200, - "usedBytes": 789861024, - "inodesFree": 29713960, - "inodes": 31113216, - "inodesUsed": 8769 - } - }, - "rlimit": { - "time": "2020-04-16T20:25:30Z", - 
"maxpid": 32768, - "curproc": 3317 - } - }, - "pods": [ - { - "podRef": { - "name": "all-fields", - "namespace": "default", - "uid": "96636a87-47f5-4970-a15e-6e7901925c90" - }, - "startTime": "2020-04-16T20:11:06Z", - "containers": [ - { - "name": "container", - "startTime": "2020-04-16T20:17:46Z", - "cpu": { - "time": "2020-04-16T20:25:30Z", - "usageNanoCores": 29713960, - "usageCoreNanoSeconds": 29328792 - }, - "memory": { - "time": "2020-04-16T20:25:30Z", - "workingSetBytes": 1449984 - }, - "rootfs": { - "time": "2020-04-16T20:25:26Z", - "availableBytes": 366430162944, - "capacityBytes": 500684595200, - "usedBytes": 24576, - "inodesFree": 29713960, - "inodes": 31113216, - "inodesUsed": 7 - }, - "logs": { - "time": "2020-04-16T20:25:30Z", - "availableBytes": 366430162944, - "capacityBytes": 500684595200, - "usedBytes": 4096, - "inodesFree": 29713960, - "inodes": 31113216, - "inodesUsed": 2 - } - } - ], - "cpu": { - "time": "2020-04-16T20:25:24Z", - "usageNanoCores": 123, - "usageCoreNanoSeconds": 54096725 - }, - "memory": { - "time": "2020-04-16T20:25:24Z", - "usageBytes": 2641920, - "workingSetBytes": 2641920, - "rssBytes": 0, - "pageFaults": 0, - "majorPageFaults": 0 - }, - "volume": [ - { - "time": "2020-04-16T20:11:49Z", - "availableBytes": 8314667008, - "capacityBytes": 8314679296, - "usedBytes": 12288, - "inodesFree": 2029942, - "inodes": 2029951, - "inodesUsed": 9, - "name": "default-token-sd9l8" - } - ], - "ephemeral-storage": { - "time": "2020-04-16T20:25:30Z", - "availableBytes": 366430162944, - "capacityBytes": 500684595200, - "usedBytes": 28672, - "inodesFree": 29713960, - "inodes": 31113216, - "inodesUsed": 9 - } - }, - { - "podRef": { - "name": "zero usageCoreNanoSeconds", - "namespace": "default" - }, - "startTime": "2020-04-16T20:11:06Z", - "containers": [ - { - "name": "container", - "startTime": "2020-04-16T20:17:46Z", - "cpu": { - "time": "2020-04-16T20:25:30Z", - "usageNanoCores": 29713960, - "usageCoreNanoSeconds": 0 - }, - "memory": { - "time": "2020-04-16T20:25:30Z", - "workingSetBytes": 1449984 - } - } - ] - }, - { - "podRef": { - "name": "no usageCoreNanoSeconds", - "namespace": "default" - }, - "startTime": "2020-04-16T20:11:06Z", - "containers": [ - { - "name": "container", - "startTime": "2020-04-16T20:17:46Z", - "cpu": { - "time": "2020-04-16T20:25:30Z", - "usageNanoCores": 29713960 - }, - "memory": { - "time": "2020-04-16T20:25:30Z", - "workingSetBytes": 1449984 - } - } - ] - }, - { - "podRef": { - "name": "no CPU", - "namespace": "default" - }, - "startTime": "2020-04-16T20:11:06Z", - "containers": [ - { - "name": "container", - "startTime": "2020-04-16T20:17:46Z", - "memory": { - "time": "2020-04-16T20:25:30Z", - "workingSetBytes": 1449984 - } - } - ] - }, - { - "podRef": { - "name": "zero workingSetBytes", - "namespace": "default" - }, - "startTime": "2020-04-16T20:11:06Z", - "containers": [ - { - "name": "container", - "startTime": "2020-04-16T20:17:46Z", - "cpu": { - "time": "2020-04-16T20:25:30Z", - "usageNanoCores": 29713960, - "usageCoreNanoSeconds": 29328792 - }, - "memory": { - "time": "2020-04-16T20:25:30Z", - "workingSetBytes": 0 - } - } - ] - }, - { - "podRef": { - "name": "no workingSetBytes", - "namespace": "default" - }, - "startTime": "2020-04-16T20:11:06Z", - "containers": [ - { - "name": "container", - "startTime": "2020-04-16T20:17:46Z", - "cpu": { - "time": "2020-04-16T20:25:30Z", - "usageNanoCores": 29713960, - "usageCoreNanoSeconds": 29328792 - }, - "memory": { - "time": "2020-04-16T20:25:30Z" - } - } - ] - }, - { - "podRef": { - 
"name": "no memory", - "namespace": "default" - }, - "startTime": "2020-04-16T20:11:06Z", - "containers": [ - { - "name": "container", - "startTime": "2020-04-16T20:17:46Z", - "cpu": { - "time": "2020-04-16T20:25:30Z", - "usageNanoCores": 29713960, - "usageCoreNanoSeconds": 29328792 - } - } - ] - } - ] -} -` - -var expected = &storage.MetricsBatch{ - Nodes: map[string]storage.MetricsPoint{ - "e2e-v1.17.0-control-plane": { - StartTime: time.Date(2020, 3, 31, 18, 00, 54, 0, time.UTC), - Timestamp: time.Date(2020, 4, 16, 20, 25, 28, 0, time.UTC), - CumulativeCpuUsed: 519978197128, - MemoryUsage: 1417551872, - }, - }, - Pods: map[apitypes.NamespacedName]storage.PodMetricsPoint{ - {Namespace: "default", Name: "all-fields"}: { - Containers: map[string]storage.MetricsPoint{ - "container": { - StartTime: time.Date(2020, 4, 16, 20, 17, 46, 0, time.UTC), - Timestamp: time.Date(2020, 4, 16, 20, 25, 30, 0, time.UTC), - CumulativeCpuUsed: 29328792, - MemoryUsage: 1449984, - }, - }, - }, - }, -} diff --git a/pkg/server/config.go b/pkg/server/config.go index 2705709e6b..f14c95512d 100644 --- a/pkg/server/config.go +++ b/pkg/server/config.go @@ -19,7 +19,7 @@ import ( "time" "sigs.k8s.io/metrics-server/pkg/scraper/client" - "sigs.k8s.io/metrics-server/pkg/scraper/client/summary" + "sigs.k8s.io/metrics-server/pkg/scraper/client/resource" corev1 "k8s.io/api/core/v1" apimetrics "k8s.io/apiserver/pkg/endpoints/metrics" @@ -53,7 +53,7 @@ func (c Config) Complete() (*server, error) { if err != nil { return nil, err } - kubeletClient, err := summary.NewClient(*c.Kubelet) + kubeletClient, err := resource.NewClient(*c.Kubelet) if err != nil { return nil, fmt.Errorf("unable to construct a client to connect to the kubelets: %v", err) } diff --git a/pkg/storage/node_test.go b/pkg/storage/node_test.go index 3a902887f4..62807120ea 100644 --- a/pkg/storage/node_test.go +++ b/pkg/storage/node_test.go @@ -186,6 +186,36 @@ var _ = Describe("Node storage", func() { }, )) }) + + It("provides node metrics from stored batches when StartTime is zero", func() { + s := NewStorage(60 * time.Second) + nodeStart := time.Now() + + By("storing first batch with node1 metrics") + s.Store(nodeMetricBatch(nodeMetricsPoint{"node1", newMetricsPoint(time.Time{}, nodeStart.Add(10*time.Second), 10*CoreSecond, 2*MiByte)})) + + By("waiting for second batch before becoming ready and serving metrics") + Expect(s.Ready()).NotTo(BeTrue()) + checkNodeResponseEmpty(s, "node1") + + By("storing second batch with node1 metrics") + s.Store(nodeMetricBatch(nodeMetricsPoint{"node1", newMetricsPoint(time.Time{}, nodeStart.Add(20*time.Second), 20*CoreSecond, 3*MiByte)})) + + By("becoming ready and returning metric for node1") + Expect(s.Ready()).To(BeTrue()) + ts, ms, err := s.GetNodeMetrics("node1") + Expect(err).NotTo(HaveOccurred()) + Expect(ts).Should(BeEquivalentTo([]api.TimeInfo{{Timestamp: nodeStart.Add(20 * time.Second), Window: 10 * time.Second}})) + Expect(ms).Should(BeEquivalentTo( + []v1.ResourceList{ + { + v1.ResourceCPU: *resource.NewScaledQuantity(CoreSecond, -9), + v1.ResourceMemory: *resource.NewQuantity(3*MiByte, resource.BinarySI), + }, + }, + )) + }) + }) func checkNodeResponseEmpty(s *storage, nodes ...string) { diff --git a/pkg/storage/pod_test.go b/pkg/storage/pod_test.go index 17b1b13cd4..6be2058a15 100644 --- a/pkg/storage/pod_test.go +++ b/pkg/storage/pod_test.go @@ -286,6 +286,35 @@ var _ = Describe("Pod storage", func() { Expect(s.Ready()).NotTo(BeTrue()) checkPodResponseEmpty(s, podRef) }) + + It("provides pod metrics from stored 
diff --git a/pkg/storage/pod_test.go b/pkg/storage/pod_test.go
index 17b1b13cd4..6be2058a15 100644
--- a/pkg/storage/pod_test.go
+++ b/pkg/storage/pod_test.go
@@ -286,6 +286,35 @@ var _ = Describe("Pod storage", func() {
 		Expect(s.Ready()).NotTo(BeTrue())
 		checkPodResponseEmpty(s, podRef)
 	})
+
+	It("provides pod metrics from stored batches when StartTime is zero", func() {
+		s := NewStorage(60 * time.Second)
+		containerStart := time.Now()
+		podRef := types.NamespacedName{Name: "pod1", Namespace: "ns1"}
+
+		By("storing first batch with pod1 metrics")
+		s.Store(podMetricsBatch(podMetrics(podRef, containerMetricsPoint{"container1", newMetricsPoint(time.Time{}, containerStart.Add(120*time.Second), 1*CoreSecond, 4*MiByte)})))
+
+		By("waiting for second batch before serving metrics")
+		Expect(s.Ready()).NotTo(BeTrue())
+		checkPodResponseEmpty(s, podRef)
+
+		By("storing second batch with pod1 metrics")
+		s.Store(podMetricsBatch(podMetrics(podRef, containerMetricsPoint{"container1", newMetricsPoint(time.Time{}, containerStart.Add(125*time.Second), 6*CoreSecond, 5*MiByte)})))
+
+		By("returning metric for pod1")
+		Expect(s.Ready()).To(BeTrue())
+		ts, ms, err := s.GetPodMetrics(podRef)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(ts).Should(BeEquivalentTo([]api.TimeInfo{{Timestamp: containerStart.Add(125 * time.Second), Window: 5 * time.Second}}))
+		Expect(ms).Should(BeEquivalentTo([][]metrics.ContainerMetrics{{{
+			Name: "container1",
+			Usage: v1.ResourceList{
+				v1.ResourceCPU:    *resource.NewScaledQuantity(1*CoreSecond, -9),
+				v1.ResourceMemory: *resource.NewQuantity(5*MiByte, resource.BinarySI),
+			},
+		}}}))
+	})
 })
 
 func checkPodResponseEmpty(s *storage, pods ...types.NamespacedName) {