From ecffbc39bfc85f0eb772a09dec0d7316730e26bf Mon Sep 17 00:00:00 2001 From: ChrsMark Date: Mon, 17 Jun 2024 11:03:40 +0300 Subject: [PATCH 1/2] [receiver/kubeletstats] Add k8s.{container,pod}.memory.node.utilization metrics Signed-off-by: ChrsMark --- .../add_utilization_k8s_node_metrics.yaml | 27 + receiver/kubeletstatsreceiver/README.md | 9 +- receiver/kubeletstatsreceiver/config.go | 15 +- receiver/kubeletstatsreceiver/config_test.go | 56 +++ .../kubeletstatsreceiver/documentation.md | 16 + .../internal/kubelet/accumulator.go | 10 +- .../internal/kubelet/accumulator_test.go | 8 +- .../internal/kubelet/mem.go | 11 +- .../internal/kubelet/metadata.go | 15 +- .../internal/kubelet/metadata_test.go | 32 +- .../internal/kubelet/metrics_test.go | 2 +- .../internal/kubelet/volume_test.go | 2 +- .../internal/metadata/generated_config.go | 8 + .../metadata/generated_config_test.go | 4 + .../internal/metadata/generated_metrics.go | 114 +++++ .../metadata/generated_metrics_test.go | 30 ++ .../internal/metadata/metrics.go | 3 + .../internal/metadata/testdata/config.yaml | 8 + receiver/kubeletstatsreceiver/metadata.yaml | 14 + .../mocked_objects_test.go | 17 + receiver/kubeletstatsreceiver/scraper.go | 20 +- receiver/kubeletstatsreceiver/scraper_test.go | 76 ++- .../kubeletstatsreceiver/testdata/config.yaml | 12 + ...craper_memory_util_nodelimit_expected.yaml | 460 ++++++++++++++++++ 24 files changed, 922 insertions(+), 47 deletions(-) create mode 100644 .chloggen/add_utilization_k8s_node_metrics.yaml create mode 100644 receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_memory_util_nodelimit_expected.yaml diff --git a/.chloggen/add_utilization_k8s_node_metrics.yaml b/.chloggen/add_utilization_k8s_node_metrics.yaml new file mode 100644 index 000000000000..a7246d3b9102 --- /dev/null +++ b/.chloggen/add_utilization_k8s_node_metrics.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: kubeletstatsreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Add `k8s.pod.memory.node.utilization` and `k8s.container.memory.node.utilization` metrics" + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [33591] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [user] diff --git a/receiver/kubeletstatsreceiver/README.md b/receiver/kubeletstatsreceiver/README.md index 8a586b589a15..a250ba5dff37 100644 --- a/receiver/kubeletstatsreceiver/README.md +++ b/receiver/kubeletstatsreceiver/README.md @@ -218,9 +218,10 @@ receivers: - pod ``` -### Collect k8s.container.cpu.node.utilization, `k8s.pod.cpu.node.utilization` as ratio of total node's capacity +### Collect `k8s.{container,pod}.{cpu,memory}.node.utilization` as ratio of total node's capacity -In order to calculate the `k8s.container.cpu.node.utilization` or `k8s.pod.cpu.node.utilization` metrics, the +In order to calculate the `k8s.container.cpu.node.utilization`, `k8s.pod.cpu.node.utilization`, +`k8s.container.memory.node.utilization` and `k8s.pod.memory.node.utilization` metrics, the information of the node's capacity must be retrieved from the k8s API. In this, the `k8s_api_config` needs to be set. In addition, the node name must be identified properly. The `K8S_NODE_NAME` env var can be set using the downward API inside the collector pod spec as follows: @@ -248,6 +249,10 @@ receivers: enabled: true k8s.pod.cpu.node.utilization: enabled: true + k8s.container.memory.node.utilization: + enabled: true + k8s.pod.memory.node.utilization: + enabled: true ``` ### Optional parameters diff --git a/receiver/kubeletstatsreceiver/config.go b/receiver/kubeletstatsreceiver/config.go index e2805a3a2a3f..f06bd3c051dd 100644 --- a/receiver/kubeletstatsreceiver/config.go +++ b/receiver/kubeletstatsreceiver/config.go @@ -120,10 +120,17 @@ func (cfg *Config) Unmarshal(componentParser *confmap.Conf) error { } func (cfg *Config) Validate() error { - if cfg.Metrics.K8sContainerCPUNodeUtilization.Enabled && cfg.NodeName == "" { - return errors.New("for k8s.container.cpu.node.utilization node setting is required. Check the readme on how to set the required setting") - } else if cfg.Metrics.K8sPodCPUNodeUtilization.Enabled && cfg.NodeName == "" { - return errors.New("for k8s.pod.cpu.node.utilization node setting is required. Check the readme on how to set the required setting") + if cfg.NodeName == "" { + switch { + case cfg.Metrics.K8sContainerCPUNodeUtilization.Enabled: + return errors.New("for k8s.container.cpu.node.utilization node setting is required. Check the readme on how to set the required setting") + case cfg.Metrics.K8sPodCPUNodeUtilization.Enabled: + return errors.New("for k8s.pod.cpu.node.utilization node setting is required. Check the readme on how to set the required setting") + case cfg.Metrics.K8sContainerMemoryNodeUtilization.Enabled: + return errors.New("for k8s.container.memory.node.utilization node setting is required. Check the readme on how to set the required setting") + case cfg.Metrics.K8sPodMemoryNodeUtilization.Enabled: + return errors.New("for k8s.pod.memory.node.utilization node setting is required. Check the readme on how to set the required setting") + } } return nil } diff --git a/receiver/kubeletstatsreceiver/config_test.go b/receiver/kubeletstatsreceiver/config_test.go index 04f4746d9126..f3baf8a2fb8f 100644 --- a/receiver/kubeletstatsreceiver/config_test.go +++ b/receiver/kubeletstatsreceiver/config_test.go @@ -229,6 +229,62 @@ func TestLoadConfig(t *testing.T) { }, expectedValidationErr: "for k8s.pod.cpu.node.utilization node setting is required. 
Check the readme on how to set the required setting", }, + { + id: component.NewIDWithName(metadata.Type, "container_memory_node_utilization"), + expected: &Config{ + ControllerConfig: scraperhelper.ControllerConfig{ + CollectionInterval: duration, + InitialDelay: time.Second, + }, + ClientConfig: kube.ClientConfig{ + APIConfig: k8sconfig.APIConfig{ + AuthType: "tls", + }, + }, + MetricGroupsToCollect: []kubelet.MetricGroup{ + kubelet.ContainerMetricGroup, + kubelet.PodMetricGroup, + kubelet.NodeMetricGroup, + }, + MetricsBuilderConfig: metadata.MetricsBuilderConfig{ + Metrics: metadata.MetricsConfig{ + K8sContainerMemoryNodeUtilization: metadata.MetricConfig{ + Enabled: true, + }, + }, + ResourceAttributes: metadata.DefaultResourceAttributesConfig(), + }, + }, + expectedValidationErr: "for k8s.container.memory.node.utilization node setting is required. Check the readme on how to set the required setting", + }, + { + id: component.NewIDWithName(metadata.Type, "pod_memory_node_utilization"), + expected: &Config{ + ControllerConfig: scraperhelper.ControllerConfig{ + CollectionInterval: duration, + InitialDelay: time.Second, + }, + ClientConfig: kube.ClientConfig{ + APIConfig: k8sconfig.APIConfig{ + AuthType: "tls", + }, + }, + MetricGroupsToCollect: []kubelet.MetricGroup{ + kubelet.ContainerMetricGroup, + kubelet.PodMetricGroup, + kubelet.NodeMetricGroup, + }, + MetricsBuilderConfig: metadata.MetricsBuilderConfig{ + Metrics: metadata.MetricsConfig{ + K8sPodMemoryNodeUtilization: metadata.MetricConfig{ + Enabled: true, + }, + }, + ResourceAttributes: metadata.DefaultResourceAttributesConfig(), + }, + }, + expectedValidationErr: "for k8s.pod.memory.node.utilization node setting is required. Check the readme on how to set the required setting", + }, } for _, tt := range tests { diff --git a/receiver/kubeletstatsreceiver/documentation.md b/receiver/kubeletstatsreceiver/documentation.md index adafdf58ddc4..ed4a577ebd61 100644 --- a/receiver/kubeletstatsreceiver/documentation.md +++ b/receiver/kubeletstatsreceiver/documentation.md @@ -426,6 +426,14 @@ Container cpu utilization as a ratio of the container's requests | ---- | ----------- | ---------- | | 1 | Gauge | Double | +### k8s.container.memory.node.utilization + +Container memory utilization as a ratio of the node's capacity + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + ### k8s.container.memory_limit_utilization Container memory utilization as a ratio of the container's limits @@ -490,6 +498,14 @@ Pod cpu utilization as a ratio of the pod's total container requests. If any con | ---- | ----------- | ---------- | | 1 | Gauge | Double | +### k8s.pod.memory.node.utilization + +Pod memory utilization as a ratio of the node's capacity + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + ### k8s.pod.memory_limit_utilization Pod memory utilization as a ratio of the pod's total container limits. If any container is missing a limit the metric is not emitted. 
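The new gauges documented above are computed as the kubelet-reported memory usage divided by the node's memory capacity obtained from the Kubernetes API, and are only emitted when a capacity is known (see the `mem.go` and `scraper.go` changes further down). The standalone sketch below illustrates that calculation; the helper name `memoryNodeUtilization` and the sample usage value are hypothetical and not part of this patch.

```go
package main

import "fmt"

// memoryNodeUtilization returns usage as a ratio of the node's memory capacity.
// It reports false when no capacity is known, mirroring how the receiver skips
// the metric when nodeMemoryLimit <= 0. (Hypothetical helper for illustration.)
func memoryNodeUtilization(usageBytes uint64, nodeCapacityBytes float64) (float64, bool) {
	if nodeCapacityBytes <= 0 {
		return 0, false
	}
	return float64(usageBytes) / nodeCapacityBytes, true
}

func main() {
	// A node capacity of 32564740Ki (the value used in the scraper test) is ~31 GiB.
	nodeCapacityBytes := float64(32564740) * 1024 // Ki -> bytes

	// Illustrative pod memory usage of ~24.5 MiB.
	if ratio, ok := memoryNodeUtilization(25726976, nodeCapacityBytes); ok {
		fmt.Printf("k8s.pod.memory.node.utilization = %g\n", ratio) // ≈ 0.00077
	}
}
```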
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go index b8d9cc873fea..5cdf803080ab 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go @@ -57,7 +57,7 @@ func (a *metricDataAccumulator) nodeStats(s stats.NodeStats) { currentTime := pcommon.NewTimestampFromTime(a.time) addUptimeMetric(a.mbs.NodeMetricsBuilder, metadata.NodeUptimeMetrics.Uptime, s.StartTime, currentTime) addCPUMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeCPUMetrics, s.CPU, currentTime, resources{}, 0) - addMemoryMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeMemoryMetrics, s.Memory, currentTime, resources{}) + addMemoryMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeMemoryMetrics, s.Memory, currentTime, resources{}, 0) addFilesystemMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeFilesystemMetrics, s.Fs, currentTime) addNetworkMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeNetworkMetrics, s.Network, currentTime) // todo s.Runtime.ImageFs @@ -76,8 +76,8 @@ func (a *metricDataAccumulator) podStats(s stats.PodStats) { currentTime := pcommon.NewTimestampFromTime(a.time) addUptimeMetric(a.mbs.PodMetricsBuilder, metadata.PodUptimeMetrics.Uptime, s.StartTime, currentTime) - addCPUMetrics(a.mbs.PodMetricsBuilder, metadata.PodCPUMetrics, s.CPU, currentTime, a.metadata.podResources[s.PodRef.UID], a.metadata.cpuNodeLimit) - addMemoryMetrics(a.mbs.PodMetricsBuilder, metadata.PodMemoryMetrics, s.Memory, currentTime, a.metadata.podResources[s.PodRef.UID]) + addCPUMetrics(a.mbs.PodMetricsBuilder, metadata.PodCPUMetrics, s.CPU, currentTime, a.metadata.podResources[s.PodRef.UID], a.metadata.nodeCapacity.CPUCapacity) + addMemoryMetrics(a.mbs.PodMetricsBuilder, metadata.PodMemoryMetrics, s.Memory, currentTime, a.metadata.podResources[s.PodRef.UID], a.metadata.nodeCapacity.MemoryCapacity) addFilesystemMetrics(a.mbs.PodMetricsBuilder, metadata.PodFilesystemMetrics, s.EphemeralStorage, currentTime) addNetworkMetrics(a.mbs.PodMetricsBuilder, metadata.PodNetworkMetrics, s.Network, currentTime) @@ -110,8 +110,8 @@ func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.Cont currentTime := pcommon.NewTimestampFromTime(a.time) resourceKey := sPod.PodRef.UID + s.Name addUptimeMetric(a.mbs.ContainerMetricsBuilder, metadata.ContainerUptimeMetrics.Uptime, s.StartTime, currentTime) - addCPUMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerCPUMetrics, s.CPU, currentTime, a.metadata.containerResources[resourceKey], a.metadata.cpuNodeLimit) - addMemoryMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerMemoryMetrics, s.Memory, currentTime, a.metadata.containerResources[resourceKey]) + addCPUMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerCPUMetrics, s.CPU, currentTime, a.metadata.containerResources[resourceKey], a.metadata.nodeCapacity.CPUCapacity) + addMemoryMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerMemoryMetrics, s.Memory, currentTime, a.metadata.containerResources[resourceKey], a.metadata.nodeCapacity.MemoryCapacity) addFilesystemMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerFilesystemMetrics, s.Rootfs, currentTime) a.m = append(a.m, a.mbs.ContainerMetricsBuilder.Emit( diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go index 159d861a3a72..1c2ec492fc52 100644 --- 
a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go @@ -53,7 +53,7 @@ func TestMetadataErrorCases(t *testing.T) { }, }, }, - }, NodeLimits{}, nil), + }, NodeCapacity{}, nil), testScenario: func(acc metricDataAccumulator) { now := metav1.Now() podStats := stats.PodStats{ @@ -79,7 +79,7 @@ func TestMetadataErrorCases(t *testing.T) { metricGroupsToCollect: map[MetricGroup]bool{ VolumeMetricGroup: true, }, - metadata: NewMetadata([]MetadataLabel{MetadataLabelVolumeType}, nil, NodeLimits{}, nil), + metadata: NewMetadata([]MetadataLabel{MetadataLabelVolumeType}, nil, NodeCapacity{}, nil), testScenario: func(acc metricDataAccumulator) { podStats := stats.PodStats{ PodRef: stats.PodReference{ @@ -121,7 +121,7 @@ func TestMetadataErrorCases(t *testing.T) { }, }, }, - }, NodeLimits{}, nil), + }, NodeCapacity{}, nil), testScenario: func(acc metricDataAccumulator) { podStats := stats.PodStats{ PodRef: stats.PodReference{ @@ -165,7 +165,7 @@ func TestMetadataErrorCases(t *testing.T) { }, }, }, - }, NodeLimits{}, nil), + }, NodeCapacity{}, nil), detailedPVCLabelsSetterOverride: func(*metadata.ResourceBuilder, string, string, string) error { // Mock failure cases. return errors.New("") diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/mem.go b/receiver/kubeletstatsreceiver/internal/kubelet/mem.go index 85713e5234c0..d6aab65d7caf 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/mem.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/mem.go @@ -10,7 +10,13 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata" ) -func addMemoryMetrics(mb *metadata.MetricsBuilder, memoryMetrics metadata.MemoryMetrics, s *stats.MemoryStats, currentTime pcommon.Timestamp, r resources) { +func addMemoryMetrics( + mb *metadata.MetricsBuilder, + memoryMetrics metadata.MemoryMetrics, + s *stats.MemoryStats, + currentTime pcommon.Timestamp, + r resources, + nodeMemoryLimit float64) { if s == nil { return } @@ -29,5 +35,8 @@ func addMemoryMetrics(mb *metadata.MetricsBuilder, memoryMetrics metadata.Memory if r.memoryRequest > 0 { memoryMetrics.RequestUtilization(mb, currentTime, float64(*s.UsageBytes)/float64(r.memoryRequest)) } + if nodeMemoryLimit > 0 { + memoryMetrics.NodeUtilization(mb, currentTime, float64(*s.UsageBytes)/nodeMemoryLimit) + } } } diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go index 392eb46dee70..dd0fff332716 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go @@ -52,7 +52,7 @@ type Metadata struct { DetailedPVCResourceSetter func(rb *metadata.ResourceBuilder, volCacheID, volumeClaim, namespace string) error podResources map[string]resources containerResources map[string]resources - cpuNodeLimit float64 + nodeCapacity NodeCapacity } type resources struct { @@ -62,9 +62,12 @@ type resources struct { memoryLimit int64 } -type NodeLimits struct { - Name string - CPUNanoCoresLimit float64 +type NodeCapacity struct { + Name string + // node's CPU capacity in cores + CPUCapacity float64 + // node's Memory capacity in bytes + MemoryCapacity float64 } func getContainerResources(r *v1.ResourceRequirements) resources { @@ -80,7 +83,7 @@ func getContainerResources(r *v1.ResourceRequirements) resources { } } -func NewMetadata(labels []MetadataLabel, podsMetadata *v1.PodList, 
nodeResourceLimits NodeLimits, +func NewMetadata(labels []MetadataLabel, podsMetadata *v1.PodList, nodeCap NodeCapacity, detailedPVCResourceSetter func(rb *metadata.ResourceBuilder, volCacheID, volumeClaim, namespace string) error) Metadata { m := Metadata{ Labels: getLabelsMap(labels), @@ -88,7 +91,7 @@ func NewMetadata(labels []MetadataLabel, podsMetadata *v1.PodList, nodeResourceL DetailedPVCResourceSetter: detailedPVCResourceSetter, podResources: make(map[string]resources), containerResources: make(map[string]resources), - cpuNodeLimit: nodeResourceLimits.CPUNanoCoresLimit, + nodeCapacity: nodeCap, } if podsMetadata != nil { diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go index c5cb72b6f7df..7ba5e4eabd44 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go @@ -70,7 +70,7 @@ func TestSetExtraLabels(t *testing.T) { }{ { name: "no_labels", - metadata: NewMetadata([]MetadataLabel{}, nil, NodeLimits{}, nil), + metadata: NewMetadata([]MetadataLabel{}, nil, NodeCapacity{}, nil), args: []string{"uid", "container.id", "container"}, want: map[string]any{}, }, @@ -98,7 +98,7 @@ func TestSetExtraLabels(t *testing.T) { }, }, }, - }, NodeLimits{}, nil), + }, NodeCapacity{}, nil), args: []string{"uid-1234", "container.id", "container1"}, want: map[string]any{ string(MetadataLabelContainerID): "test-container", @@ -128,7 +128,7 @@ func TestSetExtraLabels(t *testing.T) { }, }, }, - }, NodeLimits{}, nil), + }, NodeCapacity{}, nil), args: []string{"uid-1234", "container.id", "init-container1"}, want: map[string]any{ string(MetadataLabelContainerID): "test-init-container", @@ -136,7 +136,7 @@ func TestSetExtraLabels(t *testing.T) { }, { name: "set_container_id_no_metadata", - metadata: NewMetadata([]MetadataLabel{MetadataLabelContainerID}, nil, NodeLimits{}, nil), + metadata: NewMetadata([]MetadataLabel{MetadataLabelContainerID}, nil, NodeCapacity{}, nil), args: []string{"uid-1234", "container.id", "container1"}, wantError: "pods metadata were not fetched", }, @@ -158,7 +158,7 @@ func TestSetExtraLabels(t *testing.T) { }, }, }, - }, NodeLimits{}, nil), + }, NodeCapacity{}, nil), args: []string{"uid-1234", "container.id", "container1"}, wantError: "pod \"uid-1234\" with container \"container1\" not found in the fetched metadata", }, @@ -180,13 +180,13 @@ func TestSetExtraLabels(t *testing.T) { }, }, }, - }, NodeLimits{}, nil), + }, NodeCapacity{}, nil), args: []string{"uid-1234", "container.id", "container1"}, wantError: "pod \"uid-1234\" with container \"container1\" has an empty containerID", }, { name: "set_volume_type_no_metadata", - metadata: NewMetadata([]MetadataLabel{MetadataLabelVolumeType}, nil, NodeLimits{}, nil), + metadata: NewMetadata([]MetadataLabel{MetadataLabelVolumeType}, nil, NodeCapacity{}, nil), args: []string{"uid-1234", "k8s.volume.type", "volume0"}, wantError: "pods metadata were not fetched", }, @@ -208,7 +208,7 @@ func TestSetExtraLabels(t *testing.T) { }, }, }, - }, NodeLimits{}, nil), + }, NodeCapacity{}, nil), args: []string{"uid-1234", "k8s.volume.type", "volume1"}, wantError: "pod \"uid-1234\" with volume \"volume1\" not found in the fetched metadata", }, @@ -376,7 +376,7 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) { }, }, }, - }, NodeLimits{}, func(*metadata.ResourceBuilder, string, string, string) error { + }, NodeCapacity{}, func(*metadata.ResourceBuilder, string, string, 
string) error { return nil }) rb := metadata.NewResourceBuilder(metadata.DefaultResourceAttributesConfig()) @@ -407,7 +407,7 @@ func TestCpuAndMemoryGetters(t *testing.T) { }{ { name: "no metadata", - metadata: NewMetadata([]MetadataLabel{}, nil, NodeLimits{}, nil), + metadata: NewMetadata([]MetadataLabel{}, nil, NodeCapacity{}, nil), }, { name: "pod happy path", @@ -449,7 +449,7 @@ func TestCpuAndMemoryGetters(t *testing.T) { }, }, }, - }, NodeLimits{}, nil), + }, NodeCapacity{}, nil), podUID: "uid-1234", containerName: "container-2", wantPodCPULimit: 2.1, @@ -501,7 +501,7 @@ func TestCpuAndMemoryGetters(t *testing.T) { }, }, }, - }, NodeLimits{}, nil), + }, NodeCapacity{}, nil), podUID: "uid-12345", }, { @@ -544,7 +544,7 @@ func TestCpuAndMemoryGetters(t *testing.T) { }, }, }, - }, NodeLimits{}, nil), + }, NodeCapacity{}, nil), podUID: "uid-1234", containerName: "container-3", wantPodCPULimit: 0.7, @@ -584,7 +584,7 @@ func TestCpuAndMemoryGetters(t *testing.T) { }, }, }, - }, NodeLimits{}, nil), + }, NodeCapacity{}, nil), podUID: "uid-1234", containerName: "container-2", wantPodCPURequest: 2, @@ -624,7 +624,7 @@ func TestCpuAndMemoryGetters(t *testing.T) { }, }, }, - }, NodeLimits{}, nil), + }, NodeCapacity{}, nil), podUID: "uid-1234", containerName: "container-2", wantPodCPULimit: 2, @@ -662,7 +662,7 @@ func TestCpuAndMemoryGetters(t *testing.T) { }, }, }, - }, NodeLimits{}, nil), + }, NodeCapacity{}, nil), podUID: "uid-1234", containerName: "container-1", wantContainerCPULimit: 1, diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go index 8e9653ab102c..2f0dfd69623a 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go @@ -33,7 +33,7 @@ func TestMetricAccumulator(t *testing.T) { summary, _ := statsProvider.StatsSummary() metadataProvider := NewMetadataProvider(rc) podsMetadata, _ := metadataProvider.Pods() - k8sMetadata := NewMetadata([]MetadataLabel{MetadataLabelContainerID}, podsMetadata, NodeLimits{}, nil) + k8sMetadata := NewMetadata([]MetadataLabel{MetadataLabelContainerID}, podsMetadata, NodeCapacity{}, nil) mbs := &metadata.MetricsBuilders{ NodeMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsBuilderConfig(), receivertest.NewNopSettings()), PodMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsBuilderConfig(), receivertest.NewNopSettings()), diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go index d2d97b455fe0..a0c98f6699d4 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go @@ -177,7 +177,7 @@ func TestDetailedPVCLabels(t *testing.T) { }, }, }, - }, NodeLimits{}, nil) + }, NodeCapacity{}, nil) metadata.DetailedPVCResourceSetter = tt.detailedPVCLabelsSetterOverride res, err := getVolumeResourceOptions(rb, podStats, stats.VolumeStats{Name: tt.volumeName}, metadata) diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go index 9db06b4d5c4c..1dcbd5fb3e1e 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go @@ -44,6 +44,7 @@ type MetricsConfig struct { K8sContainerCPUNodeUtilization MetricConfig 
`mapstructure:"k8s.container.cpu.node.utilization"` K8sContainerCPULimitUtilization MetricConfig `mapstructure:"k8s.container.cpu_limit_utilization"` K8sContainerCPURequestUtilization MetricConfig `mapstructure:"k8s.container.cpu_request_utilization"` + K8sContainerMemoryNodeUtilization MetricConfig `mapstructure:"k8s.container.memory.node.utilization"` K8sContainerMemoryLimitUtilization MetricConfig `mapstructure:"k8s.container.memory_limit_utilization"` K8sContainerMemoryRequestUtilization MetricConfig `mapstructure:"k8s.container.memory_request_utilization"` K8sNodeCPUTime MetricConfig `mapstructure:"k8s.node.cpu.time"` @@ -72,6 +73,7 @@ type MetricsConfig struct { K8sPodFilesystemUsage MetricConfig `mapstructure:"k8s.pod.filesystem.usage"` K8sPodMemoryAvailable MetricConfig `mapstructure:"k8s.pod.memory.available"` K8sPodMemoryMajorPageFaults MetricConfig `mapstructure:"k8s.pod.memory.major_page_faults"` + K8sPodMemoryNodeUtilization MetricConfig `mapstructure:"k8s.pod.memory.node.utilization"` K8sPodMemoryPageFaults MetricConfig `mapstructure:"k8s.pod.memory.page_faults"` K8sPodMemoryRss MetricConfig `mapstructure:"k8s.pod.memory.rss"` K8sPodMemoryUsage MetricConfig `mapstructure:"k8s.pod.memory.usage"` @@ -138,6 +140,9 @@ func DefaultMetricsConfig() MetricsConfig { K8sContainerCPURequestUtilization: MetricConfig{ Enabled: false, }, + K8sContainerMemoryNodeUtilization: MetricConfig{ + Enabled: false, + }, K8sContainerMemoryLimitUtilization: MetricConfig{ Enabled: false, }, @@ -222,6 +227,9 @@ func DefaultMetricsConfig() MetricsConfig { K8sPodMemoryMajorPageFaults: MetricConfig{ Enabled: true, }, + K8sPodMemoryNodeUtilization: MetricConfig{ + Enabled: false, + }, K8sPodMemoryPageFaults: MetricConfig{ Enabled: true, }, diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go index 01ae22c6f6f1..a2f78d569561 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go @@ -41,6 +41,7 @@ func TestMetricsBuilderConfig(t *testing.T) { K8sContainerCPUNodeUtilization: MetricConfig{Enabled: true}, K8sContainerCPULimitUtilization: MetricConfig{Enabled: true}, K8sContainerCPURequestUtilization: MetricConfig{Enabled: true}, + K8sContainerMemoryNodeUtilization: MetricConfig{Enabled: true}, K8sContainerMemoryLimitUtilization: MetricConfig{Enabled: true}, K8sContainerMemoryRequestUtilization: MetricConfig{Enabled: true}, K8sNodeCPUTime: MetricConfig{Enabled: true}, @@ -69,6 +70,7 @@ func TestMetricsBuilderConfig(t *testing.T) { K8sPodFilesystemUsage: MetricConfig{Enabled: true}, K8sPodMemoryAvailable: MetricConfig{Enabled: true}, K8sPodMemoryMajorPageFaults: MetricConfig{Enabled: true}, + K8sPodMemoryNodeUtilization: MetricConfig{Enabled: true}, K8sPodMemoryPageFaults: MetricConfig{Enabled: true}, K8sPodMemoryRss: MetricConfig{Enabled: true}, K8sPodMemoryUsage: MetricConfig{Enabled: true}, @@ -123,6 +125,7 @@ func TestMetricsBuilderConfig(t *testing.T) { K8sContainerCPUNodeUtilization: MetricConfig{Enabled: false}, K8sContainerCPULimitUtilization: MetricConfig{Enabled: false}, K8sContainerCPURequestUtilization: MetricConfig{Enabled: false}, + K8sContainerMemoryNodeUtilization: MetricConfig{Enabled: false}, K8sContainerMemoryLimitUtilization: MetricConfig{Enabled: false}, K8sContainerMemoryRequestUtilization: MetricConfig{Enabled: false}, K8sNodeCPUTime: MetricConfig{Enabled: false}, @@ -151,6 
+154,7 @@ func TestMetricsBuilderConfig(t *testing.T) { K8sPodFilesystemUsage: MetricConfig{Enabled: false}, K8sPodMemoryAvailable: MetricConfig{Enabled: false}, K8sPodMemoryMajorPageFaults: MetricConfig{Enabled: false}, + K8sPodMemoryNodeUtilization: MetricConfig{Enabled: false}, K8sPodMemoryPageFaults: MetricConfig{Enabled: false}, K8sPodMemoryRss: MetricConfig{Enabled: false}, K8sPodMemoryUsage: MetricConfig{Enabled: false}, diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go index ee6fa88f5c24..02aef00983ad 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go @@ -826,6 +826,55 @@ func newMetricK8sContainerCPURequestUtilization(cfg MetricConfig) metricK8sConta return m } +type metricK8sContainerMemoryNodeUtilization struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills k8s.container.memory.node.utilization metric with initial data. +func (m *metricK8sContainerMemoryNodeUtilization) init() { + m.data.SetName("k8s.container.memory.node.utilization") + m.data.SetDescription("Container memory utilization as a ratio of the node's capacity") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricK8sContainerMemoryNodeUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricK8sContainerMemoryNodeUtilization) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricK8sContainerMemoryNodeUtilization) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricK8sContainerMemoryNodeUtilization(cfg MetricConfig) metricK8sContainerMemoryNodeUtilization { + m := metricK8sContainerMemoryNodeUtilization{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricK8sContainerMemoryLimitUtilization struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -2214,6 +2263,55 @@ func newMetricK8sPodMemoryMajorPageFaults(cfg MetricConfig) metricK8sPodMemoryMa return m } +type metricK8sPodMemoryNodeUtilization struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills k8s.pod.memory.node.utilization metric with initial data. 
+func (m *metricK8sPodMemoryNodeUtilization) init() { + m.data.SetName("k8s.pod.memory.node.utilization") + m.data.SetDescription("Pod memory utilization as a ratio of the node's capacity") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricK8sPodMemoryNodeUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricK8sPodMemoryNodeUtilization) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricK8sPodMemoryNodeUtilization) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricK8sPodMemoryNodeUtilization(cfg MetricConfig) metricK8sPodMemoryNodeUtilization { + m := metricK8sPodMemoryNodeUtilization{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricK8sPodMemoryPageFaults struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -2938,6 +3036,7 @@ type MetricsBuilder struct { metricK8sContainerCPUNodeUtilization metricK8sContainerCPUNodeUtilization metricK8sContainerCPULimitUtilization metricK8sContainerCPULimitUtilization metricK8sContainerCPURequestUtilization metricK8sContainerCPURequestUtilization + metricK8sContainerMemoryNodeUtilization metricK8sContainerMemoryNodeUtilization metricK8sContainerMemoryLimitUtilization metricK8sContainerMemoryLimitUtilization metricK8sContainerMemoryRequestUtilization metricK8sContainerMemoryRequestUtilization metricK8sNodeCPUTime metricK8sNodeCPUTime @@ -2966,6 +3065,7 @@ type MetricsBuilder struct { metricK8sPodFilesystemUsage metricK8sPodFilesystemUsage metricK8sPodMemoryAvailable metricK8sPodMemoryAvailable metricK8sPodMemoryMajorPageFaults metricK8sPodMemoryMajorPageFaults + metricK8sPodMemoryNodeUtilization metricK8sPodMemoryNodeUtilization metricK8sPodMemoryPageFaults metricK8sPodMemoryPageFaults metricK8sPodMemoryRss metricK8sPodMemoryRss metricK8sPodMemoryUsage metricK8sPodMemoryUsage @@ -3023,6 +3123,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricK8sContainerCPUNodeUtilization: newMetricK8sContainerCPUNodeUtilization(mbc.Metrics.K8sContainerCPUNodeUtilization), metricK8sContainerCPULimitUtilization: newMetricK8sContainerCPULimitUtilization(mbc.Metrics.K8sContainerCPULimitUtilization), metricK8sContainerCPURequestUtilization: newMetricK8sContainerCPURequestUtilization(mbc.Metrics.K8sContainerCPURequestUtilization), + metricK8sContainerMemoryNodeUtilization: newMetricK8sContainerMemoryNodeUtilization(mbc.Metrics.K8sContainerMemoryNodeUtilization), metricK8sContainerMemoryLimitUtilization: newMetricK8sContainerMemoryLimitUtilization(mbc.Metrics.K8sContainerMemoryLimitUtilization), metricK8sContainerMemoryRequestUtilization: newMetricK8sContainerMemoryRequestUtilization(mbc.Metrics.K8sContainerMemoryRequestUtilization), metricK8sNodeCPUTime: newMetricK8sNodeCPUTime(mbc.Metrics.K8sNodeCPUTime), @@ -3051,6 +3152,7 @@ 
func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricK8sPodFilesystemUsage: newMetricK8sPodFilesystemUsage(mbc.Metrics.K8sPodFilesystemUsage), metricK8sPodMemoryAvailable: newMetricK8sPodMemoryAvailable(mbc.Metrics.K8sPodMemoryAvailable), metricK8sPodMemoryMajorPageFaults: newMetricK8sPodMemoryMajorPageFaults(mbc.Metrics.K8sPodMemoryMajorPageFaults), + metricK8sPodMemoryNodeUtilization: newMetricK8sPodMemoryNodeUtilization(mbc.Metrics.K8sPodMemoryNodeUtilization), metricK8sPodMemoryPageFaults: newMetricK8sPodMemoryPageFaults(mbc.Metrics.K8sPodMemoryPageFaults), metricK8sPodMemoryRss: newMetricK8sPodMemoryRss(mbc.Metrics.K8sPodMemoryRss), metricK8sPodMemoryUsage: newMetricK8sPodMemoryUsage(mbc.Metrics.K8sPodMemoryUsage), @@ -3235,6 +3337,7 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricK8sContainerCPUNodeUtilization.emit(ils.Metrics()) mb.metricK8sContainerCPULimitUtilization.emit(ils.Metrics()) mb.metricK8sContainerCPURequestUtilization.emit(ils.Metrics()) + mb.metricK8sContainerMemoryNodeUtilization.emit(ils.Metrics()) mb.metricK8sContainerMemoryLimitUtilization.emit(ils.Metrics()) mb.metricK8sContainerMemoryRequestUtilization.emit(ils.Metrics()) mb.metricK8sNodeCPUTime.emit(ils.Metrics()) @@ -3263,6 +3366,7 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricK8sPodFilesystemUsage.emit(ils.Metrics()) mb.metricK8sPodMemoryAvailable.emit(ils.Metrics()) mb.metricK8sPodMemoryMajorPageFaults.emit(ils.Metrics()) + mb.metricK8sPodMemoryNodeUtilization.emit(ils.Metrics()) mb.metricK8sPodMemoryPageFaults.emit(ils.Metrics()) mb.metricK8sPodMemoryRss.emit(ils.Metrics()) mb.metricK8sPodMemoryUsage.emit(ils.Metrics()) @@ -3388,6 +3492,11 @@ func (mb *MetricsBuilder) RecordK8sContainerCPURequestUtilizationDataPoint(ts pc mb.metricK8sContainerCPURequestUtilization.recordDataPoint(mb.startTime, ts, val) } +// RecordK8sContainerMemoryNodeUtilizationDataPoint adds a data point to k8s.container.memory.node.utilization metric. +func (mb *MetricsBuilder) RecordK8sContainerMemoryNodeUtilizationDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricK8sContainerMemoryNodeUtilization.recordDataPoint(mb.startTime, ts, val) +} + // RecordK8sContainerMemoryLimitUtilizationDataPoint adds a data point to k8s.container.memory_limit_utilization metric. func (mb *MetricsBuilder) RecordK8sContainerMemoryLimitUtilizationDataPoint(ts pcommon.Timestamp, val float64) { mb.metricK8sContainerMemoryLimitUtilization.recordDataPoint(mb.startTime, ts, val) @@ -3528,6 +3637,11 @@ func (mb *MetricsBuilder) RecordK8sPodMemoryMajorPageFaultsDataPoint(ts pcommon. mb.metricK8sPodMemoryMajorPageFaults.recordDataPoint(mb.startTime, ts, val) } +// RecordK8sPodMemoryNodeUtilizationDataPoint adds a data point to k8s.pod.memory.node.utilization metric. +func (mb *MetricsBuilder) RecordK8sPodMemoryNodeUtilizationDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricK8sPodMemoryNodeUtilization.recordDataPoint(mb.startTime, ts, val) +} + // RecordK8sPodMemoryPageFaultsDataPoint adds a data point to k8s.pod.memory.page_faults metric. 
func (mb *MetricsBuilder) RecordK8sPodMemoryPageFaultsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricK8sPodMemoryPageFaults.recordDataPoint(mb.startTime, ts, val) diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go index a9c56e31f5f3..1fd14d7c86f2 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go @@ -139,6 +139,9 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordK8sContainerCPURequestUtilizationDataPoint(ts, 1) + allMetricsCount++ + mb.RecordK8sContainerMemoryNodeUtilizationDataPoint(ts, 1) + allMetricsCount++ mb.RecordK8sContainerMemoryLimitUtilizationDataPoint(ts, 1) @@ -243,6 +246,9 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordK8sPodMemoryMajorPageFaultsDataPoint(ts, 1) + allMetricsCount++ + mb.RecordK8sPodMemoryNodeUtilizationDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordK8sPodMemoryPageFaultsDataPoint(ts, 1) @@ -530,6 +536,18 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) assert.Equal(t, float64(1), dp.DoubleValue()) + case "k8s.container.memory.node.utilization": + assert.False(t, validatedMetrics["k8s.container.memory.node.utilization"], "Found a duplicate in the metrics slice: k8s.container.memory.node.utilization") + validatedMetrics["k8s.container.memory.node.utilization"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Container memory utilization as a ratio of the node's capacity", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) case "k8s.container.memory_limit_utilization": assert.False(t, validatedMetrics["k8s.container.memory_limit_utilization"], "Found a duplicate in the metrics slice: k8s.container.memory_limit_utilization") validatedMetrics["k8s.container.memory_limit_utilization"] = true @@ -888,6 +906,18 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "k8s.pod.memory.node.utilization": + assert.False(t, validatedMetrics["k8s.pod.memory.node.utilization"], "Found a duplicate in the metrics slice: k8s.pod.memory.node.utilization") + validatedMetrics["k8s.pod.memory.node.utilization"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Pod memory utilization as a ratio of the node's capacity", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) case "k8s.pod.memory.page_faults": assert.False(t, validatedMetrics["k8s.pod.memory.page_faults"], "Found a duplicate in the metrics slice: k8s.pod.memory.page_faults") 
validatedMetrics["k8s.pod.memory.page_faults"] = true diff --git a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go index 808d10366f6e..5f23d5264185 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go @@ -54,6 +54,7 @@ var ContainerCPUMetrics = CPUMetrics{ type MemoryMetrics struct { Available RecordIntDataPointFunc Usage RecordIntDataPointFunc + NodeUtilization RecordDoubleDataPointFunc LimitUtilization RecordDoubleDataPointFunc RequestUtilization RecordDoubleDataPointFunc Rss RecordIntDataPointFunc @@ -74,6 +75,7 @@ var NodeMemoryMetrics = MemoryMetrics{ var PodMemoryMetrics = MemoryMetrics{ Available: (*MetricsBuilder).RecordK8sPodMemoryAvailableDataPoint, Usage: (*MetricsBuilder).RecordK8sPodMemoryUsageDataPoint, + NodeUtilization: (*MetricsBuilder).RecordK8sPodMemoryNodeUtilizationDataPoint, LimitUtilization: (*MetricsBuilder).RecordK8sPodMemoryLimitUtilizationDataPoint, RequestUtilization: (*MetricsBuilder).RecordK8sPodMemoryRequestUtilizationDataPoint, Rss: (*MetricsBuilder).RecordK8sPodMemoryRssDataPoint, @@ -85,6 +87,7 @@ var PodMemoryMetrics = MemoryMetrics{ var ContainerMemoryMetrics = MemoryMetrics{ Available: (*MetricsBuilder).RecordContainerMemoryAvailableDataPoint, Usage: (*MetricsBuilder).RecordContainerMemoryUsageDataPoint, + NodeUtilization: (*MetricsBuilder).RecordK8sContainerMemoryNodeUtilizationDataPoint, LimitUtilization: (*MetricsBuilder).RecordK8sContainerMemoryLimitUtilizationDataPoint, RequestUtilization: (*MetricsBuilder).RecordK8sContainerMemoryRequestUtilizationDataPoint, Rss: (*MetricsBuilder).RecordContainerMemoryRssDataPoint, diff --git a/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml b/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml index 3f4e11bf5384..9d6d3fa832ba 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml @@ -33,6 +33,8 @@ all_set: enabled: true k8s.container.cpu_request_utilization: enabled: true + k8s.container.memory.node.utilization: + enabled: true k8s.container.memory_limit_utilization: enabled: true k8s.container.memory_request_utilization: @@ -89,6 +91,8 @@ all_set: enabled: true k8s.pod.memory.major_page_faults: enabled: true + k8s.pod.memory.node.utilization: + enabled: true k8s.pod.memory.page_faults: enabled: true k8s.pod.memory.rss: @@ -182,6 +186,8 @@ none_set: enabled: false k8s.container.cpu_request_utilization: enabled: false + k8s.container.memory.node.utilization: + enabled: false k8s.container.memory_limit_utilization: enabled: false k8s.container.memory_request_utilization: @@ -238,6 +244,8 @@ none_set: enabled: false k8s.pod.memory.major_page_faults: enabled: false + k8s.pod.memory.node.utilization: + enabled: false k8s.pod.memory.page_faults: enabled: false k8s.pod.memory.rss: diff --git a/receiver/kubeletstatsreceiver/metadata.yaml b/receiver/kubeletstatsreceiver/metadata.yaml index bc6d1bb779eb..4b0a83b22160 100644 --- a/receiver/kubeletstatsreceiver/metadata.yaml +++ b/receiver/kubeletstatsreceiver/metadata.yaml @@ -257,6 +257,13 @@ metrics: gauge: value_type: double attributes: [ ] + k8s.pod.memory.node.utilization: + enabled: false + description: "Pod memory utilization as a ratio of the node's capacity" + unit: 1 + gauge: + value_type: double + attributes: [ ] k8s.pod.memory_limit_utilization: enabled: false 
description: "Pod memory utilization as a ratio of the pod's total container limits. If any container is missing a limit the metric is not emitted." @@ -407,6 +414,13 @@ metrics: gauge: value_type: double attributes: [ ] + k8s.container.memory.node.utilization: + enabled: false + description: "Container memory utilization as a ratio of the node's capacity" + unit: 1 + gauge: + value_type: double + attributes: [ ] k8s.container.memory_limit_utilization: enabled: false description: "Container memory utilization as a ratio of the container's limits" diff --git a/receiver/kubeletstatsreceiver/mocked_objects_test.go b/receiver/kubeletstatsreceiver/mocked_objects_test.go index eff596298062..3ad686b824d8 100644 --- a/receiver/kubeletstatsreceiver/mocked_objects_test.go +++ b/receiver/kubeletstatsreceiver/mocked_objects_test.go @@ -42,6 +42,23 @@ func getNodeWithCPUCapacity(nodeName string, cpuCap int) *v1.Node { } } +func getNodeWithMemoryCapacity(nodeName string, memoryCap string) *v1.Node { + resourceList := make(v1.ResourceList) + q := resource.QuantityValue{} + _ = q.Set(memoryCap) + resourceList["memory"] = q.Quantity + return &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + UID: "asdfg", + }, + Spec: v1.NodeSpec{}, + Status: v1.NodeStatus{ + Capacity: resourceList, + }, + } +} + var volumeClaim1 = getPVC("volume_claim_1", "kube-system", "storage-provisioner-token-qzlx6") var volumeClaim2 = getPVC("volume_claim_2", "kube-system", "kube-proxy") var volumeClaim3 = getPVC("volume_claim_3", "kube-system", "coredns-token-dzc5t") diff --git a/receiver/kubeletstatsreceiver/scraper.go b/receiver/kubeletstatsreceiver/scraper.go index 6bece93409a3..582bbbfae57c 100644 --- a/receiver/kubeletstatsreceiver/scraper.go +++ b/receiver/kubeletstatsreceiver/scraper.go @@ -47,7 +47,7 @@ type kubletScraper struct { m sync.RWMutex // A struct that keeps Node's resource capacities - nodeLimits *kubelet.NodeLimits + nodeLimits *kubelet.NodeCapacity } func newKubletScraper( @@ -80,11 +80,13 @@ func newKubletScraper( metricsConfig.Metrics.K8sContainerMemoryLimitUtilization.Enabled || metricsConfig.Metrics.K8sContainerMemoryRequestUtilization.Enabled, stopCh: make(chan struct{}), - nodeLimits: &kubelet.NodeLimits{}, + nodeLimits: &kubelet.NodeCapacity{}, } if metricsConfig.Metrics.K8sContainerCPUNodeUtilization.Enabled || - metricsConfig.Metrics.K8sPodCPUNodeUtilization.Enabled { + metricsConfig.Metrics.K8sPodCPUNodeUtilization.Enabled || + metricsConfig.Metrics.K8sContainerMemoryNodeUtilization.Enabled || + metricsConfig.Metrics.K8sPodMemoryNodeUtilization.Enabled { ks.nodeInformer = k8sconfig.NewNodeSharedInformer(rOptions.k8sAPIClient, nodeName, 5*time.Minute) } @@ -113,7 +115,7 @@ func (r *kubletScraper) scrape(context.Context) (pmetric.Metrics, error) { } } - var node kubelet.NodeLimits + var node kubelet.NodeCapacity if r.nodeInformer != nil { node = r.node() } @@ -159,7 +161,7 @@ func (r *kubletScraper) detailedPVCLabelsSetter() func(rb *metadata.ResourceBuil } } -func (r *kubletScraper) node() kubelet.NodeLimits { +func (r *kubletScraper) node() kubelet.NodeCapacity { r.m.RLock() defer r.m.RUnlock() return *r.nodeLimits @@ -209,7 +211,13 @@ func (r *kubletScraper) addOrUpdateNode(node *v1.Node) { if cpu, ok := node.Status.Capacity["cpu"]; ok { if q, err := resource.ParseQuantity(cpu.String()); err == nil { - r.nodeLimits.CPUNanoCoresLimit = float64(q.MilliValue()) / 1000 + r.nodeLimits.CPUCapacity = float64(q.MilliValue()) / 1000 + } + } + if memory, ok := node.Status.Capacity["memory"]; ok { + 
// ie: 32564740Ki + if q, err := resource.ParseQuantity(memory.String()); err == nil { + r.nodeLimits.MemoryCapacity = float64(q.Value()) } } } diff --git a/receiver/kubeletstatsreceiver/scraper_test.go b/receiver/kubeletstatsreceiver/scraper_test.go index 0dc55e405cbd..8affc749db83 100644 --- a/receiver/kubeletstatsreceiver/scraper_test.go +++ b/receiver/kubeletstatsreceiver/scraper_test.go @@ -80,7 +80,7 @@ func TestScraper(t *testing.T) { pmetrictest.IgnoreMetricsOrder())) } -func TestScraperWithNodeUtilization(t *testing.T) { +func TestScraperWithCPUNodeUtilization(t *testing.T) { watcherStarted := make(chan struct{}) // Create the fake client. client := fake.NewSimpleClientset() @@ -155,6 +155,80 @@ func TestScraperWithNodeUtilization(t *testing.T) { require.NoError(t, err) } +func TestScraperWithMemoryNodeUtilization(t *testing.T) { + watcherStarted := make(chan struct{}) + // Create the fake client. + client := fake.NewSimpleClientset() + // A catch-all watch reactor that allows us to inject the watcherStarted channel. + client.PrependWatchReactor("*", func(action clienttesting.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := client.Tracker().Watch(gvr, ns) + if err != nil { + return false, nil, err + } + close(watcherStarted) + return true, watch, nil + }) + + options := &scraperOptions{ + metricGroupsToCollect: map[kubelet.MetricGroup]bool{ + kubelet.ContainerMetricGroup: true, + kubelet.PodMetricGroup: true, + }, + k8sAPIClient: client, + } + r, err := newKubletScraper( + &fakeRestClient{}, + receivertest.NewNopSettings(), + options, + metadata.MetricsBuilderConfig{ + Metrics: metadata.MetricsConfig{ + K8sContainerMemoryNodeUtilization: metadata.MetricConfig{ + Enabled: true, + }, K8sPodMemoryNodeUtilization: metadata.MetricConfig{ + Enabled: true, + }, + }, + ResourceAttributes: metadata.DefaultResourceAttributesConfig(), + }, + "worker-42", + ) + require.NoError(t, err) + + err = r.Start(context.Background(), nil) + require.NoError(t, err) + + // we wait until the watcher starts + <-watcherStarted + // Inject an event node into the fake client. 
+ node := getNodeWithMemoryCapacity("worker-42", "32564740Ki") + _, err = client.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) + if err != nil { + require.NoError(t, err) + } + + md, err := r.Scrape(context.Background()) + require.NoError(t, err) + require.Equal(t, numContainers+numPods, md.DataPointCount()) + expectedFile := filepath.Join("testdata", "scraper", "test_scraper_memory_util_nodelimit_expected.yaml") + + // Uncomment to regenerate '*_expected.yaml' files + // golden.WriteMetrics(t, expectedFile, md) + + expectedMetrics, err := golden.ReadMetrics(expectedFile) + require.NoError(t, err) + require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, md, + pmetrictest.IgnoreStartTimestamp(), + pmetrictest.IgnoreResourceMetricsOrder(), + pmetrictest.IgnoreMetricDataPointsOrder(), + pmetrictest.IgnoreTimestamp(), + pmetrictest.IgnoreMetricsOrder())) + + err = r.Shutdown(context.Background()) + require.NoError(t, err) +} + func TestScraperWithMetadata(t *testing.T) { tests := []struct { name string diff --git a/receiver/kubeletstatsreceiver/testdata/config.yaml b/receiver/kubeletstatsreceiver/testdata/config.yaml index 6db7d52e9f6b..0c2d9333a4ca 100644 --- a/receiver/kubeletstatsreceiver/testdata/config.yaml +++ b/receiver/kubeletstatsreceiver/testdata/config.yaml @@ -40,3 +40,15 @@ kubeletstats/pod_cpu_node_utilization: metrics: k8s.pod.cpu.node.utilization: enabled: true +kubeletstats/container_memory_node_utilization: + collection_interval: 10s + metric_groups: [ container, pod, node ] + metrics: + k8s.container.memory.node.utilization: + enabled: true +kubeletstats/pod_memory_node_utilization: + collection_interval: 10s + metric_groups: [ container, pod, node ] + metrics: + k8s.pod.memory.node.utilization: + enabled: true diff --git a/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_memory_util_nodelimit_expected.yaml b/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_memory_util_nodelimit_expected.yaml new file mode 100644 index 000000000000..1ad820ce21f3 --- /dev/null +++ b/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_memory_util_nodelimit_expected.yaml @@ -0,0 +1,460 @@ +resourceMetrics: + - resource: + attributes: + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.pod.name + value: + stringValue: go-hello-world-5456b4b8cd-99vxc + - key: k8s.pod.uid + value: + stringValue: 42ad382b-ed0b-446d-9aab-3fdce8b4f9e2 + scopeMetrics: + - metrics: + - description: Pod memory utilization as a ratio of the node's capacity + gauge: + dataPoints: + - asDouble: 0.0007715093073060003 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.pod.memory.node.utilization + unit: "1" + scope: + name: otelcol/kubeletstatsreceiver + version: latest + - resource: + attributes: + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.pod.name + value: + stringValue: coredns-66bff467f8-58qvv + - key: k8s.pod.uid + value: + stringValue: eb632b33-62c6-4a80-9575-a97ab363ad7f + scopeMetrics: + - metrics: + - description: Pod memory utilization as a ratio of the node's capacity + gauge: + dataPoints: + - asDouble: 0.0002510691011198001 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.pod.memory.node.utilization + unit: "1" + scope: + name: otelcol/kubeletstatsreceiver + version: latest + - resource: + attributes: + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.pod.name + value: + stringValue: coredns-66bff467f8-szddj + - key: k8s.pod.uid + 
+          value:
+            stringValue: 0adffe8e-9849-4e05-b4cd-92d2d1e1f1c3
+    scopeMetrics:
+      - metrics:
+          - description: Pod memory utilization as a ratio of the node's capacity
+            gauge:
+              dataPoints:
+                - asDouble: 0.00025868470007744574
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: k8s.pod.memory.node.utilization
+            unit: "1"
+        scope:
+          name: otelcol/kubeletstatsreceiver
+          version: latest
+  - resource:
+      attributes:
+        - key: k8s.namespace.name
+          value:
+            stringValue: kube-system
+        - key: k8s.pod.name
+          value:
+            stringValue: etcd-minikube
+        - key: k8s.pod.uid
+          value:
+            stringValue: 5a5fbd34cfb43ee7bee976798370c910
+    scopeMetrics:
+      - metrics:
+          - description: Pod memory utilization as a ratio of the node's capacity
+            gauge:
+              dataPoints:
+                - asDouble: 0.0011626071634534775
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: k8s.pod.memory.node.utilization
+            unit: "1"
+        scope:
+          name: otelcol/kubeletstatsreceiver
+          version: latest
+  - resource:
+      attributes:
+        - key: k8s.namespace.name
+          value:
+            stringValue: kube-system
+        - key: k8s.pod.name
+          value:
+            stringValue: kube-apiserver-minikube
+        - key: k8s.pod.uid
+          value:
+            stringValue: 3bef16d65fa74d46458df57d8f6f59af
+    scopeMetrics:
+      - metrics:
+          - description: Pod memory utilization as a ratio of the node's capacity
+            gauge:
+              dataPoints:
+                - asDouble: 0.008080641823026992
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: k8s.pod.memory.node.utilization
+            unit: "1"
+        scope:
+          name: otelcol/kubeletstatsreceiver
+          version: latest
+  - resource:
+      attributes:
+        - key: k8s.namespace.name
+          value:
+            stringValue: kube-system
+        - key: k8s.pod.name
+          value:
+            stringValue: kube-controller-manager-minikube
+        - key: k8s.pod.uid
+          value:
+            stringValue: 3016593d20758bbfe68aba26604a8e3d
+    scopeMetrics:
+      - metrics:
+          - description: Pod memory utilization as a ratio of the node's capacity
+            gauge:
+              dataPoints:
+                - asDouble: 0.0011606418475934401
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: k8s.pod.memory.node.utilization
+            unit: "1"
+        scope:
+          name: otelcol/kubeletstatsreceiver
+          version: latest
+  - resource:
+      attributes:
+        - key: k8s.namespace.name
+          value:
+            stringValue: kube-system
+        - key: k8s.pod.name
+          value:
+            stringValue: kube-proxy-v48tf
+        - key: k8s.pod.uid
+          value:
+            stringValue: 0a6d6b05-0e8d-4920-8a38-926a33164d45
+    scopeMetrics:
+      - metrics:
+          - description: Pod memory utilization as a ratio of the node's capacity
+            gauge:
+              dataPoints:
+                - asDouble: 0.0003083089255433945
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: k8s.pod.memory.node.utilization
+            unit: "1"
+        scope:
+          name: otelcol/kubeletstatsreceiver
+          version: latest
+  - resource:
+      attributes:
+        - key: k8s.namespace.name
+          value:
+            stringValue: kube-system
+        - key: k8s.pod.name
+          value:
+            stringValue: kube-scheduler-minikube
+        - key: k8s.pod.uid
+          value:
+            stringValue: 5795d0c442cb997ff93c49feeb9f6386
+    scopeMetrics:
+      - metrics:
+          - description: Pod memory utilization as a ratio of the node's capacity
+            gauge:
+              dataPoints:
+                - asDouble: 0.0004285616897294436
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: k8s.pod.memory.node.utilization
+            unit: "1"
+        scope:
+          name: otelcol/kubeletstatsreceiver
+          version: latest
+  - resource:
+      attributes:
+        - key: k8s.namespace.name
+          value:
+            stringValue: kube-system
+        - key: k8s.pod.name
+          value:
+            stringValue: storage-provisioner
+        - key: k8s.pod.uid
+          value:
+            stringValue: 14bf95e0-9451-4192-b111-807b03163670
+    scopeMetrics:
+      - metrics:
+          - description: Pod memory utilization as a ratio of the node's capacity
+            gauge:
+              dataPoints:
+                - asDouble: 0.00043052700558948113
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: k8s.pod.memory.node.utilization
+            unit: "1"
+        scope:
+          name: otelcol/kubeletstatsreceiver
+          version: latest
+  - resource:
+      attributes:
+        - key: k8s.container.name
+          value:
+            stringValue: coredns
+        - key: k8s.namespace.name
+          value:
+            stringValue: kube-system
+        - key: k8s.pod.name
+          value:
+            stringValue: coredns-66bff467f8-58qvv
+        - key: k8s.pod.uid
+          value:
+            stringValue: eb632b33-62c6-4a80-9575-a97ab363ad7f
+    scopeMetrics:
+      - metrics:
+          - description: Container memory utilization as a ratio of the node's capacity
+            gauge:
+              dataPoints:
+                - asDouble: 0.00023878587699456528
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: k8s.container.memory.node.utilization
+            unit: "1"
+        scope:
+          name: otelcol/kubeletstatsreceiver
+          version: latest
+  - resource:
+      attributes:
+        - key: k8s.container.name
+          value:
+            stringValue: coredns
+        - key: k8s.namespace.name
+          value:
+            stringValue: kube-system
+        - key: k8s.pod.name
+          value:
+            stringValue: coredns-66bff467f8-szddj
+        - key: k8s.pod.uid
+          value:
+            stringValue: 0adffe8e-9849-4e05-b4cd-92d2d1e1f1c3
+    scopeMetrics:
+      - metrics:
+          - description: Container memory utilization as a ratio of the node's capacity
+            gauge:
+              dataPoints:
+                - asDouble: 0.00023817171578830354
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: k8s.container.memory.node.utilization
+            unit: "1"
+        scope:
+          name: otelcol/kubeletstatsreceiver
+          version: latest
+  - resource:
+      attributes:
+        - key: k8s.container.name
+          value:
+            stringValue: etcd
+        - key: k8s.namespace.name
+          value:
+            stringValue: kube-system
+        - key: k8s.pod.name
+          value:
+            stringValue: etcd-minikube
+        - key: k8s.pod.uid
+          value:
+            stringValue: 5a5fbd34cfb43ee7bee976798370c910
+    scopeMetrics:
+      - metrics:
+          - description: Container memory utilization as a ratio of the node's capacity
+            gauge:
+              dataPoints:
+                - asDouble: 0.001145533481919401
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: k8s.container.memory.node.utilization
+            unit: "1"
+        scope:
+          name: otelcol/kubeletstatsreceiver
+          version: latest
+  - resource:
+      attributes:
+        - key: k8s.container.name
+          value:
+            stringValue: kube-apiserver
+        - key: k8s.namespace.name
+          value:
+            stringValue: kube-system
+        - key: k8s.pod.name
+          value:
+            stringValue: kube-apiserver-minikube
+        - key: k8s.pod.uid
+          value:
+            stringValue: 3bef16d65fa74d46458df57d8f6f59af
+    scopeMetrics:
+      - metrics:
+          - description: Container memory utilization as a ratio of the node's capacity
+            gauge:
+              dataPoints:
+                - asDouble: 0.008062953980286653
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: k8s.container.memory.node.utilization
+            unit: "1"
+        scope:
+          name: otelcol/kubeletstatsreceiver
+          version: latest
+  - resource:
+      attributes:
+        - key: k8s.container.name
+          value:
+            stringValue: kube-controller-manager
+        - key: k8s.namespace.name
+          value:
+            stringValue: kube-system
+        - key: k8s.pod.name
+          value:
+            stringValue: kube-controller-manager-minikube
+        - key: k8s.pod.uid
+          value:
+            stringValue: 3016593d20758bbfe68aba26604a8e3d
+    scopeMetrics:
+      - metrics:
+          - description: Container memory utilization as a ratio of the node's capacity
+            gauge:
+              dataPoints:
+                - asDouble: 0.0011416028501993261
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: k8s.container.memory.node.utilization
+            unit: "1"
+        scope:
+          name: otelcol/kubeletstatsreceiver
+          version: latest
+  - resource:
+      attributes:
+        - key: k8s.container.name
+          value:
+            stringValue: kube-proxy
+        - key: k8s.namespace.name
+          value:
+            stringValue: kube-system
+        - key: k8s.pod.name
+          value:
+            stringValue: kube-proxy-v48tf
+        - key: k8s.pod.uid
+          value:
+            stringValue: 0a6d6b05-0e8d-4920-8a38-926a33164d45
+    scopeMetrics:
+      - metrics:
+          - description: Container memory utilization as a ratio of the node's capacity
+            gauge:
+              dataPoints:
+                - asDouble: 0.0002913580762505704
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: k8s.container.memory.node.utilization
+            unit: "1"
+        scope:
+          name: otelcol/kubeletstatsreceiver
+          version: latest
+  - resource:
+      attributes:
+        - key: k8s.container.name
+          value:
+            stringValue: kube-scheduler
+        - key: k8s.namespace.name
+          value:
+            stringValue: kube-system
+        - key: k8s.pod.name
+          value:
+            stringValue: kube-scheduler-minikube
+        - key: k8s.pod.uid
+          value:
+            stringValue: 5795d0c442cb997ff93c49feeb9f6386
+    scopeMetrics:
+      - metrics:
+          - description: Container memory utilization as a ratio of the node's capacity
+            gauge:
+              dataPoints:
+                - asDouble: 0.0004108738469891054
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: k8s.container.memory.node.utilization
+            unit: "1"
+        scope:
+          name: otelcol/kubeletstatsreceiver
+          version: latest
+  - resource:
+      attributes:
+        - key: k8s.container.name
+          value:
+            stringValue: server
+        - key: k8s.namespace.name
+          value:
+            stringValue: default
+        - key: k8s.pod.name
+          value:
+            stringValue: go-hello-world-5456b4b8cd-99vxc
+        - key: k8s.pod.uid
+          value:
+            stringValue: 42ad382b-ed0b-446d-9aab-3fdce8b4f9e2
+    scopeMetrics:
+      - metrics:
+          - description: Container memory utilization as a ratio of the node's capacity
+            gauge:
+              dataPoints:
+                - asDouble: 0.000752347477670634
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: k8s.container.memory.node.utilization
+            unit: "1"
+        scope:
+          name: otelcol/kubeletstatsreceiver
+          version: latest
+  - resource:
+      attributes:
+        - key: k8s.container.name
+          value:
+            stringValue: storage-provisioner
+        - key: k8s.namespace.name
+          value:
+            stringValue: kube-system
+        - key: k8s.pod.name
+          value:
+            stringValue: storage-provisioner
+        - key: k8s.pod.uid
+          value:
+            stringValue: 14bf95e0-9451-4192-b111-807b03163670
+    scopeMetrics:
+      - metrics:
+          - description: Container memory utilization as a ratio of the node's capacity
+            gauge:
+              dataPoints:
+                - asDouble: 0.00041615563336295635
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: k8s.container.memory.node.utilization
+            unit: "1"
+        scope:
+          name: otelcol/kubeletstatsreceiver
+          version: latest

From 2b853bd7cf037a78485be47565e5ab2b1d9e49fe Mon Sep 17 00:00:00 2001
From: ChrsMark
Date: Tue, 25 Jun 2024 10:31:11 +0300
Subject: [PATCH 2/2] fix test to avoid race condition flakiness

Signed-off-by: ChrsMark
---
 receiver/kubeletstatsreceiver/scraper_test.go | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/receiver/kubeletstatsreceiver/scraper_test.go b/receiver/kubeletstatsreceiver/scraper_test.go
index 8affc749db83..a5a1306237b7 100644
--- a/receiver/kubeletstatsreceiver/scraper_test.go
+++ b/receiver/kubeletstatsreceiver/scraper_test.go
@@ -9,8 +9,10 @@ import (
 	"os"
 	"path/filepath"
 	"testing"
+	"time"
 
 	"github.com/stretchr/testify/require"
+	"go.opentelemetry.io/collector/pdata/pmetric"
 	"go.opentelemetry.io/collector/receiver/receivertest"
 	"go.uber.org/zap"
 	"go.uber.org/zap/zaptest/observer"
@@ -208,9 +210,13 @@ func TestScraperWithMemoryNodeUtilization(t *testing.T) {
 		require.NoError(t, err)
 	}
 
-	md, err := r.Scrape(context.Background())
-	require.NoError(t, err)
-	require.Equal(t, numContainers+numPods, md.DataPointCount())
+	var md pmetric.Metrics
+	require.Eventually(t, func() bool {
+		md, err = r.Scrape(context.Background())
+		require.NoError(t, err)
+		return numContainers+numPods == md.DataPointCount()
+	}, 10*time.Second, 100*time.Millisecond,
+		"metrics not collected")
 	expectedFile := filepath.Join("testdata", "scraper", "test_scraper_memory_util_nodelimit_expected.yaml")
 
 	// Uncomment to regenerate '*_expected.yaml' files
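Note on the values in the golden file above: each `k8s.pod.memory.node.utilization` / `k8s.container.memory.node.utilization` data point is the workload's memory usage expressed as a ratio of the node's memory capacity, which the receiver obtains through the k8s API. The sketch below is only a minimal illustration of that ratio; the function and variable names are assumptions made for the example and are not the receiver's actual internals.

```go
package main

import "fmt"

// memoryNodeUtilization is an illustrative helper, not receiver code: it divides a
// workload's memory usage (bytes, from the kubelet stats summary) by the node's
// memory capacity (bytes, from the k8s API).
func memoryNodeUtilization(usedBytes, nodeMemoryBytes uint64) float64 {
	if nodeMemoryBytes == 0 {
		// Without node information the ratio cannot be computed, which is why the
		// receiver requires the node name and k8s_api_config for these metrics.
		return 0
	}
	return float64(usedBytes) / float64(nodeMemoryBytes)
}

func main() {
	// A pod using ~4.3 MB on a node with 16 GB of memory yields a ratio on the
	// same order of magnitude as the fixture values above (~2.7e-4).
	fmt.Println(memoryNodeUtilization(4_300_000, 16_000_000_000))
}
```

The second patch does not change this calculation; it only wraps the scrape in `require.Eventually` so the test retries instead of asserting on the first scrape, removing the flakiness mentioned in the commit message.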