diff --git a/.editorconfig b/.editorconfig
index 64069d7c4b..6821790dcc 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -8,3 +8,10 @@ indent_size = 2
 trim_trailing_whitespace = true
 insert_final_newline = true
 charset = utf-8
+
+[*.go]
+indent_style = tab
+indent_size = 4
+trim_trailing_whitespace = true
+insert_final_newline = true
+charset = utf-8
diff --git a/functional_tests/functional_test.go b/functional_tests/functional_test.go
index c4bbb298a8..2cdde4d140 100644
--- a/functional_tests/functional_test.go
+++ b/functional_tests/functional_test.go
@@ -6,7 +6,6 @@ package functional_tests
 import (
 	"bytes"
 	"context"
-	"encoding/json"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -20,6 +19,7 @@ import (
 
 	"github.com/docker/docker/api/types"
 	docker "github.com/docker/docker/client"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/ptracetest"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver"
@@ -271,7 +271,7 @@ func testNodeJSTraces(t *testing.T) {
 
 	var expectedTraces ptrace.Traces
 	expectedTracesFile := filepath.Join("testdata", "expected_traces.yaml")
-	expectedTraces, err := readTraces(expectedTracesFile)
+	expectedTraces, err := golden.ReadTraces(expectedTracesFile)
 	require.NoError(t, err)
 
 	waitForTraces(t, 3, tracesConsumer)
@@ -305,11 +305,6 @@ func testNodeJSTraces(t *testing.T) {
 		ptracetest.IgnoreResourceSpansOrder(),
 		ptracetest.IgnoreScopeSpansOrder(),
 	)
-	if err != nil {
-		internal.WriteTraces(t, filepath.Join("testdata", "actual_traces.yaml"), latestTrace)
-		b, _ := os.ReadFile(filepath.Join("testdata", "actual_traces.yaml"))
-		fmt.Println(string(b))
-	}
 	require.NoError(t, err)
 }
 
@@ -425,7 +420,7 @@ func shortenNames(value string) string {
 func testK8sClusterReceiverMetrics(t *testing.T) {
 	metricsConsumer := setupOnce(t).k8sclusterReceiverMetricsConsumer
 	expectedMetricsFile := filepath.Join("testdata", "expected_cluster_receiver.yaml")
-	expectedMetrics, err := readMetrics(expectedMetricsFile)
+	expectedMetrics, err := golden.ReadMetrics(expectedMetricsFile)
 	require.NoError(t, err)
 
 	replaceWithStar := func(string) string { return "*" }
@@ -796,46 +791,6 @@ func waitForData(t *testing.T, entriesNum int, tc *consumertest.TracesSink, mc *
 		len(tc.AllTraces()), len(mc.AllMetrics()), len(lc.AllLogs()), timeoutMinutes)
 }
 
-// readMetrics reads a pmetric.Metrics from the specified YAML or JSON file.
-func readMetrics(filePath string) (pmetric.Metrics, error) {
-	b, err := os.ReadFile(filePath)
-	if err != nil {
-		return pmetric.Metrics{}, err
-	}
-	if strings.HasSuffix(filePath, ".yaml") || strings.HasSuffix(filePath, ".yml") {
-		var m map[string]interface{}
-		if err = yaml.Unmarshal(b, &m); err != nil {
-			return pmetric.Metrics{}, err
-		}
-		b, err = json.Marshal(m)
-		if err != nil {
-			return pmetric.Metrics{}, err
-		}
-	}
-	unmarshaller := &pmetric.JSONUnmarshaler{}
-	return unmarshaller.UnmarshalMetrics(b)
-}
-
-// readTraces reads a ptrace.Traces from the specified YAML or JSON file.
-func readTraces(filePath string) (ptrace.Traces, error) {
-	b, err := os.ReadFile(filePath)
-	if err != nil {
-		return ptrace.Traces{}, err
-	}
-	if strings.HasSuffix(filePath, ".yaml") || strings.HasSuffix(filePath, ".yml") {
-		var m map[string]interface{}
-		if err = yaml.Unmarshal(b, &m); err != nil {
-			return ptrace.Traces{}, err
-		}
-		b, err = json.Marshal(m)
-		if err != nil {
-			return ptrace.Traces{}, err
-		}
-	}
-	unmarshaler := ptrace.JSONUnmarshaler{}
-	return unmarshaler.UnmarshalTraces(b)
-}
-
 func setupTraces(t *testing.T) *consumertest.TracesSink {
 	tc := new(consumertest.TracesSink)
 	f := otlpreceiver.NewFactory()
diff --git a/functional_tests/go.mod b/functional_tests/go.mod
index 0e4551f52d..3b0f43427c 100644
--- a/functional_tests/go.mod
+++ b/functional_tests/go.mod
@@ -7,14 +7,14 @@ go 1.20
 
 require (
 	github.com/docker/docker v23.0.3+incompatible
+	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.0.0-20231031211240-1c63ac99a6e0
 	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.86.0
-	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.86.0
 	github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.86.0
 	github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.86.0
 	github.com/stretchr/testify v1.8.4
 	go.opentelemetry.io/collector/component v0.86.0
 	go.opentelemetry.io/collector/consumer v0.86.0
-	go.opentelemetry.io/collector/pdata v1.0.0-rcv0015
+	go.opentelemetry.io/collector/pdata v1.0.0-rcv0017.0.20231026220224-6405e152a2d9
 	go.opentelemetry.io/collector/receiver v0.86.0
 	go.opentelemetry.io/collector/receiver/otlpreceiver v0.86.0
 	gopkg.in/yaml.v3 v3.0.1
@@ -111,6 +111,7 @@ require (
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.86.0 // indirect
 	github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.86.0 // indirect
+	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.88.0 // indirect
 	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.86.0 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opencontainers/image-spec v1.1.0-rc4 // indirect
@@ -172,8 +173,8 @@
 	golang.org/x/time v0.3.0 // indirect
 	golang.org/x/tools v0.13.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect
-	google.golang.org/grpc v1.58.3 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
+	google.golang.org/grpc v1.59.0 // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/functional_tests/go.sum b/functional_tests/go.sum
index 8f10fad960..c7fc513ce7 100644
--- a/functional_tests/go.sum
+++ b/functional_tests/go.sum
@@ -131,7 +131,7 @@ cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63
 cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs=
 cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU=
 cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI=
-cloud.google.com/go/compute v1.21.0 h1:JNBsyXVoOoNJtTQcnEY5uYpZIbeCTYIeDe0Xh1bySMk=
+cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
 cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU=
 cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
 cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
@@ -1061,10 +1061,12 @@ github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.86.
 github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.86.0/go.mod h1:yALAKkORvMeaaMGFhKIzuHGPdDCjEpBahWL+zTxfCuc=
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.86.0 h1:2/KuYugX/jjjj+KRvKKVDwTuTPrSEnZUsznnmFobP34=
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.86.0 h1:c0YrPA9p78Sqm3QWW5OFAuajdTWbTwVvawdvL1hbxvA=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.0.0-20231031211240-1c63ac99a6e0 h1:V5JMjUXMPpfgIqZneDgi/9mOWFHTbh8Rwv+VeBfroNw=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.0.0-20231031211240-1c63ac99a6e0/go.mod h1:b7QvLEydJ3J5ke7B7+3LgZg71+4uccf6TAtgnoLMZdw=
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.86.0 h1:EzGSvuCXAsGpwgeieTVcy1gs0hOlPidhFPcvwcPEU8s=
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.86.0/go.mod h1:EL62K0jwPFXa3uxYeYMZGS1TSLR6AdGHRvM7RnRlosY=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.86.0 h1:xt/YvYpgssWk2Ix2C9SSXrILIzRqyWe+r5RE348m1fE=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.86.0/go.mod h1:fjK1kn7PIDP+TqOIFVEth3w0Eiexx5jIk411c//fYkM=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.88.0 h1:S1FEVDH5GEMZQuHg8jfv47lCHHDFVjZBpO/Yrb/vKpE=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.88.0/go.mod h1:IJqzjDv6ZFeu7cYGCUzQ5/3CuTPVIo3UAGK3o2jK/Sw=
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.86.0 h1:qR/wCuXENMydERtUBpTMCDuQIkGA+x2zh8vUy+bOGq0=
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.86.0/go.mod h1:yrtyAYeuyUdaP6xfywqu1XPtkWqq3vlpUvNX3YSEL8Y=
 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.86.0 h1:6dKEwMkm/hxwxPQy4ClVArX2QL8Vuj5fHSgVB6iTNVY=
@@ -1249,8 +1251,8 @@ go.opentelemetry.io/collector/extension/auth v0.86.0 h1:VwKbeElL8sBnvRDC565EWOw4
 go.opentelemetry.io/collector/extension/auth v0.86.0/go.mod h1:qGIIkeWXaOtdYO1fYEn1vAEhUS+OhVcceUC1G3XOsdk=
 go.opentelemetry.io/collector/featuregate v1.0.0-rcv0015 h1:Wv8JFRUD01MwWkhZwF85to5oukHDFPRjnt88ArDFqco=
 go.opentelemetry.io/collector/featuregate v1.0.0-rcv0015/go.mod h1:fLmJMf1AoHttkF8p5oJAc4o5ZpHu8yO5XYJ7gbLCLzo=
-go.opentelemetry.io/collector/pdata v1.0.0-rcv0015 h1:8PzrQFk3oKiT1Sd5EmNEcagdMyt1KcBy5/OyF5He5gY=
-go.opentelemetry.io/collector/pdata v1.0.0-rcv0015/go.mod h1:I1PqyHJlsXjANC73tp43nDId7/jiv82NoZZ6uS0xdwM=
+go.opentelemetry.io/collector/pdata v1.0.0-rcv0017.0.20231026220224-6405e152a2d9 h1:TVYPzf0ZwFDTSoQ6gPk4lpQgVK4g43cWYuo710E0RHI=
+go.opentelemetry.io/collector/pdata v1.0.0-rcv0017.0.20231026220224-6405e152a2d9/go.mod h1:Rv9fOclA5AtM/JGm0d4jBOIAo1+jBA13UT5Bx0ovXi4=
 go.opentelemetry.io/collector/processor v0.86.0 h1:b4Htiom5mgcM5d7Memw1NkxBKgOADF1je0mLIhulQUM=
 go.opentelemetry.io/collector/receiver v0.86.0 h1:AP+KZ225CmXR1oBD36+vV/pZcRFTkSiG7HvAVqfHoRg=
 go.opentelemetry.io/collector/receiver v0.86.0/go.mod h1:oFpofH/OG4HqmaVsb8ftnIAhLAhQnH/3bWrOdZZZjTk=
@@ -1735,8 +1737,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.
 google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
 google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1771,8 +1773,8 @@ google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v
 google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
 google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
 google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
-google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
-google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
+google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
+google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
diff --git a/functional_tests/internal/normalize_metrics.go b/functional_tests/internal/normalize_metrics.go
deleted file mode 100644
index 157b1fb479..0000000000
--- a/functional_tests/internal/normalize_metrics.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/golden"
-
-import (
-	"sort"
-	"time"
-
-	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil"
-	"go.opentelemetry.io/collector/pdata/pcommon"
-	"go.opentelemetry.io/collector/pdata/pmetric"
-)
-
-func normalizeTimestamps(metrics pmetric.Metrics) {
-	rms := metrics.ResourceMetrics()
-	for i := 0; i < rms.Len(); i++ {
-		for j := 0; j < rms.At(i).ScopeMetrics().Len(); j++ {
-			for k := 0; k < rms.At(i).ScopeMetrics().At(j).Metrics().Len(); k++ {
-				m := rms.At(i).ScopeMetrics().At(j).Metrics().At(k)
-				//exhaustive:enforce
-				switch m.Type() {
-				case pmetric.MetricTypeGauge:
-					normalizeDataPointSlice(dataPointSlice[pmetric.NumberDataPoint](m.Gauge().DataPoints()))
-				case pmetric.MetricTypeSum:
-					normalizeDataPointSlice(dataPointSlice[pmetric.NumberDataPoint](m.Sum().DataPoints()))
-				case pmetric.MetricTypeHistogram:
-					normalizeDataPointSlice(dataPointSlice[pmetric.HistogramDataPoint](m.Histogram().DataPoints()))
-				case pmetric.MetricTypeExponentialHistogram:
-					normalizeDataPointSlice(dataPointSlice[pmetric.ExponentialHistogramDataPoint](m.ExponentialHistogram().DataPoints()))
-				case pmetric.MetricTypeSummary:
-					normalizeDataPointSlice(dataPointSlice[pmetric.SummaryDataPoint](m.Summary().DataPoints()))
-				}
-			}
-		}
-	}
-}
-
-// returns a map of the original timestamps with their corresponding normalized values.
-// normalization entails setting nonunique subsequent timestamps to the same value while incrementing unique timestamps by a set value of 1,000,000 ns
-func normalizeTimeSeries(timeSeries []pcommon.Timestamp) map[pcommon.Timestamp]pcommon.Timestamp {
-	sort.Slice(timeSeries, func(i, j int) bool {
-		return func(t1, t2 pcommon.Timestamp) int {
-			if t1 < t2 {
-				return -1
-			} else if t1 > t2 {
-				return 1
-			}
-			return 0
-		}(timeSeries[i], timeSeries[j]) < 0
-	})
-
-	// normalize values
-	normalizedTs := make(map[pcommon.Timestamp]pcommon.Timestamp)
-	count := 0
-	for _, v := range timeSeries {
-		if v == 0 {
-			continue
-		}
-		if _, ok := normalizedTs[v]; !ok {
-			normalizedTs[v] = normalTime(count)
-			count++
-		}
-	}
-
-	return normalizedTs
-}
-
-func normalTime(timeSeriesIndex int) pcommon.Timestamp {
-	return pcommon.NewTimestampFromTime(time.Unix(0, 0).Add(time.Duration(timeSeriesIndex+1) * 1000000 * time.Nanosecond))
-}
-
-type dataPointSlice[T dataPoint] interface {
-	Len() int
-	At(i int) T
-}
-
-type dataPoint interface {
-	pmetric.NumberDataPoint | pmetric.HistogramDataPoint | pmetric.ExponentialHistogramDataPoint | pmetric.SummaryDataPoint
-	Attributes() pcommon.Map
-	StartTimestamp() pcommon.Timestamp
-	SetStartTimestamp(pcommon.Timestamp)
-	Timestamp() pcommon.Timestamp
-	SetTimestamp(pcommon.Timestamp)
-}
-
-func normalizeDataPointSlice[T dataPoint](dps dataPointSlice[T]) {
-	attrCache := make(map[[16]byte]bool)
-	for i := 0; i < dps.Len(); i++ {
-		attrHash := pdatautil.MapHash(dps.At(i).Attributes())
-		if attrCache[attrHash] {
-			continue
-		}
-		timeSeries := []pcommon.Timestamp{dps.At(i).StartTimestamp(), dps.At(i).Timestamp()}
-
-		// Find any other data points in the time series
-		for j := i + 1; j < dps.Len(); j++ {
-			if pdatautil.MapHash(dps.At(j).Attributes()) != attrHash {
-				continue
-			}
-			timeSeries = append(timeSeries, dps.At(j).StartTimestamp(), dps.At(j).Timestamp())
-		}
-
-		normalizedTs := normalizeTimeSeries(timeSeries)
-		for k := 0; k < dps.Len(); k++ {
-			if pdatautil.MapHash(dps.At(k).Attributes()) != attrHash {
-				continue
-			}
-			dps.At(k).SetTimestamp(normalizedTs[dps.At(k).Timestamp()])
-			dps.At(k).SetStartTimestamp(normalizedTs[dps.At(k).StartTimestamp()])
-		}
-		attrCache[attrHash] = true
-	}
-}
diff --git a/functional_tests/internal/sort_metrics.go b/functional_tests/internal/sort_metrics.go
deleted file mode 100644
index 068fdc22eb..0000000000
--- a/functional_tests/internal/sort_metrics.go
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/golden"
-
-import (
-	"fmt"
-	"sort"
-	"strings"
-
-	"go.opentelemetry.io/collector/pdata/pcommon"
-	"go.opentelemetry.io/collector/pdata/pmetric"
-)
-
-// sorts all Resource Metrics attributes and Datapoint Slice metric attributes and all Resource, Scope, and Datapoint Slices
-func sortMetrics(ms pmetric.Metrics) {
-	rms := ms.ResourceMetrics()
-	for i := 0; i < rms.Len(); i++ {
-		sortAttributeMap(rms.At(i).Resource().Attributes())
-		ilms := rms.At(i).ScopeMetrics()
-		for j := 0; j < ilms.Len(); j++ {
-			sortAttributeMap(ilms.At(j).Scope().Attributes())
-			metricsList := ilms.At(j).Metrics()
-			for k := 0; k < metricsList.Len(); k++ {
-				metric := metricsList.At(k)
-				//exhaustive:enforce
-				switch metricsList.At(k).Type() {
-				case pmetric.MetricTypeGauge:
-					ds := metric.Gauge().DataPoints()
-					for l := 0; l < ds.Len(); l++ {
-						sortAttributeMap(ds.At(l).Attributes())
-					}
-				case pmetric.MetricTypeSum:
-					ds := metric.Sum().DataPoints()
-					for l := 0; l < ds.Len(); l++ {
-						sortAttributeMap(ds.At(l).Attributes())
-					}
-				case pmetric.MetricTypeHistogram:
-					ds := metric.Histogram().DataPoints()
-					for l := 0; l < ds.Len(); l++ {
-						sortAttributeMap(ds.At(l).Attributes())
-					}
-				case pmetric.MetricTypeExponentialHistogram:
-					ds := metric.ExponentialHistogram().DataPoints()
-					for l := 0; l < ds.Len(); l++ {
-						sortAttributeMap(ds.At(l).Attributes())
-					}
-				case pmetric.MetricTypeSummary:
-					ds := metric.Summary().DataPoints()
-					for l := 0; l < ds.Len(); l++ {
-						sortAttributeMap(ds.At(l).Attributes())
-					}
-				}
-			}
-		}
-	}
-
-	sortResources(ms)
-	sortScopes(ms)
-	sortMetricDataPointSlices(ms)
-}
-
-// sortAttributeMap sorts the attributes of a pcommon.Map according to the alphanumeric ordering of the keys
-func sortAttributeMap(mp pcommon.Map) {
-	tempMap := pcommon.NewMap()
-	keys := []string{}
-	mp.Range(func(key string, _ pcommon.Value) bool {
-		keys = append(keys, key)
-		return true
-	})
-	sort.Strings(keys)
-	for _, k := range keys {
-		value, exists := mp.Get(k)
-		if exists {
-			switch value.Type() {
-			case pcommon.ValueTypeStr:
-				tempMap.PutStr(k, value.Str())
-			case pcommon.ValueTypeBool:
-				tempMap.PutBool(k, value.Bool())
-			case pcommon.ValueTypeInt:
-				tempMap.PutInt(k, value.Int())
-			case pcommon.ValueTypeDouble:
-				tempMap.PutDouble(k, value.Double())
-			case pcommon.ValueTypeMap:
-				value.Map().CopyTo(tempMap.PutEmptyMap(k))
-			case pcommon.ValueTypeSlice:
-				value.Slice().CopyTo(tempMap.PutEmptySlice(k))
-			case pcommon.ValueTypeBytes:
-				value.Bytes().CopyTo(tempMap.PutEmptyBytes(k))
-			}
-		}
-	}
-	tempMap.CopyTo(mp)
-}
-
-// sortMetricDataPointSlices sorts the datapoint slice of a pmetric.Metrics according to the alphanumeric ordering of map key
-func sortMetricDataPointSlices(ms pmetric.Metrics) {
-	for i := 0; i < ms.ResourceMetrics().Len(); i++ {
-		for j := 0; j < ms.ResourceMetrics().At(i).ScopeMetrics().Len(); j++ {
-			for k := 0; k < ms.ResourceMetrics().At(i).ScopeMetrics().At(j).Metrics().Len(); k++ {
-				m := ms.ResourceMetrics().At(i).ScopeMetrics().At(j).Metrics().At(k)
-				//exhaustive:enforce
-				switch m.Type() {
-				case pmetric.MetricTypeGauge:
-					sortNumberDataPointSlice(m.Gauge().DataPoints())
-				case pmetric.MetricTypeSum:
-					sortNumberDataPointSlice(m.Sum().DataPoints())
-				case pmetric.MetricTypeHistogram:
-					sortHistogramDataPointSlice(m.Histogram().DataPoints())
-				case pmetric.MetricTypeExponentialHistogram:
-					sortExponentialHistogramDataPointSlice(m.ExponentialHistogram().DataPoints())
-				case pmetric.MetricTypeSummary:
-					sortSummaryDataPointSlice(m.Summary().DataPoints())
-				}
-			}
-		}
-	}
-}
-
-func sortResources(ms pmetric.Metrics) {
-	ms.ResourceMetrics().Sort(func(a, b pmetric.ResourceMetrics) bool {
-		return compareMaps(a.Resource().Attributes(), b.Resource().Attributes()) < 0
-	})
-}
-
-func sortScopes(ms pmetric.Metrics) {
-	for i := 0; i < ms.ResourceMetrics().Len(); i++ {
-		rm := ms.ResourceMetrics().At(i)
-		rm.ScopeMetrics().Sort(func(a, b pmetric.ScopeMetrics) bool {
-			return compareMaps(a.Scope().Attributes(), b.Scope().Attributes()) < 0
-		})
-	}
-}
-
-func sortNumberDataPointSlice(ndps pmetric.NumberDataPointSlice) {
-	ndps.Sort(func(a, b pmetric.NumberDataPoint) bool {
-		return compareMaps(a.Attributes(), b.Attributes()) < 0
-	})
-}
-
-func sortSummaryDataPointSlice(sdps pmetric.SummaryDataPointSlice) {
-	sdps.Sort(func(a, b pmetric.SummaryDataPoint) bool {
-		return compareMaps(a.Attributes(), b.Attributes()) < 0
-	})
-}
-
-func sortHistogramDataPointSlice(hdps pmetric.HistogramDataPointSlice) {
-	hdps.Sort(func(a, b pmetric.HistogramDataPoint) bool {
-		return compareMaps(a.Attributes(), b.Attributes()) < 0
-	})
-}
-
-func sortExponentialHistogramDataPointSlice(ehdps pmetric.ExponentialHistogramDataPointSlice) {
-	ehdps.Sort(func(a, b pmetric.ExponentialHistogramDataPoint) bool {
-		return compareMaps(a.Attributes(), b.Attributes()) < 0
-	})
-}
-
-func compareMaps(a, b pcommon.Map) int {
-	sortAttributeMap(a)
-	sortAttributeMap(b)
-
-	if a.Len() != b.Len() {
-		return a.Len() - b.Len()
-	}
-
-	var aKeys, bKeys []string
-	a.Range(func(k string, v pcommon.Value) bool {
-		aKeys = append(aKeys, fmt.Sprintf("%s: %v", k, v.AsString()))
-		return true
-	})
-	b.Range(func(k string, v pcommon.Value) bool {
-		bKeys = append(bKeys, fmt.Sprintf("%s: %v", k, v.AsString()))
-		return true
-	})
-
-	for i := 0; i < len(aKeys); i++ {
-		if aKeys[i] != bKeys[i] {
-			return strings.Compare(aKeys[i], bKeys[i])
-		}
-	}
-
-	return 0
-}
diff --git a/functional_tests/internal/writer.go b/functional_tests/internal/writer.go
deleted file mode 100644
index 90e9c19ba4..0000000000
--- a/functional_tests/internal/writer.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright Splunk Inc.
-// SPDX-License-Identifier: Apache-2.0
-
-package internal
-
-import (
-	"bytes"
-	"encoding/json"
-	"os"
-	"testing"
-
-	"go.opentelemetry.io/collector/pdata/plog"
-	"go.opentelemetry.io/collector/pdata/pmetric"
-	"go.opentelemetry.io/collector/pdata/ptrace"
-	"gopkg.in/yaml.v3"
-)
-
-// WriteMetrics writes a pmetric.Metrics to the specified file in YAML format.
-func WriteMetrics(t *testing.T, filePath string, metrics pmetric.Metrics) error {
-	if err := writeMetrics(filePath, metrics); err != nil {
-		return err
-	}
-	t.Logf("Golden file successfully written to %s.", filePath)
-	t.Log("NOTE: The WriteMetrics call must be removed in order to pass the test.")
-	t.Fail()
-	return nil
-}
-
-// marshalMetricsYAML marshals a pmetric.Metrics to YAML format.
-func marshalMetricsYAML(metrics pmetric.Metrics) ([]byte, error) {
-	unmarshaler := &pmetric.JSONMarshaler{}
-	fileBytes, err := unmarshaler.MarshalMetrics(metrics)
-	if err != nil {
-		return nil, err
-	}
-	var jsonVal map[string]interface{}
-	if err = json.Unmarshal(fileBytes, &jsonVal); err != nil {
-		return nil, err
-	}
-	b := &bytes.Buffer{}
-	enc := yaml.NewEncoder(b)
-	enc.SetIndent(2)
-	if err := enc.Encode(jsonVal); err != nil {
-		return nil, err
-	}
-	return b.Bytes(), nil
-}
-
-// writeMetrics writes a pmetric.Metrics to the specified file in YAML format.
-func writeMetrics(filePath string, metrics pmetric.Metrics) error {
-	sortMetrics(metrics)
-	normalizeTimestamps(metrics)
-	b, err := marshalMetricsYAML(metrics)
-	if err != nil {
-		return err
-	}
-	return os.WriteFile(filePath, b, 0600)
-}
-
-// WriteLogs writes a plog.Logs to the specified file in YAML format.
-func WriteLogs(t *testing.T, filePath string, logs plog.Logs) error {
-	if err := writeLogs(filePath, logs); err != nil {
-		return err
-	}
-	t.Logf("Golden file successfully written to %s.", filePath)
-	t.Log("NOTE: The WriteLogs call must be removed in order to pass the test.")
-	t.Fail()
-	return nil
-}
-
-func marshalLogs(logs plog.Logs) ([]byte, error) {
-	unmarshaler := &plog.JSONMarshaler{}
-	fileBytes, err := unmarshaler.MarshalLogs(logs)
-	if err != nil {
-		return nil, err
-	}
-	var jsonVal map[string]interface{}
-	if err = json.Unmarshal(fileBytes, &jsonVal); err != nil {
-		return nil, err
-	}
-	b := &bytes.Buffer{}
-	enc := yaml.NewEncoder(b)
-	enc.SetIndent(2)
-	if err := enc.Encode(jsonVal); err != nil {
-		return nil, err
-	}
-	return b.Bytes(), nil
-}
-
-func writeLogs(filePath string, logs plog.Logs) error {
-	b, err := marshalLogs(logs)
-	if err != nil {
-		return err
-	}
-	return os.WriteFile(filePath, b, 0600)
-}
-
-// WriteTraces writes a ptrace.Traces to the specified file in YAML format.
-func WriteTraces(t *testing.T, filePath string, traces ptrace.Traces) error {
-	if err := writeTraces(filePath, traces); err != nil {
-		return err
-	}
-	t.Logf("Golden file successfully written to %s.", filePath)
-	t.Log("NOTE: The WriteLogs call must be removed in order to pass the test.")
-	t.Fail()
-	return nil
-}
-
-func marshalTraces(traces ptrace.Traces) ([]byte, error) {
-	marshaler := &ptrace.JSONMarshaler{}
-	fileBytes, err := marshaler.MarshalTraces(traces)
-	if err != nil {
-		return nil, err
-	}
-	var jsonVal map[string]interface{}
-	if err = json.Unmarshal(fileBytes, &jsonVal); err != nil {
-		return nil, err
-	}
-	b := &bytes.Buffer{}
-	enc := yaml.NewEncoder(b)
-	enc.SetIndent(2)
-	if err := enc.Encode(jsonVal); err != nil {
-		return nil, err
-	}
-	return b.Bytes(), nil
-}
-
-func writeTraces(filePath string, traces ptrace.Traces) error {
-	b, err := marshalTraces(traces)
-	if err != nil {
-		return err
-	}
-	return os.WriteFile(filePath, b, 0600)
-}
diff --git a/functional_tests/testdata/test_values.yaml b/functional_tests/testdata/test_values.yaml
deleted file mode 100644
index 2224f95c85..0000000000
--- a/functional_tests/testdata/test_values.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-clusterName: dev-operator
-splunkObservability:
-  realm: CHANGEME
-  accessToken: CHANGEME
-agent:
-  config:
-    exporters:
-      otlp:
-        endpoint: $ENDPOINT
-        tls:
-          insecure: true
-    service:
-      pipelines:
-        traces:
-          exporters:
-            - otlp
-clusterReceiver:
-  config:
-    exporters:
-      otlp:
-        endpoint: $ENDPOINT
-        tls:
-          insecure: true
-    service:
-      pipelines:
-        metrics:
-          # Remove batch processor.
-          processors: [ memory_limiter, resource, resource/k8s_cluster ]
-          exporters:
-            - otlp
-environment: dev
-operator:
-  enabled: true