diff --git a/.golangci.yml b/.golangci.yml
index 1af55d5af28..01c82c7329c 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -126,10 +126,10 @@ linters-settings:
 
 linters:
   enable:
+    - copyloopvar
     - depguard
     - errcheck
     - errorlint
-    - exportloopref
     - gocritic
     - gofmt
     - goimports
diff --git a/component/config.go b/component/config.go
index 91898258f6a..d7eb0e005fd 100644
--- a/component/config.go
+++ b/component/config.go
@@ -86,7 +86,7 @@ func callValidateIfPossible(v reflect.Value) error {
 	if reflect.PointerTo(v.Type()).Implements(configValidatorType) {
 		// If not addressable, then create a new *V pointer and set the value to current v.
 		if !v.CanAddr() {
-			pv := reflect.New(reflect.PtrTo(v.Type()).Elem())
+			pv := reflect.New(reflect.PointerTo(v.Type()).Elem())
 			pv.Elem().Set(v)
 			v = pv.Elem()
 		}
diff --git a/config/confighttp/compress_readcloser_test.go b/config/confighttp/compress_readcloser_test.go
index fd97a15e242..70f9f6fccdc 100644
--- a/config/confighttp/compress_readcloser_test.go
+++ b/config/confighttp/compress_readcloser_test.go
@@ -53,7 +53,6 @@ func TestCompressReadCloser(t *testing.T) {
 			errVal: "failed to close reader",
 		},
 	} {
-		tc := tc
 		t.Run(tc.name, func(t *testing.T) {
 			t.Parallel()
 
diff --git a/config/confighttp/compression_test.go b/config/confighttp/compression_test.go
index 9f161a3f7c3..870e9ba8371 100644
--- a/config/confighttp/compression_test.go
+++ b/config/confighttp/compression_test.go
@@ -415,7 +415,6 @@ func TestDecompressorAvoidDecompressionBomb(t *testing.T) {
 			compress: compressLz4,
 		},
 	} {
-		tc := tc
 		t.Run(tc.name, func(t *testing.T) {
 			t.Parallel()
 
diff --git a/confmap/confmap_test.go b/confmap/confmap_test.go
index 5436e82036b..b96b55263b9 100644
--- a/confmap/confmap_test.go
+++ b/confmap/confmap_test.go
@@ -233,8 +233,6 @@ func TestUintUnmarshalerSuccess(t *testing.T) {
 	}
 
 	for _, tt := range tests {
-		tt := tt
-
 		t.Run(tt.name, func(t *testing.T) {
 			stringMap := map[string]any{
 				"uint_test": tt.testValue,
@@ -244,6 +242,7 @@ func TestUintUnmarshalerSuccess(t *testing.T) {
 			err := conf.Unmarshal(cfg)
 			require.NoError(t, err)
+			// nolint:gosec
 			assert.Equal(t, cfg.UintTest, uint32(tt.testValue))
 		})
 	}
@@ -251,6 +250,7 @@ func TestUint64Unmarshaler(t *testing.T) {
 	negativeInt := -1000
+	// nolint:gosec
 	testValue := uint64(negativeInt)
 
 	type Uint64Config struct {
@@ -663,8 +663,6 @@ func TestZeroSliceHookFunc(t *testing.T) {
 	}
 
 	for _, tt := range tests {
-		tt := tt
-
 		t.Run(tt.name, func(t *testing.T) {
 			cfg := NewFromStringMap(tt.cfg)
 
diff --git a/exporter/debugexporter/internal/otlptext/databuffer.go b/exporter/debugexporter/internal/otlptext/databuffer.go
index 94b2f6551d4..4e73794da7a 100644
--- a/exporter/debugexporter/internal/otlptext/databuffer.go
+++ b/exporter/debugexporter/internal/otlptext/databuffer.go
@@ -202,9 +202,9 @@ func (b *dataBuffer) logExponentialHistogramDataPoints(ps pmetric.ExponentialHis
 
 		for i := 0; i < negB.Len(); i++ {
 			pos := negB.Len() - i - 1
-			index := p.Negative().Offset() + int32(pos)
-			lower := math.Exp(float64(index) * factor)
-			upper := math.Exp(float64(index+1) * factor)
+			index := float64(p.Negative().Offset()) + float64(pos)
+			lower := math.Exp(index * factor)
+			upper := math.Exp((index + 1) * factor)
 			b.logEntry("Bucket [%f, %f), Count: %d", -upper, -lower, negB.At(pos))
 		}
 
@@ -213,9 +213,9 @@ func (b *dataBuffer) logExponentialHistogramDataPoints(ps pmetric.ExponentialHis
 
 	for pos := 0; pos < posB.Len(); pos++ {
-		index := p.Positive().Offset() + int32(pos)
-		lower := math.Exp(float64(index) * factor)
-		upper := math.Exp(float64(index+1) * factor)
+		index := float64(p.Positive().Offset()) + float64(pos)
+		lower := math.Exp(index * factor)
+		upper := math.Exp((index + 1) * factor)
 		b.logEntry("Bucket (%f, %f], Count: %d", lower, upper, posB.At(pos))
 	}
 
diff --git a/exporter/exporterhelper/internal/batch_sender_test.go b/exporter/exporterhelper/internal/batch_sender_test.go
index c9f6cc084e2..456218fd914 100644
--- a/exporter/exporterhelper/internal/batch_sender_test.go
+++ b/exporter/exporterhelper/internal/batch_sender_test.go
@@ -75,7 +75,7 @@ func TestBatchSender_Merge(t *testing.T) {
 
 	require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 3, sink: sink, mergeErr: errors.New("merge error")}))
-	assert.Equal(t, uint64(1), sink.requestsCount.Load())
+	assert.Equal(t, int64(1), sink.requestsCount.Load())
 	assert.Eventually(t, func() bool {
 		return sink.requestsCount.Load() == 2 && sink.itemsCount.Load() == 15
 	}, 100*time.Millisecond, 10*time.Millisecond)
@@ -89,8 +89,8 @@ func TestBatchSender_BatchExportError(t *testing.T) {
 	tests := []struct {
 		name             string
 		batcherOption    Option
-		expectedRequests uint64
-		expectedItems    uint64
+		expectedRequests int64
+		expectedItems    int64
 	}{
 		{
 			name: "merge_only",
@@ -131,7 +131,7 @@ func TestBatchSender_BatchExportError(t *testing.T) {
 
 			// the first two requests should be blocked by the batchSender.
 			time.Sleep(50 * time.Millisecond)
-			assert.Equal(t, uint64(0), sink.requestsCount.Load())
+			assert.Equal(t, int64(0), sink.requestsCount.Load())
 
 			// the third request should trigger the export and cause an error.
 			errReq := &fakeRequest{items: 20, exportErr: errors.New("transient error"), sink: sink}
@@ -203,8 +203,8 @@ func TestBatchSender_Shutdown(t *testing.T) {
 	require.NoError(t, be.Shutdown(context.Background()))
 
 	// shutdown should force sending the batch
-	assert.Equal(t, uint64(1), sink.requestsCount.Load())
-	assert.Equal(t, uint64(3), sink.itemsCount.Load())
+	assert.Equal(t, int64(1), sink.requestsCount.Load())
+	assert.Equal(t, int64(3), sink.itemsCount.Load())
 }
 
 func TestBatchSender_Disabled(t *testing.T) {
@@ -224,8 +224,8 @@ func TestBatchSender_Disabled(t *testing.T) {
 	sink := newFakeRequestSink()
 	// should be sent right away without splitting because batching is disabled.
 	require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 8, sink: sink}))
-	assert.Equal(t, uint64(1), sink.requestsCount.Load())
-	assert.Equal(t, uint64(8), sink.itemsCount.Load())
+	assert.Equal(t, int64(1), sink.requestsCount.Load())
+	assert.Equal(t, int64(8), sink.itemsCount.Load())
 }
 
 func TestBatchSender_InvalidMergeSplitFunc(t *testing.T) {
@@ -270,8 +270,8 @@ func TestBatchSender_PostShutdown(t *testing.T) {
 	// Closed batch sender should act as a pass-through to not block queue draining.
 	sink := newFakeRequestSink()
 	require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 8, sink: sink}))
-	assert.Equal(t, uint64(1), sink.requestsCount.Load())
-	assert.Equal(t, uint64(8), sink.itemsCount.Load())
+	assert.Equal(t, int64(1), sink.requestsCount.Load())
+	assert.Equal(t, int64(8), sink.itemsCount.Load())
 }
 
 func TestBatchSender_ConcurrencyLimitReached(t *testing.T) {
@@ -281,8 +281,8 @@ func TestBatchSender_ConcurrencyLimitReached(t *testing.T) {
 	tests := []struct {
 		name             string
 		batcherCfg       exporterbatcher.Config
-		expectedRequests uint64
-		expectedItems    uint64
+		expectedRequests int64
+		expectedItems    int64
 	}{
 		{
 			name: "merge_only",
@@ -318,7 +318,6 @@ func TestBatchSender_ConcurrencyLimitReached(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
 			qCfg := exporterqueue.NewDefaultConfig()
 			qCfg.NumConsumers = 2
@@ -397,8 +396,8 @@ func TestBatchSender_BatchBlocking(t *testing.T) {
 	wg.Wait()
 
 	// should be sent in two batches since the batch size is 3
-	assert.Equal(t, uint64(2), sink.requestsCount.Load())
-	assert.Equal(t, uint64(6), sink.itemsCount.Load())
+	assert.Equal(t, int64(2), sink.requestsCount.Load())
+	assert.Equal(t, int64(6), sink.itemsCount.Load())
 
 	require.NoError(t, be.Shutdown(context.Background()))
 }
@@ -433,8 +432,8 @@ func TestBatchSender_BatchCancelled(t *testing.T) {
 	wg.Wait()
 
 	// nothing should be delivered
-	assert.Equal(t, uint64(0), sink.requestsCount.Load())
-	assert.Equal(t, uint64(0), sink.itemsCount.Load())
+	assert.Equal(t, int64(0), sink.requestsCount.Load())
+	assert.Equal(t, int64(0), sink.itemsCount.Load())
 
 	require.NoError(t, be.Shutdown(context.Background()))
 }
@@ -468,8 +467,8 @@ func TestBatchSender_DrainActiveRequests(t *testing.T) {
 
 	// It should take 120 milliseconds to complete.
 	require.NoError(t, be.Shutdown(context.Background()))
-	assert.Equal(t, uint64(2), sink.requestsCount.Load())
-	assert.Equal(t, uint64(3), sink.itemsCount.Load())
+	assert.Equal(t, int64(2), sink.requestsCount.Load())
+	assert.Equal(t, int64(3), sink.itemsCount.Load())
 }
 
 func TestBatchSender_WithBatcherOption(t *testing.T) {
@@ -654,7 +653,7 @@ func TestBatchSenderTimerResetNoConflict(t *testing.T) {
 
 	// The batch should be sent either with the flush interval or by reaching the minimum items size with no conflict
 	assert.EventuallyWithT(t, func(c *assert.CollectT) {
-		assert.LessOrEqual(c, uint64(1), sink.requestsCount.Load())
+		assert.LessOrEqual(c, int64(1), sink.requestsCount.Load())
 		assert.EqualValues(c, 8, sink.itemsCount.Load())
 	}, 200*time.Millisecond, 10*time.Millisecond)
 
@@ -683,7 +682,7 @@ func TestBatchSenderTimerFlush(t *testing.T) {
 		assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink}))
 	}()
 	assert.EventuallyWithT(t, func(c *assert.CollectT) {
-		assert.LessOrEqual(c, uint64(1), sink.requestsCount.Load())
+		assert.LessOrEqual(c, int64(1), sink.requestsCount.Load())
 		assert.EqualValues(c, 8, sink.itemsCount.Load())
 	}, 30*time.Millisecond, 5*time.Millisecond)
 
@@ -694,12 +693,12 @@ func TestBatchSenderTimerFlush(t *testing.T) {
 
 	// Confirm that it is not flushed in 50ms
 	time.Sleep(60 * time.Millisecond)
-	assert.LessOrEqual(t, uint64(1), sink.requestsCount.Load())
+	assert.LessOrEqual(t, int64(1), sink.requestsCount.Load())
 	assert.EqualValues(t, 8, sink.itemsCount.Load())
 
 	// Confirm that it is flushed after 100ms (using 60+50=110 here to be safe)
 	time.Sleep(50 * time.Millisecond)
-	assert.LessOrEqual(t, uint64(2), sink.requestsCount.Load())
+	assert.LessOrEqual(t, int64(2), sink.requestsCount.Load())
 	assert.EqualValues(t, 12, sink.itemsCount.Load())
 	require.NoError(t, be.Shutdown(context.Background()))
 }
@@ -711,3 +710,65 @@ func queueBatchExporter(t *testing.T, batchOption Option) *BaseExporter {
 	require.NoError(t, err)
 	return be
 }
+
+func fakeBatchMergeFunc(_ context.Context, r1 internal.Request, r2 internal.Request) (internal.Request, error) {
+	if r1 == nil {
+		return r2, nil
+	}
+	fr1 := r1.(*fakeRequest)
+	fr2 := r2.(*fakeRequest)
+	if fr2.mergeErr != nil {
+		return nil, fr2.mergeErr
+	}
+	return &fakeRequest{
+		items:     fr1.items + fr2.items,
+		sink:      fr1.sink,
+		exportErr: fr2.exportErr,
+		delay:     fr1.delay + fr2.delay,
+	}, nil
+}
+
+func fakeBatchMergeSplitFunc(ctx context.Context, cfg exporterbatcher.MaxSizeConfig, r1 internal.Request, r2 internal.Request) ([]internal.Request, error) {
+	maxItems := cfg.MaxSizeItems
+	if maxItems == 0 {
+		r, err := fakeBatchMergeFunc(ctx, r1, r2)
+		return []internal.Request{r}, err
+	}
+
+	if r2.(*fakeRequest).mergeErr != nil {
+		return nil, r2.(*fakeRequest).mergeErr
+	}
+
+	fr2 := r2.(*fakeRequest)
+	fr2 = &fakeRequest{items: fr2.items, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay}
+	var res []internal.Request
+
+	// fill fr1 to maxItems if it's not nil
+	if r1 != nil {
+		fr1 := r1.(*fakeRequest)
+		fr1 = &fakeRequest{items: fr1.items, sink: fr1.sink, exportErr: fr1.exportErr, delay: fr1.delay}
+		if fr2.items <= maxItems-fr1.items {
+			fr1.items += fr2.items
+			if fr2.exportErr != nil {
+				fr1.exportErr = fr2.exportErr
+			}
+			return []internal.Request{fr1}, nil
+		}
+		// if split is needed, we don't propagate exportErr from fr2 to fr1 to test more cases
+		fr2.items -= maxItems - fr1.items
+		fr1.items = maxItems
+		res = append(res, fr1)
+	}
+
+	// split fr2 to maxItems
+	for {
+		if fr2.items <= maxItems {
+			res = append(res, &fakeRequest{items: fr2.items, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay})
+			break
+		}
+		res = append(res, &fakeRequest{items: maxItems, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay})
+		fr2.items -= maxItems
+	}
+
+	return res, nil
+}
diff --git a/exporter/exporterhelper/internal/request.go b/exporter/exporterhelper/internal/request.go
index 79f58a82608..33a1915c65d 100644
--- a/exporter/exporterhelper/internal/request.go
+++ b/exporter/exporterhelper/internal/request.go
@@ -8,7 +8,6 @@ import (
 	"sync/atomic"
 	"time"
 
-	"go.opentelemetry.io/collector/exporter/exporterbatcher"
 	"go.opentelemetry.io/collector/exporter/internal"
 	"go.opentelemetry.io/collector/pdata/plog"
 	"go.opentelemetry.io/collector/pdata/pmetric"
 )
@@ -16,14 +15,14 @@
 
 type fakeRequestSink struct {
-	requestsCount *atomic.Uint64
-	itemsCount    *atomic.Uint64
+	requestsCount *atomic.Int64
+	itemsCount    *atomic.Int64
 }
 
 func newFakeRequestSink() *fakeRequestSink {
 	return &fakeRequestSink{
-		requestsCount: &atomic.Uint64{},
-		itemsCount:    &atomic.Uint64{},
+		requestsCount: new(atomic.Int64),
+		itemsCount:    new(atomic.Int64),
 	}
 }
@@ -46,7 +45,7 @@ func (r *fakeRequest) Export(ctx context.Context) error {
 	}
 	if r.sink != nil {
 		r.sink.requestsCount.Add(1)
-		r.sink.itemsCount.Add(uint64(r.items))
+		r.sink.itemsCount.Add(int64(r.items))
 	}
 	return nil
 }
@@ -55,68 +54,6 @@ func (r *fakeRequest) ItemsCount() int {
 	return r.items
 }
 
-func fakeBatchMergeFunc(_ context.Context, r1 internal.Request, r2 internal.Request) (internal.Request, error) {
-	if r1 == nil {
-		return r2, nil
-	}
-	fr1 := r1.(*fakeRequest)
-	fr2 := r2.(*fakeRequest)
-	if fr2.mergeErr != nil {
-		return nil, fr2.mergeErr
-	}
-	return &fakeRequest{
-		items:     fr1.items + fr2.items,
-		sink:      fr1.sink,
-		exportErr: fr2.exportErr,
-		delay:     fr1.delay + fr2.delay,
-	}, nil
-}
-
-func fakeBatchMergeSplitFunc(ctx context.Context, cfg exporterbatcher.MaxSizeConfig, r1 internal.Request, r2 internal.Request) ([]internal.Request, error) {
-	maxItems := cfg.MaxSizeItems
-	if maxItems == 0 {
-		r, err := fakeBatchMergeFunc(ctx, r1, r2)
-		return []internal.Request{r}, err
-	}
-
-	if r2.(*fakeRequest).mergeErr != nil {
-		return nil, r2.(*fakeRequest).mergeErr
-	}
-
-	fr2 := r2.(*fakeRequest)
-	fr2 = &fakeRequest{items: fr2.items, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay}
-	var res []internal.Request
-
-	// fill fr1 to maxItems if it's not nil
-	if r1 != nil {
-		fr1 := r1.(*fakeRequest)
-		fr1 = &fakeRequest{items: fr1.items, sink: fr1.sink, exportErr: fr1.exportErr, delay: fr1.delay}
-		if fr2.items <= maxItems-fr1.items {
-			fr1.items += fr2.items
-			if fr2.exportErr != nil {
-				fr1.exportErr = fr2.exportErr
-			}
-			return []internal.Request{fr1}, nil
-		}
-		// if split is needed, we don't propagate exportErr from fr2 to fr1 to test more cases
-		fr2.items -= maxItems - fr1.items
-		fr1.items = maxItems
-		res = append(res, fr1)
-	}
-
-	// split fr2 to maxItems
-	for {
-		if fr2.items <= maxItems {
-			res = append(res, &fakeRequest{items: fr2.items, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay})
-			break
-		}
-		res = append(res, &fakeRequest{items: maxItems, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay})
-		fr2.items -= maxItems
-	}
-
-	return res, nil
-}
-
 type FakeRequestConverter struct {
 	MetricsError error
 	TracesError  error
diff --git a/exporter/internal/queue/persistent_queue.go b/exporter/internal/queue/persistent_queue.go
index e378e41699c..e7637332ff8 100644
--- a/exporter/internal/queue/persistent_queue.go
+++ b/exporter/internal/queue/persistent_queue.go
@@ -166,6 +166,7 @@ func (pq *persistentQueue[T]) initPersistentContiguousStorage(ctx context.Contex
 		initEls = make([]permanentQueueEl, initIndexSize)
 	}
 
+	// nolint: gosec
 	pq.sizedChannel = newSizedChannel[permanentQueueEl](pq.set.Capacity, initEls, int64(initQueueSize))
 }
@@ -213,6 +214,7 @@ func (pq *persistentQueue[T]) backupQueueSize(ctx context.Context) error {
 		return nil
 	}
 
+	// nolint: gosec
 	return pq.client.Set(ctx, queueSizeKey, itemIndexToBytes(uint64(pq.Size())))
 }
@@ -524,6 +526,7 @@ func bytesToItemIndex(buf []byte) (uint64, error) {
 func itemIndexArrayToBytes(arr []uint64) []byte {
 	size := len(arr)
 	buf := make([]byte, 0, 4+size*8)
+	// nolint: gosec
 	buf = binary.LittleEndian.AppendUint32(buf, uint32(size))
 	for _, item := range arr {
 		buf = binary.LittleEndian.AppendUint64(buf, item)
diff --git a/exporter/internal/queue/persistent_queue_test.go b/exporter/internal/queue/persistent_queue_test.go
index 81945d295c9..6890f25037f 100644
--- a/exporter/internal/queue/persistent_queue_test.go
+++ b/exporter/internal/queue/persistent_queue_test.go
@@ -499,8 +499,8 @@ func TestPersistentQueue_CurrentlyProcessedItems(t *testing.T) {
 	require.EqualValues(t, 6, newPs.writeIndex)
 
 	// There should be no items left in the storage
-	for i := 0; i < int(newPs.writeIndex); i++ {
-		bb, err := newPs.client.Get(context.Background(), getItemKey(uint64(i)))
+	for i := uint64(0); i < newPs.writeIndex; i++ {
+		bb, err := newPs.client.Get(context.Background(), getItemKey(i))
 		require.NoError(t, err)
 		require.Nil(t, bb)
 	}
diff --git a/exporter/otlpexporter/otlp_test.go b/exporter/otlpexporter/otlp_test.go
index 5dae9ded0f4..9f31c900973 100644
--- a/exporter/otlpexporter/otlp_test.go
+++ b/exporter/otlpexporter/otlp_test.go
@@ -42,8 +42,8 @@ import (
 
 type mockReceiver struct {
 	srv          *grpc.Server
-	requestCount *atomic.Int32
-	totalItems   *atomic.Int32
+	requestCount *atomic.Int64
+	totalItems   *atomic.Int64
 	mux          sync.Mutex
 	metadata     metadata.MD
 	exportError  error
@@ -69,9 +69,9 @@ type mockTracesReceiver struct {
 }
 
 func (r *mockTracesReceiver) Export(ctx context.Context, req ptraceotlp.ExportRequest) (ptraceotlp.ExportResponse, error) {
-	r.requestCount.Add(int32(1))
+	r.requestCount.Add(1)
 	td := req.Traces()
-	r.totalItems.Add(int32(td.SpanCount()))
+	r.totalItems.Add(int64(td.SpanCount()))
 	r.mux.Lock()
 	defer r.mux.Unlock()
 	r.lastRequest = td
@@ -110,8 +110,8 @@ func otlpTracesReceiverOnGRPCServer(ln net.Listener, useTLS bool) (*mockTracesRe
 	rcv := &mockTracesReceiver{
 		mockReceiver: mockReceiver{
 			srv:          grpc.NewServer(sopts...),
-			requestCount: &atomic.Int32{},
-			totalItems:   &atomic.Int32{},
+			requestCount: new(atomic.Int64),
+			totalItems:   new(atomic.Int64),
 		},
 		exportResponse: ptraceotlp.NewExportResponse,
 	}
@@ -133,9 +133,9 @@ type mockLogsReceiver struct {
 }
 
 func (r *mockLogsReceiver) Export(ctx context.Context, req plogotlp.ExportRequest) (plogotlp.ExportResponse, error) {
-	r.requestCount.Add(int32(1))
+	r.requestCount.Add(1)
 	ld := req.Logs()
-	r.totalItems.Add(int32(ld.LogRecordCount()))
+	r.totalItems.Add(int64(ld.LogRecordCount()))
 	r.mux.Lock()
 	defer r.mux.Unlock()
 	r.lastRequest = ld
@@ -159,8 +159,8 @@ func otlpLogsReceiverOnGRPCServer(ln net.Listener) *mockLogsReceiver {
 	rcv := &mockLogsReceiver{
 		mockReceiver: mockReceiver{
 			srv:          grpc.NewServer(),
-			requestCount: &atomic.Int32{},
-			totalItems:   &atomic.Int32{},
+			requestCount: new(atomic.Int64),
+			totalItems:   new(atomic.Int64),
 		},
 		exportResponse: plogotlp.NewExportResponse,
 	}
@@ -183,8 +183,8 @@ type mockMetricsReceiver struct {
 
 func (r *mockMetricsReceiver) Export(ctx context.Context, req pmetricotlp.ExportRequest) (pmetricotlp.ExportResponse, error) {
 	md := req.Metrics()
-	r.requestCount.Add(int32(1))
-	r.totalItems.Add(int32(md.DataPointCount()))
+	r.requestCount.Add(1)
+	r.totalItems.Add(int64(md.DataPointCount()))
 	r.mux.Lock()
 	defer r.mux.Unlock()
 	r.lastRequest = md
@@ -208,8 +208,8 @@ func otlpMetricsReceiverOnGRPCServer(ln net.Listener) *mockMetricsReceiver {
 	rcv := &mockMetricsReceiver{
 		mockReceiver: mockReceiver{
 			srv:          grpc.NewServer(),
-			requestCount: &atomic.Int32{},
-			totalItems:   &atomic.Int32{},
+			requestCount: new(atomic.Int64),
+			totalItems:   new(atomic.Int64),
 		},
 		exportResponse: pmetricotlp.NewExportResponse,
 	}
diff --git a/otelcol/collector.go b/otelcol/collector.go
index 3526861ad83..13ac5986ee5 100644
--- a/otelcol/collector.go
+++ b/otelcol/collector.go
@@ -95,7 +95,7 @@ type Collector struct {
 	serviceConfig *service.Config
 	service       *service.Service
-	state         *atomic.Int32
+	state         *atomic.Int64
 
 	// shutdownChan is used to terminate the collector.
 	shutdownChan chan struct{}
@@ -121,8 +121,8 @@ func NewCollector(set CollectorSettings) (*Collector, error) {
 		return nil, err
 	}
 
-	state := &atomic.Int32{}
-	state.Store(int32(StateStarting))
+	state := new(atomic.Int64)
+	state.Store(int64(StateStarting))
 	return &Collector{
 		set:   set,
 		state: state,
@@ -366,5 +366,5 @@ func (col *Collector) shutdown(ctx context.Context) error {
 
 // setCollectorState provides current state of the collector
 func (col *Collector) setCollectorState(state State) {
-	col.state.Store(int32(state))
+	col.state.Store(int64(state))
 }
diff --git a/pdata/pcommon/timestamp.go b/pdata/pcommon/timestamp.go
index 5fd1758b1be..666f86f43f6 100644
--- a/pdata/pcommon/timestamp.go
+++ b/pdata/pcommon/timestamp.go
@@ -13,11 +13,13 @@ type Timestamp uint64
 
 // NewTimestampFromTime constructs a new Timestamp from the provided time.Time.
 func NewTimestampFromTime(t time.Time) Timestamp {
+	// nolint:gosec
 	return Timestamp(uint64(t.UnixNano()))
 }
 
 // AsTime converts this to a time.Time.
 func (ts Timestamp) AsTime() time.Time {
+	// nolint:gosec
 	return time.Unix(0, int64(ts)).UTC()
 }
diff --git a/pdata/pcommon/timestamp_test.go b/pdata/pcommon/timestamp_test.go
index 35986663d10..edd94596ec6 100644
--- a/pdata/pcommon/timestamp_test.go
+++ b/pdata/pcommon/timestamp_test.go
@@ -12,6 +12,7 @@ import (
 
 func TestUnixNanosConverters(t *testing.T) {
 	t1 := time.Date(2020, 03, 24, 1, 13, 23, 789, time.UTC)
+	// nolint:gosec
 	tun := Timestamp(t1.UnixNano())
 
 	assert.EqualValues(t, uint64(1585012403000000789), tun)
diff --git a/pdata/pcommon/value.go b/pdata/pcommon/value.go
index 77a84e51758..286b9c928e3 100644
--- a/pdata/pcommon/value.go
+++ b/pdata/pcommon/value.go
@@ -148,6 +148,7 @@ func (v Value) FromRaw(iv any) error {
 	case int64:
 		v.SetInt(tv)
 	case uint:
+		// nolint:gosec
 		v.SetInt(int64(tv))
 	case uint8:
 		v.SetInt(int64(tv))
@@ -156,6 +157,7 @@ func (v Value) FromRaw(iv any) error {
 	case uint32:
 		v.SetInt(int64(tv))
 	case uint64:
+		// nolint:gosec
 		v.SetInt(int64(tv))
 	case float32:
 		v.SetDouble(float64(tv))
diff --git a/processor/batchprocessor/batch_processor_test.go b/processor/batchprocessor/batch_processor_test.go
index 8ab9aca8565..3d2e6571406 100644
--- a/processor/batchprocessor/batch_processor_test.go
+++ b/processor/batchprocessor/batch_processor_test.go
@@ -167,12 +167,19 @@ func TestBatchProcessorSpansDeliveredEnforceBatchSize(t *testing.T) {
 }
 
 func TestBatchProcessorSentBySize(t *testing.T) {
+	const (
+		sendBatchSize          = 20
+		requestCount           = 100
+		spansPerRequest        = 5
+		expectedBatchesNum     = requestCount * spansPerRequest / sendBatchSize
+		expectedBatchingFactor = sendBatchSize / spansPerRequest
+	)
+
 	tel := setupTestTelemetry()
 	sizer := &ptrace.ProtoMarshaler{}
 	sink := new(consumertest.TracesSink)
 	cfg := createDefaultConfig().(*Config)
-	sendBatchSize := 20
-	cfg.SendBatchSize = uint32(sendBatchSize)
+	cfg.SendBatchSize = sendBatchSize
 	cfg.Timeout = 500 * time.Millisecond
 	creationSet := tel.NewSettings()
 	creationSet.MetricsLevel = configtelemetry.LevelDetailed
@@ -180,9 +187,6 @@ func TestBatchProcessorSentBySize(t *testing.T) {
 	require.NoError(t, err)
 	require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost()))
 
-	requestCount := 100
-	spansPerRequest := 5
-
 	start := time.Now()
 	sizeSum := 0
 	for requestNum := 0; requestNum < requestCount; requestNum++ {
@@ -196,9 +200,6 @@ func TestBatchProcessorSentBySize(t *testing.T) {
 	elapsed := time.Since(start)
 	require.LessOrEqual(t, elapsed.Nanoseconds(), cfg.Timeout.Nanoseconds())
 
-	expectedBatchesNum := requestCount * spansPerRequest / sendBatchSize
-	expectedBatchingFactor := sendBatchSize / spansPerRequest
-
 	require.Equal(t, requestCount*spansPerRequest, sink.SpanCount())
 	receivedTraces := sink.AllTraces()
 	require.Len(t, receivedTraces, expectedBatchesNum)
@@ -286,12 +287,18 @@ func TestBatchProcessorSentBySize(t *testing.T) {
 }
 
 func TestBatchProcessorSentBySizeWithMaxSize(t *testing.T) {
+	const (
+		sendBatchSize    = 20
+		sendBatchMaxSize = 37
+		requestCount     = 1
+		spansPerRequest  = 500
+		totalSpans       = requestCount * spansPerRequest
+	)
+
 	tel := setupTestTelemetry()
 	sizer := &ptrace.ProtoMarshaler{}
 	sink := new(consumertest.TracesSink)
 	cfg := createDefaultConfig().(*Config)
-	sendBatchSize := 20
-	sendBatchMaxSize := 37
 	cfg.SendBatchSize = uint32(sendBatchSize)
 	cfg.SendBatchMaxSize = uint32(sendBatchMaxSize)
 	cfg.Timeout = 500 * time.Millisecond
@@ -301,10 +308,6 @@ func TestBatchProcessorSentBySizeWithMaxSize(t *testing.T) {
 	require.NoError(t, err)
 	require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost()))
 
-	requestCount := 1
-	spansPerRequest := 500
-	totalSpans := requestCount * spansPerRequest
-
 	start := time.Now()
 	sizeSum := 0
 
@@ -319,11 +322,11 @@ func TestBatchProcessorSentBySizeWithMaxSize(t *testing.T) {
 	require.LessOrEqual(t, elapsed.Nanoseconds(), cfg.Timeout.Nanoseconds())
 
 	// The max batch size is not a divisor of the total number of spans
-	expectedBatchesNum := int(math.Ceil(float64(totalSpans) / float64(sendBatchMaxSize)))
+	expectedBatchesNum := math.Ceil(float64(totalSpans) / float64(sendBatchMaxSize))
 	require.Equal(t, totalSpans, sink.SpanCount())
 	receivedTraces := sink.AllTraces()
-	require.Len(t, receivedTraces, expectedBatchesNum)
+	require.Len(t, receivedTraces, int(expectedBatchesNum))
 	// we have to count the size after it was processed since splitTraces will cause some
 	// repeated ResourceSpan data to be sent through the processor
 	var min, max int
@@ -563,10 +566,12 @@ func TestBatchMetricProcessorBatchSize(t *testing.T) {
 		SendBatchSize: 50,
 	}
 
-	requestCount := 100
-	metricsPerRequest := 5
-	dataPointsPerMetric := 2 // Since the int counter uses two datapoints.
-	dataPointsPerRequest := metricsPerRequest * dataPointsPerMetric
+	const (
+		requestCount         = 100
+		metricsPerRequest    = 5
+		dataPointsPerMetric  = 2 // Since the int counter uses two datapoints.
+		dataPointsPerRequest = metricsPerRequest * dataPointsPerMetric
+	)
 
 	sink := new(consumertest.MetricsSink)
 	creationSet := tel.NewSettings()
@@ -587,12 +592,12 @@ func TestBatchMetricProcessorBatchSize(t *testing.T) {
 	elapsed := time.Since(start)
 	require.LessOrEqual(t, elapsed.Nanoseconds(), cfg.Timeout.Nanoseconds())
 
-	expectedBatchesNum := requestCount * dataPointsPerRequest / int(cfg.SendBatchSize)
+	expectedBatchesNum := requestCount * dataPointsPerRequest / cfg.SendBatchSize
 	expectedBatchingFactor := int(cfg.SendBatchSize) / dataPointsPerRequest
 
 	require.Equal(t, requestCount*2*metricsPerRequest, sink.DataPointCount())
 	receivedMds := sink.AllMetrics()
-	require.Len(t, receivedMds, expectedBatchesNum)
+	require.Len(t, receivedMds, int(expectedBatchesNum))
 	for _, md := range receivedMds {
 		require.Equal(t, expectedBatchingFactor, md.ResourceMetrics().Len())
 		for i := 0; i < expectedBatchingFactor; i++ {
@@ -616,8 +621,8 @@ func TestBatchMetricProcessorBatchSize(t *testing.T) {
 							1000_000, 2000_000, 3000_000, 4000_000, 5000_000, 6000_000, 7000_000, 8000_000, 9000_000},
 						BucketCounts: []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, uint64(expectedBatchesNum), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
 						Sum:          int64(size),
-						Min:          metricdata.NewExtrema(int64(size / expectedBatchesNum)),
-						Max:          metricdata.NewExtrema(int64(size / expectedBatchesNum)),
+						Min:          metricdata.NewExtrema(int64(size / int(expectedBatchesNum))),
+						Max:          metricdata.NewExtrema(int64(size / int(expectedBatchesNum))),
 					},
 				},
 			},
@@ -946,8 +951,10 @@ func TestBatchLogProcessor_BatchSize(t *testing.T) {
 		SendBatchSize: 50,
 	}
 
-	requestCount := 100
-	logsPerRequest := 5
+	const (
+		requestCount   = 100
+		logsPerRequest = 5
+	)
 
 	sink := new(consumertest.LogsSink)
 	creationSet := tel.NewSettings()
@@ -968,12 +975,12 @@ func TestBatchLogProcessor_BatchSize(t *testing.T) {
 	elapsed := time.Since(start)
 	require.LessOrEqual(t, elapsed.Nanoseconds(), cfg.Timeout.Nanoseconds())
 
-	expectedBatchesNum := requestCount * logsPerRequest / int(cfg.SendBatchSize)
+	expectedBatchesNum := requestCount * logsPerRequest / cfg.SendBatchSize
 	expectedBatchingFactor := int(cfg.SendBatchSize) / logsPerRequest
 
 	require.Equal(t, requestCount*logsPerRequest, sink.LogRecordCount())
 	receivedMds := sink.AllLogs()
-	require.Len(t, receivedMds, expectedBatchesNum)
+	require.Len(t, receivedMds, int(expectedBatchesNum))
 	for _, ld := range receivedMds {
 		require.Equal(t, expectedBatchingFactor, ld.ResourceLogs().Len())
 		for i := 0; i < expectedBatchingFactor; i++ {
@@ -997,8 +1004,8 @@ func TestBatchLogProcessor_BatchSize(t *testing.T) {
 							1000_000, 2000_000, 3000_000, 4000_000, 5000_000, 6000_000, 7000_000, 8000_000, 9000_000},
 						BucketCounts: []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, uint64(expectedBatchesNum), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
 						Sum:          int64(size),
-						Min:          metricdata.NewExtrema(int64(size / expectedBatchesNum)),
-						Max:          metricdata.NewExtrema(int64(size / expectedBatchesNum)),
+						Min:          metricdata.NewExtrema(int64(size / int(expectedBatchesNum))),
+						Max:          metricdata.NewExtrema(int64(size / int(expectedBatchesNum))),
 					},
 				},
 			},
diff --git a/processor/memorylimiterprocessor/memorylimiter_test.go b/processor/memorylimiterprocessor/memorylimiter_test.go
index 5eac39cbe88..73fcfdd4d9d 100644
--- a/processor/memorylimiterprocessor/memorylimiter_test.go
+++ b/processor/memorylimiterprocessor/memorylimiter_test.go
@@ -48,6 +48,7 @@ func TestNoDataLoss(t *testing.T) {
 	runtime.ReadMemStats(&ms)
 
 	// Set the limit to current usage plus expected increase. This means initially we will not be limited.
+	// nolint:gosec
 	cfg.MemoryLimitMiB = uint32(ms.Alloc/(1024*1024) + expectedMemoryIncreaseMiB)
 	cfg.MemorySpikeLimitMiB = 1
diff --git a/receiver/otlpreceiver/otlp_test.go b/receiver/otlpreceiver/otlp_test.go
index 1f174319767..23e2115a983 100644
--- a/receiver/otlpreceiver/otlp_test.go
+++ b/receiver/otlpreceiver/otlp_test.go
@@ -674,6 +674,7 @@ func TestOTLPReceiverHTTPTracesIngestTest(t *testing.T) {
 			errStatus := &spb.Status{}
 			require.NoError(t, proto.Unmarshal(respBytes, errStatus))
 			assert.Equal(t, ingestionState.expectedStatusCode, resp.StatusCode)
+			// nolint:gosec
 			assert.Equal(t, ingestionState.expectedCode, codes.Code(errStatus.Code))
 		}
 	}
diff --git a/receiver/scraperhelper/config_test.go b/receiver/scraperhelper/config_test.go
index 1da08e78006..c82b95ddfda 100644
--- a/receiver/scraperhelper/config_test.go
+++ b/receiver/scraperhelper/config_test.go
@@ -37,7 +37,6 @@ func TestScrapeControllerSettings(t *testing.T) {
 			errVal: `"timeout": requires positive value`,
 		},
 	} {
-		tc := tc
 		t.Run(tc.name, func(t *testing.T) {
 			t.Parallel()
 
diff --git a/service/internal/graph/node.go b/service/internal/graph/node.go
index 0e17bb74bf5..0d1c329c68e 100644
--- a/service/internal/graph/node.go
+++ b/service/internal/graph/node.go
@@ -17,5 +17,6 @@ func (n nodeID) ID() int64 {
 func newNodeID(parts ...string) nodeID {
 	h := fnv.New64a()
 	h.Write([]byte(strings.Join(parts, "|")))
+	// nolint:gosec
 	return nodeID(h.Sum64())
 }
diff --git a/service/internal/proctelemetry/process_telemetry.go b/service/internal/proctelemetry/process_telemetry.go
index 0fe58d5d694..de60d4518ed 100644
--- a/service/internal/proctelemetry/process_telemetry.go
+++ b/service/internal/proctelemetry/process_telemetry.go
@@ -68,6 +68,7 @@ func RegisterProcessMetrics(cfg component.TelemetrySettings, opts ...RegisterOpt
 		ctx = context.WithValue(ctx, common.EnvKey, common.EnvMap{common.HostProcEnvKey: set.hostProc})
 	}
 	pm.context = ctx
+	// nolint:gosec
 	pm.proc, err = process.NewProcessWithContext(pm.context, int32(os.Getpid()))
 	if err != nil {
 		return err
 	}
@@ -93,6 +94,7 @@ func (pm *processMetrics) updateAllocMem() int64 {
 	pm.mu.Lock()
 	defer pm.mu.Unlock()
 	pm.readMemStatsIfNeeded()
+	// nolint:gosec
 	return int64(pm.ms.Alloc)
 }
@@ -100,6 +102,7 @@ func (pm *processMetrics) updateTotalAllocMem() int64 {
 	pm.mu.Lock()
 	defer pm.mu.Unlock()
 	pm.readMemStatsIfNeeded()
+	// nolint:gosec
 	return int64(pm.ms.TotalAlloc)
 }
@@ -107,6 +110,7 @@ func (pm *processMetrics) updateSysMem() int64 {
 	pm.mu.Lock()
 	defer pm.mu.Unlock()
 	pm.readMemStatsIfNeeded()
+	// nolint:gosec
 	return int64(pm.ms.Sys)
 }
@@ -125,6 +129,7 @@ func (pm *processMetrics) updateRSSMemory() int64 {
 	if err != nil {
 		return 0
 	}
+	// nolint:gosec
 	return int64(mem.RSS)
 }
diff --git a/service/internal/status/status_test.go b/service/internal/status/status_test.go
index 18a5fa3553f..8c9498bd1e3 100644
--- a/service/internal/status/status_test.go
+++ b/service/internal/status/status_test.go
@@ -241,7 +241,6 @@ func TestStatusFuncsConcurrent(t *testing.T) {
 	wg.Add(len(ids))
 
 	for _, id := range ids {
-		id := id
 		go func() {
			compFn := NewReportStatusFunc(id, rep.ReportStatus)
			compFn(componentstatus.NewEvent(componentstatus.StatusStarting))
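
Note on the loop-variable hunks (the `.golangci.yml` change and the `tc := tc` / `tt := tt` / `id := id` removals): `exportloopref` is retired in favour of `copyloopvar` because Go 1.22 gives every loop iteration its own variable, so the per-iteration copies are redundant. A minimal sketch of the resulting idiom; the package and test names below are illustrative, not collector code:

package example

import "testing"

// Table-driven parallel subtests without the per-iteration copy: under Go 1.22
// semantics `tc` is a fresh variable on every iteration, so the closure passed
// to t.Run captures the value it expects.
func TestLoopVar(t *testing.T) {
	for _, tc := range []struct{ name string }{{name: "a"}, {name: "b"}} {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			_ = tc.name // safe without tc := tc
		})
	}
}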
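Note on the `databuffer.go` hunks: only the type of the bucket index changes (it is now computed as float64 instead of int32); the bound formula lower = exp(index * factor), upper = exp((index + 1) * factor) is unchanged. A sketch of that arithmetic, assuming factor = ln(2) * 2^-scale as defined for OTLP exponential histograms; the helper below is illustrative, not collector code:

package example

import "math"

// bucketBounds mirrors the math in the hunks above: for scale s the bucket
// base is 2^(2^-s), so factor = ln(2) * 2^-s, and the bucket at index
// offset+pos spans (exp(index*factor), exp((index+1)*factor)].
func bucketBounds(scale, offset int32, pos int) (lower, upper float64) {
	factor := math.Ldexp(math.Ln2, -int(scale))
	index := float64(offset) + float64(pos)
	return math.Exp(index * factor), math.Exp((index + 1) * factor)
}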
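Note on the `// nolint:gosec` additions: they suppress gosec's integer-conversion overflow findings (rule G115 in recent gosec releases) at call sites where the value range is known to be safe. Where the range is not guaranteed, an explicit guard is the usual alternative; a hedged sketch with an illustrative helper name:

package example

import (
	"fmt"
	"math"
)

// uint64ToInt64 is a guarded alternative to a bare int64(u) conversion.
func uint64ToInt64(u uint64) (int64, error) {
	if u > math.MaxInt64 {
		return 0, fmt.Errorf("value %d overflows int64", u)
	}
	return int64(u), nil
}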