From 11d765c3afe5767f6963a9dcf05e4974d3756fe3 Mon Sep 17 00:00:00 2001
From: xchen
Date: Wed, 25 Sep 2024 17:48:22 -0700
Subject: [PATCH] more style fixes

---
 .../Scheduling/ResourcesScrapingJob.cs        |   2 +-
 .../AzureResourceDefinitionBatching.cs        |   6 +-
 .../Model/Metrics/BatchScrapeDefinition.cs    |   3 +-
 .../Model/Metrics/ScrapeDefinitionBatch.cs    | 105 ------------------
 .../AzureMonitorQueryClient.cs                |   2 +-
 .../Extensions/MetricResultExtension.cs       |   4 +-
 ...odifyOutgoingAzureMonitorRequestsPolicy.cs |   4 +-
 .../Collectors/PrometheusSystemMetricsSink.cs |   1 -
 .../AzureResourceDefinitionBatchingTests.cs   |  12 +-
 9 files changed, 16 insertions(+), 123 deletions(-)
 delete mode 100644 src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs

diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs
index 73cb7c01d..1e5a7d834 100644
--- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs
+++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs
@@ -257,7 +257,7 @@ private async Task ScrapeMetrics(IEnumerable<ScrapeDefinition<IAzureResourceDef
diff --git a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs
--- a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs
+++ b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs
-        public static List<BatchScrapeDefinition<IAzureResourceDefinition>> GroupScrapeDefinitions(IEnumerable<ScrapeDefinition<IAzureResourceDefinition>> allScrapeDefinitions, int maxBatchSize, CancellationToken cancellationToken)
+        public static List<BatchScrapeDefinition<IAzureResourceDefinition>> GroupScrapeDefinitions(IEnumerable<ScrapeDefinition<IAzureResourceDefinition>> allScrapeDefinitions, int maxBatchSize)
         {
             return allScrapeDefinitions.GroupBy(def => def.BuildScrapingBatchInfo())
                     .ToDictionary(group => group.Key, group => group.ToList()) // first pass to build batches that could exceed max
-                    .ToDictionary(group => group.Key, group => SplitScrapeDefinitionBatch(group.Value, maxBatchSize, cancellationToken)) // split to right-sized batches
+                    .ToDictionary(group => group.Key, group => SplitScrapeDefinitionBatch(group.Value, maxBatchSize)) // split to right-sized batches
                     .SelectMany(group => group.Value.Select(batch => new BatchScrapeDefinition<IAzureResourceDefinition>(group.Key, batch)))
                     .ToList(); // flatten
         }
@@ -27,7 +27,7 @@ public static List<BatchScrapeDefinition<IAzureResourceDefinition>> GroupScrapeD
         /// <summary>
         /// splits the "raw" batch according to max batch size configured
         /// </summary>
-        private static List<List<ScrapeDefinition<IAzureResourceDefinition>>> SplitScrapeDefinitionBatch(List<ScrapeDefinition<IAzureResourceDefinition>> batchToSplit, int maxBatchSize, CancellationToken cancellationToken)
+        private static List<List<ScrapeDefinition<IAzureResourceDefinition>>> SplitScrapeDefinitionBatch(List<ScrapeDefinition<IAzureResourceDefinition>> batchToSplit, int maxBatchSize)
         {
             int numNewGroups = ((batchToSplit.Count - 1) / maxBatchSize) + 1;
diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs
index 97f96563f..d5d9052f8 100644
--- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs
+++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs
@@ -1,4 +1,3 @@
-using System;
 using System.Collections.Generic;
 using GuardNet;
 using Promitor.Core.Contracts;
@@ -33,7 +32,7 @@ public BatchScrapeDefinition(ScrapeDefinitionBatchProperties scrapeDefinitionBat
         /// <summary>
         /// A batch of scrape job definitions to be executed as a single request
         /// </summary>
-        public List<ScrapeDefinition<IAzureResourceDefinition>> ScrapeDefinitions { get; set; } = new List<ScrapeDefinition<IAzureResourceDefinition>>();
+        public List<ScrapeDefinition<IAzureResourceDefinition>> ScrapeDefinitions { get; set; }
 
         public ScrapeDefinitionBatchProperties ScrapeDefinitionBatchProperties { get; set; }
     }
diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs
deleted file mode 100644
index c98669582..000000000
--- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs
+++ /dev/null
@@ -1,105 +0,0 @@
-using System;
-using System.Collections.Generic;
-using GuardNet;
-using Promitor.Core.Contracts;
-
-namespace Promitor.Core.Scraping.Configuration.Model.Metrics
-{
-    /// <summary>
-    /// Defines properties of a batch of scrape definitions
-    /// </summary>
-    public class ScrapeDefinitionBatchProperties : IEquatable<ScrapeDefinitionBatchProperties>
-    {
-        /// <param name="azureMetricConfiguration">Configuration about the Azure Monitor metric to scrape</param>
-        /// <param name="logAnalyticsConfiguration">Configuration about the LogAnalytics resource to scrape</param>
-        /// <param name="prometheusMetricDefinition">The details of the prometheus metric that will be created.</param>
-        /// <param name="scraping">The scraping model.</param>
-        /// <param name="resourceType">Resource type of the batch</param>
-        /// <param name="subscriptionId">Specify a subscription to scrape that defers from the default subscription.</param>
-        public ScrapeDefinitionBatchProperties(
-            AzureMetricConfiguration azureMetricConfiguration,
-            LogAnalyticsConfiguration logAnalyticsConfiguration,
-            PrometheusMetricDefinition prometheusMetricDefinition,
-            ResourceType resourceType,
-            Scraping scraping,
-            string subscriptionId)
-        {
-            Guard.NotNull(azureMetricConfiguration, nameof(azureMetricConfiguration));
-            Guard.NotNull(prometheusMetricDefinition, nameof(prometheusMetricDefinition));
-            Guard.NotNull(scraping, nameof(scraping));
-            Guard.NotNull(subscriptionId, nameof(subscriptionId));
-
-            AzureMetricConfiguration = azureMetricConfiguration;
-            LogAnalyticsConfiguration = logAnalyticsConfiguration;
-            PrometheusMetricDefinition = prometheusMetricDefinition;
-            Scraping = scraping;
-            SubscriptionId = subscriptionId;
-            ResourceType = resourceType;
-        }
-
-        /// <summary>
-        /// Configuration about the Azure Monitor metric to scrape
-        /// </summary>
-        public AzureMetricConfiguration AzureMetricConfiguration { get; }
-
-        /// <summary>
-        /// Configuration about the Azure Monitor log analytics resource to scrape
-        /// </summary>
-        public LogAnalyticsConfiguration LogAnalyticsConfiguration { get; }
-
-        /// <summary>
-        /// The details of the prometheus metric that will be created.
-        /// </summary>
-        public PrometheusMetricDefinition PrometheusMetricDefinition { get; }
-
-        /// <summary>
-        /// The scraping model.
-        /// </summary>
-        public Scraping Scraping { get; }
-
-        /// <summary>
-        /// The Azure subscription to get the metric from.
-        /// </summary>
-        public string SubscriptionId { get; }
-
-        /// <summary>
-        /// The Azure resource type shared by all scrape definitions in the batch
-        /// </summary>
-        public ResourceType ResourceType { get; }
-
-        public TimeSpan? GetAggregationInterval()
-        {
-            if (ResourceType == ResourceType.LogAnalytics)
-            {
-                return LogAnalyticsConfiguration?.Aggregation?.Interval;
-            }
-            return AzureMetricConfiguration?.Aggregation?.Interval;
-        }
-
-        public override int GetHashCode()
-        {
-            return this.BuildBatchHashKey().GetHashCode();
-        }
-
-        /// <summary>
-        /// Builds a namespaced string key to satisfy batch restrictions, in the format of
-        /// <AzureMetricConfiguration>_<SubscriptionId>_<ResourceType>_<AggregationInterval>
-        /// </summary>
-        public string BuildBatchHashKey()
-        {
-            return string.Join("_", new List<string> {AzureMetricConfiguration.ToUniqueStringRepresentation(), SubscriptionId, ResourceType.ToString(), GetAggregationInterval().ToString()});
-        }
-
-        /// <summary>
-        /// Equality comparison override in case of hash collision
-        /// </summary>
-        public bool Equals(ScrapeDefinitionBatchProperties obj)
-        {
-            if (obj == null || !(obj is ScrapeDefinitionBatchProperties))
-                return false;
-
-            ScrapeDefinitionBatchProperties other = obj;
-            return ResourceType == other.ResourceType && AzureMetricConfiguration.ToUniqueStringRepresentation() == other.AzureMetricConfiguration.ToUniqueStringRepresentation() && SubscriptionId == other.SubscriptionId && GetAggregationInterval().Equals(other.GetAggregationInterval());
-        }
-    }
-}
\ No newline at end of file
diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs
index b343696a4..1f8f34ea5 100644
--- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs
+++ b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs
@@ -302,7 +302,7 @@ private MetricsClient CreateAzureMonitorMetricsBatchClient(AzureCloud azureCloud
         public static string InsertRegionIntoUrl(string region, string baseUrl)
         {
             // Find the position where ".metrics" starts in the URL
-            int metricsIndex = baseUrl.IndexOf("metrics");
+            int metricsIndex = baseUrl.IndexOf("metrics", System.StringComparison.Ordinal);
 
             // Split the base URL into two parts: before and after the ".metrics"
             string beforeMetrics = baseUrl.Substring(0, metricsIndex);
diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs
index b51ea03a4..6db3e1c37 100644
--- a/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs
+++ b/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs
@@ -15,8 +15,8 @@ public static string ParseResourceIdFromResultId(this MetricResult metricResult)
         private static string ExtractResourceId(string fullId)
         {
             // Find the index of the second occurrence of "/providers/"
-            int firstIndex = fullId.IndexOf("/providers/");
-            int secondIndex = fullId.IndexOf("/providers/", firstIndex + 1);
+            int firstIndex = fullId.IndexOf("/providers/", System.StringComparison.Ordinal);
+            int secondIndex = fullId.IndexOf("/providers/", firstIndex + 1, System.StringComparison.Ordinal);
 
             // If the second "/providers/" is found, slice the string up to that point
             if (secondIndex != -1)
diff --git a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs
index 1da811323..9db3814c2 100644
--- a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs
+++ b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs
@@ -10,7 +10,7 @@ namespace Promitor.Integrations.AzureMonitor.HttpPipelinePolicies{
     /// <summary>
-    /// Work around to make sure range queries work properly. 
+    /// Work around to make sure range queries work properly.
     /// </summary>
     public class ModifyOutgoingAzureMonitorRequestsPolicy : HttpPipelinePolicy
     {
@@ -50,7 +50,7 @@ private void ModifyDateTimeParam(List<string> paramNames, HttpMessage message)
                 // Update the message with the modified URI
                 }
             }
-            message.Request.Uri.Query = query.ToString(); 
+            message.Request.Uri.Query = query.ToString();
         }
     }
 }
diff --git a/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs b/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs
index 636a2e0f5..a3c260cb2 100644
--- a/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs
+++ b/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs
@@ -49,7 +49,6 @@ public Task WriteHistogramMeasurementAsync(string name, string description, doub
         {
             var orderedLabels = labels.OrderByDescending(kvp => kvp.Key).ToDictionary(kvp => kvp.Key, kvp => kvp.Value);
 
-            // TODO: are histogram instruments created on every invocation? Would that interfere with correctness?
             var histogram = _metricFactory.CreateHistogram(name, help: description, includeTimestamp: includeTimestamp, labelNames: orderedLabels.Keys.ToArray(), buckets: [1, 2, 4, 8, 16, 32, 64]);
             histogram.WithLabels(orderedLabels.Values.ToArray()).Observe(value);
             return Task.CompletedTask;
diff --git a/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs b/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs
index 4090b4b47..68a674481 100644
--- a/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs
+++ b/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs
@@ -61,7 +61,7 @@ public void IdenticalBatchPropertiesShouldBatchTogether()
             azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping,
             resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10
         );
-        var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: batchSize, CancellationToken.None);
+        var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: batchSize);
         // expect one batch of 10
         Assert.Single(groupedScrapeDefinitions);
         Assert.Equal(10, groupedScrapeDefinitions[0].ScrapeDefinitions.Count);
@@ -79,7 +79,7 @@ public void BatchShouldSplitAccordingToConfiguredBatchSize()
             azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping,
             resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 25
         );
-        var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: testBatchSize, CancellationToken.None);
+        var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: testBatchSize);
         // expect three batches adding up to total size
         Assert.Equal(3, groupedScrapeDefinitions.Count);
         Assert.Equal(25, CountTotalScrapeDefinitions(groupedScrapeDefinitions));
@@ -99,7 +99,7 @@ public void DifferentBatchPropertiesShouldBatchSeparately()
             azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping,
             resourceType: ResourceType.BlobStorage, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10
         );
-        var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize, CancellationToken.None);
+        var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize);
        // expect two batches of 10 each
        Assert.Equal(2, groupedScrapeDefinitions.Count);
        Assert.Equal(10, groupedScrapeDefinitions[0].ScrapeDefinitions.Count);
@@ -123,7 +123,7 @@ public void DifferentAggregationIntervalsShouldBatchSeparately()
             azureMetricConfiguration: azureMetricConfiguration2MInterval, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping,
             resourceType: ResourceType.BlobStorage, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10
         );
-        var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions5m, .. differentScrapeDefinitions2m], maxBatchSize: batchSize, CancellationToken.None);
+        var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions5m, .. differentScrapeDefinitions2m], maxBatchSize: batchSize);
        // expect two batches of 10 each
        Assert.Equal(2, groupedScrapeDefinitions.Count);
        Assert.Equal(10, groupedScrapeDefinitions[0].ScrapeDefinitions.Count);
@@ -145,7 +145,7 @@ public void MixedBatchShouldSplitAccordingToConfiguredBatchSize()
             azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping,
             resourceType: ResourceType.BlobStorage, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 120
         );
-        var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize, CancellationToken.None);
+        var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize);
        // expect six batches adding up to total size
        Assert.Equal(6, groupedScrapeDefinitions.Count);
        Assert.Equal(250, CountTotalScrapeDefinitions(groupedScrapeDefinitions));
@@ -165,7 +165,7 @@ public void BatchConstructionShouldBeAgnosticToResourceGroup()
             azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping,
             resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: "group2", 10
         );
-        var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize, CancellationToken.None);
+        var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize);
        // expect a single batch of 20
        Assert.Single(groupedScrapeDefinitions);
        Assert.Equal(20, groupedScrapeDefinitions[0].ScrapeDefinitions.Count);
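
A minimal, self-contained sketch of the group-then-split batching that GroupScrapeDefinitions and SplitScrapeDefinitionBatch implement after this patch. SimpleScrapeDefinition and its key properties are illustrative stand-ins, not Promitor types; the real code keys on ScrapeDefinitionBatchProperties (metric configuration, subscription, resource type, aggregation interval) via BuildScrapingBatchInfo().

using System;
using System.Collections.Generic;
using System.Linq;

public record SimpleScrapeDefinition(string MetricName, string SubscriptionId, string ResourceId);

public static class BatchingSketch
{
    // Group definitions by the properties that may share one batched Azure Monitor
    // request, then cut every group into slices of at most maxBatchSize.
    public static List<List<SimpleScrapeDefinition>> Group(IEnumerable<SimpleScrapeDefinition> definitions, int maxBatchSize)
    {
        return definitions
            .GroupBy(d => (d.MetricName, d.SubscriptionId)) // stand-in for BuildScrapingBatchInfo()
            .SelectMany(g => Split(g.ToList(), maxBatchSize))
            .ToList();
    }

    // Same arithmetic as SplitScrapeDefinitionBatch: ((count - 1) / maxBatchSize) + 1 slices.
    private static IEnumerable<List<SimpleScrapeDefinition>> Split(List<SimpleScrapeDefinition> batchToSplit, int maxBatchSize)
    {
        int numNewGroups = ((batchToSplit.Count - 1) / maxBatchSize) + 1;
        for (int i = 0; i < numNewGroups; i++)
        {
            yield return batchToSplit.Skip(i * maxBatchSize).Take(maxBatchSize).ToList();
        }
    }

    public static void Main()
    {
        // Mirrors BatchShouldSplitAccordingToConfiguredBatchSize above: 25 definitions
        // with identical batch properties and maxBatchSize 10 yield 3 batches totalling 25.
        var definitions = Enumerable.Range(0, 25)
            .Select(i => new SimpleScrapeDefinition("UsedCapacity", "sub-1", $"storage-{i}"));
        var batches = Group(definitions, maxBatchSize: 10);
        Console.WriteLine(batches.Count);             // 3
        Console.WriteLine(batches.Sum(b => b.Count)); // 25
    }
}

Grouping happens entirely in memory, which is why dropping the CancellationToken parameter is safe: there is no I/O to cancel between grouping and splitting.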