Commit 11d765c: more style fixes

hkfgo committed Sep 26, 2024
1 parent 430c725
Showing 9 changed files with 16 additions and 123 deletions.
@@ -257,7 +257,7 @@ private async Task ScrapeMetrics(IEnumerable<ScrapeDefinition<IAzureResourceDefinition>>
     Logger.LogInformation("Parsed batch config: {Enabled}, {BatchSize}", this._azureMonitorIntegrationConfiguration.Value.MetricsBatching.Enabled, this._azureMonitorIntegrationConfiguration.Value.MetricsBatching.MaxBatchSize);
     Logger.LogInformation("Parsed SDK runtime config {Enabled}", this._azureMonitorIntegrationConfiguration.Value.UseAzureMonitorSdk);
     if (batchScrapingEnabled) {
-        var batchScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, this._azureMonitorIntegrationConfiguration.Value.MetricsBatching.MaxBatchSize, cancellationToken);
+        var batchScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, this._azureMonitorIntegrationConfiguration.Value.MetricsBatching.MaxBatchSize);

         foreach(var batchScrapeDefinition in batchScrapeDefinitions) {
             var azureMetricName = batchScrapeDefinition.ScrapeDefinitionBatchProperties.AzureMetricConfiguration.MetricName;
@@ -15,19 +15,19 @@ public static class AzureResourceDefinitionBatching
     /// 3. Definitions in a batch must have the same time granularity
     /// 4. Batch size cannot exceed configured maximum
     /// </summary>
-    public static List<BatchScrapeDefinition<IAzureResourceDefinition>> GroupScrapeDefinitions(IEnumerable<ScrapeDefinition<IAzureResourceDefinition>> allScrapeDefinitions, int maxBatchSize, CancellationToken cancellationToken)
+    public static List<BatchScrapeDefinition<IAzureResourceDefinition>> GroupScrapeDefinitions(IEnumerable<ScrapeDefinition<IAzureResourceDefinition>> allScrapeDefinitions, int maxBatchSize)
     {
         return allScrapeDefinitions.GroupBy(def => def.BuildScrapingBatchInfo())
             .ToDictionary(group => group.Key, group => group.ToList()) // first pass to build batches that could exceed max
-            .ToDictionary(group => group.Key, group => SplitScrapeDefinitionBatch(group.Value, maxBatchSize, cancellationToken)) // split to right-sized batches
+            .ToDictionary(group => group.Key, group => SplitScrapeDefinitionBatch(group.Value, maxBatchSize)) // split to right-sized batches
             .SelectMany(group => group.Value.Select(batch => new BatchScrapeDefinition<IAzureResourceDefinition>(group.Key, batch)))
             .ToList(); // flatten
     }

     /// <summary>
     /// Splits the "raw" batch according to the configured max batch size
     /// </summary>
-    private static List<List<ScrapeDefinition<IAzureResourceDefinition>>> SplitScrapeDefinitionBatch(List<ScrapeDefinition<IAzureResourceDefinition>> batchToSplit, int maxBatchSize, CancellationToken cancellationToken)
+    private static List<List<ScrapeDefinition<IAzureResourceDefinition>>> SplitScrapeDefinitionBatch(List<ScrapeDefinition<IAzureResourceDefinition>> batchToSplit, int maxBatchSize)
     {
         int numNewGroups = ((batchToSplit.Count - 1) / maxBatchSize) + 1;
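The arithmetic in SplitScrapeDefinitionBatch is integer ceiling division: ((batchToSplit.Count - 1) / maxBatchSize) + 1 yields the number of right-sized groups without resorting to Math.Ceiling on doubles. The hunk cuts off before the loop that does the slicing, so here is a minimal standalone sketch of the idea; the Skip/Take slicing is an assumption for illustration, not the code in this commit:

    using System;
    using System.Collections.Generic;
    using System.Linq;

    class BatchSplitSketch
    {
        // Ceiling division, as in SplitScrapeDefinitionBatch: 25 items with max 10 -> 3 groups
        static List<List<T>> Split<T>(List<T> batchToSplit, int maxBatchSize)
        {
            int numNewGroups = ((batchToSplit.Count - 1) / maxBatchSize) + 1;
            var groups = new List<List<T>>(numNewGroups);
            for (int i = 0; i < numNewGroups; i++)
            {
                // Slice the oversized batch into chunks of at most maxBatchSize
                groups.Add(batchToSplit.Skip(i * maxBatchSize).Take(maxBatchSize).ToList());
            }
            return groups;
        }

        static void Main()
        {
            var batch = Enumerable.Range(1, 25).ToList();
            Console.WriteLine(string.Join(", ", Split(batch, 10).Select(g => g.Count))); // 10, 10, 5
        }
    }

With 25 definitions and a maximum of 10, numNewGroups is ((25 - 1) / 10) + 1 = 3, matching the three batches asserted in BatchShouldSplitAccordingToConfiguredBatchSize in the tests below.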
src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs

@@ -1,4 +1,3 @@
-using System;
 using System.Collections.Generic;
 using GuardNet;
 using Promitor.Core.Contracts;
@@ -33,7 +32,7 @@ public BatchScrapeDefinition(ScrapeDefinitionBatchProperties scrapeDefinitionBatchProperties, ...
     /// <summary>
     /// A batch of scrape job definitions to be executed as a single request
     /// </summary>
-    public List<ScrapeDefinition<TResourceDefinition>> ScrapeDefinitions { get; set; } = new List<ScrapeDefinition<TResourceDefinition>>();
+    public List<ScrapeDefinition<TResourceDefinition>> ScrapeDefinitions { get; set; }

     public ScrapeDefinitionBatchProperties ScrapeDefinitionBatchProperties { get; set; }
 }
Check failure on line 37 in src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs (GitHub Actions / Analyse and GitHub Actions / Verify Codebase, reported twice each):
The type or namespace name 'ScrapeDefinitionBatchProperties' could not be found (are you missing a using directive or an assembly reference?)

This file was deleted.

@@ -302,7 +302,7 @@ private MetricsClient CreateAzureMonitorMetricsBatchClient(AzureCloud azureCloud
 public static string InsertRegionIntoUrl(string region, string baseUrl)
 {
     // Find the position where ".metrics" starts in the URL
-    int metricsIndex = baseUrl.IndexOf("metrics");
+    int metricsIndex = baseUrl.IndexOf("metrics", System.StringComparison.Ordinal);

     // Split the base URL into two parts: before and after the ".metrics"
     string beforeMetrics = baseUrl.Substring(0, metricsIndex);
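Passing StringComparison.Ordinal makes the lookup culture-invariant, which is the right choice for URLs: culture-sensitive IndexOf can yield surprising matches under locales such as tr-TR. Assuming the regional batch endpoint follows the usual https://{region}.metrics.monitor.azure.com shape, a sketch of the transformation this method performs; the concatenation step is inferred, since the hunk cuts off after beforeMetrics:

    // Hypothetical values for illustration
    string baseUrl = "https://metrics.monitor.azure.com";
    string region = "westeurope";

    int metricsIndex = baseUrl.IndexOf("metrics", StringComparison.Ordinal);
    string beforeMetrics = baseUrl.Substring(0, metricsIndex); // "https://"
    string afterMetrics = baseUrl.Substring(metricsIndex);     // "metrics.monitor.azure.com"

    // "https://westeurope.metrics.monitor.azure.com"
    string regionalUrl = $"{beforeMetrics}{region}.{afterMetrics}";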
@@ -15,8 +15,8 @@ public static string ParseResourceIdFromResultId(this MetricResult metricResult)
 private static string ExtractResourceId(string fullId)
 {
     // Find the index of the second occurrence of "/providers/"
-    int firstIndex = fullId.IndexOf("/providers/");
-    int secondIndex = fullId.IndexOf("/providers/", firstIndex + 1);
+    int firstIndex = fullId.IndexOf("/providers/", System.StringComparison.Ordinal);
+    int secondIndex = fullId.IndexOf("/providers/", firstIndex + 1, System.StringComparison.Ordinal);

     // If the second "/providers/" is found, slice the string up to that point
     if (secondIndex != -1)
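The result IDs returned by the Azure Monitor metrics batch API append a second /providers/Microsoft.Insights/metrics/... segment after the scraped resource's own ID, so slicing at the second occurrence recovers the resource ID. A sketch with a made-up storage account; the input shape is an assumption based on the comment in the hunk:

    // Hypothetical metric result ID
    string fullId = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/promitor-rg"
                  + "/providers/Microsoft.Storage/storageAccounts/promitordemo"
                  + "/providers/Microsoft.Insights/metrics/UsedCapacity";

    int firstIndex = fullId.IndexOf("/providers/", StringComparison.Ordinal);
    int secondIndex = fullId.IndexOf("/providers/", firstIndex + 1, StringComparison.Ordinal);

    // "/subscriptions/.../resourceGroups/promitor-rg/providers/Microsoft.Storage/storageAccounts/promitordemo"
    string resourceId = secondIndex != -1 ? fullId.Substring(0, secondIndex) : fullId;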
@@ -10,7 +10,7 @@

 namespace Promitor.Integrations.AzureMonitor.HttpPipelinePolicies{
     /// <summary>
-    /// Work around to make sure range queries work properly. <see cref="https://github.com/Azure/azure-sdk-for-net/issues/40047">
+    /// Work around to make sure range queries work properly. <see cref="https://github.com/Azure/azure-sdk-for-net/issues/40047"/>
     /// </summary>
     public class ModifyOutgoingAzureMonitorRequestsPolicy : HttpPipelinePolicy
     {
@@ -50,7 +50,7 @@ private void ModifyDateTimeParam(List<string> paramNames, HttpMessage message)
                 // Update the message with the modified URI
             }
         }
-                message.Request.Uri.Query = query.ToString();
+            message.Request.Uri.Query = query.ToString();
         }
     }
 }
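For context on how a custom HttpPipelinePolicy like this one is attached: Azure.Core's ClientOptions exposes AddPolicy, so the wiring could look roughly like the sketch below. This is only one plausible setup, not the wiring in this commit, and the parameterless constructor is an assumption (the real policy may take dependencies such as a logger):

    using Azure.Core;
    using Azure.Identity;
    using Azure.Monitor.Query;

    var options = new MetricsClientOptions();
    // Run the workaround on every request so outgoing date-time query parameters get rewritten
    options.AddPolicy(new ModifyOutgoingAzureMonitorRequestsPolicy(), HttpPipelinePosition.PerCall);

    var client = new MetricsClient(new Uri("https://westeurope.metrics.monitor.azure.com"),
                                   new DefaultAzureCredential(), options);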
@@ -49,7 +49,6 @@ public Task WriteHistogramMeasurementAsync(string name, string description, double value, ...
 {
     var orderedLabels = labels.OrderByDescending(kvp => kvp.Key).ToDictionary(kvp => kvp.Key, kvp => kvp.Value);

-    // TODO: are histogram instruments created on every invocation? Would that interfere with correctness?
     var histogram = _metricFactory.CreateHistogram(name, help: description, includeTimestamp: includeTimestamp, labelNames: orderedLabels.Keys.ToArray(), buckets: [1, 2, 4, 8, 16, 32, 64]);
     histogram.WithLabels(orderedLabels.Values.ToArray()).Observe(value);
     return Task.CompletedTask;
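The OrderByDescending over the label keys is what keeps the labelNames array and the values array pairwise aligned regardless of the order in which callers populate the dictionary. A small illustration of that invariant, using only standard LINQ:

    using System;
    using System.Collections.Generic;
    using System.Linq;

    var labels = new Dictionary<string, string>
    {
        ["resource_group"] = "promitor-rg",
        ["subscription_id"] = "0000"
    };

    // Deterministic key order -> names[i] always pairs with values[i]
    var ordered = labels.OrderByDescending(kvp => kvp.Key).ToList();
    string[] names = ordered.Select(kvp => kvp.Key).ToArray();    // subscription_id, resource_group
    string[] values = ordered.Select(kvp => kvp.Value).ToArray(); // 0000, promitor-rg
    Console.WriteLine($"{string.Join(",", names)} -> {string.Join(",", values)}");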
@@ -61,7 +61,7 @@ public void IdenticalBatchPropertiesShouldBatchTogether()
         azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping,
         resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10
     );
-    var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: batchSize, CancellationToken.None);
+    var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: batchSize);
     // expect one batch of 10
     Assert.Single(groupedScrapeDefinitions);
     Assert.Equal(10, groupedScrapeDefinitions[0].ScrapeDefinitions.Count);
@@ -79,7 +79,7 @@ public void BatchShouldSplitAccordingToConfiguredBatchSize()
         azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping,
         resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 25
     );
-    var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: testBatchSize, CancellationToken.None);
+    var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: testBatchSize);
     // expect three batches adding up to the total size
     Assert.Equal(3, groupedScrapeDefinitions.Count);
     Assert.Equal(25, CountTotalScrapeDefinitions(groupedScrapeDefinitions));
@@ -99,7 +99,7 @@ public void DifferentBatchPropertiesShouldBatchSeparately()
         azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping,
         resourceType: ResourceType.BlobStorage, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10
     );
-    var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize, CancellationToken.None);
+    var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize);
     // expect two batches of 10 each
     Assert.Equal(2, groupedScrapeDefinitions.Count);
     Assert.Equal(10, groupedScrapeDefinitions[0].ScrapeDefinitions.Count);
@@ -123,7 +123,7 @@ public void DifferentAggregationIntervalsShouldBatchSeparately()
         azureMetricConfiguration: azureMetricConfiguration2MInterval, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping,
         resourceType: ResourceType.BlobStorage, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10
     );
-    var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions5m, .. differentScrapeDefinitions2m], maxBatchSize: batchSize, CancellationToken.None);
+    var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions5m, .. differentScrapeDefinitions2m], maxBatchSize: batchSize);
     // expect two batches of 10 each
     Assert.Equal(2, groupedScrapeDefinitions.Count);
     Assert.Equal(10, groupedScrapeDefinitions[0].ScrapeDefinitions.Count);
@@ -145,7 +145,7 @@ public void MixedBatchShouldSplitAccordingToConfiguredBatchSize()
         azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping,
         resourceType: ResourceType.BlobStorage, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 120
     );
-    var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize, CancellationToken.None);
+    var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize);
     // expect six batches adding up to the total of 250 definitions
     Assert.Equal(6, groupedScrapeDefinitions.Count);
     Assert.Equal(250, CountTotalScrapeDefinitions(groupedScrapeDefinitions));
@@ -165,7 +165,7 @@ public void BatchConstructionShouldBeAgnosticToResourceGroup()
         azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping,
         resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: "group2", 10
     );
-    var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize, CancellationToken.None);
+    var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize);
     // expect a single batch of 20
     Assert.Single(groupedScrapeDefinitions);
     Assert.Equal(20, groupedScrapeDefinitions[0].ScrapeDefinitions.Count);
