From ba4df0b84dd1a061c7413334e211cd6b6e56a4b5 Mon Sep 17 00:00:00 2001 From: Bastian Krol Date: Fri, 23 Aug 2024 19:14:22 +0200 Subject: [PATCH] feat(crd): more detailed configuration settings for exporting telemetry --- .../v1alpha1/dash0monitoring_types.go | 156 +++++- .../v1alpha1/zz_generated.deepcopy.go | 144 +++++- .../operator.dash0.com_dash0monitorings.yaml | 149 +++++- helm-chart/dash0-operator/README.md | 132 +++++- .../custom-resource-definition-dash0.yaml | 149 +++++- ...m-resource-definition-dash0_test.yaml.snap | 135 +++++- .../backendconnection_manager.go | 6 - .../backendconnection_manager_test.go | 48 +- .../otelcolresources/collector_config_map.go | 146 ++++++ .../collector_config_map_test.go | 446 ++++++++++++++++++ .../otelcolresources/config.yaml.template | 46 +- .../otelcolresources/desired_state.go | 131 ++--- .../otelcolresources/desired_state_test.go | 77 ++- .../otelcolresources/otelcol_resources.go | 20 +- .../dash0/controller/dash0_controller_test.go | 11 +- .../bin/test-roundtrip-01-aum-operator-cr.sh | 2 +- .../bin/test-roundtrip-02-operator-cr-aum.sh | 2 +- test-resources/bin/util | 2 +- .../dash0monitoring.secret.yaml | 19 +- .../dash0monitoring.token.yaml.template | 18 +- test-resources/otlp-sink/otlp-sink.yaml | 18 +- test/e2e/dash0_monitoring_resource.go | 4 +- test/e2e/dash0monitoring.e2e.yaml.template | 6 +- test/e2e/e2e_test.go | 16 +- test/util/constants.go | 10 +- test/util/dash0_monitoring_resource.go | 23 +- 26 files changed, 1576 insertions(+), 340 deletions(-) create mode 100644 internal/backendconnection/otelcolresources/collector_config_map.go create mode 100644 internal/backendconnection/otelcolresources/collector_config_map_test.go diff --git a/api/dash0monitoring/v1alpha1/dash0monitoring_types.go b/api/dash0monitoring/v1alpha1/dash0monitoring_types.go index 23e74c57..8b58a0f9 100644 --- a/api/dash0monitoring/v1alpha1/dash0monitoring_types.go +++ b/api/dash0monitoring/v1alpha1/dash0monitoring_types.go @@ -14,29 +14,16 
@@ const ( FinalizerId = "operator.dash0.com/dash0-monitoring-finalizer" ) -// Dash0MonitoringSpec defines the desired state of the Dash0 monitoring resource. +// Dash0MonitoringSpec describes the details of monitoring a single Kubernetes namespace with Dash0 and sending +// telemetry to an observability backend. type Dash0MonitoringSpec struct { - // The URL of the observability backend to which telemetry data will be sent. This property is mandatory. The value - // needs to be the OTLP/gRPC endpoint of your Dash0 organization. The correct OTLP/gRPC endpoint can be copied fom - // https://app.dash0.com/settings. The correct endpoint value will always start with `ingress.` and end in - // `dash0.com:4317`. - // - // +kubebuilder:validation:Mandatory - Endpoint string `json:"endpoint"` - - // The Dash0 authorization token. This property is optional, but either this property or the SecretRef property has - // to be provided. If both are provided, the AuthorizationToken will be used and SecretRef will be ignored. The - // authorization token for your Dash0 organization can be copied from https://app.dash0.com/settings. - // - // +kubebuilder:validation:Optional - AuthorizationToken string `json:"authorizationToken"` - - // A reference to a Kubernetes secret containing the Dash0 authorization token. This property is optional, and is - // ignored if the AuthorizationToken property is set. The authorization token for your Dash0 organization - // can be copied from https://app.dash0.com/settings. + // The configuration of the observability backend to which telemetry data will be sent. This property is mandatory. + // This can either be Dash0 or another OTLP-compatible backend. You can also combine sending to Dash0 with an + // HTTP exporter to send the same data to two targets simultaneously, or you can combine a gRPC exporter with an + // HTTP exporter. Combining Dash0 with a gRPC exporter is currently not supported. 
// - // +kubebuilder:validation:Optional - SecretRef string `json:"secretRef"` + // +kubebuilder:validation:Required + Export `json:"export"` // Global opt-out for workload instrumentation for the target namespace. There are three possible settings: `all`, // `created-and-updated` and `none`. By default, the setting `all` is assumed. @@ -69,10 +56,135 @@ type Dash0MonitoringSpec struct { // More fine-grained per-workload control over instrumentation is available by setting the label // dash0.com/enable=false on individual workloads. // - // +kubebuilder:validation:Optional + // +kubebuilder:default=all InstrumentWorkloads InstrumentWorkloadsMode `json:"instrumentWorkloads,omitempty"` } +// Export describes the observability backend to which telemetry data will be sent. This can either be Dash0 or another +// OTLP-compatible backend. You can also combine Dash0 with an HTTP exporter or an arbitrary gRPC exporter with an HTTP +// exporter. Combining Dash0 with a gRPC exporter is currently not supported. +// +// +kubebuilder:validation:MinProperties=1 +// +kubebuilder:validation:MaxProperties=2 +type Export struct { + // The configuration of the Dash0 ingress endpoint to which telemetry data will be sent. + // + // +kubebuilder:validation:Optional + Dash0 *Dash0Configuration `json:"dash0,omitempty"` + + // The settings for an exporter to send telemetry to an arbitrary OTLP-compatible receiver via HTTP. + // + // +kubebuilder:validation:Optional + Http *HttpConfiguration `json:"http,omitempty"` + + // The settings for an exporter to send telemetry to an arbitrary OTLP-compatible receiver via gRPC. + // + // +kubebuilder:validation:Optional + Grpc *GrpcConfiguration `json:"grpc,omitempty"` +} + +// Dash0Configuration describes to which Dash0 ingress endpoint telemetry data will be sent. +type Dash0Configuration struct { + // The URL of the Dash0 ingress endpoint to which telemetry data will be sent. This property is mandatory. 
The value + // needs to be the OTLP/gRPC endpoint of your Dash0 organization. The correct OTLP/gRPC endpoint can be copied from + // https://app.dash0.com/settings. The correct endpoint value will always start with `ingress.` and end in + // `dash0.com:4317`. + // + // +kubebuilder:validation:Required + Endpoint string `json:"endpoint"` + + // The name of the Dash0 dataset to which telemetry data will be sent. This property is optional. If omitted, the + // dataset "default" will be used. + // + // +kubebuilder:default=default + Dataset string `json:"dataset,omitempty"` + + // Mandatory authorization settings for sending data to Dash0. + // + // +kubebuilder:validation:Required + Authorization Authorization `json:"authorization"` +} + +// Authorization contains the authorization settings for Dash0. +// +// +kubebuilder:validation:MinProperties=1 +// +kubebuilder:validation:MaxProperties=1 +type Authorization struct { + // The Dash0 authorization token. This property is optional, but either this property or the SecretRef property has + // to be provided. If both are provided, the token will be used and SecretRef will be ignored. The authorization + // token for your Dash0 organization can be copied from https://app.dash0.com/settings. + // + // +kubebuilder:validation:Optional + Token *string `json:"token"` // either token or secret ref, with token taking precedence + + // A reference to a Kubernetes secret containing the Dash0 authorization token. This property is optional, and is + // ignored if the token property is set. The authorization token for your Dash0 organization can be copied from + // https://app.dash0.com/settings. + // + // +kubebuilder:validation:Optional + SecretRef *SecretRef `json:"secretRef"` +} + +type SecretRef struct { + // The name of the secret containing the Dash0 authorization token. Defaults to "dash0-authorization-secret". 
+ // +kubebuilder:default=dash0-authorization-secret + Name string `json:"name"` + + // The key of the value which contains the Dash0 authorization token. Defaults to "token" + // +kubebuilder:default=token + Key string `json:"key"` +} + +// HttpConfiguration describes the settings for an exporter to send telemetry to an arbitrary OTLP-compatible receiver +// via HTTP. +type HttpConfiguration struct { + // The URL of the OTLP-compatible receiver to which telemetry data will be sent. This property is mandatory. + // + // +kubebuilder:validation:Required + Endpoint string `json:"endpoint"` + + // Additional headers to be sent with each HTTP request, for example for authorization. This property is optional. + // + // +kubebuilder:validation:Optional + Headers []Header `json:"headers,omitempty"` + + // The encoding of the OTLP data when sent via HTTP. Can be either proto or json, defaults to proto. + // + // +kubebuilder:default=proto + Encoding OtlpEncoding `json:"encoding,omitempty"` +} + +// GrpcConfiguration describes the settings for an exporter to send telemetry to an arbitrary OTLP-compatible receiver +// via gRPC. +type GrpcConfiguration struct { + // The URL of the OTLP-compatible receiver to which telemetry data will be sent. This property is mandatory. + // + // +kubebuilder:validation:Required + Endpoint string `json:"endpoint"` + + // Additional headers to be sent with each gRPC request, for example for authorization. This property is optional. + // + // +kubebuilder:validation:Optional + Headers []Header `json:"headers,omitempty"` +} + +// OtlpEncoding describes the encoding of the OTLP data when sent via HTTP. 
+// +// +kubebuilder:validation:Enum=proto;json +type OtlpEncoding string + +const ( + Proto OtlpEncoding = "proto" + Json OtlpEncoding = "json" +) + +type Header struct { + // +kubebuilder:validation:Required + Name string `json:"name"` + // +kubebuilder:validation:Required + Value string `json:"value"` +} + // InstrumentWorkloadsMode describes when exactly workloads will be instrumented. Only one of the following modes // may be specified. If none of the following policies is specified, the default one is All. See // Dash0MonitoringSpec#InstrumentWorkloads for more details. diff --git a/api/dash0monitoring/v1alpha1/zz_generated.deepcopy.go b/api/dash0monitoring/v1alpha1/zz_generated.deepcopy.go index 1422377a..1b00855b 100644 --- a/api/dash0monitoring/v1alpha1/zz_generated.deepcopy.go +++ b/api/dash0monitoring/v1alpha1/zz_generated.deepcopy.go @@ -12,12 +12,53 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Authorization) DeepCopyInto(out *Authorization) { + *out = *in + if in.Token != nil { + in, out := &in.Token, &out.Token + *out = new(string) + **out = **in + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretRef) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authorization. +func (in *Authorization) DeepCopy() *Authorization { + if in == nil { + return nil + } + out := new(Authorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Dash0Configuration) DeepCopyInto(out *Dash0Configuration) { + *out = *in + in.Authorization.DeepCopyInto(&out.Authorization) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dash0Configuration. 
+func (in *Dash0Configuration) DeepCopy() *Dash0Configuration { + if in == nil { + return nil + } + out := new(Dash0Configuration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Dash0Monitoring) DeepCopyInto(out *Dash0Monitoring) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } @@ -74,6 +115,7 @@ func (in *Dash0MonitoringList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Dash0MonitoringSpec) DeepCopyInto(out *Dash0MonitoringSpec) { *out = *in + in.Export.DeepCopyInto(&out.Export) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dash0MonitoringSpec. @@ -107,3 +149,103 @@ func (in *Dash0MonitoringStatus) DeepCopy() *Dash0MonitoringStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Export) DeepCopyInto(out *Export) { + *out = *in + if in.Dash0 != nil { + in, out := &in.Dash0, &out.Dash0 + *out = new(Dash0Configuration) + (*in).DeepCopyInto(*out) + } + if in.Http != nil { + in, out := &in.Http, &out.Http + *out = new(HttpConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Grpc != nil { + in, out := &in.Grpc, &out.Grpc + *out = new(GrpcConfiguration) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Export. +func (in *Export) DeepCopy() *Export { + if in == nil { + return nil + } + out := new(Export) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GrpcConfiguration) DeepCopyInto(out *GrpcConfiguration) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]Header, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrpcConfiguration. +func (in *GrpcConfiguration) DeepCopy() *GrpcConfiguration { + if in == nil { + return nil + } + out := new(GrpcConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Header) DeepCopyInto(out *Header) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Header. +func (in *Header) DeepCopy() *Header { + if in == nil { + return nil + } + out := new(Header) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HttpConfiguration) DeepCopyInto(out *HttpConfiguration) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]Header, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HttpConfiguration. +func (in *HttpConfiguration) DeepCopy() *HttpConfiguration { + if in == nil { + return nil + } + out := new(HttpConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretRef) DeepCopyInto(out *SecretRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretRef. 
+func (in *SecretRef) DeepCopy() *SecretRef { + if in == nil { + return nil + } + out := new(SecretRef) + in.DeepCopyInto(out) + return out +} diff --git a/config/crd/bases/operator.dash0.com_dash0monitorings.yaml b/config/crd/bases/operator.dash0.com_dash0monitorings.yaml index 6b717723..987e94dd 100644 --- a/config/crd/bases/operator.dash0.com_dash0monitorings.yaml +++ b/config/crd/bases/operator.dash0.com_dash0monitorings.yaml @@ -37,23 +37,136 @@ spec: metadata: type: object spec: - description: Dash0MonitoringSpec defines the desired state of the Dash0 - monitoring resource. + description: |- + Dash0MonitoringSpec describes the details of monitoring a single Kubernetes namespace with Dash0 and sending + telemetry to an observability backend. properties: - authorizationToken: - description: |- - The Dash0 authorization token. This property is optional, but either this property or the SecretRef property has - to be provided. If both are provided, the AuthorizationToken will be used and SecretRef will be ignored. The - authorization token for your Dash0 organization can be copied from https://app.dash0.com/settings. - type: string - endpoint: + export: description: |- - The URL of the observability backend to which telemetry data will be sent. This property is mandatory. The value - needs to be the OTLP/gRPC endpoint of your Dash0 organization. The correct OTLP/gRPC endpoint can be copied fom - https://app.dash0.com/settings. The correct endpoint value will always start with `ingress.` and end in - `dash0.com:4317`. - type: string + The configuration of the observability backend to which telemetry data will be sent. This property is mandatory. + This can either be Dash0 or another OTLP-compatible backend. You can also combine sending to Dash0 with an + HTTP exporter to send the same data to two targets simultaneously, or you can combine a gRPC exporter with an + HTTP exporter. Combining Dash0 with a gRPC exporter is currently not supported. 
+ maxProperties: 2 + minProperties: 1 + properties: + dash0: + description: The configuration of the Dash0 ingress endpoint to + which telemetry data will be sent. + properties: + authorization: + description: Mandatory authorization settings for sending + data to Dash0. + maxProperties: 1 + minProperties: 1 + properties: + secretRef: + description: |- + A reference to a Kubernetes secret containing the Dash0 authorization token. This property is optional, and is + ignored if the token property is set. The authorization token for your Dash0 organization can be copied from + https://app.dash0.com/settings. + properties: + key: + default: token + description: The key of the value which contains the + Dash0 authorization token. Defaults to "token" + type: string + name: + default: dash0-authorization-secret + description: The name of the secret containing the + Dash0 authorization token. Defaults to "dash0-authorization-secret". + type: string + required: + - key + - name + type: object + token: + description: |- + The Dash0 authorization token. This property is optional, but either this property or the SecretRef property has + to be provided. If both are provided, the token will be used and SecretRef will be ignored. The authorization + token for your Dash0 organization can be copied from https://app.dash0.com/settings. + type: string + type: object + dataset: + default: default + description: |- + The name of the Dash0 dataset to which telemetry data will be sent. This property is optional. If omitted, the + dataset "default" will be used. + type: string + endpoint: + description: |- + The URL of the Dash0 ingress endpoint to which telemetry data will be sent. This property is mandatory. The value + needs to be the OTLP/gRPC endpoint of your Dash0 organization. The correct OTLP/gRPC endpoint can be copied fom + https://app.dash0.com/settings. The correct endpoint value will always start with `ingress.` and end in + `dash0.com:4317`. 
+ type: string + required: + - authorization + - endpoint + type: object + grpc: + description: The settings for an exporter to send telemetry to + an arbitrary OTLP-compatible receiver via gRPC. + properties: + endpoint: + description: The URL of the OTLP-compatible receiver to which + telemetry data will be sent. This property is mandatory. + type: string + headers: + description: Additional headers to be sent with each gRPC + request, for example for authorization. This property is + optional. + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + required: + - endpoint + type: object + http: + description: The settings for an exporter to send telemetry to + an arbitrary OTLP-compatible receiver via HTTP. + properties: + encoding: + default: proto + description: The encoding of the OTLP data when sent via HTTP. + Can be either proto or json, defaults to proto. + enum: + - proto + - json + type: string + endpoint: + description: The URL of the OTLP-compatible receiver to which + telemetry data will be sent. This property is mandatory. + type: string + headers: + description: Additional headers to be sent with each HTTP + request, for example for authorization. This property is + optional. + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + required: + - endpoint + type: object + type: object instrumentWorkloads: + default: all description: |- Global opt-out for workload instrumentation for the target namespace. There are three possible settings: `all`, `created-and-updated` and `none`. By default, the setting `all` is assumed. @@ -95,14 +208,8 @@ spec: - created-and-updated - none type: string - secretRef: - description: |- - A reference to a Kubernetes secret containing the Dash0 authorization token. This property is optional, and is - ignored if the AuthorizationToken property is set. 
The authorization token for your Dash0 organization - can be copied from https://app.dash0.com/settings. - type: string required: - - endpoint + - export type: object status: description: Dash0MonitoringStatus defines the observed state of the Dash0 diff --git a/helm-chart/dash0-operator/README.md b/helm-chart/dash0-operator/README.md index 4ea2105b..7088f308 100644 --- a/helm-chart/dash0-operator/README.md +++ b/helm-chart/dash0-operator/README.md @@ -52,15 +52,18 @@ into that namespace: Create a file `dash0-monitoring.yaml` with the following content: ```yaml apiVersion: operator.dash0.com/v1alpha1 -kind: Dash0 +kind: Dash0Monitoring metadata: name: dash0-monitoring-resource spec: - # Replace this value with the actual OTLP/gRPC endpoint of your Dash0 organization. - endpoint: ingress... # TODO needs to be replaced with the actual value, see below + export: + dash0: + # Replace this value with the actual OTLP/gRPC endpoint of your Dash0 organization. + endpoint: ingress... # TODO needs to be replaced with the actual value, see below - # Either provide the Dash0 authorization token as a string via the property authorizationToken: - authorizationToken: auth_... # TODO needs to be replaced with the actual value, see below + authorization: + # Provide the Dash0 authorization token as a string via the token property: + token: auth_... # TODO needs to be replaced with the actual value, see below # Opt-out settings for particular use cases. The default value is "all". Other possible values are # "created-and-updated" and "none". @@ -68,25 +71,25 @@ spec: ``` At this point, you need to provide two configuration settings: -* `endpoint`: The URL of the observability backend to which telemetry data will be sent. This property is - mandatory. +* `spec.export.dash0.endpoint`: The URL of the observability backend to which telemetry data will be sent. This property + is mandatory. Replace the value in the example above with the OTLP/gRPC endpoint of your Dash0 organization. 
The correct OTLP/gRPC endpoint can be copied fom https://app.dash0.com/settings. Note that the correct endpoint value will always start with `ingress.` and end in `dash0.com:4317`. A protocol prefix (eg. `https://`) should not be included in the value. -* `authorizationToken` or `secretRef`: Exactly one of these two properties needs to be provided. - If both are provided, `authorizationToken` will be used and `secretRef` will be ignored. - * `authorizationToken`: Replace the value in the example above with the Dash0 authorization token of your +* `spec.export.dash0.token` or `spec.export.dash0.secretRef`: Exactly one of these two properties needs to be provided. + Providing both will cause a validation error when installing the Dash0Monitoring resource. + * `spec.export.dash0.token`: Replace the value in the example above with the Dash0 authorization token of your organization. The authorization token for your Dash0 organization can be copied from https://app.dash0.com/settings. The prefix `Bearer ` must *not* be included in the value. Note that the value will be rendered verbatim into a Kubernetes ConfigMap object. Anyone with API access to the Kubernetes cluster will be able to read the value. Use the `secretRef` property and a Kubernetes secret if you want to avoid that. - * `secretRef`: Replace the value in the example above with the name of an existing Kubernetes secret in the Dash0 - operator's namespace. + * `spec.export.dash0.secretRef`: A reference to an existing Kubernetes secret in the Dash0 operator's namespace. + See the next section for an example file that uses a `secretRef`. The secret needs to contain the Dash0 authorization token. - See below for details on how exactly the secret should be created. + See below for details on how exactly the secret should be created and configured. Note that by default, Kubernetes secrets are stored _unencrypted_, and anyone with API access to the Kubernetes cluster will be able to read the value. 
Additional steps are required to make sure secret values are encrypted. @@ -153,8 +156,7 @@ If you want to provide the Dash0 authorization token via a Kubernetes secret ins create the secret in the namespace where the Dash0 operator is installed. If you followed the guide above, the name of that namespace is `dash0-system`. The authorization token for your Dash0 organization can be copied from https://app.dash0.com/settings. -You can freely choose the name of the secret. -Make sure to use `dash0-authorization-token` as the token key. +You can freely choose the name of the secret and the key of the token within the secret. Create the secret by using the following command: @@ -162,14 +164,17 @@ Create the secret by using the following command: kubectl create secret generic \ dash0-authorization-secret \ --namespace dash0-system \ - --from-literal=dash0-authorization-token=auth_...your-token-here... + --from-literal=token=auth_...your-token-here... ``` With this example command, you would create a secret with the name `dash0-authorization-secret` in the namespace `dash0-system`. If you installed the operator into a different namespace, replace the `--namespace` parameter accordingly. -The name of the secret must be referenced in the YAML file for the Dash0 monitoring resource in the `secretRef` property. +The name of the secret as well as the key of the token value within the secret must be provided in the YAML file for +the Dash0 monitoring resource, in the `secretRef` property. +If the `name` property is omitted, the name `dash0-authorization-secret` will be assumed. +If the `key` property is omitted, the key `token` will be assumed. Here is an example that uses the secret created above: ```yaml apiVersion: operator.dash0.com/v1alpha1 @@ -177,11 +182,34 @@ kind: Dash0Monitoring metadata: name: dash0-monitoring-resource spec: - # Replace this value with the actual OTLP/gRPC endpoint of your Dash0 organization. - endpoint: ingress... 
# TODO needs to be replaced with the actual value, see below + export: + dash0: + # Replace this value with the actual OTLP/gRPC endpoint of your Dash0 organization. + endpoint: ingress... # TODO needs to be replaced with the actual value, see below + + authorization: + # Provide the name and key of a secret existing in the Dash0 operator's namespace as secretRef: + secretRef: + name: dash0-authorization-secret + key: token +``` - # Or provide the name of a secret existing in the Dash0 operator's namespace as the property secretRef: - secretRef: dash0-authorization-secret +Since the name `dash0-authorization-secret` and the key `token` are the defaults, this secretRef could have also been +written as follows: +```yaml +apiVersion: operator.dash0.com/v1alpha1 +kind: Dash0Monitoring +metadata: + name: dash0-monitoring-resource +spec: + export: + dash0: + # Replace this value with the actual OTLP/gRPC endpoint of your Dash0 organization. + endpoint: ingress... # TODO needs to be replaced with the actual value, see below + + authorization: + # Provide the name and key of a secret existing in the Dash0 operator's namespace as secretRef: + secretRef: {} ``` Note that by default, Kubernetes secrets are stored _unencrypted_, and anyone with API access to the Kubernetes cluster @@ -189,6 +217,68 @@ will be able to read the value. Additional steps are required to make sure secret values are encrypted. See https://kubernetes.io/docs/concepts/configuration/secret/ for more information on Kubernetes secrets. +### Dash0 Dataset Configuration + +Use the `spec.export.dash0.dataset` property to configure the dataset that should be used for the telemetry data. +By default, data will be sent to the dataset `default`. +```yaml +apiVersion: operator.dash0.com/v1alpha1 +kind: Dash0Monitoring +metadata: + name: dash0-monitoring-resource +spec: + export: + dash0: + endpoint: ingress... 
# TODO needs to be replaced with the actual value, see below + dataset: my-custom-dataset + authorization: + ... +``` + +### Exporting Data to Other Observability Backends + +Instead of `spec.export.dash0`, you can also provide `spec.export.http` or `spec.export.grpc` to export telemetry data +to arbitrary OTLP-compatible backends, or to another local OpenTelemetry collector. + +Here is an example for HTTP: +```yaml +apiVersion: operator.dash0.com/v1alpha1 +kind: Dash0Monitoring +metadata: + name: dash0-monitoring-resource +spec: + export: + http: + endpoint: ... + headers: + - name: X-My-Header + value: my-value + encoding: json +``` + +Here is an example for gRPC: +```yaml +apiVersion: operator.dash0.com/v1alpha1 +kind: Dash0Monitoring +metadata: + name: dash0-monitoring-resource +spec: + export: + grpc: + endpoint: ... + headers: + - name: X-My-Header + value: my-value +``` + +You can even combine two exporters to send data to multiple backends, although there are some restrictions: +* Combining `spec.export.dash0` with `spec.export.http` is supported. +* Combining `spec.export.grpc` with `spec.export.http` is supported. +* Combining `spec.export.dash0` with `spec.export.grpc` is currently not supported. +* Combining more than two exporters is not supported. +* At least one exporter configuration has to be provided. +* Listing two or more exporters of the same type (i.e. providing `spec.export.grpc` twice) is not supported. + ## Disable Dash0 Monitoring For a Namespace If you want to stop monitoring a namespace with Dash0, remove the Dash0 monitoring resource from that namespace. 
diff --git a/helm-chart/dash0-operator/templates/operator/custom-resource-definition-dash0.yaml b/helm-chart/dash0-operator/templates/operator/custom-resource-definition-dash0.yaml index 88c1e97e..8433d0c5 100644 --- a/helm-chart/dash0-operator/templates/operator/custom-resource-definition-dash0.yaml +++ b/helm-chart/dash0-operator/templates/operator/custom-resource-definition-dash0.yaml @@ -36,23 +36,136 @@ spec: metadata: type: object spec: - description: Dash0MonitoringSpec defines the desired state of the Dash0 - monitoring resource. + description: |- + Dash0MonitoringSpec describes the details of monitoring a single Kubernetes namespace with Dash0 and sending + telemetry to an observability backend. properties: - authorizationToken: - description: |- - The Dash0 authorization token. This property is optional, but either this property or the SecretRef property has - to be provided. If both are provided, the AuthorizationToken will be used and SecretRef will be ignored. The - authorization token for your Dash0 organization can be copied from https://app.dash0.com/settings. - type: string - endpoint: + export: description: |- - The URL of the observability backend to which telemetry data will be sent. This property is mandatory. The value - needs to be the OTLP/gRPC endpoint of your Dash0 organization. The correct OTLP/gRPC endpoint can be copied fom - https://app.dash0.com/settings. The correct endpoint value will always start with `ingress.` and end in - `dash0.com:4317`. - type: string + The configuration of the observability backend to which telemetry data will be sent. This property is mandatory. + This can either be Dash0 or another OTLP-compatible backend. You can also combine sending to Dash0 with an + HTTP exporter to send the same data to two targets simultaneously, or you can combine a gRPC exporter with an + HTTP exporter. Combining Dash0 with a gRPC exporter is currently not supported. 
+ maxProperties: 2 + minProperties: 1 + properties: + dash0: + description: The configuration of the Dash0 ingress endpoint to + which telemetry data will be sent. + properties: + authorization: + description: Mandatory authorization settings for sending + data to Dash0. + maxProperties: 1 + minProperties: 1 + properties: + secretRef: + description: |- + A reference to a Kubernetes secret containing the Dash0 authorization token. This property is optional, and is + ignored if the token property is set. The authorization token for your Dash0 organization can be copied from + https://app.dash0.com/settings. + properties: + key: + default: token + description: The key of the value which contains the + Dash0 authorization token. Defaults to "token" + type: string + name: + default: dash0-authorization-secret + description: The name of the secret containing the + Dash0 authorization token. Defaults to "dash0-authorization-secret". + type: string + required: + - key + - name + type: object + token: + description: |- + The Dash0 authorization token. This property is optional, but either this property or the SecretRef property has + to be provided. If both are provided, the token will be used and SecretRef will be ignored. The authorization + token for your Dash0 organization can be copied from https://app.dash0.com/settings. + type: string + type: object + dataset: + default: default + description: |- + The name of the Dash0 dataset to which telemetry data will be sent. This property is optional. If omitted, the + dataset "default" will be used. + type: string + endpoint: + description: |- + The URL of the Dash0 ingress endpoint to which telemetry data will be sent. This property is mandatory. The value + needs to be the OTLP/gRPC endpoint of your Dash0 organization. The correct OTLP/gRPC endpoint can be copied fom + https://app.dash0.com/settings. The correct endpoint value will always start with `ingress.` and end in + `dash0.com:4317`. 
+ type: string + required: + - authorization + - endpoint + type: object + grpc: + description: The settings for an exporter to send telemetry to + an arbitrary OTLP-compatible receiver via gRPC. + properties: + endpoint: + description: The URL of the OTLP-compatible receiver to which + telemetry data will be sent. This property is mandatory. + type: string + headers: + description: Additional headers to be sent with each gRPC + request, for example for authorization. This property is + optional. + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + required: + - endpoint + type: object + http: + description: The settings for an exporter to send telemetry to + an arbitrary OTLP-compatible receiver via HTTP. + properties: + encoding: + default: proto + description: The encoding of the OTLP data when sent via HTTP. + Can be either proto or json, defaults to proto. + enum: + - proto + - json + type: string + endpoint: + description: The URL of the OTLP-compatible receiver to which + telemetry data will be sent. This property is mandatory. + type: string + headers: + description: Additional headers to be sent with each HTTP + request, for example for authorization. This property is + optional. + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + required: + - endpoint + type: object + type: object instrumentWorkloads: + default: all description: |- Global opt-out for workload instrumentation for the target namespace. There are three possible settings: `all`, `created-and-updated` and `none`. By default, the setting `all` is assumed. @@ -94,14 +207,8 @@ spec: - created-and-updated - none type: string - secretRef: - description: |- - A reference to a Kubernetes secret containing the Dash0 authorization token. This property is optional, and is - ignored if the AuthorizationToken property is set. 
The authorization token for your Dash0 organization - can be copied from https://app.dash0.com/settings. - type: string required: - - endpoint + - export type: object status: description: Dash0MonitoringStatus defines the observed state of the Dash0 diff --git a/helm-chart/dash0-operator/tests/operator/__snapshot__/custom-resource-definition-dash0_test.yaml.snap b/helm-chart/dash0-operator/tests/operator/__snapshot__/custom-resource-definition-dash0_test.yaml.snap index 7f43aca9..e0e88245 100644 --- a/helm-chart/dash0-operator/tests/operator/__snapshot__/custom-resource-definition-dash0_test.yaml.snap +++ b/helm-chart/dash0-operator/tests/operator/__snapshot__/custom-resource-definition-dash0_test.yaml.snap @@ -38,22 +38,123 @@ custom resource definition should match snapshot: metadata: type: object spec: - description: Dash0MonitoringSpec defines the desired state of the Dash0 monitoring resource. + description: |- + Dash0MonitoringSpec describes the details of monitoring a single Kubernetes namespace with Dash0 and sending + telemetry to an observability backend. properties: - authorizationToken: - description: |- - The Dash0 authorization token. This property is optional, but either this property or the SecretRef property has - to be provided. If both are provided, the AuthorizationToken will be used and SecretRef will be ignored. The - authorization token for your Dash0 organization can be copied from https://app.dash0.com/settings. - type: string - endpoint: + export: description: |- - The URL of the observability backend to which telemetry data will be sent. This property is mandatory. The value - needs to be the OTLP/gRPC endpoint of your Dash0 organization. The correct OTLP/gRPC endpoint can be copied fom - https://app.dash0.com/settings. The correct endpoint value will always start with `ingress.` and end in - `dash0.com:4317`. - type: string + The configuration of the observability backend to which telemetry data will be sent. This property is mandatory. 
+ This can either be Dash0 or another OTLP-compatible backend. You can also combine sending to Dash0 with an + HTTP exporter to send the same data to two targets simultaneously, or you can combine a gRPC exporter with an + HTTP exporter. Combining Dash0 with a gRPC exporter is currently not supported. + maxProperties: 2 + minProperties: 1 + properties: + dash0: + description: The configuration of the Dash0 ingress endpoint to which telemetry data will be sent. + properties: + authorization: + description: Mandatory authorization settings for sending data to Dash0. + maxProperties: 1 + minProperties: 1 + properties: + secretRef: + description: |- + A reference to a Kubernetes secret containing the Dash0 authorization token. This property is optional, and is + ignored if the token property is set. The authorization token for your Dash0 organization can be copied from + https://app.dash0.com/settings. + properties: + key: + default: token + description: The key of the value which contains the Dash0 authorization token. Defaults to "token" + type: string + name: + default: dash0-authorization-secret + description: The name of the secret containing the Dash0 authorization token. Defaults to "dash0-authorization-secret". + type: string + required: + - key + - name + type: object + token: + description: |- + The Dash0 authorization token. This property is optional, but either this property or the SecretRef property has + to be provided. If both are provided, the token will be used and SecretRef will be ignored. The authorization + token for your Dash0 organization can be copied from https://app.dash0.com/settings. + type: string + type: object + dataset: + default: default + description: |- + The name of the Dash0 dataset to which telemetry data will be sent. This property is optional. If omitted, the + dataset "default" will be used. + type: string + endpoint: + description: |- + The URL of the Dash0 ingress endpoint to which telemetry data will be sent. 
This property is mandatory. The value + needs to be the OTLP/gRPC endpoint of your Dash0 organization. The correct OTLP/gRPC endpoint can be copied fom + https://app.dash0.com/settings. The correct endpoint value will always start with `ingress.` and end in + `dash0.com:4317`. + type: string + required: + - authorization + - endpoint + type: object + grpc: + description: The settings for an exporter to send telemetry to an arbitrary OTLP-compatible receiver via gRPC. + properties: + endpoint: + description: The URL of the OTLP-compatible receiver to which telemetry data will be sent. This property is mandatory. + type: string + headers: + description: Additional headers to be sent with each gRPC request, for example for authorization. This property is optional. + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + required: + - endpoint + type: object + http: + description: The settings for an exporter to send telemetry to an arbitrary OTLP-compatible receiver via HTTP. + properties: + encoding: + default: proto + description: The encoding of the OTLP data when sent via HTTP. Can be either proto or json, defaults to proto. + enum: + - proto + - json + type: string + endpoint: + description: The URL of the OTLP-compatible receiver to which telemetry data will be sent. This property is mandatory. + type: string + headers: + description: Additional headers to be sent with each HTTP request, for example for authorization. This property is optional. + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + required: + - endpoint + type: object + type: object instrumentWorkloads: + default: all description: |- Global opt-out for workload instrumentation for the target namespace. There are three possible settings: `all`, `created-and-updated` and `none`. By default, the setting `all` is assumed. 
@@ -95,14 +196,8 @@ custom resource definition should match snapshot: - created-and-updated - none type: string - secretRef: - description: |- - A reference to a Kubernetes secret containing the Dash0 authorization token. This property is optional, and is - ignored if the AuthorizationToken property is set. The authorization token for your Dash0 organization - can be copied from https://app.dash0.com/settings. - type: string required: - - endpoint + - export type: object status: description: Dash0MonitoringStatus defines the observed state of the Dash0 monitoring resource. diff --git a/internal/backendconnection/backendconnection_manager.go b/internal/backendconnection/backendconnection_manager.go index 037331a7..11520a8f 100644 --- a/internal/backendconnection/backendconnection_manager.go +++ b/internal/backendconnection/backendconnection_manager.go @@ -34,12 +34,6 @@ func (m *BackendConnectionManager) EnsureOpenTelemetryCollectorIsDeployedInDash0 ) error { logger := log.FromContext(ctx) - if dash0MonitoringResource.Spec.Endpoint == "" { - err := fmt.Errorf("no endpoint provided, unable to create the OpenTelemetry collector") - logger.Error(err, failedToCreateMsg) - return err - } - resourcesHaveBeenCreated, resourcesHaveBeenUpdated, err := m.OTelColResourceManager.CreateOrUpdateOpenTelemetryCollectorResources( ctx, diff --git a/internal/backendconnection/backendconnection_manager_test.go b/internal/backendconnection/backendconnection_manager_test.go index aced4980..cf994f73 100644 --- a/internal/backendconnection/backendconnection_manager_test.go +++ b/internal/backendconnection/backendconnection_manager_test.go @@ -26,8 +26,14 @@ var ( dash0MonitoringResource = &dash0v1alpha1.Dash0Monitoring{ Spec: dash0v1alpha1.Dash0MonitoringSpec{ - Endpoint: EndpointTest, - AuthorizationToken: AuthorizationTokenTest, + Export: dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Endpoint: EndpointTest, + Authorization: dash0v1alpha1.Authorization{ + Token: 
&AuthorizationTokenTest, + }, + }, + }, }, } ) @@ -75,23 +81,37 @@ var _ = Describe("The backend connection manager", Ordered, func() { operatorNamespace, &dash0v1alpha1.Dash0Monitoring{ Spec: dash0v1alpha1.Dash0MonitoringSpec{ - AuthorizationToken: AuthorizationTokenTest, - }}) + Export: dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Authorization: dash0v1alpha1.Authorization{ + Token: &AuthorizationTokenTest, + }, + }, + }, + }, + }, + ) Expect(err).To(HaveOccurred()) VerifyCollectorResourcesDoNotExist(ctx, k8sClient, operatorNamespace) }) - It("should not fail if neither authorization token nor secret ref are provided", func() { + It("should fail if neither authorization token nor secret ref are provided for Dash0 exporter", func() { err := manager.EnsureOpenTelemetryCollectorIsDeployedInDash0OperatorNamespace( ctx, TestImages, operatorNamespace, &dash0v1alpha1.Dash0Monitoring{ Spec: dash0v1alpha1.Dash0MonitoringSpec{ - Endpoint: EndpointTest, - }}) - Expect(err).NotTo(HaveOccurred()) - VerifyCollectorResourcesExist(ctx, k8sClient, operatorNamespace) + Export: dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Endpoint: EndpointTest, + Authorization: dash0v1alpha1.Authorization{}, + }, + }, + }, + }) + Expect(err).To(HaveOccurred()) + VerifyCollectorResourcesDoNotExist(ctx, k8sClient, operatorNamespace) }) }) @@ -222,8 +242,14 @@ var _ = Describe("The backend connection manager", Ordered, func() { UID: "3c0e72bb-26a7-40a4-bbdd-b1c978278fc5", }, Spec: dash0v1alpha1.Dash0MonitoringSpec{ - Endpoint: EndpointTest, - AuthorizationToken: AuthorizationTokenTest, + Export: dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Endpoint: EndpointTest, + Authorization: dash0v1alpha1.Authorization{ + Token: &AuthorizationTokenTest, + }, + }, + }, }, }, ) diff --git a/internal/backendconnection/otelcolresources/collector_config_map.go b/internal/backendconnection/otelcolresources/collector_config_map.go new file mode 100644 
index 00000000..7266bfb2 --- /dev/null +++ b/internal/backendconnection/otelcolresources/collector_config_map.go @@ -0,0 +1,146 @@ +// SPDX-FileCopyrightText: Copyright 2024 Dash0 Inc. +// SPDX-License-Identifier: Apache-2.0 + +package otelcolresources + +import ( + "bytes" + _ "embed" + "fmt" + "text/template" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + dash0v1alpha1 "github.com/dash0hq/dash0-operator/api/dash0monitoring/v1alpha1" +) + +type otlpExporter struct { + Name string + Endpoint string + Headers []dash0v1alpha1.Header + Encoding string +} + +var ( + //go:embed config.yaml.template + collectorConfigurationTemplateSource string + collectorConfigurationTemplate = template.Must( + template.New("collector-configuration").Parse(collectorConfigurationTemplateSource)) +) + +func collectorConfigMap(config *oTelColConfig) (*corev1.ConfigMap, error) { + exporters, err := assembleExporters(config.Export) + if err != nil { + return nil, fmt.Errorf("cannot assemble the exporters for the configuration: %w", err) + } + collectorConfiguration, err := renderCollectorConfigs(&collectorConfigurationTemplateValues{ + Exporters: exporters, + IgnoreLogsFromNamespaces: []string{ + // Skipping kube-system, it requires bespoke filtering work + "kube-system", + // Skipping logs from the operator and the daemonset, otherwise + // logs will compound in case of log parsing errors + config.Namespace, + }, + }) + if err != nil { + return nil, fmt.Errorf("cannot render the collector configuration template: %w", err) + } + + configMapData := map[string]string{ + collectorConfigurationYaml: collectorConfiguration, + } + + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: collectorConfigConfigMapName(config.NamePrefix), + Namespace: config.Namespace, + Labels: labels(false), + }, + Data: configMapData, + }, nil +} + +func assembleExporters(export 
dash0v1alpha1.Export) ([]otlpExporter, error) { + var exporters []otlpExporter + + if export.Dash0 != nil && export.Grpc != nil { + return nil, fmt.Errorf("combining the Dash0 exporter with a gRPC exporter is not supported, please use only one of them") + } + if export.Dash0 == nil && export.Grpc == nil && export.Http == nil { + return nil, fmt.Errorf("no exporter configuration found") + } + + if export.Dash0 != nil { + d0 := export.Dash0 + if d0.Endpoint == "" { + return nil, fmt.Errorf("no endpoint provided for the Dash0 exporter, unable to create the OpenTelemetry collector") + } + headers := []dash0v1alpha1.Header{{ + Name: "Authorization", + Value: "Bearer ${env:AUTH_TOKEN}", + }} + if d0.Dataset != "" && d0.Dataset != "default" { + headers = append(headers, dash0v1alpha1.Header{ + Name: "X-Dash0-Dataset", + Value: d0.Dataset, + }) + } + exporters = append(exporters, otlpExporter{ + Name: "otlp", + Endpoint: export.Dash0.Endpoint, + Headers: headers, + }) + } + + if export.Grpc != nil { + grpc := export.Grpc + if grpc.Endpoint == "" { + return nil, fmt.Errorf("no endpoint provided for the gRPC exporter, unable to create the OpenTelemetry collector") + } + grpcExporter := otlpExporter{ + Name: "otlp", + Endpoint: grpc.Endpoint, + Headers: grpc.Headers, + } + if grpc.Headers != nil && len(grpc.Headers) > 0 { + grpcExporter.Headers = grpc.Headers + } + exporters = append(exporters, grpcExporter) + } + + if export.Http != nil { + http := export.Http + if http.Endpoint == "" { + return nil, fmt.Errorf("no endpoint provided for the HTTP exporter, unable to create the OpenTelemetry collector") + } + if http.Encoding == "" { + return nil, fmt.Errorf("no encoding provided for the HTTP exporter, unable to create the OpenTelemetry collector") + } + httpExporter := otlpExporter{ + Name: "otlphttp", + Endpoint: http.Endpoint, + Encoding: string(http.Encoding), + } + if http.Headers != nil && len(http.Headers) > 0 { + httpExporter.Headers = http.Headers + } + exporters = 
append(exporters, httpExporter) + } + + return exporters, nil +} + +func renderCollectorConfigs(templateValues *collectorConfigurationTemplateValues) (string, error) { + var collectorConfiguration bytes.Buffer + if err := collectorConfigurationTemplate.Execute(&collectorConfiguration, templateValues); err != nil { + return "", err + } + + return collectorConfiguration.String(), nil +} diff --git a/internal/backendconnection/otelcolresources/collector_config_map_test.go b/internal/backendconnection/otelcolresources/collector_config_map_test.go new file mode 100644 index 00000000..2a8e7ccc --- /dev/null +++ b/internal/backendconnection/otelcolresources/collector_config_map_test.go @@ -0,0 +1,446 @@ +// SPDX-FileCopyrightText: Copyright 2024 Dash0 Inc. +// SPDX-License-Identifier: Apache-2.0 + +package otelcolresources + +import ( + "fmt" + + "gopkg.in/yaml.v3" + corev1 "k8s.io/api/core/v1" + + dash0v1alpha1 "github.com/dash0hq/dash0-operator/api/dash0monitoring/v1alpha1" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + . 
"github.com/dash0hq/dash0-operator/test/util" +) + +var _ = Describe("The OpenTelemetry Collector ConfigMap content", func() { + + It("should fail if no exporter is configured", func() { + _, err := collectorConfigMap(&oTelColConfig{ + Namespace: namespace, + NamePrefix: namePrefix, + Export: dash0v1alpha1.Export{}, + }) + Expect(err).To(HaveOccurred()) + }) + + It("should fail to render the Dash0 exporter when no endpoint is provided", func() { + _, err := collectorConfigMap(&oTelColConfig{ + Namespace: namespace, + NamePrefix: namePrefix, + Export: dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Authorization: dash0v1alpha1.Authorization{ + Token: &AuthorizationTokenTest, + }, + }, + }, + }) + Expect(err).To( + MatchError( + ContainSubstring( + "no endpoint provided for the Dash0 exporter, unable to create the OpenTelemetry collector"))) + + }) + + It("should render the Dash0 exporter", func() { + configMap, err := collectorConfigMap(&oTelColConfig{ + Namespace: namespace, + NamePrefix: namePrefix, + Export: dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Endpoint: EndpointTest, + Authorization: dash0v1alpha1.Authorization{ + Token: &AuthorizationTokenTest, + }, + }, + }, + }) + + Expect(err).ToNot(HaveOccurred()) + collectorConfig := parseConfigMapContent(configMap) + exportersRaw := collectorConfig["exporters"] + Expect(exportersRaw).ToNot(BeNil()) + exporters := exportersRaw.(map[string]interface{}) + Expect(exporters).To(HaveLen(2)) + debugExporter := exporters["debug"] + Expect(debugExporter).ToNot(BeNil()) + + exporter2 := exporters["otlp"] + Expect(exporter2).ToNot(BeNil()) + dash0OtlpExporter := exporter2.(map[string]interface{}) + Expect(dash0OtlpExporter).ToNot(BeNil()) + Expect(dash0OtlpExporter["endpoint"]).To(Equal(EndpointTest)) + headersRaw := dash0OtlpExporter["headers"] + Expect(headersRaw).ToNot(BeNil()) + headers := headersRaw.(map[string]interface{}) + Expect(headers).To(HaveLen(1)) + 
Expect(headers["Authorization"]).To(Equal("Bearer ${env:AUTH_TOKEN}")) + Expect(headers["X-Dash0-Dataset"]).To(BeNil()) + Expect(dash0OtlpExporter["encoding"]).To(BeNil()) + + verifyPipelines(collectorConfig, "otlp") + }) + + It("should render the Dash0 exporter with custom dataset", func() { + configMap, err := collectorConfigMap(&oTelColConfig{ + Namespace: namespace, + NamePrefix: namePrefix, + Export: dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Endpoint: EndpointTest, + Dataset: "custom-dataset", + Authorization: dash0v1alpha1.Authorization{ + Token: &AuthorizationTokenTest, + }, + }, + }, + }) + + Expect(err).ToNot(HaveOccurred()) + collectorConfig := parseConfigMapContent(configMap) + exportersRaw := collectorConfig["exporters"] + Expect(exportersRaw).ToNot(BeNil()) + exporters := exportersRaw.(map[string]interface{}) + Expect(exporters).To(HaveLen(2)) + debugExporter := exporters["debug"] + Expect(debugExporter).ToNot(BeNil()) + + exporter2 := exporters["otlp"] + Expect(exporter2).ToNot(BeNil()) + dash0OtlpExporter := exporter2.(map[string]interface{}) + Expect(dash0OtlpExporter).ToNot(BeNil()) + Expect(dash0OtlpExporter["endpoint"]).To(Equal(EndpointTest)) + headersRaw := dash0OtlpExporter["headers"] + Expect(headersRaw).ToNot(BeNil()) + headers := headersRaw.(map[string]interface{}) + Expect(headers).To(HaveLen(2)) + Expect(headers["Authorization"]).To(Equal("Bearer ${env:AUTH_TOKEN}")) + Expect(headers["X-Dash0-Dataset"]).To(Equal("custom-dataset")) + Expect(dash0OtlpExporter["encoding"]).To(BeNil()) + + verifyPipelines(collectorConfig, "otlp") + }) + + It("should fail to render a gRPC exporter when no endpoint is provided", func() { + _, err := collectorConfigMap(&oTelColConfig{ + Namespace: namespace, + NamePrefix: namePrefix, + Export: dash0v1alpha1.Export{ + Grpc: &dash0v1alpha1.GrpcConfiguration{ + Headers: []dash0v1alpha1.Header{{ + Name: "Key1", + Value: "Value1", + }}, + }, + }, + }) + Expect(err).To( + MatchError( + 
ContainSubstring( + "no endpoint provided for the gRPC exporter, unable to create the OpenTelemetry collector"))) + + }) + + It("should render an arbitrary gRPC exporter", func() { + configMap, err := collectorConfigMap(&oTelColConfig{ + Namespace: namespace, + NamePrefix: namePrefix, + Export: dash0v1alpha1.Export{ + Grpc: &dash0v1alpha1.GrpcConfiguration{ + Endpoint: "example.com:4317", + Headers: []dash0v1alpha1.Header{ + { + Name: "Key1", + Value: "Value1", + }, + { + Name: "Key2", + Value: "Value2", + }, + }, + }, + }, + }) + + Expect(err).ToNot(HaveOccurred()) + collectorConfig := parseConfigMapContent(configMap) + exportersRaw := collectorConfig["exporters"] + Expect(exportersRaw).ToNot(BeNil()) + exporters := exportersRaw.(map[string]interface{}) + Expect(exporters).To(HaveLen(2)) + debugExporter := exporters["debug"] + Expect(debugExporter).ToNot(BeNil()) + + exporter2 := exporters["otlp"] + Expect(exporter2).ToNot(BeNil()) + otlpGrpcExporter := exporter2.(map[string]interface{}) + Expect(otlpGrpcExporter).ToNot(BeNil()) + Expect(otlpGrpcExporter["endpoint"]).To(Equal("example.com:4317")) + headersRaw := otlpGrpcExporter["headers"] + Expect(headersRaw).ToNot(BeNil()) + headers := headersRaw.(map[string]interface{}) + Expect(headers).To(HaveLen(2)) + Expect(headers["Key1"]).To(Equal("Value1")) + Expect(headers["Key2"]).To(Equal("Value2")) + Expect(otlpGrpcExporter["encoding"]).To(BeNil()) + + verifyPipelines(collectorConfig, "otlp") + }) + + It("should fail to render an HTTP exporter when no endpoint is provided", func() { + _, err := collectorConfigMap(&oTelColConfig{ + Namespace: namespace, + NamePrefix: namePrefix, + Export: dash0v1alpha1.Export{ + Http: &dash0v1alpha1.HttpConfiguration{ + Headers: []dash0v1alpha1.Header{{ + Name: "Key1", + Value: "Value1", + }}, + Encoding: dash0v1alpha1.Proto, + }, + }, + }) + Expect(err).To( + MatchError( + ContainSubstring( + "no endpoint provided for the HTTP exporter, unable to create the OpenTelemetry 
collector"))) + }) + + It("should fail to render an HTTP exporter when no encoding is provided", func() { + _, err := collectorConfigMap(&oTelColConfig{ + Namespace: namespace, + NamePrefix: namePrefix, + Export: dash0v1alpha1.Export{ + Http: &dash0v1alpha1.HttpConfiguration{ + Endpoint: "https://example.com:4318", + Headers: []dash0v1alpha1.Header{{ + Name: "Key1", + Value: "Value1", + }}, + }, + }, + }) + Expect(err).To( + MatchError( + ContainSubstring( + "no encoding provided for the HTTP exporter, unable to create the OpenTelemetry collector"))) + + }) + + It("should render an arbitrary HTTP exporter", func() { + configMap, err := collectorConfigMap(&oTelColConfig{ + Namespace: namespace, + NamePrefix: namePrefix, + Export: dash0v1alpha1.Export{ + Http: &dash0v1alpha1.HttpConfiguration{ + Endpoint: "https://example.com:4318", + Headers: []dash0v1alpha1.Header{ + { + Name: "Key1", + Value: "Value1", + }, + { + Name: "Key2", + Value: "Value2", + }, + }, + Encoding: "json", + }, + }, + }) + + Expect(err).ToNot(HaveOccurred()) + collectorConfig := parseConfigMapContent(configMap) + exportersRaw := collectorConfig["exporters"] + Expect(exportersRaw).ToNot(BeNil()) + exporters := exportersRaw.(map[string]interface{}) + Expect(exporters).To(HaveLen(2)) + debugExporter := exporters["debug"] + Expect(debugExporter).ToNot(BeNil()) + + exporter2 := exporters["otlphttp"] + Expect(exporter2).ToNot(BeNil()) + otlpHttpExporter := exporter2.(map[string]interface{}) + Expect(otlpHttpExporter).ToNot(BeNil()) + Expect(otlpHttpExporter["endpoint"]).To(Equal("https://example.com:4318")) + headersRaw := otlpHttpExporter["headers"] + Expect(headersRaw).ToNot(BeNil()) + headers := headersRaw.(map[string]interface{}) + Expect(headers).To(HaveLen(2)) + Expect(headers["Key1"]).To(Equal("Value1")) + Expect(headers["Key2"]).To(Equal("Value2")) + Expect(otlpHttpExporter["encoding"]).To(Equal("json")) + + verifyPipelines(collectorConfig, "otlphttp") + }) + + It("should refuse to render the 
Dash0 exporter together with a gRPC exporter", func() { + _, err := collectorConfigMap(&oTelColConfig{ + Namespace: namespace, + NamePrefix: namePrefix, + Export: dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Endpoint: EndpointTest, + Authorization: dash0v1alpha1.Authorization{ + Token: &AuthorizationTokenTest, + }, + }, + Grpc: &dash0v1alpha1.GrpcConfiguration{ + Endpoint: "https://example.com:4318", + Headers: []dash0v1alpha1.Header{{ + Name: "Key1", + Value: "Value1", + }}, + }, + }, + }) + Expect(err).To( + MatchError( + ContainSubstring( + "combining the Dash0 exporter with a gRPC exporter is not supported, please use only one of them"))) + }) + + It("should render the Dash0 exporter together with an HTTP exporter", func() { + configMap, err := collectorConfigMap(&oTelColConfig{ + Namespace: namespace, + NamePrefix: namePrefix, + Export: dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Endpoint: EndpointTest, + Authorization: dash0v1alpha1.Authorization{ + Token: &AuthorizationTokenTest, + }, + }, + Http: &dash0v1alpha1.HttpConfiguration{ + Endpoint: "https://example.com:4318", + Headers: []dash0v1alpha1.Header{{ + Name: "Key1", + Value: "Value1", + }}, + Encoding: "proto", + }, + }, + }) + Expect(err).ToNot(HaveOccurred()) + + collectorConfig := parseConfigMapContent(configMap) + exportersRaw := collectorConfig["exporters"] + Expect(exportersRaw).ToNot(BeNil()) + exporters := exportersRaw.(map[string]interface{}) + Expect(exporters).To(HaveLen(3)) + debugExporter := exporters["debug"] + Expect(debugExporter).ToNot(BeNil()) + + exporter2 := exporters["otlp"] + Expect(exporter2).ToNot(BeNil()) + dash0OtlpExporter := exporter2.(map[string]interface{}) + Expect(dash0OtlpExporter).ToNot(BeNil()) + Expect(dash0OtlpExporter["endpoint"]).To(Equal(EndpointTest)) + headersRaw := dash0OtlpExporter["headers"] + Expect(headersRaw).ToNot(BeNil()) + headers := headersRaw.(map[string]interface{}) + Expect(headers).To(HaveLen(1)) + 
Expect(headers["Authorization"]).To(Equal("Bearer ${env:AUTH_TOKEN}")) + Expect(dash0OtlpExporter["encoding"]).To(BeNil()) + + exporter3 := exporters["otlphttp"] + Expect(exporter3).ToNot(BeNil()) + httpExporter := exporter3.(map[string]interface{}) + Expect(httpExporter["endpoint"]).To(Equal("https://example.com:4318")) + headersRaw = httpExporter["headers"] + Expect(headersRaw).ToNot(BeNil()) + headers = headersRaw.(map[string]interface{}) + Expect(headers).To(HaveLen(1)) + Expect(headers["Key1"]).To(Equal("Value1")) + Expect(httpExporter["encoding"]).To(Equal("proto")) + + verifyPipelines(collectorConfig, "otlp", "otlphttp") + }) + + It("should render a gRPC exporter together with an HTTP exporter", func() { + configMap, err := collectorConfigMap(&oTelColConfig{ + Namespace: namespace, + NamePrefix: namePrefix, + Export: dash0v1alpha1.Export{ + Grpc: &dash0v1alpha1.GrpcConfiguration{ + Endpoint: "example.com:4317", + Headers: []dash0v1alpha1.Header{{ + Name: "Key1", + Value: "Value1", + }}, + }, + Http: &dash0v1alpha1.HttpConfiguration{ + Endpoint: "https://example.com:4318", + Headers: []dash0v1alpha1.Header{{ + Name: "Key2", + Value: "Value2", + }}, + Encoding: "proto", + }, + }, + }) + Expect(err).ToNot(HaveOccurred()) + + collectorConfig := parseConfigMapContent(configMap) + exportersRaw := collectorConfig["exporters"] + Expect(exportersRaw).ToNot(BeNil()) + exporters := exportersRaw.(map[string]interface{}) + Expect(exporters).To(HaveLen(3)) + debugExporter := exporters["debug"] + Expect(debugExporter).ToNot(BeNil()) + + exporter2 := exporters["otlp"] + Expect(exporter2).ToNot(BeNil()) + grpcOtlpExporter := exporter2.(map[string]interface{}) + Expect(grpcOtlpExporter).ToNot(BeNil()) + Expect(grpcOtlpExporter["endpoint"]).To(Equal("example.com:4317")) + headersRaw := grpcOtlpExporter["headers"] + Expect(headersRaw).ToNot(BeNil()) + headers := headersRaw.(map[string]interface{}) + Expect(headers).To(HaveLen(1)) + Expect(headers["Key1"]).To(Equal("Value1")) + 
Expect(grpcOtlpExporter["encoding"]).To(BeNil()) + + exporter3 := exporters["otlphttp"] + Expect(exporter3).ToNot(BeNil()) + httpExporter := exporter3.(map[string]interface{}) + Expect(httpExporter["endpoint"]).To(Equal("https://example.com:4318")) + headersRaw = httpExporter["headers"] + Expect(headersRaw).ToNot(BeNil()) + headers = headersRaw.(map[string]interface{}) + Expect(headers).To(HaveLen(1)) + Expect(headers).To(HaveLen(1)) + Expect(headers["Key2"]).To(Equal("Value2")) + Expect(httpExporter["encoding"]).To(Equal("proto")) + + verifyPipelines(collectorConfig, "otlp", "otlphttp") + }) +}) + +func parseConfigMapContent(configMap *corev1.ConfigMap) map[string]interface{} { + configMapContent := configMap.Data["config.yaml"] + configMapParsed := &map[string]interface{}{} + err := yaml.Unmarshal([]byte(configMapContent), configMapParsed) + Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("Cannot parse config map content:\n%s\n", configMapContent)) + return *configMapParsed +} + +func verifyPipelines(collectorConfig map[string]interface{}, expectedExporters ...string) { + pipelines := ((collectorConfig["service"]).(map[string]interface{})["pipelines"]).(map[string]interface{}) + Expect(pipelines).ToNot(BeNil()) + tracesPipeline := (pipelines["traces/downstream"]).(map[string]interface{}) + tracesExporters := (tracesPipeline["exporters"]).([]interface{}) + Expect(tracesExporters).To(ContainElements(expectedExporters)) + metricsPipeline := (pipelines["metrics/downstream"]).(map[string]interface{}) + metricsExporters := (metricsPipeline["exporters"]).([]interface{}) + Expect(metricsExporters).To(ContainElements(expectedExporters)) + logsPipeline := (pipelines["logs/downstream"]).(map[string]interface{}) + logsExporters := (logsPipeline["exporters"]).([]interface{}) + Expect(logsExporters).To(ContainElements(expectedExporters)) +} diff --git a/internal/backendconnection/otelcolresources/config.yaml.template 
b/internal/backendconnection/otelcolresources/config.yaml.template index 5d2a2fd1..70b2040a 100644 --- a/internal/backendconnection/otelcolresources/config.yaml.template +++ b/internal/backendconnection/otelcolresources/config.yaml.template @@ -3,20 +3,17 @@ connectors: exporters: debug: {} -{{- if eq .ExportProtocol "grpc" }} - otlp: - endpoint: {{ .Endpoint }} -{{- if .HasExportAuthentication }} +{{- range $i, $exporter := .Exporters }} + {{ $exporter.Name }}: + endpoint: "{{ $exporter.Endpoint }}" +{{- if $exporter.Headers }} headers: - Authorization: Bearer ${env:AUTH_TOKEN} +{{- range $i, $header := $exporter.Headers }} + "{{ $header.Name }}": "{{ $header.Value }}" {{- end }} {{- end }} -{{- if eq .ExportProtocol "http" }} - otlphttp: - endpoint: {{ .Endpoint }} -{{- if .HasExportAuthentication }} - headers: - Authorization: Bearer ${env:AUTH_TOKEN} +{{- if $exporter.Encoding }} + encoding: "{{ $exporter.Encoding }}" {{- end }} {{- end }} @@ -177,12 +174,9 @@ service: - memory_limiter - batch exporters: -{{- if eq .ExportProtocol "grpc" }} - - otlp -{{- end }} -{{- if eq .ExportProtocol "http" }} - - otlphttp -{{- end }} + {{- range $i, $exporter := .Exporters }} + - {{ $exporter.Name }} + {{- end }} metrics/downstream: receivers: @@ -192,12 +186,9 @@ service: - memory_limiter - batch exporters: -{{- if eq .ExportProtocol "grpc" }} - - otlp -{{- end }} -{{- if eq .ExportProtocol "http" }} - - otlphttp -{{- end }} + {{- range $i, $exporter := .Exporters }} + - {{ $exporter.Name }} + {{- end }} logs/otlp: receivers: @@ -224,12 +215,9 @@ service: - batch exporters: - debug -{{- if eq .ExportProtocol "grpc" }} - - otlp -{{- end }} -{{- if eq .ExportProtocol "http" }} - - otlphttp -{{- end }} + {{- range $i, $exporter := .Exporters }} + - {{ $exporter.Name }} + {{- end }} telemetry: metrics: diff --git a/internal/backendconnection/otelcolresources/desired_state.go b/internal/backendconnection/otelcolresources/desired_state.go index 97a5dcd6..1a8eca74 100644 --- 
a/internal/backendconnection/otelcolresources/desired_state.go +++ b/internal/backendconnection/otelcolresources/desired_state.go @@ -4,12 +4,8 @@ package otelcolresources import ( - "bytes" - _ "embed" "fmt" - "net/url" "path/filepath" - "text/template" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -20,24 +16,17 @@ import ( "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" + dash0v1alpha1 "github.com/dash0hq/dash0-operator/api/dash0monitoring/v1alpha1" "github.com/dash0hq/dash0-operator/internal/dash0/util" ) type oTelColConfig struct { - Namespace string - NamePrefix string - Endpoint string - AuthorizationToken string - SecretRef string - Images util.Images + Namespace string + NamePrefix string + Export dash0v1alpha1.Export + Images util.Images } -func (c *oTelColConfig) hasAuthentication() bool { - return c.SecretRef != "" || c.AuthorizationToken != "" -} - -type exportProtocol string - const ( OtlpGrpcHostPort = 40317 OtlpHttpHostPort = 40318 @@ -46,15 +35,11 @@ const ( // ports. When the operator creates its daemonset, the pods of one of the two otelcol daemonsets would fail to start // due to port conflicts. 
- grpcExportProtocol exportProtocol = "grpc" - httpExportProtocol exportProtocol = "http" - rbacApiVersion = "rbac.authorization.k8s.io/v1" + rbacApiVersion = "rbac.authorization.k8s.io/v1" ) type collectorConfigurationTemplateValues struct { - HasExportAuthentication bool - Endpoint string - ExportProtocol exportProtocol + Exporters []otlpExporter IgnoreLogsFromNamespaces []string } @@ -100,34 +85,27 @@ var ( appKubernetesIoInstanceKey: appKubernetesIoInstanceValue, appKubernetesIoComponentLabelKey: serviceComponent, } - - //go:embed config.yaml.template - collectorConfigurationTemplateSource string - collectorConfigurationTemplate = template.Must(template.New("collector-configuration").Parse(collectorConfigurationTemplateSource)) ) func assembleDesiredState(config *oTelColConfig) ([]client.Object, error) { - if config.Endpoint == "" { - return nil, fmt.Errorf("no endpoint provided, unable to create the OpenTelemetry collector") - } - var desiredState []client.Object desiredState = append(desiredState, serviceAccount(config)) - collectorCM, err := collectorConfigMap(config) if err != nil { return desiredState, err } desiredState = append(desiredState, collectorCM) - desiredState = append(desiredState, filelogOffsetsConfigMap(config)) - desiredState = append(desiredState, clusterRole(config)) desiredState = append(desiredState, clusterRoleBinding(config)) desiredState = append(desiredState, role(config)) desiredState = append(desiredState, roleBinding(config)) desiredState = append(desiredState, service(config)) - desiredState = append(desiredState, daemonSet(config)) + ds, err := daemonSet(config) + if err != nil { + return desiredState, err + } + desiredState = append(desiredState, ds) return desiredState, nil } @@ -145,58 +123,6 @@ func serviceAccount(config *oTelColConfig) *corev1.ServiceAccount { } } -func renderCollectorConfigs(templateValues *collectorConfigurationTemplateValues) (string, error) { - var collectorConfiguration bytes.Buffer - if err := 
collectorConfigurationTemplate.Execute(&collectorConfiguration, templateValues); err != nil { - return "", err - } - - return collectorConfiguration.String(), nil -} - -func collectorConfigMap(config *oTelColConfig) (*corev1.ConfigMap, error) { - endpoint := config.Endpoint - exportProtocol := grpcExportProtocol - if url, err := url.ParseRequestURI(endpoint); err != nil { - // Not a valid URL, assume it's grpc - } else if url.Scheme == "https" || url.Scheme == "http" { - exportProtocol = httpExportProtocol - } - - collectorConfiguration, err := renderCollectorConfigs(&collectorConfigurationTemplateValues{ - Endpoint: endpoint, - ExportProtocol: exportProtocol, - HasExportAuthentication: config.hasAuthentication(), - IgnoreLogsFromNamespaces: []string{ - // Skipping kube-system, it requires bespoke filtering work - "kube-system", - // Skipping logs from the operator and the daemonset, otherwise - // logs will compound in case of log parsing errors - config.Namespace, - }, - }) - if err != nil { - return nil, fmt.Errorf("cannot render the collector configuration template: %w", err) - } - - configMapData := map[string]string{ - collectorConfigurationYaml: collectorConfiguration, - } - - return &corev1.ConfigMap{ - TypeMeta: metav1.TypeMeta{ - Kind: "ConfigMap", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: collectorConfigConfigMapName(config.NamePrefix), - Namespace: config.Namespace, - Labels: labels(false), - }, - Data: configMapData, - }, nil -} - func filelogOffsetsConfigMap(config *oTelColConfig) *corev1.ConfigMap { return &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{ @@ -349,7 +275,7 @@ func service(config *oTelColConfig) *corev1.Service { } } -func daemonSet(config *oTelColConfig) *appsv1.DaemonSet { +func daemonSet(config *oTelColConfig) (*appsv1.DaemonSet, error) { configMapItems := []corev1.KeyToPath{{ Key: collectorConfigurationYaml, Path: collectorConfigurationYaml, @@ -473,29 +399,29 @@ func daemonSet(config *oTelColConfig) 
*appsv1.DaemonSet { }, } - if config.hasAuthentication() { - var authTokenEnvVar corev1.EnvVar - - if config.AuthorizationToken != "" { - authTokenEnvVar = corev1.EnvVar{ + if config.Export.Dash0 != nil { + token := config.Export.Dash0.Authorization.Token + secretRef := config.Export.Dash0.Authorization.SecretRef + if token != nil && *token != "" { + env = append(env, corev1.EnvVar{ Name: authTokenEnvVarName, - Value: config.AuthorizationToken, - } - } else { - authTokenEnvVar = corev1.EnvVar{ + Value: *token, + }) + } else if secretRef != nil && secretRef.Name != "" && secretRef.Key != "" { + env = append(env, corev1.EnvVar{ Name: authTokenEnvVarName, ValueFrom: &corev1.EnvVarSource{ SecretKeyRef: &corev1.SecretKeySelector{ LocalObjectReference: corev1.LocalObjectReference{ - Name: config.SecretRef, + Name: secretRef.Name, }, - Key: "dash0-authorization-token", // TODO Make configurable + Key: secretRef.Key, }, }, - } + }) + } else { + return nil, fmt.Errorf("neither token nor secretRef provided for the Dash0 exporter") } - - env = append(env, authTokenEnvVar) } probe := corev1.Probe{ @@ -649,7 +575,7 @@ func daemonSet(config *oTelColConfig) *appsv1.DaemonSet { filelogOffsetSynchContainer.ImagePullPolicy = config.Images.FilelogOffsetSynchImagePullPolicy } - return &appsv1.DaemonSet{ + ds := &appsv1.DaemonSet{ TypeMeta: metav1.TypeMeta{ Kind: "DaemonSet", APIVersion: "apps/v1", @@ -688,6 +614,7 @@ func daemonSet(config *oTelColConfig) *appsv1.DaemonSet { }, }, } + return ds, nil } func serviceAccountName(namePrefix string) string { diff --git a/internal/backendconnection/otelcolresources/desired_state_test.go b/internal/backendconnection/otelcolresources/desired_state_test.go index bc84ea75..69907b55 100644 --- a/internal/backendconnection/otelcolresources/desired_state_test.go +++ b/internal/backendconnection/otelcolresources/desired_state_test.go @@ -11,6 +11,8 @@ import ( corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" + dash0v1alpha1 
"github.com/dash0hq/dash0-operator/api/dash0monitoring/v1alpha1" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -25,27 +27,39 @@ const ( var _ = Describe("The desired state of the OpenTelemetry Collector resources", func() { It("should fail if no endpoint has been provided", func() { _, err := assembleDesiredState(&oTelColConfig{ - Namespace: namespace, - NamePrefix: namePrefix, - AuthorizationToken: AuthorizationTokenTest, - Images: TestImages, + Namespace: namespace, + NamePrefix: namePrefix, + Export: dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Authorization: dash0v1alpha1.Authorization{ + Token: &AuthorizationTokenTest, + }, + }, + }, + Images: TestImages, }) Expect(err).To(HaveOccurred()) }) It("should describe the desired state as a set of Kubernetes client objects", func() { desiredState, err := assembleDesiredState(&oTelColConfig{ - Namespace: namespace, - NamePrefix: namePrefix, - Endpoint: EndpointTest, - AuthorizationToken: AuthorizationTokenTest, - Images: TestImages, + Namespace: namespace, + NamePrefix: namePrefix, + Export: dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Endpoint: EndpointTest, + Authorization: dash0v1alpha1.Authorization{ + Token: &AuthorizationTokenTest, + }, + }, + }, + Images: TestImages, }) Expect(err).ToNot(HaveOccurred()) Expect(desiredState).To(HaveLen(9)) collectorConfigConfigMapContent := getCollectorConfigConfigMapContent(desiredState) - Expect(collectorConfigConfigMapContent).To(ContainSubstring(fmt.Sprintf("endpoint: %s", EndpointTest))) + Expect(collectorConfigConfigMapContent).To(ContainSubstring(fmt.Sprintf("endpoint: %s", EndpointTestQuoted))) Expect(collectorConfigConfigMapContent).NotTo(ContainSubstring("file/traces")) Expect(collectorConfigConfigMapContent).NotTo(ContainSubstring("file/metrics")) Expect(collectorConfigConfigMapContent).NotTo(ContainSubstring("file/logs")) @@ -110,15 +124,21 @@ var _ = Describe("The desired state of the OpenTelemetry 
Collector resources", f It("should use the authorization token directly if provided", func() { desiredState, err := assembleDesiredState(&oTelColConfig{ - Namespace: namespace, - NamePrefix: namePrefix, - Endpoint: EndpointTest, - AuthorizationToken: AuthorizationTokenTest, + Namespace: namespace, + NamePrefix: namePrefix, + Export: dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Endpoint: EndpointTest, + Authorization: dash0v1alpha1.Authorization{ + Token: &AuthorizationTokenTest, + }, + }, + }, }) Expect(err).ToNot(HaveOccurred()) configMapContent := getCollectorConfigConfigMapContent(desiredState) - Expect(configMapContent).To(ContainSubstring("Authorization: Bearer ${env:AUTH_TOKEN}")) + Expect(configMapContent).To(ContainSubstring("\"Authorization\": \"Bearer ${env:AUTH_TOKEN}\"")) daemonSet := getDaemonSet(desiredState) @@ -131,33 +151,44 @@ var _ = Describe("The desired state of the OpenTelemetry Collector resources", f desiredState, err := assembleDesiredState(&oTelColConfig{ Namespace: namespace, NamePrefix: namePrefix, - Endpoint: EndpointTest, - SecretRef: SecretRefTest, + Export: dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Endpoint: EndpointTest, + Authorization: dash0v1alpha1.Authorization{ + SecretRef: &SecretRefTest, + }, + }, + }, }) Expect(err).ToNot(HaveOccurred()) configMapContent := getCollectorConfigConfigMapContent(desiredState) - Expect(configMapContent).To(ContainSubstring("Authorization: Bearer ${env:AUTH_TOKEN}")) + Expect(configMapContent).To(ContainSubstring("\"Authorization\": \"Bearer ${env:AUTH_TOKEN}\"")) daemonSet := getDaemonSet(desiredState) podSpec := daemonSet.Spec.Template.Spec container := podSpec.Containers[0] authTokenEnvVar := findEnvVarByName(container.Env, "AUTH_TOKEN") Expect(authTokenEnvVar).NotTo(BeNil()) - Expect(authTokenEnvVar.ValueFrom.SecretKeyRef.Name).To(Equal(SecretRefTest)) - Expect(authTokenEnvVar.ValueFrom.SecretKeyRef.Key).To(Equal("dash0-authorization-token")) + 
Expect(authTokenEnvVar.ValueFrom.SecretKeyRef.Name).To(Equal(SecretRefTest.Name)) + Expect(authTokenEnvVar.ValueFrom.SecretKeyRef.Key).To(Equal(SecretRefTest.Key)) }) - It("should not add the auth token env var if no authorization token nor secret has been provided", func() { + It("should not add the auth token env var if no Dash0 exporter is used", func() { desiredState, err := assembleDesiredState(&oTelColConfig{ Namespace: namespace, NamePrefix: namePrefix, - Endpoint: EndpointTest, + Export: dash0v1alpha1.Export{ + Http: &dash0v1alpha1.HttpConfiguration{ + Endpoint: EndpointTest, + Encoding: dash0v1alpha1.Proto, + }, + }, }) Expect(err).ToNot(HaveOccurred()) configMapContent := getCollectorConfigConfigMapContent(desiredState) - Expect(configMapContent).NotTo(ContainSubstring("Authorization: Bearer ${env:AUTH_TOKEN}")) + Expect(configMapContent).NotTo(ContainSubstring("\"Authorization\": \"Bearer ${env:AUTH_TOKEN}\"")) daemonSet := getDaemonSet(desiredState) podSpec := daemonSet.Spec.Template.Spec diff --git a/internal/backendconnection/otelcolresources/otelcol_resources.go b/internal/backendconnection/otelcolresources/otelcol_resources.go index 795b54ef..32d401c1 100644 --- a/internal/backendconnection/otelcolresources/otelcol_resources.go +++ b/internal/backendconnection/otelcolresources/otelcol_resources.go @@ -38,12 +38,10 @@ func (m *OTelColResourceManager) CreateOrUpdateOpenTelemetryCollectorResources( logger *logr.Logger, ) (bool, bool, error) { config := &oTelColConfig{ - Namespace: namespace, - NamePrefix: m.OTelCollectorNamePrefix, - Endpoint: dash0MonitoringResource.Spec.Endpoint, - AuthorizationToken: dash0MonitoringResource.Spec.AuthorizationToken, - SecretRef: dash0MonitoringResource.Spec.SecretRef, - Images: images, + Namespace: namespace, + NamePrefix: m.OTelCollectorNamePrefix, + Export: dash0MonitoringResource.Spec.Export, + Images: images, } desiredState, err := assembleDesiredState(config) if err != nil { @@ -208,12 +206,10 @@ func (m 
*OTelColResourceManager) DeleteResources( logger *logr.Logger, ) error { config := &oTelColConfig{ - Namespace: namespace, - NamePrefix: m.OTelCollectorNamePrefix, - Endpoint: dash0MonitoringResource.Spec.Endpoint, - AuthorizationToken: dash0MonitoringResource.Spec.AuthorizationToken, - SecretRef: dash0MonitoringResource.Spec.SecretRef, - Images: images, + Namespace: namespace, + NamePrefix: m.OTelCollectorNamePrefix, + Export: dash0MonitoringResource.Spec.Export, + Images: images, } allObjects, err := assembleDesiredState(config) if err != nil { diff --git a/internal/dash0/controller/dash0_controller_test.go b/internal/dash0/controller/dash0_controller_test.go index 81a0949e..6a85bb44 100644 --- a/internal/dash0/controller/dash0_controller_test.go +++ b/internal/dash0/controller/dash0_controller_test.go @@ -749,10 +749,15 @@ var _ = Describe("The Dash0 controller", Ordered, func() { Namespace: Dash0MonitoringResourceQualifiedName.Namespace, }, Spec: dash0v1alpha1.Dash0MonitoringSpec{ - Endpoint: EndpointTest, - AuthorizationToken: AuthorizationTokenTest, - SecretRef: SecretRefTest, InstrumentWorkloads: "invalid", + Export: dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Endpoint: EndpointTest, + Authorization: dash0v1alpha1.Authorization{ + Token: &AuthorizationTokenTest, + }, + }, + }, }, })).ToNot(Succeed()) }) diff --git a/test-resources/bin/test-roundtrip-01-aum-operator-cr.sh b/test-resources/bin/test-roundtrip-01-aum-operator-cr.sh index 118179b3..cc481123 100755 --- a/test-resources/bin/test-roundtrip-01-aum-operator-cr.sh +++ b/test-resources/bin/test-roundtrip-01-aum-operator-cr.sh @@ -31,7 +31,7 @@ kubectl create secret \ generic \ dash0-authorization-secret \ --namespace dash0-system \ - --from-literal=dash0-authorization-token="${DASH0_AUTHORIZATION_TOKEN}" + --from-literal=token="${DASH0_AUTHORIZATION_TOKEN}" echo echo diff --git a/test-resources/bin/test-roundtrip-02-operator-cr-aum.sh 
b/test-resources/bin/test-roundtrip-02-operator-cr-aum.sh index 97d3aba6..ce583eb5 100755 --- a/test-resources/bin/test-roundtrip-02-operator-cr-aum.sh +++ b/test-resources/bin/test-roundtrip-02-operator-cr-aum.sh @@ -31,7 +31,7 @@ kubectl create secret \ generic \ dash0-authorization-secret \ --namespace dash0-system \ - --from-literal=dash0-authorization-token="${DASH0_AUTHORIZATION_TOKEN}" + --from-literal=token="${DASH0_AUTHORIZATION_TOKEN}" echo echo diff --git a/test-resources/bin/util b/test-resources/bin/util index 8f5c362b..b7acee28 100644 --- a/test-resources/bin/util +++ b/test-resources/bin/util @@ -195,7 +195,7 @@ deploy_via_helm() { fi if ! has_been_set_to_empty_string "FILELOG_OFFSET_SYNCH_IMG_REPOSITORY"; then - helm_install_command+=" --set operator.filelogOffsetSynchImage.repository=${FILELOG_OFFSET_SYNCH_IMG_REPOSITORY:-configuration-reloader}" + helm_install_command+=" --set operator.filelogOffsetSynchImage.repository=${FILELOG_OFFSET_SYNCH_IMG_REPOSITORY:-filelog-offset-synch}" fi if ! 
has_been_set_to_empty_string "FILELOG_OFFSET_SYNCH_IMG_TAG"; then helm_install_command+=" --set operator.filelogOffsetSynchImage.tag=${FILELOG_OFFSET_SYNCH_IMG_TAG:-latest}" diff --git a/test-resources/customresources/dash0monitoring/dash0monitoring.secret.yaml b/test-resources/customresources/dash0monitoring/dash0monitoring.secret.yaml index b61b3779..eb0b3d83 100644 --- a/test-resources/customresources/dash0monitoring/dash0monitoring.secret.yaml +++ b/test-resources/customresources/dash0monitoring/dash0monitoring.secret.yaml @@ -3,17 +3,8 @@ kind: Dash0Monitoring metadata: name: dash0-monitoring-resource spec: - # production: endpoint: ingress.eu-west-1.aws.dash0.com:4317 - # development: ingress.eu-west-1.aws.dash0-dev.com:4317 - endpoint: ingress.eu-west-1.aws.dash0-dev.com:4317 - - # Either provide the name of a secret existing in the Dash0 operator's namespace as secretRef: - secretRef: dash0-authorization-secret - - # Or provide the token directly as authorizationToken: - # authorizationToken: auth_... - - # Opt-out settings for particular use cases. The default value is "all". Other possible values are - # "created-and-updated" and "none". 
- # instrumentWorkloads: all - + export: + dash0: + endpoint: ingress.eu-west-1.aws.dash0-dev.com:4317 + authorization: + secretRef: {} diff --git a/test-resources/customresources/dash0monitoring/dash0monitoring.token.yaml.template b/test-resources/customresources/dash0monitoring/dash0monitoring.token.yaml.template index de73484c..abd641e8 100644 --- a/test-resources/customresources/dash0monitoring/dash0monitoring.token.yaml.template +++ b/test-resources/customresources/dash0monitoring/dash0monitoring.token.yaml.template @@ -3,16 +3,8 @@ kind: Dash0Monitoring metadata: name: dash0-monitoring-resource spec: - # production: endpoint: ingress.eu-west-1.aws.dash0.com:4317 - # development: ingress.eu-west-1.aws.dash0-dev.com:4317 - endpoint: ingress.eu-west-1.aws.dash0-dev.com:4317 - - # Either provide the name of a secret existing in the Dash0 operator's namespace as secretRef: - # secretRef: dash0-authorization-secret - - # Or provide the token directly as authorizationToken: - authorizationToken: "$DASH0_AUTHORIZATION_TOKEN" - - # Opt-out settings for particular use cases. The default value is "all". Other possible values are - # "created-and-updated" and "none". 
- # instrumentWorkloads: all + export: + dash0: + endpoint: ingress.eu-west-1.aws.dash0-dev.com:4317 + authorization: + token: "$DASH0_AUTHORIZATION_TOKEN" diff --git a/test-resources/otlp-sink/otlp-sink.yaml b/test-resources/otlp-sink/otlp-sink.yaml index 3e02d768..a84f45c2 100644 --- a/test-resources/otlp-sink/otlp-sink.yaml +++ b/test-resources/otlp-sink/otlp-sink.yaml @@ -31,9 +31,10 @@ data: receivers: otlp: protocols: + grpc: + endpoint: ${env:MY_POD_IP}:4317 http: - endpoint: ${env:MY_POD_IP}:8080 - + endpoint: ${env:MY_POD_IP}:4318 service: extensions: - health_check @@ -90,7 +91,10 @@ spec: value: 400MiB ports: - name: otlp - containerPort: 8080 + containerPort: 4317 + protocol: TCP + - name: otlp-http + containerPort: 4318 protocol: TCP volumeMounts: - name: otelcol-config @@ -158,8 +162,12 @@ metadata: spec: ports: - name: otlp - port: 80 + port: 4317 + protocol: TCP + targetPort: 4317 + - name: otlp-http + port: 4318 protocol: TCP - targetPort: 8080 + targetPort: 4318 selector: app.kubernetes.io/instance: otlp-sink \ No newline at end of file diff --git a/test/e2e/dash0_monitoring_resource.go b/test/e2e/dash0_monitoring_resource.go index 009a1bc6..6f7e10da 100644 --- a/test/e2e/dash0_monitoring_resource.go +++ b/test/e2e/dash0_monitoring_resource.go @@ -24,7 +24,7 @@ type dash0MonitoringValues struct { const ( dash0MonitoringResourceName = "dash0-monitoring-resource-e2e" - defaultEndpoint = "http://otlp-sink.otlp-sink.svc.cluster.local" + defaultEndpoint = "http://otlp-sink.otlp-sink.svc.cluster.local:4318" ) var ( @@ -91,7 +91,7 @@ func updateEndpointOfDash0MonitoringResource( ) { updateDash0MonitoringResource( namespace, - fmt.Sprintf("{\"spec\":{\"endpoint\":\"%s\"}}", newEndpoint), + fmt.Sprintf("{\"spec\":{\"export\":{\"http\":{\"endpoint\":\"%s\"}}}}", newEndpoint), ) } diff --git a/test/e2e/dash0monitoring.e2e.yaml.template b/test/e2e/dash0monitoring.e2e.yaml.template index 93dd750e..0034052d 100644 --- 
a/test/e2e/dash0monitoring.e2e.yaml.template +++ b/test/e2e/dash0monitoring.e2e.yaml.template @@ -4,4 +4,8 @@ metadata: name: dash0-monitoring-resource-e2e spec: instrumentWorkloads: {{ .InstrumentWorkloads }} - endpoint: {{ .Endpoint }} + export: + # In the e2e tests we only want to export to the local otlp-sink collector. Since the Dash0 exporter config uses + # gRPC, and gRPC requires a TLS connection, we are configuring the local otlp-sink collector via HTTP instead. + http: + endpoint: {{ .Endpoint }} \ No newline at end of file diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 0225e3bd..a0919c85 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -47,8 +47,13 @@ var _ = Describe("Dash0 Kubernetes Operator", Ordered, func() { if localKubeCtx == "" { Fail(fmt.Sprintf("The mandatory setting LOCAL_KUBECTX is missing in the file %s.", dotEnvFile)) } - kubeContextHasBeenChanged, originalKubeContext = setKubeContext(localKubeCtx) + + // Cleans up the test namespace, otlp sink and the operator. Usually this is cleaned up in AfterAll/AfterEach + // steps, but for cases where we want to troubleshoot failing e2e tests and have disabled cleanup in After steps + // we clean up here at the beginning as well. 
+ cleanupAll() + checkIfRequiredPortsAreBlocked() renderTemplates() @@ -769,6 +774,15 @@ func runInParallelForAllWorkloadTypes[C workloadConfig]( } } +func cleanupAll() { + if applicationUnderTestNamespace != "default" { + By("removing namespace for application under test") + _ = runAndIgnoreOutput(exec.Command("kubectl", "delete", "ns", applicationUnderTestNamespace, "--ignore-not-found")) + } + undeployOperator(operatorNamespace) + uninstallOtlpSink(workingDir) +} + func readAndApplyEnvironmentVariables() { var err error diff --git a/test/util/constants.go b/test/util/constants.go index defc606d..1ab7e6d8 100644 --- a/test/util/constants.go +++ b/test/util/constants.go @@ -8,6 +8,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + dash0v1alpha1 "github.com/dash0hq/dash0-operator/api/dash0monitoring/v1alpha1" "github.com/dash0hq/dash0-operator/internal/dash0/util" ) @@ -30,11 +31,16 @@ const ( OTelCollectorBaseUrlTest = "http://$(DASH0_NODE_IP):40318" EndpointTest = "endpoint.dash0.com:4317" - AuthorizationTokenTest = "authorization-token" - SecretRefTest = "secret-ref" + EndpointTestQuoted = "\"endpoint.dash0.com:4317\"" ) var ( + AuthorizationTokenTest = "authorization-token" + SecretRefTest = dash0v1alpha1.SecretRef{ + Name: "secret-ref", + Key: "key", + } + ArbitraryNumer int64 = 1302 TestImages = util.Images{ diff --git a/test/util/dash0_monitoring_resource.go b/test/util/dash0_monitoring_resource.go index 5c16c9f2..258610ca 100644 --- a/test/util/dash0_monitoring_resource.go +++ b/test/util/dash0_monitoring_resource.go @@ -61,11 +61,15 @@ func EnsureDash0MonitoringResourceExistsWithNamespacedName( instrumentWorkloads dash0v1alpha1.InstrumentWorkloadsMode, ) *dash0v1alpha1.Dash0Monitoring { By("creating the Dash0 monitoring resource") - spec := dash0v1alpha1.Dash0MonitoringSpec{ - Endpoint: EndpointTest, - AuthorizationToken: AuthorizationTokenTest, - SecretRef: SecretRefTest, + Export: dash0v1alpha1.Export{ + Dash0: 
&dash0v1alpha1.Dash0Configuration{ + Endpoint: EndpointTest, + Authorization: dash0v1alpha1.Authorization{ + Token: &AuthorizationTokenTest, + }, + }, + }, } if instrumentWorkloads != "" { spec.InstrumentWorkloads = instrumentWorkloads @@ -98,9 +102,14 @@ func CreateDash0MonitoringResource( Namespace: dash0MonitoringResourceName.Namespace, }, Spec: dash0v1alpha1.Dash0MonitoringSpec{ - Endpoint: EndpointTest, - AuthorizationToken: AuthorizationTokenTest, - SecretRef: SecretRefTest, + Export: dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Endpoint: EndpointTest, + Authorization: dash0v1alpha1.Authorization{ + Token: &AuthorizationTokenTest, + }, + }, + }, }, } Expect(k8sClient.Create(ctx, dash0MonitoringResource)).To(Succeed())