From 2a3049c54605f3712359ed0c4ea249da94ffbada Mon Sep 17 00:00:00 2001 From: Sathyajith Krishnan Date: Fri, 6 Sep 2024 10:06:17 +0200 Subject: [PATCH] [FEAT] added version cleanup monitoring - `CAPApplicationVersion` enhanced with - deletion rules based on metric rules or PromQL expression - scrape configuration - New routine to periodically collect relevant versions and queue them for evaluation - New routine to process queued versions by evaluating metrics rules - added service monitor creation for workload services based on scrape configuration --- cmd/controller/main.go | 15 +- crds/sme.sap.com_capapplicationversions.yaml | 51 ++ go.mod | 12 +- go.sum | 23 + internal/controller/common_test.go | 63 +- internal/controller/controller.go | 16 +- internal/controller/controller_test.go | 2 + .../reconcile-capapplicationversion.go | 114 +++- .../reconcile-capapplicationversion_test.go | 44 ++ internal/controller/reconcile.go | 8 +- internal/controller/reconcile_test.go | 8 +- .../testdata/common/crd-servicemonitors.yaml | 66 ++ .../ca-cleanup-dry-run-enabled.yaml | 41 ++ .../ca-cleanup-enabled.yaml | 41 ++ .../cat-consumer-v2-ready-never.yaml | 38 ++ .../cat-consumer-v2-upgrading.yaml | 38 ++ .../cat-provider-v2-ready.yaml | 38 ++ .../cat-provider-v3-ready.yaml | 38 ++ .../cav-v1-deletion-rules-error.yaml | 91 +++ .../cav-v1-deletion-rules-processing.yaml | 93 +++ .../cav-v1-deletion-rules.yaml | 93 +++ .../cav-v1-monitoring-port-missing.yaml | 88 +++ .../cav-v2-deletion-rules.yaml | 83 +++ .../cav-v3-deletion-rules-processing.yaml | 83 +++ .../cav-v3-deletion-rules.yaml | 84 +++ .../servicemonitors-cav-v1.yaml | 38 ++ internal/controller/version-monitoring.go | 390 +++++++++++ .../controller/version-monitoring_test.go | 604 ++++++++++++++++++ pkg/apis/sme.sap.com/v1alpha1/types.go | 69 ++ .../v1alpha1/zz_generated.deepcopy.go | 89 +++ .../sme.sap.com/v1alpha1/deletionrules.go | 42 ++ .../sme.sap.com/v1alpha1/deploymentdetails.go | 19 +- .../sme.sap.com/v1alpha1/metricrule.go | 59 ++ .../sme.sap.com/v1alpha1/monitoringconfig.go | 59 ++ .../v1alpha1/workloadmonitoring.go | 37 ++ pkg/client/applyconfiguration/utils.go | 8 + 36 files changed, 2656 insertions(+), 29 deletions(-) create mode 100644 internal/controller/testdata/common/crd-servicemonitors.yaml create mode 100644 internal/controller/testdata/version-monitoring/ca-cleanup-dry-run-enabled.yaml create mode 100644 internal/controller/testdata/version-monitoring/ca-cleanup-enabled.yaml create mode 100644 internal/controller/testdata/version-monitoring/cat-consumer-v2-ready-never.yaml create mode 100644 internal/controller/testdata/version-monitoring/cat-consumer-v2-upgrading.yaml create mode 100644 internal/controller/testdata/version-monitoring/cat-provider-v2-ready.yaml create mode 100644 internal/controller/testdata/version-monitoring/cat-provider-v3-ready.yaml create mode 100644 internal/controller/testdata/version-monitoring/cav-v1-deletion-rules-error.yaml create mode 100644 internal/controller/testdata/version-monitoring/cav-v1-deletion-rules-processing.yaml create mode 100644 internal/controller/testdata/version-monitoring/cav-v1-deletion-rules.yaml create mode 100644 internal/controller/testdata/version-monitoring/cav-v1-monitoring-port-missing.yaml create mode 100644 internal/controller/testdata/version-monitoring/cav-v2-deletion-rules.yaml create mode 100644 internal/controller/testdata/version-monitoring/cav-v3-deletion-rules-processing.yaml create mode 100644 
internal/controller/testdata/version-monitoring/cav-v3-deletion-rules.yaml create mode 100644 internal/controller/testdata/version-monitoring/servicemonitors-cav-v1.yaml create mode 100644 internal/controller/version-monitoring.go create mode 100644 internal/controller/version-monitoring_test.go create mode 100644 pkg/client/applyconfiguration/sme.sap.com/v1alpha1/deletionrules.go create mode 100644 pkg/client/applyconfiguration/sme.sap.com/v1alpha1/metricrule.go create mode 100644 pkg/client/applyconfiguration/sme.sap.com/v1alpha1/monitoringconfig.go create mode 100644 pkg/client/applyconfiguration/sme.sap.com/v1alpha1/workloadmonitoring.go diff --git a/cmd/controller/main.go b/cmd/controller/main.go index 7dc1d4c..3db3948 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -26,6 +26,9 @@ import ( "github.com/sap/cap-operator/pkg/client/clientset/versioned" istio "istio.io/client-go/pkg/clientset/versioned" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + promop "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" + apiext "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" ) const ( @@ -52,6 +55,16 @@ func main() { klog.Fatal("could not create client for custom resources: ", err.Error()) } + apiExtClient, err := apiext.NewForConfig(config) + if err != nil { + klog.Fatal("could not create client for api-extensions: ", err.Error()) + } + + promClient, err := promop.NewForConfig(config) + if err != nil { + klog.Fatal("could not create client for prometheus-operator resources: ", err.Error()) + } + istioClient, err := istio.NewForConfig(config) if err != nil { klog.Fatal("could not create client for istio resources: ", err.Error()) @@ -107,7 +120,7 @@ func main() { Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: func(ctx context.Context) { klog.InfoS("Started leading: ", LeaseLockName, leaseLockId) - c := controller.NewController(coreClient, crdClient, istioClient, certClient, certManagerClient, dnsClient) + c := controller.NewController(coreClient, crdClient, istioClient, certClient, certManagerClient, dnsClient, apiExtClient, promClient) go c.Start(ctx) }, OnStoppedLeading: func() { diff --git a/crds/sme.sap.com_capapplicationversions.yaml b/crds/sme.sap.com_capapplicationversions.yaml index 93bb710..2f8c0e9 100644 --- a/crds/sme.sap.com_capapplicationversions.yaml +++ b/crds/sme.sap.com_capapplicationversions.yaml @@ -1375,6 +1375,57 @@ spec: format: int32 type: integer type: object + monitoring: + properties: + deletionRules: + oneOf: + - required: + - metrics + - required: + - expression + properties: + expression: + type: string + metrics: + items: + properties: + calculationPeriod: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + name: + type: string + thresholdValue: + format: double + type: string + type: + enum: + - Gauge + - Counter + type: string + required: + - calculationPeriod + - name + - thresholdValue + - type + type: object + type: array + type: object + scrapeConfig: + properties: + interval: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + path: + type: string + port: + type: string + scrapeTimeout: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + required: + - port + type: object + type: object nodeName: type: string nodeSelector: diff --git a/go.mod b/go.mod index e6383b3..a51662c 100644 --- 
a/go.mod +++ b/go.mod @@ -13,16 +13,23 @@ require ( github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.6.0 github.com/lestrrat-go/jwx/v2 v2.1.1 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.2 + github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.2 + github.com/prometheus/client_golang v1.20.2 + github.com/prometheus/common v0.55.0 go.uber.org/zap v1.27.0 + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 golang.org/x/mod v0.21.0 google.golang.org/protobuf v1.34.2 istio.io/api v1.23.1 istio.io/client-go v1.23.1 k8s.io/api v0.31.1 + k8s.io/apiextensions-apiserver v0.31.1 k8s.io/apimachinery v0.31.1 k8s.io/client-go v0.31.1 k8s.io/code-generator v0.31.1 k8s.io/klog/v2 v2.130.1 + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 ) require ( @@ -52,12 +59,12 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect github.com/segmentio/asm v1.2.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.27.0 // indirect - golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 golang.org/x/net v0.29.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.8.0 // indirect @@ -71,12 +78,11 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.31.1 // indirect k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7 // indirect k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 // indirect k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 // indirect + sigs.k8s.io/controller-runtime v0.18.5 // indirect sigs.k8s.io/gateway-api v1.1.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 408ff83..a171444 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,11 @@ github.com/MicahParks/keyfunc/v2 v2.1.0 h1:6ZXKb9Rp6qp1bDbJefnG7cTH8yMN1IC/4nf+GVjO99k= github.com/MicahParks/keyfunc/v2 v2.1.0/go.mod h1:rW42fi+xgLJ2FRRXAfNx9ZA8WpD4OeE/yHVMteCkw9k= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cert-manager/cert-manager v1.15.3 h1:/u9T0griwd5MegPfWbB7v0KcVcT9OJrEvPNhc9tl7xQ= github.com/cert-manager/cert-manager v1.15.3/go.mod h1:stBge/DTvrhfQMB/93+Y62s+gQgZBsfL1o0C/4AL/mI= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -26,6 +30,7 @@ github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= 
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= @@ -54,6 +59,8 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -83,6 +90,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os= @@ -92,6 +101,18 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.2 h1:BpGDC87A2SaxbKgONsFLEX3kRcRJee2aLQbjXsuz0hA= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.2/go.mod h1:Rd8YnCqz+2FYsiGmE2DMlaLjQRB4v2jFNnzCt9YY4IM= +github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.2 h1:yncs8NglhE3hB+viNsabCAF9TBBDOBljHUyxHC5fSGY= +github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.2/go.mod h1:AfbzyEUFxJmSoTiMcgNHHjDKcorBVd9TIwx0viURgEw= +github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= +github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod 
h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= @@ -200,6 +221,8 @@ k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUx k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA= k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 h1:b2FmK8YH+QEwq/Sy2uAEhmqL5nPfGYbJOcaqjeYYZoA= k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.18.5 h1:nTHio/W+Q4aBlQMgbnC5hZb4IjIidyrizMai9P6n4Rk= +sigs.k8s.io/controller-runtime v0.18.5/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/gateway-api v1.1.0 h1:DsLDXCi6jR+Xz8/xd0Z1PYl2Pn0TyaFMOPPZIj4inDM= sigs.k8s.io/gateway-api v1.1.0/go.mod h1:ZH4lHrL2sDi0FHZ9jjneb8kKnGzFWyrTya35sWUTrRs= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/internal/controller/common_test.go b/internal/controller/common_test.go index 5b946c0..5760c1d 100644 --- a/internal/controller/common_test.go +++ b/internal/controller/common_test.go @@ -30,6 +30,9 @@ import ( gardenerdnsscheme "github.com/gardener/external-dns-management/pkg/client/dns/clientset/versioned/scheme" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + monv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + promopFake "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake" + promopScheme "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme" "github.com/sap/cap-operator/pkg/apis/sme.sap.com/v1alpha1" copfake "github.com/sap/cap-operator/pkg/client/clientset/versioned/fake" smeScheme "github.com/sap/cap-operator/pkg/client/clientset/versioned/scheme" @@ -42,6 +45,9 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextFake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" + apiExtScheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -210,6 +216,8 @@ func initializeControllerForReconciliationTests(t *testing.T, items []ResourceAc gardenerdnsscheme.AddToScheme(scheme.Scheme) istioscheme.AddToScheme(scheme.Scheme) certManagerScheme.AddToScheme(scheme.Scheme) + apiExtScheme.AddToScheme(scheme.Scheme) + promopScheme.AddToScheme(scheme.Scheme) coreClient := k8sfake.NewSimpleClientset() copClient := copfake.NewSimpleClientset() @@ -217,6 +225,8 @@ func initializeControllerForReconciliationTests(t *testing.T, items []ResourceAc gardenerCertClient := gardenercertfake.NewSimpleClientset() gardenerDNSClient := gardenerdnsfake.NewSimpleClientset() certManagerClient := certManagerFake.NewSimpleClientset() + apiExtClient := apiextFake.NewSimpleClientset() + promopClient := promopFake.NewSimpleClientset() copClient.PrependReactor("create", "*", generateNameCreateHandler) 
copClient.PrependReactor("update", "*", removeStatusTimestampHandler) @@ -239,7 +249,7 @@ func initializeControllerForReconciliationTests(t *testing.T, items []ResourceAc gardenerCertClient.PrependReactor("*", "*", getErrorReactorWithResources(t, items)) gardenerCertClient.PrependReactor("delete-collection", "*", getDeleteCollectionHandler(t, gardenerDNSClient)) - c := NewController(coreClient, copClient, istioClient, gardenerCertClient, certManagerClient, gardenerDNSClient) + c := NewController(coreClient, copClient, istioClient, gardenerCertClient, certManagerClient, gardenerDNSClient, apiExtClient, promopClient) c.eventRecorder = events.NewFakeRecorder(10) return c } @@ -408,22 +418,16 @@ func processTestData(t *testing.T, c *Controller, data TestData, dataType TestDa var processFile = func(file string) { defer wg.Done() - i, err := os.ReadFile(file) + + resources, err := readYAMLResourcesFromFile(file) if err != nil { t.Error(err.Error()) } - - fileContents := string(i) - splits := strings.Split(fileContents, "---") - for _, part := range splits { - if part == "\n" || part == "" { - continue - } - + for i := range resources { if dataType == TestDataTypeInitial { - err = addInitialObjectToStore(t, []byte(part), c) + err = addInitialObjectToStore(resources[i], c) } else { - err = compareExpectedWithStore(t, []byte(part), c) + err = compareExpectedWithStore(t, resources[i], c) } if err != nil { t.Error(err.Error()) @@ -438,7 +442,25 @@ func processTestData(t *testing.T, c *Controller, data TestData, dataType TestDa wg.Wait() } -func addInitialObjectToStore(t *testing.T, resource []byte, c *Controller) error { +func readYAMLResourcesFromFile(file string) ([][]byte, error) { + i, err := os.ReadFile(file) + if err != nil { + return nil, err + } + + resources := [][]byte{} + fileContents := string(i) + splits := strings.Split(fileContents, "---") + for _, part := range splits { + if part == "\n" || part == "" { + continue + } + resources = append(resources, []byte(part)) + } + return resources, nil +} + +func addInitialObjectToStore(resource []byte, c *Controller) error { decoder := scheme.Codecs.UniversalDeserializer().Decode obj, _, err := decoder(resource, nil, nil) if err != nil { @@ -533,6 +555,18 @@ func addInitialObjectToStore(t *testing.T, resource []byte, c *Controller) error case *v1alpha1.CAPTenantOperation: err = c.crdInformerFactory.Sme().V1alpha1().CAPTenantOperations().Informer().GetIndexer().Add(obj) } + case *apiextv1.CustomResourceDefinition: + fakeClient, ok := c.apiExtClient.(*apiextFake.Clientset) + if !ok { + return fmt.Errorf("controller is not using a fake clientset") + } + fakeClient.Tracker().Add(obj) + case *monv1.ServiceMonitor: + fakeClient, ok := c.promClient.(*promopFake.Clientset) + if !ok { + return fmt.Errorf("controller is not using a fake clientset") + } + fakeClient.Tracker().Add(obj) default: return fmt.Errorf("unknown object type") } @@ -592,6 +626,9 @@ func compareExpectedWithStore(t *testing.T, resource []byte, c *Controller) erro case *v1alpha1.CAPTenantOperation: actual, err = fakeClient.Tracker().Get(gvk.GroupVersion().WithResource("captenantoperations"), mo.GetNamespace(), mo.GetName()) } + case *monv1.ServiceMonitor: + fakeClient := c.promClient.(*promopFake.Clientset) + actual, err = fakeClient.Tracker().Get(gvk.GroupVersion().WithResource("servicemonitors"), mo.GetNamespace(), mo.GetName()) default: return fmt.Errorf("unknown expected object type") } diff --git a/internal/controller/controller.go b/internal/controller/controller.go index 
d98598e..e7c259f 100644 --- a/internal/controller/controller.go +++ b/internal/controller/controller.go @@ -33,6 +33,9 @@ import ( "k8s.io/client-go/tools/events" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" + + promop "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" + apiext "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" ) type Controller struct { @@ -42,6 +45,8 @@ type Controller struct { gardenerCertificateClient gardenerCert.Interface certManagerCertificateClient certManager.Interface gardenerDNSClient gardenerDNS.Interface + apiExtClient apiext.Interface + promClient promop.Interface kubeInformerFactory informers.SharedInformerFactory crdInformerFactory crdInformers.SharedInformerFactory istioInformerFactory istioInformers.SharedInformerFactory @@ -53,7 +58,7 @@ type Controller struct { eventRecorder events.EventRecorder } -func NewController(client kubernetes.Interface, crdClient versioned.Interface, istioClient istio.Interface, gardenerCertificateClient gardenerCert.Interface, certManagerCertificateClient certManager.Interface, gardenerDNSClient gardenerDNS.Interface) *Controller { +func NewController(client kubernetes.Interface, crdClient versioned.Interface, istioClient istio.Interface, gardenerCertificateClient gardenerCert.Interface, certManagerCertificateClient certManager.Interface, gardenerDNSClient gardenerDNS.Interface, apiExtClient apiext.Interface, promClient promop.Interface) *Controller { queues := map[int]workqueue.RateLimitingInterface{ ResourceCAPApplication: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), ResourceCAPApplicationVersion: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), @@ -101,6 +106,8 @@ func NewController(client kubernetes.Interface, crdClient versioned.Interface, i gardenerCertificateClient: gardenerCertificateClient, certManagerCertificateClient: certManagerCertificateClient, gardenerDNSClient: gardenerDNSClient, + apiExtClient: apiExtClient, + promClient: promClient, kubeInformerFactory: kubeInformerFactory, crdInformerFactory: crdInformerFactory, istioInformerFactory: istioInformerFactory, @@ -180,6 +187,13 @@ func (c *Controller) Start(ctx context.Context) { }(k) } + // start version cleanup routines + wg.Add(1) + go func() { + defer wg.Done() + c.startVersionCleanup(qCxt) + }() + // wait for workers wg.Wait() } diff --git a/internal/controller/controller_test.go b/internal/controller/controller_test.go index 5b42e38..093cef7 100644 --- a/internal/controller/controller_test.go +++ b/internal/controller/controller_test.go @@ -88,6 +88,8 @@ func TestController_processQueue(t *testing.T) { istioClient: c.istioClient, gardenerCertificateClient: c.gardenerCertificateClient, gardenerDNSClient: c.gardenerDNSClient, + apiExtClient: c.apiExtClient, + promClient: c.promClient, kubeInformerFactory: dummyKubeInformerFactory, crdInformerFactory: c.crdInformerFactory, istioInformerFactory: c.istioInformerFactory, diff --git a/internal/controller/reconcile-capapplicationversion.go b/internal/controller/reconcile-capapplicationversion.go index e3458a5..f82ac90 100644 --- a/internal/controller/reconcile-capapplicationversion.go +++ b/internal/controller/reconcile-capapplicationversion.go @@ -13,6 +13,7 @@ import ( "strings" "time" + monv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "github.com/sap/cap-operator/internal/util" "github.com/sap/cap-operator/pkg/apis/sme.sap.com/v1alpha1" "golang.org/x/exp/slices" @@ -20,10 +21,14 @@ 
import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apiextensions-apiserver/pkg/apihelpers" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/klog/v2" ) const ( @@ -31,8 +36,9 @@ const ( ) const ( - CategoryWorkload = "Workload" - CategoryService = "Service" + CategoryWorkload = "Workload" + CategoryService = "Service" + CategoryServiceMonitor = "ServiceMonitor" ) const ( @@ -397,7 +403,9 @@ func (c *Controller) updateServices(ca *v1alpha1.CAPApplication, cav *v1alpha1.C return err } } - return nil + + // attempt to reconcile service monitors + return c.updateServiceMonitors(context.TODO(), ca, cav, workloadServicePortInfos) } // newService creates a new Service for a CAV resource. It also sets the appropriate OwnerReferences. @@ -437,6 +445,104 @@ func newService(ca *v1alpha1.CAPApplication, cav *v1alpha1.CAPApplicationVersion // #endregion Service +// #region ServiceMonitor +func (c *Controller) checkServiceMonitorCapability(ctx context.Context) error { + crdName := "servicemonitors.monitoring.coreos.com" + crd, err := c.apiExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, crdName, v1.GetOptions{}) + if err != nil { + return fmt.Errorf("could not get custom resource definition %s: %v", crdName, err) + } + requiredVersion := "v1" + if !apihelpers.HasVersionServed(crd, requiredVersion) { + return fmt.Errorf("version %s of custom resource %s is not served", requiredVersion, crdName) + } + if !apihelpers.IsCRDConditionTrue(crd, apiextv1.Established) { + return fmt.Errorf("custom resource %s condition %s not true", crdName, apiextv1.Established) + } + return nil +} + +func (c *Controller) updateServiceMonitors(ctx context.Context, ca *v1alpha1.CAPApplication, cav *v1alpha1.CAPApplicationVersion, workloadServicePortInfos []servicePortInfo) error { + if err := c.checkServiceMonitorCapability(ctx); err != nil { + klog.ErrorS(err, "could not confirm availability of service monitor resource; service monitors will not be reconciled") + return nil + } + + for i := range cav.Spec.Workloads { + wl := cav.Spec.Workloads[i] + if wl.DeploymentDefinition == nil || wl.DeploymentDefinition.Monitoring == nil || wl.DeploymentDefinition.Monitoring.ScrapeConfig == nil { + continue // do not reconcile service monitors + } + + var wlPortInfos *servicePortInfo + for j := range workloadServicePortInfos { + item := workloadServicePortInfos[j] + if item.WorkloadName == getWorkloadName(cav.Name, wl.Name) { + wlPortInfos = &item + break + } + } + if wlPortInfos == nil { + return fmt.Errorf("could not identify workload port information for workload %s in version %s", wl.Name, cav.Name) + } + + portVerified := false + for j := range wlPortInfos.Ports { + if wlPortInfos.Ports[j].Name == wl.DeploymentDefinition.Monitoring.ScrapeConfig.WorkloadPort { + portVerified = true + break + } + } + if !portVerified { + return fmt.Errorf("invalid port reference in workload %s monitoring config of version %s", wl.Name, cav.Name) + } + + sm, err := c.promClient.MonitoringV1().ServiceMonitors(cav.Namespace).Get(ctx, wlPortInfos.WorkloadName+ServiceSuffix, v1.GetOptions{}) + if err != nil { + if k8sErrors.IsNotFound(err) { + sm, err = c.promClient.MonitoringV1().ServiceMonitors(cav.Namespace).Create(ctx, newServiceMonitor(ca, 
cav, &wl, wlPortInfos), v1.CreateOptions{}) + if err == nil { + util.LogInfo("ServiceMonitor created successfully", string(Processing), cav, sm, "version", cav.Spec.Version) + } + } + } + err = doChecks(err, sm, cav, wlPortInfos.WorkloadName+ServiceSuffix) + if err != nil { + return err + } + } + + return nil +} + +func newServiceMonitor(ca *v1alpha1.CAPApplication, cav *v1alpha1.CAPApplicationVersion, wl *v1alpha1.WorkloadDetails, wlPortInfos *servicePortInfo) *monv1.ServiceMonitor { + config := wl.DeploymentDefinition.Monitoring.ScrapeConfig + return &monv1.ServiceMonitor{ + ObjectMeta: v1.ObjectMeta{ + Name: wlPortInfos.WorkloadName + ServiceSuffix, + Namespace: cav.Namespace, + Labels: copyMaps(wl.Labels, getLabels(ca, cav, CategoryServiceMonitor, string(wl.DeploymentDefinition.Type), wlPortInfos.WorkloadName+ServiceSuffix, true)), + Annotations: copyMaps(wl.Annotations, getAnnotations(ca, cav, true)), + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(cav, v1alpha1.SchemeGroupVersion.WithKind(v1alpha1.CAPApplicationVersionKind)), + }, + }, + Spec: monv1.ServiceMonitorSpec{ + Endpoints: []monv1.Endpoint{{ + Port: config.WorkloadPort, + Interval: monv1.Duration(config.ScrapeInterval), + ScrapeTimeout: monv1.Duration(config.Timeout), + Path: config.Path, + }}, + Selector: v1.LabelSelector{ + MatchLabels: copyMaps(wl.Labels, getLabels(ca, cav, CategoryService, wlPortInfos.DeploymentType, wlPortInfos.WorkloadName+ServiceSuffix, false)), + }, + }, + } +} + +// #endregion ServiceMonitor + // #region NetworkPolicy func (c *Controller) updateNetworkPolicies(ca *v1alpha1.CAPApplication, cav *v1alpha1.CAPApplicationVersion) error { var ( @@ -601,7 +707,7 @@ func newDeployment(ca *v1alpha1.CAPApplication, cav *v1alpha1.CAPApplicationVers } func createDeployment(params *DeploymentParameters) *appsv1.Deployment { - workloadName := params.CAV.Name + "-" + strings.ToLower(params.WorkloadDetails.Name) + workloadName := getWorkloadName(params.CAV.Name, params.WorkloadDetails.Name) annotations := copyMaps(params.WorkloadDetails.Annotations, getAnnotations(params.CA, params.CAV, true)) labels := copyMaps(params.WorkloadDetails.Labels, getLabels(params.CA, params.CAV, CategoryWorkload, string(params.WorkloadDetails.DeploymentDefinition.Type), workloadName, true)) diff --git a/internal/controller/reconcile-capapplicationversion_test.go b/internal/controller/reconcile-capapplicationversion_test.go index 183b51f..4f8c56d 100644 --- a/internal/controller/reconcile-capapplicationversion_test.go +++ b/internal/controller/reconcile-capapplicationversion_test.go @@ -7,6 +7,7 @@ package controller import ( "context" + "fmt" "testing" ) @@ -802,3 +803,46 @@ func TestCAV_DeploymentFailure(t *testing.T) { }, ) } + +func TestCAV_ServiceMonitorCreation(t *testing.T) { + reconcileTestItem( + context.TODO(), t, + QueueItem{Key: ResourceCAPApplicationVersion, ResourceKey: NamespacedResourceKey{Namespace: "default", Name: "test-cap-01-cav-v1"}}, + TestData{ + description: "capapplication version - service monitor creation", + initialResources: []string{ + "testdata/common/crd-servicemonitors.yaml", + "testdata/common/capapplication.yaml", + "testdata/common/credential-secrets.yaml", + "testdata/version-monitoring/cav-v1-deletion-rules-processing.yaml", + "testdata/capapplicationversion/deployments-ready.yaml", + "testdata/capapplicationversion/content-job-completed.yaml", + }, + expectedResources: "testdata/version-monitoring/servicemonitors-cav-v1.yaml", + backlogItems: []string{}, + }, + ) +} + +func 
TestCAV_InvalidMonitoringConfig(t *testing.T) {
+	err := reconcileTestItem(
+		context.TODO(), t,
+		QueueItem{Key: ResourceCAPApplicationVersion, ResourceKey: NamespacedResourceKey{Namespace: "default", Name: "test-cap-01-cav-v1"}},
+		TestData{
+			description: "capapplication version - invalid monitoring config",
+			initialResources: []string{
+				"testdata/common/crd-servicemonitors.yaml",
+				"testdata/common/capapplication.yaml",
+				"testdata/common/credential-secrets.yaml",
+				"testdata/version-monitoring/cav-v1-monitoring-port-missing.yaml",
+				"testdata/capapplicationversion/deployments-ready.yaml",
+				"testdata/capapplicationversion/content-job-completed.yaml",
+			},
+			expectError: true,
+			backlogItems: []string{},
+		},
+	)
+	if err == nil || err.Error() != fmt.Sprintf("invalid port reference in workload %s monitoring config of version %s", "app-router", "test-cap-01-cav-v1") {
+		t.Errorf("expected invalid port reference error for workload app-router, got: %v", err)
+	}
+}
diff --git a/internal/controller/reconcile.go b/internal/controller/reconcile.go
index b182576..2c941c7 100644
--- a/internal/controller/reconcile.go
+++ b/internal/controller/reconcile.go
@@ -9,6 +9,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"strings"
 
 	"github.com/sap/cap-operator/internal/util"
 	"github.com/sap/cap-operator/pkg/apis/sme.sap.com/v1alpha1"
@@ -46,6 +47,7 @@ const (
 	AnnotationKubernetesDNSTarget = "external-dns.alpha.kubernetes.io/hostname"
 	AnnotationSubscriptionContextSecret = "sme.sap.com/subscription-context-secret"
 	AnnotationProviderSubAccountId = "sme.sap.com/provider-sub-account-id"
+	AnnotationEnableCleanupMonitoring = "sme.sap.com/enable-cleanup-monitoring"
 	FinalizerCAPApplication = "sme.sap.com/capapplication"
 	FinalizerCAPApplicationVersion = "sme.sap.com/capapplicationversion"
 	FinalizerCAPTenant = "sme.sap.com/captenant"
@@ -547,7 +549,7 @@ func updateWorkloadPortInfo(cavName string, workloadName string, deploymentType
 	if len(servicePorts) > 0 {
 		workloadPortInfo = &servicePortInfo{
-			WorkloadName: cavName + "-" + workloadName,
+			WorkloadName: getWorkloadName(cavName, workloadName),
 			DeploymentType: string(deploymentType),
 			Ports: servicePorts,
 			Destinations: destinationDetails,
@@ -593,3 +595,7 @@ func updateInitContainers(initContainers []corev1.Container, additionalEnv []cor
 	}
 	return &updatedInitContainers
 }
+
+func getWorkloadName(cavName, workloadName string) string {
+	return fmt.Sprintf("%s-%s", cavName, strings.ToLower(workloadName))
+}
diff --git a/internal/controller/reconcile_test.go b/internal/controller/reconcile_test.go
index d6ad544..d2995ba 100644
--- a/internal/controller/reconcile_test.go
+++ b/internal/controller/reconcile_test.go
@@ -27,10 +27,12 @@ import (
 	certfake "github.com/gardener/cert-management/pkg/client/cert/clientset/versioned/fake"
 	dnsv1alpha1 "github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1"
 	dnsfake "github.com/gardener/external-dns-management/pkg/client/dns/clientset/versioned/fake"
+	promopFake "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake"
 	"github.com/sap/cap-operator/pkg/apis/sme.sap.com/v1alpha1"
 	"github.com/sap/cap-operator/pkg/client/clientset/versioned/fake"
 	istionwv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1"
 	istiofake "istio.io/client-go/pkg/clientset/versioned/fake"
+	apiextFake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
 )
 
 const (
@@ -331,6 +333,10 @@ func getTestController(resources testResources) *Controller {
 	crdClient := fake.NewSimpleClientset(crdObjects...)
+ apiExtClient := apiextFake.NewSimpleClientset() + + promopClient := promopFake.NewSimpleClientset() + istioClient := istiofake.NewSimpleClientset(istioObjects...) certClient := certfake.NewSimpleClientset(gardenerCertObjects...) @@ -339,7 +345,7 @@ func getTestController(resources testResources) *Controller { dnsClient := dnsfake.NewSimpleClientset(dnsObjects...) - c := NewController(coreClient, crdClient, istioClient, certClient, certManagerCertClient, dnsClient) + c := NewController(coreClient, crdClient, istioClient, certClient, certManagerCertClient, dnsClient, apiExtClient, promopClient) for _, ca := range resources.cas { if ca != nil { diff --git a/internal/controller/testdata/common/crd-servicemonitors.yaml b/internal/controller/testdata/common/crd-servicemonitors.yaml new file mode 100644 index 0000000..f829ea7 --- /dev/null +++ b/internal/controller/testdata/common/crd-servicemonitors.yaml @@ -0,0 +1,66 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + operator.prometheus.io/version: 0.76.0 + creationTimestamp: "2024-08-22T20:48:51Z" + generation: 1 + name: servicemonitors.monitoring.coreos.com + resourceVersion: "35356816" + uid: e9a5eb91-fa13-407b-86f4-58641b190d24 +spec: + conversion: + strategy: None + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: ServiceMonitor + listKind: ServiceMonitorList + plural: servicemonitors + shortNames: + - smon + singular: servicemonitor + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + required: + - spec + type: object + served: true + storage: true +status: + acceptedNames: + categories: + - prometheus-operator + kind: ServiceMonitor + listKind: ServiceMonitorList + plural: servicemonitors + shortNames: + - smon + singular: servicemonitor + conditions: + - lastTransitionTime: "2024-08-22T20:48:52Z" + message: no conflicts found + reason: NoConflicts + status: "True" + type: NamesAccepted + - lastTransitionTime: "2024-08-22T20:48:52Z" + message: the initial names have been accepted + reason: InitialNamesAccepted + status: "True" + type: Established + storedVersions: + - v1 diff --git a/internal/controller/testdata/version-monitoring/ca-cleanup-dry-run-enabled.yaml b/internal/controller/testdata/version-monitoring/ca-cleanup-dry-run-enabled.yaml new file mode 100644 index 0000000..88dc073 --- /dev/null +++ b/internal/controller/testdata/version-monitoring/ca-cleanup-dry-run-enabled.yaml @@ -0,0 +1,41 @@ +apiVersion: sme.sap.com/v1alpha1 +kind: CAPApplication +metadata: + finalizers: + - sme.sap.com/capapplication + generation: 2 + name: test-cap-01 + namespace: default + annotations: + sme.sap.com/enable-cleanup-monitoring: "dry-run" + resourceVersion: "11373799" + uid: 3c7ba7cb-dc04-4fd1-be86-3eb3a5c64a98 +spec: + btp: + services: + - class: xsuaa + name: cap-uaa + secret: cap-cap-01-uaa-bind-cf + - class: xsuaa + name: cap-uaa2 + secret: cap-cap-01-uaa2-bind-cf + - class: saas-registry + name: cap-saas-registry + secret: cap-cap-01-saas-bind-cf + - class: service-manager + name: cap-service-manager + secret: cap-cap-01-svc-man-bind-cf + btpAppName: test-cap-01 + domains: + istioIngressGatewayLabels: + - name: app + value: istio-ingressgateway + - name: istio + value: ingressgateway + primary: app-domain.test.local + secondary: + - foo.bar.local + globalAccountId: 
btp-glo-acc-id + provider: + subDomain: my-provider + tenantId: tenant-id-for-provider diff --git a/internal/controller/testdata/version-monitoring/ca-cleanup-enabled.yaml b/internal/controller/testdata/version-monitoring/ca-cleanup-enabled.yaml new file mode 100644 index 0000000..6101a39 --- /dev/null +++ b/internal/controller/testdata/version-monitoring/ca-cleanup-enabled.yaml @@ -0,0 +1,41 @@ +apiVersion: sme.sap.com/v1alpha1 +kind: CAPApplication +metadata: + finalizers: + - sme.sap.com/capapplication + generation: 2 + name: test-cap-01 + namespace: default + annotations: + sme.sap.com/enable-cleanup-monitoring: "true" + resourceVersion: "11373799" + uid: 3c7ba7cb-dc04-4fd1-be86-3eb3a5c64a98 +spec: + btp: + services: + - class: xsuaa + name: cap-uaa + secret: cap-cap-01-uaa-bind-cf + - class: xsuaa + name: cap-uaa2 + secret: cap-cap-01-uaa2-bind-cf + - class: saas-registry + name: cap-saas-registry + secret: cap-cap-01-saas-bind-cf + - class: service-manager + name: cap-service-manager + secret: cap-cap-01-svc-man-bind-cf + btpAppName: test-cap-01 + domains: + istioIngressGatewayLabels: + - name: app + value: istio-ingressgateway + - name: istio + value: ingressgateway + primary: app-domain.test.local + secondary: + - foo.bar.local + globalAccountId: btp-glo-acc-id + provider: + subDomain: my-provider + tenantId: tenant-id-for-provider diff --git a/internal/controller/testdata/version-monitoring/cat-consumer-v2-ready-never.yaml b/internal/controller/testdata/version-monitoring/cat-consumer-v2-ready-never.yaml new file mode 100644 index 0000000..4cd0bf7 --- /dev/null +++ b/internal/controller/testdata/version-monitoring/cat-consumer-v2-ready-never.yaml @@ -0,0 +1,38 @@ +apiVersion: sme.sap.com/v1alpha1 +kind: CAPTenant +metadata: + finalizers: + - sme.sap.com/captenant + labels: + sme.sap.com/btp-app-identifier-hash: f20cc8aeb2003b3abc33f749a16bd53544b6bab2 + sme.sap.com/btp-tenant-id: tenant-id-for-consumer + sme.sap.com/owner-generation: "2" + sme.sap.com/owner-identifier-hash: 1f74ae2fbff71a708786a4df4bb2ca87ec603581 + sme.sap.com/tenant-type: consumer + annotations: + sme.sap.com/btp-app-identifier: btp-glo-acc-id.test-cap-01 + sme.sap.com/owner-identifier: default.test-cap-01 + sme.sap.com/btp-app-identifier-hash: f20cc8aeb2003b3abc33f749a16bd53544b6bab2 + name: test-cap-01-consumer + namespace: default + ownerReferences: + - apiVersion: sme.sap.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: CAPApplication + name: test-cap-01 + uid: 3c7ba7cb-dc04-4fd1-be86-3eb3a5c64a98 +spec: + capApplicationInstance: test-cap-01 + subDomain: my-consumer + tenantId: tenant-id-for-consumer + version: 8.9.10 + versionUpgradeStrategy: never +status: + conditions: + - message: "CAPTenantOperation default.test-cap-01-provider-s6f4l successfully completed" + reason: ProvisioningCompleted + status: "True" + type: Ready + state: Ready + currentCAPApplicationVersionInstance: test-cap-01-cav-v2 diff --git a/internal/controller/testdata/version-monitoring/cat-consumer-v2-upgrading.yaml b/internal/controller/testdata/version-monitoring/cat-consumer-v2-upgrading.yaml new file mode 100644 index 0000000..4cef901 --- /dev/null +++ b/internal/controller/testdata/version-monitoring/cat-consumer-v2-upgrading.yaml @@ -0,0 +1,38 @@ +apiVersion: sme.sap.com/v1alpha1 +kind: CAPTenant +metadata: + finalizers: + - sme.sap.com/captenant + labels: + sme.sap.com/btp-app-identifier-hash: f20cc8aeb2003b3abc33f749a16bd53544b6bab2 + sme.sap.com/btp-tenant-id: tenant-id-for-consumer + 
sme.sap.com/owner-generation: "2" + sme.sap.com/owner-identifier-hash: 1f74ae2fbff71a708786a4df4bb2ca87ec603581 + sme.sap.com/tenant-type: consumer + annotations: + sme.sap.com/btp-app-identifier: btp-glo-acc-id.test-cap-01 + sme.sap.com/owner-identifier: default.test-cap-01 + sme.sap.com/btp-app-identifier-hash: f20cc8aeb2003b3abc33f749a16bd53544b6bab2 + name: test-cap-01-consumer + namespace: default + ownerReferences: + - apiVersion: sme.sap.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: CAPApplication + name: test-cap-01 + uid: 3c7ba7cb-dc04-4fd1-be86-3eb3a5c64a98 +spec: + capApplicationInstance: test-cap-01 + subDomain: my-consumer + tenantId: tenant-id-for-consumer + version: 11.12.13 + versionUpgradeStrategy: always +status: + state: Upgrading + currentCAPApplicationVersionInstance: test-cap-01-cav-v2 + conditions: + - message: "waiting for CAPTenantOperation default.test-cap-01-consumer-ctop-gen of type upgrade to complete" + reason: UpgradeOperationCreated + status: "False" + type: Ready diff --git a/internal/controller/testdata/version-monitoring/cat-provider-v2-ready.yaml b/internal/controller/testdata/version-monitoring/cat-provider-v2-ready.yaml new file mode 100644 index 0000000..2088a5c --- /dev/null +++ b/internal/controller/testdata/version-monitoring/cat-provider-v2-ready.yaml @@ -0,0 +1,38 @@ +apiVersion: sme.sap.com/v1alpha1 +kind: CAPTenant +metadata: + finalizers: + - sme.sap.com/captenant + annotations: + sme.sap.com/btp-app-identifier: btp-glo-acc-id.test-cap-01 + sme.sap.com/owner-identifier: default.test-cap-01 + sme.sap.com/subscription-context-secret: test-cap-01-gen + labels: + sme.sap.com/btp-app-identifier-hash: f20cc8aeb2003b3abc33f749a16bd53544b6bab2 + sme.sap.com/btp-tenant-id: tenant-id-for-provider + sme.sap.com/owner-generation: "2" + sme.sap.com/owner-identifier-hash: 1f74ae2fbff71a708786a4df4bb2ca87ec603581 + sme.sap.com/tenant-type: provider + name: test-cap-01-provider + namespace: default + ownerReferences: + - apiVersion: sme.sap.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: CAPApplication + name: test-cap-01 + uid: 3c7ba7cb-dc04-4fd1-be86-3eb3a5c64a98 +spec: + capApplicationInstance: test-cap-01 + subDomain: my-provider + tenantId: tenant-id-for-provider + version: 8.9.10 + versionUpgradeStrategy: always +status: + conditions: + - message: "CAPTenantOperation default.test-cap-01-provider-s6f4l successfully completed" + reason: ProvisioningCompleted + status: "True" + type: Ready + state: Ready + currentCAPApplicationVersionInstance: test-cap-01-cav-v2 diff --git a/internal/controller/testdata/version-monitoring/cat-provider-v3-ready.yaml b/internal/controller/testdata/version-monitoring/cat-provider-v3-ready.yaml new file mode 100644 index 0000000..8461399 --- /dev/null +++ b/internal/controller/testdata/version-monitoring/cat-provider-v3-ready.yaml @@ -0,0 +1,38 @@ +apiVersion: sme.sap.com/v1alpha1 +kind: CAPTenant +metadata: + finalizers: + - sme.sap.com/captenant + annotations: + sme.sap.com/btp-app-identifier: btp-glo-acc-id.test-cap-01 + sme.sap.com/owner-identifier: default.test-cap-01 + sme.sap.com/subscription-context-secret: test-cap-01-gen + labels: + sme.sap.com/btp-app-identifier-hash: f20cc8aeb2003b3abc33f749a16bd53544b6bab2 + sme.sap.com/btp-tenant-id: tenant-id-for-provider + sme.sap.com/owner-generation: "2" + sme.sap.com/owner-identifier-hash: 1f74ae2fbff71a708786a4df4bb2ca87ec603581 + sme.sap.com/tenant-type: provider + name: test-cap-01-provider + namespace: default + ownerReferences: + 
- apiVersion: sme.sap.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: CAPApplication + name: test-cap-01 + uid: 3c7ba7cb-dc04-4fd1-be86-3eb3a5c64a98 +spec: + capApplicationInstance: test-cap-01 + subDomain: my-provider + tenantId: tenant-id-for-provider + version: 11.12.13 + versionUpgradeStrategy: always +status: + conditions: + - message: "CAPTenantOperation default.test-cap-01-provider-s6f4l successfully completed" + reason: ProvisioningCompleted + status: "True" + type: Ready + state: Ready + currentCAPApplicationVersionInstance: test-cap-01-cav-v3 diff --git a/internal/controller/testdata/version-monitoring/cav-v1-deletion-rules-error.yaml b/internal/controller/testdata/version-monitoring/cav-v1-deletion-rules-error.yaml new file mode 100644 index 0000000..776db1e --- /dev/null +++ b/internal/controller/testdata/version-monitoring/cav-v1-deletion-rules-error.yaml @@ -0,0 +1,91 @@ +apiVersion: sme.sap.com/v1alpha1 +kind: CAPApplicationVersion +metadata: + creationTimestamp: "2022-03-18T22:14:33Z" + generation: 1 + annotations: + sme.sap.com/btp-app-identifier: btp-glo-acc-id.test-cap-01 + sme.sap.com/owner-identifier: default.test-cap-01 + labels: + sme.sap.com/btp-app-identifier-hash: f20cc8aeb2003b3abc33f749a16bd53544b6bab2 + sme.sap.com/owner-generation: "2" + sme.sap.com/owner-identifier-hash: 1f74ae2fbff71a708786a4df4bb2ca87ec603581 + name: test-cap-01-cav-v1 + namespace: default + ownerReferences: + - apiVersion: sme.sap.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: CAPApplication + name: test-cap-01 + uid: 3c7ba7cb-dc04-4fd1-be86-3eb3a5c64a98 + resourceVersion: "11371108" + uid: 5e64489b-7346-4984-8617-e8c37338b3d8 + finalizers: + - sme.sap.com/capapplicationversion +spec: + capApplicationInstance: test-cap-01 + registrySecrets: + - regcred + version: 5.6.7 + workloads: + - name: cap-backend-srv + consumedBTPServices: + - cap-uaa + - cap-service-manager + - cap-saas-registry + deploymentDefinition: + type: CAP + image: docker.image.repo/srv/server:latest + monitoring: + deletionRules: + metrics: + - name: total_http_requests + type: Counter + calculationPeriod: 2m + thresholdValue: "0.01" + - name: active_jobs + type: Gauge + calculationPeriod: 3m + thresholdValue: "0" + - name: content + consumedBTPServices: + - cap-uaa + jobDefinition: + type: Content + image: docker.image.repo/content/cap-content:latest + - name: mtx + consumedBTPServices: + - cap-uaa + - cap-service-manager + - cap-saas-registry + jobDefinition: + type: "TenantOperation" + image: docker.image.repo/srv/server:latest + - name: app-router + consumedBTPServices: + - cap-uaa + - cap-saas-registry + deploymentDefinition: + type: Router + image: docker.image.repo/approuter/approuter:latest + monitoring: + deletionRules: + expression: "scalar(sum(avg_over_time(current_sessions{job=\"test-cap-01-cav-v1-app-router-svc\"}[12m]))) <= bool 1" + - name: no-rules + consumedBTPServices: [] + deploymentDefinition: + type: Additional + image: docker.image.repo/some/image:latest + monitoring: {} +status: + conditions: + - reason: ErrorInWorkloadStatus + observedGeneration: 1 + status: "False" + type: Ready + message: "content deployer error in job 'test-cap-01-cav-v1-content'" + observedGeneration: 1 + finishedJobs: + - "test-cap-01-cav-v1-content" + state: Error diff --git a/internal/controller/testdata/version-monitoring/cav-v1-deletion-rules-processing.yaml b/internal/controller/testdata/version-monitoring/cav-v1-deletion-rules-processing.yaml new file mode 100644 index 
0000000..385decf --- /dev/null +++ b/internal/controller/testdata/version-monitoring/cav-v1-deletion-rules-processing.yaml @@ -0,0 +1,93 @@ +apiVersion: sme.sap.com/v1alpha1 +kind: CAPApplicationVersion +metadata: + creationTimestamp: "2022-03-18T22:14:33Z" + generation: 1 + annotations: + sme.sap.com/btp-app-identifier: btp-glo-acc-id.test-cap-01 + sme.sap.com/owner-identifier: default.test-cap-01 + labels: + sme.sap.com/btp-app-identifier-hash: f20cc8aeb2003b3abc33f749a16bd53544b6bab2 + sme.sap.com/owner-generation: "2" + sme.sap.com/owner-identifier-hash: 1f74ae2fbff71a708786a4df4bb2ca87ec603581 + name: test-cap-01-cav-v1 + namespace: default + ownerReferences: + - apiVersion: sme.sap.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: CAPApplication + name: test-cap-01 + uid: 3c7ba7cb-dc04-4fd1-be86-3eb3a5c64a98 + resourceVersion: "11371108" + uid: 5e64489b-7346-4984-8617-e8c37338b3d8 + finalizers: + - sme.sap.com/capapplicationversion +spec: + capApplicationInstance: test-cap-01 + registrySecrets: + - regcred + version: 5.6.7 + workloads: + - name: cap-backend-srv + consumedBTPServices: + - cap-uaa + - cap-service-manager + - cap-saas-registry + deploymentDefinition: + type: CAP + image: docker.image.repo/srv/server:latest + monitoring: + deletionRules: + metrics: + - name: total_http_requests + type: Counter + calculationPeriod: 2m + thresholdValue: "0.01" + - name: active_jobs + type: Gauge + calculationPeriod: 3m + thresholdValue: "0" + - name: content + consumedBTPServices: + - cap-uaa + jobDefinition: + type: Content + image: docker.image.repo/content/cap-content:latest + - name: mtx + consumedBTPServices: + - cap-uaa + - cap-service-manager + - cap-saas-registry + jobDefinition: + type: "TenantOperation" + image: docker.image.repo/srv/server:latest + - name: app-router + consumedBTPServices: + - cap-uaa + - cap-saas-registry + deploymentDefinition: + type: Router + image: docker.image.repo/approuter/approuter:latest + ports: + - name: metrics-port + port: 9000 + appProtocol: http + networkPolicy: Cluster + monitoring: + scrapeConfig: + port: metrics-port + interval: 10s + path: /metrics + deletionRules: + expression: "scalar(sum(avg_over_time(current_sessions{job=\"test-cap-01-cav-v1-app-router-svc\"}[12m]))) <= bool 1" +status: + conditions: + - reason: ReadyForProcessing + status: "False" + observedGeneration: 1 + type: Ready + finishedJobs: + - test-cap-01-cav-v1-content + observedGeneration: 1 + state: Processing diff --git a/internal/controller/testdata/version-monitoring/cav-v1-deletion-rules.yaml b/internal/controller/testdata/version-monitoring/cav-v1-deletion-rules.yaml new file mode 100644 index 0000000..fa70b6e --- /dev/null +++ b/internal/controller/testdata/version-monitoring/cav-v1-deletion-rules.yaml @@ -0,0 +1,93 @@ +apiVersion: sme.sap.com/v1alpha1 +kind: CAPApplicationVersion +metadata: + creationTimestamp: "2022-03-18T22:14:33Z" + generation: 1 + annotations: + sme.sap.com/btp-app-identifier: btp-glo-acc-id.test-cap-01 + sme.sap.com/owner-identifier: default.test-cap-01 + labels: + sme.sap.com/btp-app-identifier-hash: f20cc8aeb2003b3abc33f749a16bd53544b6bab2 + sme.sap.com/owner-generation: "2" + sme.sap.com/owner-identifier-hash: 1f74ae2fbff71a708786a4df4bb2ca87ec603581 + name: test-cap-01-cav-v1 + namespace: default + ownerReferences: + - apiVersion: sme.sap.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: CAPApplication + name: test-cap-01 + uid: 3c7ba7cb-dc04-4fd1-be86-3eb3a5c64a98 + resourceVersion: "11371108" + uid: 
5e64489b-7346-4984-8617-e8c37338b3d8 + finalizers: + - sme.sap.com/capapplicationversion +spec: + capApplicationInstance: test-cap-01 + registrySecrets: + - regcred + version: 5.6.7 + workloads: + - name: cap-backend-srv + consumedBTPServices: + - cap-uaa + - cap-service-manager + - cap-saas-registry + deploymentDefinition: + type: CAP + image: docker.image.repo/srv/server:latest + monitoring: + deletionRules: + metrics: + - name: total_http_requests + type: Counter + calculationPeriod: 2m + thresholdValue: "0.01" + - name: active_jobs + type: Gauge + calculationPeriod: 3m + thresholdValue: "0" + - name: content + consumedBTPServices: + - cap-uaa + jobDefinition: + type: Content + image: docker.image.repo/content/cap-content:latest + - name: mtx + consumedBTPServices: + - cap-uaa + - cap-service-manager + - cap-saas-registry + jobDefinition: + type: "TenantOperation" + image: docker.image.repo/srv/server:latest + - name: app-router + consumedBTPServices: + - cap-uaa + - cap-saas-registry + deploymentDefinition: + type: Router + image: docker.image.repo/approuter/approuter:latest + ports: + - name: metrics-port + port: 9000 + appProtocol: http + networkPolicy: Cluster + monitoring: + scrapeConfig: + port: metrics-port + interval: 10s + path: /metrics + deletionRules: + expression: "scalar(sum(avg_over_time(current_sessions{job=\"test-cap-01-cav-v1-app-router-svc\"}[12m]))) <= bool 1" +status: + conditions: + - reason: WorkloadsReady + observedGeneration: 1 + status: "True" + type: Ready + observedGeneration: 1 + finishedJobs: + - test-cap-01-cav-v1-content + state: Ready diff --git a/internal/controller/testdata/version-monitoring/cav-v1-monitoring-port-missing.yaml b/internal/controller/testdata/version-monitoring/cav-v1-monitoring-port-missing.yaml new file mode 100644 index 0000000..fe4de36 --- /dev/null +++ b/internal/controller/testdata/version-monitoring/cav-v1-monitoring-port-missing.yaml @@ -0,0 +1,88 @@ +apiVersion: sme.sap.com/v1alpha1 +kind: CAPApplicationVersion +metadata: + creationTimestamp: "2022-03-18T22:14:33Z" + generation: 1 + annotations: + sme.sap.com/btp-app-identifier: btp-glo-acc-id.test-cap-01 + sme.sap.com/owner-identifier: default.test-cap-01 + labels: + sme.sap.com/btp-app-identifier-hash: f20cc8aeb2003b3abc33f749a16bd53544b6bab2 + sme.sap.com/owner-generation: "2" + sme.sap.com/owner-identifier-hash: 1f74ae2fbff71a708786a4df4bb2ca87ec603581 + name: test-cap-01-cav-v1 + namespace: default + ownerReferences: + - apiVersion: sme.sap.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: CAPApplication + name: test-cap-01 + uid: 3c7ba7cb-dc04-4fd1-be86-3eb3a5c64a98 + resourceVersion: "11371108" + uid: 5e64489b-7346-4984-8617-e8c37338b3d8 + finalizers: + - sme.sap.com/capapplicationversion +spec: + capApplicationInstance: test-cap-01 + registrySecrets: + - regcred + version: 5.6.7 + workloads: + - name: cap-backend-srv + consumedBTPServices: + - cap-uaa + - cap-service-manager + - cap-saas-registry + deploymentDefinition: + type: CAP + image: docker.image.repo/srv/server:latest + monitoring: + deletionRules: + metrics: + - name: total_http_requests + type: Counter + calculationPeriod: 2m + thresholdValue: "0.01" + - name: active_jobs + type: Gauge + calculationPeriod: 3m + thresholdValue: "0" + - name: content + consumedBTPServices: + - cap-uaa + jobDefinition: + type: Content + image: docker.image.repo/content/cap-content:latest + - name: mtx + consumedBTPServices: + - cap-uaa + - cap-service-manager + - cap-saas-registry + jobDefinition: + type: 
"TenantOperation" + image: docker.image.repo/srv/server:latest + - name: app-router + consumedBTPServices: + - cap-uaa + - cap-saas-registry + deploymentDefinition: + type: Router + image: docker.image.repo/approuter/approuter:latest + monitoring: + scrapeConfig: + port: metrics-port + interval: 10s + path: /metrics + deletionRules: + expression: "scalar(sum(avg_over_time(current_sessions{job=\"test-cap-01-cav-v1-app-router-svc\"}[12m]))) <= bool 1" +status: + conditions: + - reason: ReadyForProcessing + status: "False" + observedGeneration: 1 + type: Ready + finishedJobs: + - test-cap-01-cav-v1-content + observedGeneration: 1 + state: Processing diff --git a/internal/controller/testdata/version-monitoring/cav-v2-deletion-rules.yaml b/internal/controller/testdata/version-monitoring/cav-v2-deletion-rules.yaml new file mode 100644 index 0000000..60c8624 --- /dev/null +++ b/internal/controller/testdata/version-monitoring/cav-v2-deletion-rules.yaml @@ -0,0 +1,83 @@ +apiVersion: sme.sap.com/v1alpha1 +kind: CAPApplicationVersion +metadata: + generation: 1 + annotations: + sme.sap.com/btp-app-identifier: btp-glo-acc-id.test-cap-01 + sme.sap.com/owner-identifier: default.test-cap-01 + labels: + sme.sap.com/btp-app-identifier-hash: f20cc8aeb2003b3abc33f749a16bd53544b6bab2 + sme.sap.com/owner-generation: "2" + sme.sap.com/owner-identifier-hash: 1f74ae2fbff71a708786a4df4bb2ca87ec603581 + name: test-cap-01-cav-v2 + namespace: default + ownerReferences: + - apiVersion: sme.sap.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: CAPApplication + name: test-cap-01 + uid: 3c7ba7cb-dc04-4fd1-be86-3eb3a5c64a98 + resourceVersion: "11371108" + uid: 5e64489b-7346-4984-8617-e8c37338b3d8 + finalizers: + - sme.sap.com/capapplicationversion +spec: + capApplicationInstance: test-cap-01 + version: 8.9.10 + registrySecrets: + - regcred + workloads: + - name: cap-backend + consumedBTPServices: + - cap-uaa + - cap-service-manager + - cap-saas-registry + deploymentDefinition: + type: CAP + image: docker.image.repo/srv/server:v2 + monitoring: + deletionRules: + metrics: + - name: total_http_requests + type: Counter + calculationPeriod: 2m + thresholdValue: "0.01" + - name: active_jobs + type: Gauge + calculationPeriod: 3m + - name: content + consumedBTPServices: + - cap-uaa + jobDefinition: + type: Content + image: docker.image.repo/content/cap-content:v2 + - name: mtx + consumedBTPServices: + - cap-uaa + - cap-service-manager + - cap-saas-registry + jobDefinition: + type: "TenantOperation" + image: docker.image.repo/srv/server:v2 + - name: app-router + consumedBTPServices: + - cap-uaa + - cap-saas-registry + deploymentDefinition: + type: Router + image: docker.image.repo/approuter/approuter:v2 + monitoring: + deletionRules: + expression: "scalar(sum(avg_over_time(current_sessions{job=\"test-cap-01-cav-v1-app-router-svc\"}[12m]))) <= bool 1" +status: + conditions: + - lastTransitionTime: "2022-03-18T23:07:47Z" + lastUpdateTime: "2022-03-18T23:07:47Z" + reason: WorkloadsReady + status: "True" + type: Ready + finishedJobs: + - test-cap-01-cav-v2-content + observedGeneration: 1 + state: Ready diff --git a/internal/controller/testdata/version-monitoring/cav-v3-deletion-rules-processing.yaml b/internal/controller/testdata/version-monitoring/cav-v3-deletion-rules-processing.yaml new file mode 100644 index 0000000..045bc3e --- /dev/null +++ b/internal/controller/testdata/version-monitoring/cav-v3-deletion-rules-processing.yaml @@ -0,0 +1,83 @@ +apiVersion: sme.sap.com/v1alpha1 +kind: CAPApplicationVersion 
+metadata: + generation: 1 + annotations: + sme.sap.com/btp-app-identifier: btp-glo-acc-id.test-cap-01 + sme.sap.com/owner-identifier: default.test-cap-01 + labels: + sme.sap.com/btp-app-identifier-hash: f20cc8aeb2003b3abc33f749a16bd53544b6bab2 + sme.sap.com/owner-generation: "2" + sme.sap.com/owner-identifier-hash: 1f74ae2fbff71a708786a4df4bb2ca87ec603581 + name: test-cap-01-cav-v3 + namespace: default + ownerReferences: + - apiVersion: sme.sap.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: CAPApplication + name: test-cap-01 + uid: 3c7ba7cb-dc04-4fd1-be86-3eb3a5c64a98 + resourceVersion: "113715468" + uid: 5e64489b-1234-4984-8617-e8c37338b3d8 + finalizers: + - sme.sap.com/capapplicationversion +spec: + capApplicationInstance: test-cap-01 + version: 11.12.13 + registrySecrets: + - regcred + workloads: + - name: cap-backend + consumedBTPServices: + - cap-uaa + - cap-service-manager + - cap-saas-registry + deploymentDefinition: + type: CAP + image: docker.image.repo/srv/server:v3 + monitoring: + deletionRules: + metrics: + - name: total_http_requests + type: Counter + calculationPeriod: 2m + thresholdValue: "0.01" + - name: active_jobs + type: Gauge + calculationPeriod: 3m + thresholdValue: "0" + - name: content + consumedBTPServices: + - cap-uaa + jobDefinition: + type: Content + image: docker.image.repo/content/cap-content:v3 + - name: mtx + consumedBTPServices: + - cap-uaa + - cap-service-manager + - cap-saas-registry + jobDefinition: + type: "TenantOperation" + image: docker.image.repo/srv/server:v3 + - name: app-router + consumedBTPServices: + - cap-uaa + - cap-saas-registry + deploymentDefinition: + type: Router + image: docker.image.repo/approuter/approuter:v3 + monitoring: + deletionRules: + expression: "scalar(sum(avg_over_time(current_sessions{job=\"test-cap-01-cav-v1-app-router-svc\"}[12m]))) <= bool 1" +status: + conditions: + - reason: ReadyForProcessing + status: "False" + observedGeneration: 1 + type: Ready + finishedJobs: + - test-cap-01-cav-v3-content + observedGeneration: 1 + state: Processing diff --git a/internal/controller/testdata/version-monitoring/cav-v3-deletion-rules.yaml b/internal/controller/testdata/version-monitoring/cav-v3-deletion-rules.yaml new file mode 100644 index 0000000..9299d47 --- /dev/null +++ b/internal/controller/testdata/version-monitoring/cav-v3-deletion-rules.yaml @@ -0,0 +1,84 @@ +apiVersion: sme.sap.com/v1alpha1 +kind: CAPApplicationVersion +metadata: + generation: 1 + annotations: + sme.sap.com/btp-app-identifier: btp-glo-acc-id.test-cap-01 + sme.sap.com/owner-identifier: default.test-cap-01 + labels: + sme.sap.com/btp-app-identifier-hash: f20cc8aeb2003b3abc33f749a16bd53544b6bab2 + sme.sap.com/owner-generation: "2" + sme.sap.com/owner-identifier-hash: 1f74ae2fbff71a708786a4df4bb2ca87ec603581 + name: test-cap-01-cav-v3 + namespace: default + ownerReferences: + - apiVersion: sme.sap.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: CAPApplication + name: test-cap-01 + uid: 3c7ba7cb-dc04-4fd1-be86-3eb3a5c64a98 + resourceVersion: "113715468" + uid: 5e64489b-1234-4984-8617-e8c37338b3d8 + finalizers: + - sme.sap.com/capapplicationversion +spec: + capApplicationInstance: test-cap-01 + version: 11.12.13 + registrySecrets: + - regcred + workloads: + - name: cap-backend + consumedBTPServices: + - cap-uaa + - cap-service-manager + - cap-saas-registry + deploymentDefinition: + type: CAP + image: docker.image.repo/srv/server:v3 + monitoring: + deletionRules: + metrics: + - name: total_http_requests + type: Counter + 
calculationPeriod: 2m + thresholdValue: "0.01" + - name: active_jobs + type: Gauge + calculationPeriod: 3m + thresholdValue: "0" + - name: content + consumedBTPServices: + - cap-uaa + jobDefinition: + type: Content + image: docker.image.repo/content/cap-content:v3 + - name: mtx + consumedBTPServices: + - cap-uaa + - cap-service-manager + - cap-saas-registry + jobDefinition: + type: "TenantOperation" + image: docker.image.repo/srv/server:v3 + - name: app-router + consumedBTPServices: + - cap-uaa + - cap-saas-registry + deploymentDefinition: + type: Router + image: docker.image.repo/approuter/approuter:v3 + monitoring: + deletionRules: + expression: "scalar(sum(avg_over_time(current_sessions{job=\"test-cap-01-cav-v1-app-router-svc\"}[12m]))) <= bool 1" +status: + conditions: + - lastTransitionTime: "2022-03-18T23:07:47Z" + lastUpdateTime: "2022-03-18T23:07:47Z" + reason: WorkloadsReady + status: "True" + type: Ready + finishedJobs: + - test-cap-01-cav-v3-content + observedGeneration: 1 + state: Ready diff --git a/internal/controller/testdata/version-monitoring/servicemonitors-cav-v1.yaml b/internal/controller/testdata/version-monitoring/servicemonitors-cav-v1.yaml new file mode 100644 index 0000000..9ffbe98 --- /dev/null +++ b/internal/controller/testdata/version-monitoring/servicemonitors-cav-v1.yaml @@ -0,0 +1,38 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + annotations: + sme.sap.com/btp-app-identifier: btp-glo-acc-id.test-cap-01 + sme.sap.com/owner-identifier: default.test-cap-01-cav-v1 + labels: + app: test-cap-01 + sme.sap.com/category: ServiceMonitor + sme.sap.com/workload-name: test-cap-01-cav-v1-app-router-svc + sme.sap.com/workload-type: Router + sme.sap.com/btp-app-identifier-hash: f20cc8aeb2003b3abc33f749a16bd53544b6bab2 + sme.sap.com/cav-version: "5.6.7" + sme.sap.com/owner-generation: "1" + sme.sap.com/owner-identifier-hash: e95e0682f33a657e75e1fc435972d19bd407ba3b + name: test-cap-01-cav-v1-app-router-svc + namespace: default + ownerReferences: + - apiVersion: sme.sap.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: CAPApplicationVersion + name: test-cap-01-cav-v1 + uid: 5e64489b-7346-4984-8617-e8c37338b3d8 +spec: + endpoints: + - interval: 10s + path: /metrics + port: metrics-port + namespaceSelector: {} + selector: + matchLabels: + app: test-cap-01 + sme.sap.com/btp-app-identifier-hash: f20cc8aeb2003b3abc33f749a16bd53544b6bab2 + sme.sap.com/category: Service + sme.sap.com/cav-version: "5.6.7" + sme.sap.com/workload-name: test-cap-01-cav-v1-app-router-svc + sme.sap.com/workload-type: Router diff --git a/internal/controller/version-monitoring.go b/internal/controller/version-monitoring.go new file mode 100644 index 0000000..1b9c472 --- /dev/null +++ b/internal/controller/version-monitoring.go @@ -0,0 +1,390 @@ +/* +SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and cap-operator contributors +SPDX-License-Identifier: Apache-2.0 +*/ + +package controller + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + promapi "github.com/prometheus/client_golang/api" + promv1 "github.com/prometheus/client_golang/api/prometheus/v1" + prommodel "github.com/prometheus/common/model" + "github.com/sap/cap-operator/pkg/apis/sme.sap.com/v1alpha1" + "golang.org/x/mod/semver" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" +) + +const ( + EnvPrometheusAddress = "PROMETHEUS_ADDRESS" + 
EnvPrometheusAcquireClientRetryDelay = "PROM_ACQUIRE_CLIENT_RETRY_DELAY" // Value should be a duration + EnvMetricsEvaluationInterval = "METRICS_EVAL_INTERVAL" +) + +const ( + CAPApplicationVersionEventReadForDeletion = "ReadyForDeletion" + EventActionEvaluateMetrics = "EvaluateMetrics" +) + +const ( + GaugeEvaluationExpression = "sum(avg_over_time(%s{job=\"%s\",namespace=\"%s\"}[%s]))" + CounterEvaluationExpression = "sum(rate(%s{job=\"%s\",namespace=\"%s\"}[%s]))" +) + +type cleanupOrchestrator struct { + api promv1.API + queue workqueue.TypedRateLimitingInterface[NamespacedResourceKey] + mEnv *monitoringEnv +} + +type monitoringEnv struct { + address string + acquireClientRetryDelay time.Duration + evaluationInterval time.Duration +} + +func parseMonitoringEnv() *monitoringEnv { + promAdd := strings.TrimSpace(os.Getenv(EnvPrometheusAddress)) + if promAdd == "" { + return nil + } + env := &monitoringEnv{address: promAdd} + + evalDurationEnv := func(envName string, fallback time.Duration) time.Duration { + if v, ok := os.LookupEnv(envName); ok && strings.TrimSpace(v) != "" { + dur, err := time.ParseDuration(strings.TrimSpace(v)) + if err == nil { + return dur + } + } + return fallback + } + env.acquireClientRetryDelay = evalDurationEnv(EnvPrometheusAcquireClientRetryDelay, time.Hour) + env.evaluationInterval = evalDurationEnv(EnvMetricsEvaluationInterval, 10*time.Minute) + return env +} + +func (c *Controller) startVersionCleanup(ctx context.Context) { + mEnv := parseMonitoringEnv() + if mEnv == nil { + return // no prometheus address + } + + restartSignal := make(chan bool, 1) + setup := func() context.CancelFunc { + for { + o := initializeVersionCleanupOrchestrator(ctx, mEnv) + if o == nil { + select { + case <-ctx.Done(): + return nil + case <-time.After(mEnv.acquireClientRetryDelay): // sleep a long time before attempting to setup the cleanup process + continue + } + } + child, cancelFn := context.WithCancel(ctx) + go func() { + <-child.Done() + o.queue.ShutDown() + }() + go c.scheduleVersionCollectionForCleanup(child, o, restartSignal) + go c.processVersionCleanupQueue(child, o, restartSignal) + return cancelFn + } + } + + for { + cancel := setup() + select { + case <-ctx.Done(): + return + case <-restartSignal: // restart broken routines + cancel() + } + } +} + +func recoverVersionCleanupRoutine(restart chan<- bool) { + if r := recover(); r != nil { + err := fmt.Errorf("panic@version-cleanup: %v", r) + klog.ErrorS(err, "recovered from panic") + select { // send restart signal restart process + case restart <- true: // send to channel if empty (channel size 1) + default: + } + } +} + +func initializeVersionCleanupOrchestrator(ctx context.Context, mEnv *monitoringEnv) *cleanupOrchestrator { + promClient, err := promapi.NewClient(promapi.Config{Address: mEnv.address}) + if err != nil { + klog.ErrorS(err, "could not create client", "address", mEnv.address) + return nil + } + v1api := promv1.NewAPI(promClient) + _, err = v1api.Runtimeinfo(ctx) + if err != nil { + klog.ErrorS(err, "could not fetch runtime info from prometheus server", "address", mEnv.address) + return nil + } + + // create orchestrator + return &cleanupOrchestrator{ + api: v1api, + queue: workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[NamespacedResourceKey]()), + mEnv: mEnv, + } +} + +func (c *Controller) scheduleVersionCollectionForCleanup(ctx context.Context, orc *cleanupOrchestrator, restart chan<- bool) { + defer recoverVersionCleanupRoutine(restart) + for { + if err := 
c.queueVersionsForCleanupEvaluation(ctx, orc); err != nil { + klog.ErrorS(err, "could not select applications for version cleanup evaluation") + } + select { + case <-ctx.Done(): + return + case <-time.After(orc.mEnv.evaluationInterval): // sleep for (say 10m) before reading versions again + continue + } + } +} + +func (c *Controller) queueVersionsForCleanupEvaluation(ctx context.Context, orc *cleanupOrchestrator) error { + lister := c.crdInformerFactory.Sme().V1alpha1().CAPApplications().Lister() + cas, err := lister.List(labels.Everything()) + if err != nil { + return err + } + + for i := range cas { + ca := cas[i] + if v, ok := ca.Annotations[AnnotationEnableCleanupMonitoring]; !ok || !(strings.ToLower(v) == "true" || strings.ToLower(v) == "dry-run") { + continue + } + outdated, err := c.getCleanupRelevantVersions(ctx, ca) + if err != nil || len(outdated) == 0 { + continue + } + for n := range outdated { + cav := outdated[n] + orc.queue.Add(NamespacedResourceKey{Namespace: cav.Namespace, Name: cav.Name}) + } + } + return nil +} + +func (c *Controller) getCleanupRelevantVersions(ctx context.Context, ca *v1alpha1.CAPApplication) ([]*v1alpha1.CAPApplicationVersion, error) { + excludedVersions := map[string]bool{} + excludedVersionNames := map[string]bool{} + + selector, _ := labels.ValidatedSelectorFromSet(map[string]string{ + LabelOwnerIdentifierHash: sha1Sum(ca.Namespace, ca.Name), + }) + tenantLister := c.crdInformerFactory.Sme().V1alpha1().CAPTenants().Lister() + cats, err := tenantLister.CAPTenants(ca.Namespace).List(selector) + if err != nil { + return nil, err + } + for i := range cats { + cat := cats[i] + if cat.Spec.Version != "" { + excludedVersions[cat.Spec.Version] = true + } + if cat.Status.CurrentCAPApplicationVersionInstance != "" { + excludedVersionNames[cat.Status.CurrentCAPApplicationVersionInstance] = true + } + } + + latestReadyVersion, err := c.getLatestReadyCAPApplicationVersion(ctx, ca, true) + if err != nil || latestReadyVersion == nil { + // if there are no Ready versions yet - do not initiate cleanup + return nil, err + } + + outdatedVersions := []*v1alpha1.CAPApplicationVersion{} + cavs, _ := c.getCachedCAPApplicationVersions(ctx, ca) // ignoring error as this is not critical + for i := range cavs { + cav := cavs[i] + // ignore all versions greater than latest Ready one + if semver.Compare("v"+cav.Spec.Version, "v"+latestReadyVersion.Spec.Version) == 1 { + continue + } + if excludedVersions[cav.Spec.Version] || excludedVersionNames[cav.Name] { + continue // filter out versions attached to tenants + } + outdatedVersions = append(outdatedVersions, cav) + } + + return outdatedVersions, nil +} + +func (c *Controller) processVersionCleanupQueue(ctx context.Context, orc *cleanupOrchestrator, restart chan<- bool) { + defer recoverVersionCleanupRoutine(restart) + for { + select { + case <-ctx.Done(): + return + default: + if stop := c.processVersionCleanupQueueItem(ctx, orc); stop { + return + } + } + } +} + +func (c *Controller) processVersionCleanupQueueItem(ctx context.Context, orc *cleanupOrchestrator) (stop bool) { + item, shutdown := orc.queue.Get() + if shutdown { + return true // stop processing + } + defer orc.queue.Done(item) + + if err := c.evaluateVersionForCleanup(ctx, item, orc.api); err != nil { + orc.queue.AddRateLimited(item) + } else { + orc.queue.Forget(item) + } + return false +} + +func (c *Controller) evaluateVersionForCleanup(ctx context.Context, item NamespacedResourceKey, promapi promv1.API) error { + lister := 
c.crdInformerFactory.Sme().V1alpha1().CAPApplicationVersions().Lister() + cav, err := lister.CAPApplicationVersions(item.Namespace).Get(item.Name) + if err != nil { + return handleOperatorResourceErrors(err) + } + + // read CAPApplication to determine dry-run mode + ca, err := c.crdInformerFactory.Sme().V1alpha1().CAPApplications().Lister().CAPApplications(cav.Namespace).Get(cav.Spec.CAPApplicationInstance) + if err != nil { + return err + } + + cleanup := true + for i := range cav.Spec.Workloads { + wl := cav.Spec.Workloads[i] + workloadEvaluation := true + if wl.DeploymentDefinition != nil && wl.DeploymentDefinition.Monitoring != nil && wl.DeploymentDefinition.Monitoring.DeletionRules != nil { + if wl.DeploymentDefinition.Monitoring.DeletionRules.ScalarExpression != nil { // evaluate provided expression + expr := strings.TrimSpace(*wl.DeploymentDefinition.Monitoring.DeletionRules.ScalarExpression) + if expr == "" { + workloadEvaluation = false + } else { + isRelevantForCleanup, err := evaluateExpression(ctx, expr, promapi) + if err != nil || !isRelevantForCleanup { + if err != nil { + klog.ErrorS(err, "could not evaluate PromQL expression for workload", "workload", wl.Name, "version", cav.Name) + } + workloadEvaluation = false + } + } + } else { + for j := range wl.DeploymentDefinition.Monitoring.DeletionRules.Metrics { + rule := wl.DeploymentDefinition.Monitoring.DeletionRules.Metrics[j] + isRelevantForCleanup, err := evaluateMetric(ctx, &rule, fmt.Sprintf("%s%s", getWorkloadName(cav.Name, wl.Name), ServiceSuffix), cav.Namespace, promapi) + if err != nil || !isRelevantForCleanup { + if err != nil { + klog.ErrorS(err, "could not evaluate metric for workload", "workload", wl.Name, "version", cav.Name) + } + workloadEvaluation = false + break + } + } + } + } + if !workloadEvaluation { + cleanup = false + break + } + } + + if cleanup { + klog.InfoS("version has been evaluated to be ready for deletion", "version", cav.Name) + c.Event(cav, nil, corev1.EventTypeNormal, CAPApplicationVersionEventReadForDeletion, EventActionEvaluateMetrics, fmt.Sprintf("version %s is now ready for deletion", cav.Name)) + + if v, ok := ca.Annotations[AnnotationEnableCleanupMonitoring]; ok && strings.ToLower(v) == "true" { + return c.crdClient.SmeV1alpha1().CAPApplicationVersions(cav.Namespace).Delete(ctx, cav.Name, v1.DeleteOptions{}) + } + } + + return nil +} + +func executePromQL(ctx context.Context, promapi promv1.API, query string) (prommodel.Value, error) { + // klog.InfoS("executing prometheus query", "query", query) + result, warnings, err := promapi.Query(ctx, query, time.Now()) + if err != nil { + klog.ErrorS(err, "prometheus query error", "query", query) + return nil, err + } + if len(warnings) > 0 { + klog.InfoS(fmt.Sprintf("query %s returned warnings [%s]", query, strings.Join(warnings, ", "))) + } + klog.InfoS(fmt.Sprintf("query %s returned result: %v", query, result)) + return result, nil +} + +func evaluateExpression(ctx context.Context, expr string, promapi promv1.API) (bool, error) { + result, err := executePromQL(ctx, promapi, expr) + if err != nil { + return false, err + } + + s, ok := result.(*prommodel.Scalar) + if !ok { + err := fmt.Errorf("result from query %s could not be casted as a scalar", expr) + klog.ErrorS(err, "error parsing query result") + return false, err + } + + return s.Value == 1, nil // expecting a boolean result +} + +func evaluateMetric(ctx context.Context, rule *v1alpha1.MetricRule, job, ns string, promapi promv1.API) (bool, error) { + query := "" + switch rule.Type { + 
case v1alpha1.MetricTypeGauge: + query = fmt.Sprintf(GaugeEvaluationExpression, rule.Name, job, ns, rule.CalculationPeriod) + case v1alpha1.MetricTypeCounter: + query = fmt.Sprintf(CounterEvaluationExpression, rule.Name, job, ns, rule.CalculationPeriod) + default: + return false, fmt.Errorf("metric %s has unsupported type %s", rule.Name, rule.Type) + } + + result, err := executePromQL(ctx, promapi, query) + if err != nil { + return false, err + } + + vec, ok := result.(prommodel.Vector) + if !ok { + err := fmt.Errorf("result from query %s could not be casted as a vector", query) + klog.ErrorS(err, "error parsing query result") + return false, err + } + if len(vec) > 0 { + sample := vec[0] // use the first one - expecting only one sample based on the expressions + var threshold prommodel.SampleValue + err = threshold.UnmarshalJSON([]byte(fmt.Sprintf("\"%s\"", rule.ThresholdValue))) + if err != nil { + klog.ErrorS(err, "error parsing threshold value", "value", rule.ThresholdValue, "metric", rule.Name) + return false, err + } + klog.InfoS("parsed prometheus query result and threshold", "threshold", threshold.String(), "result", sample.Value.String(), "query", query) + return sample.Value <= threshold, nil + } else { + // there could be no results if the version was not transmitting metrics for a very long time + return true, nil + } +} diff --git a/internal/controller/version-monitoring_test.go b/internal/controller/version-monitoring_test.go new file mode 100644 index 0000000..6693938 --- /dev/null +++ b/internal/controller/version-monitoring_test.go @@ -0,0 +1,604 @@ +/* +SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and cap-operator contributors +SPDX-License-Identifier: Apache-2.0 +*/ + +package controller + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "os" + "sync" + "testing" + "time" + + prommodel "github.com/prometheus/common/model" + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/workqueue" +) + +func TestMonitoringEnv(t *testing.T) { + expAdd := "http://prom.server.local" + expAcqRetryInt := "10s" + expEvalInt := "3h" + tests := []struct { + add *string + acqRetryInt *string + evalInt *string + }{ + {}, + {add: &expAdd, acqRetryInt: &expAcqRetryInt, evalInt: &expEvalInt}, + {add: &expAdd}, + } + + for _, tt := range tests { + t.Run("test monitoring env", func(t *testing.T) { + if tt.add != nil { + os.Setenv(EnvPrometheusAddress, *tt.add) + defer os.Unsetenv(EnvPrometheusAddress) + } + if tt.acqRetryInt != nil { + os.Setenv(EnvPrometheusAcquireClientRetryDelay, *tt.acqRetryInt) + defer os.Unsetenv(EnvPrometheusAcquireClientRetryDelay) + } + if tt.evalInt != nil { + os.Setenv(EnvMetricsEvaluationInterval, *tt.evalInt) + defer os.Unsetenv(EnvMetricsEvaluationInterval) + } + + mEnv := parseMonitoringEnv() + + if tt.add == nil { + if mEnv != nil { + t.Errorf("did not expect monitoring environment") + } + return + } + if tt.acqRetryInt != nil { + exp, _ := time.ParseDuration(*tt.acqRetryInt) + if mEnv.acquireClientRetryDelay != exp { + t.Errorf("expected acquire client retry interval to be %s", *tt.acqRetryInt) + } + } else { + if mEnv.acquireClientRetryDelay != time.Hour { + t.Errorf("expected default acquire client retry interval") + } + } + if tt.evalInt != nil { + exp, _ := time.ParseDuration(*tt.evalInt) + if mEnv.evaluationInterval != exp { + t.Errorf("expected evaluation interval to be %s", *tt.evalInt) + } + } else { + if mEnv.evaluationInterval != 
10*time.Minute { + t.Errorf("expected default evaluation interval") + } + } + }) + } +} + +func setupTestControllerWithInitialResources(t *testing.T, initialResources []string) *Controller { + c := initializeControllerForReconciliationTests(t, []ResourceAction{}) + var wg sync.WaitGroup + work := func(file string) { + defer wg.Done() + raw, err := readYAMLResourcesFromFile(file) + if err != nil { + t.Errorf("error reading resources from file %s: %s", file, err.Error()) + return + } + for j := range raw { + err = addInitialObjectToStore(raw[j], c) + if err != nil { + t.Error(err) + return + } + } + } + + for i := range initialResources { + wg.Add(1) + go work(initialResources[i]) + } + wg.Wait() + + return c +} + +func TestGracefulShutdownMonitoringRoutines(t *testing.T) { + c := setupTestControllerWithInitialResources(t, []string{}) + + s, _ := getPromServer(false, []queryTestCase{}) + defer s.Close() + + os.Setenv(EnvPrometheusAddress, s.URL) + defer os.Unsetenv(EnvPrometheusAddress) + + testCtx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + var wg sync.WaitGroup + + wg.Add(1) + go func() { + c.startVersionCleanup(testCtx) + wg.Done() + }() + + wg.Wait() // check whether routines are closing - or test timeout +} + +func TestVersionSelectionForCleanup(t *testing.T) { + tests := []struct { + name string + resources []string + expectedVersions []string + expectError bool + }{ + { + name: "select versions not assigned to tenants", + resources: []string{ + "testdata/version-monitoring/ca-cleanup-enabled.yaml", + "testdata/version-monitoring/cav-v1-deletion-rules.yaml", + "testdata/version-monitoring/cav-v2-deletion-rules.yaml", + "testdata/version-monitoring/cav-v3-deletion-rules.yaml", + "testdata/version-monitoring/cat-provider-v3-ready.yaml", + }, + expectedVersions: []string{"default.test-cap-01-cav-v1", "default.test-cap-01-cav-v2"}, + }, + { + name: "version cleanup must ignore CAPApplications without specified annotation", + resources: []string{ + "testdata/common/capapplication.yaml", + "testdata/version-monitoring/cav-v1-deletion-rules.yaml", + "testdata/version-monitoring/cav-v2-deletion-rules.yaml", + "testdata/version-monitoring/cav-v3-deletion-rules.yaml", + "testdata/version-monitoring/cat-provider-v3-ready.yaml", + }, + expectedVersions: []string{}, + }, + { + name: "should not consider versions higher than the latest Ready version", + resources: []string{ + "testdata/version-monitoring/ca-cleanup-enabled.yaml", + "testdata/version-monitoring/cav-v1-deletion-rules-error.yaml", + "testdata/version-monitoring/cav-v2-deletion-rules.yaml", + "testdata/version-monitoring/cav-v3-deletion-rules-processing.yaml", + "testdata/version-monitoring/cat-provider-v2-ready.yaml", + }, + expectedVersions: []string{"default.test-cap-01-cav-v1"}, + }, + { + name: "should not consider any version when there are no Ready versions", + resources: []string{ + "testdata/version-monitoring/ca-cleanup-enabled.yaml", + "testdata/version-monitoring/cav-v1-deletion-rules-error.yaml", + "testdata/version-monitoring/cav-v3-deletion-rules-processing.yaml", + }, + expectedVersions: []string{}, + }, + { + name: "should not consider versions with tenants (using dry-run)", + resources: []string{ + "testdata/version-monitoring/ca-cleanup-dry-run-enabled.yaml", + "testdata/version-monitoring/cav-v1-deletion-rules.yaml", + "testdata/version-monitoring/cav-v2-deletion-rules.yaml", + "testdata/version-monitoring/cav-v3-deletion-rules.yaml", + 
"testdata/version-monitoring/cat-consumer-v2-upgrading.yaml", + }, + expectedVersions: []string{"default.test-cap-01-cav-v1"}, + }, + } + + getQueuedItems := func(o *cleanupOrchestrator) []string { + res := []string{} + for { + i, stop := o.queue.Get() + if stop { + return res + } + o.queue.Done(i) + res = append(res, fmt.Sprintf("%s.%s", i.Namespace, i.Name)) + o.queue.Forget(i) + } + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + c := setupTestControllerWithInitialResources(t, tc.resources) + orc := &cleanupOrchestrator{queue: workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[NamespacedResourceKey]())} + defer orc.queue.ShutDown() + err := c.queueVersionsForCleanupEvaluation(context.TODO(), orc) + if err != nil { + if !tc.expectError { + t.Errorf("not expecting error for test case %s -> error: %s", tc.name, err.Error()) + } + return + } + evs := map[string]bool{} + for _, s := range tc.expectedVersions { + evs[s] = false + } + orc.queue.ShutDownWithDrain() // allows existing items to be processed before shutting down + results := getQueuedItems(orc) + for _, r := range results { + if _, ok := evs[r]; ok { + evs[r] = true + } else { + t.Errorf("unexpected version %s queued for cleanup", r) + } + } + for exp, found := range evs { + if !found { + t.Errorf("was expecting version %s to be queued for cleanup", exp) + } + } + }) + } +} + +type queryTestCase struct { + expectedQuery string + simulateError bool + simulateEmptyResult bool + simulatedResultType string // vector | scalar | invalid + simulatedValue float64 +} + +type evalTestConfig struct { + name string + evaluatedVersion string + startResources []string + expectCleanup bool + expectError bool + cases []queryTestCase +} + +func mockPromRuntimeInfoHandler(simError bool, w http.ResponseWriter) { + if simError { + w.WriteHeader(http.StatusServiceUnavailable) + } else { + io.WriteString(w, ` + { + "status": "success", + "data": { + "CWD": "/", + "goroutineCount": 48, + "GOMAXPROCS": 4 + } + } + `) + } +} + +func mockPromQueryHandler(testCases []queryTestCase, query string, w http.ResponseWriter) { + var tCase *queryTestCase + for i := range testCases { + tc := testCases[i] + if tc.expectedQuery == query { + tCase = &tc + break + } + } + if tCase == nil { + io.WriteString(w, ` + { + "status": "error", + "errorType": "TestCaseMismatch", + "error": "could not match received query to a specified test case" + } + `) + return + } + if tCase.simulateError { + io.WriteString(w, ` + { + "status": "error", + "errorType": "SimulatedError", + "error": "simulated error" + } + `) + return + } + if tCase.simulateEmptyResult { + io.WriteString(w, + fmt.Sprintf(`{ + "status": "success", + "data": { + "resultType": "%s", + "result": [] + } + }`, tCase.simulatedResultType), + ) + } + + getScalar := func() *prommodel.Scalar { + return &prommodel.Scalar{ + Timestamp: prommodel.Now(), + Value: prommodel.SampleValue(tCase.simulatedValue), + } + } + + getVector := func() *prommodel.Vector { + return &prommodel.Vector{{ + Timestamp: prommodel.Now(), + Value: prommodel.SampleValue(tCase.simulatedValue), + Metric: prommodel.Metric{}, + }} + } + + var ( + raw []byte + err error + ) + switch tCase.simulatedResultType { + case "scalar": + raw, err = getScalar().MarshalJSON() + case "vector": + raw, err = json.Marshal(getVector()) + case "invalid": + raw = []byte("{\"property\":\"invalid\"}") + } + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + val := string(raw) + + 
io.WriteString(w, + fmt.Sprintf(`{ + "status": "success", + "data": { + "resultType": "%s", + "result": %s + } + }`, tCase.simulatedResultType, val), + ) +} + +func getPromServer(unavailable bool, cases []queryTestCase) (*httptest.Server, func() map[string]bool) { + calledQueries := map[string]bool{} + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/api/v1/status/runtimeinfo" { + mockPromRuntimeInfoHandler(unavailable, w) + return + } + if r.URL.Path == "/api/v1/query" { + q := r.FormValue("query") + if q != "" { + calledQueries[q] = false + } + mockPromQueryHandler(cases, q, w) + return + } + w.WriteHeader(http.StatusInternalServerError) // unexpected path + })) + return server, func() map[string]bool { + return calledQueries + } +} + +func Test_initializeVersionCleanupOrchestrator(t *testing.T) { + tests := []struct { + name string + serverUnavailable bool + }{ + { + name: "initialize cleanup orchestrator and verify connection", + serverUnavailable: false, + }, + { + name: "ensure retry of cleanup orchestrator initialization", + serverUnavailable: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s, _ := getPromServer(tt.serverUnavailable, []queryTestCase{}) + defer s.Close() + var o *cleanupOrchestrator + testCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + go func() { + o = initializeVersionCleanupOrchestrator(testCtx, &monitoringEnv{address: s.URL, evaluationInterval: 2 * time.Minute, acquireClientRetryDelay: 30 * time.Second}) + if o != nil { + cancel() + } + }() + <-testCtx.Done() + if tt.serverUnavailable { + if testCtx.Err() == nil || testCtx.Err() != context.DeadlineExceeded { + t.Error("expected to exceed test context deadline") + } + } else { + if o == nil { + t.Errorf("could not initialize prometheus client") + } + defer o.queue.ShutDown() + } + + }) + } +} + +func TestVersionCleanupEvaluation(t *testing.T) { + tests := []evalTestConfig{ + { + name: "evaluate version with missing application - expect error", + evaluatedVersion: "test-cap-01-cav-v1", + startResources: []string{ + "testdata/version-monitoring/cav-v1-deletion-rules-error.yaml", + "testdata/version-monitoring/cav-v2-deletion-rules.yaml", + }, + expectCleanup: false, + expectError: true, + cases: []queryTestCase{}, + }, + { + name: "evaluate version workloads - expecting deletion", + evaluatedVersion: "test-cap-01-cav-v1", + startResources: []string{ + "testdata/version-monitoring/ca-cleanup-enabled.yaml", + "testdata/version-monitoring/cav-v1-deletion-rules-error.yaml", + "testdata/version-monitoring/cav-v2-deletion-rules.yaml", + }, + expectCleanup: true, + expectError: false, + cases: []queryTestCase{ + { + expectedQuery: "sum(rate(total_http_requests{job=\"test-cap-01-cav-v1-cap-backend-srv-svc\",namespace=\"default\"}[2m]))", + simulateError: false, + simulateEmptyResult: false, + simulatedResultType: "vector", + simulatedValue: 0.005, + }, + { + expectedQuery: "sum(avg_over_time(active_jobs{job=\"test-cap-01-cav-v1-cap-backend-srv-svc\",namespace=\"default\"}[3m]))", + simulateError: false, + simulateEmptyResult: false, + simulatedResultType: "vector", + simulatedValue: 0, + }, + { + expectedQuery: "scalar(sum(avg_over_time(current_sessions{job=\"test-cap-01-cav-v1-app-router-svc\"}[12m]))) <= bool 1", + simulateError: false, + simulateEmptyResult: false, + simulatedResultType: "scalar", + simulatedValue: 1, + }, + }, + }, + { + name: "evaluate version workloads - prom query error - 
from metric rule", + evaluatedVersion: "test-cap-01-cav-v1", + startResources: []string{ + "testdata/version-monitoring/ca-cleanup-enabled.yaml", + "testdata/version-monitoring/cav-v1-deletion-rules-error.yaml", + "testdata/version-monitoring/cav-v2-deletion-rules.yaml", + }, + expectCleanup: false, + expectError: false, + cases: []queryTestCase{ + { + expectedQuery: "sum(rate(total_http_requests{job=\"test-cap-01-cav-v1-cap-backend-srv-svc\",namespace=\"default\"}[2m]))", + simulateError: true, + simulateEmptyResult: false, + simulatedResultType: "vector", + simulatedValue: 0.005, + }, + }, + }, + { + name: "evaluate version workloads - prom query error - from expression", + evaluatedVersion: "test-cap-01-cav-v1", + startResources: []string{ + "testdata/version-monitoring/ca-cleanup-enabled.yaml", + "testdata/version-monitoring/cav-v1-deletion-rules-error.yaml", + "testdata/version-monitoring/cav-v2-deletion-rules.yaml", + }, + expectCleanup: false, + expectError: false, + cases: []queryTestCase{ + { + expectedQuery: "sum(rate(total_http_requests{job=\"test-cap-01-cav-v1-cap-backend-srv-svc\",namespace=\"default\"}[2m]))", + simulateError: false, + simulateEmptyResult: false, + simulatedResultType: "vector", + simulatedValue: 0.005, + }, + { + expectedQuery: "sum(avg_over_time(active_jobs{job=\"test-cap-01-cav-v1-cap-backend-srv-svc\",namespace=\"default\"}[3m]))", + simulateError: false, + simulateEmptyResult: false, + simulatedResultType: "vector", + simulatedValue: 0, + }, + { + expectedQuery: "scalar(sum(avg_over_time(current_sessions{job=\"test-cap-01-cav-v1-app-router-svc\"}[12m]))) <= bool 1", + simulateError: true, + simulateEmptyResult: false, + simulatedResultType: "scalar", + simulatedValue: 1, + }, + }, + }, + { + name: "evaluate version workloads - prom query - invalid result type", + evaluatedVersion: "test-cap-01-cav-v1", + startResources: []string{ + "testdata/version-monitoring/ca-cleanup-enabled.yaml", + "testdata/version-monitoring/cav-v1-deletion-rules-error.yaml", + "testdata/version-monitoring/cav-v2-deletion-rules.yaml", + }, + expectCleanup: false, + expectError: false, + cases: []queryTestCase{ + { + expectedQuery: "sum(rate(total_http_requests{job=\"test-cap-01-cav-v1-cap-backend-srv-svc\",namespace=\"default\"}[2m]))", + simulateError: false, + simulateEmptyResult: false, + simulatedResultType: "invalid", + simulatedValue: 0.005, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s, getActualQueries := getPromServer(false, tt.cases) + defer s.Close() + o := initializeVersionCleanupOrchestrator(context.TODO(), &monitoringEnv{address: s.URL, acquireClientRetryDelay: 1 * time.Minute}) + defer o.queue.ShutDown() + c := setupTestControllerWithInitialResources(t, tt.startResources) + item := NamespacedResourceKey{Namespace: "default", Name: tt.evaluatedVersion} + o.queue.Add(item) + _ = c.processVersionCleanupQueueItem(context.TODO(), o) + + // Verify error occurrence + if tt.expectError { + if o.queue.NumRequeues(item) == 0 { + t.Errorf("expected requeue for version %s", tt.evaluatedVersion) + } + } else { + if o.queue.NumRequeues(item) > 0 { + t.Errorf("expected no requeues for version %s", tt.evaluatedVersion) + } + } + + // check whether expected queries were called + act := getActualQueries() + for _, c := range tt.cases { + if _, ok := act[c.expectedQuery]; !ok { + t.Errorf("expected query %s to be called", c.expectedQuery) + } else { + act[c.expectedQuery] = true + } + } + for q, ok := range act { + if !ok { + 
t.Errorf("unexpected query %s was called", q) + } + } + + // verify version deletion + _, err := c.crdClient.SmeV1alpha1().CAPApplicationVersions("default").Get(context.TODO(), tt.evaluatedVersion, v1.GetOptions{}) + if tt.expectCleanup { + if err == nil || !errors.IsNotFound(err) { + t.Errorf("expected version %s to be deleted", tt.evaluatedVersion) + } + } else { + if err != nil { + t.Errorf("expected to fetch version %s", tt.evaluatedVersion) + } + } + + }) + } +} diff --git a/pkg/apis/sme.sap.com/v1alpha1/types.go b/pkg/apis/sme.sap.com/v1alpha1/types.go index 1e65cbd..43e37ca 100644 --- a/pkg/apis/sme.sap.com/v1alpha1/types.go +++ b/pkg/apis/sme.sap.com/v1alpha1/types.go @@ -96,6 +96,7 @@ type ApplicationDomains struct { // +kubebuilder:validation:MaxLength=62 // Primary application domain will be used to generate a wildcard TLS certificate. In project "Gardener" managed clusters this is (usually) a subdomain of the cluster domain Primary string `json:"primary"` + // +kubebuilder:validation:items:Pattern=^[a-z0-9-.]+$ // Customer specific domains to serve application endpoints (optional) Secondary []string `json:"secondary,omitempty"` // +kubebuilder:validation:Pattern=^[a-z0-9-.]*$ @@ -253,8 +254,76 @@ type DeploymentDetails struct { LivenessProbe *corev1.Probe `json:"livenessProbe,omitempty"` // Readiness probe ReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty"` + // Workload monitoring specification + Monitoring *WorkloadMonitoring `json:"monitoring,omitempty"` +} + +// WorkloadMonitoring specifies the metrics related to the workload +type WorkloadMonitoring struct { + // DeletionRules specify the metrics conditions that need to be satisfied for the version to be deleted automatically. + // Either a set of metrics based rules can be specified, or a PromQL expression which evaluates to a boolean scalar. + DeletionRules *DeletionRules `json:"deletionRules,omitempty"` + // Configuration to be used to create ServiceMonitor for the workload service. + // If not specified, CAP Operator will not attempt to create a ServiceMonitor for the workload + ScrapeConfig *MonitoringConfig `json:"scrapeConfig,omitempty"` +} + +type MonitoringConfig struct { + // Interval at which Prometheus scrapes the metrics from the target. + ScrapeInterval Duration `json:"interval,omitempty"` + // Name of the port (specified on the workload) which will be used by Prometheus server to scrape metrics + WorkloadPort string `json:"port"` + // HTTP path from which to scrape for metrics. + Path string `json:"path,omitempty"` + // Timeout after which Prometheus considers the scrape to be failed. + Timeout Duration `json:"scrapeTimeout,omitempty"` +} + +type DeletionRules struct { + Metrics []MetricRule `json:"metrics,omitempty"` + // A promQL expression that evaluates to a scalar boolean (1 or 0). + // Example: scalar(sum(avg_over_time(demo_metric{job="cav-demo-app-4-srv-svc",namespace="demo"}[2m]))) <= bool 0.1 + ScalarExpression *string `json:"expression,omitempty"` +} + +// MetricRule specifies a Prometheus metric and rule which represents a cleanup condition. Metrics of type Gauge and Counter are supported. +// +// Rule evaluation for Gauge type metric: The time series data of the metric (restricted to the current workload by setting `job` label as workload service name) is calculated as an average over the specified period. +// A sum of the calculated average from different time series is then compared to the provided threshold value to determine whether the rule has been satisfied. 
+// Evaluation: `sum(avg_over_time(<metric-name>{job=<workload-service-name>}[<calculation-period>])) <= <threshold-value>` +// +// Rule evaluation for Counter type metric: The time series data of the metric (restricted to the current workload by setting `job` label as workload service name) is calculated as rate of increase over the specified period. +// The sum of the calculated rates from different time series is then compared to the provided threshold value to determine whether the rule has been satisfied. +// Evaluation: `sum(rate(<metric-name>{job=<workload-service-name>}[<calculation-period>])) <= <threshold-value>` +type MetricRule struct { + // Prometheus metric. For example `http_request_count` + Name string `json:"name"` + // Type of Prometheus metric which can be either `Gauge` or `Counter` + // +kubebuilder:validation:Enum=Gauge;Counter + Type MetricType `json:"type"` + // Duration of time series data used for the rule evaluation + CalculationPeriod Duration `json:"calculationPeriod"` + // The threshold value which is compared against the calculated value. If calculated value is less than or equal to the threshold the rule condition is fulfilled. + // +kubebuilder:validation:Format:=double + ThresholdValue string `json:"thresholdValue"` } +// Duration is a valid time duration that can be parsed by Prometheus +// Supported units: y, w, d, h, m, s, ms +// Examples: `30s`, `1m`, `1h20m15s`, `15d` +// +kubebuilder:validation:Pattern:="^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$" +type Duration string + +// Type of Prometheus metric +type MetricType string + +const ( + // Prometheus Metric type Gauge + MetricTypeGauge MetricType = "Gauge" + // Prometheus Metric type Counter + MetricTypeCounter MetricType = "Counter" +) + // Type of deployment type DeploymentType string diff --git a/pkg/apis/sme.sap.com/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/sme.sap.com/v1alpha1/zz_generated.deepcopy.go index 6b27e3f..d9fc6a4 100644 --- a/pkg/apis/sme.sap.com/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/sme.sap.com/v1alpha1/zz_generated.deepcopy.go @@ -687,6 +687,32 @@ func (in *CommonDetails) DeepCopy() *CommonDetails { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeletionRules) DeepCopyInto(out *DeletionRules) { + *out = *in + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricRule, len(*in)) + copy(*out, *in) + } + if in.ScalarExpression != nil { + in, out := &in.ScalarExpression, &out.ScalarExpression + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeletionRules. +func (in *DeletionRules) DeepCopy() *DeletionRules { + if in == nil { + return nil + } + out := new(DeletionRules) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DeploymentDetails) DeepCopyInto(out *DeploymentDetails) { *out = *in @@ -713,6 +739,11 @@ func (in *DeploymentDetails) DeepCopyInto(out *DeploymentDetails) { *out = new(v1.Probe) (*in).DeepCopyInto(*out) } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(WorkloadMonitoring) + (*in).DeepCopyInto(*out) + } return } @@ -776,6 +807,38 @@ func (in *JobDetails) DeepCopy() *JobDetails { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MetricRule) DeepCopyInto(out *MetricRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricRule. +func (in *MetricRule) DeepCopy() *MetricRule { + if in == nil { + return nil + } + out := new(MetricRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringConfig) DeepCopyInto(out *MonitoringConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConfig. +func (in *MonitoringConfig) DeepCopy() *MonitoringConfig { + if in == nil { + return nil + } + out := new(MonitoringConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NameValue) DeepCopyInto(out *NameValue) { *out = *in @@ -920,3 +983,29 @@ func (in *WorkloadDetails) DeepCopy() *WorkloadDetails { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkloadMonitoring) DeepCopyInto(out *WorkloadMonitoring) { + *out = *in + if in.DeletionRules != nil { + in, out := &in.DeletionRules, &out.DeletionRules + *out = new(DeletionRules) + (*in).DeepCopyInto(*out) + } + if in.ScrapeConfig != nil { + in, out := &in.ScrapeConfig, &out.ScrapeConfig + *out = new(MonitoringConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitoring. +func (in *WorkloadMonitoring) DeepCopy() *WorkloadMonitoring { + if in == nil { + return nil + } + out := new(WorkloadMonitoring) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/client/applyconfiguration/sme.sap.com/v1alpha1/deletionrules.go b/pkg/client/applyconfiguration/sme.sap.com/v1alpha1/deletionrules.go new file mode 100644 index 0000000..50f7994 --- /dev/null +++ b/pkg/client/applyconfiguration/sme.sap.com/v1alpha1/deletionrules.go @@ -0,0 +1,42 @@ +/* +SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and cap-operator contributors +SPDX-License-Identifier: Apache-2.0 +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// DeletionRulesApplyConfiguration represents a declarative configuration of the DeletionRules type for use +// with apply. +type DeletionRulesApplyConfiguration struct { + Metrics []MetricRuleApplyConfiguration `json:"metrics,omitempty"` + ScalarExpression *string `json:"expression,omitempty"` +} + +// DeletionRulesApplyConfiguration constructs a declarative configuration of the DeletionRules type for use with +// apply. +func DeletionRules() *DeletionRulesApplyConfiguration { + return &DeletionRulesApplyConfiguration{} +} + +// WithMetrics adds the given value to the Metrics field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Metrics field. 
+func (b *DeletionRulesApplyConfiguration) WithMetrics(values ...*MetricRuleApplyConfiguration) *DeletionRulesApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMetrics") + } + b.Metrics = append(b.Metrics, *values[i]) + } + return b +} + +// WithScalarExpression sets the ScalarExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScalarExpression field is set to the value of the last call. +func (b *DeletionRulesApplyConfiguration) WithScalarExpression(value string) *DeletionRulesApplyConfiguration { + b.ScalarExpression = &value + return b +} diff --git a/pkg/client/applyconfiguration/sme.sap.com/v1alpha1/deploymentdetails.go b/pkg/client/applyconfiguration/sme.sap.com/v1alpha1/deploymentdetails.go index d4fa6fb..0c55146 100644 --- a/pkg/client/applyconfiguration/sme.sap.com/v1alpha1/deploymentdetails.go +++ b/pkg/client/applyconfiguration/sme.sap.com/v1alpha1/deploymentdetails.go @@ -16,11 +16,12 @@ import ( // with apply. type DeploymentDetailsApplyConfiguration struct { CommonDetailsApplyConfiguration `json:",inline"` - Type *smesapcomv1alpha1.DeploymentType `json:"type,omitempty"` - Replicas *int32 `json:"replicas,omitempty"` - Ports []PortsApplyConfiguration `json:"ports,omitempty"` - LivenessProbe *v1.Probe `json:"livenessProbe,omitempty"` - ReadinessProbe *v1.Probe `json:"readinessProbe,omitempty"` + Type *smesapcomv1alpha1.DeploymentType `json:"type,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + Ports []PortsApplyConfiguration `json:"ports,omitempty"` + LivenessProbe *v1.Probe `json:"livenessProbe,omitempty"` + ReadinessProbe *v1.Probe `json:"readinessProbe,omitempty"` + Monitoring *WorkloadMonitoringApplyConfiguration `json:"monitoring,omitempty"` } // DeploymentDetailsApplyConfiguration constructs a declarative configuration of the DeploymentDetails type for use with @@ -229,3 +230,11 @@ func (b *DeploymentDetailsApplyConfiguration) WithReadinessProbe(value v1.Probe) b.ReadinessProbe = &value return b } + +// WithMonitoring sets the Monitoring field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Monitoring field is set to the value of the last call. +func (b *DeploymentDetailsApplyConfiguration) WithMonitoring(value *WorkloadMonitoringApplyConfiguration) *DeploymentDetailsApplyConfiguration { + b.Monitoring = value + return b +} diff --git a/pkg/client/applyconfiguration/sme.sap.com/v1alpha1/metricrule.go b/pkg/client/applyconfiguration/sme.sap.com/v1alpha1/metricrule.go new file mode 100644 index 0000000..aec1378 --- /dev/null +++ b/pkg/client/applyconfiguration/sme.sap.com/v1alpha1/metricrule.go @@ -0,0 +1,59 @@ +/* +SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and cap-operator contributors +SPDX-License-Identifier: Apache-2.0 +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/sap/cap-operator/pkg/apis/sme.sap.com/v1alpha1" +) + +// MetricRuleApplyConfiguration represents a declarative configuration of the MetricRule type for use +// with apply. 
+type MetricRuleApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Type *v1alpha1.MetricType `json:"type,omitempty"` + CalculationPeriod *v1alpha1.Duration `json:"calculationPeriod,omitempty"` + ThresholdValue *string `json:"thresholdValue,omitempty"` +} + +// MetricRuleApplyConfiguration constructs a declarative configuration of the MetricRule type for use with +// apply. +func MetricRule() *MetricRuleApplyConfiguration { + return &MetricRuleApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MetricRuleApplyConfiguration) WithName(value string) *MetricRuleApplyConfiguration { + b.Name = &value + return b +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *MetricRuleApplyConfiguration) WithType(value v1alpha1.MetricType) *MetricRuleApplyConfiguration { + b.Type = &value + return b +} + +// WithCalculationPeriod sets the CalculationPeriod field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CalculationPeriod field is set to the value of the last call. +func (b *MetricRuleApplyConfiguration) WithCalculationPeriod(value v1alpha1.Duration) *MetricRuleApplyConfiguration { + b.CalculationPeriod = &value + return b +} + +// WithThresholdValue sets the ThresholdValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ThresholdValue field is set to the value of the last call. +func (b *MetricRuleApplyConfiguration) WithThresholdValue(value string) *MetricRuleApplyConfiguration { + b.ThresholdValue = &value + return b +} diff --git a/pkg/client/applyconfiguration/sme.sap.com/v1alpha1/monitoringconfig.go b/pkg/client/applyconfiguration/sme.sap.com/v1alpha1/monitoringconfig.go new file mode 100644 index 0000000..814e64f --- /dev/null +++ b/pkg/client/applyconfiguration/sme.sap.com/v1alpha1/monitoringconfig.go @@ -0,0 +1,59 @@ +/* +SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and cap-operator contributors +SPDX-License-Identifier: Apache-2.0 +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/sap/cap-operator/pkg/apis/sme.sap.com/v1alpha1" +) + +// MonitoringConfigApplyConfiguration represents a declarative configuration of the MonitoringConfig type for use +// with apply. +type MonitoringConfigApplyConfiguration struct { + ScrapeInterval *v1alpha1.Duration `json:"interval,omitempty"` + WorkloadPort *string `json:"port,omitempty"` + Path *string `json:"path,omitempty"` + Timeout *v1alpha1.Duration `json:"scrapeTimeout,omitempty"` +} + +// MonitoringConfigApplyConfiguration constructs a declarative configuration of the MonitoringConfig type for use with +// apply. 
+func MonitoringConfig() *MonitoringConfigApplyConfiguration { + return &MonitoringConfigApplyConfiguration{} +} + +// WithScrapeInterval sets the ScrapeInterval field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeInterval field is set to the value of the last call. +func (b *MonitoringConfigApplyConfiguration) WithScrapeInterval(value v1alpha1.Duration) *MonitoringConfigApplyConfiguration { + b.ScrapeInterval = &value + return b +} + +// WithWorkloadPort sets the WorkloadPort field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WorkloadPort field is set to the value of the last call. +func (b *MonitoringConfigApplyConfiguration) WithWorkloadPort(value string) *MonitoringConfigApplyConfiguration { + b.WorkloadPort = &value + return b +} + +// WithPath sets the Path field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Path field is set to the value of the last call. +func (b *MonitoringConfigApplyConfiguration) WithPath(value string) *MonitoringConfigApplyConfiguration { + b.Path = &value + return b +} + +// WithTimeout sets the Timeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Timeout field is set to the value of the last call. +func (b *MonitoringConfigApplyConfiguration) WithTimeout(value v1alpha1.Duration) *MonitoringConfigApplyConfiguration { + b.Timeout = &value + return b +} diff --git a/pkg/client/applyconfiguration/sme.sap.com/v1alpha1/workloadmonitoring.go b/pkg/client/applyconfiguration/sme.sap.com/v1alpha1/workloadmonitoring.go new file mode 100644 index 0000000..7ae0b60 --- /dev/null +++ b/pkg/client/applyconfiguration/sme.sap.com/v1alpha1/workloadmonitoring.go @@ -0,0 +1,37 @@ +/* +SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and cap-operator contributors +SPDX-License-Identifier: Apache-2.0 +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// WorkloadMonitoringApplyConfiguration represents a declarative configuration of the WorkloadMonitoring type for use +// with apply. +type WorkloadMonitoringApplyConfiguration struct { + DeletionRules *DeletionRulesApplyConfiguration `json:"deletionRules,omitempty"` + ScrapeConfig *MonitoringConfigApplyConfiguration `json:"scrapeConfig,omitempty"` +} + +// WorkloadMonitoringApplyConfiguration constructs a declarative configuration of the WorkloadMonitoring type for use with +// apply. +func WorkloadMonitoring() *WorkloadMonitoringApplyConfiguration { + return &WorkloadMonitoringApplyConfiguration{} +} + +// WithDeletionRules sets the DeletionRules field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionRules field is set to the value of the last call. 
+func (b *WorkloadMonitoringApplyConfiguration) WithDeletionRules(value *DeletionRulesApplyConfiguration) *WorkloadMonitoringApplyConfiguration { + b.DeletionRules = value + return b +} + +// WithScrapeConfig sets the ScrapeConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ScrapeConfig field is set to the value of the last call. +func (b *WorkloadMonitoringApplyConfiguration) WithScrapeConfig(value *MonitoringConfigApplyConfiguration) *WorkloadMonitoringApplyConfiguration { + b.ScrapeConfig = value + return b +} diff --git a/pkg/client/applyconfiguration/utils.go b/pkg/client/applyconfiguration/utils.go index 410f530..3287d83 100644 --- a/pkg/client/applyconfiguration/utils.go +++ b/pkg/client/applyconfiguration/utils.go @@ -59,12 +59,18 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &smesapcomv1alpha1.CAPTenantStatusApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("CommonDetails"): return &smesapcomv1alpha1.CommonDetailsApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("DeletionRules"): + return &smesapcomv1alpha1.DeletionRulesApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("DeploymentDetails"): return &smesapcomv1alpha1.DeploymentDetailsApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("GenericStatus"): return &smesapcomv1alpha1.GenericStatusApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("JobDetails"): return &smesapcomv1alpha1.JobDetailsApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("MetricRule"): + return &smesapcomv1alpha1.MetricRuleApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("MonitoringConfig"): + return &smesapcomv1alpha1.MonitoringConfigApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("NameValue"): return &smesapcomv1alpha1.NameValueApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("Ports"): @@ -77,6 +83,8 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &smesapcomv1alpha1.TenantOperationWorkloadReferenceApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("WorkloadDetails"): return &smesapcomv1alpha1.WorkloadDetailsApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("WorkloadMonitoring"): + return &smesapcomv1alpha1.WorkloadMonitoringApplyConfiguration{} } return nil
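
A minimal sketch (not part of the patch) of how the generated apply-configuration builders introduced above can be composed into a workload monitoring specification. It only uses constructors and With* methods defined in this change; the import paths are assumptions derived from the module path github.com/sap/cap-operator and the file locations shown here.

package main

import (
	"fmt"

	smev1alpha1 "github.com/sap/cap-operator/pkg/apis/sme.sap.com/v1alpha1"
	applyv1alpha1 "github.com/sap/cap-operator/pkg/client/applyconfiguration/sme.sap.com/v1alpha1"
)

func main() {
	// Deletion rule: the workload is considered ready for cleanup once the summed
	// request rate over a 2m window drops to 0.01 or below.
	rules := applyv1alpha1.DeletionRules().
		WithMetrics(applyv1alpha1.MetricRule().
			WithName("total_http_requests").
			WithType(smev1alpha1.MetricTypeCounter).
			WithCalculationPeriod(smev1alpha1.Duration("2m")).
			WithThresholdValue("0.01"))

	// Scrape configuration: used to create a ServiceMonitor for the workload
	// service, scraping the named port every 10s on /metrics.
	scrape := applyv1alpha1.MonitoringConfig().
		WithWorkloadPort("metrics-port").
		WithScrapeInterval(smev1alpha1.Duration("10s")).
		WithPath("/metrics")

	monitoring := applyv1alpha1.WorkloadMonitoring().
		WithDeletionRules(rules).
		WithScrapeConfig(scrape)

	fmt.Printf("%+v\n", *monitoring)
}

The resulting WorkloadMonitoringApplyConfiguration can be attached to a deployment workload via DeploymentDetailsApplyConfiguration.WithMonitoring, mirroring the declarative `monitoring` blocks in the CAPApplicationVersion test data above.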
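
To make the rule evaluation concrete, the following standalone sketch shows how the metric rules from cav-v1-deletion-rules.yaml expand into the PromQL queries submitted by the cleanup routine. The format strings mirror GaugeEvaluationExpression and CounterEvaluationExpression in internal/controller/version-monitoring.go, and the printed queries match the expected queries asserted in version-monitoring_test.go; the returned sample is then compared against the rule's thresholdValue in Go (sample <= threshold).

package main

import "fmt"

// Mirrors the evaluation expressions in version-monitoring.go: the metric is
// restricted to the workload service via the `job` label and to the version's
// namespace, over the rule's calculation period.
const (
	gaugeExpr   = `sum(avg_over_time(%s{job="%s",namespace="%s"}[%s]))`
	counterExpr = `sum(rate(%s{job="%s",namespace="%s"}[%s]))`
)

func main() {
	job, ns := "test-cap-01-cav-v1-cap-backend-srv-svc", "default"

	// Counter rule (total_http_requests, 2m window, threshold 0.01): rate of increase.
	fmt.Println(fmt.Sprintf(counterExpr, "total_http_requests", job, ns, "2m"))

	// Gauge rule (active_jobs, 3m window, threshold 0): average over time.
	fmt.Println(fmt.Sprintf(gaugeExpr, "active_jobs", job, ns, "3m"))
}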