From e810520961663f96cc77339d848186039fcb6eb3 Mon Sep 17 00:00:00 2001
From: Jian Qiu
Date: Tue, 25 Jul 2023 13:12:34 +0800
Subject: [PATCH] =?UTF-8?q?=F0=9F=8C=B1=20Refactor=20code=20to=20fix=20lin?=
 =?UTF-8?q?t=20warning=20(#218)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Refactor code to fix lint warning

Signed-off-by: Jian Qiu

* enable lint for testing files

Signed-off-by: Jian Qiu

---------

Signed-off-by: Jian Qiu
---
 .golangci.yaml | 2 +-
 .../addon_configuration_reconciler_test.go | 5 +-
 .../mgmt_addon_progressing_reconciler_test.go | 16 +-
 .../addonprogressing/controller_test.go | 5 -
 pkg/addon/templateagent/registration_test.go | 9 +-
 pkg/common/apply/rbac.go | 4 +-
 pkg/common/apply/rbac_test.go | 15 +-
 pkg/common/options/agent.go | 2 +-
 pkg/common/options/options.go | 2 +-
 pkg/common/options/options_test.go | 40 +++--
 pkg/common/patcher/patcher.go | 11 +-
 pkg/common/patcher/patcher_test.go | 4 +-
 pkg/operator/certrotation/cabundle.go | 4 +-
 pkg/operator/certrotation/cabundle_test.go | 2 +-
 pkg/operator/certrotation/target_test.go | 4 +-
 pkg/operator/helpers/helpers.go | 4 +-
 pkg/operator/helpers/helpers_test.go | 103 ++++++-----
 pkg/operator/helpers/sa_syncer.go | 4 +-
 pkg/operator/helpers/sa_syncer_test.go | 5 +-
 .../certrotation_controller.go | 4 +-
 .../clustermanager_controller.go | 4 +-
 .../clustermanager_controller_test.go | 26 +--
 .../crd_status_controller.go | 2 +-
 .../migration_controller.go | 9 +-
 .../migration_controller_test.go | 8 +-
 .../clustermanager_status_controller.go | 8 +-
 .../bootstrapcontroller.go | 17 +-
 .../bootstrapcontroller_test.go | 11 +-
 .../klusterlet_cleanup_controller.go | 2 +-
 .../klusterlet_cleanup_controller_test.go | 8 +-
 .../klusterlet_controller.go | 2 +-
 .../klusterlet_controller_test.go | 88 +++++----
 .../klusterlet_managed_reconcile.go | 4 +-
 .../klusterlet_ssar_controller.go | 1 -
 .../klusterlet_status_controller.go | 2 +-
 pkg/placement/controllers/manager.go | 2 +-
 .../scheduling/cluster_event_handler.go | 2 +-
 .../scheduling/cluster_event_handler_test.go | 6 +-
 .../controllers/scheduling/enqueue_test.go | 34 ++--
 .../controllers/scheduling/schedule.go | 8 +-
 .../controllers/scheduling/schedule_test.go | 94 +++++++---
 .../scheduling/scheduling_controller.go | 41 ++---
 .../scheduling/scheduling_controller_test.go | 27 ++-
 pkg/placement/debugger/debugger.go | 2 +-
 pkg/placement/debugger/debugger_test.go | 6 +-
 pkg/placement/helpers/clusters_test.go | 2 +-
 pkg/placement/helpers/testing/builders.go | 96 +++++-----
 .../clientcert/cert_controller.go | 8 -
 pkg/registration/clientcert/certificate.go | 4 +-
 .../clientcert/certificate_test.go | 34 ++--
 .../clientcert/controller_test.go | 9 +-
 pkg/registration/helpers/helpers_test.go | 10 --
 .../hub/addon/discovery_controller_test.go | 2 +-
 .../hub/addon/healthcheck_controller.go | 4 +-
 .../hub/clusterrole/controller.go | 2 +-
 pkg/registration/hub/lease/controller.go | 2 +-
 .../hub/managedcluster/controller.go | 4 +-
 .../hub/managedclusterset/controller.go | 16 +-
 .../hub/managedclusterset/controller_test.go | 30 ++--
 ...fault_managedclusterset_controller_test.go | 10 +-
 ...lobal_managedclusterset_controller_test.go | 10 +-
 .../controller_test.go | 4 +-
 .../hub/rbacfinalizerdeletion/controller.go | 2 +-
 .../rbacfinalizerdeletion/controller_test.go | 2 +-
 .../spoke/addon/configuration_test.go | 18 +-
 .../spoke/addon/registration_controller.go | 10 +-
 .../addon/registration_controller_test.go | 94 +++++-----
 .../spoke/lease/lease_controller_test.go | 3 +-
 .../spoke/managedcluster/claim_reconcile.go | 3 +-
 .../managedcluster/claim_reconcile_test.go | 5 +-
 .../managedcluster/joining_controller_test.go | 3 +-
 .../managedcluster/resource_reconcile_test.go | 5 +-
 .../spoke/managedcluster/status_controller.go | 2 +-
 .../spoke/registration/secret_controller.go | 7 +-
 .../registration/secret_controller_test.go | 12 +-
 pkg/work/helper/helper_test.go | 4 +-
 .../manifestworkreplicaset_controller.go | 2 +-
 ...manifestworkreplicaset_controllers_test.go | 24 ++-
 ...manifestworkreplicaset_deploy_reconcile.go | 10 +-
 .../manifestworkreplicaset_deploy_test.go | 6 +-
 ...nifestworkreplicaset_finalize_reconcile.go | 8 +-
 .../spoke/apply/create_only_apply_test.go | 8 +-
 pkg/work/spoke/apply/server_side_apply.go | 2 +-
 .../spoke/apply/server_side_apply_test.go | 8 +-
 pkg/work/spoke/apply/update_apply_test.go | 46 ++---
 pkg/work/spoke/auth/basic/auth_test.go | 4 +-
 pkg/work/spoke/auth/cache/auth_test.go | 28 +--
 .../cache/executor_cache_controller_test.go | 11 +-
 ...appliedmanifestwork_finalize_controller.go | 2 +-
 .../manifestwork_finalize_controller_test.go | 3 +-
 .../manifestwork_controller.go | 6 +-
 .../manifestwork_controller_test.go | 170 ++++++++++++------
 .../availablestatus_controller.go | 4 +-
 .../availablestatus_controller_test.go | 6 +-
 pkg/work/spoke/statusfeedback/reader.go | 4 +-
 pkg/work/webhook/start.go | 14 +-
 .../webhook/v1/manifestwork_validating.go | 2 +-
 .../v1/manifestwork_validating_test.go | 9 +-
 .../manifestworkreplicaset_validating.go | 4 +-
 pkg/work/webhook/v1alpha1/webhook.go | 2 +-
 test/benchmark/placement/benchmark_test.go | 2 -
 test/e2e/addon_lease_test.go | 16 +-
 test/e2e/addonmanagement_test.go | 3 +-
 test/e2e/clusterset_test.go | 6 +-
 test/e2e/managedcluster_loopback_test.go | 3 +-
 test/e2e/managedclusterset_test.go | 6 +-
 test/e2e/manifestworkreplicaset_test.go | 14 +-
 test/e2e/placement_test.go | 2 +-
 test/e2e/registration_webhook_test.go | 14 +-
 test/e2e/work_webhook_test.go | 2 +-
 test/e2e/work_workload_test.go | 4 +-
 test/integration/addon/addon_configs_test.go | 3 +-
 .../addon/addon_manager_upgrade_test.go | 44 +++--
 test/integration/addon/agent_deploy_test.go | 32 ++--
 test/integration/addon/assertion_test.go | 48 +++--
 .../operator/clustermanager_hosted_test.go | 25 +--
 .../operator/clustermanager_test.go | 20 ++-
 .../operator/integration_suite_test.go | 2 +-
 .../operator/klusterlet_hosted_test.go | 12 +-
 .../operator/klusterlet_singleton_test.go | 6 +-
 test/integration/operator/klusterlet_test.go | 51 ++++--
 test/integration/placement/assertion_test.go | 15 +-
 test/integration/placement/placement_test.go | 35 ++--
 .../integration/placement/prioritizer_test.go | 3 -
 test/integration/placement/toleration_test.go | 2 -
 .../registration/addon_lease_test.go | 8 +-
 .../registration/addon_registration_test.go | 22 +--
 .../registration/certificate_rotation_test.go | 1 +
 .../registration/disaster_recovery_test.go | 25 ++-
 ...lobal_managedclusterset_controller_test.go | 12 +-
 .../registration/managedclusterset_test.go | 28 +--
 .../registration/spokeagent_recovery_test.go | 2 +
 .../registration/spokeagent_restart_test.go | 4 +-
 .../spokecluster_autoapproval_test.go | 1 +
 .../registration/spokecluster_claim_test.go | 8 +-
 .../registration/spokecluster_joining_test.go | 2 +-
 .../registration/spokecluster_status_test.go | 7 +-
 test/integration/util/authentication.go | 2 +-
 test/integration/work/deleteoption_test.go | 75 ++++----
 test/integration/work/executor_test.go | 67 +++----
 .../work/manifestworkreplicaset_test.go | 14 +-
test/integration/work/statusfeedback_test.go | 4 +- test/integration/work/suite_test.go | 1 + .../work/unmanaged_appliedwork_test.go | 9 +- test/integration/work/work_test.go | 32 ++-- 145 files changed, 1218 insertions(+), 935 deletions(-) diff --git a/.golangci.yaml b/.golangci.yaml index bfe407f6e..68e73363b 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -23,7 +23,6 @@ run: skip-files: - ".*\\.pb\\.go" - ".*\\.gen\\.go" - - ".*_test\\.go" linters: # please, do not use `enable-all`: it's deprecated and will be removed soon. @@ -220,6 +219,7 @@ issues: linters: - errcheck - maligned + - goconst # Independently from option `exclude` we use default exclude patterns, # it can be disabled by this option. To list all diff --git a/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler_test.go b/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler_test.go index 737a3ec19..37fa7d325 100644 --- a/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler_test.go +++ b/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler_test.go @@ -609,7 +609,10 @@ func (a byPatchName) Less(i, j int) bool { return patchi.Namespace < patchj.Namespace } -func newManagedClusterAddon(name, namespace string, configs []addonv1alpha1.AddOnConfig, configStatus []addonv1alpha1.ConfigReference) *addonv1alpha1.ManagedClusterAddOn { +func newManagedClusterAddon( + name, namespace string, + configs []addonv1alpha1.AddOnConfig, + configStatus []addonv1alpha1.ConfigReference) *addonv1alpha1.ManagedClusterAddOn { mca := addontesting.NewAddon(name, namespace) mca.Spec.Configs = configs mca.Status.ConfigReferences = configStatus diff --git a/pkg/addon/controllers/addonconfiguration/mgmt_addon_progressing_reconciler_test.go b/pkg/addon/controllers/addonconfiguration/mgmt_addon_progressing_reconciler_test.go index fd86b93a3..2e70e0714 100644 --- a/pkg/addon/controllers/addonconfiguration/mgmt_addon_progressing_reconciler_test.go +++ b/pkg/addon/controllers/addonconfiguration/mgmt_addon_progressing_reconciler_test.go @@ -240,10 +240,14 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) { if len(cma.Status.DefaultConfigReferences) != 0 { t.Errorf("DefaultConfigReferences object is not correct: %v", cma.Status.DefaultConfigReferences) } - if !apiequality.Semantic.DeepEqual(cma.Status.InstallProgressions[0].ConfigReferences[0].LastAppliedConfig, cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) { + if !apiequality.Semantic.DeepEqual( + cma.Status.InstallProgressions[0].ConfigReferences[0].LastAppliedConfig, + cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) { t.Errorf("InstallProgressions LastAppliedConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) } - if !apiequality.Semantic.DeepEqual(cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig, cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) { + if !apiequality.Semantic.DeepEqual( + cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig, + cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) { t.Errorf("InstallProgressions LastKnownGoodConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) } if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonInstallSucceed { @@ -389,10 +393,14 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) { if len(cma.Status.DefaultConfigReferences) != 0 { 
t.Errorf("DefaultConfigReferences object is not correct: %v", cma.Status.DefaultConfigReferences) } - if !apiequality.Semantic.DeepEqual(cma.Status.InstallProgressions[0].ConfigReferences[0].LastAppliedConfig, cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) { + if !apiequality.Semantic.DeepEqual( + cma.Status.InstallProgressions[0].ConfigReferences[0].LastAppliedConfig, + cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) { t.Errorf("InstallProgressions LastAppliedConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) } - if !apiequality.Semantic.DeepEqual(cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig, cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) { + if !apiequality.Semantic.DeepEqual( + cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig, + cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) { t.Errorf("InstallProgressions LastKnownGoodConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) } if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonUpgradeSucceed { diff --git a/pkg/addon/controllers/addonprogressing/controller_test.go b/pkg/addon/controllers/addonprogressing/controller_test.go index 63b12ceb2..fcbf740a0 100644 --- a/pkg/addon/controllers/addonprogressing/controller_test.go +++ b/pkg/addon/controllers/addonprogressing/controller_test.go @@ -25,11 +25,6 @@ import ( testingcommon "open-cluster-management.io/ocm/pkg/common/testing" ) -func newClusterManagementOwner(name string) metav1.OwnerReference { - clusterManagementAddon := addontesting.NewClusterManagementAddon(name, "testcrd", "testcr").Build() - return *metav1.NewControllerRef(clusterManagementAddon, addonapiv1alpha1.GroupVersion.WithKind("ClusterManagementAddOn")) -} - func TestReconcile(t *testing.T) { cases := []struct { name string diff --git a/pkg/addon/templateagent/registration_test.go b/pkg/addon/templateagent/registration_test.go index 4ce58dca1..e3d943d30 100644 --- a/pkg/addon/templateagent/registration_test.go +++ b/pkg/addon/templateagent/registration_test.go @@ -9,6 +9,7 @@ import ( "time" certificatesv1 "k8s.io/api/certificates/v1" + certificates "k8s.io/api/certificates/v1beta1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -66,7 +67,7 @@ func TestTemplateCSRConfigurationsFunc(t *testing.T) { addon: NewFakeTemplateManagedClusterAddon("addon1", "cluster1", "template1", "fakehash"), expectedConfigs: []addonapiv1alpha1.RegistrationConfig{ { - SignerName: "kubernetes.io/kube-apiserver-client", + SignerName: certificates.KubeAPIServerClientSignerName, Subject: addonapiv1alpha1.Subject{ User: "system:open-cluster-management:cluster:cluster1:addon:addon1:agent:agent1", @@ -188,7 +189,7 @@ func TestTemplateCSRApproveCheckFunc(t *testing.T) { Name: "csr1", }, Spec: certificatesv1.CertificateSigningRequestSpec{ - SignerName: "kubernetes.io/kube-apiserver-client", + SignerName: certificates.KubeAPIServerClientSignerName, }, }, expectedApprove: false, // fake csr data @@ -288,7 +289,7 @@ func TestTemplateCSRSignFunc(t *testing.T) { Name: "csr1", }, Spec: certificatesv1.CertificateSigningRequestSpec{ - SignerName: "kubernetes.io/kube-apiserver-client", + SignerName: certificates.KubeAPIServerClientSignerName, Username: "system:open-cluster-management:cluster1:adcde", }, }, @@ -356,7 +357,7 @@ func NewFakeManagedCluster(name 
string) *clusterv1.ManagedCluster { return &clusterv1.ManagedCluster{ TypeMeta: metav1.TypeMeta{ Kind: "ManagedCluster", - APIVersion: clusterv1.SchemeGroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Name: name, diff --git a/pkg/common/apply/rbac.go b/pkg/common/apply/rbac.go index bc9c72691..ef0b31218 100644 --- a/pkg/common/apply/rbac.go +++ b/pkg/common/apply/rbac.go @@ -43,7 +43,7 @@ func (m *PermissionApplier) Apply( recorder events.Recorder, manifests resourceapply.AssetFunc, files ...string) []resourceapply.ApplyResult { - ret := []resourceapply.ApplyResult{} + var ret []resourceapply.ApplyResult for _, file := range files { result := resourceapply.ApplyResult{File: file} objBytes, err := manifests(file) @@ -73,7 +73,7 @@ func (m *PermissionApplier) Apply( result.Result, result.Changed, result.Error = Apply[*rbacv1.RoleBinding]( ctx, m.roleBindingLister.RoleBindings(t.Namespace), m.client.RbacV1().RoleBindings(t.Namespace), compareRoleBinding, t, recorder) default: - result.Error = fmt.Errorf("object type is not correct.") + result.Error = fmt.Errorf("object type is not correct") } } return ret diff --git a/pkg/common/apply/rbac_test.go b/pkg/common/apply/rbac_test.go index 93717f112..f8516341a 100644 --- a/pkg/common/apply/rbac_test.go +++ b/pkg/common/apply/rbac_test.go @@ -65,7 +65,7 @@ rules: }, }, { - name: "comapre and no update clusterrole", + name: "compare and no update clusterrole", existingManifest: ` apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -220,7 +220,7 @@ rules: }, }, { - name: "comapre and no update clusterrole", + name: "compare and no update clusterrole", existingManifest: ` apiVersion: rbac.authorization.k8s.io/v1 kind: Role @@ -351,13 +351,16 @@ subjects: informerFactory = informers.NewSharedInformerFactory(kubeClient, 3*time.Minute) switch t := o.(type) { case *rbacv1.ClusterRole: - informerFactory.Rbac().V1().ClusterRoles().Informer().GetStore().Add(t) + err = informerFactory.Rbac().V1().ClusterRoles().Informer().GetStore().Add(t) case *rbacv1.ClusterRoleBinding: - informerFactory.Rbac().V1().ClusterRoleBindings().Informer().GetStore().Add(t) + err = informerFactory.Rbac().V1().ClusterRoleBindings().Informer().GetStore().Add(t) case *rbacv1.Role: - informerFactory.Rbac().V1().Roles().Informer().GetStore().Add(t) + err = informerFactory.Rbac().V1().Roles().Informer().GetStore().Add(t) case *rbacv1.RoleBinding: - informerFactory.Rbac().V1().RoleBindings().Informer().GetStore().Add(t) + err = informerFactory.Rbac().V1().RoleBindings().Informer().GetStore().Add(t) + } + if err != nil { + t.Fatal(err) } } else { kubeClient = kubefake.NewSimpleClientset() diff --git a/pkg/common/options/agent.go b/pkg/common/options/agent.go index 63ee288f5..27dba37b9 100644 --- a/pkg/common/options/agent.go +++ b/pkg/common/options/agent.go @@ -65,7 +65,7 @@ func (o *AgentOptions) AddFlags(flags *pflag.FlagSet) { flags.StringVar(&o.AgentID, "agent-id", o.AgentID, "ID of the agent") } -// spokeKubeConfig builds kubeconfig for the spoke/managed cluster +// SpokeKubeConfig builds kubeconfig for the spoke/managed cluster func (o *AgentOptions) SpokeKubeConfig(managedRestConfig *rest.Config) (*rest.Config, error) { if o.SpokeKubeconfigFile == "" { managedRestConfig.QPS = o.CommoOpts.QPS diff --git a/pkg/common/options/options.go b/pkg/common/options/options.go index a985ed9dd..09a483153 100644 --- a/pkg/common/options/options.go +++ b/pkg/common/options/options.go @@ -14,7 +14,7 @@ type Options struct { QPS 
float32 } -// NewAgentOptions returns the flags with default value set +// NewOptions returns the flags with default value set func NewOptions() *Options { opts := &Options{ QPS: 50, diff --git a/pkg/common/options/options_test.go b/pkg/common/options/options_test.go index 257a35018..e9c877877 100644 --- a/pkg/common/options/options_test.go +++ b/pkg/common/options/options_test.go @@ -58,35 +58,42 @@ func TestComplete(t *testing.T) { { name: "override cluster name in cert with specified value", clusterName: "cluster1", - secret: testinghelpers.NewHubKubeconfigSecret(componentNamespace, "hub-kubeconfig-secret", "", testinghelpers.NewTestCert("system:open-cluster-management:cluster2:agent2", 60*time.Second), map[string][]byte{ - "kubeconfig": testinghelpers.NewKubeconfig(nil, nil), - "cluster-name": []byte("cluster3"), - "agent-name": []byte("agent3"), - }), + secret: testinghelpers.NewHubKubeconfigSecret( + componentNamespace, "hub-kubeconfig-secret", "", + testinghelpers.NewTestCert("system:open-cluster-management:cluster2:agent2", 60*time.Second), map[string][]byte{ + "kubeconfig": testinghelpers.NewKubeconfig(nil, nil), + "cluster-name": []byte("cluster3"), + "agent-name": []byte("agent3"), + }), expectedClusterName: "cluster1", expectedAgentName: "agent2", }, { name: "take cluster/agent name from secret", - secret: testinghelpers.NewHubKubeconfigSecret(componentNamespace, "hub-kubeconfig-secret", "", nil, map[string][]byte{ - "cluster-name": []byte("cluster1"), - "agent-name": []byte("agent1"), - }), + secret: testinghelpers.NewHubKubeconfigSecret( + componentNamespace, "hub-kubeconfig-secret", "", nil, map[string][]byte{ + "cluster-name": []byte("cluster1"), + "agent-name": []byte("agent1"), + }), expectedClusterName: "cluster1", expectedAgentName: "agent1", }, { - name: "take cluster/agent name from cert", - secret: testinghelpers.NewHubKubeconfigSecret(componentNamespace, "hub-kubeconfig-secret", "", testinghelpers.NewTestCert("system:open-cluster-management:cluster1:agent1", 60*time.Second), map[string][]byte{}), + name: "take cluster/agent name from cert", + secret: testinghelpers.NewHubKubeconfigSecret( + componentNamespace, "hub-kubeconfig-secret", "", + testinghelpers.NewTestCert("system:open-cluster-management:cluster1:agent1", 60*time.Second), map[string][]byte{}), expectedClusterName: "cluster1", expectedAgentName: "agent1", }, { name: "override cluster name in secret with value from cert", - secret: testinghelpers.NewHubKubeconfigSecret(componentNamespace, "hub-kubeconfig-secret", "", testinghelpers.NewTestCert("system:open-cluster-management:cluster1:agent1", 60*time.Second), map[string][]byte{ - "cluster-name": []byte("cluster2"), - "agent-name": []byte("agent2"), - }), + secret: testinghelpers.NewHubKubeconfigSecret( + componentNamespace, "hub-kubeconfig-secret", "", + testinghelpers.NewTestCert("system:open-cluster-management:cluster1:agent1", 60*time.Second), map[string][]byte{ + "cluster-name": []byte("cluster2"), + "agent-name": []byte("agent2"), + }), expectedClusterName: "cluster1", expectedAgentName: "agent1", }, @@ -115,6 +122,9 @@ func TestComplete(t *testing.T) { err = registration.DumpSecret( kubeClient.CoreV1(), componentNamespace, "hub-kubeconfig-secret", options.HubKubeconfigDir, context.TODO(), eventstesting.NewTestingEventRecorder(t)) + if err != nil { + t.Error(err) + } if err := options.Complete(); err != nil { t.Errorf("unexpected error: %v", err) diff --git a/pkg/common/patcher/patcher.go b/pkg/common/patcher/patcher.go index d78e4d6a5..84429538f 
100644 --- a/pkg/common/patcher/patcher.go +++ b/pkg/common/patcher/patcher.go @@ -16,7 +16,7 @@ import ( "k8s.io/klog/v2" ) -// Patcher is just the Patch API with a generic to keep use sites type safe. +// PatchClient is just the Patch API with a generic to keep use sites type safe. // This is inspired by the commiter code in https://github.com/kcp-dev/kcp/blob/main/pkg/reconciler/committer/committer.go type PatchClient[R runtime.Object] interface { Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (R, error) @@ -28,6 +28,7 @@ type Patcher[R runtime.Object, Sp any, St any] interface { PatchStatus(context.Context, R, St, St) (bool, error) PatchSpec(context.Context, R, Sp, Sp) (bool, error) PatchLabelAnnotations(context.Context, R, metav1.ObjectMeta, metav1.ObjectMeta) (bool, error) + WithOptions(options PatchOptions) Patcher[R, Sp, St] } type PatchOptions struct { @@ -47,14 +48,14 @@ type patcher[R runtime.Object, Sp any, St any] struct { opts PatchOptions } -func NewPatcher[R runtime.Object, Sp any, St any](client PatchClient[R]) *patcher[R, Sp, St] { +func NewPatcher[R runtime.Object, Sp any, St any](client PatchClient[R]) Patcher[R, Sp, St] { p := &patcher[R, Sp, St]{ client: client, } return p } -func (p *patcher[R, Sp, St]) WithOptions(options PatchOptions) *patcher[R, Sp, St] { +func (p *patcher[R, Sp, St]) WithOptions(options PatchOptions) Patcher[R, Sp, St] { p.opts = options return p } @@ -66,7 +67,7 @@ func (p *patcher[R, Sp, St]) AddFinalizer(ctx context.Context, object R, finaliz } existingFinalizers := accessor.GetFinalizers() - finalizersToAdd := []string{} + var finalizersToAdd []string for _, finalizer := range finalizers { hasFinalizer := false for i := range existingFinalizers { @@ -120,7 +121,7 @@ func (p *patcher[R, Sp, St]) RemoveFinalizer(ctx context.Context, object R, fina return err } - copiedFinalizers := []string{} + var copiedFinalizers []string existingFinalizers := accessor.GetFinalizers() for i := range existingFinalizers { matchFinalizer := false diff --git a/pkg/common/patcher/patcher_test.go b/pkg/common/patcher/patcher_test.go index 356f80a70..d7aaef818 100644 --- a/pkg/common/patcher/patcher_test.go +++ b/pkg/common/patcher/patcher_test.go @@ -328,7 +328,9 @@ func TestPatchLabelAnnotations(t *testing.T) { if err != nil { t.Fatal(err) } - if !equality.Semantic.DeepEqual(labelPatch["metadata"], map[string]interface{}{"uid": "", "resourceVersion": "", "labels": map[string]interface{}{"key1": nil}}) { + if !equality.Semantic.DeepEqual( + labelPatch["metadata"], + map[string]interface{}{"uid": "", "resourceVersion": "", "labels": map[string]interface{}{"key1": nil}}) { t.Errorf("not patched correctly got %v", labelPatch) } }, diff --git a/pkg/operator/certrotation/cabundle.go b/pkg/operator/certrotation/cabundle.go index 517d3e851..96e71bf33 100644 --- a/pkg/operator/certrotation/cabundle.go +++ b/pkg/operator/certrotation/cabundle.go @@ -71,7 +71,7 @@ func manageCABundleConfigMap(caBundleConfigMap *corev1.ConfigMap, currentSigner caBundleConfigMap.Data = map[string]string{} } - certificates := []*x509.Certificate{} + var certificates []*x509.Certificate caBundle := caBundleConfigMap.Data["ca-bundle.crt"] if len(caBundle) > 0 { var err error @@ -83,7 +83,7 @@ func manageCABundleConfigMap(caBundleConfigMap *corev1.ConfigMap, currentSigner certificates = append([]*x509.Certificate{currentSigner}, certificates...) certificates = crypto.FilterExpiredCerts(certificates...) 
- finalCertificates := []*x509.Certificate{} + var finalCertificates []*x509.Certificate // now check for duplicates. n^2, but super simple for i := range certificates { found := false diff --git a/pkg/operator/certrotation/cabundle_test.go b/pkg/operator/certrotation/cabundle_test.go index 11cd23c22..60ba3f04e 100644 --- a/pkg/operator/certrotation/cabundle_test.go +++ b/pkg/operator/certrotation/cabundle_test.go @@ -85,7 +85,7 @@ func TestManageCABundleConfigMap(t *testing.T) { } if !reflect.DeepEqual(c.signerCert, caCerts[0]) { - t.Fatalf("Current signer cert should be put at the begining") + t.Fatalf("Current signer cert should be put at the beginning") } } }) diff --git a/pkg/operator/certrotation/target_test.go b/pkg/operator/certrotation/target_test.go index 1ee6b0c76..8c02ddf42 100644 --- a/pkg/operator/certrotation/target_test.go +++ b/pkg/operator/certrotation/target_test.go @@ -167,9 +167,9 @@ func TestNeedNewTargetCertKeyPair(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - caBundleCerts := []*x509.Certificate{} + var caBundleCerts []*x509.Certificate if len(c.caBundle) > 0 { - caBundleCerts, err = cert.ParseCertsPEM([]byte(c.caBundle)) + caBundleCerts, err = cert.ParseCertsPEM(c.caBundle) if err != nil { t.Fatalf("Expected no error, but got: %v", err) } diff --git a/pkg/operator/helpers/helpers.go b/pkg/operator/helpers/helpers.go index 06d9c4c59..21c2a99fe 100644 --- a/pkg/operator/helpers/helpers.go +++ b/pkg/operator/helpers/helpers.go @@ -264,9 +264,9 @@ func ApplyDirectly( cache resourceapply.ResourceCache, manifests resourceapply.AssetFunc, files ...string) []resourceapply.ApplyResult { - ret := []resourceapply.ApplyResult{} + var ret []resourceapply.ApplyResult - genericApplyFiles := []string{} + var genericApplyFiles []string for _, file := range files { result := resourceapply.ApplyResult{File: file} objBytes, err := manifests(file) diff --git a/pkg/operator/helpers/helpers_test.go b/pkg/operator/helpers/helpers_test.go index deca53300..b55a51faf 100644 --- a/pkg/operator/helpers/helpers_test.go +++ b/pkg/operator/helpers/helpers_test.go @@ -22,7 +22,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/diff" "k8s.io/apimachinery/pkg/util/version" fakekube "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/rest" @@ -38,6 +37,8 @@ import ( "open-cluster-management.io/ocm/manifests" ) +const nameFoo = "foo" + func newValidatingWebhookConfiguration(name, svc, svcNameSpace string) *admissionv1.ValidatingWebhookConfiguration { return &admissionv1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ @@ -198,9 +199,14 @@ func TestApplyDirectly(t *testing.T) { { name: "Apply webhooks & secret", applyFiles: map[string]runtime.Object{ - "validatingwebhooks": newUnstructured("admissionregistration.k8s.io/v1", "ValidatingWebhookConfiguration", "", "", map[string]interface{}{"webhooks": []interface{}{}}), - "mutatingwebhooks": newUnstructured("admissionregistration.k8s.io/v1", "MutatingWebhookConfiguration", "", "", map[string]interface{}{"webhooks": []interface{}{}}), - "secret": newUnstructured("v1", "Secret", "ns1", "n1", map[string]interface{}{"data": map[string]interface{}{"key1": []byte("key1")}}), + "validatingwebhooks": newUnstructured( + "admissionregistration.k8s.io/v1", "ValidatingWebhookConfiguration", "", "", + map[string]interface{}{"webhooks": []interface{}{}}), + "mutatingwebhooks": newUnstructured( + 
"admissionregistration.k8s.io/v1", "MutatingWebhookConfiguration", "", "", + map[string]interface{}{"webhooks": []interface{}{}}), + "secret": newUnstructured( + "v1", "Secret", "ns1", "n1", map[string]interface{}{"data": map[string]interface{}{"key1": []byte("key1")}}), }, applyFileNames: []string{"validatingwebhooks", "mutatingwebhooks", "secret"}, expectErr: false, @@ -238,7 +244,7 @@ func TestApplyDirectly(t *testing.T) { fakeExtensionClient := fakeapiextensions.NewSimpleClientset() fakeApplyFunc := func(name string) ([]byte, error) { if c.applyFiles[name] == nil { - return nil, fmt.Errorf("Failed to find file") + return nil, fmt.Errorf("failed to find file") } return json.Marshal(c.applyFiles[name]) @@ -267,7 +273,7 @@ func TestApplyDirectly(t *testing.T) { ) } - aggregatedErr := []error{} + var aggregatedErr []error for _, r := range results { if r.Error != nil { aggregatedErr = append(aggregatedErr, r.Error) @@ -286,11 +292,18 @@ func TestApplyDirectly(t *testing.T) { func TestDeleteStaticObject(t *testing.T) { applyFiles := map[string]runtime.Object{ - "validatingwebhooks": newUnstructured("admissionregistration.k8s.io/v1", "ValidatingWebhookConfiguration", "", "", map[string]interface{}{"webhooks": []interface{}{}}), - "mutatingwebhooks": newUnstructured("admissionregistration.k8s.io/v1", "MutatingWebhookConfiguration", "", "", map[string]interface{}{"webhooks": []interface{}{}}), - "secret": newUnstructured("v1", "Secret", "ns1", "n1", map[string]interface{}{"data": map[string]interface{}{"key1": []byte("key1")}}), - "crd": newUnstructured("apiextensions.k8s.io/v1beta1", "CustomResourceDefinition", "", "", map[string]interface{}{}), - "kind1": newUnstructured("v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": []byte("key1")}}), + "validatingwebhooks": newUnstructured( + "admissionregistration.k8s.io/v1", "ValidatingWebhookConfiguration", "", "", + map[string]interface{}{"webhooks": []interface{}{}}), + "mutatingwebhooks": newUnstructured( + "admissionregistration.k8s.io/v1", "MutatingWebhookConfiguration", "", "", + map[string]interface{}{"webhooks": []interface{}{}}), + "secret": newUnstructured( + "v1", "Secret", "ns1", "n1", map[string]interface{}{"data": map[string]interface{}{"key1": []byte("key1")}}), + "crd": newUnstructured( + "apiextensions.k8s.io/v1beta1", "CustomResourceDefinition", "", "", map[string]interface{}{}), + "kind1": newUnstructured( + "v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": []byte("key1")}}), } testcase := []struct { name string @@ -338,7 +351,7 @@ func TestDeleteStaticObject(t *testing.T) { fakeExtensionClient := fakeapiextensions.NewSimpleClientset() fakeAssetFunc := func(name string) ([]byte, error) { if applyFiles[name] == nil { - return nil, fmt.Errorf("Failed to find file") + return nil, fmt.Errorf("failed to find file") } return json.Marshal(applyFiles[name]) @@ -390,8 +403,9 @@ func TestLoadClientConfigFromSecret(t *testing.T) { secret: newKubeConfigSecret("ns1", "secret1", newKubeConfig("testhost", "", ""), nil, nil), }, { - name: "load kubeconfig with references to external key/cert files", - secret: newKubeConfigSecret("ns1", "secret1", newKubeConfig("testhost", "tls.crt", "tls.key"), []byte("--- TRUNCATED ---"), []byte("--- REDACTED ---")), + name: "load kubeconfig with references to external key/cert files", + secret: newKubeConfigSecret("ns1", "secret1", + newKubeConfig("testhost", "tls.crt", "tls.key"), []byte("--- TRUNCATED ---"), []byte("--- REDACTED 
---")), expectedCertData: []byte("--- TRUNCATED ---"), expectedKeyData: []byte("--- REDACTED ---"), }, @@ -631,13 +645,13 @@ func TestApplyEndpoints(t *testing.T) { name: "create", existing: []runtime.Object{ &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + ObjectMeta: metav1.ObjectMeta{Name: nameFoo}, }, }, input: &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "foo", + Name: nameFoo, + Namespace: nameFoo, }, Subsets: []corev1.EndpointSubset{ { @@ -660,7 +674,7 @@ func TestApplyEndpoints(t *testing.T) { if len(actions) != 2 { t.Fatal("action count mismatch") } - if !actions[0].Matches("get", "endpoints") || actions[0].(clienttesting.GetAction).GetName() != "foo" { + if !actions[0].Matches("get", "endpoints") || actions[0].(clienttesting.GetAction).GetName() != nameFoo { t.Error("unexpected action:", actions[0]) } if !actions[1].Matches("create", "endpoints") { @@ -672,12 +686,12 @@ func TestApplyEndpoints(t *testing.T) { name: "remain same", existing: []runtime.Object{ &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + ObjectMeta: metav1.ObjectMeta{Name: nameFoo}, }, &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "foo", + Name: nameFoo, + Namespace: nameFoo, }, Subsets: []corev1.EndpointSubset{ { @@ -697,8 +711,8 @@ func TestApplyEndpoints(t *testing.T) { }, input: &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "foo", + Name: nameFoo, + Namespace: nameFoo, }, Subsets: []corev1.EndpointSubset{ { @@ -720,7 +734,7 @@ func TestApplyEndpoints(t *testing.T) { if len(actions) != 1 { t.Fatal("action count mismatch") } - if !actions[0].Matches("get", "endpoints") || actions[0].(clienttesting.GetAction).GetName() != "foo" { + if !actions[0].Matches("get", "endpoints") || actions[0].(clienttesting.GetAction).GetName() != nameFoo { t.Error("unexpected action:", actions[0]) } }, @@ -729,12 +743,12 @@ func TestApplyEndpoints(t *testing.T) { name: "update", existing: []runtime.Object{ &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + ObjectMeta: metav1.ObjectMeta{Name: nameFoo}, }, &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "foo", + Name: nameFoo, + Namespace: nameFoo, }, Subsets: []corev1.EndpointSubset{ { @@ -754,8 +768,8 @@ func TestApplyEndpoints(t *testing.T) { }, input: &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "foo", + Name: nameFoo, + Namespace: nameFoo, }, Subsets: []corev1.EndpointSubset{ { @@ -777,7 +791,7 @@ func TestApplyEndpoints(t *testing.T) { if len(actions) != 2 { t.Fatal("action count mismatch") } - if !actions[0].Matches("get", "endpoints") || actions[0].(clienttesting.GetAction).GetName() != "foo" { + if !actions[0].Matches("get", "endpoints") || actions[0].(clienttesting.GetAction).GetName() != nameFoo { t.Error("unexpected action:", actions[0]) } if !actions[1].Matches("update", "endpoints") { @@ -870,10 +884,10 @@ func TestGetRelatedResource(t *testing.T) { relatedResource, err := GenerateRelatedResource(objData) if !errors.Is(err, c.expectedErr) { - t.Errorf(diff.ObjectDiff(err, c.expectedErr)) + t.Errorf(cmp.Diff(err, c.expectedErr)) } if !reflect.DeepEqual(relatedResource, c.expectedRelatedResource) { - t.Errorf(diff.ObjectDiff(err, c.expectedErr)) + t.Errorf(cmp.Diff(err, c.expectedErr)) } }) @@ -1267,7 +1281,7 @@ func TestSyncSecret(t *testing.T) { Name: "sourceName", }, Type: corev1.SecretTypeOpaque, - Data: map[string][]byte{"foo": []byte("bar")}, + Data: 
map[string][]byte{nameFoo: []byte("bar")}, }, }, expectedSecret: &corev1.Secret{ @@ -1276,7 +1290,7 @@ func TestSyncSecret(t *testing.T) { Name: "targetName", }, Type: corev1.SecretTypeOpaque, - Data: map[string][]byte{"foo": []byte("bar")}, + Data: map[string][]byte{nameFoo: []byte("bar")}, }, expectedChanged: true, expectedErr: "", @@ -1295,7 +1309,7 @@ func TestSyncSecret(t *testing.T) { Name: "sourceName", }, Type: corev1.SecretTypeOpaque, - Data: map[string][]byte{"foo": []byte("bar2")}, + Data: map[string][]byte{nameFoo: []byte("bar2")}, }, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -1303,7 +1317,7 @@ func TestSyncSecret(t *testing.T) { Name: "targetName", }, Type: corev1.SecretTypeOpaque, - Data: map[string][]byte{"foo": []byte("bar1")}, + Data: map[string][]byte{nameFoo: []byte("bar1")}, }, }, expectedSecret: &corev1.Secret{ @@ -1312,7 +1326,7 @@ func TestSyncSecret(t *testing.T) { Name: "targetName", }, Type: corev1.SecretTypeOpaque, - Data: map[string][]byte{"foo": []byte("bar2")}, + Data: map[string][]byte{nameFoo: []byte("bar2")}, }, expectedChanged: true, expectedErr: "", @@ -1343,7 +1357,7 @@ func TestSyncSecret(t *testing.T) { Name: "sourceName", }, Type: corev1.SecretTypeServiceAccountToken, - Data: map[string][]byte{"foo": []byte("bar")}, + Data: map[string][]byte{nameFoo: []byte("bar")}, }, }, expectedSecret: nil, @@ -1363,7 +1377,7 @@ func TestSyncSecret(t *testing.T) { Namespace: "sourceNamespace", Name: "sourceName", Annotations: map[string]string{ - corev1.ServiceAccountNameKey: "foo", + corev1.ServiceAccountNameKey: nameFoo, corev1.ServiceAccountUIDKey: "bar", }, }, @@ -1389,7 +1403,8 @@ func TestSyncSecret(t *testing.T) { client := fakekube.NewSimpleClientset(tc.existingObjects...) clientTarget := fakekube.NewSimpleClientset() secret, changed, err := SyncSecret( - context.TODO(), client.CoreV1(), clientTarget.CoreV1(), events.NewInMemoryRecorder("test"), tc.sourceNamespace, tc.sourceName, tc.targetNamespace, tc.targetName, tc.ownerRefs) + context.TODO(), client.CoreV1(), clientTarget.CoreV1(), + events.NewInMemoryRecorder("test"), tc.sourceNamespace, tc.sourceName, tc.targetNamespace, tc.targetName, tc.ownerRefs) if (err == nil && len(tc.expectedErr) != 0) || (err != nil && err.Error() != tc.expectedErr) { t.Errorf("%s: expected error %v, got %v", tc.name, tc.expectedErr, err) @@ -1442,9 +1457,11 @@ func TestGetHubKubeconfig(t *testing.T) { expectedErr: true, }, { - name: "hosted mode", - mode: operatorapiv1.InstallModeHosted, - secret: []runtime.Object{newKubeConfigSecret("test", ExternalHubKubeConfig, newKubeConfig("testhost", "tls.crt", "tls.key"), []byte("--- TRUNCATED ---"), []byte("--- REDACTED ---"))}, + name: "hosted mode", + mode: operatorapiv1.InstallModeHosted, + secret: []runtime.Object{ + newKubeConfigSecret("test", ExternalHubKubeConfig, + newKubeConfig("testhost", "tls.crt", "tls.key"), []byte("--- TRUNCATED ---"), []byte("--- REDACTED ---"))}, namespace: "test", expectedHost: "https://testhost:443", expectedErr: false, diff --git a/pkg/operator/helpers/sa_syncer.go b/pkg/operator/helpers/sa_syncer.go index bf0b7c158..2930f5680 100644 --- a/pkg/operator/helpers/sa_syncer.go +++ b/pkg/operator/helpers/sa_syncer.go @@ -59,7 +59,7 @@ func SATokenGetter(ctx context.Context, saName, saNamespace string, saClient kub tr, err := saClient.CoreV1().ServiceAccounts(saNamespace). 
CreateToken(ctx, saName, &authv1.TokenRequest{ Spec: authv1.TokenRequestSpec{ - ExpirationSeconds: pointer.Int64Ptr(8640 * 3600), + ExpirationSeconds: pointer.Int64(8640 * 3600), }, }, metav1.CreateOptions{}) if err != nil { @@ -80,7 +80,7 @@ func SATokenCreater(ctx context.Context, saName, saNamespace string, saClient ku tr, err := saClient.CoreV1().ServiceAccounts(saNamespace). CreateToken(ctx, saName, &authv1.TokenRequest{ Spec: authv1.TokenRequestSpec{ - ExpirationSeconds: pointer.Int64Ptr(8640 * 3600), + ExpirationSeconds: pointer.Int64(8640 * 3600), }, }, metav1.CreateOptions{}) if err != nil { diff --git a/pkg/operator/helpers/sa_syncer_test.go b/pkg/operator/helpers/sa_syncer_test.go index 14bda5a50..6d0833236 100644 --- a/pkg/operator/helpers/sa_syncer_test.go +++ b/pkg/operator/helpers/sa_syncer_test.go @@ -194,7 +194,10 @@ func TestApplyKubeconfigSecret(t *testing.T) { return tt.token, expiration, tt.tokenGetError } client := testclient.NewSimpleClientset(tt.secrets...) - err := SyncKubeConfigSecret(context.TODO(), secretName, secretNamespace, "/tmp/kubeconfig", tkc, client.CoreV1(), tokenGetter, eventstesting.NewTestingEventRecorder(t)) + err := SyncKubeConfigSecret( + context.TODO(), secretName, secretNamespace, + "/tmp/kubeconfig", tkc, client.CoreV1(), tokenGetter, + eventstesting.NewTestingEventRecorder(t)) if err != nil && !tt.wantErr { t.Error(err) } diff --git a/pkg/operator/operators/clustermanager/controllers/certrotationcontroller/certrotation_controller.go b/pkg/operator/operators/clustermanager/controllers/certrotationcontroller/certrotation_controller.go index bbc99a846..45e2e4f60 100644 --- a/pkg/operator/operators/clustermanager/controllers/certrotationcontroller/certrotation_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/certrotationcontroller/certrotation_controller.go @@ -105,7 +105,7 @@ func (c certRotationController) sync(ctx context.Context, syncCtx factory.SyncCo return nil } - errs := []error{} + var errs []error for i := range clustermanagers { err = c.syncOne(ctx, syncCtx, clustermanagers[i]) if err != nil { @@ -235,7 +235,7 @@ func (c certRotationController) syncOne(ctx context.Context, syncCtx factory.Syn } // reconcile target cert/key pairs - errs := []error{} + var errs []error for _, targetRotation := range rotations.targetRotations { if err := targetRotation.EnsureTargetCertKeyPair(ctx, signingCertKeyPair, cabundleCerts); err != nil { errs = append(errs, err) diff --git a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go index fa48e6c37..fdf4291a2 100644 --- a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go @@ -156,14 +156,14 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f config.RegistrationFeatureGates, registrationFeatureMsgs = helpers.ConvertToFeatureGateFlags("Registration", registrationFeatureGates, ocmfeature.DefaultHubRegistrationFeatureGates) - workFeatureGates := []operatorapiv1.FeatureGate{} + var workFeatureGates []operatorapiv1.FeatureGate if clusterManager.Spec.WorkConfiguration != nil { workFeatureGates = clusterManager.Spec.WorkConfiguration.FeatureGates } config.WorkFeatureGates, workFeatureMsgs = helpers.ConvertToFeatureGateFlags("Work", 
workFeatureGates, ocmfeature.DefaultHubWorkFeatureGates) config.MWReplicaSetEnabled = helpers.FeatureGateEnabled(workFeatureGates, ocmfeature.DefaultHubWorkFeatureGates, ocmfeature.ManifestWorkReplicaSet) - addonFeatureGates := []operatorapiv1.FeatureGate{} + var addonFeatureGates []operatorapiv1.FeatureGate if clusterManager.Spec.AddOnManagerConfiguration != nil { addonFeatureGates = clusterManager.Spec.AddOnManagerConfiguration.FeatureGates } diff --git a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go index 7697fcaac..d1c4d928a 100644 --- a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go @@ -36,7 +36,8 @@ import ( ) var ( - ctx = context.Background() + ctx = context.Background() + createVerb = "create" ) type testController struct { @@ -260,7 +261,8 @@ func setup(t *testing.T, tc *testController, cd []runtime.Object, crds ...runtim // set clients in clustermanager controller tc.clusterManagerController.recorder = eventstesting.NewTestingEventRecorder(t) tc.clusterManagerController.operatorKubeClient = fakeManagementKubeClient - tc.clusterManagerController.generateHubClusterClients = func(hubKubeConfig *rest.Config) (kubernetes.Interface, apiextensionsclient.Interface, migrationclient.StorageVersionMigrationsGetter, error) { + tc.clusterManagerController.generateHubClusterClients = func(hubKubeConfig *rest.Config) ( + kubernetes.Interface, apiextensionsclient.Interface, migrationclient.StorageVersionMigrationsGetter, error) { return fakeHubKubeClient, fakeAPIExtensionClient, fakeMigrationClient.MigrationV1alpha1(), nil } tc.clusterManagerController.ensureSAKubeconfigs = func(ctx context.Context, @@ -308,10 +310,10 @@ func TestSyncDeploy(t *testing.T) { t.Fatalf("Expected no error when sync, %v", err) } - createKubeObjects := []runtime.Object{} + var createKubeObjects []runtime.Object kubeActions := append(tc.hubKubeClient.Actions(), tc.managementKubeClient.Actions()...) // record objects from both hub and management cluster for _, action := range kubeActions { - if action.GetVerb() == "create" { + if action.GetVerb() == createVerb { object := action.(clienttesting.CreateActionImpl).Object createKubeObjects = append(createKubeObjects, object) } @@ -324,10 +326,10 @@ func TestSyncDeploy(t *testing.T) { ensureObject(t, object, clusterManager) } - createCRDObjects := []runtime.Object{} + var createCRDObjects []runtime.Object crdActions := tc.apiExtensionClient.Actions() for _, action := range crdActions { - if action.GetVerb() == "create" { + if action.GetVerb() == createVerb { object := action.(clienttesting.CreateActionImpl).Object createCRDObjects = append(createCRDObjects, object) } @@ -348,10 +350,10 @@ func TestSyncDeployNoWebhook(t *testing.T) { t.Fatalf("Expected no error when sync, %v", err) } - createKubeObjects := []runtime.Object{} + var createKubeObjects []runtime.Object kubeActions := append(tc.hubKubeClient.Actions(), tc.managementKubeClient.Actions()...) 
// record objects from both hub and management cluster for _, action := range kubeActions { - if action.GetVerb() == "create" { + if action.GetVerb() == createVerb { object := action.(clienttesting.CreateActionImpl).Object createKubeObjects = append(createKubeObjects, object) } @@ -364,10 +366,10 @@ func TestSyncDeployNoWebhook(t *testing.T) { ensureObject(t, object, clusterManager) } - createCRDObjects := []runtime.Object{} + var createCRDObjects []runtime.Object crdActions := tc.apiExtensionClient.Actions() for _, action := range crdActions { - if action.GetVerb() == "create" { + if action.GetVerb() == createVerb { object := action.(clienttesting.CreateActionImpl).Object createCRDObjects = append(createCRDObjects, object) } @@ -393,7 +395,7 @@ func TestSyncDelete(t *testing.T) { t.Fatalf("Expected non error when sync, %v", err) } - deleteKubeActions := []clienttesting.DeleteActionImpl{} + var deleteKubeActions []clienttesting.DeleteActionImpl kubeActions := append(tc.hubKubeClient.Actions(), tc.managementKubeClient.Actions()...) for _, action := range kubeActions { if action.GetVerb() == "delete" { @@ -403,7 +405,7 @@ func TestSyncDelete(t *testing.T) { } testingcommon.AssertEqualNumber(t, len(deleteKubeActions), 29) // delete namespace both from the hub cluster and the mangement cluster - deleteCRDActions := []clienttesting.DeleteActionImpl{} + var deleteCRDActions []clienttesting.DeleteActionImpl crdActions := tc.apiExtensionClient.Actions() for _, action := range crdActions { if action.GetVerb() == "delete" { diff --git a/pkg/operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller.go b/pkg/operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller.go index 1d934ccc1..584e58deb 100644 --- a/pkg/operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller.go @@ -42,7 +42,7 @@ type crdStatusController struct { generateHubClusterClients func(hubConfig *rest.Config) (apiextensionsclient.Interface, error) } -// NewClusterManagerController construct cluster manager hub controller +// NewCRDStatusController construct crd status controller func NewCRDStatusController( kubeconfig *rest.Config, kubeClient kubernetes.Interface, diff --git a/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go b/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go index 09795f661..0c2403ebf 100644 --- a/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go @@ -70,7 +70,7 @@ type crdMigrationController struct { generateHubClusterClients func(hubConfig *rest.Config) (apiextensionsclient.Interface, migrationv1alpha1client.StorageVersionMigrationsGetter, error) } -// NewClusterManagerController construct cluster manager hub controller +// NewCRDMigrationController construct crd migration controller func NewCRDMigrationController( kubeconfig *rest.Config, kubeClient kubernetes.Interface, @@ -231,7 +231,7 @@ func applyStorageVersionMigrations(ctx context.Context, continue } - _, _, err = applyStorageVersionMigration(migrationClient, required, recorder) + _, _, err = applyStorageVersionMigration(ctx, migrationClient, required, recorder) if err != nil { errs = append(errs, err) continue @@ -337,6 +337,7 @@ func 
parseStorageVersionMigrationFile( } func applyStorageVersionMigration( + ctx context.Context, client migrationv1alpha1client.StorageVersionMigrationsGetter, required *migrationv1alpha1.StorageVersionMigration, recorder events.Recorder, @@ -344,7 +345,7 @@ func applyStorageVersionMigration( if required == nil { return nil, false, fmt.Errorf("required StorageVersionMigration is nil") } - existing, err := client.StorageVersionMigrations().Get(context.TODO(), required.Name, metav1.GetOptions{}) + existing, err := client.StorageVersionMigrations().Get(ctx, required.Name, metav1.GetOptions{}) if errors.IsNotFound(err) { actual, err := client.StorageVersionMigrations().Create(context.TODO(), required, metav1.CreateOptions{}) if err != nil { @@ -370,7 +371,7 @@ func applyStorageVersionMigration( return existing, false, nil } - actual, err := client.StorageVersionMigrations().Update(context.TODO(), existingCopy, metav1.UpdateOptions{}) + actual, err := client.StorageVersionMigrations().Update(ctx, existingCopy, metav1.UpdateOptions{}) if err != nil { recorder.Warningf("StorageVersionMigrationUpdateFailed", "Failed to update %s: %v", resourcehelper.FormatResourceForCLIWithNamespace(existingCopy), err) return actual, true, err diff --git a/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller_test.go b/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller_test.go index 125afa224..b86ed0756 100644 --- a/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller_test.go +++ b/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller_test.go @@ -433,7 +433,10 @@ func TestSync(t *testing.T) { } } -func newTestController(t *testing.T, clustermanager *operatorapiv1.ClusterManager, crds ...runtime.Object) (*crdMigrationController, *fakeoperatorlient.Clientset) { +func newTestController( + t *testing.T, + clustermanager *operatorapiv1.ClusterManager, + crds ...runtime.Object) (*crdMigrationController, *fakeoperatorlient.Clientset) { fakeOperatorClient := fakeoperatorlient.NewSimpleClientset(clustermanager) operatorInformers := operatorinformers.NewSharedInformerFactory(fakeOperatorClient, 5*time.Minute) fakeAPIExtensionClient := fakeapiextensions.NewSimpleClientset(crds...) 
@@ -446,7 +449,8 @@ func newTestController(t *testing.T, clustermanager *operatorapiv1.ClusterManage *operatorapiv1.ClusterManager, operatorapiv1.ClusterManagerSpec, operatorapiv1.ClusterManagerStatus]( fakeOperatorClient.OperatorV1().ClusterManagers()), } - crdMigrationController.generateHubClusterClients = func(hubKubeConfig *rest.Config) (apiextensionsclient.Interface, migrationv1alpha1client.StorageVersionMigrationsGetter, error) { + crdMigrationController.generateHubClusterClients = func( + hubKubeConfig *rest.Config) (apiextensionsclient.Interface, migrationv1alpha1client.StorageVersionMigrationsGetter, error) { return fakeAPIExtensionClient, fakeMigrationClient.MigrationV1alpha1(), nil } store := operatorInformers.Operator().V1().ClusterManagers().Informer().GetStore() diff --git a/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go b/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go index c44b439f4..5aef40105 100644 --- a/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go @@ -80,10 +80,10 @@ func (s *clusterManagerStatusController) sync(ctx context.Context, controllerCon clusterManagerNamespace := helpers.ClusterManagerNamespace(clusterManagerName, clusterManager.Spec.DeployOption.Mode) newClusterManager := clusterManager.DeepCopy() - registrationCond := s.updateStatusOfRegistration(ctx, clusterManager.Name, clusterManagerNamespace) + registrationCond := s.updateStatusOfRegistration(clusterManager.Name, clusterManagerNamespace) registrationCond.ObservedGeneration = clusterManager.Generation meta.SetStatusCondition(&newClusterManager.Status.Conditions, registrationCond) - placementCond := s.updateStatusOfPlacement(ctx, clusterManager.Name, clusterManagerNamespace) + placementCond := s.updateStatusOfPlacement(clusterManager.Name, clusterManagerNamespace) placementCond.ObservedGeneration = clusterManager.Generation meta.SetStatusCondition(&newClusterManager.Status.Conditions, placementCond) @@ -92,7 +92,7 @@ func (s *clusterManagerStatusController) sync(ctx context.Context, controllerCon } // updateStatusOfRegistration checks registration deployment status and updates condition of clustermanager -func (s *clusterManagerStatusController) updateStatusOfRegistration(ctx context.Context, clusterManagerName, clusterManagerNamespace string) metav1.Condition { +func (s *clusterManagerStatusController) updateStatusOfRegistration(clusterManagerName, clusterManagerNamespace string) metav1.Condition { // Check registration deployment status registrationDeploymentName := fmt.Sprintf("%s-registration-controller", clusterManagerName) registrationDeployment, err := s.deploymentLister.Deployments(clusterManagerNamespace).Get(registrationDeploymentName) @@ -124,7 +124,7 @@ func (s *clusterManagerStatusController) updateStatusOfRegistration(ctx context. 
} // updateStatusOfRegistration checks placement deployment status and updates condition of clustermanager -func (s *clusterManagerStatusController) updateStatusOfPlacement(ctx context.Context, clusterManagerName, clusterManagerNamespace string) metav1.Condition { +func (s *clusterManagerStatusController) updateStatusOfPlacement(clusterManagerName, clusterManagerNamespace string) metav1.Condition { // Check registration deployment status placementDeploymentName := fmt.Sprintf("%s-placement-controller", clusterManagerName) placementDeployment, err := s.deploymentLister.Deployments(clusterManagerNamespace).Get(placementDeploymentName) diff --git a/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller.go b/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller.go index 5d86b78de..9bc38cbcc 100644 --- a/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller.go +++ b/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller.go @@ -55,7 +55,7 @@ func NewBootstrapController( secretInformers: secretInformers, } return factory.New().WithSync(controller.sync). - WithInformersQueueKeyFunc(bootstrapSecretQueueKeyFunc(controller.klusterletLister), + WithInformersQueueKeysFunc(bootstrapSecretQueueKeyFunc(controller.klusterletLister), secretInformers[helpers.HubKubeConfig].Informer(), secretInformers[helpers.BootstrapHubKubeConfig].Informer(), secretInformers[helpers.ExternalManagedKubeConfig].Informer()). @@ -110,6 +110,7 @@ func (k *bootstrapController) sync(ctx context.Context, controllerContext factor return nil } + // #nosec G101 hubKubeconfigSecret, err := k.secretInformers[helpers.HubKubeConfig].Lister().Secrets(agentNamespace).Get(helpers.HubKubeConfig) switch { case errors.IsNotFound(err): @@ -203,28 +204,28 @@ func (k *bootstrapController) loadKubeConfig(secret *corev1.Secret) (*clientcmda return cluster, nil } -func bootstrapSecretQueueKeyFunc(klusterletLister operatorlister.KlusterletLister) factory.ObjectQueueKeyFunc { - return func(obj runtime.Object) string { +func bootstrapSecretQueueKeyFunc(klusterletLister operatorlister.KlusterletLister) factory.ObjectQueueKeysFunc { + return func(obj runtime.Object) []string { accessor, err := meta.Accessor(obj) if err != nil { - return "" + return []string{} } name := accessor.GetName() if name != helpers.BootstrapHubKubeConfig { - return "" + return []string{} } namespace := accessor.GetNamespace() klusterlets, err := klusterletLister.List(labels.Everything()) if err != nil { - return "" + return []string{} } if klusterlet := helpers.FindKlusterletByNamespace(klusterlets, namespace); klusterlet != nil { - return namespace + "/" + klusterlet.Name + return []string{namespace + "/" + klusterlet.Name} } - return "" + return []string{} } } diff --git a/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller_test.go b/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller_test.go index 6fc9d3d93..59b4121ae 100644 --- a/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller_test.go +++ b/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller_test.go @@ -13,6 +13,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" @@ -171,25 +172,25 @@ func 
TestBootstrapSecretQueueKeyFunc(t *testing.T) { name string object runtime.Object klusterlet *operatorapiv1.Klusterlet - expectedKey string + expectedKey []string }{ { name: "key by bootstrap secret", object: newSecret("bootstrap-hub-kubeconfig", "test", []byte{}), klusterlet: newKlusterlet("testklusterlet", "test"), - expectedKey: "test/testklusterlet", + expectedKey: []string{"test/testklusterlet"}, }, { name: "key by wrong secret", object: newSecret("dummy", "test", []byte{}), klusterlet: newKlusterlet("testklusterlet", "test"), - expectedKey: "", + expectedKey: []string{}, }, { name: "key by klusterlet with empty namespace", object: newSecret("bootstrap-hub-kubeconfig", "open-cluster-management-agent", []byte{}), klusterlet: newKlusterlet("testklusterlet", ""), - expectedKey: "open-cluster-management-agent/testklusterlet", + expectedKey: []string{"open-cluster-management-agent/testklusterlet"}, }, } @@ -203,7 +204,7 @@ func TestBootstrapSecretQueueKeyFunc(t *testing.T) { } keyFunc := bootstrapSecretQueueKeyFunc(operatorInformers.Operator().V1().Klusterlets().Lister()) actualKey := keyFunc(c.object) - if actualKey != c.expectedKey { + if !equality.Semantic.DeepEqual(actualKey, c.expectedKey) { t.Errorf("Queued key is not correct: actual %s, expected %s", actualKey, c.expectedKey) } }) diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller.go index a9fb68fd5..5c64fa8a7 100644 --- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller.go @@ -198,7 +198,7 @@ func (n *klusterletCleanupController) sync(ctx context.Context, controllerContex return n.patcher.RemoveFinalizer(ctx, klusterlet, klusterletFinalizer, klusterletHostedFinalizer) } -func (r *klusterletCleanupController) checkConnectivity(ctx context.Context, +func (n *klusterletCleanupController) checkConnectivity(ctx context.Context, amwClient workv1client.AppliedManifestWorkInterface, klusterlet *operatorapiv1.Klusterlet) (cleanupManagedClusterResources bool, err error) { _, err = amwClient.List(ctx, metav1.ListOptions{}) diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller_test.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller_test.go index aa1d2c78c..6cce0931e 100644 --- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller_test.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller_test.go @@ -39,7 +39,7 @@ func TestSyncDelete(t *testing.T) { var deleteActions []clienttesting.DeleteActionImpl kubeActions := controller.kubeClient.Actions() for _, action := range kubeActions { - if action.GetVerb() == "delete" { + if action.GetVerb() == deleteVerb { deleteAction := action.(clienttesting.DeleteActionImpl) klog.Infof("kube delete name: %v\t resource:%v \t namespace:%v", deleteAction.Name, deleteAction.GetResource(), deleteAction.GetNamespace()) deleteActions = append(deleteActions, deleteAction) @@ -97,7 +97,7 @@ func TestSyncDeleteHosted(t *testing.T) { var deleteActionsManagement []clienttesting.DeleteActionImpl kubeActions := controller.kubeClient.Actions() for _, action := range kubeActions { - if action.GetVerb() == "delete" { 
+ if action.GetVerb() == deleteVerb { deleteAction := action.(clienttesting.DeleteActionImpl) klog.Infof("management kube delete name: %v\t resource:%v \t namespace:%v", deleteAction.Name, deleteAction.GetResource(), deleteAction.GetNamespace()) deleteActionsManagement = append(deleteActionsManagement, deleteAction) @@ -112,7 +112,7 @@ func TestSyncDeleteHosted(t *testing.T) { var deleteActionsManaged []clienttesting.DeleteActionImpl for _, action := range controller.managedKubeClient.Actions() { - if action.GetVerb() == "delete" { + if action.GetVerb() == deleteVerb { deleteAction := action.(clienttesting.DeleteActionImpl) klog.Infof("managed kube delete name: %v\t resource:%v \t namespace:%v", deleteAction.Name, deleteAction.GetResource(), deleteAction.GetNamespace()) deleteActionsManaged = append(deleteActionsManaged, deleteAction) @@ -177,7 +177,7 @@ func TestSyncDeleteHostedDeleteWaitKubeconfig(t *testing.T) { // assert no delete action on the management cluster,should wait for the kubeconfig for _, action := range controller.kubeClient.Actions() { - if action.GetVerb() == "delete" { + if action.GetVerb() == deleteVerb { t.Errorf("Expected not delete the resources, should wait for the kubeconfig, but got delete actions") } } diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go index 07a5d3800..7d9121ef8 100644 --- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go @@ -238,7 +238,7 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto config.RegistrationFeatureGates, registrationFeatureMsgs = helpers.ConvertToFeatureGateFlags("Registration", registrationFeatureGates, ocmfeature.DefaultSpokeRegistrationFeatureGates) - workFeatureGates := []operatorapiv1.FeatureGate{} + var workFeatureGates []operatorapiv1.FeatureGate if klusterlet.Spec.WorkConfiguration != nil { workFeatureGates = klusterlet.Spec.WorkConfiguration.FeatureGates } diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go index ac5e5a98b..eda3c9d32 100644 --- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go @@ -40,6 +40,12 @@ import ( testinghelper "open-cluster-management.io/ocm/pkg/operator/helpers/testing" ) +const ( + createVerb = "create" + deleteVerb = "delete" + crdResourceName = "customresourcedefinitions" +) + type testController struct { controller *klusterletController cleanupController *klusterletCleanupController @@ -198,7 +204,10 @@ func newTestController(t *testing.T, klusterlet *operatorapiv1.Klusterlet, appli } } -func newTestControllerHosted(t *testing.T, klusterlet *operatorapiv1.Klusterlet, appliedManifestWorks []runtime.Object, objects ...runtime.Object) *testController { +func newTestControllerHosted( + t *testing.T, klusterlet *operatorapiv1.Klusterlet, + appliedManifestWorks []runtime.Object, + objects ...runtime.Object) *testController { fakeKubeClient := fakekube.NewSimpleClientset(objects...) 
fakeAPIExtensionClient := fakeapiextensions.NewSimpleClientset() fakeOperatorClient := fakeoperatorclient.NewSimpleClientset(klusterlet) @@ -316,13 +325,13 @@ func (c *testController) setDefaultManagedClusterClientsBuilder() *testControlle func getDeployments(actions []clienttesting.Action, verb, suffix string) *appsv1.Deployment { - deployments := []*appsv1.Deployment{} + var deployments []*appsv1.Deployment for _, action := range actions { if action.GetVerb() != verb || action.GetResource().Resource != "deployments" { continue } - if verb == "create" { + if verb == createVerb { object := action.(clienttesting.CreateActionImpl).Object deployments = append(deployments, object.(*appsv1.Deployment)) } @@ -402,8 +411,9 @@ func assertWorkDeployment(t *testing.T, actions []clienttesting.Action, verb, cl } if mode == operatorapiv1.InstallModeHosted { - expectArgs = append(expectArgs, "--spoke-kubeconfig=/spoke/config/kubeconfig") - expectArgs = append(expectArgs, "--terminate-on-files=/spoke/config/kubeconfig") + expectArgs = append(expectArgs, + "--spoke-kubeconfig=/spoke/config/kubeconfig", + "--terminate-on-files=/spoke/config/kubeconfig") } expectArgs = append(expectArgs, "--terminate-on-files=/spoke/hub-kubeconfig/kubeconfig") @@ -477,10 +487,10 @@ func TestSyncDeploy(t *testing.T) { t.Errorf("Expected non error when sync, %v", err) } - createObjects := []runtime.Object{} + var createObjects []runtime.Object kubeActions := controller.kubeClient.Actions() for _, action := range kubeActions { - if action.GetVerb() == "create" { + if action.GetVerb() == createVerb { object := action.(clienttesting.CreateActionImpl).Object createObjects = append(createObjects, object) @@ -497,9 +507,9 @@ func TestSyncDeploy(t *testing.T) { } apiExtenstionAction := controller.apiExtensionClient.Actions() - createCRDObjects := []runtime.Object{} + var createCRDObjects []runtime.Object for _, action := range apiExtenstionAction { - if action.GetVerb() == "create" && action.GetResource().Resource == "customresourcedefinitions" { + if action.GetVerb() == createVerb && action.GetResource().Resource == crdResourceName { object := action.(clienttesting.CreateActionImpl).Object createCRDObjects = append(createCRDObjects, object) } @@ -538,10 +548,10 @@ func TestSyncDeploySingleton(t *testing.T) { t.Errorf("Expected non error when sync, %v", err) } - createObjects := []runtime.Object{} + var createObjects []runtime.Object kubeActions := controller.kubeClient.Actions() for _, action := range kubeActions { - if action.GetVerb() == "create" { + if action.GetVerb() == createVerb { object := action.(clienttesting.CreateActionImpl).Object createObjects = append(createObjects, object) @@ -558,9 +568,9 @@ func TestSyncDeploySingleton(t *testing.T) { } apiExtenstionAction := controller.apiExtensionClient.Actions() - createCRDObjects := []runtime.Object{} + var createCRDObjects []runtime.Object for _, action := range apiExtenstionAction { - if action.GetVerb() == "create" && action.GetResource().Resource == "customresourcedefinitions" { + if action.GetVerb() == createVerb && action.GetResource().Resource == crdResourceName { object := action.(clienttesting.CreateActionImpl).Object createCRDObjects = append(createCRDObjects, object) } @@ -608,17 +618,18 @@ func TestSyncDeployHosted(t *testing.T) { t.Errorf("Expected non error when sync, %v", err) } - createObjectsManagement := []runtime.Object{} + var createObjectsManagement []runtime.Object kubeActions := controller.kubeClient.Actions() for _, action := range kubeActions { - if 
action.GetVerb() == "create" { + if action.GetVerb() == createVerb { object := action.(clienttesting.CreateActionImpl).Object klog.Infof("management kube create: %v\t resource:%v \t namespace:%v", object.GetObjectKind(), action.GetResource(), action.GetNamespace()) createObjectsManagement = append(createObjectsManagement, object) } } // Check if resources are created as expected on the management cluster - // 11 static manifests + 2 secrets(external-managed-kubeconfig-registration,external-managed-kubeconfig-work) + 2 deployments(registration-agent,work-agent) + 1 pull secret + // 11 static manifests + 2 secrets(external-managed-kubeconfig-registration,external-managed-kubeconfig-work) + + // 2 deployments(registration-agent,work-agent) + 1 pull secret if len(createObjectsManagement) != 16 { t.Errorf("Expect 16 objects created in the sync loop, actual %d", len(createObjectsManagement)) } @@ -626,9 +637,9 @@ func TestSyncDeployHosted(t *testing.T) { ensureObject(t, object, klusterlet) } - createObjectsManaged := []runtime.Object{} + var createObjectsManaged []runtime.Object for _, action := range controller.managedKubeClient.Actions() { - if action.GetVerb() == "create" { + if action.GetVerb() == createVerb { object := action.(clienttesting.CreateActionImpl).Object klog.Infof("managed kube create: %v\t resource:%v \t namespace:%v", object.GetObjectKind().GroupVersionKind(), action.GetResource(), action.GetNamespace()) @@ -645,9 +656,9 @@ func TestSyncDeployHosted(t *testing.T) { } apiExtenstionAction := controller.apiExtensionClient.Actions() - createCRDObjects := []runtime.Object{} + var createCRDObjects []runtime.Object for _, action := range apiExtenstionAction { - if action.GetVerb() == "create" && action.GetResource().Resource == "customresourcedefinitions" { + if action.GetVerb() == createVerb && action.GetResource().Resource == crdResourceName { object := action.(clienttesting.CreateActionImpl).Object createCRDObjects = append(createCRDObjects, object) } @@ -656,9 +667,9 @@ func TestSyncDeployHosted(t *testing.T) { t.Errorf("Expect 0 objects created in the sync loop, actual %d", len(createCRDObjects)) } - createCRDObjectsManaged := []runtime.Object{} + var createCRDObjectsManaged []runtime.Object for _, action := range controller.managedApiExtensionClient.Actions() { - if action.GetVerb() == "create" && action.GetResource().Resource == "customresourcedefinitions" { + if action.GetVerb() == createVerb && action.GetResource().Resource == crdResourceName { object := action.(clienttesting.CreateActionImpl).Object createCRDObjectsManaged = append(createCRDObjectsManaged, object) } @@ -692,7 +703,7 @@ func TestSyncDeployHostedCreateAgentNamespace(t *testing.T) { klusterlet := newKlusterletHosted("klusterlet", "testns", "cluster1") meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{ Type: klusterletReadyToApply, Status: metav1.ConditionFalse, Reason: "KlusterletPrepareFailed", - Message: fmt.Sprintf("Failed to build managed cluster clients: secrets \"external-managed-kubeconfig\" not found"), + Message: "Failed to build managed cluster clients: secrets \"external-managed-kubeconfig\" not found", }) controller := newTestControllerHosted(t, klusterlet, nil).setDefaultManagedClusterClientsBuilder() syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet") @@ -704,7 +715,7 @@ func TestSyncDeployHostedCreateAgentNamespace(t *testing.T) { kubeActions := controller.kubeClient.Actions() testingcommon.AssertGet(t, kubeActions[0], "", "v1", "namespaces") - 
testingcommon.AssertAction(t, kubeActions[1], "create") + testingcommon.AssertAction(t, kubeActions[1], createVerb) if kubeActions[1].GetResource().Resource != "namespaces" { t.Errorf("expect object namespaces, but got %v", kubeActions[2].GetResource().Resource) } @@ -774,8 +785,8 @@ func TestReplica(t *testing.T) { } // should have 1 replica for registration deployment and 0 for work - assertRegistrationDeployment(t, controller.kubeClient.Actions(), "create", "", "cluster1", 1) - assertWorkDeployment(t, controller.kubeClient.Actions(), "create", "cluster1", operatorapiv1.InstallModeDefault, 0) + assertRegistrationDeployment(t, controller.kubeClient.Actions(), createVerb, "", "cluster1", 1) + assertWorkDeployment(t, controller.kubeClient.Actions(), createVerb, "cluster1", operatorapiv1.InstallModeDefault, 0) klusterlet = newKlusterlet("klusterlet", "testns", "cluster1") klusterlet.Status.Conditions = []metav1.Condition{ @@ -838,7 +849,7 @@ func TestClusterNameChange(t *testing.T) { } // Check if deployment has the right cluster name set - assertRegistrationDeployment(t, controller.kubeClient.Actions(), "create", "", "cluster1", 1) + assertRegistrationDeployment(t, controller.kubeClient.Actions(), createVerb, "", "cluster1", 1) operatorAction := controller.operatorClient.Actions() testingcommon.AssertActions(t, operatorAction, "patch") @@ -928,7 +939,7 @@ func TestSyncWithPullSecret(t *testing.T) { var createdSecret *corev1.Secret kubeActions := controller.kubeClient.Actions() for _, action := range kubeActions { - if action.GetVerb() == "create" && action.GetResource().Resource == "secrets" { + if action.GetVerb() == createVerb && action.GetResource().Resource == "secrets" { createdSecret = action.(clienttesting.CreateActionImpl).Object.(*corev1.Secret) break } @@ -958,17 +969,18 @@ func TestDeployOnKube111(t *testing.T) { t.Errorf("Expected non error when sync, %v", err) } - createObjects := []runtime.Object{} + var createObjects []runtime.Object kubeActions := controller.kubeClient.Actions() for _, action := range kubeActions { - if action.GetVerb() == "create" { + if action.GetVerb() == createVerb { object := action.(clienttesting.CreateActionImpl).Object createObjects = append(createObjects, object) } } // Check if resources are created as expected - // 11 managed static manifests + 11 management static manifests - 2 duplicated service account manifests + 1 addon namespace + 2 deployments + 2 kube111 clusterrolebindings + // 11 managed static manifests + 11 management static manifests - + // 2 duplicated service account manifests + 1 addon namespace + 2 deployments + 2 kube111 clusterrolebindings if len(createObjects) != 25 { t.Errorf("Expect 25 objects created in the sync loop, actual %d", len(createObjects)) } @@ -1003,7 +1015,7 @@ func TestDeployOnKube111(t *testing.T) { t.Errorf("Expected non error when sync, %v", err) } - deleteActions := []clienttesting.DeleteActionImpl{} + var deleteActions []clienttesting.DeleteActionImpl kubeActions = controller.kubeClient.Actions() for _, action := range kubeActions { if action.GetVerb() == "delete" { @@ -1054,19 +1066,19 @@ type fakeManagedClusterBuilder struct { fakeWorkClient *fakeworkclient.Clientset } -func (f *fakeManagedClusterBuilder) withMode(mode operatorapiv1.InstallMode) managedClusterClientsBuilderInterface { +func (f *fakeManagedClusterBuilder) withMode(_ operatorapiv1.InstallMode) managedClusterClientsBuilderInterface { return f } -func (f *fakeManagedClusterBuilder) withKubeConfigSecret(namespace, name string) 
managedClusterClientsBuilderInterface { +func (f *fakeManagedClusterBuilder) withKubeConfigSecret(_, _ string) managedClusterClientsBuilderInterface { return f } -func (m *fakeManagedClusterBuilder) build(ctx context.Context) (*managedClusterClients, error) { +func (f *fakeManagedClusterBuilder) build(_ context.Context) (*managedClusterClients, error) { return &managedClusterClients{ - kubeClient: m.fakeKubeClient, - apiExtensionClient: m.fakeAPIExtensionClient, - appliedManifestWorkClient: m.fakeWorkClient.WorkV1().AppliedManifestWorks(), + kubeClient: f.fakeKubeClient, + apiExtensionClient: f.fakeAPIExtensionClient, + appliedManifestWorkClient: f.fakeWorkClient.WorkV1().AppliedManifestWorks(), kubeconfig: &rest.Config{ Host: "testhost", TLSClientConfig: rest.TLSClientConfig{ diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go index 6341dcd62..b1b7d6937 100644 --- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go @@ -22,7 +22,7 @@ import ( workapiv1 "open-cluster-management.io/api/work/v1" "open-cluster-management.io/ocm/manifests" - patcher "open-cluster-management.io/ocm/pkg/common/patcher" + "open-cluster-management.io/ocm/pkg/common/patcher" "open-cluster-management.io/ocm/pkg/operator/helpers" ) @@ -170,7 +170,7 @@ func (r *managedReconcile) clean(ctx context.Context, klusterlet *operatorapiv1. // cleanUpAppliedManifestWorks removes finalizer from the AppliedManifestWorks whose name starts with // the hash of the given hub host. -func (r *managedReconcile) cleanUpAppliedManifestWorks(ctx context.Context, klusterlet *operatorapiv1.Klusterlet, config klusterletConfig) error { +func (r *managedReconcile) cleanUpAppliedManifestWorks(ctx context.Context, klusterlet *operatorapiv1.Klusterlet, _ klusterletConfig) error { appliedManifestWorks, err := r.managedClusterClients.appliedManifestWorkClient.List(ctx, metav1.ListOptions{}) if errors.IsNotFound(err) { return nil diff --git a/pkg/operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller.go b/pkg/operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller.go index c7a94b852..a3e9a9935 100644 --- a/pkg/operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller.go +++ b/pkg/operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller.go @@ -336,7 +336,6 @@ func checkHubConfigSecret(ctx context.Context, kubeClient kubernetes.Interface, func getHubConfigSSARs(clusterName string) []authorizationv1.SelfSubjectAccessReview { var reviews []authorizationv1.SelfSubjectAccessReview - // registration resources certResource := authorizationv1.ResourceAttributes{ Group: "certificates.k8s.io", diff --git a/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller.go b/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller.go index b25120c86..3832f718c 100644 --- a/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller.go +++ b/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller.go @@ -154,7 +154,7 @@ func checkAgentDeploymentDesired(ctx context.Context, kubeClient kubernetes.Inte // Check agent 
deployments, if both of them have at least 1 available replicas, return available condition func checkAgentsDeploymentAvailable(ctx context.Context, kubeClient kubernetes.Interface, agents []klusterletAgent) metav1.Condition { - availableMessages := []string{} + var availableMessages []string for _, agent := range agents { deployment, err := kubeClient.AppsV1().Deployments(agent.namespace).Get(ctx, agent.deploymentName, metav1.GetOptions{}) if err != nil { diff --git a/pkg/placement/controllers/manager.go b/pkg/placement/controllers/manager.go index a951584a2..7745926b3 100644 --- a/pkg/placement/controllers/manager.go +++ b/pkg/placement/controllers/manager.go @@ -14,7 +14,7 @@ import ( clusterscheme "open-cluster-management.io/api/client/cluster/clientset/versioned/scheme" clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions" - scheduling "open-cluster-management.io/ocm/pkg/placement/controllers/scheduling" + "open-cluster-management.io/ocm/pkg/placement/controllers/scheduling" "open-cluster-management.io/ocm/pkg/placement/debugger" ) diff --git a/pkg/placement/controllers/scheduling/cluster_event_handler.go b/pkg/placement/controllers/scheduling/cluster_event_handler.go index f80dc7cc2..1ea171c59 100644 --- a/pkg/placement/controllers/scheduling/cluster_event_handler.go +++ b/pkg/placement/controllers/scheduling/cluster_event_handler.go @@ -5,7 +5,7 @@ import ( "reflect" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - cache "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/cache" clusterapiv1 "open-cluster-management.io/api/cluster/v1" ) diff --git a/pkg/placement/controllers/scheduling/cluster_event_handler_test.go b/pkg/placement/controllers/scheduling/cluster_event_handler_test.go index a226e6ca3..0741e16bc 100644 --- a/pkg/placement/controllers/scheduling/cluster_event_handler_test.go +++ b/pkg/placement/controllers/scheduling/cluster_event_handler_test.go @@ -97,7 +97,7 @@ func TestOnClusterChange(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { clusterClient := clusterfake.NewSimpleClientset(c.initObjs...) - clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...) + clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...) syncCtx := testingcommon.NewFakeSyncContext(t, "fake") q := newEnqueuer( @@ -259,7 +259,7 @@ func TestOnClusterUpdate(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { clusterClient := clusterfake.NewSimpleClientset(c.initObjs...) - clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...) + clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...) syncCtx := testingcommon.NewFakeSyncContext(t, "fake") q := newEnqueuer( @@ -361,7 +361,7 @@ func TestOnClusterDelete(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { clusterClient := clusterfake.NewSimpleClientset(c.initObjs...) - clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...) + clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...) 
syncCtx := testingcommon.NewFakeSyncContext(t, "fake") q := newEnqueuer( diff --git a/pkg/placement/controllers/scheduling/enqueue_test.go b/pkg/placement/controllers/scheduling/enqueue_test.go index 08a85e3cf..834bbc4c4 100644 --- a/pkg/placement/controllers/scheduling/enqueue_test.go +++ b/pkg/placement/controllers/scheduling/enqueue_test.go @@ -23,17 +23,23 @@ import ( testinghelpers "open-cluster-management.io/ocm/pkg/placement/helpers/testing" ) -func newClusterInformerFactory(clusterClient clusterclient.Interface, objects ...runtime.Object) clusterinformers.SharedInformerFactory { +func newClusterInformerFactory(t *testing.T, clusterClient clusterclient.Interface, objects ...runtime.Object) clusterinformers.SharedInformerFactory { clusterInformerFactory := clusterinformers.NewSharedInformerFactory(clusterClient, time.Minute*10) - clusterInformerFactory.Cluster().V1beta1().Placements().Informer().AddIndexers(cache.Indexers{ + err := clusterInformerFactory.Cluster().V1beta1().Placements().Informer().AddIndexers(cache.Indexers{ placementsByScore: indexPlacementsByScore, placementsByClusterSetBinding: indexPlacementByClusterSetBinding, }) + if err != nil { + t.Fatal(err) + } - clusterInformerFactory.Cluster().V1beta2().ManagedClusterSetBindings().Informer().AddIndexers(cache.Indexers{ + err = clusterInformerFactory.Cluster().V1beta2().ManagedClusterSetBindings().Informer().AddIndexers(cache.Indexers{ clustersetBindingsByClusterSet: indexClusterSetBindingByClusterSet, }) + if err != nil { + t.Fatal(err) + } clusterStore := clusterInformerFactory.Cluster().V1().ManagedClusters().Informer().GetStore() clusterSetStore := clusterInformerFactory.Cluster().V1beta2().ManagedClusterSets().Informer().GetStore() @@ -43,19 +49,23 @@ func newClusterInformerFactory(clusterClient clusterclient.Interface, objects .. addOnPlacementStore := clusterInformerFactory.Cluster().V1alpha1().AddOnPlacementScores().Informer().GetStore() for _, obj := range objects { + var err error switch obj.(type) { case *clusterapiv1.ManagedCluster: - clusterStore.Add(obj) + err = clusterStore.Add(obj) case *clusterapiv1beta2.ManagedClusterSet: - clusterSetStore.Add(obj) + err = clusterSetStore.Add(obj) case *clusterapiv1beta2.ManagedClusterSetBinding: - clusterSetBindingStore.Add(obj) + err = clusterSetBindingStore.Add(obj) case *clusterapiv1beta1.Placement: - placementStore.Add(obj) + err = placementStore.Add(obj) case *clusterapiv1beta1.PlacementDecision: - placementDecisionStore.Add(obj) + err = placementDecisionStore.Add(obj) case *clusterapiv1alpha1.AddOnPlacementScore: - addOnPlacementStore.Add(obj) + err = addOnPlacementStore.Add(obj) + } + if err != nil { + t.Fatal(err) } } @@ -175,7 +185,7 @@ func TestEnqueuePlacementsByClusterSet(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { clusterClient := clusterfake.NewSimpleClientset(c.initObjs...) - clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...) + clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...) syncCtx := testingcommon.NewFakeSyncContext(t, "fake") q := newEnqueuer( @@ -282,7 +292,7 @@ func TestEnqueuePlacementsByClusterSetBinding(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { clusterClient := clusterfake.NewSimpleClientset(c.initObjs...) - clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...) + clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...) 
syncCtx := testingcommon.NewFakeSyncContext(t, "fake") q := newEnqueuer( @@ -370,7 +380,7 @@ func TestEnqueuePlacementsByScore(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { clusterClient := clusterfake.NewSimpleClientset(c.initObjs...) - clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...) + clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...) syncCtx := testingcommon.NewFakeSyncContext(t, "fake") q := newEnqueuer( diff --git a/pkg/placement/controllers/scheduling/schedule.go b/pkg/placement/controllers/scheduling/schedule.go index 169179b1b..9276589a0 100644 --- a/pkg/placement/controllers/scheduling/schedule.go +++ b/pkg/placement/controllers/scheduling/schedule.go @@ -56,7 +56,7 @@ type ScheduleResult interface { // PrioritizerScores returns total score for each cluster PrioritizerScores() PrioritizerScore - // Decision returns the decision groups of the schedule + // Decisions returns the decision groups of the schedule Decisions() []*clusterapiv1.ManagedCluster // NumOfUnscheduled returns the number of unscheduled. @@ -180,7 +180,7 @@ func (s *pluginScheduler) Schedule( } // filter clusters - filterPipline := []string{} + var filterPipline []string for _, f := range s.filters { filterResult, status := f.Filter(ctx, placement, filtered) @@ -389,10 +389,10 @@ func getPrioritizers(weights map[clusterapiv1beta1.ScoreCoordinate]int32, handle } func (r *scheduleResult) FilterResults() []FilterResult { - results := []FilterResult{} + var results []FilterResult // order the FilterResults by key length - filteredRecordsKey := []string{} + var filteredRecordsKey []string for name := range r.filteredRecords { filteredRecordsKey = append(filteredRecordsKey, name) } diff --git a/pkg/placement/controllers/scheduling/schedule_test.go b/pkg/placement/controllers/scheduling/schedule_test.go index 423490e43..85f3b6921 100644 --- a/pkg/placement/controllers/scheduling/schedule_test.go +++ b/pkg/placement/controllers/scheduling/schedule_test.go @@ -23,8 +23,6 @@ import ( func TestSchedule(t *testing.T) { clusterSetName := "clusterSets" - placementNamespace := "ns1" - placementName := "placement1" cases := []struct { name string @@ -294,8 +292,13 @@ func TestSchedule(t *testing.T) { expectedStatus: *framework.NewStatus("", framework.Success, ""), }, { - name: "placement with additive Prioritizer Policy", - placement: testinghelpers.NewPlacement(placementNamespace, placementName).WithNOC(2).WithPrioritizerPolicy("Additive").WithPrioritizerConfig("Balance", 3).WithPrioritizerConfig("ResourceAllocatableMemory", 1).WithScoreCoordinateAddOn("demo", "demo", 1).Build(), + name: "placement with additive Prioritizer Policy", + placement: testinghelpers.NewPlacement(placementNamespace, placementName). + WithNOC(2). + WithPrioritizerPolicy("Additive"). + WithPrioritizerConfig("Balance", 3). + WithPrioritizerConfig("ResourceAllocatableMemory", 1). 
+ WithScoreCoordinateAddOn("demo", "demo", 1).Build(), initObjs: []runtime.Object{ testinghelpers.NewClusterSet(clusterSetName).Build(), testinghelpers.NewClusterSetBinding(placementNamespace, clusterSetName), @@ -304,13 +307,23 @@ func TestSchedule(t *testing.T) { testinghelpers.NewAddOnPlacementScore("cluster3", "demo").WithScore("demo", 50).Build(), }, clusters: []*clusterapiv1.ManagedCluster{ - testinghelpers.NewManagedCluster("cluster1").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "100", "100").Build(), - testinghelpers.NewManagedCluster("cluster2").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "50", "100").Build(), - testinghelpers.NewManagedCluster("cluster3").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "0", "100").Build(), + testinghelpers.NewManagedCluster("cluster1"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName). + WithResource(clusterapiv1.ResourceMemory, "100", "100").Build(), + testinghelpers.NewManagedCluster("cluster2"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName). + WithResource(clusterapiv1.ResourceMemory, "50", "100").Build(), + testinghelpers.NewManagedCluster("cluster3"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName). + WithResource(clusterapiv1.ResourceMemory, "0", "100").Build(), }, expectedDecisions: []*clusterapiv1.ManagedCluster{ - testinghelpers.NewManagedCluster("cluster1").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "100", "100").Build(), - testinghelpers.NewManagedCluster("cluster2").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "50", "100").Build(), + testinghelpers.NewManagedCluster("cluster1"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName). + WithResource(clusterapiv1.ResourceMemory, "100", "100").Build(), + testinghelpers.NewManagedCluster("cluster2"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName). + WithResource(clusterapiv1.ResourceMemory, "50", "100").Build(), }, expectedFilterResult: []FilterResult{ { @@ -348,20 +361,33 @@ func TestSchedule(t *testing.T) { expectedStatus: *framework.NewStatus("", framework.Success, ""), }, { - name: "placement with exact Prioritizer Policy", - placement: testinghelpers.NewPlacement(placementNamespace, placementName).WithNOC(2).WithPrioritizerPolicy("Exact").WithPrioritizerConfig("Balance", 3).WithPrioritizerConfig("ResourceAllocatableMemory", 1).Build(), + name: "placement with exact Prioritizer Policy", + placement: testinghelpers.NewPlacement(placementNamespace, placementName). + WithNOC(2).WithPrioritizerPolicy("Exact"). + WithPrioritizerConfig("Balance", 3). 
+ WithPrioritizerConfig("ResourceAllocatableMemory", 1).Build(), initObjs: []runtime.Object{ testinghelpers.NewClusterSet(clusterSetName).Build(), testinghelpers.NewClusterSetBinding(placementNamespace, clusterSetName), }, clusters: []*clusterapiv1.ManagedCluster{ - testinghelpers.NewManagedCluster("cluster1").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "100", "100").Build(), - testinghelpers.NewManagedCluster("cluster2").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "50", "100").Build(), - testinghelpers.NewManagedCluster("cluster3").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "0", "100").Build(), + testinghelpers.NewManagedCluster("cluster1"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName). + WithResource(clusterapiv1.ResourceMemory, "100", "100").Build(), + testinghelpers.NewManagedCluster("cluster2"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName). + WithResource(clusterapiv1.ResourceMemory, "50", "100").Build(), + testinghelpers.NewManagedCluster("cluster3"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName). + WithResource(clusterapiv1.ResourceMemory, "0", "100").Build(), }, expectedDecisions: []*clusterapiv1.ManagedCluster{ - testinghelpers.NewManagedCluster("cluster1").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "100", "100").Build(), - testinghelpers.NewManagedCluster("cluster2").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "50", "100").Build(), + testinghelpers.NewManagedCluster("cluster1"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName). + WithResource(clusterapiv1.ResourceMemory, "100", "100").Build(), + testinghelpers.NewManagedCluster("cluster2"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName). + WithResource(clusterapiv1.ResourceMemory, "50", "100").Build(), }, expectedFilterResult: []FilterResult{ { @@ -399,12 +425,16 @@ func TestSchedule(t *testing.T) { WithDecisions("cluster1").Build(), }, clusters: []*clusterapiv1.ManagedCluster{ - testinghelpers.NewManagedCluster("cluster1").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), - testinghelpers.NewManagedCluster("cluster2").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), + testinghelpers.NewManagedCluster("cluster1"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), + testinghelpers.NewManagedCluster("cluster2"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), }, expectedDecisions: []*clusterapiv1.ManagedCluster{ - testinghelpers.NewManagedCluster("cluster1").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), - testinghelpers.NewManagedCluster("cluster2").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), + testinghelpers.NewManagedCluster("cluster1"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), + testinghelpers.NewManagedCluster("cluster2"). 
+ WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), }, expectedFilterResult: []FilterResult{ { @@ -441,12 +471,16 @@ func TestSchedule(t *testing.T) { WithDecisions("cluster1", "cluster2").Build(), }, clusters: []*clusterapiv1.ManagedCluster{ - testinghelpers.NewManagedCluster("cluster1").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), - testinghelpers.NewManagedCluster("cluster2").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), - testinghelpers.NewManagedCluster("cluster3").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), + testinghelpers.NewManagedCluster("cluster1"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), + testinghelpers.NewManagedCluster("cluster2"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), + testinghelpers.NewManagedCluster("cluster3"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), }, expectedDecisions: []*clusterapiv1.ManagedCluster{ - testinghelpers.NewManagedCluster("cluster3").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), + testinghelpers.NewManagedCluster("cluster3"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), }, expectedFilterResult: []FilterResult{ { @@ -488,12 +522,16 @@ func TestSchedule(t *testing.T) { WithDecisions("cluster3").Build(), }, clusters: []*clusterapiv1.ManagedCluster{ - testinghelpers.NewManagedCluster("cluster1").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), - testinghelpers.NewManagedCluster("cluster2").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), - testinghelpers.NewManagedCluster("cluster3").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), + testinghelpers.NewManagedCluster("cluster1"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), + testinghelpers.NewManagedCluster("cluster2"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), + testinghelpers.NewManagedCluster("cluster3"). + WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), }, expectedDecisions: []*clusterapiv1.ManagedCluster{ - testinghelpers.NewManagedCluster("cluster3").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), + testinghelpers.NewManagedCluster("cluster3"). 
+ WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(), }, expectedFilterResult: []FilterResult{ { diff --git a/pkg/placement/controllers/scheduling/scheduling_controller.go b/pkg/placement/controllers/scheduling/scheduling_controller.go index ae6e3bfc0..7dcb149f6 100644 --- a/pkg/placement/controllers/scheduling/scheduling_controller.go +++ b/pkg/placement/controllers/scheduling/scheduling_controller.go @@ -8,7 +8,6 @@ import ( "sort" "strconv" "strings" - "time" "github.com/openshift/library-go/pkg/controller/factory" "github.com/openshift/library-go/pkg/operator/events" @@ -23,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" - cache "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/cache" kevents "k8s.io/client-go/tools/events" "k8s.io/klog/v2" @@ -59,8 +58,6 @@ type clusterDecisionGroup struct { clusterDecisions []clusterapiv1beta1.ClusterDecision } -var ResyncInterval = time.Minute * 5 - // schedulingController schedules cluster decisions for Placements type schedulingController struct { clusterClient clusterclient.Interface @@ -167,8 +164,7 @@ func NewSchedulingController( placementInformer.Informer()). WithFilteredEventsInformersQueueKeyFunc(func(obj runtime.Object) string { accessor, _ := meta.Accessor(obj) - labels := accessor.GetLabels() - placementName := labels[clusterapiv1beta1.PlacementLabel] + placementName := accessor.GetLabels()[clusterapiv1beta1.PlacementLabel] return fmt.Sprintf("%s/%s", accessor.GetNamespace(), placementName) }, queue.FileterByLabel(clusterapiv1beta1.PlacementLabel), @@ -287,7 +283,7 @@ func (c *schedulingController) getValidManagedClusterSetBindings(placementNamesp bindings = nil } - validBindings := []*clusterapiv1beta2.ManagedClusterSetBinding{} + var validBindings []*clusterapiv1beta2.ManagedClusterSetBinding for _, binding := range bindings { // ignore clustersetbinding refers to a non-existent clusterset _, err := c.clusterSetLister.Get(binding.Name) @@ -352,7 +348,7 @@ func (c *schedulingController) getAvailableClusters(clusterSetNames []string) ([ return nil, nil } - result := []*clusterapiv1.ManagedCluster{} + var result []*clusterapiv1.ManagedCluster for _, c := range availableClusters { result = append(result, c) } @@ -461,8 +457,8 @@ func (c *schedulingController) generatePlacementDecisionsAndStatus( clusters []*clusterapiv1.ManagedCluster, ) ([]*clusterapiv1beta1.PlacementDecision, []*clusterapiv1beta1.DecisionGroupStatus, *framework.Status) { placementDecisionIndex := 0 - placementDecisions := []*clusterapiv1beta1.PlacementDecision{} - decisionGroupStatus := []*clusterapiv1beta1.DecisionGroupStatus{} + var placementDecisions []*clusterapiv1beta1.PlacementDecision + var decisionGroupStatus []*clusterapiv1beta1.DecisionGroupStatus // generate decision group decisionGroups, status := c.generateDecisionGroups(placement, clusters) @@ -491,7 +487,7 @@ func (c *schedulingController) generateDecisionGroups( placement *clusterapiv1beta1.Placement, clusters []*clusterapiv1.ManagedCluster, ) (clusterDecisionGroups, *framework.Status) { - groups := []clusterDecisionGroup{} + var groups []clusterDecisionGroup // Calculate the group length // The number of items in each group is determined by the specific number or percentage defined in @@ -502,7 +498,7 @@ func (c *schedulingController) generateDecisionGroups( } // Record the cluster names - clusterNames := sets.NewString() + clusterNames := sets.New[string]() for _, cluster := range 
clusters { clusterNames.Insert(cluster.Name) } @@ -514,15 +510,14 @@ func (c *schedulingController) generateDecisionGroups( if status.IsError() { return groups, status } - // If matched clusters number meets groupLength, divide into multiple groups. decisionGroups := divideDecisionGroups(d.GroupName, matched, groupLength) groups = append(groups, decisionGroups...) } // The rest of the clusters will also be put into decision groups. - matched := []clusterapiv1beta1.ClusterDecision{} - for _, cluster := range clusterNames.List() { + var matched []clusterapiv1beta1.ClusterDecision + for _, cluster := range clusterNames.UnsortedList() { matched = append(matched, clusterapiv1beta1.ClusterDecision{ ClusterName: cluster, }) @@ -547,7 +542,7 @@ func (c *schedulingController) generateDecision( ) ([]*clusterapiv1beta1.PlacementDecision, *clusterapiv1beta1.DecisionGroupStatus) { // split the cluster decisions into slices, the size of each slice cannot exceed // maxNumOfClusterDecisions. - decisionSlices := [][]clusterapiv1beta1.ClusterDecision{} + var decisionSlices [][]clusterapiv1beta1.ClusterDecision remainingDecisions := clusterDecisionGroup.clusterDecisions for index := 0; len(remainingDecisions) > 0; index++ { var decisionSlice []clusterapiv1beta1.ClusterDecision @@ -568,8 +563,8 @@ func (c *schedulingController) generateDecision( decisionSlices = append(decisionSlices, []clusterapiv1beta1.ClusterDecision{}) } - placementDecisionNames := []string{} - placementDecisions := []*clusterapiv1beta1.PlacementDecision{} + var placementDecisionNames []string + var placementDecisions []*clusterapiv1beta1.PlacementDecision for index, decisionSlice := range decisionSlices { placementDecisionName := fmt.Sprintf("%s-decision-%d", placement.Name, placementDecisionIndex+index) owner := metav1.NewControllerRef(placement, clusterapiv1beta1.GroupVersion.WithKind("Placement")) @@ -612,7 +607,7 @@ func (c *schedulingController) bind( clusterScores PrioritizerScore, status *framework.Status, ) error { - errs := []error{} + var errs []error placementDecisionNames := sets.NewString() // create/update placement decisions @@ -775,10 +770,9 @@ func calculateLength(intOrStr *intstr.IntOrString, total int) (int, *framework.S func filterClustersBySelector( selector clusterapiv1beta1.ClusterSelector, clusters []*clusterapiv1.ManagedCluster, - clusterNames sets.String, + clusterNames sets.Set[string], ) ([]clusterapiv1beta1.ClusterDecision, *framework.Status) { - matched := []clusterapiv1beta1.ClusterDecision{} - + var matched []clusterapiv1beta1.ClusterDecision // create cluster label selector clusterSelector, err := helpers.NewClusterSelector(selector) if err != nil { @@ -806,8 +800,7 @@ func filterClustersBySelector( // divideDecisionGroups divide the matched clusters to the groups and ensuring that each group has the specified length. 
func divideDecisionGroups(groupName string, matched []clusterapiv1beta1.ClusterDecision, groupLength int) []clusterDecisionGroup { - groups := []clusterDecisionGroup{} - + var groups []clusterDecisionGroup for len(matched) > 0 { groupClusters := matched if groupLength < len(matched) { diff --git a/pkg/placement/controllers/scheduling/scheduling_controller_test.go b/pkg/placement/controllers/scheduling/scheduling_controller_test.go index dec203d04..57ae8aa8e 100644 --- a/pkg/placement/controllers/scheduling/scheduling_controller_test.go +++ b/pkg/placement/controllers/scheduling/scheduling_controller_test.go @@ -32,6 +32,11 @@ type testScheduler struct { result ScheduleResult } +const ( + placementNamespace = "ns1" + placementName = "placement1" +) + func (s *testScheduler) Schedule(ctx context.Context, placement *clusterapiv1beta1.Placement, clusters []*clusterapiv1.ManagedCluster, @@ -40,9 +45,6 @@ func (s *testScheduler) Schedule(ctx context.Context, } func TestSchedulingController_sync(t *testing.T) { - placementNamespace := "ns1" - placementName := "placement1" - cases := []struct { name string placement *clusterapiv1beta1.Placement @@ -371,7 +373,7 @@ func TestSchedulingController_sync(t *testing.T) { validateActions: func(t *testing.T, actions []clienttesting.Action) { // check if PlacementDecision has been updated testingcommon.AssertActions(t, actions, "create", "patch") - // check if emtpy PlacementDecision has been created + // check if empty PlacementDecision has been created actual := actions[0].(clienttesting.CreateActionImpl).Object placementDecision, ok := actual.(*clusterapiv1beta1.PlacementDecision) if !ok { @@ -418,7 +420,7 @@ func TestSchedulingController_sync(t *testing.T) { validateActions: func(t *testing.T, actions []clienttesting.Action) { // check if PlacementDecision has been updated testingcommon.AssertActions(t, actions, "create", "patch") - // check if emtpy PlacementDecision has been created + // check if empty PlacementDecision has been created actual := actions[0].(clienttesting.CreateActionImpl).Object placementDecision, ok := actual.(*clusterapiv1beta1.PlacementDecision) if !ok { @@ -468,7 +470,7 @@ func TestSchedulingController_sync(t *testing.T) { validateActions: func(t *testing.T, actions []clienttesting.Action) { // check if PlacementDecision has been updated testingcommon.AssertActions(t, actions, "create", "patch") - // check if emtpy PlacementDecision has been created + // check if empty PlacementDecision has been created actual := actions[0].(clienttesting.CreateActionImpl).Object placementDecision, ok := actual.(*clusterapiv1beta1.PlacementDecision) if !ok { @@ -551,7 +553,7 @@ func TestSchedulingController_sync(t *testing.T) { t.Run(c.name, func(t *testing.T) { c.initObjs = append(c.initObjs, c.placement) clusterClient := clusterfake.NewSimpleClientset(c.initObjs...) - clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...) + clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...) s := &testScheduler{result: c.scheduleResult} ctrl := schedulingController{ @@ -608,7 +610,7 @@ func TestGetValidManagedClusterSetBindings(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { clusterClient := clusterfake.NewSimpleClientset(c.initObjs...) - clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...) + clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...) 
ctrl := &schedulingController{ clusterSetLister: clusterInformerFactory.Cluster().V1beta2().ManagedClusterSets().Lister(), @@ -683,7 +685,7 @@ func TestGetValidManagedClusterSets(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { clusterClient := clusterfake.NewSimpleClientset(c.initObjs...) - clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...) + clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...) ctrl := &schedulingController{ clusterSetLister: clusterInformerFactory.Cluster().V1beta2().ManagedClusterSets().Lister(), @@ -811,7 +813,7 @@ func TestGetAvailableClusters(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { clusterClient := clusterfake.NewSimpleClientset(c.initObjs...) - clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...) + clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...) ctrl := &schedulingController{ clusterLister: clusterInformerFactory.Cluster().V1().ManagedClusters().Lister(), @@ -975,9 +977,6 @@ func TestNewMisconfiguredCondition(t *testing.T) { } func TestBind(t *testing.T) { - placementNamespace := "ns1" - placementName := "placement1" - cases := []struct { name string initObjs []runtime.Object @@ -1387,7 +1386,7 @@ func TestBind(t *testing.T) { }, ) - clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...) + clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...) s := &testScheduler{} diff --git a/pkg/placement/debugger/debugger.go b/pkg/placement/debugger/debugger.go index 4c3caedc9..90e6bd028 100644 --- a/pkg/placement/debugger/debugger.go +++ b/pkg/placement/debugger/debugger.go @@ -13,7 +13,7 @@ import ( clusterlisterv1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1" clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" - scheduling "open-cluster-management.io/ocm/pkg/placement/controllers/scheduling" + "open-cluster-management.io/ocm/pkg/placement/controllers/scheduling" ) const DebugPath = "/debug/placements/" diff --git a/pkg/placement/debugger/debugger_test.go b/pkg/placement/debugger/debugger_test.go index 9fd70cabe..f874210a6 100644 --- a/pkg/placement/debugger/debugger_test.go +++ b/pkg/placement/debugger/debugger_test.go @@ -4,7 +4,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "reflect" @@ -18,7 +18,7 @@ import ( clusterapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" "open-cluster-management.io/ocm/pkg/placement/controllers/framework" - scheduling "open-cluster-management.io/ocm/pkg/placement/controllers/scheduling" + "open-cluster-management.io/ocm/pkg/placement/controllers/scheduling" testinghelpers "open-cluster-management.io/ocm/pkg/placement/helpers/testing" ) @@ -102,7 +102,7 @@ func TestDebugger(t *testing.T) { t.Errorf("Expect no error but get %v", err) } - responseBody, err := ioutil.ReadAll(res.Body) + responseBody, err := io.ReadAll(res.Body) if err != nil { t.Errorf("Unexpected error reading response body: %v", err) } diff --git a/pkg/placement/helpers/clusters_test.go b/pkg/placement/helpers/clusters_test.go index 4e0969ef0..dbb7a5959 100644 --- a/pkg/placement/helpers/clusters_test.go +++ b/pkg/placement/helpers/clusters_test.go @@ -151,7 +151,7 @@ func TestGetClusterClaims(t *testing.T) { expected: map[string]string{"cloud": "Amazon"}, }, { - name: "convert emtpy cluster claim", + name: 
"convert empty cluster claim", cluster: testinghelpers.NewManagedCluster("cluster1").Build(), expected: map[string]string{}, }, diff --git a/pkg/placement/helpers/testing/builders.go b/pkg/placement/helpers/testing/builders.go index 43a7eb6cb..244318510 100644 --- a/pkg/placement/helpers/testing/builders.go +++ b/pkg/placement/helpers/testing/builders.go @@ -15,12 +15,12 @@ import ( clusterapiv1beta2 "open-cluster-management.io/api/cluster/v1beta2" ) -type placementBuilder struct { +type PlacementBuilder struct { placement *clusterapiv1beta1.Placement } -func NewPlacement(namespace, name string) *placementBuilder { - return &placementBuilder{ +func NewPlacement(namespace, name string) *PlacementBuilder { + return &PlacementBuilder{ placement: &clusterapiv1beta1.Placement{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, @@ -30,8 +30,8 @@ func NewPlacement(namespace, name string) *placementBuilder { } } -func NewPlacementWithAnnotations(namespace, name string, annotations map[string]string) *placementBuilder { - return &placementBuilder{ +func NewPlacementWithAnnotations(namespace, name string, annotations map[string]string) *PlacementBuilder { + return &PlacementBuilder{ placement: &clusterapiv1beta1.Placement{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, @@ -42,29 +42,29 @@ func NewPlacementWithAnnotations(namespace, name string, annotations map[string] } } -func (b *placementBuilder) WithUID(uid string) *placementBuilder { +func (b *PlacementBuilder) WithUID(uid string) *PlacementBuilder { b.placement.UID = types.UID(uid) return b } -func (b *placementBuilder) WithNOC(noc int32) *placementBuilder { +func (b *PlacementBuilder) WithNOC(noc int32) *PlacementBuilder { b.placement.Spec.NumberOfClusters = &noc return b } -func (b *placementBuilder) WithGroupStrategy(groupStrategy clusterapiv1beta1.GroupStrategy) *placementBuilder { +func (b *PlacementBuilder) WithGroupStrategy(groupStrategy clusterapiv1beta1.GroupStrategy) *PlacementBuilder { b.placement.Spec.DecisionStrategy.GroupStrategy = groupStrategy return b } -func (b *placementBuilder) WithPrioritizerPolicy(mode clusterapiv1beta1.PrioritizerPolicyModeType) *placementBuilder { +func (b *PlacementBuilder) WithPrioritizerPolicy(mode clusterapiv1beta1.PrioritizerPolicyModeType) *PlacementBuilder { b.placement.Spec.PrioritizerPolicy = clusterapiv1beta1.PrioritizerPolicy{ Mode: mode, } return b } -func (b *placementBuilder) WithPrioritizerConfig(name string, weight int32) *placementBuilder { +func (b *PlacementBuilder) WithPrioritizerConfig(name string, weight int32) *PlacementBuilder { if b.placement.Spec.PrioritizerPolicy.Configurations == nil { b.placement.Spec.PrioritizerPolicy.Configurations = []clusterapiv1beta1.PrioritizerConfig{} } @@ -80,7 +80,7 @@ func (b *placementBuilder) WithPrioritizerConfig(name string, weight int32) *pla return b } -func (b *placementBuilder) WithScoreCoordinateAddOn(resourceName, scoreName string, weight int32) *placementBuilder { +func (b *PlacementBuilder) WithScoreCoordinateAddOn(resourceName, scoreName string, weight int32) *PlacementBuilder { if b.placement.Spec.PrioritizerPolicy.Configurations == nil { b.placement.Spec.PrioritizerPolicy.Configurations = []clusterapiv1beta1.PrioritizerConfig{} } @@ -96,18 +96,18 @@ func (b *placementBuilder) WithScoreCoordinateAddOn(resourceName, scoreName stri return b } -func (b *placementBuilder) WithClusterSets(clusterSets ...string) *placementBuilder { +func (b *PlacementBuilder) WithClusterSets(clusterSets ...string) *PlacementBuilder { 
b.placement.Spec.ClusterSets = clusterSets return b } -func (b *placementBuilder) WithDeletionTimestamp() *placementBuilder { +func (b *PlacementBuilder) WithDeletionTimestamp() *PlacementBuilder { now := metav1.Now() b.placement.DeletionTimestamp = &now return b } -func (b *placementBuilder) AddPredicate(labelSelector *metav1.LabelSelector, claimSelector *clusterapiv1beta1.ClusterClaimSelector) *placementBuilder { +func (b *PlacementBuilder) AddPredicate(labelSelector *metav1.LabelSelector, claimSelector *clusterapiv1beta1.ClusterClaimSelector) *PlacementBuilder { if b.placement.Spec.Predicates == nil { b.placement.Spec.Predicates = []clusterapiv1beta1.ClusterPredicate{} } @@ -115,7 +115,7 @@ func (b *placementBuilder) AddPredicate(labelSelector *metav1.LabelSelector, cla return b } -func (b *placementBuilder) AddToleration(toleration *clusterapiv1beta1.Toleration) *placementBuilder { +func (b *PlacementBuilder) AddToleration(toleration *clusterapiv1beta1.Toleration) *PlacementBuilder { if b.placement.Spec.Tolerations == nil { b.placement.Spec.Tolerations = []clusterapiv1beta1.Toleration{} } @@ -123,7 +123,7 @@ func (b *placementBuilder) AddToleration(toleration *clusterapiv1beta1.Toleratio return b } -func (b *placementBuilder) WithNumOfSelectedClusters(nosc int, placementName string) *placementBuilder { +func (b *PlacementBuilder) WithNumOfSelectedClusters(nosc int, placementName string) *PlacementBuilder { b.placement.Status.NumberOfSelectedClusters = int32(nosc) b.placement.Status.DecisionGroups = []clusterapiv1beta1.DecisionGroupStatus{ { @@ -136,7 +136,7 @@ func (b *placementBuilder) WithNumOfSelectedClusters(nosc int, placementName str return b } -func (b *placementBuilder) WithSatisfiedCondition(numbOfScheduledDecisions, numbOfUnscheduledDecisions int) *placementBuilder { +func (b *PlacementBuilder) WithSatisfiedCondition(numbOfScheduledDecisions, numbOfUnscheduledDecisions int) *PlacementBuilder { condition := metav1.Condition{ Type: clusterapiv1beta1.PlacementConditionSatisfied, } @@ -154,7 +154,7 @@ func (b *placementBuilder) WithSatisfiedCondition(numbOfScheduledDecisions, numb return b } -func (b *placementBuilder) WithMisconfiguredCondition(status metav1.ConditionStatus) *placementBuilder { +func (b *PlacementBuilder) WithMisconfiguredCondition(status metav1.ConditionStatus) *PlacementBuilder { condition := metav1.Condition{ Type: clusterapiv1beta1.PlacementConditionMisconfigured, Status: status, @@ -165,7 +165,7 @@ func (b *placementBuilder) WithMisconfiguredCondition(status metav1.ConditionSta return b } -func (b *placementBuilder) Build() *clusterapiv1beta1.Placement { +func (b *PlacementBuilder) Build() *clusterapiv1beta1.Placement { return b.placement } @@ -185,12 +185,12 @@ func NewClusterPredicate(labelSelector *metav1.LabelSelector, claimSelector *clu return predicate } -type placementDecisionBuilder struct { +type PlacementDecisionBuilder struct { placementDecision *clusterapiv1beta1.PlacementDecision } -func NewPlacementDecision(namespace, name string) *placementDecisionBuilder { - return &placementDecisionBuilder{ +func NewPlacementDecision(namespace, name string) *PlacementDecisionBuilder { + return &PlacementDecisionBuilder{ placementDecision: &clusterapiv1beta1.PlacementDecision{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, @@ -200,7 +200,7 @@ func NewPlacementDecision(namespace, name string) *placementDecisionBuilder { } } -func (b *placementDecisionBuilder) WithController(uid string) *placementDecisionBuilder { +func (b *PlacementDecisionBuilder) 
WithController(uid string) *PlacementDecisionBuilder { controller := true b.placementDecision.OwnerReferences = append(b.placementDecision.OwnerReferences, metav1.OwnerReference{ Controller: &controller, @@ -209,7 +209,7 @@ func (b *placementDecisionBuilder) WithController(uid string) *placementDecision return b } -func (b *placementDecisionBuilder) WithLabel(name, value string) *placementDecisionBuilder { +func (b *PlacementDecisionBuilder) WithLabel(name, value string) *PlacementDecisionBuilder { if b.placementDecision.Labels == nil { b.placementDecision.Labels = map[string]string{} } @@ -217,14 +217,14 @@ func (b *placementDecisionBuilder) WithLabel(name, value string) *placementDecis return b } -func (b *placementDecisionBuilder) WithDeletionTimestamp() *placementDecisionBuilder { +func (b *PlacementDecisionBuilder) WithDeletionTimestamp() *PlacementDecisionBuilder { now := metav1.Now() b.placementDecision.DeletionTimestamp = &now return b } -func (b *placementDecisionBuilder) WithDecisions(clusterNames ...string) *placementDecisionBuilder { - decisions := []clusterapiv1beta1.ClusterDecision{} +func (b *PlacementDecisionBuilder) WithDecisions(clusterNames ...string) *PlacementDecisionBuilder { + var decisions []clusterapiv1beta1.ClusterDecision for _, clusterName := range clusterNames { decisions = append(decisions, clusterapiv1beta1.ClusterDecision{ ClusterName: clusterName, @@ -234,16 +234,16 @@ func (b *placementDecisionBuilder) WithDecisions(clusterNames ...string) *placem return b } -func (b *placementDecisionBuilder) Build() *clusterapiv1beta1.PlacementDecision { +func (b *PlacementDecisionBuilder) Build() *clusterapiv1beta1.PlacementDecision { return b.placementDecision } -type managedClusterBuilder struct { +type ManagedClusterBuilder struct { cluster *clusterapiv1.ManagedCluster } -func NewManagedCluster(clusterName string) *managedClusterBuilder { - return &managedClusterBuilder{ +func NewManagedCluster(clusterName string) *ManagedClusterBuilder { + return &ManagedClusterBuilder{ cluster: &clusterapiv1.ManagedCluster{ ObjectMeta: metav1.ObjectMeta{ Name: clusterName, @@ -252,7 +252,7 @@ func NewManagedCluster(clusterName string) *managedClusterBuilder { } } -func (b *managedClusterBuilder) WithLabel(name, value string) *managedClusterBuilder { +func (b *ManagedClusterBuilder) WithLabel(name, value string) *ManagedClusterBuilder { if b.cluster.Labels == nil { b.cluster.Labels = map[string]string{} } @@ -260,14 +260,14 @@ func (b *managedClusterBuilder) WithLabel(name, value string) *managedClusterBui return b } -func (b *managedClusterBuilder) WithClaim(name, value string) *managedClusterBuilder { +func (b *ManagedClusterBuilder) WithClaim(name, value string) *ManagedClusterBuilder { claimMap := map[string]string{} for _, claim := range b.cluster.Status.ClusterClaims { claimMap[claim.Name] = claim.Value } claimMap[name] = value - clusterClaims := []clusterapiv1.ManagedClusterClaim{} + var clusterClaims []clusterapiv1.ManagedClusterClaim for k, v := range claimMap { clusterClaims = append(clusterClaims, clusterapiv1.ManagedClusterClaim{ Name: k, @@ -279,7 +279,7 @@ func (b *managedClusterBuilder) WithClaim(name, value string) *managedClusterBui return b } -func (b *managedClusterBuilder) WithResource(resourceName clusterapiv1.ResourceName, allocatable, capacity string) *managedClusterBuilder { +func (b *ManagedClusterBuilder) WithResource(resourceName clusterapiv1.ResourceName, allocatable, capacity string) *ManagedClusterBuilder { if b.cluster.Status.Allocatable == nil { 
b.cluster.Status.Allocatable = make(map[clusterapiv1.ResourceName]resource.Quantity) } @@ -292,7 +292,7 @@ func (b *managedClusterBuilder) WithResource(resourceName clusterapiv1.ResourceN return b } -func (b *managedClusterBuilder) WithTaint(taint *clusterapiv1.Taint) *managedClusterBuilder { +func (b *ManagedClusterBuilder) WithTaint(taint *clusterapiv1.Taint) *ManagedClusterBuilder { if b.cluster.Spec.Taints == nil { b.cluster.Spec.Taints = []clusterapiv1.Taint{} } @@ -300,16 +300,16 @@ func (b *managedClusterBuilder) WithTaint(taint *clusterapiv1.Taint) *managedClu return b } -func (b *managedClusterBuilder) Build() *clusterapiv1.ManagedCluster { +func (b *ManagedClusterBuilder) Build() *clusterapiv1.ManagedCluster { return b.cluster } -type managedClusterSetBuilder struct { +type ManagedClusterSetBuilder struct { clusterset *clusterapiv1beta2.ManagedClusterSet } -func NewClusterSet(clusterSetName string) *managedClusterSetBuilder { - return &managedClusterSetBuilder{ +func NewClusterSet(clusterSetName string) *ManagedClusterSetBuilder { + return &ManagedClusterSetBuilder{ clusterset: &clusterapiv1beta2.ManagedClusterSet{ ObjectMeta: metav1.ObjectMeta{ Name: clusterSetName, @@ -318,12 +318,12 @@ func NewClusterSet(clusterSetName string) *managedClusterSetBuilder { } } -func (b *managedClusterSetBuilder) WithClusterSelector(clusterSelector clusterapiv1beta2.ManagedClusterSelector) *managedClusterSetBuilder { +func (b *ManagedClusterSetBuilder) WithClusterSelector(clusterSelector clusterapiv1beta2.ManagedClusterSelector) *ManagedClusterSetBuilder { b.clusterset.Spec.ClusterSelector = clusterSelector return b } -func (b *managedClusterSetBuilder) Build() *clusterapiv1beta2.ManagedClusterSet { +func (b *ManagedClusterSetBuilder) Build() *clusterapiv1beta2.ManagedClusterSet { return b.clusterset } @@ -339,12 +339,12 @@ func NewClusterSetBinding(namespace, clusterSetName string) *clusterapiv1beta2.M } } -type addOnPlacementScoreBuilder struct { +type AddOnPlacementScoreBuilder struct { addOnPlacementScore *clusterapiv1alpha1.AddOnPlacementScore } -func NewAddOnPlacementScore(clusternamespace, name string) *addOnPlacementScoreBuilder { - return &addOnPlacementScoreBuilder{ +func NewAddOnPlacementScore(clusternamespace, name string) *AddOnPlacementScoreBuilder { + return &AddOnPlacementScoreBuilder{ addOnPlacementScore: &clusterapiv1alpha1.AddOnPlacementScore{ ObjectMeta: metav1.ObjectMeta{ Namespace: clusternamespace, @@ -354,7 +354,7 @@ func NewAddOnPlacementScore(clusternamespace, name string) *addOnPlacementScoreB } } -func (a *addOnPlacementScoreBuilder) WithScore(name string, score int32) *addOnPlacementScoreBuilder { +func (a *AddOnPlacementScoreBuilder) WithScore(name string, score int32) *AddOnPlacementScoreBuilder { if a.addOnPlacementScore.Status.Scores == nil { a.addOnPlacementScore.Status.Scores = []clusterapiv1alpha1.AddOnPlacementScoreItem{} } @@ -366,12 +366,12 @@ func (a *addOnPlacementScoreBuilder) WithScore(name string, score int32) *addOnP return a } -func (a *addOnPlacementScoreBuilder) WithValidUntil(validUntil time.Time) *addOnPlacementScoreBuilder { +func (a *AddOnPlacementScoreBuilder) WithValidUntil(validUntil time.Time) *AddOnPlacementScoreBuilder { vu := metav1.NewTime(validUntil) a.addOnPlacementScore.Status.ValidUntil = &vu return a } -func (a *addOnPlacementScoreBuilder) Build() *clusterapiv1alpha1.AddOnPlacementScore { +func (a *AddOnPlacementScoreBuilder) Build() *clusterapiv1alpha1.AddOnPlacementScore { return a.addOnPlacementScore } diff --git 
a/pkg/registration/clientcert/cert_controller.go b/pkg/registration/clientcert/cert_controller.go index 544ba6978..f2f886f9f 100644 --- a/pkg/registration/clientcert/cert_controller.go +++ b/pkg/registration/clientcert/cert_controller.go @@ -36,14 +36,6 @@ const ( // ClusterCertificateRotatedCondition is a condition type that client certificate is rotated ClusterCertificateRotatedCondition = "ClusterCertificateRotated" - - // ClientCertificateUpdateFailedReason is a reason of condition ClusterCertificateRotatedCondition that - // the client certificate rotation fails. - ClientCertificateUpdateFailedReason = "ClientCertificateUpdateFailed" - - // ClientCertificateUpdatedReason is a reason of condition ClusterCertificateRotatedCondition that - // the the client certificate succeeds - ClientCertificateUpdatedReason = "ClientCertificateUpdated" ) // ControllerResyncInterval is exposed so that integration tests can crank up the constroller sync speed. diff --git a/pkg/registration/clientcert/certificate.go b/pkg/registration/clientcert/certificate.go index 8f9a43a96..906a01a9e 100644 --- a/pkg/registration/clientcert/certificate.go +++ b/pkg/registration/clientcert/certificate.go @@ -29,7 +29,7 @@ import ( "open-cluster-management.io/ocm/pkg/registration/helpers" ) -// HasValidClientCertificate checks if there exists a valid client certificate in the given secret +// HasValidHubKubeconfig checks if there exists a valid client certificate in the given secret // Returns true if all the conditions below are met: // 1. KubeconfigFile exists when hasKubeconfig is true // 2. TLSKeyFile exists @@ -177,7 +177,7 @@ type CSRControl interface { isApproved(name string) (bool, error) getIssuedCertificate(name string) ([]byte, error) - // public so we can add indexer outside + // Informer is public so we can add indexer outside Informer() cache.SharedIndexInformer } diff --git a/pkg/registration/clientcert/certificate_test.go b/pkg/registration/clientcert/certificate_test.go index 04042017b..7e8f84dbb 100644 --- a/pkg/registration/clientcert/certificate_test.go +++ b/pkg/registration/clientcert/certificate_test.go @@ -82,27 +82,31 @@ func TestHasValidHubKubeconfig(t *testing.T) { }, { name: "no cert", - secret: testinghelpers.NewHubKubeconfigSecret(testNamespace, testSecretName, "", &testinghelpers.TestCert{Key: []byte("key")}, map[string][]byte{ - KubeconfigFile: testinghelpers.NewKubeconfig(nil, nil), - }), + secret: testinghelpers.NewHubKubeconfigSecret( + testNamespace, testSecretName, "", &testinghelpers.TestCert{Key: []byte("key")}, map[string][]byte{ + KubeconfigFile: testinghelpers.NewKubeconfig(nil, nil), + }), }, { name: "bad cert", - secret: testinghelpers.NewHubKubeconfigSecret(testNamespace, testSecretName, "", &testinghelpers.TestCert{Key: []byte("key"), Cert: []byte("bad cert")}, map[string][]byte{ - KubeconfigFile: testinghelpers.NewKubeconfig(nil, nil), - }), + secret: testinghelpers.NewHubKubeconfigSecret( + testNamespace, testSecretName, "", &testinghelpers.TestCert{Key: []byte("key"), Cert: []byte("bad cert")}, map[string][]byte{ + KubeconfigFile: testinghelpers.NewKubeconfig(nil, nil), + }), }, { name: "expired cert", - secret: testinghelpers.NewHubKubeconfigSecret(testNamespace, testSecretName, "", testinghelpers.NewTestCert("test", -60*time.Second), map[string][]byte{ - KubeconfigFile: testinghelpers.NewKubeconfig(nil, nil), - }), + secret: testinghelpers.NewHubKubeconfigSecret( + testNamespace, testSecretName, "", testinghelpers.NewTestCert("test", -60*time.Second), map[string][]byte{ 
+ KubeconfigFile: testinghelpers.NewKubeconfig(nil, nil), + }), }, { name: "invalid common name", - secret: testinghelpers.NewHubKubeconfigSecret(testNamespace, testSecretName, "", testinghelpers.NewTestCert("test", 60*time.Second), map[string][]byte{ - KubeconfigFile: testinghelpers.NewKubeconfig(nil, nil), - }), + secret: testinghelpers.NewHubKubeconfigSecret( + testNamespace, testSecretName, "", testinghelpers.NewTestCert("test", 60*time.Second), map[string][]byte{ + KubeconfigFile: testinghelpers.NewKubeconfig(nil, nil), + }), subject: &pkix.Name{ CommonName: "wrong-common-name", }, @@ -204,8 +208,10 @@ func TestGetCertValidityPeriod(t *testing.T) { expectedErr: "no client certificate found in secret \"testns/testsecret\"", }, { - name: "bad cert", - secret: testinghelpers.NewHubKubeconfigSecret(testNamespace, testSecretName, "", &testinghelpers.TestCert{Cert: []byte("bad cert")}, map[string][]byte{}), + name: "bad cert", + secret: testinghelpers.NewHubKubeconfigSecret( + testNamespace, testSecretName, "", + &testinghelpers.TestCert{Cert: []byte("bad cert")}, map[string][]byte{}), expectedErr: "unable to parse TLS certificates: data does not contain any valid RSA or ECDSA certificates", }, { diff --git a/pkg/registration/clientcert/controller_test.go b/pkg/registration/clientcert/controller_test.go index 307c00beb..220849e2f 100644 --- a/pkg/registration/clientcert/controller_test.go +++ b/pkg/registration/clientcert/controller_test.go @@ -153,7 +153,7 @@ func TestSync(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { ctrl := &mockCSRControl{} - csrs := []runtime.Object{} + var csrs []runtime.Object if c.approvedCSRCert != nil { csr := testinghelpers.NewApprovedCSR(testinghelpers.CSRHolder{Name: testCSRName}) csr.Status.Certificate = c.approvedCSRCert.Cert @@ -224,7 +224,7 @@ func TestSync(t *testing.T) { } if !conditionEqual(c.expectedCondition, updater.cond) { - t.Errorf("conditon is not correct, expected %v, got %v", c.expectedCondition, updater.cond) + t.Errorf("condition is not correct, expected %v, got %v", c.expectedCondition, updater.cond) } c.validateActions(t, hubKubeClient.Actions(), agentKubeClient.Actions()) @@ -258,7 +258,7 @@ type fakeStatusUpdater struct { cond *metav1.Condition } -func (f *fakeStatusUpdater) update(ctx context.Context, cond metav1.Condition) error { +func (f *fakeStatusUpdater) update(_ context.Context, cond metav1.Condition) error { f.cond = cond.DeepCopy() return nil } @@ -269,7 +269,8 @@ type mockCSRControl struct { csrClient *clienttesting.Fake } -func (m *mockCSRControl) create(ctx context.Context, recorder events.Recorder, objMeta metav1.ObjectMeta, csrData []byte, signerName string, expirationSeconds *int32) (string, error) { +func (m *mockCSRControl) create( + _ context.Context, _ events.Recorder, objMeta metav1.ObjectMeta, _ []byte, _ string, _ *int32) (string, error) { mockCSR := &unstructured.Unstructured{} _, err := m.csrClient.Invokes(clienttesting.CreateActionImpl{ ActionImpl: clienttesting.ActionImpl{ diff --git a/pkg/registration/helpers/helpers_test.go b/pkg/registration/helpers/helpers_test.go index 6e69de704..133bbe8d0 100644 --- a/pkg/registration/helpers/helpers_test.go +++ b/pkg/registration/helpers/helpers_test.go @@ -4,8 +4,6 @@ import ( "reflect" "testing" - "k8s.io/apimachinery/pkg/runtime" - clusterv1 "open-cluster-management.io/api/cluster/v1" ) @@ -101,14 +99,6 @@ func TestFindTaintByKey(t *testing.T) { } } -func getApplyFileNames(applyFiles map[string]runtime.Object) []string { - keys := 
[]string{} - for key := range applyFiles { - keys = append(keys, key) - } - return keys -} - var ( UnavailableTaint = clusterv1.Taint{ Key: clusterv1.ManagedClusterTaintUnavailable, diff --git a/pkg/registration/hub/addon/discovery_controller_test.go b/pkg/registration/hub/addon/discovery_controller_test.go index d76cb76ae..db1182998 100644 --- a/pkg/registration/hub/addon/discovery_controller_test.go +++ b/pkg/registration/hub/addon/discovery_controller_test.go @@ -244,7 +244,7 @@ func TestDiscoveryController_Sync(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - objs := []runtime.Object{} + var objs []runtime.Object if c.cluster != nil { objs = append(objs, c.cluster) } diff --git a/pkg/registration/hub/addon/healthcheck_controller.go b/pkg/registration/hub/addon/healthcheck_controller.go index af3aba1d1..22f1e4239 100644 --- a/pkg/registration/hub/addon/healthcheck_controller.go +++ b/pkg/registration/hub/addon/healthcheck_controller.go @@ -19,7 +19,7 @@ import ( clusterlisterv1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1" clusterv1 "open-cluster-management.io/api/cluster/v1" - patcher "open-cluster-management.io/ocm/pkg/common/patcher" + "open-cluster-management.io/ocm/pkg/common/patcher" "open-cluster-management.io/ocm/pkg/common/queue" ) @@ -83,7 +83,7 @@ func (c *managedClusterAddOnHealthCheckController) sync(ctx context.Context, syn return err } - errs := []error{} + var errs []error patcher := patcher.NewPatcher[ *addonv1alpha1.ManagedClusterAddOn, addonv1alpha1.ManagedClusterAddOnSpec, addonv1alpha1.ManagedClusterAddOnStatus]( c.addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName), diff --git a/pkg/registration/hub/clusterrole/controller.go b/pkg/registration/hub/clusterrole/controller.go index 19d522d2e..292fdbd60 100644 --- a/pkg/registration/hub/clusterrole/controller.go +++ b/pkg/registration/hub/clusterrole/controller.go @@ -76,7 +76,7 @@ func (c *clusterroleController) sync(ctx context.Context, syncCtx factory.SyncCo return err } - errs := []error{} + var errs []error // Clean up managedcluser cluserroles if there are no managed clusters if len(managedClusters) == 0 { results := resourceapply.DeleteAll( diff --git a/pkg/registration/hub/lease/controller.go b/pkg/registration/hub/lease/controller.go index f5944f59f..30e368985 100644 --- a/pkg/registration/hub/lease/controller.go +++ b/pkg/registration/hub/lease/controller.go @@ -101,7 +101,7 @@ func (c *leaseController) sync(ctx context.Context, syncCtx factory.SyncContext) Labels: map[string]string{clusterv1.ClusterNameLabelKey: cluster.Name}, }, Spec: coordv1.LeaseSpec{ - HolderIdentity: pointer.StringPtr(leaseName), + HolderIdentity: pointer.String(leaseName), RenewTime: &metav1.MicroTime{Time: time.Now()}, }, } diff --git a/pkg/registration/hub/managedcluster/controller.go b/pkg/registration/hub/managedcluster/controller.go index c791ed365..48c36d872 100644 --- a/pkg/registration/hub/managedcluster/controller.go +++ b/pkg/registration/hub/managedcluster/controller.go @@ -155,7 +155,7 @@ func (c *managedClusterController) sync(ctx context.Context, syncCtx factory.Syn }, } - errs := []error{} + var errs []error _, _, err = resourceapply.ApplyNamespace(ctx, c.kubeClient.CoreV1(), syncCtx.Recorder(), namespace) if err != nil { errs = append(errs, err) @@ -203,7 +203,7 @@ func (c *managedClusterController) sync(ctx context.Context, syncCtx factory.Syn } func (c *managedClusterController) removeManagedClusterResources(ctx context.Context, managedClusterName 
string) error { - errs := []error{} + var errs []error // Clean up managed cluster manifests assetFn := helpers.ManagedClusterAssetFn(manifestFiles, managedClusterName) resourceResults := resourceapply.DeleteAll(ctx, resourceapply.NewKubeClientHolder(c.kubeClient), c.eventRecorder, assetFn, staticFiles...) diff --git a/pkg/registration/hub/managedclusterset/controller.go b/pkg/registration/hub/managedclusterset/controller.go index 0b109bcf4..b35979b58 100644 --- a/pkg/registration/hub/managedclusterset/controller.go +++ b/pkg/registration/hub/managedclusterset/controller.go @@ -28,6 +28,12 @@ import ( "open-cluster-management.io/ocm/pkg/common/queue" ) +const ( + // TODO move these to api repos + ReasonClusterSelected = "ClustersSelected" + ReasonNoClusterMatchced = "NoClusterMatched" +) + // managedClusterSetController reconciles instances of ManagedClusterSet on the hub. type managedClusterSetController struct { patcher patcher.Patcher[*clusterv1beta2.ManagedClusterSet, clusterv1beta2.ManagedClusterSetSpec, clusterv1beta2.ManagedClusterSetStatus] @@ -157,11 +163,11 @@ func (c *managedClusterSetController) syncClusterSet(ctx context.Context, origin } if count == 0 { emptyCondition.Status = metav1.ConditionTrue - emptyCondition.Reason = "NoClusterMatched" + emptyCondition.Reason = ReasonNoClusterMatchced emptyCondition.Message = "No ManagedCluster selected" } else { emptyCondition.Status = metav1.ConditionFalse - emptyCondition.Reason = "ClustersSelected" + emptyCondition.Reason = ReasonClusterSelected emptyCondition.Message = fmt.Sprintf("%d ManagedClusters selected", count) } meta.SetStatusCondition(&clusterSet.Status.Conditions, emptyCondition) @@ -207,9 +213,9 @@ func (c *managedClusterSetController) enqueueUpdateClusterClusterSet(oldCluster, } // getDiffClusterSetsNames return the diff clustersets names -func getDiffClusterSetsNames(oldSets, newSets []*clusterv1beta2.ManagedClusterSet) sets.String { - oldSetsMap := sets.NewString() - newSetsMap := sets.NewString() +func getDiffClusterSetsNames(oldSets, newSets []*clusterv1beta2.ManagedClusterSet) sets.Set[string] { + oldSetsMap := sets.New[string]() + newSetsMap := sets.New[string]() for _, oldSet := range oldSets { oldSetsMap.Insert(oldSet.Name) diff --git a/pkg/registration/hub/managedclusterset/controller_test.go b/pkg/registration/hub/managedclusterset/controller_test.go index 25d4a6e76..367d6b9ff 100644 --- a/pkg/registration/hub/managedclusterset/controller_test.go +++ b/pkg/registration/hub/managedclusterset/controller_test.go @@ -43,7 +43,7 @@ func TestSyncClusterSet(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1beta2.ManagedClusterSetConditionEmpty, Status: metav1.ConditionFalse, - Reason: "ClustersSelected", + Reason: ReasonClusterSelected, Message: "1 ManagedClusters selected", }, }, @@ -67,7 +67,7 @@ func TestSyncClusterSet(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1beta2.ManagedClusterSetConditionEmpty, Status: metav1.ConditionFalse, - Reason: "ClustersSelected", + Reason: ReasonClusterSelected, Message: "1 ManagedClusters selected", }, }, @@ -91,7 +91,7 @@ func TestSyncClusterSet(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1beta2.ManagedClusterSetConditionEmpty, Status: metav1.ConditionTrue, - Reason: "NoClusterMatched", + Reason: ReasonNoClusterMatchced, Message: "No ManagedCluster selected", }, }, @@ -125,7 +125,7 @@ func TestSyncClusterSet(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1beta2.ManagedClusterSetConditionEmpty, Status: 
metav1.ConditionFalse, - Reason: "ClustersSelected", + Reason: ReasonClusterSelected, Message: "2 ManagedClusters selected", }, }, @@ -155,7 +155,7 @@ func TestSyncClusterSet(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1beta2.ManagedClusterSetConditionEmpty, Status: metav1.ConditionFalse, - Reason: "ClustersSelected", + Reason: ReasonClusterSelected, Message: "2 ManagedClusters selected", }, }, @@ -184,7 +184,7 @@ func TestSyncClusterSet(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1beta2.ManagedClusterSetConditionEmpty, Status: metav1.ConditionTrue, - Reason: "NoClusterMatched", + Reason: ReasonNoClusterMatchced, Message: "No ManagedCluster selected", }, }, @@ -206,7 +206,7 @@ func TestSyncClusterSet(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - objects := []runtime.Object{} + var objects []runtime.Object for _, cluster := range c.existingClusters { objects = append(objects, cluster) } @@ -255,7 +255,7 @@ func TestSyncClusterSet(t *testing.T) { t.Errorf("Failed to get clusterset: %v, error: %v", c.existingClusterSet.Name, err) } if !hasCondition(updatedSet.Status.Conditions, c.expectCondition) { - t.Errorf("expected conditon:%v. is not found: %v", c.expectCondition, updatedSet.Status.Conditions) + t.Errorf("expected condition:%v. is not found: %v", c.expectCondition, updatedSet.Status.Conditions) } }) } @@ -266,7 +266,7 @@ func TestGetDiffClustersets(t *testing.T) { name string oldSets []*clusterv1beta2.ManagedClusterSet newSets []*clusterv1beta2.ManagedClusterSet - expectDiffSet sets.String + expectDiffSet sets.Set[string] }{ { name: "update a set", @@ -276,7 +276,7 @@ func TestGetDiffClustersets(t *testing.T) { newSets: []*clusterv1beta2.ManagedClusterSet{ newManagedClusterSet("s1"), newManagedClusterSet("s3"), }, - expectDiffSet: sets.NewString("s2", "s3"), + expectDiffSet: sets.New[string]("s2", "s3"), }, { name: "add a set", @@ -286,7 +286,7 @@ func TestGetDiffClustersets(t *testing.T) { newSets: []*clusterv1beta2.ManagedClusterSet{ newManagedClusterSet("s1"), newManagedClusterSet("s2"), }, - expectDiffSet: sets.NewString("s2"), + expectDiffSet: sets.New[string]("s2"), }, { name: "delete a set", @@ -296,7 +296,7 @@ func TestGetDiffClustersets(t *testing.T) { newSets: []*clusterv1beta2.ManagedClusterSet{ newManagedClusterSet("s1"), }, - expectDiffSet: sets.NewString("s2"), + expectDiffSet: sets.New[string]("s2"), }, { name: "old set is nil", @@ -304,7 +304,7 @@ func TestGetDiffClustersets(t *testing.T) { newSets: []*clusterv1beta2.ManagedClusterSet{ newManagedClusterSet("s1"), }, - expectDiffSet: sets.NewString("s1"), + expectDiffSet: sets.New[string]("s1"), }, { name: "new set is nil", @@ -312,7 +312,7 @@ func TestGetDiffClustersets(t *testing.T) { newManagedClusterSet("s1"), }, newSets: []*clusterv1beta2.ManagedClusterSet{}, - expectDiffSet: sets.NewString("s1"), + expectDiffSet: sets.New[string]("s1"), }, } @@ -368,7 +368,7 @@ func TestEnqueueUpdateClusterClusterSet(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - objects := []runtime.Object{} + var objects []runtime.Object for _, clusterset := range c.existingClusterSets { objects = append(objects, clusterset) diff --git a/pkg/registration/hub/managedclusterset/default_managedclusterset_controller_test.go b/pkg/registration/hub/managedclusterset/default_managedclusterset_controller_test.go index debd84e29..1e8c8caaf 100644 --- a/pkg/registration/hub/managedclusterset/default_managedclusterset_controller_test.go +++ 
b/pkg/registration/hub/managedclusterset/default_managedclusterset_controller_test.go @@ -71,8 +71,9 @@ func TestSyncDefaultClusterSet(t *testing.T) { }, }, { - name: "sync default cluster set with disabled annotation", - existingClusterSet: newDefaultManagedClusterSetWithAnnotation(DefaultManagedClusterSetName, autoUpdateAnnotation, "false", DefaultManagedClusterSet.Spec, false), + name: "sync default cluster set with disabled annotation", + existingClusterSet: newDefaultManagedClusterSetWithAnnotation( + DefaultManagedClusterSetName, autoUpdateAnnotation, "false", DefaultManagedClusterSet.Spec, false), validateActions: func(t *testing.T, actions []clienttesting.Action) { testingcommon.AssertNoActions(t, actions) }, @@ -81,7 +82,7 @@ func TestSyncDefaultClusterSet(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - objects := []runtime.Object{} + var objects []runtime.Object if c.existingClusterSet != nil { objects = append(objects, c.existingClusterSet) @@ -128,7 +129,8 @@ func newDefaultManagedClusterSet(name string, spec clusterv1beta2.ManagedCluster return clusterSet } -func newDefaultManagedClusterSetWithAnnotation(name string, k, v string, spec clusterv1beta2.ManagedClusterSetSpec, terminating bool) *clusterv1beta2.ManagedClusterSet { +func newDefaultManagedClusterSetWithAnnotation( + name, k, v string, spec clusterv1beta2.ManagedClusterSetSpec, terminating bool) *clusterv1beta2.ManagedClusterSet { clusterSet := &clusterv1beta2.ManagedClusterSet{ ObjectMeta: metav1.ObjectMeta{ Name: name, diff --git a/pkg/registration/hub/managedclusterset/global_managedclusterset_controller_test.go b/pkg/registration/hub/managedclusterset/global_managedclusterset_controller_test.go index 223e9c00e..a5ec64311 100644 --- a/pkg/registration/hub/managedclusterset/global_managedclusterset_controller_test.go +++ b/pkg/registration/hub/managedclusterset/global_managedclusterset_controller_test.go @@ -64,8 +64,9 @@ func TestSyncGlobalClusterSet(t *testing.T) { }, }, { - name: "sync global cluster set with disabled annotation", - existingClusterSet: newGlobalManagedClusterSetWithAnnotation(GlobalManagedClusterSetName, autoUpdateAnnotation, "false", GlobalManagedClusterSet.Spec, false), + name: "sync global cluster set with disabled annotation", + existingClusterSet: newGlobalManagedClusterSetWithAnnotation( + GlobalManagedClusterSetName, autoUpdateAnnotation, "false", GlobalManagedClusterSet.Spec, false), validateActions: func(t *testing.T, actions []clienttesting.Action) { testingcommon.AssertNoActions(t, actions) }, @@ -74,7 +75,7 @@ func TestSyncGlobalClusterSet(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - objects := []runtime.Object{} + var objects []runtime.Object if c.existingClusterSet != nil { objects = append(objects, c.existingClusterSet) @@ -120,7 +121,8 @@ func newGlobalManagedClusterSet(name string, spec clusterv1beta2.ManagedClusterS return clusterSet } -func newGlobalManagedClusterSetWithAnnotation(name string, k, v string, spec clusterv1beta2.ManagedClusterSetSpec, terminating bool) *clusterv1beta2.ManagedClusterSet { +func newGlobalManagedClusterSetWithAnnotation( + name, k, v string, spec clusterv1beta2.ManagedClusterSetSpec, terminating bool) *clusterv1beta2.ManagedClusterSet { clusterSet := &clusterv1beta2.ManagedClusterSet{ ObjectMeta: metav1.ObjectMeta{ Name: name, diff --git a/pkg/registration/hub/managedclustersetbinding/controller_test.go b/pkg/registration/hub/managedclustersetbinding/controller_test.go index 
9f01bb307..167098660 100644 --- a/pkg/registration/hub/managedclustersetbinding/controller_test.go +++ b/pkg/registration/hub/managedclustersetbinding/controller_test.go @@ -91,7 +91,7 @@ func TestSync(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - objects := []runtime.Object{} + var objects []runtime.Object objects = append(objects, c.clusterSets...) objects = append(objects, c.clusterSetBinding) @@ -154,7 +154,7 @@ func TestEnqueue(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - objects := []runtime.Object{} + var objects []runtime.Object objects = append(objects, c.clusterSet) objects = append(objects, c.clusterSetBindings...) diff --git a/pkg/registration/hub/rbacfinalizerdeletion/controller.go b/pkg/registration/hub/rbacfinalizerdeletion/controller.go index 24ac231e2..8d2f40a74 100644 --- a/pkg/registration/hub/rbacfinalizerdeletion/controller.go +++ b/pkg/registration/hub/rbacfinalizerdeletion/controller.go @@ -173,7 +173,7 @@ func removeFinalizer(obj runtime.Object, finalizerName string) bool { return false } - newFinalizers := []string{} + var newFinalizers []string accessor, _ := meta.Accessor(obj) found := false for _, finalizer := range accessor.GetFinalizers() { diff --git a/pkg/registration/hub/rbacfinalizerdeletion/controller_test.go b/pkg/registration/hub/rbacfinalizerdeletion/controller_test.go index 7a7bfa233..cc9582156 100644 --- a/pkg/registration/hub/rbacfinalizerdeletion/controller_test.go +++ b/pkg/registration/hub/rbacfinalizerdeletion/controller_test.go @@ -170,7 +170,7 @@ func TestSyncRoleAndRoleBinding(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - objects := []runtime.Object{} + var objects []runtime.Object if c.roleBinding != nil { objects = append(objects, c.roleBinding) } diff --git a/pkg/registration/spoke/addon/configuration_test.go b/pkg/registration/spoke/addon/configuration_test.go index 56975c787..f853d099a 100644 --- a/pkg/registration/spoke/addon/configuration_test.go +++ b/pkg/registration/spoke/addon/configuration_test.go @@ -3,6 +3,7 @@ package addon import ( "testing" + certificates "k8s.io/api/certificates/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" @@ -10,8 +11,11 @@ import ( testinghelpers "open-cluster-management.io/ocm/pkg/registration/helpers/testing" ) +const ( + addOnName = "addon1" +) + func TestGetRegistrationConfigs(t *testing.T) { - addOnName := "addon1" addOnNamespace := "ns1" cases := []struct { @@ -44,13 +48,13 @@ func TestGetRegistrationConfigs(t *testing.T) { Status: addonv1alpha1.ManagedClusterAddOnStatus{ Registrations: []addonv1alpha1.RegistrationConfig{ { - SignerName: "kubernetes.io/kube-apiserver-client", + SignerName: certificates.KubeAPIServerClientSignerName, }, }, }, }, configs: []registrationConfig{ - newRegistrationConfig(addOnName, addOnNamespace, "kubernetes.io/kube-apiserver-client", "", nil, false), + newRegistrationConfig(addOnName, addOnNamespace, certificates.KubeAPIServerClientSignerName, "", nil, false), }, }, { @@ -64,14 +68,14 @@ func TestGetRegistrationConfigs(t *testing.T) { Status: addonv1alpha1.ManagedClusterAddOnStatus{ Registrations: []addonv1alpha1.RegistrationConfig{ { - SignerName: "kubernetes.io/kube-apiserver-client", + SignerName: certificates.KubeAPIServerClientSignerName, }, }, Namespace: addOnNamespace, }, }, configs: []registrationConfig{ - newRegistrationConfig(addOnName, addOnNamespace, "kubernetes.io/kube-apiserver-client", "", 
nil, false), + newRegistrationConfig(addOnName, addOnNamespace, certificates.KubeAPIServerClientSignerName, "", nil, false), }, }, { @@ -90,13 +94,13 @@ func TestGetRegistrationConfigs(t *testing.T) { Status: addonv1alpha1.ManagedClusterAddOnStatus{ Registrations: []addonv1alpha1.RegistrationConfig{ { - SignerName: "kubernetes.io/kube-apiserver-client", + SignerName: certificates.KubeAPIServerClientSignerName, }, }, }, }, configs: []registrationConfig{ - newRegistrationConfig(addOnName, addOnNamespace, "kubernetes.io/kube-apiserver-client", "", nil, true), + newRegistrationConfig(addOnName, addOnNamespace, certificates.KubeAPIServerClientSignerName, "", nil, true), }, }, { diff --git a/pkg/registration/spoke/addon/registration_controller.go b/pkg/registration/spoke/addon/registration_controller.go index 20e7142b8..e5cd2161f 100644 --- a/pkg/registration/spoke/addon/registration_controller.go +++ b/pkg/registration/spoke/addon/registration_controller.go @@ -114,7 +114,7 @@ func (c *addOnRegistrationController) sync(ctx context.Context, syncCtx factory. } // handle resync - errs := []error{} + var errs []error for addOnName := range c.addOnRegistrationConfigs { _, err := c.hubAddOnLister.ManagedClusterAddOns(c.clusterName).Get(addOnName) if err == nil { @@ -158,7 +158,7 @@ func (c *addOnRegistrationController) syncAddOn(ctx context.Context, syncCtx fac } // stop registration for the stale registration configs - errs := []error{} + var errs []error for hash, cachedConfig := range cachedConfigs { if _, ok := configs[hash]; ok { continue @@ -202,7 +202,7 @@ func (c *addOnRegistrationController) startRegistration(ctx context.Context, con // the addon agent runs outside the managed cluster, for more details see the hosted mode design docs for addon: // https://github.com/open-cluster-management-io/enhancements/pull/65), it generate the secret on the // management(hosting) cluster - var kubeClient kubernetes.Interface = c.spokeKubeClient + kubeClient := c.spokeKubeClient if config.AgentRunningOutsideManagedCluster { kubeClient = c.managementKubeClient } @@ -298,7 +298,7 @@ func (c *addOnRegistrationController) stopRegistration(ctx context.Context, conf config.stopFunc() } - var kubeClient kubernetes.Interface = c.spokeKubeClient + kubeClient := c.spokeKubeClient if config.AgentRunningOutsideManagedCluster { // delete the secret generated on the management cluster kubeClient = c.managementKubeClient @@ -315,7 +315,7 @@ func (c *addOnRegistrationController) stopRegistration(ctx context.Context, conf // cleanup cleans both the registration configs and client certificate controllers for the addon func (c *addOnRegistrationController) cleanup(ctx context.Context, addOnName string) error { - errs := []error{} + var errs []error for _, config := range c.addOnRegistrationConfigs[addOnName] { if err := c.stopRegistration(ctx, config); err != nil { errs = append(errs, err) diff --git a/pkg/registration/spoke/addon/registration_controller_test.go b/pkg/registration/spoke/addon/registration_controller_test.go index 1741d13a7..5cebc7178 100644 --- a/pkg/registration/spoke/addon/registration_controller_test.go +++ b/pkg/registration/spoke/addon/registration_controller_test.go @@ -23,7 +23,6 @@ import ( func TestFilterCSREvents(t *testing.T) { clusterName := "cluster1" - addonName := "addon1" signerName := "signer1" cases := []struct { @@ -50,7 +49,7 @@ func TestFilterCSREvents(t *testing.T) { Labels: map[string]string{ // the labels are only hints. Anyone could set/modify them. 
clusterv1.ClusterNameLabelKey: clusterName, - addonv1alpha1.AddonLabelKey: addonName, + addonv1alpha1.AddonLabelKey: addOnName, }, }, Spec: certificates.CertificateSigningRequestSpec{ @@ -63,7 +62,7 @@ func TestFilterCSREvents(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - filterFunc := createCSREventFilterFunc(clusterName, addonName, signerName) + filterFunc := createCSREventFilterFunc(clusterName, addOnName, signerName) actual := filterFunc(c.csr) if actual != c.expected { t.Errorf("Expected %v but got %v", c.expected, actual) @@ -74,7 +73,6 @@ func TestFilterCSREvents(t *testing.T) { func TestRegistrationSync(t *testing.T) { clusterName := "cluster1" - addonName := "addon1" signerName := "signer1" config1 := addonv1alpha1.RegistrationConfig{ @@ -84,7 +82,7 @@ func TestRegistrationSync(t *testing.T) { config2 := addonv1alpha1.RegistrationConfig{ SignerName: signerName, Subject: addonv1alpha1.Subject{ - User: addonName, + User: addOnName, }, } @@ -99,8 +97,8 @@ func TestRegistrationSync(t *testing.T) { }{ { name: "addon registration not enabled", - queueKey: addonName, - addOn: newManagedClusterAddOn(clusterName, addonName, nil, false), + queueKey: addOnName, + addOn: newManagedClusterAddOn(clusterName, addOnName, nil, false), validateActions: func(t *testing.T, actions, managementActions []clienttesting.Action) { if len(actions) != 0 { t.Errorf("expect 0 actions but got %d", len(actions)) @@ -112,11 +110,11 @@ func TestRegistrationSync(t *testing.T) { }, { name: "addon registration enabled", - queueKey: addonName, - addOn: newManagedClusterAddOn(clusterName, addonName, + queueKey: addOnName, + addOn: newManagedClusterAddOn(clusterName, addOnName, []addonv1alpha1.RegistrationConfig{config1}, false), expectedAddOnRegistrationConfigHashs: map[string][]string{ - addonName: {hash(config1, "", false)}, + addOnName: {hash(config1, "", false)}, }, validateActions: func(t *testing.T, actions, managementActions []clienttesting.Action) { if len(actions) != 0 { @@ -126,21 +124,21 @@ func TestRegistrationSync(t *testing.T) { }, { name: "addon registration updated", - queueKey: addonName, - addOn: newManagedClusterAddOn(clusterName, addonName, + queueKey: addOnName, + addOn: newManagedClusterAddOn(clusterName, addOnName, []addonv1alpha1.RegistrationConfig{config2}, false), addOnRegistrationConfigs: map[string]map[string]registrationConfig{ - addonName: { + addOnName: { hash(config1, "", false): { secretName: "secret1", addonInstallOption: addonInstallOption{ - InstallationNamespace: addonName, + InstallationNamespace: addOnName, }, }, }, }, expectedAddOnRegistrationConfigHashs: map[string][]string{ - addonName: {hash(config2, "", false)}, + addOnName: {hash(config2, "", false)}, }, validateActions: func(t *testing.T, actions, managementActions []clienttesting.Action) { if len(actions) != 1 { @@ -151,21 +149,21 @@ func TestRegistrationSync(t *testing.T) { }, { name: "addon install namespace updated", - queueKey: addonName, - addOn: setAddonInstallNamespace(newManagedClusterAddOn(clusterName, addonName, + queueKey: addOnName, + addOn: setAddonInstallNamespace(newManagedClusterAddOn(clusterName, addOnName, []addonv1alpha1.RegistrationConfig{config2}, false), "ns1"), addOnRegistrationConfigs: map[string]map[string]registrationConfig{ - addonName: { + addOnName: { hash(config2, "", false): { secretName: "secret1", addonInstallOption: addonInstallOption{ - InstallationNamespace: addonName, + InstallationNamespace: addOnName, }, }, }, }, expectedAddOnRegistrationConfigHashs: 
map[string][]string{ - addonName: {hash(config2, "ns1", false)}, + addOnName: {hash(config2, "ns1", false)}, }, validateActions: func(t *testing.T, actions, managementActions []clienttesting.Action) { if len(actions) != 1 { @@ -176,13 +174,13 @@ func TestRegistrationSync(t *testing.T) { }, { name: "addon is deleted", - queueKey: addonName, + queueKey: addOnName, addOnRegistrationConfigs: map[string]map[string]registrationConfig{ - addonName: { + addOnName: { hash(config1, "", false): { secretName: "secret1", addonInstallOption: addonInstallOption{ - InstallationNamespace: addonName, + InstallationNamespace: addOnName, }, }, }, @@ -196,10 +194,10 @@ func TestRegistrationSync(t *testing.T) { }, { name: "hosted addon registration enabled", - queueKey: addonName, - addOn: newManagedClusterAddOn(clusterName, addonName, []addonv1alpha1.RegistrationConfig{config1}, true), + queueKey: addOnName, + addOn: newManagedClusterAddOn(clusterName, addOnName, []addonv1alpha1.RegistrationConfig{config1}, true), expectedAddOnRegistrationConfigHashs: map[string][]string{ - addonName: {hash(config1, "", true)}, + addOnName: {hash(config1, "", true)}, }, addonAgentOutsideManagedCluster: true, validateActions: func(t *testing.T, actions, managementActions []clienttesting.Action) { @@ -213,23 +211,23 @@ func TestRegistrationSync(t *testing.T) { }, { name: "hosted addon registration updated", - queueKey: addonName, - addOn: newManagedClusterAddOn(clusterName, addonName, + queueKey: addOnName, + addOn: newManagedClusterAddOn(clusterName, addOnName, []addonv1alpha1.RegistrationConfig{config2}, true), addonAgentOutsideManagedCluster: true, addOnRegistrationConfigs: map[string]map[string]registrationConfig{ - addonName: { + addOnName: { hash(config1, "", true): { secretName: "secret1", addonInstallOption: addonInstallOption{ - InstallationNamespace: addonName, + InstallationNamespace: addOnName, AgentRunningOutsideManagedCluster: true, }, }, }, }, expectedAddOnRegistrationConfigHashs: map[string][]string{ - addonName: {hash(config2, "", true)}, + addOnName: {hash(config2, "", true)}, }, validateActions: func(t *testing.T, actions, managementActions []clienttesting.Action) { if len(actions) != 0 { @@ -243,12 +241,12 @@ func TestRegistrationSync(t *testing.T) { }, { name: "deploy mode changes from hosted to default", - queueKey: addonName, - addOn: newManagedClusterAddOn(clusterName, addonName, + queueKey: addOnName, + addOn: newManagedClusterAddOn(clusterName, addOnName, []addonv1alpha1.RegistrationConfig{config2}, false), addonAgentOutsideManagedCluster: false, addOnRegistrationConfigs: map[string]map[string]registrationConfig{ - addonName: { + addOnName: { hash(config2, "", true): { secretName: "secret1", addonInstallOption: addonInstallOption{ @@ -258,7 +256,7 @@ func TestRegistrationSync(t *testing.T) { }, }, expectedAddOnRegistrationConfigHashs: map[string][]string{ - addonName: {hash(config2, "", false)}, + addOnName: {hash(config2, "", false)}, }, validateActions: func(t *testing.T, actions, managementActions []clienttesting.Action) { if len(actions) != 0 { @@ -272,23 +270,23 @@ func TestRegistrationSync(t *testing.T) { }, { name: "deploy mode changes from default to hosted", - queueKey: addonName, - addOn: newManagedClusterAddOn(clusterName, addonName, + queueKey: addOnName, + addOn: newManagedClusterAddOn(clusterName, addOnName, []addonv1alpha1.RegistrationConfig{config2}, true), addonAgentOutsideManagedCluster: true, addOnRegistrationConfigs: map[string]map[string]registrationConfig{ - addonName: { + 
addOnName: { hash(config2, "", false): { secretName: "secret1", addonInstallOption: addonInstallOption{ - InstallationNamespace: addonName, + InstallationNamespace: addOnName, AgentRunningOutsideManagedCluster: false, }, }, }, }, expectedAddOnRegistrationConfigHashs: map[string][]string{ - addonName: {hash(config2, "", true)}, + addOnName: {hash(config2, "", true)}, }, validateActions: func(t *testing.T, actions, managementActions []clienttesting.Action) { if len(managementActions) != 0 { @@ -302,13 +300,13 @@ func TestRegistrationSync(t *testing.T) { }, { name: "hosted addon is deleted", - queueKey: addonName, + queueKey: addOnName, addOnRegistrationConfigs: map[string]map[string]registrationConfig{ - addonName: { + addOnName: { hash(config1, "", true): { secretName: "secret1", addonInstallOption: addonInstallOption{ - InstallationNamespace: addonName, + InstallationNamespace: addOnName, AgentRunningOutsideManagedCluster: true, }, }, @@ -324,14 +322,14 @@ func TestRegistrationSync(t *testing.T) { { name: "resync", queueKey: factory.DefaultQueueKey, - addOn: newManagedClusterAddOn(clusterName, addonName, + addOn: newManagedClusterAddOn(clusterName, addOnName, []addonv1alpha1.RegistrationConfig{config1}, false), addOnRegistrationConfigs: map[string]map[string]registrationConfig{ - addonName: { + addOnName: { hash(config1, "", false): { secretName: "secret1", addonInstallOption: addonInstallOption{ - InstallationNamespace: addonName, + InstallationNamespace: addOnName, }, }, }, @@ -345,7 +343,7 @@ func TestRegistrationSync(t *testing.T) { }, }, expectedAddOnRegistrationConfigHashs: map[string][]string{ - addonName: {hash(config1, "", false)}, + addOnName: {hash(config1, "", false)}, }, validateActions: func(t *testing.T, actions, managementActions []clienttesting.Action) { if len(actions) != 1 { @@ -360,7 +358,7 @@ func TestRegistrationSync(t *testing.T) { t.Run(c.name, func(t *testing.T) { kubeClient := kubefake.NewSimpleClientset() managementClient := kubefake.NewSimpleClientset() - addons := []runtime.Object{} + var addons []runtime.Object if c.addOn != nil { addons = append(addons, c.addOn) } diff --git a/pkg/registration/spoke/lease/lease_controller_test.go b/pkg/registration/spoke/lease/lease_controller_test.go index 4b12afa66..b40ddb954 100644 --- a/pkg/registration/spoke/lease/lease_controller_test.go +++ b/pkg/registration/spoke/lease/lease_controller_test.go @@ -41,7 +41,8 @@ func TestLeaseUpdate(t *testing.T) { clusters: []runtime.Object{}, needToStartUpdateBefore: true, validateActions: testingcommon.AssertNoMoreUpdates, - expectedErr: "unable to get managed cluster \"testmanagedcluster\" from hub: managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found", + expectedErr: "unable to get managed cluster \"testmanagedcluster\" from hub: " + + "managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found", }, { name: "unaccept a managed cluster after lease update routine is started", diff --git a/pkg/registration/spoke/managedcluster/claim_reconcile.go b/pkg/registration/spoke/managedcluster/claim_reconcile.go index 55cc66f2d..b4978bbc2 100644 --- a/pkg/registration/spoke/managedcluster/claim_reconcile.go +++ b/pkg/registration/spoke/managedcluster/claim_reconcile.go @@ -45,8 +45,7 @@ func (r *claimReconcile) reconcile(ctx context.Context, cluster *clusterv1.Manag // managed cluster on hub. Some of the customized claims might not be exposed once // the total number of the claims exceeds the value of `cluster-claims-max`. 
func (r *claimReconcile) exposeClaims(ctx context.Context, cluster *clusterv1.ManagedCluster) error { - reservedClaims := []clusterv1.ManagedClusterClaim{} - customClaims := []clusterv1.ManagedClusterClaim{} + var reservedClaims, customClaims []clusterv1.ManagedClusterClaim // clusterClaim with label `open-cluster-management.io/spoke-only` will not be synced to managedCluster.Status at hub. requirement, _ := labels.NewRequirement(labelCustomizedOnly, selection.DoesNotExist, []string{}) diff --git a/pkg/registration/spoke/managedcluster/claim_reconcile_test.go b/pkg/registration/spoke/managedcluster/claim_reconcile_test.go index dfafaeab2..59f039bcf 100644 --- a/pkg/registration/spoke/managedcluster/claim_reconcile_test.go +++ b/pkg/registration/spoke/managedcluster/claim_reconcile_test.go @@ -38,7 +38,8 @@ func TestSync(t *testing.T) { { name: "sync no managed cluster", validateActions: testingcommon.AssertNoActions, - expectedErr: "unable to get managed cluster \"testmanagedcluster\" from hub: managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found", + expectedErr: "unable to get managed cluster \"testmanagedcluster\" " + + "from hub: managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found", }, { name: "skip when managed cluster does not join the hub yet", @@ -87,7 +88,7 @@ func TestSync(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - objects := []runtime.Object{} + var objects []runtime.Object if c.cluster != nil { objects = append(objects, c.cluster) } diff --git a/pkg/registration/spoke/managedcluster/joining_controller_test.go b/pkg/registration/spoke/managedcluster/joining_controller_test.go index 4482ebcf5..62a20b16c 100644 --- a/pkg/registration/spoke/managedcluster/joining_controller_test.go +++ b/pkg/registration/spoke/managedcluster/joining_controller_test.go @@ -32,7 +32,8 @@ func TestSyncManagedCluster(t *testing.T) { name: "sync no managed cluster", startingObjects: []runtime.Object{}, validateActions: testingcommon.AssertNoActions, - expectedErr: "unable to get managed cluster \"testmanagedcluster\" from hub: managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found", + expectedErr: "unable to get managed cluster \"testmanagedcluster\" from hub: " + + "managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found", }, { name: "sync an unaccepted managed cluster", diff --git a/pkg/registration/spoke/managedcluster/resource_reconcile_test.go b/pkg/registration/spoke/managedcluster/resource_reconcile_test.go index f4cc2f6d1..7a2bad13c 100644 --- a/pkg/registration/spoke/managedcluster/resource_reconcile_test.go +++ b/pkg/registration/spoke/managedcluster/resource_reconcile_test.go @@ -13,7 +13,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/version" - discovery "k8s.io/client-go/discovery" + "k8s.io/client-go/discovery" kubeinformers "k8s.io/client-go/informers" kubefake "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/rest" @@ -90,7 +90,8 @@ func TestHealthCheck(t *testing.T) { validateActions: func(t *testing.T, clusterClient *clusterfake.Clientset) { testingcommon.AssertNoActions(t, clusterClient.Actions()) }, - expectedErr: "unable to get managed cluster \"testmanagedcluster\" from hub: managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found", + expectedErr: "unable to get managed cluster \"testmanagedcluster\" from hub: " + + 
"managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found", }, { name: "kube-apiserver is not health", diff --git a/pkg/registration/spoke/managedcluster/status_controller.go b/pkg/registration/spoke/managedcluster/status_controller.go index f417f2cd5..c3adf6a40 100644 --- a/pkg/registration/spoke/managedcluster/status_controller.go +++ b/pkg/registration/spoke/managedcluster/status_controller.go @@ -8,7 +8,7 @@ import ( "github.com/openshift/library-go/pkg/controller/factory" "github.com/openshift/library-go/pkg/operator/events" "k8s.io/apimachinery/pkg/util/errors" - discovery "k8s.io/client-go/discovery" + "k8s.io/client-go/discovery" corev1informers "k8s.io/client-go/informers/core/v1" clientset "open-cluster-management.io/api/client/cluster/clientset/versioned" diff --git a/pkg/registration/spoke/registration/secret_controller.go b/pkg/registration/spoke/registration/secret_controller.go index 2acb7a832..ecedf754a 100644 --- a/pkg/registration/spoke/registration/secret_controller.go +++ b/pkg/registration/spoke/registration/secret_controller.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "fmt" - "io/ioutil" "os" "path" "path/filepath" @@ -91,11 +90,11 @@ func DumpSecret( // create/update files from the secret for key, data := range secret.Data { filename := path.Clean(path.Join(outputDir, key)) - lastData, err := ioutil.ReadFile(filepath.Clean(filename)) + lastData, err := os.ReadFile(filepath.Clean(filename)) switch { case os.IsNotExist(err): // create file - if err := ioutil.WriteFile(filename, data, 0600); err != nil { + if err := os.WriteFile(filename, data, 0600); err != nil { return fmt.Errorf("unable to write file %q: %w", filename, err) } recorder.Event("FileCreated", fmt.Sprintf("File %q is created from secret %s/%s", filename, secretNamespace, secretName)) @@ -106,7 +105,7 @@ func DumpSecret( continue default: // update file - if err := ioutil.WriteFile(path.Clean(filename), data, 0600); err != nil { + if err := os.WriteFile(path.Clean(filename), data, 0600); err != nil { return fmt.Errorf("unable to write file %q: %w", filename, err) } recorder.Event("FileUpdated", fmt.Sprintf("File %q is updated from secret %s/%s", filename, secretNamespace, secretName)) diff --git a/pkg/registration/spoke/registration/secret_controller_test.go b/pkg/registration/spoke/registration/secret_controller_test.go index 7b9d0df6d..a9396693a 100644 --- a/pkg/registration/spoke/registration/secret_controller_test.go +++ b/pkg/registration/spoke/registration/secret_controller_test.go @@ -3,7 +3,6 @@ package registration import ( "context" "fmt" - "io/ioutil" "os" "path" "testing" @@ -24,11 +23,16 @@ const ( ) func TestDumpSecret(t *testing.T) { - testDir, err := ioutil.TempDir("", "dumpsecret") + testDir, err := os.MkdirTemp("", "dumpsecret") if err != nil { t.Errorf("unexpected error: %v", err) } - defer os.RemoveAll(testDir) + defer func() { + err := os.RemoveAll(testDir) + if err != nil { + t.Fatal(err) + } + }() kubeConfigFile := testinghelpers.NewKubeconfig(nil, nil) @@ -44,7 +48,7 @@ func TestDumpSecret(t *testing.T) { queueKey: "", secret: testinghelpers.NewHubKubeconfigSecret("irrelevant", "irrelevant", "", nil, map[string][]byte{}), validateFiles: func(t *testing.T, hubKubeconfigDir string) { - files, err := ioutil.ReadDir(hubKubeconfigDir) + files, err := os.ReadDir(hubKubeconfigDir) if err != nil { t.Errorf("unexpected error: %v", err) } diff --git a/pkg/work/helper/helper_test.go b/pkg/work/helper/helper_test.go index e8dda1c21..8a01ada20 100644 --- 
a/pkg/work/helper/helper_test.go +++ b/pkg/work/helper/helper_test.go @@ -122,7 +122,9 @@ func TestMergeManifestConditions(t *testing.T) { newManifestCondition(0, "resource1", newCondition("two", "False", "my-reason", "my-message", nil)), }, expectedConditions: []workapiv1.ManifestCondition{ - newManifestCondition(0, "resource1", newCondition("one", "True", "my-reason", "my-message", nil), newCondition("two", "False", "my-reason", "my-message", nil)), + newManifestCondition(0, "resource1", + newCondition("one", "True", "my-reason", "my-message", nil), + newCondition("two", "False", "my-reason", "my-message", nil)), }, }, { diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go index 7b2cfd1e6..65f5220ee 100644 --- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go +++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go @@ -50,7 +50,7 @@ type ManifestWorkReplicaSetController struct { reconcilers []ManifestWorkReplicaSetReconcile } -// manifestWorkReplicaSetReconcile is a interface for reconcile logic. It returns an updated manifestWorkReplicaSet and whether further +// ManifestWorkReplicaSetReconcile is a interface for reconcile logic. It returns an updated manifestWorkReplicaSet and whether further // reconcile needs to proceed. type ManifestWorkReplicaSetReconcile interface { reconcile(ctx context.Context, pw *workapiv1alpha1.ManifestWorkReplicaSet) (*workapiv1alpha1.ManifestWorkReplicaSet, reconcileState, error) diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controllers_test.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controllers_test.go index 714eaa684..659841a53 100644 --- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controllers_test.go +++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controllers_test.go @@ -144,7 +144,7 @@ func TestManifestWorkReplicaSetControllerPatchStatus(t *testing.T) { }, }, { - name: "no additonal apply needed", + name: "no additional apply needed", mwrSet: func() *workapiv1alpha1.ManifestWorkReplicaSet { w := helpertest.CreateTestManifestWorkReplicaSet("test", "default", "placement") w.Finalizers = []string{ManifestWorkReplicaSetFinalizer} @@ -213,15 +213,27 @@ func TestManifestWorkReplicaSetControllerPatchStatus(t *testing.T) { workObjects = append(workObjects, c.works...) fakeClient := fakeworkclient.NewSimpleClientset(workObjects...) 
workInformers := workinformers.NewSharedInformerFactory(fakeClient, 10*time.Minute) - workInformers.Work().V1alpha1().ManifestWorkReplicaSets().Informer().GetStore().Add(c.mwrSet) + err := workInformers.Work().V1alpha1().ManifestWorkReplicaSets().Informer().GetStore().Add(c.mwrSet) + if err != nil { + t.Fatal(err) + } for _, o := range c.works { - workInformers.Work().V1().ManifestWorks().Informer().GetStore().Add(o) + err = workInformers.Work().V1().ManifestWorks().Informer().GetStore().Add(o) + if err != nil { + t.Fatal(err) + } } fakeClusterClient := fakeclusterclient.NewSimpleClientset(c.placement, c.decision) clusterInformers := clusterinformers.NewSharedInformerFactory(fakeClusterClient, 10*time.Minute) - clusterInformers.Cluster().V1beta1().Placements().Informer().GetStore().Add(c.placement) - clusterInformers.Cluster().V1beta1().PlacementDecisions().Informer().GetStore().Add(c.decision) + err = clusterInformers.Cluster().V1beta1().Placements().Informer().GetStore().Add(c.placement) + if err != nil { + t.Fatal(err) + } + err = clusterInformers.Cluster().V1beta1().PlacementDecisions().Informer().GetStore().Add(c.decision) + if err != nil { + t.Fatal(err) + } ctrl := newController( fakeClient, @@ -232,7 +244,7 @@ func TestManifestWorkReplicaSetControllerPatchStatus(t *testing.T) { ) controllerContext := testingcommon.NewFakeSyncContext(t, c.mwrSet.Namespace+"/"+c.mwrSet.Name) - err := ctrl.sync(context.TODO(), controllerContext) + err = ctrl.sync(context.TODO(), controllerContext) if err != nil { t.Error(err) } diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go index 96202a49b..76aed7380 100644 --- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go +++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go @@ -39,7 +39,7 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha return mwrSet, reconcileStop, nil } if err != nil { - return mwrSet, reconcileContinue, fmt.Errorf("Failed get placement %w", err) + return mwrSet, reconcileContinue, fmt.Errorf("failed get placement %w", err) } placements = append(placements, placement) } @@ -49,7 +49,7 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha return mwrSet, reconcileContinue, err } - errs := []error{} + var errs []error addedClusters, deletedClusters, existingClusters := sets.New[string](), sets.New[string](), sets.New[string]() for _, mw := range manifestWorks { existingClusters.Insert(mw.Namespace) @@ -127,7 +127,7 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha return mwrSet, reconcileContinue, utilerrors.NewAggregate(errs) } -// Return only True status if there all clusters have manifests applied as expected +// GetManifestworkApplied return only True status if there all clusters have manifests applied as expected func GetManifestworkApplied(reason string, message string) metav1.Condition { if reason == workapiv1alpha1.ReasonAsExpected { return getCondition(workapiv1alpha1.ManifestWorkReplicaSetConditionManifestworkApplied, reason, message, metav1.ConditionTrue) @@ -137,7 +137,7 @@ func GetManifestworkApplied(reason string, message string) metav1.Condition { } -// Return only True status if there are clusters selected +// GetPlacementDecisionVerified return only True status 
if there are clusters selected func GetPlacementDecisionVerified(reason string, message string) metav1.Condition { if reason == workapiv1alpha1.ReasonAsExpected { return getCondition(workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified, reason, message, metav1.ConditionTrue) @@ -158,7 +158,7 @@ func getCondition(conditionType string, reason string, message string, status me func CreateManifestWork(mwrSet *workapiv1alpha1.ManifestWorkReplicaSet, clusterNS string) (*workv1.ManifestWork, error) { if clusterNS == "" { - return nil, fmt.Errorf("Invalid cluster namespace") + return nil, fmt.Errorf("invalid cluster namespace") } return &workv1.ManifestWork{ diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_test.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_test.go index cae44246d..45cd37552 100644 --- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_test.go +++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_test.go @@ -73,7 +73,7 @@ func TestDeployReconcileAsExpected(t *testing.T) { } // Check the PlacedManifestWork conditions - placeCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, string(workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified)) + placeCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified) if placeCondition == nil { t.Fatal("Placement condition not found ", mwrSet.Status.Conditions) @@ -137,7 +137,7 @@ func TestDeployReconcileAsPlacementDecisionEmpty(t *testing.T) { } // Check the PlacedManifestWork conditions - placeCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, string(workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified)) + placeCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified) if placeCondition == nil { t.Fatal("Placement condition not found ", mwrSet.Status.Conditions) @@ -184,7 +184,7 @@ func TestDeployReconcileAsPlacementNotExist(t *testing.T) { } // Check the PlacedManifestWork conditions - placeCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, string(workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified)) + placeCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified) if placeCondition == nil { t.Fatal("Placement condition not found ", mwrSet.Status.Conditions) diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalize_reconcile.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalize_reconcile.go index 9590d6865..e7035218a 100644 --- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalize_reconcile.go +++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalize_reconcile.go @@ -43,15 +43,15 @@ func (f *finalizeReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alp return mwrSet, reconcileStop, nil } -func (m *finalizeReconciler) finalizeManifestWorkReplicaSet(ctx context.Context, manifestWorkReplicaSet *workapiv1alpha1.ManifestWorkReplicaSet) error { - manifestWorks, err := listManifestWorksByManifestWorkReplicaSet(manifestWorkReplicaSet, m.manifestWorkLister) +func (f *finalizeReconciler) 
finalizeManifestWorkReplicaSet(ctx context.Context, manifestWorkReplicaSet *workapiv1alpha1.ManifestWorkReplicaSet) error { + manifestWorks, err := listManifestWorksByManifestWorkReplicaSet(manifestWorkReplicaSet, f.manifestWorkLister) if err != nil { return err } - errs := []error{} + var errs []error for _, mw := range manifestWorks { - err = m.workApplier.Delete(ctx, mw.Namespace, mw.Name) + err = f.workApplier.Delete(ctx, mw.Namespace, mw.Name) if err != nil && !errors.IsNotFound(err) { errs = append(errs, err) } diff --git a/pkg/work/spoke/apply/create_only_apply_test.go b/pkg/work/spoke/apply/create_only_apply_test.go index 9a1a0a742..2d1a4813b 100644 --- a/pkg/work/spoke/apply/create_only_apply_test.go +++ b/pkg/work/spoke/apply/create_only_apply_test.go @@ -27,7 +27,7 @@ func TestCreateOnlyApply(t *testing.T) { }{ { name: "create a non exist object", - owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"}, + owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner}, existing: nil, required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"), gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, @@ -40,14 +40,14 @@ func TestCreateOnlyApply(t *testing.T) { t.Errorf("Expect 1 owners, but have %d", len(owners)) } - if owners[0].UID != "testowner" { + if owners[0].UID != defaultOwner { t.Errorf("Owner UId is not correct, got %s", owners[0].UID) } }, }, { name: "create an already existing object", - owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"}, + owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner}, existing: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"), required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"), gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, @@ -64,7 +64,7 @@ func TestCreateOnlyApply(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - objects := []runtime.Object{} + var objects []runtime.Object if c.existing != nil { objects = append(objects, c.existing) } diff --git a/pkg/work/spoke/apply/server_side_apply.go b/pkg/work/spoke/apply/server_side_apply.go index a3d111539..4d32dac1b 100644 --- a/pkg/work/spoke/apply/server_side_apply.go +++ b/pkg/work/spoke/apply/server_side_apply.go @@ -39,7 +39,7 @@ func (c *ServerSideApply) Apply( ctx context.Context, gvr schema.GroupVersionResource, required *unstructured.Unstructured, - owner metav1.OwnerReference, + _ metav1.OwnerReference, applyOption *workapiv1.ManifestConfigOption, recorder events.Recorder) (runtime.Object, error) { diff --git a/pkg/work/spoke/apply/server_side_apply_test.go b/pkg/work/spoke/apply/server_side_apply_test.go index 3a4b21dd2..8d2454fca 100644 --- a/pkg/work/spoke/apply/server_side_apply_test.go +++ b/pkg/work/spoke/apply/server_side_apply_test.go @@ -22,6 +22,8 @@ import ( "open-cluster-management.io/ocm/pkg/work/spoke/spoketesting" ) +const defaultOwner = "test-owner" + func TestServerSideApply(t *testing.T) { cases := []struct { name string @@ -34,7 +36,7 @@ func TestServerSideApply(t *testing.T) { }{ { name: "server side apply successfully", - owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"}, + owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner}, existing: nil, required: spoketesting.NewUnstructured("v1", "Namespace", "", "test"), gvr: schema.GroupVersionResource{Version: "v1", Resource: "namespaces"}, @@ -42,7 +44,7 @@ func 
TestServerSideApply(t *testing.T) { }, { name: "server side apply successfully conflict", - owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"}, + owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner}, existing: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"), required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"), gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, @@ -55,7 +57,7 @@ func TestServerSideApply(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - objects := []runtime.Object{} + var objects []runtime.Object if c.existing != nil { objects = append(objects, c.existing) } diff --git a/pkg/work/spoke/apply/update_apply_test.go b/pkg/work/spoke/apply/update_apply_test.go index 650e31781..5049aad3e 100644 --- a/pkg/work/spoke/apply/update_apply_test.go +++ b/pkg/work/spoke/apply/update_apply_test.go @@ -47,15 +47,19 @@ func TestIsSameUnstructured(t *testing.T) { expected: false, }, { - name: "different spec", - obj1: spoketesting.NewUnstructuredWithContent("v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}}), - obj2: spoketesting.NewUnstructuredWithContent("v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}}), + name: "different spec", + obj1: spoketesting.NewUnstructuredWithContent( + "v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}}), + obj2: spoketesting.NewUnstructuredWithContent( + "v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}}), expected: false, }, { - name: "same spec, different status", - obj1: spoketesting.NewUnstructuredWithContent("v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}, "status": "status1"}), - obj2: spoketesting.NewUnstructuredWithContent("v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}, "status": "status2"}), + name: "same spec, different status", + obj1: spoketesting.NewUnstructuredWithContent( + "v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}, "status": "status1"}), + obj2: spoketesting.NewUnstructuredWithContent( + "v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}, "status": "status2"}), expected: true, }, } @@ -81,7 +85,7 @@ func TestApplyUnstructred(t *testing.T) { }{ { name: "create a new object with owner", - owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"}, + owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner}, required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"), gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, validateActions: func(t *testing.T, actions []clienttesting.Action) { @@ -92,7 +96,7 @@ func TestApplyUnstructred(t *testing.T) { t.Errorf("Expect 1 owners, but have %d", len(owners)) } - if owners[0].UID != "testowner" { + if owners[0].UID != defaultOwner { t.Errorf("Owner UId is not correct, got %s", owners[0].UID) } }, @@ -120,7 +124,7 @@ func TestApplyUnstructred(t *testing.T) { name: "update an object owner", existing: spoketesting.NewUnstructured( "v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test1", UID: "testowner1"}), - owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"}, + 
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner}, required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"), gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, validateActions: func(t *testing.T, actions []clienttesting.Action) { @@ -139,7 +143,7 @@ func TestApplyUnstructred(t *testing.T) { if owners[0].UID != "testowner1" { t.Errorf("Owner UId is not correct, got %s", owners[0].UID) } - if owners[1].UID != "testowner" { + if owners[1].UID != defaultOwner { t.Errorf("Owner UId is not correct, got %s", owners[1].UID) } }, @@ -160,7 +164,7 @@ func TestApplyUnstructred(t *testing.T) { { name: "remove an object owner", existing: spoketesting.NewUnstructured( - "v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"}), + "v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner}), owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner-"}, required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"), gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, @@ -285,7 +289,7 @@ func TestApplyUnstructred(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - objects := []runtime.Object{} + var objects []runtime.Object if c.existing != nil { objects = append(objects, c.existing) } @@ -318,7 +322,7 @@ func TestUpdateApplyKube(t *testing.T) { }{ { name: "apply non exist object using kube client", - owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"}, + owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner}, required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"), gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, validateActions: func(t *testing.T, actions []clienttesting.Action) { @@ -331,7 +335,7 @@ func TestUpdateApplyKube(t *testing.T) { }, { name: "apply existing object using kube client", - owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"}, + owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner}, existing: spoketesting.NewSecretWithType("test", "ns1", "foo", corev1.SecretTypeOpaque), required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"), gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, @@ -353,7 +357,7 @@ func TestUpdateApplyKube(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - objects := []runtime.Object{} + var objects []runtime.Object if c.existing != nil { objects = append(objects, c.existing) } @@ -404,14 +408,14 @@ func TestUpdateApplyDynamic(t *testing.T) { }{ { name: "apply non exist object using dynamic client", - owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"}, + owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner}, required: spoketesting.NewUnstructured("monitoring.coreos.com/v1", "ServiceMonitor", "ns1", "test"), gvr: schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "servicemonitors"}, ownerApplied: true, }, { name: "apply existing object using dynamic client", - owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"}, + owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner}, existing: spoketesting.NewUnstructured("monitoring.coreos.com/v1", "ServiceMonitor", "ns1", "test"), required: 
spoketesting.NewUnstructured("monitoring.coreos.com/v1", "ServiceMonitor", "ns1", "test"), gvr: schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "servicemonitors"}, @@ -421,7 +425,7 @@ func TestUpdateApplyDynamic(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - objects := []runtime.Object{} + var objects []runtime.Object if c.existing != nil { objects = append(objects, c.existing) } @@ -473,7 +477,7 @@ func TestUpdateApplyApiExtension(t *testing.T) { }{ { name: "apply non exist object using api extension client", - owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"}, + owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner}, required: spoketesting.NewUnstructured("apiextensions.k8s.io/v1", "CustomResourceDefinition", "", "testcrd"), gvr: schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinition"}, validateActions: func(t *testing.T, actions []clienttesting.Action) { @@ -486,7 +490,7 @@ func TestUpdateApplyApiExtension(t *testing.T) { }, { name: "apply existing object using api extension client", - owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"}, + owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner}, existing: newCRD("testcrd"), required: spoketesting.NewUnstructured("apiextensions.k8s.io/v1", "CustomResourceDefinition", "", "testcrd"), gvr: schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinition"}, @@ -502,7 +506,7 @@ func TestUpdateApplyApiExtension(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - objects := []runtime.Object{} + var objects []runtime.Object if c.existing != nil { objects = append(objects, c.existing) } diff --git a/pkg/work/spoke/auth/basic/auth_test.go b/pkg/work/spoke/auth/basic/auth_test.go index 631a03e29..c954da864 100644 --- a/pkg/work/spoke/auth/basic/auth_test.go +++ b/pkg/work/spoke/auth/basic/auth_test.go @@ -140,7 +140,9 @@ func TestValidateEscalation(t *testing.T) { namespace: "test-deny", name: "test", obj: spoketesting.NewUnstructured("v1", "ClusterRole", "", "test"), - expect: fmt.Errorf("not allowed to apply the resource rbac.authorization.k8s.io roles, test-deny test, error: permission escalation, will try again in 1m0s"), + expect: fmt.Errorf( + "not allowed to apply the resource rbac.authorization.k8s.io roles, " + + "test-deny test, error: permission escalation, will try again in 1m0s"), }, "allow": { executor: &workapiv1.ManifestWorkExecutor{ diff --git a/pkg/work/spoke/auth/cache/auth_test.go b/pkg/work/spoke/auth/cache/auth_test.go index d39cc1613..c8d2bbec4 100644 --- a/pkg/work/spoke/auth/cache/auth_test.go +++ b/pkg/work/spoke/auth/cache/auth_test.go @@ -23,6 +23,12 @@ import ( "open-cluster-management.io/ocm/pkg/work/spoke/spoketesting" ) +const ( + denyNS = "test-deny" + allowNS = "test-allow" + clusterName = "cluster1" +) + func newExecutorCacheValidator(t *testing.T, ctx context.Context, clusterName string, kubeClient kubernetes.Interface, manifestWorkObjects ...runtime.Object) *sarCacheValidator { @@ -84,7 +90,7 @@ func TestValidate(t *testing.T) { }, }, }, - namespace: "test-deny", + namespace: denyNS, name: "test", expect: fmt.Errorf("not allowed to apply the resource secrets, test-deny test, will try again in 1m0s"), }, @@ -98,7 +104,7 @@ func TestValidate(t *testing.T) { }, }, }, - namespace: "test-allow", + namespace: allowNS, 
name: "test", expect: nil, }, @@ -110,7 +116,7 @@ func TestValidate(t *testing.T) { func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) { obj := action.(clienttesting.CreateActionImpl).Object.(*v1.SubjectAccessReview) - if obj.Spec.ResourceAttributes.Namespace == "test-allow" { + if obj.Spec.ResourceAttributes.Namespace == allowNS { return true, &v1.SubjectAccessReview{ Status: v1.SubjectAccessReviewStatus{ Allowed: true, @@ -118,7 +124,7 @@ func TestValidate(t *testing.T) { }, nil } - if obj.Spec.ResourceAttributes.Namespace == "test-deny" { + if obj.Spec.ResourceAttributes.Namespace == denyNS { return true, &v1.SubjectAccessReview{ Status: v1.SubjectAccessReviewStatus{ Denied: true, @@ -129,7 +135,6 @@ func TestValidate(t *testing.T) { }, ) - clusterName := "cluster1" ctx := context.TODO() cacheValidator := newExecutorCacheValidator(t, ctx, clusterName, kubeClient) for testName, test := range tests { @@ -165,13 +170,13 @@ func TestCacheWorks(t *testing.T) { }{ "forbidden": { executor: executor, - namespace: "test-deny", + namespace: denyNS, name: "test", expect: fmt.Errorf("not allowed to apply the resource secrets, test-deny test, will try again in 1m0s"), }, "allow": { executor: executor, - namespace: "test-allow", + namespace: allowNS, name: "test", expect: nil, }, @@ -183,7 +188,7 @@ func TestCacheWorks(t *testing.T) { func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) { obj := action.(clienttesting.CreateActionImpl).Object.(*v1.SubjectAccessReview) - if obj.Spec.ResourceAttributes.Namespace == "test-allow" { + if obj.Spec.ResourceAttributes.Namespace == allowNS { return true, &v1.SubjectAccessReview{ Status: v1.SubjectAccessReviewStatus{ Allowed: true, @@ -191,7 +196,7 @@ func TestCacheWorks(t *testing.T) { }, nil } - if obj.Spec.ResourceAttributes.Namespace == "test-deny" { + if obj.Spec.ResourceAttributes.Namespace == denyNS { return true, &v1.SubjectAccessReview{ Status: v1.SubjectAccessReviewStatus{ Denied: true, @@ -202,12 +207,11 @@ func TestCacheWorks(t *testing.T) { }, ) - clusterName := "cluster1" ctx := context.TODO() work, _ := spoketesting.NewManifestWork(0, - spoketesting.NewUnstructured("v1", "Secret", "test-allow", "test"), - spoketesting.NewUnstructured("v1", "Secret", "test-deny", "test"), + spoketesting.NewUnstructured("v1", "Secret", allowNS, "test"), + spoketesting.NewUnstructured("v1", "Secret", denyNS, "test"), ) work.Spec.Executor = executor diff --git a/pkg/work/spoke/auth/cache/executor_cache_controller_test.go b/pkg/work/spoke/auth/cache/executor_cache_controller_test.go index ff98ef546..e9b9cd01c 100644 --- a/pkg/work/spoke/auth/cache/executor_cache_controller_test.go +++ b/pkg/work/spoke/auth/cache/executor_cache_controller_test.go @@ -156,7 +156,7 @@ func TestCacheController(t *testing.T) { func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) { obj := action.(clienttesting.CreateActionImpl).Object.(*v1.SubjectAccessReview) - if obj.Spec.ResourceAttributes.Namespace == "test-allow" { + if obj.Spec.ResourceAttributes.Namespace == allowNS { return true, &v1.SubjectAccessReview{ Status: v1.SubjectAccessReviewStatus{ Allowed: true, @@ -164,7 +164,7 @@ func TestCacheController(t *testing.T) { }, nil } - if obj.Spec.ResourceAttributes.Namespace == "test-deny" { + if obj.Spec.ResourceAttributes.Namespace == denyNS { return true, &v1.SubjectAccessReview{ Status: v1.SubjectAccessReviewStatus{ Denied: true, @@ -175,12 +175,11 @@ func TestCacheController(t *testing.T) { }, ) - 
clusterName := "cluster1" ctx := context.TODO() work, _ := spoketesting.NewManifestWork(0, - spoketesting.NewUnstructured("v1", "Secret", "test-allow", "test"), - spoketesting.NewUnstructured("v1", "Secret", "test-deny", "test"), + spoketesting.NewUnstructured("v1", "Secret", allowNS, "test"), + spoketesting.NewUnstructured("v1", "Secret", denyNS, "test"), ) work.Spec.Executor = executor work.Spec.DeleteOption = &workapiv1.DeleteOption{ @@ -190,7 +189,7 @@ func TestCacheController(t *testing.T) { { Group: "", Resource: "secrets", - Namespace: "test-allow", + Namespace: allowNS, Name: "test", }, }, diff --git a/pkg/work/spoke/controllers/finalizercontroller/appliedmanifestwork_finalize_controller.go b/pkg/work/spoke/controllers/finalizercontroller/appliedmanifestwork_finalize_controller.go index b804a6cd4..bc993bc4f 100644 --- a/pkg/work/spoke/controllers/finalizercontroller/appliedmanifestwork_finalize_controller.go +++ b/pkg/work/spoke/controllers/finalizercontroller/appliedmanifestwork_finalize_controller.go @@ -71,7 +71,7 @@ func (m *AppliedManifestWorkFinalizeController) sync(ctx context.Context, contro return m.syncAppliedManifestWork(ctx, controllerContext, appliedManifestWork) } -// syncAppliedManifestWork ensures that when a appliedmanifestwork has been deleted, everything it created is also deleted. +// syncAppliedManifestWork ensures that when an appliedmanifestwork has been deleted, everything it created is also deleted. // Foreground deletion is implemented, which means all resources created will be deleted and finalized // before removing finalizer from appliedmanifestwork func (m *AppliedManifestWorkFinalizeController) syncAppliedManifestWork(ctx context.Context, diff --git a/pkg/work/spoke/controllers/finalizercontroller/manifestwork_finalize_controller_test.go b/pkg/work/spoke/controllers/finalizercontroller/manifestwork_finalize_controller_test.go index c47565d74..d5c0f8706 100644 --- a/pkg/work/spoke/controllers/finalizercontroller/manifestwork_finalize_controller_test.go +++ b/pkg/work/spoke/controllers/finalizercontroller/manifestwork_finalize_controller_test.go @@ -190,8 +190,7 @@ func TestSyncManifestWorkController(t *testing.T) { t.Errorf("Expect no sync error, but got %v", err) } - workAction := []clienttesting.Action{} - appliedWorkAction := []clienttesting.Action{} + var workAction, appliedWorkAction []clienttesting.Action for _, action := range fakeClient.Actions() { if action.GetResource().Resource == "manifestworks" { workAction = append(workAction, action) diff --git a/pkg/work/spoke/controllers/manifestcontroller/manifestwork_controller.go b/pkg/work/spoke/controllers/manifestcontroller/manifestwork_controller.go index 518a88ba5..5eb842ad0 100644 --- a/pkg/work/spoke/controllers/manifestcontroller/manifestwork_controller.go +++ b/pkg/work/spoke/controllers/manifestcontroller/manifestwork_controller.go @@ -142,7 +142,7 @@ func (m *ManifestWorkController) sync(ctx context.Context, controllerContext fac // We creat a ownerref instead of controller ref since multiple controller can declare the ownership of a manifests owner := helper.NewAppliedManifestWorkOwner(appliedManifestWork) - errs := []error{} + var errs []error // Apply resources on spoke cluster. 
resourceResults := make([]applyResult, len(manifestWork.Spec.Workload.Manifests)) err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { @@ -161,7 +161,7 @@ func (m *ManifestWorkController) sync(ctx context.Context, controllerContext fac klog.Errorf("failed to apply resource with error %v", err) } - newManifestConditions := []workapiv1.ManifestCondition{} + var newManifestConditions []workapiv1.ManifestCondition var requeueTime = MaxRequeueDuration for _, result := range resourceResults { manifestCondition := workapiv1.ManifestCondition{ @@ -238,7 +238,7 @@ func (m *ManifestWorkController) applyAppliedManifestWork(ctx context.Context, w Finalizers: []string{controllers.AppliedManifestWorkFinalizer}, }, Spec: workapiv1.AppliedManifestWorkSpec{ - HubHash: m.hubHash, + HubHash: hubHash, ManifestWorkName: workName, AgentID: agentID, }, diff --git a/pkg/work/spoke/controllers/manifestcontroller/manifestwork_controller_test.go b/pkg/work/spoke/controllers/manifestcontroller/manifestwork_controller_test.go index 520c599e1..05b33f5fd 100644 --- a/pkg/work/spoke/controllers/manifestcontroller/manifestwork_controller_test.go +++ b/pkg/work/spoke/controllers/manifestcontroller/manifestwork_controller_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" @@ -15,7 +16,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/diff" fakedynamic "k8s.io/client-go/dynamic/fake" fakekube "k8s.io/client-go/kubernetes/fake" clienttesting "k8s.io/client-go/testing" @@ -33,6 +33,8 @@ import ( "open-cluster-management.io/ocm/pkg/work/spoke/spoketesting" ) +const defaultOwner = "testowner" + type testController struct { controller *ManifestWorkController dynamicClient *fakedynamic.FakeDynamicClient @@ -201,8 +203,7 @@ func (t *testCase) validate( dynamicClient *fakedynamic.FakeDynamicClient, workClient *fakeworkclient.Clientset, kubeClient *fakekube.Clientset) { - actualWorkActions := []clienttesting.Action{} - actualAppliedWorkActions := []clienttesting.Action{} + var actualWorkActions, actualAppliedWorkActions []clienttesting.Action for _, workAction := range workClient.Actions() { if workAction.GetResource().Resource == "manifestworks" { actualWorkActions = append(actualWorkActions, workAction) @@ -280,14 +281,14 @@ func TestSync(t *testing.T) { withAppliedWorkAction("create"). withExpectedKubeAction("get", "create"). withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}). - withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}), + withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}), newTestCase("create single deployment resource"). withWorkManifest(spoketesting.NewUnstructured("apps/v1", "Deployment", "ns1", "test")). withExpectedWorkAction("patch"). withAppliedWorkAction("create"). withExpectedDynamicAction("get", "create"). withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}). - withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}), + withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}), newTestCase("update single resource"). 
withWorkManifest(spoketesting.NewUnstructured("v1", "Secret", "ns1", "test")). withSpokeObject(spoketesting.NewSecret("test", "ns1", "value2")). @@ -295,30 +296,38 @@ func TestSync(t *testing.T) { withAppliedWorkAction("create"). withExpectedKubeAction("get", "delete", "create"). withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}). - withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}), + withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}), newTestCase("create single unstructured resource"). withWorkManifest(spoketesting.NewUnstructured("v1", "NewObject", "ns1", "test")). withExpectedWorkAction("patch"). withAppliedWorkAction("create"). withExpectedDynamicAction("get", "create"). withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}). - withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}), + withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}), newTestCase("update single unstructured resource"). - withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})). - withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})). + withWorkManifest(spoketesting.NewUnstructuredWithContent( + "v1", "NewObject", "ns1", "n1", + map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})). + withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent( + "v1", "NewObject", "ns1", "n1", + map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})). withExpectedWorkAction("patch"). withAppliedWorkAction("create"). withExpectedDynamicAction("get", "update"). withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}). - withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}), + withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}), newTestCase("multiple create&update resource"). - withWorkManifest(spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"), spoketesting.NewUnstructured("v1", "Secret", "ns2", "test")). + withWorkManifest(spoketesting.NewUnstructured( + "v1", "Secret", "ns1", "test"), + spoketesting.NewUnstructured("v1", "Secret", "ns2", "test")). withSpokeObject(spoketesting.NewSecret("test", "ns1", "value2")). withExpectedWorkAction("patch"). withAppliedWorkAction("create"). withExpectedKubeAction("get", "delete", "create", "get", "create"). - withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}, expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}). - withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}), + withExpectedManifestCondition( + expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}, + expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}). 
+ withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}), } for _, c := range cases { @@ -342,13 +351,17 @@ func TestSync(t *testing.T) { // Test applying resource failed func TestFailedToApplyResource(t *testing.T) { tc := newTestCase("multiple create&update resource"). - withWorkManifest(spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"), spoketesting.NewUnstructured("v1", "Secret", "ns2", "test")). + withWorkManifest(spoketesting.NewUnstructured( + "v1", "Secret", "ns1", "test"), + spoketesting.NewUnstructured("v1", "Secret", "ns2", "test")). withSpokeObject(spoketesting.NewSecret("test", "ns1", "value2")). withExpectedWorkAction("patch"). withAppliedWorkAction("create"). withExpectedKubeAction("get", "delete", "create", "get", "create"). - withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}, expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionFalse}). - withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionFalse}) + withExpectedManifestCondition( + expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}, + expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionFalse}). + withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionFalse}) work, workKey := spoketesting.NewManifestWork(0, tc.workManifest...) work.Finalizers = []string{controllers.ManifestWorkFinalizer} @@ -366,7 +379,7 @@ func TestFailedToApplyResource(t *testing.T) { return false, createObject, nil } - return true, &corev1.Secret{}, fmt.Errorf("Fake error") + return true, &corev1.Secret{}, fmt.Errorf("fake error") }) syncContext := testingcommon.NewFakeSyncContext(t, workKey) err := controller.toController().sync(context.TODO(), syncContext) @@ -380,58 +393,91 @@ func TestFailedToApplyResource(t *testing.T) { func TestUpdateStrategy(t *testing.T) { cases := []*testCase{ newTestCase("update single resource with nil updateStrategy"). - withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})). - withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})). - withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n1", nil)). + withWorkManifest(spoketesting.NewUnstructuredWithContent( + "v1", "NewObject", "ns1", "n1", + map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})). + withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent( + "v1", "NewObject", "ns1", "n1", + map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})). + withManifestConfig(newManifestConfigOption( + "", "newobjects", "ns1", "n1", nil)). withExpectedWorkAction("patch"). withAppliedWorkAction("create"). withExpectedDynamicAction("get", "update"). withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}). - withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}), + withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}), newTestCase("update single resource with update updateStrategy"). - withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})). 
- withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})). - withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n1", &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeUpdate})). + withWorkManifest(spoketesting.NewUnstructuredWithContent( + "v1", "NewObject", "ns1", "n1", + map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})). + withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent( + "v1", "NewObject", "ns1", "n1", + map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})). + withManifestConfig(newManifestConfigOption( + "", "newobjects", "ns1", "n1", + &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeUpdate})). withExpectedWorkAction("patch"). withAppliedWorkAction("create"). withExpectedDynamicAction("get", "update"). withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}). - withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}), + withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}), newTestCase("create single resource with updateStrategy not found"). - withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})). - withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})). - withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n2", &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})). + withWorkManifest(spoketesting.NewUnstructuredWithContent( + "v1", "NewObject", "ns1", "n1", + map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})). + withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent( + "v1", "NewObject", "ns1", "n1", + map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})). + withManifestConfig(newManifestConfigOption( + "", "newobjects", "ns1", "n2", + &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})). withExpectedWorkAction("patch"). withAppliedWorkAction("create"). withExpectedDynamicAction("get", "update"). withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}). - withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}), + withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}), newTestCase("create single resource with server side apply updateStrategy"). - withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})). - withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n1", &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})). + withWorkManifest(spoketesting.NewUnstructuredWithContent( + "v1", "NewObject", "ns1", "n1", + map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})). + withManifestConfig(newManifestConfigOption( + "", "newobjects", "ns1", "n1", + &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})). withExpectedWorkAction("patch"). withAppliedWorkAction("create"). withExpectedDynamicAction("patch", "patch"). 
withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}). - withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}), + withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}), newTestCase("update single resource with server side apply updateStrategy"). - withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})). - withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})). - withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n1", &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})). + withWorkManifest(spoketesting.NewUnstructuredWithContent( + "v1", "NewObject", "ns1", "n1", + map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})). + withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent( + "v1", "NewObject", "ns1", "n1", + map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})). + withManifestConfig(newManifestConfigOption( + "", "newobjects", "ns1", "n1", + &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})). withExpectedWorkAction("patch"). withAppliedWorkAction("create"). withExpectedDynamicAction("patch", "patch"). withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}). - withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}), + withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}), newTestCase("update single resource with create only updateStrategy"). - withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})). - withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})). - withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n1", &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeCreateOnly})). + withWorkManifest(spoketesting.NewUnstructuredWithContent( + "v1", "NewObject", "ns1", "n1", + map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})). + withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent( + "v1", "NewObject", "ns1", "n1", + map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})). + withManifestConfig(newManifestConfigOption( + "", "newobjects", "ns1", "n1", + &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeCreateOnly})). withExpectedWorkAction("patch"). withAppliedWorkAction("create"). withExpectedDynamicAction("get", "patch"). withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}). - withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}), + withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}), } for _, c := range cases { @@ -444,9 +490,13 @@ func TestUpdateStrategy(t *testing.T) { withUnstructuredObject(c.spokeDynamicObject...) 
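// Illustrative sketch of the conversion cleanup running through these test cases:
// condition-type names such as workapiv1.WorkApplied appear to be plain string
// constants, so the surrounding string(...) conversion is redundant, which is what
// the linter flags. The constant below is a hypothetical stand-in, not the real API
// definition.
package main

import "fmt"

const WorkApplied = "Applied" // stand-in for workapiv1.WorkApplied

func main() {
	// The conversion is a no-op on an untyped string constant.
	fmt.Println(string(WorkApplied) == WorkApplied) // true
}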
// The default reactor doesn't support apply, so we need our own (trivial) reactor - controller.dynamicClient.PrependReactor("patch", "newobjects", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) { - return true, spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}}), nil // clusterroleaggregator drops returned objects so no point in constructing them - }) + controller.dynamicClient.PrependReactor("patch", "newobjects", + func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) { + // clusterroleaggregator drops returned objects so no point in constructing them + return true, spoketesting.NewUnstructuredWithContent( + "v1", "NewObject", "ns1", "n1", + map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}}), nil + }) syncContext := testingcommon.NewFakeSyncContext(t, workKey) err := controller.toController().sync(context.TODO(), syncContext) if err != nil { @@ -460,14 +510,20 @@ func TestUpdateStrategy(t *testing.T) { func TestServerSideApplyConflict(t *testing.T) { testCase := newTestCase("update single resource with server side apply updateStrategy"). - withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})). - withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})). - withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n1", &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})). + withWorkManifest(spoketesting.NewUnstructuredWithContent( + "v1", "NewObject", "ns1", "n1", + map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})). + withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent( + "v1", "NewObject", "ns1", "n1", + map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})). + withManifestConfig(newManifestConfigOption( + "", "newobjects", "ns1", "n1", + &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})). withExpectedWorkAction("patch"). withAppliedWorkAction("create"). withExpectedDynamicAction("patch"). withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionFalse}). - withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionFalse}) + withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionFalse}) work, workKey := spoketesting.NewManifestWork(0, testCase.workManifest...) 
work.Spec.ManifestConfigs = testCase.workManifestConfig @@ -613,7 +669,7 @@ func TestBuildResourceMeta(t *testing.T) { actual.Ordinal = c.expected.Ordinal if !equality.Semantic.DeepEqual(actual, c.expected) { - t.Errorf(diff.ObjectDiff(actual, c.expected)) + t.Errorf(cmp.Diff(actual, c.expected)) } }) } @@ -644,7 +700,7 @@ func TestBuildManifestResourceMeta(t *testing.T) { actual.Ordinal = c.expected.Ordinal if !equality.Semantic.DeepEqual(actual, c.expected) { - t.Errorf(diff.ObjectDiff(actual, c.expected)) + t.Errorf(cmp.Diff(actual, c.expected)) } }) } @@ -665,24 +721,24 @@ func TestManageOwner(t *testing.T) { }{ { name: "foreground by default", - owner: metav1.OwnerReference{UID: "testowner"}, - expectOwner: metav1.OwnerReference{UID: "testowner"}, + owner: metav1.OwnerReference{UID: defaultOwner}, + expectOwner: metav1.OwnerReference{UID: defaultOwner}, }, { name: "orphan the resource", - owner: metav1.OwnerReference{UID: "testowner"}, + owner: metav1.OwnerReference{UID: defaultOwner}, deleteOption: &workapiv1.DeleteOption{PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan}, expectOwner: metav1.OwnerReference{UID: "testowner-"}, }, { name: "add owner if no orphan rule with selectively orphan", - owner: metav1.OwnerReference{UID: "testowner"}, + owner: metav1.OwnerReference{UID: defaultOwner}, deleteOption: &workapiv1.DeleteOption{PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan}, - expectOwner: metav1.OwnerReference{UID: "testowner"}, + expectOwner: metav1.OwnerReference{UID: defaultOwner}, }, { name: "orphan the resource with selectively orphan", - owner: metav1.OwnerReference{UID: "testowner"}, + owner: metav1.OwnerReference{UID: defaultOwner}, deleteOption: &workapiv1.DeleteOption{ PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan, SelectivelyOrphan: &workapiv1.SelectivelyOrphan{ @@ -700,7 +756,7 @@ func TestManageOwner(t *testing.T) { }, { name: "add owner if resourcec is not matched in orphan rule with selectively orphan", - owner: metav1.OwnerReference{UID: "testowner"}, + owner: metav1.OwnerReference{UID: defaultOwner}, deleteOption: &workapiv1.DeleteOption{ PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan, SelectivelyOrphan: &workapiv1.SelectivelyOrphan{ @@ -714,7 +770,7 @@ func TestManageOwner(t *testing.T) { }, }, }, - expectOwner: metav1.OwnerReference{UID: "testowner"}, + expectOwner: metav1.OwnerReference{UID: defaultOwner}, }, } diff --git a/pkg/work/spoke/controllers/statuscontroller/availablestatus_controller.go b/pkg/work/spoke/controllers/statuscontroller/availablestatus_controller.go index c3d27c441..4c009c3d7 100644 --- a/pkg/work/spoke/controllers/statuscontroller/availablestatus_controller.go +++ b/pkg/work/spoke/controllers/statuscontroller/availablestatus_controller.go @@ -205,8 +205,8 @@ func aggregateManifestConditions(generation int64, manifests []workapiv1.Manifes func (c *AvailableStatusController) getFeedbackValues( resourceMeta workapiv1.ManifestResourceMeta, obj *unstructured.Unstructured, manifestOptions []workapiv1.ManifestConfigOption) ([]workapiv1.FeedbackValue, metav1.Condition) { - errs := []error{} - values := []workapiv1.FeedbackValue{} + var errs []error + var values []workapiv1.FeedbackValue option := helper.FindManifestConiguration(resourceMeta, manifestOptions) diff --git a/pkg/work/spoke/controllers/statuscontroller/availablestatus_controller_test.go b/pkg/work/spoke/controllers/statuscontroller/availablestatus_controller_test.go index 70ad88bbc..235111f76 
100644 --- a/pkg/work/spoke/controllers/statuscontroller/availablestatus_controller_test.go +++ b/pkg/work/spoke/controllers/statuscontroller/availablestatus_controller_test.go @@ -115,7 +115,7 @@ func TestSyncManifestWork(t *testing.T) { t.Fatal(spew.Sdump(work.Status.ResourceStatus.Manifests[0].Conditions)) } - if !hasStatusCondition(work.Status.Conditions, string(workapiv1.WorkAvailable), metav1.ConditionTrue) { + if !hasStatusCondition(work.Status.Conditions, workapiv1.WorkAvailable, metav1.ConditionTrue) { t.Fatal(spew.Sdump(work.Status.Conditions)) } }, @@ -184,7 +184,7 @@ func TestSyncManifestWork(t *testing.T) { t.Fatal(spew.Sdump(work.Status.ResourceStatus.Manifests[1].Conditions)) } - if !hasStatusCondition(work.Status.Conditions, string(workapiv1.WorkAvailable), metav1.ConditionUnknown) { + if !hasStatusCondition(work.Status.Conditions, workapiv1.WorkAvailable, metav1.ConditionUnknown) { t.Fatal(spew.Sdump(work.Status.Conditions)) } }, @@ -230,7 +230,7 @@ func TestStatusFeedback(t *testing.T) { validateActions func(t *testing.T, actions []clienttesting.Action) }{ { - name: "resource identifer is not matched", + name: "resource identifier is not matched", existingResources: []runtime.Object{ spoketesting.NewUnstructuredSecret("ns1", "n1", false, "ns1-n1"), }, diff --git a/pkg/work/spoke/statusfeedback/reader.go b/pkg/work/spoke/statusfeedback/reader.go index 11b8ed615..896e4dd6a 100644 --- a/pkg/work/spoke/statusfeedback/reader.go +++ b/pkg/work/spoke/statusfeedback/reader.go @@ -30,8 +30,8 @@ func NewStatusReader() *StatusReader { } func (s *StatusReader) GetValuesByRule(obj *unstructured.Unstructured, rule workapiv1.FeedbackRule) ([]workapiv1.FeedbackValue, error) { - errs := []error{} - values := []workapiv1.FeedbackValue{} + var errs []error + var values []workapiv1.FeedbackValue switch rule.Type { case workapiv1.WellKnownStatusType: diff --git a/pkg/work/webhook/start.go b/pkg/work/webhook/start.go index c812cb030..9ba75362e 100644 --- a/pkg/work/webhook/start.go +++ b/pkg/work/webhook/start.go @@ -1,6 +1,8 @@ package webhook import ( + "crypto/tls" + "k8s.io/apimachinery/pkg/runtime" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. 
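// Sketch of the TLS configuration pattern used in the webhook hunk just below:
// newer controller-runtime webhook.Options drops the TLSMinVersion string in favor
// of TLSOpts, a slice of func(*tls.Config) applied to the server's TLS config.
// Standalone illustration using only crypto/tls; the variable names are assumptions.
package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	tlsOpts := []func(*tls.Config){
		func(cfg *tls.Config) { cfg.MinVersion = tls.VersionTLS13 },
	}
	cfg := &tls.Config{}
	for _, opt := range tlsOpts {
		opt(cfg) // each option mutates the config in place
	}
	fmt.Println(cfg.MinVersion == tls.VersionTLS13) // true
}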
@@ -30,10 +32,16 @@ func init() { func (c *Options) RunWebhookServer() error { mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, - Port: c.Port, HealthProbeBindAddress: ":8000", - CertDir: c.CertDir, - WebhookServer: webhook.NewServer(webhook.Options{TLSMinVersion: "1.3"}), + WebhookServer: webhook.NewServer(webhook.Options{ + TLSOpts: []func(config *tls.Config){ + func(config *tls.Config) { + config.MinVersion = tls.VersionTLS13 + }, + }, + Port: c.Port, + CertDir: c.CertDir, + }), }) if err != nil { diff --git a/pkg/work/webhook/v1/manifestwork_validating.go b/pkg/work/webhook/v1/manifestwork_validating.go index 5665e01d6..c0fbb5d3c 100644 --- a/pkg/work/webhook/v1/manifestwork_validating.go +++ b/pkg/work/webhook/v1/manifestwork_validating.go @@ -49,7 +49,7 @@ func (r *ManifestWorkWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *ManifestWorkWebhook) ValidateDelete(_ context.Context, obj runtime.Object) (admission.Warnings, error) { +func (r *ManifestWorkWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { return nil, nil } diff --git a/pkg/work/webhook/v1/manifestwork_validating_test.go b/pkg/work/webhook/v1/manifestwork_validating_test.go index c5784e2c7..f83c7f849 100644 --- a/pkg/work/webhook/v1/manifestwork_validating_test.go +++ b/pkg/work/webhook/v1/manifestwork_validating_test.go @@ -100,7 +100,8 @@ func TestManifestWorkExecutorValidate(t *testing.T) { }, }, }, - expectErr: apierrors.NewBadRequest(fmt.Sprintf("user test2 cannot manipulate the Manifestwork with executor /klusterlet-work-sa in namespace cluster1")), + expectErr: apierrors.NewBadRequest( + "user test2 cannot manipulate the Manifestwork with executor /klusterlet-work-sa in namespace cluster1"), }, { name: "validate executor not nil success", @@ -164,7 +165,8 @@ func TestManifestWorkExecutorValidate(t *testing.T) { }, }, }, - expectErr: apierrors.NewBadRequest(fmt.Sprintf("user test1 cannot manipulate the Manifestwork with executor ns1/executor2 in namespace cluster1")), + expectErr: apierrors.NewBadRequest( + "user test1 cannot manipulate the Manifestwork with executor ns1/executor2 in namespace cluster1"), }, { name: "validate executor not changed success", @@ -246,7 +248,8 @@ func TestManifestWorkExecutorValidate(t *testing.T) { }, }, }, - expectErr: apierrors.NewBadRequest(fmt.Sprintf("user test1 cannot manipulate the Manifestwork with executor ns1/executor2 in namespace cluster1")), + expectErr: apierrors.NewBadRequest( + "user test1 cannot manipulate the Manifestwork with executor ns1/executor2 in namespace cluster1"), }, } diff --git a/pkg/work/webhook/v1alpha1/manifestworkreplicaset_validating.go b/pkg/work/webhook/v1alpha1/manifestworkreplicaset_validating.go index 9bb170787..16ce5fffb 100644 --- a/pkg/work/webhook/v1alpha1/manifestworkreplicaset_validating.go +++ b/pkg/work/webhook/v1alpha1/manifestworkreplicaset_validating.go @@ -45,7 +45,7 @@ func (r *ManifestWorkReplicaSetWebhook) ValidateUpdate(ctx context.Context, oldO } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *ManifestWorkReplicaSetWebhook) ValidateDelete(_ context.Context, obj runtime.Object) ( +func (r *ManifestWorkReplicaSetWebhook) ValidateDelete(_ context.Context, _ runtime.Object) ( admission.Warnings, error) { if err := checkFeatureEnabled(); err != nil { return nil, err @@ -55,7 +55,7 @@ func (r 
*ManifestWorkReplicaSetWebhook) ValidateDelete(_ context.Context, obj ru } func (r *ManifestWorkReplicaSetWebhook) validateRequest( - newmwrSet *workv1alpha1.ManifestWorkReplicaSet, oldmwrSet *workv1alpha1.ManifestWorkReplicaSet, + newmwrSet *workv1alpha1.ManifestWorkReplicaSet, _ *workv1alpha1.ManifestWorkReplicaSet, ctx context.Context) error { if err := checkFeatureEnabled(); err != nil { return err diff --git a/pkg/work/webhook/v1alpha1/webhook.go b/pkg/work/webhook/v1alpha1/webhook.go index f104a2686..0ab6facc5 100644 --- a/pkg/work/webhook/v1alpha1/webhook.go +++ b/pkg/work/webhook/v1alpha1/webhook.go @@ -4,7 +4,7 @@ import ( "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" - v1alpha1 "open-cluster-management.io/api/work/v1alpha1" + "open-cluster-management.io/api/work/v1alpha1" ) type ManifestWorkReplicaSetWebhook struct { diff --git a/test/benchmark/placement/benchmark_test.go b/test/benchmark/placement/benchmark_test.go index f1fe9316e..ebaa2e462 100644 --- a/test/benchmark/placement/benchmark_test.go +++ b/test/benchmark/placement/benchmark_test.go @@ -21,7 +21,6 @@ import ( clusterapiv1beta2 "open-cluster-management.io/api/cluster/v1beta2" controllers "open-cluster-management.io/ocm/pkg/placement/controllers" - scheduling "open-cluster-management.io/ocm/pkg/placement/controllers/scheduling" "open-cluster-management.io/ocm/test/integration/util" ) @@ -103,7 +102,6 @@ func BenchmarkSchedulePlacements10000(b *testing.B) { func benchmarkSchedulePlacements(b *testing.B, pnum, cnum int) { var err error ctx, cancel := context.WithCancel(context.Background()) - scheduling.ResyncInterval = time.Second * 5 // start a kube-apiserver testEnv := &envtest.Environment{ diff --git a/test/e2e/addon_lease_test.go b/test/e2e/addon_lease_test.go index 80f9566b0..cc8d3b576 100644 --- a/test/e2e/addon_lease_test.go +++ b/test/e2e/addon_lease_test.go @@ -5,8 +5,8 @@ import ( "fmt" "time" - ginkgo "github.com/onsi/ginkgo/v2" - gomega "github.com/onsi/gomega" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" coordv1 "k8s.io/api/coordination/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -19,6 +19,8 @@ import ( operatorapiv1 "open-cluster-management.io/api/operator/v1" ) +const availableLabelValue = "available" + var _ = ginkgo.Describe("Addon Health Check", func() { ginkgo.Context("Checking addon lease on managed cluster to update addon status", func() { var addOnName string @@ -79,7 +81,7 @@ var _ = ginkgo.Describe("Addon Health Check", func() { return false } key := fmt.Sprintf("feature.open-cluster-management.io/addon-%s", addOnName) - return cluster.Labels[key] == "available" + return cluster.Labels[key] == availableLabelValue }, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeTrue()) }) @@ -117,7 +119,7 @@ var _ = ginkgo.Describe("Addon Health Check", func() { return false } key := fmt.Sprintf("feature.open-cluster-management.io/addon-%s", addOnName) - return cluster.Labels[key] == "available" + return cluster.Labels[key] == availableLabelValue }, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeTrue()) ginkgo.By(fmt.Sprintf("Updating lease %q with a past time", addOnName)) @@ -186,7 +188,7 @@ var _ = ginkgo.Describe("Addon Health Check", func() { return false } key := fmt.Sprintf("feature.open-cluster-management.io/addon-%s", addOnName) - return cluster.Labels[key] == "available" + return cluster.Labels[key] == availableLabelValue }, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeTrue()) 
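// Sketch of the repeated-literal cleanup in the e2e and auth tests around here:
// strings that recur across cases ("available", "test-allow", "cluster1") are lifted
// into package-level constants such as availableLabelValue so the duplication lint
// stops firing. The addon label key below is illustrative only.
package main

import "fmt"

const availableLabelValue = "available"

func main() {
	labels := map[string]string{
		"feature.open-cluster-management.io/addon-example": availableLabelValue,
	}
	fmt.Println(labels["feature.open-cluster-management.io/addon-example"] == availableLabelValue) // true
}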
ginkgo.By(fmt.Sprintf("Deleting lease %q", addOnName)) @@ -223,7 +225,7 @@ var _ = ginkgo.Describe("Addon Health Check", func() { var klusterletName, clusterName, addOnName string ginkgo.BeforeEach(func() { if !deployKlusterlet { - ginkgo.Skip(fmt.Sprintf("skip if disabling deploy klusterlet")) + ginkgo.Skip("skip if disabling deploy klusterlet") } klusterletName = fmt.Sprintf("e2e-klusterlet-%s", rand.String(6)) clusterName = fmt.Sprintf("e2e-managedcluster-%s", rand.String(6)) @@ -284,7 +286,7 @@ var _ = ginkgo.Describe("Addon Health Check", func() { }, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed()) // delete registration agent to stop agent update its status - ginkgo.By(fmt.Sprintf("Stoping klusterlet")) + ginkgo.By("Stoping klusterlet") err = t.OperatorClient.OperatorV1().Klusterlets().Delete(context.TODO(), klusterletName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) diff --git a/test/e2e/addonmanagement_test.go b/test/e2e/addonmanagement_test.go index 18a2ca374..3a4a39bd4 100644 --- a/test/e2e/addonmanagement_test.go +++ b/test/e2e/addonmanagement_test.go @@ -29,7 +29,8 @@ const ( originalImageValue = "quay.io/open-cluster-management/addon-examples:latest" overrideImageValue = "quay.io/ocm/addon-examples:latest" customSignerName = "example.com/signer-name" - customSignerSecretName = "addon-signer-secret" + //#nosec G101 + customSignerSecretName = "addon-signer-secret" ) var ( diff --git a/test/e2e/clusterset_test.go b/test/e2e/clusterset_test.go index 903f78289..21faa1976 100644 --- a/test/e2e/clusterset_test.go +++ b/test/e2e/clusterset_test.go @@ -98,7 +98,8 @@ var _ = ginkgo.Describe("Create v1beta2 managedclusterset", func() { if !reflect.DeepEqual(string(v1beta1ManagedClusterSet.Spec.ClusterSelector.SelectorType), string(managedClusterSet.Spec.ClusterSelector.SelectorType)) { return false } - if !reflect.DeepEqual(v1beta1ManagedClusterSet.Spec.ClusterSelector.LabelSelector.MatchLabels, managedClusterSet.Spec.ClusterSelector.LabelSelector.MatchLabels) { + if !reflect.DeepEqual(v1beta1ManagedClusterSet.Spec.ClusterSelector.LabelSelector.MatchLabels, + managedClusterSet.Spec.ClusterSelector.LabelSelector.MatchLabels) { return false } return true @@ -136,7 +137,8 @@ var _ = ginkgo.Describe("Create v1beta2 managedclusterset", func() { }) ginkgo.It("Check if the v1beta1 storageversion is removed from clustersetbinding crd", func() { gomega.Eventually(func() error { - clustersetBindingCrd, err := t.HubAPIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), clustersetBindingCrdName, metav1.GetOptions{}) + clustersetBindingCrd, err := t.HubAPIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get( + context.Background(), clustersetBindingCrdName, metav1.GetOptions{}) if err != nil { return err } diff --git a/test/e2e/managedcluster_loopback_test.go b/test/e2e/managedcluster_loopback_test.go index 141030212..7341fef6d 100644 --- a/test/e2e/managedcluster_loopback_test.go +++ b/test/e2e/managedcluster_loopback_test.go @@ -9,6 +9,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" certificatesv1 "k8s.io/api/certificates/v1" + certificates "k8s.io/api/certificates/v1beta1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -245,7 +246,7 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() { created.Status = addonv1alpha1.ManagedClusterAddOnStatus{ Registrations: []addonv1alpha1.RegistrationConfig{ { - 
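The ginkgo.Skip and ginkgo.By changes above drop fmt.Sprintf around format strings that contain no verbs, which is what staticcheck's S1039 check flags. A tiny sketch of the rule (message text is illustrative):

package main

import "fmt"

func main() {
	name := "cluster1"

	// Sprintf is only needed when there are verbs to fill in.
	fmt.Println(fmt.Sprintf("deleting lease for %s", name))

	// With no verbs, pass the literal directly, as the hunk above does.
	fmt.Println("skip if klusterlet deployment is disabled")
}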
SignerName: "kubernetes.io/kube-apiserver-client", + SignerName: certificates.KubeAPIServerClientSignerName, }, }, } diff --git a/test/e2e/managedclusterset_test.go b/test/e2e/managedclusterset_test.go index 3ac1b15db..44c69e2f8 100644 --- a/test/e2e/managedclusterset_test.go +++ b/test/e2e/managedclusterset_test.go @@ -86,7 +86,8 @@ var _ = ginkgo.Describe("Create v1beta1 managedclusterset", func() { if string(v1beta2ManagedClusterSet.Spec.ClusterSelector.SelectorType) != string(managedClusterSet.Spec.ClusterSelector.SelectorType) { return fmt.Errorf("unexpected v1beta2 cluster set %v", v1beta2ManagedClusterSet) } - if !reflect.DeepEqual(v1beta2ManagedClusterSet.Spec.ClusterSelector.LabelSelector.MatchLabels, managedClusterSet.Spec.ClusterSelector.LabelSelector.MatchLabels) { + if !reflect.DeepEqual(v1beta2ManagedClusterSet.Spec.ClusterSelector.LabelSelector.MatchLabels, + managedClusterSet.Spec.ClusterSelector.LabelSelector.MatchLabels) { return fmt.Errorf("unexpected v1beta2 cluster set %v", v1beta2ManagedClusterSet) } return nil @@ -216,7 +217,8 @@ var _ = ginkgo.Describe("Create v1beta2 managedclusterset", func() { if string(v1beta1ManagedClusterSet.Spec.ClusterSelector.SelectorType) != string(managedClusterSet.Spec.ClusterSelector.SelectorType) { return fmt.Errorf("unexpected v1beta1 cluster set %v", v1beta1ManagedClusterSet) } - if !reflect.DeepEqual(v1beta1ManagedClusterSet.Spec.ClusterSelector.LabelSelector.MatchLabels, managedClusterSet.Spec.ClusterSelector.LabelSelector.MatchLabels) { + if !reflect.DeepEqual(v1beta1ManagedClusterSet.Spec.ClusterSelector.LabelSelector.MatchLabels, + managedClusterSet.Spec.ClusterSelector.LabelSelector.MatchLabels) { return fmt.Errorf("unexpected v1beta1 cluster set %v", v1beta1ManagedClusterSet) } return nil diff --git a/test/e2e/manifestworkreplicaset_test.go b/test/e2e/manifestworkreplicaset_test.go index c598c428e..3472761da 100644 --- a/test/e2e/manifestworkreplicaset_test.go +++ b/test/e2e/manifestworkreplicaset_test.go @@ -64,7 +64,8 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", func() { PlacementRefs: []workapiv1alpha1.LocalPlacementReference{placementRef}, }, } - manifestWorkReplicaSet, err = t.HubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(metav1.NamespaceDefault).Create(context.TODO(), manifestWorkReplicaSet, metav1.CreateOptions{}) + manifestWorkReplicaSet, err = t.HubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(metav1.NamespaceDefault).Create( + context.TODO(), manifestWorkReplicaSet, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) csb := &clusterapiv1beta2.ManagedClusterSetBinding{ @@ -76,7 +77,8 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", func() { ClusterSet: "default", }, } - _, err = t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(metav1.NamespaceDefault).Create(context.Background(), csb, metav1.CreateOptions{}) + _, err = t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(metav1.NamespaceDefault).Create( + context.Background(), csb, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) placement := &clusterv1beta1.Placement{ @@ -107,7 +109,8 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", func() { ginkgo.By("check if manifestworkreplicaset status") gomega.Eventually(func() error { - mwrs, err := t.HubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(metav1.NamespaceDefault).Get(context.TODO(), manifestWorkReplicaSet.Name, metav1.GetOptions{}) + mwrs, err := 
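The loopback-test hunk above swaps the hand-typed "kubernetes.io/kube-apiserver-client" string for the constant exported by the certificates API. The hunk imports the v1beta1 package; the v1 constant shown below has the same value and is used here only for illustration:

package main

import (
	"fmt"

	certificatesv1 "k8s.io/api/certificates/v1"
)

func main() {
	// Prefer the API-provided constant over a literal signer name; the
	// compiler then catches typos that a string would let through.
	signer := certificatesv1.KubeAPIServerClientSignerName
	fmt.Println(signer) // prints: kubernetes.io/kube-apiserver-client
}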
t.HubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(metav1.NamespaceDefault).Get( + context.TODO(), manifestWorkReplicaSet.Name, metav1.GetOptions{}) if err != nil { return err } @@ -131,7 +134,8 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", func() { // TODO we should also update manifestwork replicaset and test - err = t.HubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(metav1.NamespaceDefault).Delete(context.TODO(), manifestWorkReplicaSet.Name, metav1.DeleteOptions{}) + err = t.HubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(metav1.NamespaceDefault).Delete( + context.TODO(), manifestWorkReplicaSet.Name, metav1.DeleteOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = t.ClusterClient.ClusterV1beta1().Placements(placement.Namespace).Delete(context.TODO(), placement.Name, metav1.DeleteOptions{}) @@ -288,7 +292,7 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", func() { return false } - return int(mwrSet.Status.Summary.Total) == numOfClusters + return mwrSet.Status.Summary.Total == numOfClusters }, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeTrue()) ginkgo.By("Check manifestWorks are created") diff --git a/test/e2e/placement_test.go b/test/e2e/placement_test.go index 570cf3188..e3ac33692 100644 --- a/test/e2e/placement_test.go +++ b/test/e2e/placement_test.go @@ -24,7 +24,7 @@ const ( maxNumOfClusterDecisions = 100 ) -// Test cases with lable "sanity-check" could be ran as sanity check on an existing enviroment with +// Test cases with lable "sanity-check" could be ran as sanity check on an existing environment with // placement controller installed and well configured . Resource leftovers should be cleaned up on // the hub cluster. var _ = ginkgo.Describe("Placement", ginkgo.Label("sanity-check"), func() { diff --git a/test/e2e/registration_webhook_test.go b/test/e2e/registration_webhook_test.go index f9a83b545..63fdafd35 100644 --- a/test/e2e/registration_webhook_test.go +++ b/test/e2e/registration_webhook_test.go @@ -246,7 +246,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { _, err = authorizedClient.ClusterV1().ManagedClusters().Create(context.TODO(), managedCluster, metav1.CreateOptions{}) gomega.Expect(errors.IsForbidden(err)).Should(gomega.BeTrue()) - // remove the finalizer to truely delete the namespace + // remove the finalizer to truly delete the namespace ns, err := t.HubKubeClient.CoreV1().Namespaces().Get(context.TODO(), clusterName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) ns.Finalizers = []string{} @@ -763,13 +763,15 @@ var _ = ginkgo.Describe("Admission webhook", func() { // create a cluster set binding clusterSetName := fmt.Sprintf("clusterset-%s", rand.String(6)) managedClusterSetBinding := newManagedClusterSetBinding(namespace, clusterSetName, clusterSetName) - managedClusterSetBinding, err := t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) + managedClusterSetBinding, err := t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create( + context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // update the cluster set binding clusterSetName = fmt.Sprintf("clusterset-%s", rand.String(6)) patch := fmt.Sprintf("{\"spec\": {\"clusterSet\": %q}}", clusterSetName) - _, err = t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Patch(context.TODO(), managedClusterSetBinding.Name, 
types.MergePatchType, []byte(patch), metav1.PatchOptions{}) + _, err = t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Patch( + context.TODO(), managedClusterSetBinding.Name, types.MergePatchType, []byte(patch), metav1.PatchOptions{}) gomega.Expect(err).To(gomega.HaveOccurred()) gomega.Expect(errors.IsBadRequest(err)).Should(gomega.BeTrue()) }) @@ -870,13 +872,15 @@ var _ = ginkgo.Describe("Admission webhook", func() { // create a cluster set binding clusterSetName := fmt.Sprintf("clusterset-%s", rand.String(6)) managedClusterSetBinding := newManagedClusterSetBinding(namespace, clusterSetName, clusterSetName) - managedClusterSetBinding, err := t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) + managedClusterSetBinding, err := t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create( + context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // update the cluster set binding clusterSetName = fmt.Sprintf("clusterset-%s", rand.String(6)) patch := fmt.Sprintf("{\"spec\": {\"clusterSet\": %q}}", clusterSetName) - _, err = t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Patch(context.TODO(), managedClusterSetBinding.Name, types.MergePatchType, []byte(patch), metav1.PatchOptions{}) + _, err = t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Patch( + context.TODO(), managedClusterSetBinding.Name, types.MergePatchType, []byte(patch), metav1.PatchOptions{}) gomega.Expect(err).To(gomega.HaveOccurred()) gomega.Expect(errors.IsBadRequest(err)).Should(gomega.BeTrue()) }) diff --git a/test/e2e/work_webhook_test.go b/test/e2e/work_webhook_test.go index c6d364f33..70081a4b5 100644 --- a/test/e2e/work_webhook_test.go +++ b/test/e2e/work_webhook_test.go @@ -19,7 +19,7 @@ import ( "open-cluster-management.io/ocm/test/integration/util" ) -// Test cases with lable "sanity-check" could be ran on an existing enviroment with validating webhook installed +// Test cases with lable "sanity-check" could be ran on an existing environment with validating webhook installed // and well configured as sanity check. Resource leftovers should be cleaned up on both hub and managed cluster. var _ = ginkgo.Describe("ManifestWork admission webhook", ginkgo.Label("validating-webhook", "sanity-check"), func() { var nameSuffix string diff --git a/test/e2e/work_workload_test.go b/test/e2e/work_workload_test.go index 3ac44093b..1c3f14ffe 100644 --- a/test/e2e/work_workload_test.go +++ b/test/e2e/work_workload_test.go @@ -145,7 +145,7 @@ const ( }` ) -// Test cases with lable "sanity-check" could be ran on an existing enviroment with work agent installed +// Test cases with lable "sanity-check" could be ran on an existing environment with work agent installed // and well configured as sanity check. Resource leftovers should be cleaned up on both hub and managed cluster. 
var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check"), func() { var workName string @@ -249,7 +249,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") for _, item := range appliedManifestWorkList.Items { if strings.HasSuffix(item.Name, workName) { - appliedManifestWork = &item + appliedManifestWork = item.DeepCopy() return nil } } diff --git a/test/integration/addon/addon_configs_test.go b/test/integration/addon/addon_configs_test.go index 452ea44c6..a355688d5 100644 --- a/test/integration/addon/addon_configs_test.go +++ b/test/integration/addon/addon_configs_test.go @@ -60,7 +60,8 @@ var _ = ginkgo.Describe("AddConfigs", func() { }, Spec: addOnDefaultConfigSpec, } - _, err = hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configDefaultNamespace).Create(context.Background(), addOnDefaultConfig, metav1.CreateOptions{}) + _, err = hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configDefaultNamespace).Create( + context.Background(), addOnDefaultConfig, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) diff --git a/test/integration/addon/addon_manager_upgrade_test.go b/test/integration/addon/addon_manager_upgrade_test.go index ad2463d7b..dcee87b40 100644 --- a/test/integration/addon/addon_manager_upgrade_test.go +++ b/test/integration/addon/addon_manager_upgrade_test.go @@ -188,7 +188,8 @@ var _ = ginkgo.Describe("Addon upgrade", func() { }, Spec: addOnDefaultConfigSpec, } - _, err = hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configDefaultNamespace).Create(context.Background(), addOnDefaultConfig, metav1.CreateOptions{}) + _, err = hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configDefaultNamespace).Create( + context.Background(), addOnDefaultConfig, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // prepare update config @@ -223,7 +224,8 @@ var _ = ginkgo.Describe("Addon upgrade", func() { ginkgo.By("check work") gomega.Eventually(func() error { for i := 0; i < 4; i++ { - work, err := hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).Get(context.Background(), manifestWorkName, metav1.GetOptions{}) + work, err := hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).Get( + context.Background(), manifestWorkName, metav1.GetOptions{}) if err != nil { return err } @@ -241,8 +243,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() { work, err := hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).Get(context.Background(), manifestWorkName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation}) - meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable", ObservedGeneration: work.Generation}) + meta.SetStatusCondition( + &work.Status.Conditions, + metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation}) + meta.SetStatusCondition( + &work.Status.Conditions, + metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable", ObservedGeneration: work.Generation}) _, err = hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).UpdateStatus(context.Background(), work, metav1.UpdateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) } @@ -324,10 
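The work_workload_test.go hunk above replaces `appliedManifestWork = &item` with `item.DeepCopy()`. Taking the address of a range variable is what gosec reports as G601 (implicit memory aliasing in a for loop): before Go 1.22 the loop reuses a single variable, so the stored pointer refers to whatever the variable holds later, and gosec flags the pattern even when the loop exits immediately. A sketch with a plain struct, where a local copy stands in for the API object's DeepCopy:

package main

import "fmt"

type work struct{ Name string }

func main() {
	items := []work{{"a-work"}, {"b-work"}, {"target-work"}}

	var picked *work
	for _, item := range items {
		if item.Name == "b-work" {
			// Flagged by gosec: picked = &item aliases the loop variable.
			// Safe: copy the element out (the real code calls DeepCopy()).
			cp := item
			picked = &cp
			break
		}
	}
	fmt.Println(picked.Name) // prints: b-work
}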
+330,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() { ginkgo.By("update all") ginkgo.By("upgrade configs to test1") - addOnConfig, err := hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configDefaultNamespace).Get(context.Background(), configDefaultName, metav1.GetOptions{}) + addOnConfig, err := hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configDefaultNamespace).Get( + context.Background(), configDefaultName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) addOnConfig.Spec = addOnTest1ConfigSpec - _, err = hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configDefaultNamespace).Update(context.Background(), addOnConfig, metav1.UpdateOptions{}) + _, err = hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configDefaultNamespace).Update( + context.Background(), addOnConfig, metav1.UpdateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) ginkgo.By("check mca status") @@ -412,8 +420,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() { if err != nil { return err } - meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionFalse, Reason: "WorkApplied", ObservedGeneration: work.Generation}) - meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionFalse, Reason: "WorkAvailable", ObservedGeneration: work.Generation}) + meta.SetStatusCondition( + &work.Status.Conditions, + metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionFalse, Reason: "WorkApplied", ObservedGeneration: work.Generation}) + meta.SetStatusCondition( + &work.Status.Conditions, + metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionFalse, Reason: "WorkAvailable", ObservedGeneration: work.Generation}) _, err = hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).UpdateStatus(context.Background(), work, metav1.UpdateOptions{}) if err != nil { return err @@ -547,8 +559,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() { if err != nil { return err } - meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation}) - meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable", ObservedGeneration: work.Generation}) + meta.SetStatusCondition( + &work.Status.Conditions, + metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation}) + meta.SetStatusCondition( + &work.Status.Conditions, + metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable", ObservedGeneration: work.Generation}) _, err = hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).UpdateStatus(context.Background(), work, metav1.UpdateOptions{}) if err != nil { return err @@ -639,8 +655,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() { if err != nil { return err } - meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation}) - meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable", ObservedGeneration: work.Generation}) + meta.SetStatusCondition( + &work.Status.Conditions, + metav1.Condition{Type: 
workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation}) + meta.SetStatusCondition( + &work.Status.Conditions, + metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable", ObservedGeneration: work.Generation}) _, err = hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).UpdateStatus(context.Background(), work, metav1.UpdateOptions{}) if err != nil { return err diff --git a/test/integration/addon/agent_deploy_test.go b/test/integration/addon/agent_deploy_test.go index 7dcc8eac9..f4f9f20de 100644 --- a/test/integration/addon/agent_deploy_test.go +++ b/test/integration/addon/agent_deploy_test.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" @@ -61,18 +61,6 @@ const ( } } }` - - mchJson = `{ - "apiVersion": "operator.open-cluster-management.io/v1", - "kind": "MultiClusterHub", - "metadata": { - "name": "multiclusterhub", - "namespace": "open-cluster-management" - }, - "spec": { - "separateCertificateManagement": false - } -}` ) var _ = ginkgo.Describe("Agent deploy", func() { @@ -151,7 +139,7 @@ var _ = ginkgo.Describe("Agent deploy", func() { } if len(work.Spec.Workload.Manifests) != 1 { - return fmt.Errorf("Unexpected number of work manifests") + return fmt.Errorf("unexpected number of work manifests") } if apiequality.Semantic.DeepEqual(work.Spec.Workload.Manifests[0].Raw, []byte(deploymentJson)) { @@ -163,7 +151,9 @@ var _ = ginkgo.Describe("Agent deploy", func() { // Update work status to trigger addon status work, err := hubWorkClient.WorkV1().ManifestWorks(managedClusterName).Get(context.Background(), manifestWorkName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - meta.SetStatusCondition(&work.Status.Conditions, metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation}) + meta.SetStatusCondition( + &work.Status.Conditions, + metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation}) _, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName).UpdateStatus(context.Background(), work, metav1.UpdateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -174,10 +164,10 @@ var _ = ginkgo.Describe("Agent deploy", func() { } if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnManifestApplied) { - return fmt.Errorf("Unexpected addon applied condition, %v", addon.Status.Conditions) + return fmt.Errorf("unexpected addon applied condition, %v", addon.Status.Conditions) } if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) { - return fmt.Errorf("Unexpected addon progressing condition, %v", addon.Status.Conditions) + return fmt.Errorf("unexpected addon progressing condition, %v", addon.Status.Conditions) } return nil }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) @@ -185,7 +175,9 @@ var _ = ginkgo.Describe("Agent deploy", func() { // update work to available so addon becomes available work, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName).Get(context.Background(), manifestWorkName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - meta.SetStatusCondition(&work.Status.Conditions, 
metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable", ObservedGeneration: work.Generation}) + meta.SetStatusCondition( + &work.Status.Conditions, + metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable", ObservedGeneration: work.Generation}) _, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName).UpdateStatus(context.Background(), work, metav1.UpdateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -196,10 +188,10 @@ var _ = ginkgo.Describe("Agent deploy", func() { } if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionAvailable) { - return fmt.Errorf("Unexpected addon available condition, %v", addon.Status.Conditions) + return fmt.Errorf("unexpected addon available condition, %v", addon.Status.Conditions) } if !meta.IsStatusConditionFalse(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) { - return fmt.Errorf("Unexpected addon progressing condition, %v", addon.Status.Conditions) + return fmt.Errorf("unexpected addon progressing condition, %v", addon.Status.Conditions) } return nil }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) diff --git a/test/integration/addon/assertion_test.go b/test/integration/addon/assertion_test.go index 24d37b663..fc4f24304 100644 --- a/test/integration/addon/assertion_test.go +++ b/test/integration/addon/assertion_test.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" @@ -88,29 +88,29 @@ func createClusterManagementAddOn(name, defaultConfigNamespace, defaultConfigNam return clusterManagementAddon, nil } -func updateClusterManagementAddOn(ctx context.Context, new *addonapiv1alpha1.ClusterManagementAddOn) { - gomega.Eventually(func() bool { +func updateClusterManagementAddOn(_ context.Context, new *addonapiv1alpha1.ClusterManagementAddOn) { + gomega.Eventually(func() error { old, err := hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Get(context.Background(), new.Name, metav1.GetOptions{}) + if err != nil { + return err + } old.Spec = new.Spec old.Annotations = new.Annotations _, err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Update(context.Background(), old, metav1.UpdateOptions{}) - if err == nil { - return true - } - return false - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + return err + }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) } -func updateManagedClusterAddOnStatus(ctx context.Context, new *addonapiv1alpha1.ManagedClusterAddOn) { - gomega.Eventually(func() bool { +func updateManagedClusterAddOnStatus(_ context.Context, new *addonapiv1alpha1.ManagedClusterAddOn) { + gomega.Eventually(func() error { old, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Get(context.Background(), new.Name, metav1.GetOptions{}) + if err != nil { + return err + } old.Status = new.Status _, err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(old.Namespace).UpdateStatus(context.Background(), old, metav1.UpdateOptions{}) - if err == nil { - return true - } - return false - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + return err + }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) } func assertClusterManagementAddOnDefaultConfigReferences(name string, 
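The assertion_test.go rewrite above converts Eventually bodies from returning bool to returning error and asserts Succeed(), so a timeout reports the last underlying error instead of a bare "expected true" message. A minimal sketch using Gomega directly in a standard test (the update closure is a stand-in for the Get+Update round trip against the hub client):

package example

import (
	"errors"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestUpdateEventually(t *testing.T) {
	g := gomega.NewWithT(t)

	attempts := 0
	update := func() error {
		// Fails twice, then succeeds, mimicking a conflict-and-retry update.
		attempts++
		if attempts < 3 {
			return errors.New("conflict: object was modified, retrying")
		}
		return nil
	}

	// Returning the error and asserting Succeed() surfaces the last error in
	// the failure output, which the bool form could not.
	g.Eventually(update, 2*time.Second, 100*time.Millisecond).Should(gomega.Succeed())
}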
expect ...addonapiv1alpha1.DefaultConfigReference) { @@ -123,14 +123,14 @@ func assertClusterManagementAddOnDefaultConfigReferences(name string, expect ... } if len(actual.Status.DefaultConfigReferences) != len(expect) { - return fmt.Errorf("Expected %v default config reference, actual: %v", len(expect), len(actual.Status.DefaultConfigReferences)) + return fmt.Errorf("expected %v default config reference, actual: %v", len(expect), len(actual.Status.DefaultConfigReferences)) } for i, e := range expect { actualConfigReference := actual.Status.DefaultConfigReferences[i] if !apiequality.Semantic.DeepEqual(actualConfigReference, e) { - return fmt.Errorf("Expected default config is %v, actual: %v", e, actualConfigReference) + return fmt.Errorf("expected default config is %v, actual: %v", e, actualConfigReference) } } @@ -148,14 +148,14 @@ func assertClusterManagementAddOnInstallProgression(name string, expect ...addon } if len(actual.Status.InstallProgressions) != len(expect) { - return fmt.Errorf("Expected %v install progression, actual: %v", len(expect), len(actual.Status.InstallProgressions)) + return fmt.Errorf("expected %v install progression, actual: %v", len(expect), len(actual.Status.InstallProgressions)) } for i, e := range expect { actualInstallProgression := actual.Status.InstallProgressions[i] if !apiequality.Semantic.DeepEqual(actualInstallProgression.ConfigReferences, e.ConfigReferences) { - return fmt.Errorf("Expected InstallProgression.ConfigReferences is %v, actual: %v", e.ConfigReferences, actualInstallProgression.ConfigReferences) + return fmt.Errorf("expected InstallProgression.ConfigReferences is %v, actual: %v", e.ConfigReferences, actualInstallProgression.ConfigReferences) } } @@ -178,7 +178,7 @@ func assertClusterManagementAddOnConditions(name string, expect ...metav1.Condit cond.Status != ec.Status || cond.Reason != ec.Reason || cond.Message != ec.Message { - return fmt.Errorf("Expected cma progressing condition is %v, actual: %v", ec, cond) + return fmt.Errorf("expected cma progressing condition is %v, actual: %v", ec, cond) } } @@ -196,20 +196,18 @@ func assertManagedClusterAddOnConfigReferences(name, namespace string, expect .. 
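The "Expected ..." to "expected ..." changes above follow the standard Go error-string convention (stylecheck ST1005): error messages are routinely wrapped into longer chains, so they should not start with a capital letter or end with punctuation. A short sketch of why the lower-case form reads better once wrapped (names and messages are illustrative):

package main

import (
	"errors"
	"fmt"
)

func loadConfig(name string) error {
	// Not "Expected 2 config references, ..."; the message may end up
	// mid-sentence after wrapping.
	base := errors.New("expected 2 config references, actual: 0")
	return fmt.Errorf("load config %q: %w", name, base)
}

func main() {
	fmt.Println(loadConfig("default"))
	// prints: load config "default": expected 2 config references, actual: 0
}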
} if len(actual.Status.ConfigReferences) != len(expect) { - return fmt.Errorf("Expected %v config reference, actual: %v", len(expect), len(actual.Status.ConfigReferences)) + return fmt.Errorf("expected %v config reference, actual: %v", len(expect), len(actual.Status.ConfigReferences)) } for i, e := range expect { actualConfigReference := actual.Status.ConfigReferences[i] if !apiequality.Semantic.DeepEqual(actualConfigReference, e) { - return fmt.Errorf("Expected mca config reference is %v %v %v, actual: %v %v %v", + return fmt.Errorf("expected mca config reference is %v %v, actual: %v %v", e.DesiredConfig, e.LastAppliedConfig, - e.LastObservedGeneration, actualConfigReference.DesiredConfig, actualConfigReference.LastAppliedConfig, - actualConfigReference.LastObservedGeneration, ) } } @@ -233,7 +231,7 @@ func assertManagedClusterAddOnConditions(name, namespace string, expect ...metav cond.Status != ec.Status || cond.Reason != ec.Reason || cond.Message != ec.Message { - return fmt.Errorf("Expected addon progressing condition is %v, actual: %v", ec, cond) + return fmt.Errorf("expected addon progressing condition is %v, actual: %v", ec, cond) } } diff --git a/test/integration/operator/clustermanager_hosted_test.go b/test/integration/operator/clustermanager_hosted_test.go index 56cc17911..1b0a4e789 100644 --- a/test/integration/operator/clustermanager_hosted_test.go +++ b/test/integration/operator/clustermanager_hosted_test.go @@ -97,6 +97,7 @@ var _ = ginkgo.Describe("ClusterManager Hosted Mode", func() { gomega.Expect(err).To(gomega.BeNil()) // Create the external hub kubeconfig secret + // #nosec G101 hubKubeconfigSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: helpers.ExternalHubKubeConfig, @@ -233,14 +234,16 @@ var _ = ginkgo.Describe("ClusterManager Hosted Mode", func() { // Check service gomega.Eventually(func() error { - if _, err := hostedKubeClient.CoreV1().Services(hubNamespaceHosted).Get(hostedCtx, "cluster-manager-registration-webhook", metav1.GetOptions{}); err != nil { + if _, err := hostedKubeClient.CoreV1().Services(hubNamespaceHosted).Get( + hostedCtx, "cluster-manager-registration-webhook", metav1.GetOptions{}); err != nil { return err } return nil }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) gomega.Eventually(func() error { - if _, err := hostedKubeClient.CoreV1().Services(hubNamespaceHosted).Get(hostedCtx, "cluster-manager-work-webhook", metav1.GetOptions{}); err != nil { + if _, err := hostedKubeClient.CoreV1().Services(hubNamespaceHosted).Get( + hostedCtx, "cluster-manager-work-webhook", metav1.GetOptions{}); err != nil { return err } return nil @@ -263,6 +266,7 @@ var _ = ginkgo.Describe("ClusterManager Hosted Mode", func() { return nil }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + //#nosec G101 workWebhookSecret := "work-webhook-serving-cert" gomega.Eventually(func() error { s, err := hostedKubeClient.CoreV1().Secrets(hubNamespaceHosted).Get(hostedCtx, workWebhookSecret, metav1.GetOptions{}) @@ -593,7 +597,8 @@ var _ = ginkgo.Describe("ClusterManager Hosted Mode", func() { // Check deployment gomega.Eventually(func() error { - if _, err := hostedKubeClient.AppsV1().Deployments(hubNamespaceHosted).Get(context.Background(), hubAddOnManagerDeployment, metav1.GetOptions{}); err != nil { + if _, err := hostedKubeClient.AppsV1().Deployments(hubNamespaceHosted).Get( + context.Background(), hubAddOnManagerDeployment, metav1.GetOptions{}); err != nil { return err } return nil @@ -642,7 +647,7 @@ var _ = 
ginkgo.Describe("ClusterManager Hosted Mode", func() { if err != nil { return err } - clusterManager.Spec.RegistrationImagePullSpec = "testimage:latest" + clusterManager.Spec.RegistrationImagePullSpec = testImage _, err = hostedOperatorClient.OperatorV1().ClusterManagers().Update(hostedCtx, clusterManager, metav1.UpdateOptions{}) return err }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) @@ -653,7 +658,7 @@ var _ = ginkgo.Describe("ClusterManager Hosted Mode", func() { return err } gomega.Expect(len(actual.Spec.Template.Spec.Containers)).Should(gomega.Equal(1)) - if actual.Spec.Template.Spec.Containers[0].Image != "testimage:latest" { + if actual.Spec.Template.Spec.Containers[0].Image != testImage { return fmt.Errorf("expected image to be testimage:latest but get %s", actual.Spec.Template.Spec.Containers[0].Image) } return nil @@ -697,10 +702,10 @@ var _ = ginkgo.Describe("ClusterManager Hosted Mode", func() { return err } clusterManager.Spec.NodePlacement = operatorapiv1.NodePlacement{ - NodeSelector: map[string]string{"node-role.kubernetes.io/infra": ""}, + NodeSelector: map[string]string{infraNodeLabel: ""}, Tolerations: []corev1.Toleration{ { - Key: "node-role.kubernetes.io/infra", + Key: infraNodeLabel, Operator: corev1.TolerationOpExists, Effect: corev1.TaintEffectNoSchedule, }, @@ -719,14 +724,14 @@ var _ = ginkgo.Describe("ClusterManager Hosted Mode", func() { if len(actual.Spec.Template.Spec.NodeSelector) == 0 { return fmt.Errorf("length of node selector should not equals to 0") } - if _, ok := actual.Spec.Template.Spec.NodeSelector["node-role.kubernetes.io/infra"]; !ok { + if _, ok := actual.Spec.Template.Spec.NodeSelector[infraNodeLabel]; !ok { return fmt.Errorf("node-role.kubernetes.io/infra not exist") } if len(actual.Spec.Template.Spec.Tolerations) == 0 { return fmt.Errorf("length of node selecor should not equals to 0") } for _, toleration := range actual.Spec.Template.Spec.Tolerations { - if toleration.Key == "node-role.kubernetes.io/infra" { + if toleration.Key == infraNodeLabel { return nil } } @@ -782,7 +787,7 @@ var _ = ginkgo.Describe("ClusterManager Hosted Mode", func() { if err != nil { return err } - if registrationoDeployment.Spec.Template.Spec.Containers[0].Image != "testimage:latest" { + if registrationoDeployment.Spec.Template.Spec.Containers[0].Image != testImage { return fmt.Errorf("image should be testimage:latest, but get %s", registrationoDeployment.Spec.Template.Spec.Containers[0].Image) } return nil diff --git a/test/integration/operator/clustermanager_test.go b/test/integration/operator/clustermanager_test.go index 2717b0b05..42d30b3d5 100644 --- a/test/integration/operator/clustermanager_test.go +++ b/test/integration/operator/clustermanager_test.go @@ -24,6 +24,11 @@ import ( "open-cluster-management.io/ocm/test/integration/util" ) +const ( + testImage = "testimage:latest" + infraNodeLabel = "node-role.kubernetes.io/infra" +) + func startHubOperator(ctx context.Context, mode operatorapiv1.InstallMode) { certrotation.SigningCertValidity = time.Second * 30 certrotation.TargetCertValidity = time.Second * 10 @@ -236,6 +241,7 @@ var _ = ginkgo.Describe("ClusterManager Default Mode", func() { return nil }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + //#nosec G101 workWebhookSecret := "work-webhook-serving-cert" gomega.Eventually(func() error { s, err := kubeClient.CoreV1().Secrets(hubNamespace).Get(context.Background(), workWebhookSecret, metav1.GetOptions{}) @@ -605,7 +611,7 @@ var _ = ginkgo.Describe("ClusterManager 
Default Mode", func() { clusterManager, err := operatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - clusterManager.Spec.RegistrationImagePullSpec = "testimage:latest" + clusterManager.Spec.RegistrationImagePullSpec = testImage _, err = operatorClient.OperatorV1().ClusterManagers().Update(context.Background(), clusterManager, metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -615,7 +621,7 @@ var _ = ginkgo.Describe("ClusterManager Default Mode", func() { return err } gomega.Expect(len(actual.Spec.Template.Spec.Containers)).Should(gomega.Equal(1)) - if actual.Spec.Template.Spec.Containers[0].Image != "testimage:latest" { + if actual.Spec.Template.Spec.Containers[0].Image != testImage { return fmt.Errorf("expected image to be testimage:latest but get %s", actual.Spec.Template.Spec.Containers[0].Image) } return nil @@ -659,10 +665,10 @@ var _ = ginkgo.Describe("ClusterManager Default Mode", func() { return err } clusterManager.Spec.NodePlacement = operatorapiv1.NodePlacement{ - NodeSelector: map[string]string{"node-role.kubernetes.io/infra": ""}, + NodeSelector: map[string]string{infraNodeLabel: ""}, Tolerations: []corev1.Toleration{ { - Key: "node-role.kubernetes.io/infra", + Key: infraNodeLabel, Operator: corev1.TolerationOpExists, Effect: corev1.TaintEffectNoSchedule, }, @@ -681,14 +687,14 @@ var _ = ginkgo.Describe("ClusterManager Default Mode", func() { if len(actual.Spec.Template.Spec.NodeSelector) == 0 { return fmt.Errorf("length of node selector should not equals to 0") } - if _, ok := actual.Spec.Template.Spec.NodeSelector["node-role.kubernetes.io/infra"]; !ok { + if _, ok := actual.Spec.Template.Spec.NodeSelector[infraNodeLabel]; !ok { return fmt.Errorf("node-role.kubernetes.io/infra not exist") } if len(actual.Spec.Template.Spec.Tolerations) == 0 { return fmt.Errorf("length of node selecor should not equals to 0") } for _, toleration := range actual.Spec.Template.Spec.Tolerations { - if toleration.Key == "node-role.kubernetes.io/infra" { + if toleration.Key == infraNodeLabel { return nil } } @@ -716,7 +722,7 @@ var _ = ginkgo.Describe("ClusterManager Default Mode", func() { if err != nil { return err } - if registrationoDeployment.Spec.Template.Spec.Containers[0].Image != "testimage:latest" { + if registrationoDeployment.Spec.Template.Spec.Containers[0].Image != testImage { return fmt.Errorf("image should be testimage:latest, but get %s", registrationoDeployment.Spec.Template.Spec.Containers[0].Image) } return nil diff --git a/test/integration/operator/integration_suite_test.go b/test/integration/operator/integration_suite_test.go index a282ee6b8..20582885e 100644 --- a/test/integration/operator/integration_suite_test.go +++ b/test/integration/operator/integration_suite_test.go @@ -227,7 +227,7 @@ func ServiceAccountCtl(ctx context.Context, kubeClient kubernetes.Interface) { sa, ok := event.Object.(*corev1.ServiceAccount) if !ok { - klog.Infof("not a service account, contine") + klog.Infof("not a service account, continue") continue } diff --git a/test/integration/operator/klusterlet_hosted_test.go b/test/integration/operator/klusterlet_hosted_test.go index 6c41228bf..8399ffa67 100644 --- a/test/integration/operator/klusterlet_hosted_test.go +++ b/test/integration/operator/klusterlet_hosted_test.go @@ -116,7 +116,8 @@ var _ = ginkgo.Describe("Klusterlet Hosted mode", func() { fmt.Printf("related resources are %v\n", actual.Status.RelatedResources) - 
// 11 managed static manifests + 11 management static manifests + 2CRDs + 2 deployments(2 duplicated CRDs, but status also recorded in the klusterlet's status) + // 11 managed static manifests + 11 management static manifests + + // 2CRDs + 2 deployments(2 duplicated CRDs, but status also recorded in the klusterlet's status) if len(actual.Status.RelatedResources) != 26 { return fmt.Errorf("should get 26 relatedResources, actual got %v", len(actual.Status.RelatedResources)) } @@ -125,13 +126,15 @@ var _ = ginkgo.Describe("Klusterlet Hosted mode", func() { // Check CRDs gomega.Eventually(func() bool { - if _, err := hostedAPIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), "appliedmanifestworks.work.open-cluster-management.io", metav1.GetOptions{}); err != nil { + if _, err := hostedAPIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get( + context.Background(), "appliedmanifestworks.work.open-cluster-management.io", metav1.GetOptions{}); err != nil { return false } return true }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) gomega.Eventually(func() bool { - if _, err := hostedAPIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), "clusterclaims.cluster.open-cluster-management.io", metav1.GetOptions{}); err != nil { + if _, err := hostedAPIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get( + context.Background(), "clusterclaims.cluster.open-cluster-management.io", metav1.GetOptions{}); err != nil { return false } return true @@ -139,7 +142,8 @@ var _ = ginkgo.Describe("Klusterlet Hosted mode", func() { // Check clusterrole/clusterrolebinding gomega.Eventually(func() bool { - if _, err := hostedKubeClient.RbacV1().ClusterRoles().Get(context.Background(), registrationManagedRoleName, metav1.GetOptions{}); err != nil { + if _, err := hostedKubeClient.RbacV1().ClusterRoles().Get( + context.Background(), registrationManagedRoleName, metav1.GetOptions{}); err != nil { return false } return true diff --git a/test/integration/operator/klusterlet_singleton_test.go b/test/integration/operator/klusterlet_singleton_test.go index 5fddad8e5..330779842 100644 --- a/test/integration/operator/klusterlet_singleton_test.go +++ b/test/integration/operator/klusterlet_singleton_test.go @@ -106,13 +106,15 @@ var _ = ginkgo.Describe("Klusterlet Singleton mode", func() { // Check CRDs gomega.Eventually(func() bool { - if _, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), "appliedmanifestworks.work.open-cluster-management.io", metav1.GetOptions{}); err != nil { + if _, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get( + context.Background(), "appliedmanifestworks.work.open-cluster-management.io", metav1.GetOptions{}); err != nil { return false } return true }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) gomega.Eventually(func() bool { - if _, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), "clusterclaims.cluster.open-cluster-management.io", metav1.GetOptions{}); err != nil { + if _, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get( + context.Background(), "clusterclaims.cluster.open-cluster-management.io", metav1.GetOptions{}); err != nil { return false } return true diff --git a/test/integration/operator/klusterlet_test.go b/test/integration/operator/klusterlet_test.go index 4c02dcb66..f67184185 100644 --- 
a/test/integration/operator/klusterlet_test.go +++ b/test/integration/operator/klusterlet_test.go @@ -137,13 +137,15 @@ var _ = ginkgo.Describe("Klusterlet", func() { // Check CRDs gomega.Eventually(func() bool { - if _, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), "appliedmanifestworks.work.open-cluster-management.io", metav1.GetOptions{}); err != nil { + if _, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get( + context.Background(), "appliedmanifestworks.work.open-cluster-management.io", metav1.GetOptions{}); err != nil { return false } return true }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) gomega.Eventually(func() bool { - if _, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), "clusterclaims.cluster.open-cluster-management.io", metav1.GetOptions{}); err != nil { + if _, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get( + context.Background(), "clusterclaims.cluster.open-cluster-management.io", metav1.GetOptions{}); err != nil { return false } return true @@ -189,7 +191,8 @@ var _ = ginkgo.Describe("Klusterlet", func() { return true }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) gomega.Eventually(func() bool { - if _, err := kubeClient.RbacV1().RoleBindings(klusterletNamespace).Get(context.Background(), registrationManagementRoleName, metav1.GetOptions{}); err != nil { + if _, err := kubeClient.RbacV1().RoleBindings(klusterletNamespace).Get( + context.Background(), registrationManagementRoleName, metav1.GetOptions{}); err != nil { return false } return true @@ -281,10 +284,10 @@ var _ = ginkgo.Describe("Klusterlet", func() { } KlusterletObj.Spec.NodePlacement = operatorapiv1.NodePlacement{ - NodeSelector: map[string]string{"node-role.kubernetes.io/infra": ""}, + NodeSelector: map[string]string{infraNodeLabel: ""}, Tolerations: []corev1.Toleration{ { - Key: "node-role.kubernetes.io/infra", + Key: infraNodeLabel, Operator: corev1.TolerationOpExists, Effect: corev1.TaintEffectNoSchedule, }, @@ -303,14 +306,14 @@ var _ = ginkgo.Describe("Klusterlet", func() { if len(deployment.Spec.Template.Spec.NodeSelector) == 0 { return false } - if _, ok := deployment.Spec.Template.Spec.NodeSelector["node-role.kubernetes.io/infra"]; !ok { + if _, ok := deployment.Spec.Template.Spec.NodeSelector[infraNodeLabel]; !ok { return false } if len(deployment.Spec.Template.Spec.Tolerations) == 0 { return false } for _, toleration := range deployment.Spec.Template.Spec.Tolerations { - if toleration.Key == "node-role.kubernetes.io/infra" { + if toleration.Key == infraNodeLabel { return true } } @@ -703,7 +706,8 @@ var _ = ginkgo.Describe("Klusterlet", func() { _, err = kubeClient.CoreV1().Secrets(klusterletNamespace).Create(context.Background(), bootStrapSecret, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - util.AssertKlusterletCondition(klusterlet.Name, operatorClient, "HubConnectionDegraded", "BootstrapSecretFunctional,HubKubeConfigMissing", metav1.ConditionTrue) + util.AssertKlusterletCondition( + klusterlet.Name, operatorClient, "HubConnectionDegraded", "BootstrapSecretFunctional,HubKubeConfigMissing", metav1.ConditionTrue) hubSecret, err := kubeClient.CoreV1().Secrets(klusterletNamespace).Get(context.Background(), helpers.HubKubeConfig, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -721,7 +725,8 @@ var _ = ginkgo.Describe("Klusterlet", func() { 
util.AssertKlusterletCondition(klusterlet.Name, operatorClient, "WorkDesiredDegraded", "UnavailablePods", metav1.ConditionTrue) // Update replica of deployment - registrationDeployment, err := kubeClient.AppsV1().Deployments(klusterletNamespace).Get(context.Background(), registrationDeploymentName, metav1.GetOptions{}) + registrationDeployment, err := kubeClient.AppsV1().Deployments(klusterletNamespace).Get( + context.Background(), registrationDeploymentName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) registrationDeployment = registrationDeployment.DeepCopy() registrationDeployment.Status.AvailableReplicas = 3 @@ -746,7 +751,10 @@ var _ = ginkgo.Describe("Klusterlet", func() { hubSecret.Data["kubeconfig"] = util.NewKubeConfig(&rest.Config{Host: "https://nohost"}) _, err = kubeClient.CoreV1().Secrets(klusterletNamespace).Update(context.Background(), hubSecret, metav1.UpdateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - util.AssertKlusterletCondition(klusterlet.Name, operatorClient, "HubConnectionDegraded", "BootstrapSecretFunctional,HubKubeConfigSecretMissing", metav1.ConditionTrue) + util.AssertKlusterletCondition( + klusterlet.Name, operatorClient, + "HubConnectionDegraded", + "BootstrapSecretFunctional,HubKubeConfigSecretMissing", metav1.ConditionTrue) }) ginkgo.It("should have correct available conditions", func() { @@ -760,7 +768,8 @@ var _ = ginkgo.Describe("Klusterlet", func() { return true }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) - registrationDeployment, err := kubeClient.AppsV1().Deployments(klusterletNamespace).Get(context.Background(), registrationDeploymentName, metav1.GetOptions{}) + registrationDeployment, err := kubeClient.AppsV1().Deployments(klusterletNamespace).Get( + context.Background(), registrationDeploymentName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Eventually(func() bool { @@ -835,10 +844,12 @@ var _ = ginkgo.Describe("Klusterlet", func() { var registrationDeployment *appsv1.Deployment var workDeployment *appsv1.Deployment gomega.Eventually(func() bool { - if registrationDeployment, err = kubeClient.AppsV1().Deployments(klusterletNamespace).Get(context.Background(), registrationDeploymentName, metav1.GetOptions{}); err != nil { + if registrationDeployment, err = kubeClient.AppsV1().Deployments(klusterletNamespace).Get( + context.Background(), registrationDeploymentName, metav1.GetOptions{}); err != nil { return false } - if workDeployment, err = kubeClient.AppsV1().Deployments(klusterletNamespace).Get(context.Background(), workDeploymentName, metav1.GetOptions{}); err != nil { + if workDeployment, err = kubeClient.AppsV1().Deployments(klusterletNamespace).Get( + context.Background(), workDeploymentName, metav1.GetOptions{}); err != nil { return false } return true @@ -854,7 +865,8 @@ var _ = ginkgo.Describe("Klusterlet", func() { // Make sure the deployments are deleted and recreated gomega.Eventually(func() bool { - lastRegistrationDeployment, err := kubeClient.AppsV1().Deployments(klusterletNamespace).Get(context.Background(), registrationDeploymentName, metav1.GetOptions{}) + lastRegistrationDeployment, err := kubeClient.AppsV1().Deployments(klusterletNamespace).Get( + context.Background(), registrationDeploymentName, metav1.GetOptions{}) if err != nil { return false } @@ -893,10 +905,12 @@ var _ = ginkgo.Describe("Klusterlet", func() { var registrationDeployment *appsv1.Deployment var workDeployment *appsv1.Deployment gomega.Eventually(func() bool { - if 
registrationDeployment, err = kubeClient.AppsV1().Deployments(klusterletNamespace).Get(context.Background(), registrationDeploymentName, metav1.GetOptions{}); err != nil { + if registrationDeployment, err = kubeClient.AppsV1().Deployments(klusterletNamespace).Get( + context.Background(), registrationDeploymentName, metav1.GetOptions{}); err != nil { return false } - if workDeployment, err = kubeClient.AppsV1().Deployments(klusterletNamespace).Get(context.Background(), workDeploymentName, metav1.GetOptions{}); err != nil { + if workDeployment, err = kubeClient.AppsV1().Deployments(klusterletNamespace).Get( + context.Background(), workDeploymentName, metav1.GetOptions{}); err != nil { return false } return true @@ -920,7 +934,8 @@ var _ = ginkgo.Describe("Klusterlet", func() { // Make sure the deployments are deleted and recreated gomega.Eventually(func() bool { - lastRegistrationDeployment, err := kubeClient.AppsV1().Deployments(klusterletNamespace).Get(context.Background(), registrationDeploymentName, metav1.GetOptions{}) + lastRegistrationDeployment, err := kubeClient.AppsV1().Deployments(klusterletNamespace).Get( + context.Background(), registrationDeploymentName, metav1.GetOptions{}) if err != nil { return false } @@ -952,7 +967,7 @@ var _ = ginkgo.Describe("Klusterlet", func() { klusterlet.Spec.RegistrationConfiguration = nil klusterlet.Spec.WorkConfiguration = &operatorapiv1.WorkConfiguration{} - ginkgo.By("Create the klusterlet with RegistrationConfiguration nil and WorkConfiguration emtpy") + ginkgo.By("Create the klusterlet with RegistrationConfiguration nil and WorkConfiguration empty") _, err := operatorClient.OperatorV1().Klusterlets().Create(context.Background(), klusterlet, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/test/integration/placement/assertion_test.go b/test/integration/placement/assertion_test.go index 15a7bd15d..260187b9e 100644 --- a/test/integration/placement/assertion_test.go +++ b/test/integration/placement/assertion_test.go @@ -37,7 +37,9 @@ func assertCreatingPlacementWithDecision(placement *clusterapiv1beta1.Placement, assertPlacementDecisionCreated(newplacement) assertPlacementDecisionNumbers(newplacement.Name, newplacement.Namespace, numberOfDecisionClusters, numberOfPlacementDecisions) if placement.Spec.NumberOfClusters != nil { - assertPlacementConditionSatisfied(newplacement.Name, newplacement.Namespace, numberOfDecisionClusters, numberOfDecisionClusters == int(*placement.Spec.NumberOfClusters)) + assertPlacementConditionSatisfied( + newplacement.Name, newplacement.Namespace, numberOfDecisionClusters, + numberOfDecisionClusters == int(*placement.Spec.NumberOfClusters)) } } @@ -48,7 +50,8 @@ func assertPatchingPlacementSpec(newPlacement *clusterapiv1beta1.Placement) { clusterClient.ClusterV1beta1().Placements(newPlacement.Namespace)) gomega.Eventually(func() error { - oldPlacement, err := clusterClient.ClusterV1beta1().Placements(newPlacement.Namespace).Get(context.Background(), newPlacement.Name, metav1.GetOptions{}) + oldPlacement, err := clusterClient.ClusterV1beta1().Placements(newPlacement.Namespace).Get( + context.Background(), newPlacement.Name, metav1.GetOptions{}) if err != nil { return err } @@ -146,8 +149,9 @@ func assertCreatingPlacementDecision(name, namespace string, clusterNames []stri }, }, } - placementDecision, err := clusterClient.ClusterV1beta1().PlacementDecisions(namespace).Create(context.Background(), placementDecision, metav1.CreateOptions{}) - + placementDecision, err := 
clusterClient.ClusterV1beta1().PlacementDecisions(namespace).Create( + context.Background(), placementDecision, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) var clusterDecisions []clusterapiv1beta1.ClusterDecision for _, clusterName := range clusterNames { clusterDecisions = append(clusterDecisions, clusterapiv1beta1.ClusterDecision{ @@ -156,7 +160,8 @@ func assertCreatingPlacementDecision(name, namespace string, clusterNames []stri } placementDecision.Status.Decisions = clusterDecisions - placementDecision, err = clusterClient.ClusterV1beta1().PlacementDecisions(namespace).UpdateStatus(context.Background(), placementDecision, metav1.UpdateOptions{}) + placementDecision, err = clusterClient.ClusterV1beta1().PlacementDecisions(namespace).UpdateStatus( + context.Background(), placementDecision, metav1.UpdateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) } diff --git a/test/integration/placement/placement_test.go b/test/integration/placement/placement_test.go index d76f09704..57d4d209c 100644 --- a/test/integration/placement/placement_test.go +++ b/test/integration/placement/placement_test.go @@ -3,7 +3,6 @@ package placement import ( "context" "fmt" - "time" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" @@ -16,15 +15,13 @@ import ( clusterapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" controllers "open-cluster-management.io/ocm/pkg/placement/controllers" - "open-cluster-management.io/ocm/pkg/placement/controllers/scheduling" testinghelpers "open-cluster-management.io/ocm/pkg/placement/helpers/testing" "open-cluster-management.io/ocm/test/integration/util" ) const ( - clusterSetLabel = "cluster.open-cluster-management.io/clusterset" - placementLabel = "cluster.open-cluster-management.io/placement" - maxNumOfClusterDecisions = 100 + clusterSetLabel = "cluster.open-cluster-management.io/clusterset" + placementLabel = "cluster.open-cluster-management.io/placement" ) var _ = ginkgo.Describe("Placement", func() { @@ -56,7 +53,6 @@ var _ = ginkgo.Describe("Placement", func() { // start controller manager var ctx context.Context ctx, cancel = context.WithCancel(context.Background()) - scheduling.ResyncInterval = time.Second * 5 go controllers.RunControllerManager(ctx, &controllercmd.ControllerContext{ KubeConfig: restConfig, EventRecorder: util.NewIntegrationTestEventRecorder("integration"), @@ -103,13 +99,17 @@ var _ = ginkgo.Describe("Placement", func() { gomega.Expect(err).ToNot(gomega.HaveOccurred()) assertPlacementDecisionCreated(placement) assertPlacementDecisionNumbers(placementName, namespace, 5, 1) - assertPlacementStatusDecisionGroups(placementName, namespace, []clusterapiv1beta1.DecisionGroupStatus{{Decisions: []string{placementName + "-decision-0"}, ClustersCount: 5}}) + assertPlacementStatusDecisionGroups( + placementName, namespace, + []clusterapiv1beta1.DecisionGroupStatus{{Decisions: []string{placementName + "-decision-0"}, ClustersCount: 5}}) }) ginkgo.It("Should create empty placementdecision when no cluster selected", func() { placement := testinghelpers.NewPlacement(namespace, placementName).Build() assertCreatingPlacementWithDecision(placement, 0, 1) - assertPlacementStatusDecisionGroups(placementName, namespace, []clusterapiv1beta1.DecisionGroupStatus{{Decisions: []string{placementName + "-decision-0"}, ClustersCount: 0}}) + assertPlacementStatusDecisionGroups( + placementName, namespace, + []clusterapiv1beta1.DecisionGroupStatus{{Decisions: []string{placementName + "-decision-0"}, ClustersCount: 0}}) }) 
ginkgo.It("Should create multiple placementdecisions once scheduled", func() { @@ -120,7 +120,9 @@ var _ = ginkgo.Describe("Placement", func() { nod := 101 assertPlacementDecisionNumbers(placementName, namespace, nod, 2) - assertPlacementStatusDecisionGroups(placementName, namespace, []clusterapiv1beta1.DecisionGroupStatus{{Decisions: []string{placementName + "-decision-0", placementName + "-decision-1"}, ClustersCount: 101}}) + assertPlacementStatusDecisionGroups( + placementName, namespace, + []clusterapiv1beta1.DecisionGroupStatus{{Decisions: []string{placementName + "-decision-0", placementName + "-decision-1"}, ClustersCount: 101}}) assertPlacementConditionSatisfied(placementName, namespace, nod, true) }) @@ -161,7 +163,9 @@ var _ = ginkgo.Describe("Placement", func() { placement.Spec.Predicates = predicates assertPatchingPlacementSpec(placement) assertPlacementDecisionNumbers(placementName, namespace, 3, 1) - assertPlacementStatusDecisionGroups(placementName, namespace, []clusterapiv1beta1.DecisionGroupStatus{{Decisions: []string{placementName + "-decision-0"}, ClustersCount: 3}}) + assertPlacementStatusDecisionGroups( + placementName, namespace, + []clusterapiv1beta1.DecisionGroupStatus{{Decisions: []string{placementName + "-decision-0"}, ClustersCount: 3}}) ginkgo.By("change the predicates") // change the predicates @@ -424,8 +428,9 @@ var _ = ginkgo.Describe("Placement", func() { assertDeletingClusters(noncanary[1:]...) assertDeletingClusterSet("global") assertPlacementDecisionNumbers(placementName, namespace, 0, 1) - assertPlacementStatusDecisionGroups(placementName, namespace, []clusterapiv1beta1.DecisionGroupStatus{{Decisions: []string{placementName + "-decision-0"}, ClustersCount: 0}}) - + assertPlacementStatusDecisionGroups( + placementName, namespace, + []clusterapiv1beta1.DecisionGroupStatus{{Decisions: []string{placementName + "-decision-0"}, ClustersCount: 0}}) }) ginkgo.It("Should schedule successfully once clusters belong to global(empty labelselector) clusterset are added/deleted)", func() { @@ -552,11 +557,5 @@ var _ = ginkgo.Describe("Placement", func() { assertPlacementDecisionNumbers(placementName, namespace, 5, 1) assertPlacementConditionSatisfied(placementName, namespace, 5, false) }) - }) }) - -func noc(n int) *int32 { - noc := int32(n) - return &noc -} diff --git a/test/integration/placement/prioritizer_test.go b/test/integration/placement/prioritizer_test.go index cbc7066a7..45beefaa2 100644 --- a/test/integration/placement/prioritizer_test.go +++ b/test/integration/placement/prioritizer_test.go @@ -3,7 +3,6 @@ package placement import ( "context" "fmt" - "time" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" @@ -15,7 +14,6 @@ import ( clusterapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" controllers "open-cluster-management.io/ocm/pkg/placement/controllers" - "open-cluster-management.io/ocm/pkg/placement/controllers/scheduling" testinghelpers "open-cluster-management.io/ocm/pkg/placement/helpers/testing" "open-cluster-management.io/ocm/test/integration/util" ) @@ -47,7 +45,6 @@ var _ = ginkgo.Describe("Prioritizers", func() { // start controller manager var ctx context.Context ctx, cancel = context.WithCancel(context.Background()) - scheduling.ResyncInterval = time.Second * 5 go controllers.RunControllerManager(ctx, &controllercmd.ControllerContext{ KubeConfig: restConfig, EventRecorder: util.NewIntegrationTestEventRecorder("integration"), diff --git a/test/integration/placement/toleration_test.go 
b/test/integration/placement/toleration_test.go index 22fdda160..c807de9be 100644 --- a/test/integration/placement/toleration_test.go +++ b/test/integration/placement/toleration_test.go @@ -16,7 +16,6 @@ import ( clusterapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" controllers "open-cluster-management.io/ocm/pkg/placement/controllers" - "open-cluster-management.io/ocm/pkg/placement/controllers/scheduling" testinghelpers "open-cluster-management.io/ocm/pkg/placement/helpers/testing" "open-cluster-management.io/ocm/test/integration/util" ) @@ -46,7 +45,6 @@ var _ = ginkgo.Describe("TaintToleration", func() { // start controller manager var ctx context.Context ctx, cancel = context.WithCancel(context.Background()) - scheduling.ResyncInterval = time.Second * 5 go controllers.RunControllerManager(ctx, &controllercmd.ControllerContext{ KubeConfig: restConfig, EventRecorder: util.NewIntegrationTestEventRecorder("integration"), diff --git a/test/integration/registration/addon_lease_test.go b/test/integration/registration/addon_lease_test.go index f404dcece..108a0dfdb 100644 --- a/test/integration/registration/addon_lease_test.go +++ b/test/integration/registration/addon_lease_test.go @@ -22,6 +22,8 @@ import ( "open-cluster-management.io/ocm/test/integration/util" ) +const clusterCleanFinalizer = "cluster.open-cluster-management.io/api-resource-cleanup" + var _ = ginkgo.Describe("Addon Lease Resync", func() { var managedClusterName, hubKubeconfigSecret, hubKubeconfigDir, addOnName string var err error @@ -54,7 +56,7 @@ var _ = ginkgo.Describe("Addon Lease Resync", func() { return false } - if spokeCluster.Finalizers[0] != "cluster.open-cluster-management.io/api-resource-cleanup" { + if spokeCluster.Finalizers[0] != clusterCleanFinalizer { return false } @@ -74,8 +76,8 @@ var _ = ginkgo.Describe("Addon Lease Resync", func() { if err != nil { return false } - accpeted := meta.FindStatusCondition(spokeCluster.Status.Conditions, clusterv1.ManagedClusterConditionHubAccepted) - return accpeted != nil + accepted := meta.FindStatusCondition(spokeCluster.Status.Conditions, clusterv1.ManagedClusterConditionHubAccepted) + return accepted != nil }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) // the hub kubeconfig secret should be filled after the csr is approved diff --git a/test/integration/registration/addon_registration_test.go b/test/integration/registration/addon_registration_test.go index 67bdc6c6f..e9598d4a7 100644 --- a/test/integration/registration/addon_registration_test.go +++ b/test/integration/registration/addon_registration_test.go @@ -84,7 +84,7 @@ var _ = ginkgo.Describe("Addon Registration", func() { return false } - if spokeCluster.Finalizers[0] != "cluster.open-cluster-management.io/api-resource-cleanup" { + if spokeCluster.Finalizers[0] != clusterCleanFinalizer { return false } @@ -105,8 +105,8 @@ var _ = ginkgo.Describe("Addon Registration", func() { if err != nil { return false } - accpeted := meta.FindStatusCondition(spokeCluster.Status.Conditions, clusterv1.ManagedClusterConditionHubAccepted) - return accpeted != nil + accepted := meta.FindStatusCondition(spokeCluster.Status.Conditions, clusterv1.ManagedClusterConditionHubAccepted) + return accepted != nil }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) // the hub kubeconfig secret should be filled after the csr is approved @@ -162,10 +162,10 @@ var _ = ginkgo.Describe("Addon Registration", func() { return false } _, ok := secret.Data[clientcert.KubeconfigFile] - if !ok && signerName 
== "kubernetes.io/kube-apiserver-client" { + if !ok && signerName == certificates.KubeAPIServerClientSignerName { return false } - if ok && signerName != "kubernetes.io/kube-apiserver-client" { + if ok && signerName != certificates.KubeAPIServerClientSignerName { return false } return true @@ -265,7 +265,7 @@ var _ = ginkgo.Describe("Addon Registration", func() { ginkgo.It("should register addon successfully", func() { assertSuccessClusterBootstrap() - signerName := "kubernetes.io/kube-apiserver-client" + signerName := certificates.KubeAPIServerClientSignerName assertSuccessAddOnBootstrap(signerName) ginkgo.By("Delete the addon and check if secret is gone") @@ -279,7 +279,7 @@ var _ = ginkgo.Describe("Addon Registration", func() { ginkgo.It("should register addon successfully even when the install namespace is not available at the beginning", func() { assertSuccessClusterBootstrap() - signerName := "kubernetes.io/kube-apiserver-client" + signerName := certificates.KubeAPIServerClientSignerName ginkgo.By("Create ManagedClusterAddOn cr with required annotations") // create addon @@ -344,7 +344,7 @@ var _ = ginkgo.Describe("Addon Registration", func() { ginkgo.It("should addon registraton config updated successfully", func() { assertSuccessClusterBootstrap() - signerName := "kubernetes.io/kube-apiserver-client" + signerName := certificates.KubeAPIServerClientSignerName assertSuccessAddOnBootstrap(signerName) // update registration config and change the signer @@ -368,7 +368,7 @@ var _ = ginkgo.Describe("Addon Registration", func() { ginkgo.It("should rotate addon client cert successfully", func() { assertSuccessClusterBootstrap() - signerName := "kubernetes.io/kube-apiserver-client" + signerName := certificates.KubeAPIServerClientSignerName assertSuccessAddOnBootstrap(signerName) secretName := getSecretName(addOnName, signerName) @@ -388,7 +388,7 @@ var _ = ginkgo.Describe("Addon Registration", func() { ginkgo.It("should stop addon client cert update if too frequent", func() { assertSuccessClusterBootstrap() - signerName := "kubernetes.io/kube-apiserver-client" + signerName := certificates.KubeAPIServerClientSignerName assertSuccessAddOnBootstrap(signerName) // update subject for 15 times @@ -435,7 +435,7 @@ var _ = ginkgo.Describe("Addon Registration", func() { }) func getSecretName(addOnName, signerName string) string { - if signerName == "kubernetes.io/kube-apiserver-client" { + if signerName == certificates.KubeAPIServerClientSignerName { return fmt.Sprintf("%s-hub-kubeconfig", addOnName) } return fmt.Sprintf("%s-%s-client-cert", addOnName, strings.ReplaceAll(signerName, "/", "-")) diff --git a/test/integration/registration/certificate_rotation_test.go b/test/integration/registration/certificate_rotation_test.go index 2add0de61..aae2a481f 100644 --- a/test/integration/registration/certificate_rotation_test.go +++ b/test/integration/registration/certificate_rotation_test.go @@ -17,6 +17,7 @@ var _ = ginkgo.Describe("Certificate Rotation", func() { var err error managedClusterName := "rotationtest-spokecluster" + //#nosec G101 hubKubeconfigSecret := "rotationtest-hub-kubeconfig-secret" hubKubeconfigDir := path.Join(util.TestDir, "rotationtest", "hub-kubeconfig") diff --git a/test/integration/registration/disaster_recovery_test.go b/test/integration/registration/disaster_recovery_test.go index 662491ab1..e0615bd5d 100644 --- a/test/integration/registration/disaster_recovery_test.go +++ b/test/integration/registration/disaster_recovery_test.go @@ -32,7 +32,12 @@ import ( ) var _ = 
ginkgo.Describe("Disaster Recovery", func() { - startHub := func(ctx context.Context) (string, kubernetes.Interface, clusterclientset.Interface, addonclientset.Interface, *envtest.Environment, *util.TestAuthn) { + startHub := func(ctx context.Context) ( + string, + kubernetes.Interface, + clusterclientset.Interface, + addonclientset.Interface, + *envtest.Environment, *util.TestAuthn) { apiserver := &envtest.APIServer{} newAuthn := util.NewTestAuthn(path.Join(util.CertDir, "another-ca.crt"), path.Join(util.CertDir, "another-ca.key")) apiserver.SecureServing.Authn = newAuthn @@ -49,7 +54,7 @@ var _ = ginkgo.Describe("Disaster Recovery", func() { gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Expect(cfg).ToNot(gomega.BeNil()) - err = clusterv1.AddToScheme(scheme.Scheme) + err = clusterv1.Install(scheme.Scheme) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // prepare configs @@ -99,7 +104,8 @@ var _ = ginkgo.Describe("Disaster Recovery", func() { return runAgent("addontest", agentOptions, commOptions, spokeCfg) } - assertSuccessClusterBootstrap := func(testNamespace, managedClusterName, hubKubeconfigSecret string, hubKubeClient, spokeKubeClient kubernetes.Interface, hubClusterClient clusterclientset.Interface, auth *util.TestAuthn) { + assertSuccessClusterBootstrap := func(testNamespace, managedClusterName, hubKubeconfigSecret string, + hubKubeClient, spokeKubeClient kubernetes.Interface, hubClusterClient clusterclientset.Interface, auth *util.TestAuthn) { // the spoke cluster and csr should be created after bootstrap ginkgo.By("Check existence of ManagedCluster & CSR") gomega.Eventually(func() bool { @@ -126,7 +132,7 @@ var _ = ginkgo.Describe("Disaster Recovery", func() { return false } - if spokeCluster.Finalizers[0] != "cluster.open-cluster-management.io/api-resource-cleanup" { + if spokeCluster.Finalizers[0] != clusterCleanFinalizer { return false } @@ -216,10 +222,10 @@ var _ = ginkgo.Describe("Disaster Recovery", func() { return false } _, ok := secret.Data[clientcert.KubeconfigFile] - if !ok && signerName == "kubernetes.io/kube-apiserver-client" { + if !ok && signerName == certificates.KubeAPIServerClientSignerName { return false } - if ok && signerName != "kubernetes.io/kube-apiserver-client" { + if ok && signerName != certificates.KubeAPIServerClientSignerName { return false } return true @@ -241,7 +247,10 @@ var _ = ginkgo.Describe("Disaster Recovery", func() { }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) } - assertSuccessAddOnBootstrap := func(managedClusterName, addOnName, signerName string, hubKubeClient, spokeKubeClient kubernetes.Interface, hubClusterClient clusterclientset.Interface, hubAddOnClient addonclientset.Interface) { + assertSuccessAddOnBootstrap := func( + managedClusterName, addOnName, signerName string, + hubKubeClient, spokeKubeClient kubernetes.Interface, + hubClusterClient clusterclientset.Interface, hubAddOnClient addonclientset.Interface) { ginkgo.By("Create ManagedClusterAddOn cr with required annotations") // create addon namespace ns := &corev1.Namespace{ @@ -294,7 +303,7 @@ var _ = ginkgo.Describe("Disaster Recovery", func() { hubKubeconfigSecret := fmt.Sprintf("hub-kubeconfig-secret-%s", suffix) hubKubeconfigDir := path.Join(util.TestDir, fmt.Sprintf("recoverytest-%s", suffix), "hub-kubeconfig") addOnName := fmt.Sprintf("addon-%s", suffix) - signerName := "kubernetes.io/kube-apiserver-client" + signerName := certificates.KubeAPIServerClientSignerName hubKubeClient := kubeClient hubClusterClient := clusterClient diff --git 
a/test/integration/registration/global_managedclusterset_controller_test.go b/test/integration/registration/global_managedclusterset_controller_test.go index 8c6e15f20..399e70082 100644 --- a/test/integration/registration/global_managedclusterset_controller_test.go +++ b/test/integration/registration/global_managedclusterset_controller_test.go @@ -25,7 +25,7 @@ var _ = ginkgo.Describe("GlobalManagedClusterSet", func() { if mcs.ObjectMeta.Name == setcontroller.GlobalManagedClusterSetName && reflect.DeepEqual(mcs.Spec, setcontroller.GlobalManagedClusterSet.Spec) { return nil } - return fmt.Errorf("check not pass!") + return fmt.Errorf("check not pass") }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) }) @@ -52,7 +52,7 @@ var _ = ginkgo.Describe("GlobalManagedClusterSet", func() { if mcs.ObjectMeta.Name == setcontroller.GlobalManagedClusterSetName && equality.Semantic.DeepEqual(mcs.Spec, setcontroller.GlobalManagedClusterSet.Spec) { return nil } - return fmt.Errorf("check not pass!") + return fmt.Errorf("check not pass") }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) }) @@ -81,10 +81,12 @@ var _ = ginkgo.Describe("GlobalManagedClusterSet", func() { if err != nil { return err } - if equality.Semantic.DeepEqual(mcs.Spec, setcontroller.GlobalManagedClusterSet.Spec) && equality.Semantic.DeepEqual(mcs.Annotations, updateMcs.Annotations) && equality.Semantic.DeepEqual(mcs.Labels, updateMcs.Labels) { + if equality.Semantic.DeepEqual(mcs.Spec, setcontroller.GlobalManagedClusterSet.Spec) && + equality.Semantic.DeepEqual(mcs.Annotations, updateMcs.Annotations) && + equality.Semantic.DeepEqual(mcs.Labels, updateMcs.Labels) { return nil } - return fmt.Errorf("check not pass!") + return fmt.Errorf("check not pass") }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) }) @@ -103,7 +105,7 @@ var _ = ginkgo.Describe("GlobalManagedClusterSet", func() { if mcs.ObjectMeta.Name == setcontroller.GlobalManagedClusterSetName && equality.Semantic.DeepEqual(mcs.Spec, setcontroller.GlobalManagedClusterSet.Spec) { return nil } - return fmt.Errorf("check not pass!") + return fmt.Errorf("check not pass") }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) }) diff --git a/test/integration/registration/managedclusterset_test.go b/test/integration/registration/managedclusterset_test.go index 473d31046..5de8acd0f 100644 --- a/test/integration/registration/managedclusterset_test.go +++ b/test/integration/registration/managedclusterset_test.go @@ -12,6 +12,8 @@ import ( clusterv1 "open-cluster-management.io/api/cluster/v1" clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2" + + "open-cluster-management.io/ocm/pkg/registration/hub/managedclusterset" ) var _ = ginkgo.Describe("ManagedClusterSet", func() { @@ -43,7 +45,7 @@ var _ = ginkgo.Describe("ManagedClusterSet", func() { if condition.Status != metav1.ConditionTrue { return false } - if condition.Reason != "NoClusterMatched" { + if condition.Reason != managedclusterset.ReasonNoClusterMatchced { return false } return true @@ -81,7 +83,7 @@ var _ = ginkgo.Describe("ManagedClusterSet", func() { if condition.Status != metav1.ConditionFalse { return false } - if condition.Reason != "ClustersSelected" { + if condition.Reason != managedclusterset.ReasonClusterSelected { return false } return true @@ -125,7 +127,7 @@ var _ = ginkgo.Describe("ManagedClusterSet", func() { if condition.Status != metav1.ConditionFalse { return false } - if condition.Reason != "ClustersSelected" 
{ + if condition.Reason != managedclusterset.ReasonClusterSelected { return false } return true @@ -149,7 +151,7 @@ var _ = ginkgo.Describe("ManagedClusterSet", func() { return fmt.Errorf("clusterset should be empty") } - if cond.Reason != "NoClusterMatched" { + if cond.Reason != managedclusterset.ReasonNoClusterMatchced { return fmt.Errorf("clusterset condition reason not correct, got %q", cond.Reason) } @@ -189,7 +191,7 @@ var _ = ginkgo.Describe("ManagedClusterSet", func() { if condition.Status != metav1.ConditionTrue { return false } - if condition.Reason != "NoClusterMatched" { + if condition.Reason != managedclusterset.ReasonNoClusterMatchced { return false } return true @@ -227,7 +229,7 @@ var _ = ginkgo.Describe("ManagedClusterSet", func() { if condition.Status != metav1.ConditionFalse { return false } - if condition.Reason != "ClustersSelected" { + if condition.Reason != managedclusterset.ReasonClusterSelected { return false } return true @@ -271,7 +273,7 @@ var _ = ginkgo.Describe("ManagedClusterSet", func() { if condition.Status != metav1.ConditionFalse { return false } - if condition.Reason != "ClustersSelected" { + if condition.Reason != managedclusterset.ReasonClusterSelected { return false } return true @@ -295,7 +297,7 @@ var _ = ginkgo.Describe("ManagedClusterSet", func() { return fmt.Errorf("clusterset should be empty") } - if cond.Reason != "NoClusterMatched" { + if cond.Reason != managedclusterset.ReasonNoClusterMatchced { return fmt.Errorf("clusterset condition reason not correct, got %q", cond.Reason) } @@ -339,7 +341,7 @@ var _ = ginkgo.Describe("ManagedClusterSet", func() { if condition.Status != metav1.ConditionTrue { return false } - if condition.Reason != "NoClusterMatched" { + if condition.Reason != managedclusterset.ReasonNoClusterMatchced { return false } return true @@ -377,7 +379,7 @@ var _ = ginkgo.Describe("ManagedClusterSet", func() { if condition.Status != metav1.ConditionFalse { return false } - if condition.Reason != "ClustersSelected" { + if condition.Reason != managedclusterset.ReasonClusterSelected { return false } return true @@ -432,7 +434,7 @@ var _ = ginkgo.Describe("ManagedClusterSet", func() { if condition.Status != metav1.ConditionFalse { return false } - if condition.Reason != "ClustersSelected" { + if condition.Reason != managedclusterset.ReasonClusterSelected { return false } return true @@ -456,7 +458,7 @@ var _ = ginkgo.Describe("ManagedClusterSet", func() { return fmt.Errorf("clusterset should be empty") } - if cond.Reason != "NoClusterMatched" { + if cond.Reason != managedclusterset.ReasonNoClusterMatchced { return fmt.Errorf("clusterset condition reason not correct, got %q", cond.Reason) } @@ -513,7 +515,7 @@ var _ = ginkgo.Describe("ManagedClusterSet", func() { if condition.Status != metav1.ConditionFalse { return false } - if condition.Reason != "ClustersSelected" { + if condition.Reason != managedclusterset.ReasonClusterSelected { return false } return true diff --git a/test/integration/registration/spokeagent_recovery_test.go b/test/integration/registration/spokeagent_recovery_test.go index 6587e8f7e..f2e32001e 100644 --- a/test/integration/registration/spokeagent_recovery_test.go +++ b/test/integration/registration/spokeagent_recovery_test.go @@ -26,6 +26,7 @@ var _ = ginkgo.Describe("Agent Recovery", func() { managedClusterName := "bootstrap-recoverytest-spokecluster" + //#nosec G101 hubKubeconfigSecret := "bootstrap-recoverytest-hub-kubeconfig-secret" hubKubeconfigDir := path.Join(util.TestDir, "bootstrap-recoverytest", 
"hub-kubeconfig") @@ -119,6 +120,7 @@ var _ = ginkgo.Describe("Agent Recovery", func() { spokeClusterName := "hubkubeconfig-recoverytest-spokecluster" + //#nosec G101 hubKubeconfigSecret := "hubkubeconfig-recoverytest-hub-kubeconfig-secret" hubKubeconfigDir := path.Join(util.TestDir, "hubkubeconfig-recoverytest", "hub-kubeconfig") diff --git a/test/integration/registration/spokeagent_restart_test.go b/test/integration/registration/spokeagent_restart_test.go index 42277268c..d0f53949f 100644 --- a/test/integration/registration/spokeagent_restart_test.go +++ b/test/integration/registration/spokeagent_restart_test.go @@ -99,7 +99,7 @@ var _ = ginkgo.Describe("Agent Restart", func() { // is restarted successfully spokeCluster, err := util.GetManagedCluster(clusterClient, managedClusterName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - conditions := []metav1.Condition{} + var conditions []metav1.Condition for _, condition := range spokeCluster.Status.Conditions { if condition.Type == clusterv1.ManagedClusterConditionJoined { continue @@ -147,7 +147,7 @@ var _ = ginkgo.Describe("Agent Restart", func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) }) - // This case happens when registration agent is restarted with a new cluster name by specifing + // This case happens when registration agent is restarted with a new cluster name by specifying // argument 'cluster-name' and the agent has already had a hub kubecofig with a different // cluster name. A bootstrap process is expected. ginkgo.It("restart agent with a different cluster name", func() { diff --git a/test/integration/registration/spokecluster_autoapproval_test.go b/test/integration/registration/spokecluster_autoapproval_test.go index 006cb7d78..a62c359c3 100644 --- a/test/integration/registration/spokecluster_autoapproval_test.go +++ b/test/integration/registration/spokecluster_autoapproval_test.go @@ -22,6 +22,7 @@ var _ = ginkgo.Describe("Cluster Auto Approval", func() { var err error managedClusterName := "autoapprovaltest-spokecluster" + //#nosec G101 hubKubeconfigSecret := "autoapprovaltest-hub-kubeconfig-secret" hubKubeconfigDir := path.Join(util.TestDir, "autoapprovaltest", "hub-kubeconfig") diff --git a/test/integration/registration/spokecluster_claim_test.go b/test/integration/registration/spokecluster_claim_test.go index ef3030919..a16f38791 100644 --- a/test/integration/registration/spokecluster_claim_test.go +++ b/test/integration/registration/spokecluster_claim_test.go @@ -93,7 +93,7 @@ var _ = ginkgo.Describe("Cluster Claim", func() { return false } - if spokeCluster.Finalizers[0] != "cluster.open-cluster-management.io/api-resource-cleanup" { + if spokeCluster.Finalizers[0] != clusterCleanFinalizer { return false } @@ -165,7 +165,7 @@ var _ = ginkgo.Describe("Cluster Claim", func() { assertSuccessBootstrap() ginkgo.By("Sync existing claims") - clusterClaims := []clusterv1.ManagedClusterClaim{} + var clusterClaims []clusterv1.ManagedClusterClaim for _, claim := range claims { clusterClaims = append(clusterClaims, clusterv1.ManagedClusterClaim{ Name: claim.Name, @@ -193,7 +193,7 @@ var _ = ginkgo.Describe("Cluster Claim", func() { newClaim, err = clusterClient.ClusterV1alpha1().ClusterClaims().Create(context.TODO(), newClaim, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - newClusterClaims := []clusterv1.ManagedClusterClaim{} + var newClusterClaims []clusterv1.ManagedClusterClaim newClusterClaims = append(newClusterClaims, clusterClaims...) 
newClusterClaims = append(newClusterClaims, clusterv1.ManagedClusterClaim{ Name: newClaim.Name, @@ -213,7 +213,7 @@ var _ = ginkgo.Describe("Cluster Claim", func() { _, err := clusterClient.ClusterV1alpha1().ClusterClaims().Update(context.TODO(), newClaim, metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - updatedClusterClaims := []clusterv1.ManagedClusterClaim{} + var updatedClusterClaims []clusterv1.ManagedClusterClaim updatedClusterClaims = append(updatedClusterClaims, clusterClaims...) updatedClusterClaims = append(updatedClusterClaims, clusterv1.ManagedClusterClaim{ Name: newClaim.Name, diff --git a/test/integration/registration/spokecluster_joining_test.go b/test/integration/registration/spokecluster_joining_test.go index 304d53e97..fa3e85a2e 100644 --- a/test/integration/registration/spokecluster_joining_test.go +++ b/test/integration/registration/spokecluster_joining_test.go @@ -62,7 +62,7 @@ var _ = ginkgo.Describe("Joining Process", func() { return fmt.Errorf("cluster should have finalizer") } - if spokeCluster.Finalizers[0] != "cluster.open-cluster-management.io/api-resource-cleanup" { + if spokeCluster.Finalizers[0] != clusterCleanFinalizer { return fmt.Errorf("finalizer is not correct") } diff --git a/test/integration/registration/spokecluster_status_test.go b/test/integration/registration/spokecluster_status_test.go index 7a96cc986..98fbefbc4 100644 --- a/test/integration/registration/spokecluster_status_test.go +++ b/test/integration/registration/spokecluster_status_test.go @@ -27,6 +27,7 @@ var _ = ginkgo.Describe("Collecting Node Resource", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) managedClusterName := "resorucetest-managedcluster" + //#nosec G101 hubKubeconfigSecret := "resorucetest-hub-kubeconfig-secret" hubKubeconfigDir := path.Join(util.TestDir, "resorucetest", "hub-kubeconfig") @@ -68,7 +69,7 @@ var _ = ginkgo.Describe("Collecting Node Resource", func() { return false } - if spokeCluster.Finalizers[0] != "cluster.open-cluster-management.io/api-resource-cleanup" { + if spokeCluster.Finalizers[0] != clusterCleanFinalizer { return false } @@ -88,8 +89,8 @@ var _ = ginkgo.Describe("Collecting Node Resource", func() { if err != nil { return false } - accpeted := meta.FindStatusCondition(spokeCluster.Status.Conditions, clusterv1.ManagedClusterConditionHubAccepted) - return accpeted != nil + accepted := meta.FindStatusCondition(spokeCluster.Status.Conditions, clusterv1.ManagedClusterConditionHubAccepted) + return accepted != nil }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) // the hub kubeconfig secret should be filled after the csr is approved diff --git a/test/integration/util/authentication.go b/test/integration/util/authentication.go index 11759e002..959288d32 100644 --- a/test/integration/util/authentication.go +++ b/test/integration/util/authentication.go @@ -335,7 +335,7 @@ func FindAddOnCSRs(kubeClient kubernetes.Interface, spokeClusterName, addOnName return nil, err } - csrs := []*certificates.CertificateSigningRequest{} + var csrs []*certificates.CertificateSigningRequest for _, csr := range csrList.Items { csr := csr csrs = append(csrs, &csr) diff --git a/test/integration/work/deleteoption_test.go b/test/integration/work/deleteoption_test.go index c80f3e9ac..9d7393c9c 100644 --- a/test/integration/work/deleteoption_test.go +++ b/test/integration/work/deleteoption_test.go @@ -69,8 +69,8 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { var anotherAppliedManifestWorkName string 
ginkgo.BeforeEach(func() { manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"c": "d"}, []string{})), } // Create another manifestworks with one shared resource. anotherWork = util.NewManifestWork(commOptions.SpokeClusterName, "sharing-resource-work", []workapiv1.Manifest{manifests[0]}) @@ -100,7 +100,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { ginkgo.It("shared resource between the manifestwork should be kept when one manifestwork is deleted", func() { // ensure configmap exists and get its uid util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) currentUID := curentConfigMap.UID @@ -112,7 +112,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { } for _, appliedResource := range appliedManifestWork.Status.AppliedResources { - if appliedResource.Name == "cm1" && appliedResource.UID == string(currentUID) { + if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) { return nil } } @@ -121,13 +121,14 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) + anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get( + context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) if err != nil { return err } for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources { - if appliedResource.Name == "cm1" && appliedResource.UID == string(currentUID) { + if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) { return nil } } @@ -153,7 +154,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Ensure the configmap is kept and tracked by anotherappliedmanifestwork. 
gomega.Eventually(func() error { - configMap, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + configMap, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) if err != nil { return err } @@ -162,13 +163,14 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { return fmt.Errorf("UID should be equal") } - anotherappliedmanifestwork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) + anotherappliedmanifestwork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get( + context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) if err != nil { return err } for _, appliedResource := range anotherappliedmanifestwork.Status.AppliedResources { - if appliedResource.Name != "cm1" { + if appliedResource.Name != cm1 { return fmt.Errorf("resource Name should be cm1") } @@ -184,7 +186,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { ginkgo.It("shared resource between the manifestwork should be kept when the shared resource is removed from one manifestwork", func() { // ensure configmap exists and get its uid util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) currentUID := curentConfigMap.UID @@ -196,7 +198,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { } for _, appliedResource := range appliedManifestWork.Status.AppliedResources { - if appliedResource.Name == "cm1" && appliedResource.UID == string(currentUID) { + if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) { return nil } } @@ -205,13 +207,14 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) + anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get( + context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) if err != nil { return err } for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources { - if appliedResource.Name == "cm1" && appliedResource.UID == string(currentUID) { + if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) { return nil } } @@ -234,8 +237,8 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { } for _, appliedResource := range appliedManifestWork.Status.AppliedResources { - if appliedResource.Name == "cm1" { - return fmt.Errorf("found applied resouce name cm1") + if appliedResource.Name == cm1 { + return fmt.Errorf("found applied resource name cm1") } } @@ -244,7 +247,8 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Ensure the configmap is kept and tracked by anotherappliedmanifestwork gomega.Eventually(func() error { - configMap, err := 
spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + configMap, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get( + context.Background(), cm1, metav1.GetOptions{}) if err != nil { return err } @@ -253,13 +257,14 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { return fmt.Errorf("UID should be equal") } - anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) + anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get( + context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) if err != nil { return err } for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources { - if appliedResource.Name != "cm1" { + if appliedResource.Name != cm1 { return fmt.Errorf("resource Name should be cm1") } @@ -277,8 +282,8 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { ginkgo.Context("Delete options", func() { ginkgo.BeforeEach(func() { manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"c": "d"}, []string{})), } }) @@ -302,7 +307,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Ensure ownership of configmap is updated gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) if err != nil { return err } @@ -315,7 +320,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{}) + cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm2, metav1.GetOptions{}) if err != nil { return err } @@ -350,7 +355,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { Group: "", Resource: "configmaps", Namespace: commOptions.SpokeClusterName, - Name: "cm1", + Name: cm1, }, }, }, @@ -371,7 +376,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Ensure ownership of configmap is updated gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) if err != nil { return err } @@ -394,11 +399,11 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) // One of the resource should be deleted. 
- _, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{}) + _, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm2, metav1.GetOptions{}) gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) // One of the resource should be kept - _, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + _, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) @@ -411,7 +416,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { Group: "", Resource: "configmaps", Namespace: commOptions.SpokeClusterName, - Name: "cm1", + Name: cm1, }, }, }, @@ -432,7 +437,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Ensure ownership of configmap is updated gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) if err != nil { return err } @@ -452,7 +457,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { } work.Spec.Workload.Manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"c": "d"}, []string{})), } _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) return err @@ -465,7 +470,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Sleep 5 second and check the resource should be kept time.Sleep(5 * time.Second) - _, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + _, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) @@ -478,7 +483,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { Group: "", Resource: "configmaps", Namespace: commOptions.SpokeClusterName, - Name: "cm1", + Name: cm1, }, }, }, @@ -499,7 +504,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Ensure ownership of configmap is updated gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) if err != nil { return err } @@ -525,7 +530,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Ensure ownership of configmap is updated gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) if err != nil { return err } @@ -548,9 +553,9 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }, eventuallyTimeout, 
eventuallyInterval).Should(gomega.BeTrue()) // All of the resource should be deleted. - _, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{}) + _, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm2, metav1.GetOptions{}) gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) - _, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + _, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) }) }) diff --git a/test/integration/work/executor_test.go b/test/integration/work/executor_test.go index 1c1a05035..d2912f5c9 100644 --- a/test/integration/work/executor_test.go +++ b/test/integration/work/executor_test.go @@ -34,6 +34,9 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { var err error + roleName := "role1" + executorName := "test-executor" + ginkgo.BeforeEach(func() { o = spoke.NewWorkloadAgentOptions() o.StatusSyncInterval = 3 * time.Second @@ -74,11 +77,10 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }) ginkgo.Context("Apply the resource with executor", func() { - executorName := "test-executor" ginkgo.BeforeEach(func() { manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"c": "d"}, []string{})), } executor = &workapiv1.ManifestWorkExecutor{ Subject: workapiv1.ManifestWorkExecutorSubject{ @@ -108,7 +110,6 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }) ginkgo.It("Executor does not have permission to partial resources", func() { - roleName := "role1" _, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ @@ -120,7 +121,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { Verbs: []string{"create", "update", "patch", "get", "list", "delete"}, APIGroups: []string{""}, Resources: []string{"configmaps"}, - ResourceNames: []string{"cm1"}, + ResourceNames: []string{cm1}, }, }, }, metav1.CreateOptions{}) @@ -160,16 +161,15 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { // ensure configmap cm1 exist and cm2 not exist util.AssertExistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) util.AssertNonexistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"a": "b"}, []string{})), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) }) ginkgo.It("Executor has permission for all resources", func() { - roleName := 
"role1" _, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ @@ -181,7 +181,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { Verbs: []string{"create", "update", "patch", "get", "list", "delete"}, APIGroups: []string{""}, Resources: []string{"configmaps"}, - ResourceNames: []string{"cm1", "cm2"}, + ResourceNames: []string{cm1, cm2}, }, }, }, metav1.CreateOptions{}) @@ -224,11 +224,10 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }) ginkgo.Context("Apply the resource with executor deleting validating", func() { - executorName := "test-executor" ginkgo.BeforeEach(func() { manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"c": "d"}, []string{})), } executor = &workapiv1.ManifestWorkExecutor{ Subject: workapiv1.ManifestWorkExecutorSubject{ @@ -242,7 +241,6 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }) ginkgo.It("Executor does not have delete permission and delete option is foreground", func() { - roleName := "role1" _, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ @@ -254,7 +252,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { Verbs: []string{"create", "update", "patch", "get", "list"}, APIGroups: []string{""}, Resources: []string{"configmaps"}, - ResourceNames: []string{"cm1", "cm2"}, + ResourceNames: []string{cm1, cm2}, }, }, }, metav1.CreateOptions{}) @@ -296,7 +294,6 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }) ginkgo.It("Executor does not have delete permission and delete option is orphan", func() { - roleName := "role1" _, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ @@ -308,7 +305,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { Verbs: []string{"create", "update", "patch", "get", "list"}, APIGroups: []string{""}, Resources: []string{"configmaps"}, - ResourceNames: []string{"cm1", "cm2"}, + ResourceNames: []string{cm1, cm2}, }, }, }, metav1.CreateOptions{}) @@ -353,7 +350,6 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }) ginkgo.It("Executor does not have delete permission and delete option is selectively orphan", func() { - roleName := "role1" _, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ @@ -365,7 +361,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { Verbs: []string{"create", "update", "patch", "get", "list"}, APIGroups: []string{""}, Resources: []string{"configmaps"}, - ResourceNames: []string{"cm1", "cm2"}, + ResourceNames: []string{cm1, cm2}, }, }, }, metav1.CreateOptions{}) @@ -398,7 +394,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { { Resource: "configmaps", Namespace: commOptions.SpokeClusterName, - Name: "cm1", + Name: cm1, }, }, }, @@ -417,20 +413,19 @@ var _ = ginkgo.Describe("ManifestWork Executor 
Subject", func() { // ensure configmap cm1 exist and cm2 not exist util.AssertExistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) util.AssertNonexistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"a": "b"}, []string{})), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) }) }) ginkgo.Context("Apply the resource with executor escalation validating", func() { - executorName := "test-executor" ginkgo.BeforeEach(func() { manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})), util.ToManifest(util.NewRoleForManifest(commOptions.SpokeClusterName, "role-cm-creator", rbacv1.PolicyRule{ Verbs: []string{"create", "update", "patch", "get", "list", "delete"}, APIGroups: []string{""}, @@ -459,7 +454,6 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }) ginkgo.It("no permission", func() { - roleName := "role1" _, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ @@ -513,12 +507,11 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { // ensure configmap not exist util.AssertNonexistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) }) ginkgo.It("no permission for already existing resource", func() { - roleName := "role1" _, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ @@ -590,12 +583,11 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { // ensure configmap not exist util.AssertNonexistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) }) ginkgo.It("with permission", func() { - roleName := "role1" _, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ @@ -654,12 +646,11 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { // ensure configmaps exist util.AssertExistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) }) ginkgo.It("with permission for already exist resource", func() { 
- roleName := "role1" _, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ @@ -735,14 +726,12 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { // ensure configmaps exist util.AssertExistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) }) }) ginkgo.Context("Caches are in effect", func() { - executorName := "test-executor" - roleName := "role1" createRBAC := func(clusterName, executorName string) { _, err := spokeKubeClient.RbacV1().Roles(clusterName).Create( context.TODO(), &rbacv1.Role{ @@ -755,7 +744,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { Verbs: []string{"create", "update", "patch", "get", "list", "delete"}, APIGroups: []string{""}, Resources: []string{"configmaps"}, - ResourceNames: []string{"cm1", "cm2"}, + ResourceNames: []string{cm1, cm2}, }, }, }, metav1.CreateOptions{}) @@ -791,7 +780,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { } ginkgo.BeforeEach(func() { manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})), } executor = &workapiv1.ManifestWorkExecutor{ Subject: workapiv1.ManifestWorkExecutorSubject{ @@ -820,7 +809,8 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { util.AssertNonexistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) createRBAC(commOptions.SpokeClusterName, executorName) - addConfigMapToManifestWork(hubWorkClient, work.Name, commOptions.SpokeClusterName, "cm2") + addConfigMapToManifestWork(hubWorkClient, work.Name, commOptions.SpokeClusterName, cm2) + util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) @@ -833,6 +823,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { deleteRBAC(commOptions.SpokeClusterName, executorName) addConfigMapToManifestWork(hubWorkClient, work.Name, commOptions.SpokeClusterName, "cm3") + util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionFalse, []metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionFalse, metav1.ConditionFalse}, eventuallyTimeout, eventuallyInterval) @@ -843,11 +834,11 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { ginkgo.By("ensure configmap cm1 cm2 exist(will not delete the applied resource even the permison is revoked) but cm3 does not exist") util.AssertExistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, nil)), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) util.AssertExistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"a": "b"}, nil)), + 
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"a": "b"}, nil)), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) util.AssertNonexistenceOfConfigMaps( []workapiv1.Manifest{ diff --git a/test/integration/work/manifestworkreplicaset_test.go b/test/integration/work/manifestworkreplicaset_test.go index 2d20355da..56c7658a9 100644 --- a/test/integration/work/manifestworkreplicaset_test.go +++ b/test/integration/work/manifestworkreplicaset_test.go @@ -50,7 +50,7 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() { generateTestFixture = func(numberOfClusters int) (*workapiv1alpha1.ManifestWorkReplicaSet, sets.Set[string], error) { clusterNames := sets.New[string]() manifests := []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap("defaut", "cm1", map[string]string{"a": "b"}, nil)), + util.ToManifest(util.NewConfigmap("defaut", cm1, map[string]string{"a": "b"}, nil)), } placementRef := workapiv1alpha1.LocalPlacementReference{Name: placement.Name} @@ -79,7 +79,8 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() { return nil, clusterNames, err } - decision, err := hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).Create(context.TODO(), placementDecision, metav1.CreateOptions{}) + decision, err := hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).Create( + context.TODO(), placementDecision, metav1.CreateOptions{}) if err != nil { return nil, clusterNames, err } @@ -96,7 +97,8 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() { clusterNames.Insert(clusterName) } - decision, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).UpdateStatus(context.TODO(), decision, metav1.UpdateOptions{}) + decision, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).UpdateStatus( + context.TODO(), decision, metav1.UpdateOptions{}) return manifestWorkReplicaSet, clusterNames, err } }) @@ -118,11 +120,13 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() { }, manifestWorkReplicaSet), eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) ginkgo.By("Update decision so manifestworks should be updated") - decision, err := hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).Get(context.TODO(), placementDecision.Name, metav1.GetOptions{}) + decision, err := hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).Get( + context.TODO(), placementDecision.Name, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) removedCluster := decision.Status.Decisions[2].ClusterName decision.Status.Decisions = decision.Status.Decisions[:2] - decision, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).UpdateStatus(context.TODO(), decision, metav1.UpdateOptions{}) + decision, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).UpdateStatus( + context.TODO(), decision, metav1.UpdateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) clusterNames.Delete(removedCluster) gomega.Eventually(assertWorksByReplicaSet(clusterNames, manifestWorkReplicaSet), eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) diff --git a/test/integration/work/statusfeedback_test.go b/test/integration/work/statusfeedback_test.go index 3d39bad09..1dbc48fb9 100644 --- a/test/integration/work/statusfeedback_test.go +++ b/test/integration/work/statusfeedback_test.go @@ -413,7 
+413,9 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { return fmt.Errorf("status feedback values are not correct, we got %v", work.Status.ResourceStatus.Manifests[1].StatusFeedbacks.Values) } - if !util.HaveManifestCondition(work.Status.ResourceStatus.Manifests, "StatusFeedbackSynced", []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionFalse}) { + if !util.HaveManifestCondition( + work.Status.ResourceStatus.Manifests, "StatusFeedbackSynced", + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionFalse}) { return fmt.Errorf("status sync condition should be True") } diff --git a/test/integration/work/suite_test.go b/test/integration/work/suite_test.go index 0087a8564..b114783ae 100644 --- a/test/integration/work/suite_test.go +++ b/test/integration/work/suite_test.go @@ -30,6 +30,7 @@ import ( const ( eventuallyTimeout = 30 // seconds eventuallyInterval = 1 // seconds + cm1, cm2 = "cm1", "cm2" ) var tempDir string diff --git a/test/integration/work/unmanaged_appliedwork_test.go b/test/integration/work/unmanaged_appliedwork_test.go index 00b9e4ead..c6d8c5901 100644 --- a/test/integration/work/unmanaged_appliedwork_test.go +++ b/test/integration/work/unmanaged_appliedwork_test.go @@ -54,7 +54,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() { go startWorkAgent(ctx, o, commOptions) manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, nil)), } work = util.NewManifestWork(commOptions.SpokeClusterName, "unmanaged-appliedwork", manifests) @@ -113,7 +113,8 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() { err := newHub.Stop() gomega.Expect(err).ToNot(gomega.HaveOccurred()) if newHubTempDir != "" { - os.RemoveAll(newHubTempDir) + err := os.RemoveAll(newHubTempDir) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) } }) @@ -153,7 +154,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() { // ensure the resource has two ownerrefs gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.TODO(), "cm1", metav1.GetOptions{}) + cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.TODO(), cm1, metav1.GetOptions{}) if err != nil { return err } @@ -212,7 +213,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() { // ensure the resource has only one ownerref gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.TODO(), "cm1", metav1.GetOptions{}) + cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.TODO(), cm1, metav1.GetOptions{}) if err != nil { return err } diff --git a/test/integration/work/work_test.go b/test/integration/work/work_test.go index e9d65db86..3bbc4a538 100644 --- a/test/integration/work/work_test.go +++ b/test/integration/work/work_test.go @@ -101,7 +101,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { ginkgo.Context("With a single manifest", func() { ginkgo.BeforeEach(func() { manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, nil)), } }) @@ -121,7 +121,7 @@ var _ = ginkgo.Describe("ManifestWork", 
func() { []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) newManifests := []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"x": "y"}, nil)), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"x": "y"}, nil)), } work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -141,7 +141,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { } for _, appliedResource := range appliedManifestWork.Status.AppliedResources { - if appliedResource.Name == "cm1" { + if appliedResource.Name == cm1 { return fmt.Errorf("found applied resource cm1") } } @@ -149,7 +149,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { return nil }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - _, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + _, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) }) @@ -166,8 +166,8 @@ var _ = ginkgo.Describe("ManifestWork", func() { ginkgo.Context("With multiple manifests", func() { ginkgo.BeforeEach(func() { manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap("non-existent-namespace", "cm1", map[string]string{"a": "b"}, nil)), - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, nil)), + util.ToManifest(util.NewConfigmap("non-existent-namespace", cm1, map[string]string{"a": "b"}, nil)), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"c": "d"}, nil)), util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm3", map[string]string{"e": "f"}, nil)), } }) @@ -190,8 +190,8 @@ var _ = ginkgo.Describe("ManifestWork", func() { []metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) newManifests := []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)), - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"x": "y"}, nil)), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, nil)), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"x": "y"}, nil)), util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm4", map[string]string{"e": "f"}, nil)), } @@ -599,7 +599,9 @@ var _ = ginkgo.Describe("ManifestWork", func() { util.AssertAppliedResources(hubHash, work.Name, gvrs, namespaces, names, hubWorkClient, eventuallyTimeout, eventuallyInterval) ginkgo.By("check if resources which are no longer maintained have been deleted") - util.AssertNonexistenceOfResources([]schema.GroupVersionResource{gvrs[3]}, []string{oldServiceAccount.GetNamespace()}, []string{oldServiceAccount.GetName()}, spokeDynamicClient, eventuallyTimeout, eventuallyInterval) + util.AssertNonexistenceOfResources( + []schema.GroupVersionResource{gvrs[3]}, []string{oldServiceAccount.GetNamespace()}, []string{oldServiceAccount.GetName()}, + spokeDynamicClient, eventuallyTimeout, eventuallyInterval) }) }) @@ -607,14 +609,14 @@ var _ = 
ginkgo.Describe("ManifestWork", func() { var finalizer = "cluster.open-cluster-management.io/testing" ginkgo.BeforeEach(func() { manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{finalizer})), - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{finalizer})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{finalizer})), + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"c": "d"}, []string{finalizer})), util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm3", map[string]string{"e": "f"}, []string{finalizer})), } }) ginkgo.AfterEach(func() { - err = util.RemoveConfigmapFinalizers(spokeKubeClient, commOptions.SpokeClusterName, "cm1", "cm2", "cm3") + err = util.RemoveConfigmapFinalizers(spokeKubeClient, commOptions.SpokeClusterName, cm1, cm2, "cm3") gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) @@ -641,7 +643,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { go func() { time.Sleep(2 * time.Second) // remove finalizers of cm1 - _ = util.RemoveConfigmapFinalizers(spokeKubeClient, commOptions.SpokeClusterName, "cm1") + _ = util.RemoveConfigmapFinalizers(spokeKubeClient, commOptions.SpokeClusterName, cm1) }() // check if resource created by stale manifest is deleted once it is removed from applied resource list @@ -652,7 +654,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { } for _, appliedResource := range appliedManifestWork.Status.AppliedResources { - if appliedResource.Name == "cm1" { + if appliedResource.Name == cm1 { return fmt.Errorf("found resource cm1") } } @@ -660,7 +662,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { return nil }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - _, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + _, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) })