From b306c42319209b1bdcdc885865b0f0b8d69329cd Mon Sep 17 00:00:00 2001 From: Amol Deodhar Date: Tue, 23 Apr 2024 08:01:38 -0400 Subject: [PATCH] adapt linodecluster controller tests to new mocktest changes --- cloud/scope/cluster.go | 4 +- cloud/scope/machine_test.go | 108 +++--- controller/linodecluster_controller.go | 1 + controller/linodecluster_controller_test.go | 364 ++++++++---------- ...nodeobjectstoragebucket_controller_test.go | 297 +++++++------- docs/src/developers/testing.md | 171 ++++++++ mock/mocktest/controller_suite.go | 85 ---- mock/mocktest/node.go | 139 ++++--- mock/mocktest/path.go | 156 +++----- mock/mocktest/path_run.go | 30 +- mock/mocktest/path_test.go | 253 +++++++++--- mock/mocktest/suite.go | 173 ++++++++- 12 files changed, 1019 insertions(+), 762 deletions(-) delete mode 100644 mock/mocktest/controller_suite.go diff --git a/cloud/scope/cluster.go b/cloud/scope/cluster.go index 0573bf89c..7769df967 100644 --- a/cloud/scope/cluster.go +++ b/cloud/scope/cluster.go @@ -72,7 +72,7 @@ func NewClusterScope(ctx context.Context, apiKey string, params ClusterScopePara } return &ClusterScope{ - client: params.Client, + Client: params.Client, Cluster: params.Cluster, LinodeClient: linodeClient, LinodeCluster: params.LinodeCluster, @@ -82,7 +82,7 @@ func NewClusterScope(ctx context.Context, apiKey string, params ClusterScopePara // ClusterScope defines the basic context for an actuator to operate upon. type ClusterScope struct { - client K8sClient + Client K8sClient PatchHelper *patch.Helper LinodeClient LinodeNodeBalancerClient Cluster *clusterv1.Cluster diff --git a/cloud/scope/machine_test.go b/cloud/scope/machine_test.go index 12e390208..f3797315e 100644 --- a/cloud/scope/machine_test.go +++ b/cloud/scope/machine_test.go @@ -112,25 +112,25 @@ func TestValidateMachineScopeParams(t *testing.T) { func TestMachineScopeAddFinalizer(t *testing.T) { t.Parallel() - NewTestSuite(mock.MockK8sClient{}).Run(context.Background(), t, Paths( - Call("scheme 1", func(ctx context.Context, m Mock) { - m.K8sClient.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { + NewTestSuite(t, mock.MockK8sClient{}).Run(Paths( + Call("scheme 1", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { s := runtime.NewScheme() infrav1alpha1.AddToScheme(s) return s }) }), Either( - Call("scheme 2", func(ctx context.Context, m Mock) { - m.K8sClient.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { + Call("scheme 2", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { s := runtime.NewScheme() infrav1alpha1.AddToScheme(s) return s }) }), - Result("has finalizer", func(ctx context.Context, m Mock) { + Result("has finalizer", func(ctx context.Context, mck Mock) { mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{ - Client: m.K8sClient, + Client: mck.K8sClient, Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, LinodeCluster: &infrav1alpha1.LinodeCluster{}, @@ -147,13 +147,13 @@ func TestMachineScopeAddFinalizer(t *testing.T) { }), ), Either( - Case( - Call("able to patch", func(ctx context.Context, m Mock) { - m.K8sClient.EXPECT().Patch(ctx, gomock.Any(), gomock.Any()).Return(nil) + Path( + Call("able to patch", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Patch(ctx, gomock.Any(), gomock.Any()).Return(nil) }), - Result("finalizer added", func(ctx context.Context, m Mock) { + Result("finalizer added", func(ctx context.Context, mck Mock) { mScope, err 
:= NewMachineScope(ctx, "token", MachineScopeParams{ - Client: m.K8sClient, + Client: mck.K8sClient, Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, LinodeCluster: &infrav1alpha1.LinodeCluster{}, @@ -165,13 +165,13 @@ func TestMachineScopeAddFinalizer(t *testing.T) { assert.Equal(t, mScope.LinodeMachine.Finalizers[0], infrav1alpha1.GroupVersion.String()) }), ), - Case( - Call("unable to patch", func(ctx context.Context, m Mock) { - m.K8sClient.EXPECT().Patch(ctx, gomock.Any(), gomock.Any()).Return(errors.New("fail")) + Path( + Call("unable to patch", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Patch(ctx, gomock.Any(), gomock.Any()).Return(errors.New("fail")) }), - Result("error", func(ctx context.Context, m Mock) { + Result("error", func(ctx context.Context, mck Mock) { mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{ - Client: m.K8sClient, + Client: mck.K8sClient, Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, LinodeCluster: &infrav1alpha1.LinodeCluster{}, @@ -189,16 +189,16 @@ func TestMachineScopeAddFinalizer(t *testing.T) { func TestNewMachineScope(t *testing.T) { t.Parallel() - NewTestSuite(mock.MockK8sClient{}).Run(context.Background(), t, Paths( + NewTestSuite(t, mock.MockK8sClient{}).Run(Paths( Either( - Result("invalid params", func(ctx context.Context, m Mock) { + Result("invalid params", func(ctx context.Context, mck Mock) { mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{}) require.ErrorContains(t, err, "is required") assert.Nil(t, mScope) }), - Result("no token", func(ctx context.Context, m Mock) { + Result("no token", func(ctx context.Context, mck Mock) { mScope, err := NewMachineScope(ctx, "", MachineScopeParams{ - Client: m.K8sClient, + Client: mck.K8sClient, Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, LinodeCluster: &infrav1alpha1.LinodeCluster{}, @@ -207,13 +207,13 @@ func TestNewMachineScope(t *testing.T) { require.ErrorContains(t, err, "failed to create linode client") assert.Nil(t, mScope) }), - Case( - Call("no secret", func(ctx context.Context, m Mock) { - m.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()).Return(apierrors.NewNotFound(schema.GroupResource{}, "example")) + Path( + Call("no secret", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()).Return(apierrors.NewNotFound(schema.GroupResource{}, "example")) }), - Result("error", func(ctx context.Context, m Mock) { + Result("error", func(ctx context.Context, mck Mock) { mScope, err := NewMachineScope(ctx, "", MachineScopeParams{ - Client: m.K8sClient, + Client: mck.K8sClient, Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, LinodeCluster: &infrav1alpha1.LinodeCluster{}, @@ -232,20 +232,20 @@ func TestNewMachineScope(t *testing.T) { ), ), Either( - Call("valid scheme", func(ctx context.Context, m Mock) { - m.K8sClient.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { + Call("valid scheme", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { s := runtime.NewScheme() infrav1alpha1.AddToScheme(s) return s }) }), - Case( - Call("invalid scheme", func(ctx context.Context, m Mock) { - m.K8sClient.EXPECT().Scheme().Return(runtime.NewScheme()) + Path( + Call("invalid scheme", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Scheme().Return(runtime.NewScheme()) }), - Result("cannot init patch helper", func(ctx context.Context, m Mock) { + Result("cannot init patch helper", func(ctx 
context.Context, mck Mock) { mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{ - Client: m.K8sClient, + Client: mck.K8sClient, Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, LinodeCluster: &infrav1alpha1.LinodeCluster{}, @@ -257,8 +257,8 @@ func TestNewMachineScope(t *testing.T) { ), ), Either( - Call("credentials in secret", func(ctx context.Context, m Mock) { - m.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()). + Call("credentials in secret", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()). DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { *obj = corev1.Secret{ Data: map[string][]byte{ @@ -268,9 +268,9 @@ func TestNewMachineScope(t *testing.T) { return nil }) }), - Result("default credentials", func(ctx context.Context, m Mock) { + Result("default credentials", func(ctx context.Context, mck Mock) { mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{ - Client: m.K8sClient, + Client: mck.K8sClient, Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, LinodeCluster: &infrav1alpha1.LinodeCluster{}, @@ -281,9 +281,9 @@ func TestNewMachineScope(t *testing.T) { }), ), Either( - Result("credentials from LinodeMachine credentialsRef", func(ctx context.Context, m Mock) { + Result("credentials from LinodeMachine credentialsRef", func(ctx context.Context, mck Mock) { mScope, err := NewMachineScope(ctx, "", MachineScopeParams{ - Client: m.K8sClient, + Client: mck.K8sClient, Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, LinodeCluster: &infrav1alpha1.LinodeCluster{}, @@ -299,9 +299,9 @@ func TestNewMachineScope(t *testing.T) { require.NoError(t, err) assert.NotNil(t, mScope) }), - Result("credentials from LinodeCluster credentialsRef", func(ctx context.Context, m Mock) { + Result("credentials from LinodeCluster credentialsRef", func(ctx context.Context, mck Mock) { mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{ - Client: m.K8sClient, + Client: mck.K8sClient, Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, LinodeCluster: &infrav1alpha1.LinodeCluster{ @@ -324,18 +324,18 @@ func TestNewMachineScope(t *testing.T) { func TestMachineScopeGetBootstrapData(t *testing.T) { t.Parallel() - NewTestSuite(mock.MockK8sClient{}).Run(context.Background(), t, Paths( - Call("able to get secret", func(ctx context.Context, m Mock) { - m.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()). + NewTestSuite(t, mock.MockK8sClient{}).Run(Paths( + Call("able to get secret", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()). DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { secret := corev1.Secret{Data: map[string][]byte{"value": []byte("test-data")}} *obj = secret return nil }) }), - Result("success", func(ctx context.Context, m Mock) { + Result("success", func(ctx context.Context, mck Mock) { mScope := MachineScope{ - Client: m.K8sClient, + Client: mck.K8sClient, Machine: &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ @@ -351,20 +351,20 @@ func TestMachineScopeGetBootstrapData(t *testing.T) { assert.Equal(t, data, []byte("test-data")) }), Either( - Call("unable to get secret", func(ctx context.Context, m Mock) { - m.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()). 
+ Call("unable to get secret", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()). Return(apierrors.NewNotFound(schema.GroupResource{}, "test-data")) }), - Call("secret is missing data", func(ctx context.Context, m Mock) { - m.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()). + Call("secret is missing data", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()). DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { *obj = corev1.Secret{} return nil }) }), - Result("secret ref missing", func(ctx context.Context, m Mock) { + Result("secret ref missing", func(ctx context.Context, mck Mock) { mScope := MachineScope{ - Client: m.K8sClient, + Client: mck.K8sClient, Machine: &clusterv1.Machine{}, LinodeMachine: &infrav1alpha1.LinodeMachine{}, } @@ -374,9 +374,9 @@ func TestMachineScopeGetBootstrapData(t *testing.T) { assert.Empty(t, data) }), ), - Result("error", func(ctx context.Context, m Mock) { + Result("error", func(ctx context.Context, mck Mock) { mScope := MachineScope{ - Client: m.K8sClient, + Client: mck.K8sClient, Machine: &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ diff --git a/controller/linodecluster_controller.go b/controller/linodecluster_controller.go index 18813b743..1b9763c3e 100644 --- a/controller/linodecluster_controller.go +++ b/controller/linodecluster_controller.go @@ -192,6 +192,7 @@ func (r *LinodeClusterReconciler) reconcileDelete(ctx context.Context, logger lo if clusterScope.LinodeCluster.Spec.Network.NodeBalancerID == nil { logger.Info("NodeBalancer ID is missing, nothing to do") controllerutil.RemoveFinalizer(clusterScope.LinodeCluster, infrav1alpha1.GroupVersion.String()) + r.Recorder.Event(clusterScope.LinodeCluster, corev1.EventTypeWarning, "NodeBalancerIDMissing", "NodeBalancer ID is missing, nothing to do") return nil } diff --git a/controller/linodecluster_controller_test.go b/controller/linodecluster_controller_test.go index 2bc495268..c71f3e4a6 100644 --- a/controller/linodecluster_controller_test.go +++ b/controller/linodecluster_controller_test.go @@ -17,24 +17,32 @@ package controller import ( - corev1 "k8s.io/api/core/v1" + "context" + "errors" + + "go.uber.org/mock/gomock" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/record" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "github.com/go-logr/logr" infrav1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1" + "github.com/linode/cluster-api-provider-linode/cloud/scope" + "github.com/linode/cluster-api-provider-linode/mock" + . "github.com/linode/cluster-api-provider-linode/mock/mocktest" + "github.com/linode/linodego" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) -var _ = Describe("lifecycle", Ordered, Label("cluster", "lifecycle"), func() { - var reconciler *LinodeClusterReconciler +var _ = Describe("cluster-lifecycle", Ordered, Label("cluster", "cluster-lifecycle"), func() { nodebalancerID := 1 controlPlaneEndpointHost := "10.0.0.1" - clusterName := "lifecycle" + controlPlaneEndpointPort := 6443 + clusterName := "cluster-lifecycle" clusterNameSpace := "default" ownerRef := metav1.OwnerReference{ Name: clusterName, @@ -49,177 +57,108 @@ var _ = Describe("lifecycle", Ordered, Label("cluster", "lifecycle"), func() { OwnerReferences: ownerRefs, } - caplCluster := clusterv1.Cluster{ - ObjectMeta: metadata, - Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Kind: "LinodeCluster", - Name: clusterName, - Namespace: clusterNameSpace, - }, - ControlPlaneRef: &corev1.ObjectReference{ - Kind: "KubeadmControlPlane", - Name: "lifecycle-control-plane", - Namespace: clusterNameSpace, - }, - }, - } - linodeCluster := infrav1.LinodeCluster{ ObjectMeta: metadata, Spec: infrav1.LinodeClusterSpec{ Region: "us-ord", - Network: infrav1.NetworkSpec{ - NodeBalancerID: &nodebalancerID, - }, - ControlPlaneEndpoint: clusterv1.APIEndpoint{ - Host: controlPlaneEndpointHost, - }, }, } - BeforeEach(func() { - reconciler = &LinodeClusterReconciler{ - Client: k8sClient, - LinodeApiKey: "test-key", - } - }) - - It("should provision a control plane endpoint", func(ctx SpecContext) { - clusterKey := client.ObjectKeyFromObject(&linodeCluster) - Expect(k8sClient.Create(ctx, &caplCluster)).To(Succeed()) - Expect(k8sClient.Create(ctx, &linodeCluster)).To(Succeed()) - _, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: clusterKey, - }) - Expect(err).NotTo(HaveOccurred()) - - By("checking ready conditions") - Expect(k8sClient.Get(ctx, clusterKey, &linodeCluster)).To(Succeed()) - Expect(linodeCluster.Status.Ready).To(BeTrue()) - Expect(linodeCluster.Status.Conditions).To(HaveLen(1)) - Expect(linodeCluster.Status.Conditions[0].Type).To(Equal(clusterv1.ReadyCondition)) - - By("checking nb id") - Expect(linodeCluster.Spec.Network.NodeBalancerID).To(Equal(&nodebalancerID)) - - By("checking controlPlaneEndpoint host") - Expect(linodeCluster.Spec.ControlPlaneEndpoint.Host).To(Equal(controlPlaneEndpointHost)) - }) -}) - -var _ = Describe("no-capl-cluster", Ordered, Label("cluster", "no-capl-cluster"), func() { - var reconciler *LinodeClusterReconciler - nodebalancerID := 1 - controlPlaneEndpointHost := "10.0.0.1" - clusterName := "no-capl-cluster" - clusterNameSpace := "default" - metadata := metav1.ObjectMeta{ - Name: clusterName, - Namespace: clusterNameSpace, + ctlrSuite := NewControllerTestSuite(GinkgoT(), mock.MockLinodeNodeBalancerClient{}) + reconciler := LinodeClusterReconciler{ + Recorder: ctlrSuite.Recorder(), } - linodeCluster := infrav1.LinodeCluster{ - ObjectMeta: metadata, - Spec: infrav1.LinodeClusterSpec{ - Region: "us-ord", - Network: infrav1.NetworkSpec{ - NodeBalancerID: &nodebalancerID, - }, - ControlPlaneEndpoint: clusterv1.APIEndpoint{ - Host: controlPlaneEndpointHost, - }, - }, + cScope := &scope.ClusterScope{ + LinodeCluster: &linodeCluster, } - BeforeEach(func() { - reconciler = &LinodeClusterReconciler{ - Client: k8sClient, - LinodeApiKey: "test-key", - } - }) - - It("should fail reconciliation if no capl cluster exists", func(ctx SpecContext) { - clusterKey := client.ObjectKeyFromObject(&linodeCluster) + BeforeAll(func(ctx SpecContext) { + cScope.Client = k8sClient Expect(k8sClient.Create(ctx, 
&linodeCluster)).To(Succeed()) - _, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: clusterKey, - }) - Expect(err).NotTo(HaveOccurred()) - - By("checking ready conditions") - Expect(k8sClient.Get(ctx, clusterKey, &linodeCluster)).To(Succeed()) - Expect(linodeCluster.Status.Ready).To(BeFalseBecause("failed to get Cluster/no-capl-cluster: clusters.cluster.x-k8s.io \"no-capl-cluster\" not found")) }) -}) - -var _ = Describe("no-owner-ref", Ordered, Label("cluster", "no-owner-ref"), func() { - var reconciler *LinodeClusterReconciler - nodebalancerID := 1 - controlPlaneEndpointHost := "10.0.0.1" - clusterName := "no-owner-ref" - clusterNameSpace := "default" - metadata := metav1.ObjectMeta{ - Name: clusterName, - Namespace: clusterNameSpace, - } - - caplCluster := clusterv1.Cluster{ - ObjectMeta: metadata, - Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Kind: "LinodeCluster", - Name: clusterName, - Namespace: clusterNameSpace, - }, - ControlPlaneRef: &corev1.ObjectReference{ - Kind: "KubeadmControlPlane", - Name: "lifecycle-control-plane", - Namespace: clusterNameSpace, - }, - }, - } - linodeCluster := infrav1.LinodeCluster{ - ObjectMeta: metadata, - Spec: infrav1.LinodeClusterSpec{ - Region: "us-ord", - Network: infrav1.NetworkSpec{ - NodeBalancerID: &nodebalancerID, - }, - ControlPlaneEndpoint: clusterv1.APIEndpoint{ - Host: controlPlaneEndpointHost, - }, - }, - } - - BeforeEach(func() { - reconciler = &LinodeClusterReconciler{ - Client: k8sClient, - LinodeApiKey: "test-key", - } - }) + BeforeEach(func(ctx SpecContext) { + clusterKey := client.ObjectKey{Name: "cluster-lifecycle", Namespace: "default"} + Expect(k8sClient.Get(ctx, clusterKey, &linodeCluster)).To(Succeed()) - It("linode cluster should remain NotReady", func(ctx SpecContext) { - clusterKey := client.ObjectKeyFromObject(&linodeCluster) - Expect(k8sClient.Create(ctx, &caplCluster)).To(Succeed()) - Expect(k8sClient.Create(ctx, &linodeCluster)).To(Succeed()) - _, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: clusterKey, - }) + // Create patch helper with latest state of resource. + // This is only needed when relying on envtest's k8sClient. + patchHelper, err := patch.NewHelper(&linodeCluster, k8sClient) Expect(err).NotTo(HaveOccurred()) - - By("checking ready conditions") - Expect(k8sClient.Get(ctx, clusterKey, &linodeCluster)).To(Succeed()) - Expect(linodeCluster.Status.Ready).To(BeFalse()) - Expect(linodeCluster.Status.FailureMessage).To(BeNil()) - Expect(linodeCluster.Status.FailureReason).To(BeNil()) + cScope.PatchHelper = patchHelper }) + + ctlrSuite.Run(Paths( + Either( + Call("cluster is created", func(ctx context.Context, m Mock) { + cScope.LinodeClient = m.NodeBalancerClient + getNB := m.NodeBalancerClient.EXPECT().ListNodeBalancers(gomock.Any(), gomock.Any()).Return(nil, nil) + m.NodeBalancerClient.EXPECT().CreateNodeBalancer(gomock.Any(), gomock.Any()). + After(getNB). 
+ Return(&linodego.NodeBalancer{ + ID: nodebalancerID, + IPv4: &controlPlaneEndpointHost, + }, nil) + m.NodeBalancerClient.EXPECT().CreateNodeBalancerConfig(gomock.Any(), gomock.Any(), gomock.Any()).After(getNB).Return(&linodego.NodeBalancerConfig{ + Port: controlPlaneEndpointPort, + Protocol: linodego.ProtocolTCP, + Algorithm: linodego.AlgorithmRoundRobin, + Check: linodego.CheckConnection, + NodeBalancerID: nodebalancerID, + }, nil) + }), + Path( + Call("cluster is not created because there is no nb", func(ctx context.Context, m Mock) { + cScope.LinodeClient = m.NodeBalancerClient + getNB := m.NodeBalancerClient.EXPECT().ListNodeBalancers(gomock.Any(), gomock.Any()).Return(nil, nil) + m.NodeBalancerClient.EXPECT().CreateNodeBalancer(gomock.Any(), gomock.Any()). + After(getNB). + Return(nil, errors.New("create NB error")) + }), + Result("error", func(ctx context.Context, m Mock) { + _, err := reconciler.reconcile(ctx, cScope, logr.Logger{}) + Expect(err.Error()).To(ContainSubstring("create NB error")) + }), + ), + Path( + Call("cluster is not created because there is no capl cluster", func(ctx context.Context, m Mock) { + cScope.LinodeClient = m.NodeBalancerClient + }), + Result("error", func(ctx context.Context, m Mock) { + reconciler.Client = k8sClient + _, err := reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(cScope.LinodeCluster), + }) + Expect(err).NotTo(HaveOccurred()) + Expect(linodeCluster.Status.Ready).To(BeFalseBecause("failed to get Cluster/no-capl-cluster: clusters.cluster.x-k8s.io \"no-capl-cluster\" not found")) + }), + ), + ), + Result("resource status is updated and NB is created", func(ctx context.Context, m Mock) { + _, err := reconciler.reconcile(ctx, cScope, logr.Logger{}) + Expect(err).NotTo(HaveOccurred()) + + By("checking ready conditions") + clusterKey := client.ObjectKeyFromObject(&linodeCluster) + Expect(k8sClient.Get(ctx, clusterKey, &linodeCluster)).To(Succeed()) + Expect(linodeCluster.Status.Ready).To(BeTrue()) + Expect(linodeCluster.Status.Conditions).To(HaveLen(1)) + Expect(linodeCluster.Status.Conditions[0].Type).To(Equal(clusterv1.ReadyCondition)) + + By("checking NB id") + Expect(linodeCluster.Spec.Network.NodeBalancerID).To(Equal(&nodebalancerID)) + + By("checking controlPlaneEndpoint/NB host and port") + Expect(linodeCluster.Spec.ControlPlaneEndpoint.Host).To(Equal(controlPlaneEndpointHost)) + Expect(linodeCluster.Spec.ControlPlaneEndpoint.Port).To(Equal(int32(controlPlaneEndpointPort))) + }), + )) }) -var _ = Describe("no-ctrl-plane-endpt", Ordered, Label("cluster", "no-ctrl-plane-endpt"), func() { - var reconciler *LinodeClusterReconciler - clusterName := "no-ctrl-plane-endpt" +var _ = Describe("cluster-delete", Ordered, Label("cluster", "cluster-delete"), func() { + nodebalancerID := 1 + clusterName := "cluster-delete" clusterNameSpace := "default" ownerRef := metav1.OwnerReference{ Name: clusterName, @@ -234,69 +173,68 @@ var _ = Describe("no-ctrl-plane-endpt", Ordered, Label("cluster", "no-ctrl-plane OwnerReferences: ownerRefs, } - caplCluster := clusterv1.Cluster{ - ObjectMeta: metadata, - Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Kind: "LinodeCluster", - Name: clusterName, - Namespace: clusterNameSpace, - }, - ControlPlaneRef: &corev1.ObjectReference{ - Kind: "KubeadmControlPlane", - Name: "lifecycle-control-plane", - Namespace: clusterNameSpace, - }, - }, - } - linodeCluster := infrav1.LinodeCluster{ ObjectMeta: metadata, Spec: infrav1.LinodeClusterSpec{ Region: 
"us-ord", + Network: infrav1.NetworkSpec{ + NodeBalancerID: &nodebalancerID, + }, }, } - // Create a recorder with a buffered channel for consuming event strings. - recorder := record.NewFakeRecorder(10) - - BeforeEach(func() { - reconciler = &LinodeClusterReconciler{ - Client: k8sClient, - Recorder: recorder, - LinodeApiKey: "test-key", - } - }) - - AfterEach(func() { - // Flush the channel if any events were not consumed. - for len(recorder.Events) > 0 { - <-recorder.Events - } - }) - - It("should fail creating cluster", func(ctx SpecContext) { - clusterKey := client.ObjectKeyFromObject(&linodeCluster) - Expect(k8sClient.Create(ctx, &caplCluster)).To(Succeed()) - Expect(k8sClient.Create(ctx, &linodeCluster)).To(Succeed()) - _, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: clusterKey, - }) - Expect(err).To(HaveOccurred()) - - By("checking ready conditions") - Expect(k8sClient.Get(ctx, clusterKey, &linodeCluster)).To(Succeed()) - Expect(linodeCluster.Status.Ready).To(BeFalse()) - Expect(linodeCluster.Status.Conditions).To(HaveLen(1)) - Expect(linodeCluster.Status.Conditions[0].Type).To(Equal(clusterv1.ReadyCondition)) - - By("checking controlPlaneEndpoint host") - Expect(linodeCluster.Spec.ControlPlaneEndpoint.Host).To(Equal("")) + ctlrSuite := NewControllerTestSuite( + GinkgoT(), + mock.MockLinodeNodeBalancerClient{}, + mock.MockK8sClient{}, + ) + reconciler := LinodeClusterReconciler{ + Recorder: ctlrSuite.Recorder(), + } - By("checking nb id to be nil") - Expect(linodeCluster.Spec.Network.NodeBalancerID).To(BeNil()) + cScope := &scope.ClusterScope{ + LinodeCluster: &linodeCluster, + } - By("recording the expected events") - Expect(<-recorder.Events).To(ContainSubstring("Warning CreateError")) - }) + ctlrSuite.Run(Paths( + Either( + Call("cluster is deleted", func(ctx context.Context, m Mock) { + cScope.LinodeClient = m.NodeBalancerClient + cScope.Client = m.K8sClient + m.NodeBalancerClient.EXPECT().DeleteNodeBalancer(gomock.Any(), gomock.Any()).Return(nil) + }), + Path( + Call("nothing to do because NB ID is nil", func(ctx context.Context, m Mock) { + cScope.Client = m.K8sClient + cScope.LinodeClient = m.NodeBalancerClient + cScope.LinodeCluster.Spec.Network.NodeBalancerID = nil + }), + Result("nothing to do because NB ID is nil", func(ctx context.Context, m Mock) { + reconciler.Client = m.K8sClient + err := reconciler.reconcileDelete(ctx, logr.Logger{}, cScope) + Expect(err).NotTo(HaveOccurred()) + Expect(ctlrSuite.Events()).To(ContainSubstring("Warning NodeBalancerIDMissing NodeBalancer ID is missing, nothing to do")) + }), + ), + Path( + Call("cluster not deleted because the nb can't be deleted", func(ctx context.Context, m Mock) { + cScope.LinodeClient = m.NodeBalancerClient + cScope.Client = m.K8sClient + cScope.LinodeCluster.Spec.Network.NodeBalancerID = &nodebalancerID + m.NodeBalancerClient.EXPECT().DeleteNodeBalancer(gomock.Any(), gomock.Any()).Return(errors.New("delete NB error")) + }), + Result("cluster not deleted because the nb can't be deleted", func(ctx context.Context, m Mock) { + reconciler.Client = m.K8sClient + err := reconciler.reconcileDelete(ctx, logr.Logger{}, cScope) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("delete NB error")) + }), + ), + ), + Result("cluster deleted", func(ctx context.Context, m Mock) { + reconciler.Client = m.K8sClient + err := reconciler.reconcileDelete(ctx, logr.Logger{}, cScope) + Expect(err).NotTo(HaveOccurred()) + }), + )) }) diff --git 
a/controller/linodeobjectstoragebucket_controller_test.go b/controller/linodeobjectstoragebucket_controller_test.go index 32718dda2..8b10ad02c 100644 --- a/controller/linodeobjectstoragebucket_controller_test.go +++ b/controller/linodeobjectstoragebucket_controller_test.go @@ -66,6 +66,8 @@ type accessKeySecret struct { } var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() { + suite := NewControllerTestSuite(GinkgoT(), mock.MockLinodeObjectStorageClient{}) + obj := infrav1.LinodeObjectStorageBucket{ ObjectMeta: metav1.ObjectMeta{ Name: "lifecycle", @@ -76,21 +78,21 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() { }, } - ctlrSuite := NewControllerTestSuite(mock.MockLinodeObjectStorageClient{}) - reconciler := LinodeObjectStorageBucketReconciler{ - Recorder: ctlrSuite.Recorder(), - } bScope := scope.ObjectStorageBucketScope{ Bucket: &obj, - Logger: ctlrSuite.Logger(), + Logger: suite.Logger(), + } + + reconciler := LinodeObjectStorageBucketReconciler{ + Recorder: suite.Recorder(), } - BeforeAll(func(ctx SpecContext) { + suite.BeforeAll(func(ctx context.Context, _ Mock) { bScope.Client = k8sClient Expect(k8sClient.Create(ctx, &obj)).To(Succeed()) }) - BeforeEach(func(ctx SpecContext) { + suite.BeforeEach(func(ctx context.Context, _ Mock) { objectKey := client.ObjectKey{Name: "lifecycle", Namespace: "default"} Expect(k8sClient.Get(ctx, objectKey, &obj)).To(Succeed()) @@ -101,11 +103,11 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() { bScope.PatchHelper = patchHelper }) - ctlrSuite.Run(Paths( + suite.Run(Paths( Either( - Call("bucket is created", func(ctx context.Context, m Mock) { - getBucket := m.ObjectStorageClient.EXPECT().GetObjectStorageBucket(gomock.Any(), obj.Spec.Cluster, gomock.Any()).Return(nil, nil) - m.ObjectStorageClient.EXPECT().CreateObjectStorageBucket(gomock.Any(), gomock.Any()). + Call("bucket is created", func(ctx context.Context, mck Mock) { + getBucket := mck.ObjectStorageClient.EXPECT().GetObjectStorageBucket(gomock.Any(), obj.Spec.Cluster, gomock.Any()).Return(nil, nil) + mck.ObjectStorageClient.EXPECT().CreateObjectStorageBucket(gomock.Any(), gomock.Any()). After(getBucket). 
Return(&linodego.ObjectStorageBucket{ Label: obj.Name, @@ -114,22 +116,22 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() { Hostname: "hostname", }, nil) }), - Case( - Call("bucket is not created", func(ctx context.Context, m Mock) { - getBucket := m.ObjectStorageClient.EXPECT().GetObjectStorageBucket(gomock.Any(), obj.Spec.Cluster, gomock.Any()).Return(nil, nil) - m.ObjectStorageClient.EXPECT().CreateObjectStorageBucket(gomock.Any(), gomock.Any()).After(getBucket).Return(nil, errors.New("create bucket error")) + Path( + Call("bucket is not created", func(ctx context.Context, mck Mock) { + getBucket := mck.ObjectStorageClient.EXPECT().GetObjectStorageBucket(gomock.Any(), obj.Spec.Cluster, gomock.Any()).Return(nil, nil) + mck.ObjectStorageClient.EXPECT().CreateObjectStorageBucket(gomock.Any(), gomock.Any()).After(getBucket).Return(nil, errors.New("create bucket error")) }), - Result("error", func(ctx context.Context, m Mock) { - bScope.LinodeClient = m.ObjectStorageClient + Result("error", func(ctx context.Context, mck Mock) { + bScope.LinodeClient = mck.ObjectStorageClient _, err := reconciler.reconcile(ctx, &bScope) Expect(err.Error()).To(ContainSubstring("create bucket error")) }), ), ), Either( - Call("keys are created", func(ctx context.Context, m Mock) { + Call("keys are created", func(ctx context.Context, mck Mock) { for idx := range 2 { - m.ObjectStorageClient.EXPECT().CreateObjectStorageKey(gomock.Any(), gomock.Any()). + mck.ObjectStorageClient.EXPECT().CreateObjectStorageKey(gomock.Any(), gomock.Any()). Return(&linodego.ObjectStorageKey{ ID: idx, AccessKey: fmt.Sprintf("access-key-%d", idx), @@ -137,20 +139,20 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() { }, nil) } }), - Case( - Call("keys are not created", func(ctx context.Context, m Mock) { - m.ObjectStorageClient.EXPECT().CreateObjectStorageKey(gomock.Any(), gomock.Any()).Return(nil, errors.New("create key error")) + Path( + Call("keys are not created", func(ctx context.Context, mck Mock) { + mck.ObjectStorageClient.EXPECT().CreateObjectStorageKey(gomock.Any(), gomock.Any()).Return(nil, errors.New("create key error")) }), - Result("error", func(ctx context.Context, m Mock) { - bScope.LinodeClient = m.ObjectStorageClient + Result("error", func(ctx context.Context, mck Mock) { + bScope.LinodeClient = mck.ObjectStorageClient _, err := reconciler.reconcile(ctx, &bScope) Expect(err.Error()).To(ContainSubstring("create key error")) }), ), ), - Result("resource status is updated and key secret is created", func(ctx context.Context, m Mock) { + Result("resource status is updated and key secret is created", func(ctx context.Context, mck Mock) { objectKey := client.ObjectKeyFromObject(&obj) - bScope.LinodeClient = m.ObjectStorageClient + bScope.LinodeClient = mck.ObjectStorageClient _, err := reconciler.reconcile(ctx, &bScope) Expect(err).NotTo(HaveOccurred()) @@ -182,17 +184,17 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() { Expect(key.StringData.AccessKeyRO).To(Equal("access-key-1")) Expect(key.StringData.SecretKeyRO).To(Equal("secret-key-1")) - Expect(<-m.Events()).To(ContainSubstring("Object storage keys assigned")) - Expect(<-m.Events()).To(ContainSubstring("Object storage keys stored in secret")) - Expect(<-m.Events()).To(ContainSubstring("Object storage bucket synced")) + Expect(suite.Events()).To(ContainSubstring("Object storage keys assigned")) + Expect(suite.Events()).To(ContainSubstring("Object storage keys stored in secret")) + 
Expect(suite.Events()).To(ContainSubstring("Object storage bucket synced")) - logOutput := m.Logs() + logOutput := suite.Logs() Expect(logOutput).To(ContainSubstring("Reconciling apply")) Expect(logOutput).To(ContainSubstring("Secret lifecycle-bucket-details was applied with new access keys")) }), Either( - Call("bucket is retrieved on update", func(ctx context.Context, m Mock) { - m.ObjectStorageClient.EXPECT().GetObjectStorageBucket(gomock.Any(), obj.Spec.Cluster, gomock.Any()). + Call("bucket is retrieved on update", func(ctx context.Context, mck Mock) { + mck.ObjectStorageClient.EXPECT().GetObjectStorageBucket(gomock.Any(), obj.Spec.Cluster, gomock.Any()). Return(&linodego.ObjectStorageBucket{ Label: obj.Name, Cluster: obj.Spec.Cluster, @@ -200,18 +202,18 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() { Hostname: "hostname", }, nil) }), - Case( - Call("bucket is not retrieved on update", func(ctx context.Context, m Mock) { - m.ObjectStorageClient.EXPECT().GetObjectStorageBucket(gomock.Any(), obj.Spec.Cluster, gomock.Any()).Return(nil, errors.New("get bucket error")) + Path( + Call("bucket is not retrieved on update", func(ctx context.Context, mck Mock) { + mck.ObjectStorageClient.EXPECT().GetObjectStorageBucket(gomock.Any(), obj.Spec.Cluster, gomock.Any()).Return(nil, errors.New("get bucket error")) }), - Result("error", func(ctx context.Context, m Mock) { - bScope.LinodeClient = m.ObjectStorageClient + Result("error", func(ctx context.Context, mck Mock) { + bScope.LinodeClient = mck.ObjectStorageClient _, err := reconciler.reconcile(ctx, &bScope) Expect(err.Error()).To(ContainSubstring("get bucket error")) }), ), ), - Once("resource keyGeneration is modified", func(ctx context.Context) { + Once("resource keyGeneration is modified", func(ctx context.Context, _ Mock) { objectKey := client.ObjectKeyFromObject(&obj) Expect(k8sClient.Get(ctx, objectKey, &obj)).To(Succeed()) obj.Spec.KeyGeneration = ptr.To(1) @@ -220,44 +222,44 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() { Either( // nb: Order matters for paths of the same length. The leftmost path is evaluated first. // If we evaluate the happy path first, the bucket resource is mutated so the error path won't occur. - Case( - Call("keys are not rotated", func(ctx context.Context, m Mock) { - m.ObjectStorageClient.EXPECT().CreateObjectStorageKey(gomock.Any(), gomock.Any()).Return(nil, errors.New("create key error")) + Path( + Call("keys are not rotated", func(ctx context.Context, mck Mock) { + mck.ObjectStorageClient.EXPECT().CreateObjectStorageKey(gomock.Any(), gomock.Any()).Return(nil, errors.New("create key error")) }), - Result("error", func(ctx context.Context, m Mock) { - bScope.LinodeClient = m.ObjectStorageClient + Result("error", func(ctx context.Context, mck Mock) { + bScope.LinodeClient = mck.ObjectStorageClient _, err := reconciler.reconcile(ctx, &bScope) Expect(err.Error()).To(ContainSubstring("create key error")) }), ), - Case( - Call("keys are rotated", func(ctx context.Context, m Mock) { + Path( + Call("keys are rotated", func(ctx context.Context, mck Mock) { for idx := range 2 { - createCall := m.ObjectStorageClient.EXPECT().CreateObjectStorageKey(gomock.Any(), gomock.Any()). + createCall := mck.ObjectStorageClient.EXPECT().CreateObjectStorageKey(gomock.Any(), gomock.Any()). 
Return(&linodego.ObjectStorageKey{ ID: idx + 2, AccessKey: fmt.Sprintf("access-key-%d", idx+2), SecretKey: fmt.Sprintf("secret-key-%d", idx+2), }, nil) - m.ObjectStorageClient.EXPECT().DeleteObjectStorageKey(gomock.Any(), idx).After(createCall).Return(nil) + mck.ObjectStorageClient.EXPECT().DeleteObjectStorageKey(gomock.Any(), idx).After(createCall).Return(nil) } }), - Result("resource lastKeyGeneration is updated", func(ctx context.Context, m Mock) { + Result("resource lastKeyGeneration is updated", func(ctx context.Context, mck Mock) { objectKey := client.ObjectKeyFromObject(&obj) - bScope.LinodeClient = m.ObjectStorageClient + bScope.LinodeClient = mck.ObjectStorageClient _, err := reconciler.reconcile(ctx, &bScope) Expect(err).NotTo(HaveOccurred()) Expect(k8sClient.Get(ctx, objectKey, &obj)).To(Succeed()) Expect(*obj.Status.LastKeyGeneration).To(Equal(1)) - Expect(<-m.Events()).To(ContainSubstring("Object storage keys assigned")) + Expect(suite.Events()).To(ContainSubstring("Object storage keys assigned")) - logOutput := m.Logs() + logOutput := suite.Logs() Expect(logOutput).To(ContainSubstring("Reconciling apply")) Expect(logOutput).To(ContainSubstring("Secret lifecycle-bucket-details was applied with new access keys")) }), ), - Once("secret is deleted", func(ctx context.Context) { + Once("secret is deleted", func(ctx context.Context, _ Mock) { var secret corev1.Secret secretKey := client.ObjectKey{Namespace: "default", Name: *obj.Status.KeySecretName} Expect(k8sClient.Get(ctx, secretKey, &secret)).To(Succeed()) @@ -265,20 +267,20 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() { }), ), Either( - Case( - Call("keys are not retrieved", func(ctx context.Context, m Mock) { - m.ObjectStorageClient.EXPECT().GetObjectStorageKey(gomock.Any(), gomock.Any()).Times(2).Return(nil, errors.New("get key error")) + Path( + Call("keys are not retrieved", func(ctx context.Context, mck Mock) { + mck.ObjectStorageClient.EXPECT().GetObjectStorageKey(gomock.Any(), gomock.Any()).Times(2).Return(nil, errors.New("get key error")) }), - Result("error", func(ctx context.Context, m Mock) { - bScope.LinodeClient = m.ObjectStorageClient + Result("error", func(ctx context.Context, mck Mock) { + bScope.LinodeClient = mck.ObjectStorageClient _, err := reconciler.reconcile(ctx, &bScope) Expect(err.Error()).To(ContainSubstring("get key error")) }), ), - Case( - Call("keys are retrieved", func(ctx context.Context, m Mock) { + Path( + Call("keys are retrieved", func(ctx context.Context, mck Mock) { for idx := range 2 { - m.ObjectStorageClient.EXPECT().GetObjectStorageKey(gomock.Any(), idx+2). + mck.ObjectStorageClient.EXPECT().GetObjectStorageKey(gomock.Any(), idx+2). 
Return(&linodego.ObjectStorageKey{ ID: idx + 2, AccessKey: fmt.Sprintf("access-key-%d", idx+2), @@ -286,8 +288,8 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() { }, nil) } }), - Result("secret is restored", func(ctx context.Context, m Mock) { - bScope.LinodeClient = m.ObjectStorageClient + Result("secret is restored", func(ctx context.Context, mck Mock) { + bScope.LinodeClient = mck.ObjectStorageClient _, err := reconciler.reconcile(ctx, &bScope) Expect(err).NotTo(HaveOccurred()) @@ -306,49 +308,49 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() { Expect(key.StringData.AccessKeyRO).To(Equal("access-key-3")) Expect(key.StringData.SecretKeyRO).To(Equal("secret-key-3")) - Expect(<-m.Events()).To(ContainSubstring("Object storage keys retrieved")) - Expect(<-m.Events()).To(ContainSubstring("Object storage keys stored in secret")) - Expect(<-m.Events()).To(ContainSubstring("Object storage bucket synced")) + Expect(suite.Events()).To(ContainSubstring("Object storage keys retrieved")) + Expect(suite.Events()).To(ContainSubstring("Object storage keys stored in secret")) + Expect(suite.Events()).To(ContainSubstring("Object storage bucket synced")) - logOutput := m.Logs() + logOutput := suite.Logs() Expect(logOutput).To(ContainSubstring("Reconciling apply")) Expect(logOutput).To(ContainSubstring("Secret lifecycle-bucket-details was applied with new access keys")) }), ), ), - Once("resource is deleted", func(ctx context.Context) { + Once("resource is deleted", func(ctx context.Context, _ Mock) { // nb: client.Delete does not set DeletionTimestamp on the object, so re-fetch from the apiserver. objectKey := client.ObjectKeyFromObject(&obj) Expect(k8sClient.Delete(ctx, &obj)).To(Succeed()) Expect(k8sClient.Get(ctx, objectKey, &obj)).To(Succeed()) }), Either( - Case( - Call("keys are not revoked", func(ctx context.Context, m Mock) { - m.ObjectStorageClient.EXPECT().DeleteObjectStorageKey(gomock.Any(), gomock.Any()).Times(2).Return(errors.New("revoke error")) + Path( + Call("keys are not revoked", func(ctx context.Context, mck Mock) { + mck.ObjectStorageClient.EXPECT().DeleteObjectStorageKey(gomock.Any(), gomock.Any()).Times(2).Return(errors.New("revoke error")) }), - Result("error", func(ctx context.Context, m Mock) { - bScope.LinodeClient = m.ObjectStorageClient + Result("error", func(ctx context.Context, mck Mock) { + bScope.LinodeClient = mck.ObjectStorageClient _, err := reconciler.reconcile(ctx, &bScope) Expect(err.Error()).To(ContainSubstring("revoke error")) }), ), - Case( - Call("keys are revoked", func(ctx context.Context, m Mock) { - m.ObjectStorageClient.EXPECT().DeleteObjectStorageKey(gomock.Any(), 2).Return(nil) - m.ObjectStorageClient.EXPECT().DeleteObjectStorageKey(gomock.Any(), 3).Return(nil) + Path( + Call("keys are revoked", func(ctx context.Context, mck Mock) { + mck.ObjectStorageClient.EXPECT().DeleteObjectStorageKey(gomock.Any(), 2).Return(nil) + mck.ObjectStorageClient.EXPECT().DeleteObjectStorageKey(gomock.Any(), 3).Return(nil) }), - Result("finalizer is removed", func(ctx context.Context, m Mock) { + Result("finalizer is removed", func(ctx context.Context, mck Mock) { objectKey := client.ObjectKeyFromObject(&obj) k8sClient.Get(ctx, objectKey, &obj) - bScope.LinodeClient = m.ObjectStorageClient + bScope.LinodeClient = mck.ObjectStorageClient _, err := reconciler.reconcile(ctx, &bScope) Expect(err).NotTo(HaveOccurred()) Expect(apierrors.IsNotFound(k8sClient.Get(ctx, objectKey, &obj))).To(BeTrue()) - 
Expect(<-m.Events()).To(ContainSubstring("Object storage keys revoked")) + Expect(suite.Events()).To(ContainSubstring("Object storage keys revoked")) - logOutput := m.Logs() + logOutput := suite.Logs() Expect(logOutput).To(ContainSubstring("Reconciling delete")) }), ), @@ -357,15 +359,16 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() { }) var _ = Describe("errors", Label("bucket", "errors"), func() { - ctlrSuite := NewControllerTestSuite( + suite := NewControllerTestSuite( + GinkgoT(), mock.MockLinodeObjectStorageClient{}, mock.MockK8sClient{}, ) - reconciler := LinodeObjectStorageBucketReconciler{Recorder: ctlrSuite.Recorder()} - bScope := scope.ObjectStorageBucketScope{Logger: ctlrSuite.Logger()} + reconciler := LinodeObjectStorageBucketReconciler{Recorder: suite.Recorder()} + bScope := scope.ObjectStorageBucketScope{Logger: suite.Logger()} - BeforeEach(func() { + suite.BeforeEach(func(_ context.Context, _ Mock) { // Reset obj to base state to be modified in each test path. // We can use a consistent name since these tests are stateless. bScope.Bucket = &infrav1.LinodeObjectStorageBucket{ @@ -380,146 +383,146 @@ var _ = Describe("errors", Label("bucket", "errors"), func() { } }) - ctlrSuite.Run(Paths( + suite.Run(Paths( Either( - Call("resource can be fetched", func(ctx context.Context, m Mock) { - m.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + Call("resource can be fetched", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) }), - Case( - Call("resource is not found", func(ctx context.Context, m Mock) { - m.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(apierrors.NewNotFound(schema.GroupResource{}, "mock")) + Path( + Call("resource is not found", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(apierrors.NewNotFound(schema.GroupResource{}, "mock")) }), - Result("no error", func(ctx context.Context, m Mock) { - reconciler.Client = m.K8sClient + Result("no error", func(ctx context.Context, mck Mock) { + reconciler.Client = mck.K8sClient _, err := reconciler.Reconcile(ctx, reconcile.Request{ NamespacedName: client.ObjectKeyFromObject(bScope.Bucket), }) Expect(err).NotTo(HaveOccurred()) }), ), - Case( - Call("resource can't be fetched", func(ctx context.Context, m Mock) { - m.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("non-404 error")) + Path( + Call("resource can't be fetched", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("non-404 error")) }), - Result("error", func(ctx context.Context, m Mock) { - reconciler.Client = m.K8sClient + Result("error", func(ctx context.Context, mck Mock) { + reconciler.Client = mck.K8sClient reconciler.Logger = bScope.Logger _, err := reconciler.Reconcile(ctx, reconcile.Request{ NamespacedName: client.ObjectKeyFromObject(bScope.Bucket), }) Expect(err.Error()).To(ContainSubstring("non-404 error")) - Expect(m.Logs()).To(ContainSubstring("Failed to fetch LinodeObjectStorageBucket")) + Expect(suite.Logs()).To(ContainSubstring("Failed to fetch LinodeObjectStorageBucket")) }), ), ), - Result("scope params is missing args", func(ctx context.Context, m Mock) { - reconciler.Client = m.K8sClient + Result("scope params is missing args", func(ctx context.Context, mck Mock) { + reconciler.Client = mck.K8sClient reconciler.Logger = 
bScope.Logger _, err := reconciler.Reconcile(ctx, reconcile.Request{ NamespacedName: client.ObjectKeyFromObject(bScope.Bucket), }) Expect(err.Error()).To(ContainSubstring("failed to create object storage bucket scope")) - Expect(m.Logs()).To(ContainSubstring("Failed to create object storage bucket scope")) + Expect(suite.Logs()).To(ContainSubstring("Failed to create object storage bucket scope")) }), - Call("scheme with no infrav1alpha1", func(ctx context.Context, m Mock) { - prev := m.K8sClient.EXPECT().Scheme().Return(scheme.Scheme) - m.K8sClient.EXPECT().Scheme().After(prev).Return(runtime.NewScheme()).Times(2) + Call("scheme with no infrav1alpha1", func(ctx context.Context, mck Mock) { + prev := mck.K8sClient.EXPECT().Scheme().Return(scheme.Scheme) + mck.K8sClient.EXPECT().Scheme().After(prev).Return(runtime.NewScheme()).Times(2) }), - Result("error", func(ctx context.Context, m Mock) { - bScope.Client = m.K8sClient + Result("error", func(ctx context.Context, mck Mock) { + bScope.Client = mck.K8sClient - patchHelper, err := patch.NewHelper(bScope.Bucket, m.K8sClient) + patchHelper, err := patch.NewHelper(bScope.Bucket, mck.K8sClient) Expect(err).NotTo(HaveOccurred()) bScope.PatchHelper = patchHelper _, err = reconciler.reconcile(ctx, &bScope) Expect(err.Error()).To(ContainSubstring("no kind is registered")) }), - Call("get bucket", func(ctx context.Context, m Mock) { - m.ObjectStorageClient.EXPECT().GetObjectStorageBucket(gomock.Any(), gomock.Any(), gomock.Any()).Return(&linodego.ObjectStorageBucket{Created: ptr.To(time.Now())}, nil) + Call("get bucket", func(ctx context.Context, mck Mock) { + mck.ObjectStorageClient.EXPECT().GetObjectStorageBucket(gomock.Any(), gomock.Any(), gomock.Any()).Return(&linodego.ObjectStorageBucket{Created: ptr.To(time.Now())}, nil) }), Either( - Case( - Call("failed check for deleted secret", func(ctx context.Context, m Mock) { - m.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("api error")) + Path( + Call("failed check for deleted secret", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("api error")) }), - Result("error", func(ctx context.Context, m Mock) { + Result("error", func(ctx context.Context, mck Mock) { bScope.Bucket.Spec.KeyGeneration = ptr.To(1) bScope.Bucket.Status.LastKeyGeneration = bScope.Bucket.Spec.KeyGeneration bScope.Bucket.Status.KeySecretName = ptr.To("mock-bucket-details") bScope.Bucket.Status.AccessKeyRefs = []int{0, 1} - bScope.LinodeClient = m.ObjectStorageClient - bScope.Client = m.K8sClient + bScope.LinodeClient = mck.ObjectStorageClient + bScope.Client = mck.K8sClient err := reconciler.reconcileApply(ctx, &bScope) Expect(err.Error()).To(ContainSubstring("api error")) - Expect(<-m.Events()).To(ContainSubstring("api error")) - Expect(m.Logs()).To(ContainSubstring("Failed to ensure access key secret exists")) + Expect(suite.Events()).To(ContainSubstring("api error")) + Expect(suite.Logs()).To(ContainSubstring("Failed to ensure access key secret exists")) }), ), - Call("secret deleted", func(ctx context.Context, m Mock) { - m.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(apierrors.NewNotFound(schema.GroupResource{Resource: "Secret"}, "mock-bucket-details")) + Call("secret deleted", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(apierrors.NewNotFound(schema.GroupResource{Resource: "Secret"}, "mock-bucket-details")) }), ), - 
Call("get keys", func(ctx context.Context, m Mock) { + Call("get keys", func(ctx context.Context, mck Mock) { for idx := range 2 { - m.ObjectStorageClient.EXPECT().GetObjectStorageKey(gomock.Any(), idx).Return(&linodego.ObjectStorageKey{ID: idx}, nil) + mck.ObjectStorageClient.EXPECT().GetObjectStorageKey(gomock.Any(), idx).Return(&linodego.ObjectStorageKey{ID: idx}, nil) } }), Either( - Case( - Call("secret resource creation fails", func(ctx context.Context, m Mock) { - m.K8sClient.EXPECT().Scheme().Return(scheme.Scheme).AnyTimes() - m.K8sClient.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("secret creation error")) + Path( + Call("secret resource creation fails", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Scheme().Return(scheme.Scheme).AnyTimes() + mck.K8sClient.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("secret creation error")) }), - Result("creation error", func(ctx context.Context, m Mock) { + Result("creation error", func(ctx context.Context, mck Mock) { bScope.Bucket.Spec.KeyGeneration = ptr.To(1) bScope.Bucket.Status.LastKeyGeneration = bScope.Bucket.Spec.KeyGeneration bScope.Bucket.Status.KeySecretName = ptr.To("mock-bucket-details") bScope.Bucket.Status.AccessKeyRefs = []int{0, 1} - bScope.LinodeClient = m.ObjectStorageClient - bScope.Client = m.K8sClient + bScope.LinodeClient = mck.ObjectStorageClient + bScope.Client = mck.K8sClient err := reconciler.reconcileApply(ctx, &bScope) Expect(err.Error()).To(ContainSubstring("secret creation error")) - Expect(<-m.Events()).To(ContainSubstring("keys retrieved")) - Expect(<-m.Events()).To(ContainSubstring("secret creation error")) - Expect(m.Logs()).To(ContainSubstring("Failed to apply key secret")) + Expect(suite.Events()).To(ContainSubstring("keys retrieved")) + Expect(suite.Events()).To(ContainSubstring("secret creation error")) + Expect(suite.Logs()).To(ContainSubstring("Failed to apply key secret")) }), ), - Case( - Call("secret generation fails", func(ctx context.Context, m Mock) { - m.K8sClient.EXPECT().Scheme().Return(runtime.NewScheme()) + Path( + Call("secret generation fails", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Scheme().Return(runtime.NewScheme()) }), - Result("error", func(ctx context.Context, m Mock) { + Result("error", func(ctx context.Context, mck Mock) { bScope.Bucket.Spec.KeyGeneration = ptr.To(1) bScope.Bucket.Status.LastKeyGeneration = bScope.Bucket.Spec.KeyGeneration bScope.Bucket.Status.KeySecretName = ptr.To("mock-bucket-details") bScope.Bucket.Status.AccessKeyRefs = []int{0, 1} - bScope.LinodeClient = m.ObjectStorageClient - bScope.Client = m.K8sClient + bScope.LinodeClient = mck.ObjectStorageClient + bScope.Client = mck.K8sClient err := reconciler.reconcileApply(ctx, &bScope) Expect(err.Error()).To(ContainSubstring("no kind is registered")) - Expect(<-m.Events()).To(ContainSubstring("keys retrieved")) - Expect(<-m.Events()).To(ContainSubstring("no kind is registered")) - Expect(m.Logs()).To(ContainSubstring("Failed to generate key secret")) + Expect(suite.Events()).To(ContainSubstring("keys retrieved")) + Expect(suite.Events()).To(ContainSubstring("no kind is registered")) + Expect(suite.Logs()).To(ContainSubstring("Failed to generate key secret")) }), ), ), - Once("finalizer is missing", func(ctx context.Context) { + Once("finalizer is missing", func(ctx context.Context, _ Mock) { bScope.Bucket.Status.AccessKeyRefs = []int{0, 1} bScope.Bucket.ObjectMeta.Finalizers = []string{} }), - Call("revoke keys", 
func(ctx context.Context, m Mock) {
-			m.ObjectStorageClient.EXPECT().DeleteObjectStorageKey(gomock.Any(), gomock.Any()).Times(2).Return(nil)
+		Call("revoke keys", func(ctx context.Context, mck Mock) {
+			mck.ObjectStorageClient.EXPECT().DeleteObjectStorageKey(gomock.Any(), gomock.Any()).Times(2).Return(nil)
 		}),
-		Result("error", func(ctx context.Context, m Mock) {
-			bScope.LinodeClient = m.ObjectStorageClient
-			bScope.Client = m.K8sClient
+		Result("error", func(ctx context.Context, mck Mock) {
+			bScope.LinodeClient = mck.ObjectStorageClient
+			bScope.Client = mck.K8sClient
 			err := reconciler.reconcileDelete(ctx, &bScope)
 			Expect(err.Error()).To(ContainSubstring("failed to remove finalizer from bucket"))
-			Expect(<-m.Events()).To(ContainSubstring("failed to remove finalizer from bucket"))
+			Expect(suite.Events()).To(ContainSubstring("failed to remove finalizer from bucket"))
 		}),
 	))
 })
diff --git a/docs/src/developers/testing.md b/docs/src/developers/testing.md
index 61d9badc9..8bc586ce9 100644
--- a/docs/src/developers/testing.md
+++ b/docs/src/developers/testing.md
@@ -6,6 +6,177 @@ In order to run the unit tests run the following command
 ```bash
 make test
 ```
+### Adding tests
+General unit tests of functions follow the same conventions for testing using Go's `testing` standard library, along with the [testify](https://github.com/stretchr/testify) toolkit for making assertions.
+
+Unit tests that require API clients use mock clients generated using [gomock](https://github.com/uber-go/mock). To simplify the usage of mock clients, this repo also uses an internal library defined in `mock/mocktest`.
+
+`mocktest` is usually imported as a dot import along with the `mock` package:
+
+```go
+import (
+    "github.com/linode/cluster-api-provider-linode/mock"
+
+    . "github.com/linode/cluster-api-provider-linode/mock/mocktest"
+)
+```
+
+Using `mocktest` involves creating a test suite that specifies the mock clients to be used within each test scope, and running the test suite using a DSL for defining test nodes that belong to one or more `Paths`.
+
+#### Example
+The following is a contrived example using the mock Linode machine client.
+
+Let's say we've written an idempotent function `EnsureInstanceNotOffline` that 1) gets an instance or creates it if it doesn't exist, and 2) boots the instance if it's offline. Testing this function would mean we'd need to write test cases for all permutations, i.e.
+* instance exists and is not offline
+* instance exists but is offline, and is able to boot
+* instance exists but is offline, and is not able to boot
+* instance does not exist, and is not able to be created
+* instance does not exist, and is able to be created, and is able to boot
+* instance does not exist, and is able to be created, and is not able to boot
+
+While writing test cases for each scenario, we'd likely find a lot of overlap between them. `mocktest` provides a DSL for defining each unique test case without needing to spell out all required mock client calls for each case. Here's how we could test `EnsureInstanceNotOffline` using `mocktest`:
+
+```go
+func TestEnsureInstanceNotOffline(t *testing.T) {
+    suite := NewTestSuite(t, mock.MockLinodeMachineClient{})
+
+    suite.Run(Paths(
+        Either(
+            Path(
+                Call("instance exists and is not offline", func(ctx context.Context, mck Mock) {
+                    mck.MachineClient.EXPECT().GetInstance(ctx, /* ... */).Return(&linodego.Instance{Status: linodego.InstanceRunning}, nil)
+                }),
+                Result("success", func(ctx context.Context, mck Mock) {
+                    inst, err := EnsureInstanceNotOffline(ctx, /* ... */)
+                    require.NoError(t, err)
+                    assert.Equal(t, inst.Status, linodego.InstanceRunning)
+                }),
+            ),
+            Path(
+                Call("instance does not exist", func(ctx context.Context, mck Mock) {
+                    mck.MachineClient.EXPECT().GetInstance(ctx, /* ... */).Return(nil, linodego.Error{Code: 404})
+                }),
+                Either(
+                    Call("able to be created", func(ctx context.Context, mck Mock) {
+                        mck.MachineClient.EXPECT().CreateInstance(ctx, /* ... */).Return(&linodego.Instance{Status: linodego.InstanceOffline}, nil)
+                    }),
+                    Path(
+                        Call("not able to be created", func(ctx context.Context, mck Mock) {/* ... */}),
+                        Result("error", func(ctx context.Context, mck Mock) {
+                            inst, err := EnsureInstanceNotOffline(ctx, /* ... */)
+                            require.ErrorContains(t, err, "instance was not booted: failed to create instance: reasons...")
+                            assert.Empty(t, inst)
+                        }),
+                    ),
+                ),
+            ),
+            Call("instance exists but is offline", func(ctx context.Context, mck Mock) {
+                mck.MachineClient.EXPECT().GetInstance(ctx, /* ... */).Return(&linodego.Instance{Status: linodego.InstanceOffline}, nil)
+            }),
+        ),
+        Either(
+            Path(
+                Call("able to boot", func(ctx context.Context, mck Mock) {/* */}),
+                Result("success", func(ctx context.Context, mck Mock) {
+                    inst, err := EnsureInstanceNotOffline(ctx, /* ... */)
+                    require.NoError(t, err)
+                    assert.Equal(t, inst.Status, linodego.InstanceBooting)
+                }),
+            ),
+            Path(
+                Call("not able to boot", func(ctx context.Context, mck Mock) {/* returns API error */}),
+                Result("error", func(ctx context.Context, mck Mock) {
+                    inst, err := EnsureInstanceNotOffline(ctx, /* ... */)
+                    require.ErrorContains(t, err, "instance was not booted: boot failed: reasons...")
+                    assert.Empty(t, inst)
+                }),
+            ),
+        ),
+    ))
+}
+```
+In this example, the nodes passed into `Paths` are used to describe each permutation of the function being called with different results from the mock Linode machine client.
+
+#### Nodes
+* `Call` describes the behavior of method calls by mock clients. A `Call` node can belong to one or more paths.
+* `Result` invokes the function with mock clients and tests the output. A `Result` node terminates each path it belongs to.
+* `Path` is a list of nodes that all belong to the same test path. Each child node of a `Path` is evaluated in order.
+* `Either` is a list of nodes that all belong to different test paths. It is used to define diverging test paths, with each path containing the set of all preceding `Call` nodes.
+
+#### Setup, tear down, and event triggers
+Setup and teardown nodes can be scheduled before and after each run:
+* `suite.BeforeEach` receives a `func(context.Context, Mock)` function that will run before each path is evaluated. Likewise, `suite.AfterEach` will run after each path is evaluated.
+* `suite.BeforeAll` receives a `func(context.Context, Mock)` function that will run once before all paths are evaluated. Likewise, `suite.AfterAll` will run once after all paths are evaluated.
+
+In addition to the path nodes listed in the section above, a special node type `Once` may be specified to inject a function that will only be evaluated one time across all paths. It can be used to trigger side effects outside of mock client behavior that can impact the output of the function being tested.
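+
+For illustration, here is a minimal sketch of how the setup hooks and `Once` fit together. The suite, node descriptions, and hook bodies are assumptions for this sketch, not taken from any real test:
+
+```go
+func TestWithHooks(t *testing.T) {
+    suite := NewTestSuite(t, mock.MockK8sClient{})
+
+    // Runs before and after every path in the suite.
+    suite.BeforeEach(func(ctx context.Context, mck Mock) { /* per-path setup */ })
+    suite.AfterEach(func(ctx context.Context, mck Mock) { /* per-path teardown */ })
+
+    suite.Run(Paths(
+        // Evaluated exactly one time across all paths, e.g. to trigger a side effect.
+        Once("shared state is mutated", func(ctx context.Context, _ Mock) { /* side effect */ }),
+        Result("output reflects shared state", func(ctx context.Context, mck Mock) { /* assertions */ }),
+    ))
+}
+```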
+
+#### Setup, tear down, and event triggers
+Setup and teardown nodes can be scheduled before and after each run:
+* `suite.BeforeEach` receives a `func(context.Context, Mock)` function that will run before each path is evaluated. Likewise, `suite.AfterEach` will run after each path is evaluated.
+* `suite.BeforeAll` receives a `func(context.Context, Mock)` function that will run once before all paths are evaluated. Likewise, `suite.AfterAll` will run once after all paths are evaluated.
+
+In addition to the path nodes listed in the section above, a special node type `Once` may be specified to inject a function that will only be evaluated one time across all paths. It can be used to trigger side effects outside of mock client behavior that can impact the output of the function being tested. A sketch of these hooks follows below.
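+
+For instance, a suite using these hooks might look like the following sketch (node bodies are elided, and `mock.MockLinodeMachineClient` is just an example client):
+
+```go
+suite := NewTestSuite(t, mock.MockLinodeMachineClient{})
+
+// Runs before every path is evaluated.
+suite.BeforeEach(func(ctx context.Context, mck Mock) {/* per-path setup */})
+
+// Runs one time, after all paths have been evaluated.
+suite.AfterAll(func(ctx context.Context, mck Mock) {/* final cleanup */})
+
+suite.Run(Paths(
+	// Runs once, at the start of the first path evaluated.
+	Once("seed shared state", func(ctx context.Context, mck Mock) {/* one-time side effect */}),
+	Call("get instance", func(ctx context.Context, mck Mock) {/* mock client expectations */}),
+	Result("success", func(ctx context.Context, mck Mock) {/* assertions */}),
+))
+```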
+
+#### Control flow
+When `Run` is called on a test suite, paths are evaluated in parallel using `t.Parallel()`. Each path is run in a separate `t.Run` call, and each test run is named according to the descriptions specified in its nodes.
+
+To help with visualizing the paths that will be rendered from nodes, a `Describe` helper method can be called, which returns a slice of strings describing each path. For instance, the following shows the output of `Describe` on the paths declared in the example above:
+
+```go
+paths := Paths(/* see example above */)
+
+paths.Describe() /* [
+	"instance exists and is not offline > success",
+	"instance does not exist > not able to be created > error",
+	"instance does not exist > able to be created > able to boot > success",
+	"instance does not exist > able to be created > not able to boot > error",
+	"instance exists but is offline > able to boot > success",
+	"instance exists but is offline > not able to boot > error"
+] */
+```
+
+#### Testing controllers
+CAPL uses controller-runtime's [envtest](https://book.kubebuilder.io/reference/envtest) package, which runs an instance of etcd and the Kubernetes API server for testing controllers. The test setup uses [ginkgo](https://onsi.github.io/ginkgo/) as its test runner, as well as [gomega](https://onsi.github.io/gomega/) for assertions.
+
+`mocktest` is also recommended when writing tests for controllers. The following is another contrived example of how to use it within the context of a Ginkgo `Describe` node:
+
+```go
+// Add the `Ordered` decorator so that tests run in order versus in parallel.
+// This is needed when relying on EnvTest for managing Kubernetes API server state.
+var _ = Describe("test name", Ordered, func() {
+	// Create a mocktest controller test suite.
+	suite := NewControllerTestSuite(GinkgoT(), mock.MockLinodeMachineClient{})
+
+	obj := infrav1alpha1.LinodeMachine{
+		ObjectMeta: metav1.ObjectMeta{/* ... */},
+		Spec:       infrav1alpha1.LinodeMachineSpec{/* ... */},
+	}
+
+	suite.Run(Paths(
+		Once("create resource", func(ctx context.Context, _ Mock) {
+			// Use the EnvTest k8sClient to create the resource in the test server
+			Expect(k8sClient.Create(ctx, &obj)).To(Succeed())
+		}),
+		Call("create a linode", func(ctx context.Context, mck Mock) {
+			mck.MachineClient.EXPECT().CreateInstance(ctx, gomock.Any(), gomock.Any()).Return(&linodego.Instance{/* ... */}, nil)
+		}),
+		Result("update the resource with info about the linode", func(ctx context.Context, mck Mock) {
+			reconciler := LinodeMachineReconciler{
+				// Configure the reconciler to use the mock client for this test path
+				LinodeClient: mck.MachineClient,
+				// Use a managed recorder for capturing events published during this test
+				Recorder: suite.Recorder(),
+				// Use a managed logger for capturing logs written during the test
+				Logger: suite.Logger(), // Note: This isn't a real struct field. A logger is configured elsewhere.
+			}
+
+			_, err := reconciler.Reconcile(ctx, reconcile.Request{/* ... */})
+			Expect(err).NotTo(HaveOccurred())
+
+			// Fetch the updated object from the test server and confirm it was updated
+			Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(&obj), &obj)).To(Succeed())
+			Expect(obj.Status.Ready).To(BeTrue())
+
+			// Check for expected events and logs
+			Expect(suite.Events()).To(ContainSubstring("Linode created!"))
+			Expect(suite.Logs()).To(ContainSubstring("Linode created!"))
+		}),
+	))
+})
+```
 
 ## E2E Tests
 For e2e tests CAPL uses the [Chainsaw project](https://kyverno.github.io/chainsaw) which leverages `kind` and `tilt` to
diff --git a/mock/mocktest/controller_suite.go b/mock/mocktest/controller_suite.go
deleted file mode 100644
index 330dd55c6..000000000
--- a/mock/mocktest/controller_suite.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package mocktest
-
-import (
-	"bytes"
-	"errors"
-
-	"github.com/go-logr/logr"
-	"github.com/onsi/ginkgo/v2"
-	"go.uber.org/mock/gomock"
-	"k8s.io/client-go/tools/record"
-	"sigs.k8s.io/controller-runtime/pkg/log/zap"
-
-	"github.com/linode/cluster-api-provider-linode/mock"
-)
-
-const recorderBufferSize = 20
-
-type ctlrSuite struct {
-	clients  []mock.MockClient
-	recorder *record.FakeRecorder
-	logger   logr.Logger
-	logs     *bytes.Buffer
-}
-
-// NewControllerTestSuite creates a test suite for a controller.
-// It generates new mock clients for each test path it runs.
-func NewControllerTestSuite(clients ...mock.MockClient) *ctlrSuite {
-	if len(clients) == 0 {
-		panic(errors.New("unable to run tests without clients"))
-	}
-
-	logs := bytes.Buffer{}
-
-	return &ctlrSuite{
-		clients: clients,
-		// Create a recorder with a buffered channel for consuming event strings.
-		recorder: record.NewFakeRecorder(recorderBufferSize),
-		// Create a logger that writes to both GinkgoWriter and the local logs buffer
-		logger: zap.New(zap.WriteTo(ginkgo.GinkgoWriter), zap.WriteTo(&logs)),
-		logs:   &logs,
-	}
-}
-
-// Recorder returns a *FakeRecorder for recording events published in a reconcile loop.
-// Events can be consumed within test paths by receiving from a MockContext.Events() channel.
-func (c *ctlrSuite) Recorder() *record.FakeRecorder {
-	return c.recorder
-}
-
-// Logger returns a logr.Logger for capturing logs written during a reconcile loop.
-// Log output can be read within test paths by calling MockContext.Logs().
-func (c *ctlrSuite) Logger() logr.Logger {
-	return c.logger
-}
-
-// Run executes Ginkgo test specs for each computed test path.
-// It manages mock client instantiation, events, and logging.
-func (c *ctlrSuite) Run(paths []path) {
-	for _, path := range paths {
-		ginkgo.It(path.Describe(), func(ctx ginkgo.SpecContext) {
-			mockCtrl := gomock.NewController(ginkgo.GinkgoT())
-			defer mockCtrl.Finish()
-
-			m := Mock{
-				TestReporter: mockCtrl.T,
-				recorder:     c.recorder,
-				logs:         c.logs,
-			}
-
-			for _, client := range c.clients {
-				m.MockClients.Build(client, mockCtrl)
-			}
-
-			path.Run(ctx, m)
-
-			// Flush the channel if any events were not consumed.
- for len(c.recorder.Events) > 0 { - <-c.recorder.Events - } - - // Flush the logs buffer for each test run - c.logs.Reset() - }) - } -} diff --git a/mock/mocktest/node.go b/mock/mocktest/node.go index 77015a639..0bbdc9ed4 100644 --- a/mock/mocktest/node.go +++ b/mock/mocktest/node.go @@ -1,12 +1,9 @@ package mocktest import ( - "bytes" "context" - "fmt" "go.uber.org/mock/gomock" - "k8s.io/client-go/tools/record" "github.com/linode/cluster-api-provider-linode/mock" ) @@ -16,100 +13,126 @@ type Mock struct { gomock.TestReporter mock.MockClients - recorder *record.FakeRecorder - logs *bytes.Buffer + endOfPath bool } -// Events returns a channel for receiving event strings for a single test path. -func (m Mock) Events() <-chan string { - return m.recorder.Events +// Common interface for defining permutations of test paths as a tree. +type node interface { + update(staged, committed []path) (st, com []path) } -// Logs returns a string of all log output written during a single test path. -func (m Mock) Logs() string { - return m.logs.String() +// A container for describing and holding a function. +type fn struct { + text string + does func(context.Context, Mock) + ran bool } // Call declares a function for mocking method calls on a single mock client. func Call(text string, does func(context.Context, Mock)) call { return call{ - text: fmt.Sprintf("Call(%s)", text), + text: text, does: does, } } +// Contains a function for mocking method calls on a single mock client. +type call fn + +// Adds the call to each staged path. +func (c call) update(staged, committed []path) (st, com []path) { + for idx, pth := range staged { + newCalls := make([]call, len(pth.calls), len(pth.calls)+1) + copy(newCalls, pth.calls) + staged[idx] = path{ + once: pth.once, + calls: append(newCalls, c), + } + } + + return staged, committed +} + // Result terminates a test path with a function that tests the effects of mocked method calls. func Result(text string, does func(context.Context, Mock)) result { return result{ - text: fmt.Sprintf("Result(%s)", text), + text: text, does: does, } } +// Contains a function that tests the effects of mocked method calls. +type result fn + +// Commits each staged path with the result. +func (r result) update(staged, committed []path) (st, com []path) { + for idx := range staged { + staged[idx].result = r + } + + committed = append(committed, staged...) + staged = []path{} + + return staged, committed +} + // Once declares a function that runs one time when executing all test paths. // It is triggered at the beginning of the leftmost test path where it is inserted. -func Once(text string, does func(context.Context)) once { +func Once(text string, does func(context.Context, Mock)) once { return once{ - text: fmt.Sprintf("Once(%s)", text), + text: text, does: does, } } -// Case declares both a Mock and a Result for terminating a test path. -func Case(c call, r result) leaf { - return leaf{c, r} -} +// Contains a function that will only run once. +type once fn -// Either declares multiple nodes that fork out into unique test paths. -func Either(nodes ...prong) fork { - return nodes -} +// Adds once to the first staged path. +// It will only be invoked once in the first path to be evaluated. +func (o once) update(staged, committed []path) (st, com []path) { + staged[0].once = append(staged[0].once, &o) -// Common interface for defining permutations of test paths as a tree. -type node interface { - node() + return staged, committed } -// A container for describing and holding a function. 
-type fn struct { - text string - does func(context.Context, Mock) +// Path declares a sequence of nodes belonging to the same test path. +func Path(nodes ...node) allOf { + return nodes } -// Contains a function for mocking method calls on a single mock client. -type call fn +// A container for defining nodes added to the same test path. +type allOf []node -// Contains a function that tests the effects of mocked method calls. -type result fn +// Adds all nodes to each staged path, committing paths whenever a result is included. +func (a allOf) update(staged, committed []path) (st, com []path) { + for _, impl := range a { + staged, committed = impl.update(staged, committed) + } -// Contains a function for an event trigger that runs once. -type once struct { - text string - does func(context.Context) - described bool - ran bool + return staged, committed } -type leaf struct { - call - result +// Either declares multiple nodes that fork out into unique test paths. +func Either(nodes ...node) oneOf { + return nodes } -// A container for defining nodes that fork out into new test paths. -type fork []prong +// A container for defining nodes that fork out into unique test paths. +type oneOf []node -// Common interface for nodes that fork out into new test paths. -type prong interface { - prong() -} +// Generates new permutations of each staged path with each node. +// Each node should never occur on the same path. +func (o oneOf) update(staged, committed []path) (st, com []path) { + var permutations []path -func (call) node() {} -func (result) node() {} -func (once) node() {} -func (leaf) node() {} -func (fork) node() {} + for _, pth := range staged { + for _, impl := range o { + var localPerms []path + localPerms, committed = impl.update([]path{pth}, committed) + permutations = append(permutations, localPerms...) + } + } -func (call) prong() {} -func (result) prong() {} -func (once) prong() {} -func (leaf) prong() {} + return permutations, committed +} diff --git a/mock/mocktest/path.go b/mock/mocktest/path.go index c6312f763..e1fde3713 100644 --- a/mock/mocktest/path.go +++ b/mock/mocktest/path.go @@ -1,7 +1,7 @@ package mocktest import ( - "fmt" + "errors" "strings" ) @@ -15,13 +15,7 @@ type path struct { // Describe generates a string of all nodes belonging to a test path. func (p path) Describe() string { - text := make([]string, 0, len(p.once)+len(p.calls)+1) - for _, o := range p.once { - if !o.described { - text = append(text, o.text) - o.described = true - } - } + text := make([]string, 0, len(p.calls)+1) for _, c := range p.calls { text = append(text, c.text) } @@ -29,122 +23,58 @@ func (p path) Describe() string { return strings.Join(text, " > ") } +type paths []path + +func (ps paths) Describe() []string { + texts := make([]string, 0, len(ps)) + described := make(map[*once]bool) + + for _, pth := range ps { + var text strings.Builder + for _, o := range pth.once { + if !described[o] { + text.WriteString(o.text + " > ") + described[o] = true + } + } + text.WriteString(pth.Describe()) + texts = append(texts, text.String()) + } + + return texts +} + // Paths declares one or more test paths with mock clients. // It traverses each node and their children, returning a list of permutations, // each representing a different test path as specified and evaluated in order. -// New permutations are defined by -// 1. Terminal nodes i.e. entries that have a function for asserting results. -// 2. Nodes belonging to a fork i.e. multiple entries non-occurring on the same path. 
-func Paths(nodes ...node) []path { +func Paths(nodes ...node) paths { if len(nodes) == 0 { return nil } - tmp := []path{} - final := []path{} - - for idx, n := range nodes { - // If all paths are closed, make a new path - if len(tmp) == 0 { - tmp = append(tmp, path{}) - } - - switch impl := n.(type) { - // A once node should only be added to the first path. - // It will only invoked once in the first path evaluated. - case once: - tmp[0].once = append(tmp[0].once, &impl) - - // A call node should be appended to all open paths. - case call: - // Add new entry to each open path - for j := range tmp { - tmp[j].calls = append(tmp[j].calls, impl) - } - - // Panic if any paths are open at the end - if idx == len(nodes)-1 { - panic(fmt.Errorf("unresolved path at index %d", idx)) - } - - // A result node should terminate all open paths. - case result: - // Close all open paths - for j := range tmp { - tmp[j].result = impl - } - - // Commit all closed paths - final = append(final, tmp...) - tmp = nil - - // A leaf node contains both a call node and a result node. - // The call is appended to all open paths, and then immediately closed with the result. - case leaf: - // Add new entry to each open path and close it - for j := range tmp { - tmp[j].calls = append(tmp[j].calls, impl.call) - tmp[j].result = impl.result - } - - // Commit all closed paths - final = append(final, tmp...) - tmp = nil - - // A fork node is a list of call or leaf nodes that should not occur on the same path. - case fork: - var newTmp []path - var open bool - - // Make new version of each open path with each new entry - for _, pth := range tmp { - for _, fi := range impl { - switch forkImpl := fi.(type) { - case once: - open = true - newTmp = append(newTmp, path{ - once: append(pth.once, &forkImpl), - calls: pth.calls, - }) - - case call: - open = true - // Duplicate open paths with the new entry - newTmp = append(newTmp, path{ - once: pth.once, - calls: append(pth.calls, forkImpl), - }) + staged, committed := rPaths(nil, nil, nodes) + if len(staged) > 0 { + panic(errors.New("unresolved path detected")) + } - case result: - final = append(final, path{ - once: pth.once, - calls: pth.calls, - result: forkImpl, - }) + return committed +} - // A leaf in a fork is terminal - case leaf: - // Avoid mutating pth.calls slice by allocating new slice. - // Calling append(pth.calls, ...) shares state. - newCalls := make([]call, 0, len(pth.calls)) - newCalls = append(newCalls, pth.calls...) - final = append(final, path{ - once: pth.once, - calls: append(newCalls, forkImpl.call), - result: forkImpl.result, - }) - } - } - } +func rPaths(staged, committed []path, each []node) (st, com []path) { + if len(each) == 0 { + return staged, committed + } - tmp = newTmp + // Get the current node to add to staged/committed. + head, tail := each[0], each[1:] - // Panic if any paths are open at the end - if open && idx == len(nodes)-1 { - panic(fmt.Errorf("unresolved path at index %d", idx)) - } - } + // If there are no open paths, make a new path. + if len(staged) == 0 { + staged = append(staged, path{}) } - return final + // Add to staged/committed. + staged, committed = head.update(staged, committed) + + return rPaths(staged, committed, tail) } diff --git a/mock/mocktest/path_run.go b/mock/mocktest/path_run.go index 25e64e3fa..5b7ba73ae 100644 --- a/mock/mocktest/path_run.go +++ b/mock/mocktest/path_run.go @@ -8,43 +8,41 @@ import ( ) // Run evaluates all declared mock client methods and assertions for the given test path. 
-func (p path) Run(ctx context.Context, m Mock) {
-	if m.TestReporter == nil {
+func (p path) Run(ctx context.Context, mck Mock) {
+	if mck.TestReporter == nil {
 		panic("Mock requires TestReporter, i.e. *testing.T, GinkgoT()")
 	}
 
 	for _, o := range p.once {
-		evalOnce(ctx, m, o)
+		evalOnce(ctx, mck, o)
 	}
 	for _, c := range p.calls {
-		evalFn(ctx, m, fn(c))
+		evalFn(ctx, mck, fn(c))
 	}
-	evalFn(ctx, m, fn(p.result))
+
+	// The result is evaluated at the end of the path.
+	mck.endOfPath = true
+
+	evalFn(ctx, mck, fn(p.result))
 }
 
-func evalFn(ctx context.Context, m Mock, fun fn) {
-	switch tt := m.TestReporter.(type) {
+func evalFn(ctx context.Context, mck Mock, fun fn) {
+	switch tt := mck.TestReporter.(type) {
 	case *testing.T:
 		tt.Log(fun.text)
 	case ginkgo.GinkgoTInterface:
 		ginkgo.By(fun.text)
 	}
 
-	fun.does(ctx, m)
+	fun.does(ctx, mck)
 }
 
-func evalOnce(ctx context.Context, m Mock, fun *once) {
+func evalOnce(ctx context.Context, mck Mock, fun *once) {
 	if fun.ran {
 		return
 	}
 
-	switch tt := m.TestReporter.(type) {
-	case *testing.T:
-		tt.Log(fun.text)
-	case ginkgo.GinkgoTInterface:
-		ginkgo.By(fun.text)
-	}
+	evalFn(ctx, mck, fn(*fun))
 
-	fun.does(ctx)
 	fun.ran = true
 }
diff --git a/mock/mocktest/path_test.go b/mock/mocktest/path_test.go
index 17a53a40a..c2455f644 100644
--- a/mock/mocktest/path_test.go
+++ b/mock/mocktest/path_test.go
@@ -36,11 +36,11 @@ var _ = Describe("k8s client", Label("k8sclient"), func() {
 	})
 
 	for _, path := range Paths(
-		Call("fetch object", func(ctx context.Context, m Mock) {
-			m.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
+		Call("fetch object", func(ctx context.Context, mck Mock) {
+			mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
 		}),
-		Result("no error", func(ctx context.Context, m Mock) {
-			Expect(contrivedCalls(ctx, m)).To(Succeed())
+		Result("no error", func(ctx context.Context, mck Mock) {
+			Expect(contrivedCalls(ctx, mck)).To(Succeed())
 		}),
 	) {
 		It(path.Describe(), func(ctx SpecContext) {
@@ -66,24 +66,24 @@ var _ = Describe("multiple clients", Label("multiple"), func() {
 	})
 
 	for _, path := range Paths(
-		Call("read object", func(ctx context.Context, m Mock) {
-			m.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
+		Call("read object", func(ctx context.Context, mck Mock) {
+			mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
 		}),
 		Either(
-			Case(
-				Call("underlying exists", func(ctx context.Context, m Mock) {
-					m.MachineClient.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Return(&linodego.Instance{ID: 1}, nil)
+			Path(
+				Call("underlying exists", func(ctx context.Context, mck Mock) {
+					mck.MachineClient.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Return(&linodego.Instance{ID: 1}, nil)
 				}),
-				Result("no error", func(ctx context.Context, m Mock) {
-					Expect(contrivedCalls(ctx, m)).To(Succeed())
+				Result("no error", func(ctx context.Context, mck Mock) {
+					Expect(contrivedCalls(ctx, mck)).To(Succeed())
 				}),
 			),
-			Case(
-				Call("underlying does not exist", func(ctx context.Context, m Mock) {
-					m.MachineClient.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Return(nil, errors.New("404"))
+			Path(
+				Call("underlying does not exist", func(ctx context.Context, mck Mock) {
+					mck.MachineClient.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Return(nil, errors.New("404"))
 				}),
-				Result("error", func(ctx context.Context, m Mock) {
-					Expect(contrivedCalls(ctx, m)).NotTo(Succeed())
+				Result("error", func(ctx context.Context, mck Mock) {
+					Expect(contrivedCalls(ctx,
mck)).NotTo(Succeed()) }), ), ), @@ -100,16 +100,16 @@ var _ = Describe("multiple clients", Label("multiple"), func() { } }) -func contrivedCalls(ctx context.Context, m Mock) error { +func contrivedCalls(ctx context.Context, mck Mock) error { GinkgoHelper() - err := m.K8sClient.Get(ctx, client.ObjectKey{}, &infrav1alpha1.LinodeMachine{}) + err := mck.K8sClient.Get(ctx, client.ObjectKey{}, &infrav1alpha1.LinodeMachine{}) if err != nil { return err } - if m.MachineClient != nil { - _, err = m.MachineClient.CreateInstance(ctx, linodego.InstanceCreateOptions{}) + if mck.MachineClient != nil { + _, err = mck.MachineClient.CreateInstance(ctx, linodego.InstanceCreateOptions{}) if err != nil { return err } @@ -121,11 +121,12 @@ func contrivedCalls(ctx context.Context, m Mock) error { func TestPaths(t *testing.T) { t.Parallel() - for _, tc := range []struct { + for _, testCase := range []struct { name string input []node - output []path - panicErr error + output paths + describe []string + panic bool }{ { name: "basic", @@ -133,42 +134,45 @@ func TestPaths(t *testing.T) { call{text: "0"}, result{text: "0"}, }, - output: []path{ + output: paths{ { calls: []call{{text: "0"}}, result: result{text: "0"}, }, }, + describe: []string{ + "0 > 0", + }, }, { name: "open", input: []node{ - call{}, + call{text: "0"}, }, - panicErr: errors.New("unresolved path at index 0"), + panic: true, }, { name: "open fork", input: []node{ call{text: "0"}, - fork{ + oneOf{ call{text: "1"}, - leaf{call{text: "1"}, result{text: "1"}}, + allOf{call{text: "1"}, result{text: "1"}}, }, }, - panicErr: errors.New("unresolved path at index 1"), + panic: true, }, { name: "split", input: []node{ call{text: "0"}, - fork{ + oneOf{ call{text: "1"}, call{text: "2"}, }, result{text: "4"}, }, - output: []path{ + output: paths{ { calls: []call{ {text: "0"}, @@ -184,18 +188,90 @@ func TestPaths(t *testing.T) { result: result{text: "4"}, }, }, + describe: []string{ + "0 > 1 > 4", + "0 > 2 > 4", + }, + }, + { + name: "recursive", + input: []node{ + oneOf{ + oneOf{ + call{text: "0"}, + oneOf{ + call{text: "1"}, + call{text: "2"}, + }, + }, + oneOf{ + call{text: "3"}, + oneOf{ + call{text: "4"}, + call{text: "5"}, + }, + }, + }, + result{text: "6"}, + }, + output: paths{ + { + calls: []call{ + {text: "0"}, + }, + result: result{text: "6"}, + }, + { + calls: []call{ + {text: "1"}, + }, + result: result{text: "6"}, + }, + { + calls: []call{ + {text: "2"}, + }, + result: result{text: "6"}, + }, + { + calls: []call{ + {text: "3"}, + }, + result: result{text: "6"}, + }, + { + calls: []call{ + {text: "4"}, + }, + result: result{text: "6"}, + }, + { + calls: []call{ + {text: "5"}, + }, + result: result{text: "6"}, + }, + }, + describe: []string{ + "0 > 6", + "1 > 6", + "2 > 6", + "3 > 6", + "4 > 6", + "5 > 6", + }, }, { name: "close order", input: []node{ call{text: "0"}, - fork{ + oneOf{ call{text: "1"}, result{text: "2"}, }, result{text: "3"}, }, - output: []path{ + output: paths{ { calls: []call{ {text: "0"}, @@ -210,20 +286,24 @@ func TestPaths(t *testing.T) { result: result{text: "3"}, }, }, + describe: []string{ + "0 > 2", + "0 > 1 > 3", + }, }, { name: "path order", input: []node{ - fork{ - leaf{call{text: "0"}, result{text: "0"}}, + oneOf{ + allOf{call{text: "0"}, result{text: "0"}}, call{text: "1"}, }, - fork{ - leaf{call{text: "2"}, result{text: "2"}}, - leaf{call{text: "3"}, result{text: "3"}}, + oneOf{ + allOf{call{text: "2"}, result{text: "2"}}, + allOf{call{text: "3"}, result{text: "3"}}, }, }, - output: []path{ + output: paths{ { calls: 
[]call{{text: "0"}},
 					result: result{text: "0"},
 				},
@@ -243,31 +323,36 @@ func TestPaths(t *testing.T) {
 					result: result{text: "3"},
 				},
 			},
+			describe: []string{
+				"0 > 0",
+				"1 > 2 > 2",
+				"1 > 3 > 3",
+			},
 		},
 		{
 			name: "once",
 			input: []node{
 				once{text: "0"},
-				fork{
-					leaf{call{text: "1"}, result{text: "1"}},
+				oneOf{
+					allOf{call{text: "1"}, result{text: "1"}},
 					call{text: "1"},
 				},
-				fork{
-					leaf{call{text: "2"}, result{text: "2"}},
+				oneOf{
+					allOf{call{text: "2"}, result{text: "2"}},
 					call{text: "2"},
 				},
 				result{text: "3"},
 				once{text: "4"},
-				fork{
-					leaf{call{text: "5"}, result{text: "5"}},
+				oneOf{
+					allOf{call{text: "5"}, result{text: "5"}},
 					call{text: "5"},
 				},
-				fork{
-					leaf{call{text: "6"}, result{text: "6"}},
-					leaf{call{text: "7"}, result{text: "7"}},
+				oneOf{
+					allOf{call{text: "6"}, result{text: "6"}},
+					allOf{call{text: "7"}, result{text: "7"}},
 				},
 			},
-			output: []path{
+			output: paths{
 				{
 					once:  []*once{{text: "0"}},
 					calls: []call{{text: "1"}},
@@ -311,22 +396,30 @@ func TestPaths(t *testing.T) {
 					result: result{text: "7"},
 				},
 			},
+			describe: []string{
+				"0 > 1 > 1",
+				"1 > 2 > 2",
+				"1 > 2 > 3",
+				"4 > 5 > 5",
+				"5 > 6 > 6",
+				"5 > 7 > 7",
+			},
 		},
 		{
 			name: "no shared state",
 			input: []node{
 				call{text: "mock1"},
-				fork{
-					leaf{call{text: "mock1.1"}, result{text: "result1"}},
+				oneOf{
+					allOf{call{text: "mock1.1"}, result{text: "result1"}},
 					call{text: "mock2"},
 				},
 				call{text: "mock3"},
-				fork{
-					leaf{call{text: "mock3.1"}, result{text: "result2"}},
-					leaf{call{text: "mock3.2"}, result{text: "result3"}},
+				oneOf{
+					allOf{call{text: "mock3.1"}, result{text: "result2"}},
+					allOf{call{text: "mock3.2"}, result{text: "result3"}},
 				},
 			},
-			output: []path{
+			output: paths{
 				{
 					calls: []call{
 						{text: "mock1"},
@@ -353,20 +446,58 @@ func TestPaths(t *testing.T) {
 					result: result{text: "result3"},
 				},
 			},
+			describe: []string{
+				"mock1 > mock1.1 > result1",
+				"mock1 > mock2 > mock3 > mock3.1 > result2",
+				"mock1 > mock2 > mock3 > mock3.2 > result3",
+			},
+		},
+		{
+			name: "docs example",
+			input: []node{
+				oneOf{
+					allOf{
+						call{text: "instance exists and is not offline"},
+						result{text: "success"},
+					},
+					allOf{
+						call{text: "instance does not exist"},
+						oneOf{
+							call{text: "able to be created"},
+							allOf{
+								call{text: "not able to be created"},
+								result{text: "error"},
+							},
+						},
+					},
+					call{text: "instance exists but is offline"},
+				},
+				oneOf{
+					allOf{
+						call{text: "able to boot"},
+						result{text: "success"},
+					},
+					allOf{
+						call{text: "not able to boot"},
+						result{text: "error"},
+					},
+				},
+			},
+			output: paths{
+				{
+					calls:  []call{{text: "instance exists and is not offline"}},
+					result: result{text: "success"},
+				},
+				{
+					calls:  []call{{text: "instance does not exist"}, {text: "not able to be created"}},
+					result: result{text: "error"},
+				},
+				{
+					calls:  []call{{text: "instance does not exist"}, {text: "able to be created"}, {text: "able to boot"}},
+					result: result{text: "success"},
+				},
+				{
+					calls:  []call{{text: "instance does not exist"}, {text: "able to be created"}, {text: "not able to boot"}},
+					result: result{text: "error"},
+				},
+				{
+					calls:  []call{{text: "instance exists but is offline"}, {text: "able to boot"}},
+					result: result{text: "success"},
+				},
+				{
+					calls:  []call{{text: "instance exists but is offline"}, {text: "not able to boot"}},
+					result: result{text: "error"},
+				},
+			},
+			describe: []string{
+				"instance exists and is not offline > success",
+				"instance does not exist > not able to be created > error",
+				"instance does not exist > able to be created > able to boot > success",
+				"instance does not exist > able to be created > not able to boot > error",
+				"instance exists but is offline > able to boot > success",
+				"instance exists but is offline > not able to boot > error",
+			},
 		},
 	} {
-		t.Run(tc.name, func(t *testing.T) {
+		t.Run(testCase.name, func(t *testing.T) {
 			t.Parallel()
 
-			if tc.panicErr != nil {
-				assert.PanicsWithError(t, tc.panicErr.Error(), func() {
-					Paths(tc.input...)
+			if testCase.panic {
+				assert.Panics(t, func() {
+					Paths(testCase.input...)
 				})
 				return
 			}
 
-			actual := Paths(tc.input...)
-			assert.Equal(t, tc.output, actual)
+			actual := Paths(testCase.input...)
+ assert.Equal(t, testCase.output, actual) + assert.Equal(t, testCase.describe, actual.Describe()) }) } } diff --git a/mock/mocktest/suite.go b/mock/mocktest/suite.go index dcbef0570..0abcafd52 100644 --- a/mock/mocktest/suite.go +++ b/mock/mocktest/suite.go @@ -1,44 +1,191 @@ package mocktest import ( + "bytes" "context" "errors" + "strings" "testing" + "github.com/go-logr/logr" + "github.com/onsi/ginkgo/v2" "go.uber.org/mock/gomock" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/log/zap" "github.com/linode/cluster-api-provider-linode/mock" ) type suite struct { - clients []mock.MockClient + clients []mock.MockClient + beforeEach []fn + afterEach []fn + beforeAll []*once + afterAll []*once } -func NewTestSuite(clients ...mock.MockClient) *suite { +func (s *suite) BeforeEach(action func(context.Context, Mock)) { + s.beforeEach = append(s.beforeEach, fn{ + text: "BeforeEach", + does: action, + }) +} + +func (s *suite) AfterEach(action func(context.Context, Mock)) { + s.afterEach = append(s.afterEach, fn{ + text: "AfterEach", + does: action, + }) +} + +func (s *suite) BeforeAll(action func(context.Context, Mock)) { + s.beforeAll = append(s.beforeAll, &once{ + text: "BeforeAll", + does: action, + }) +} + +func (s *suite) AfterAll(action func(context.Context, Mock)) { + s.afterAll = append(s.afterAll, &once{ + text: "AfterAll", + does: action, + }) +} + +func (s *suite) run(t gomock.TestHelper, ctx context.Context, pth path) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mck := Mock{ + TestReporter: t, + } + + for _, client := range s.clients { + mck.MockClients.Build(client, mockCtrl) + } + + for _, fun := range s.beforeAll { + evalOnce(ctx, mck, fun) + } + for _, fun := range s.beforeEach { + evalFn(ctx, mck, fun) + } + + pth.Run(ctx, mck) + + for _, fun := range s.afterEach { + evalFn(ctx, mck, fun) + } + for _, fun := range s.afterAll { + evalOnce(ctx, mck, fun) + } +} + +type standardSuite struct { + suite + + t *testing.T +} + +func NewTestSuite(t *testing.T, clients ...mock.MockClient) *standardSuite { + t.Helper() + if len(clients) == 0 { panic(errors.New("unable to run tests without clients")) } - return &suite{clients: clients} + return &standardSuite{ + suite: suite{clients: clients}, + t: t, + } } -func (s *suite) Run(ctx context.Context, t *testing.T, paths []path) { +func (ss *standardSuite) Run(paths []path) { for _, path := range paths { - t.Run(path.Describe(), func(t *testing.T) { + ss.t.Run(path.Describe(), func(t *testing.T) { t.Parallel() - mockCtrl := gomock.NewController(t) - defer mockCtrl.Finish() + ss.suite.run(t, context.Background(), path) + }) + } +} + +const recorderBufferSize = 20 - m := Mock{ - TestReporter: mockCtrl.T, - } +type ctlrSuite struct { + suite + + ginkgoT ginkgo.FullGinkgoTInterface + recorder *record.FakeRecorder + events string + logger logr.Logger + logs *bytes.Buffer +} + +// NewControllerTestSuite creates a test suite for a controller. +// It generates new mock clients for each test path it runs. +func NewControllerTestSuite(ginkgoT ginkgo.FullGinkgoTInterface, clients ...mock.MockClient) *ctlrSuite { + if len(clients) == 0 { + panic(errors.New("unable to run tests without clients")) + } + + logs := bytes.Buffer{} + + return &ctlrSuite{ + suite: suite{clients: clients}, + ginkgoT: ginkgoT, + // Create a recorder with a buffered channel for consuming event strings. 
+		recorder: record.NewFakeRecorder(recorderBufferSize),
+		// Create a logger that writes to both GinkgoWriter and the local logs buffer
+		logger: zap.New(zap.WriteTo(ginkgo.GinkgoWriter), zap.WriteTo(&logs)),
+		logs:   &logs,
+	}
+}
+
+// Recorder returns a *FakeRecorder for recording events published in a reconcile loop.
+// Events can be consumed within test paths by calling the suite's Events() method.
+func (cs *ctlrSuite) Recorder() *record.FakeRecorder {
+	return cs.recorder
+}
+
+// Logger returns a logr.Logger for capturing logs written during a reconcile loop.
+// Log output can be read within test paths by calling the suite's Logs() method.
+func (cs *ctlrSuite) Logger() logr.Logger {
+	return cs.logger
+}
+
+// Events returns a string of all events recorded so far in the suite run.
+func (cs *ctlrSuite) Events() string {
+	var strBuilder strings.Builder
+	for len(cs.recorder.Events) > 0 {
+		strBuilder.WriteString(<-cs.recorder.Events)
+	}
+
+	cs.events += strBuilder.String()
+
+	return cs.events
+}
+
+// Logs returns a string of all log output written during a single test path.
+func (cs *ctlrSuite) Logs() string {
+	return cs.logs.String()
+}
+
+// Run executes Ginkgo test specs for each computed test path.
+// It manages mock client instantiation, events, and logging.
+func (cs *ctlrSuite) Run(paths []path) {
+	for _, path := range paths {
+		ginkgo.It(path.Describe(), func(ctx ginkgo.SpecContext) {
+			cs.suite.run(cs.ginkgoT, ctx, path)
 
-			for _, client := range s.clients {
-				m.MockClients.Build(client, mockCtrl)
+			// Flush the channel if any events were not consumed
+			// i.e. Events was never called
+			for len(cs.recorder.Events) > 0 {
+				<-cs.recorder.Events
 			}
 
-			path.Run(ctx, m)
+			// Flush the logs buffer
+			cs.logs.Reset()
 		})
 	}
 }