
Commit

adapt linodecluster controller tests to new mocktest changes
amold1 committed Apr 23, 2024
1 parent 2cb7633 commit b306c42
Showing 12 changed files with 1,019 additions and 762 deletions.
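At a glance, the adaptation to the new mocktest helpers follows one pattern across the test files below: NewTestSuite now takes the *testing.T first and Run no longer takes a context, Case nodes become Path nodes, and the mock callback parameter is renamed from m to mck. A condensed before/after sketch, assembled from the hunks below (illustrative only, not a complete test):

// Before: suite built from the mock alone; context and t passed to Run; Case branches;
// callback parameter named m.
NewTestSuite(mock.MockK8sClient{}).Run(context.Background(), t, Paths(
    Either(
        Case(
            Call("able to patch", func(ctx context.Context, m Mock) {
                m.K8sClient.EXPECT().Patch(ctx, gomock.Any(), gomock.Any()).Return(nil)
            }),
            Result("finalizer added", func(ctx context.Context, m Mock) { /* assertions */ }),
        ),
    ),
))

// After: t moves into NewTestSuite, Run takes only the paths, Case becomes Path,
// and the callback parameter is conventionally named mck.
NewTestSuite(t, mock.MockK8sClient{}).Run(Paths(
    Either(
        Path(
            Call("able to patch", func(ctx context.Context, mck Mock) {
                mck.K8sClient.EXPECT().Patch(ctx, gomock.Any(), gomock.Any()).Return(nil)
            }),
            Result("finalizer added", func(ctx context.Context, mck Mock) { /* assertions */ }),
        ),
    ),
))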
cloud/scope/cluster.go — 4 changes: 2 additions & 2 deletions
@@ -72,7 +72,7 @@ func NewClusterScope(ctx context.Context, apiKey string, params ClusterScopePara
 }

 return &ClusterScope{
-client: params.Client,
+Client: params.Client,
 Cluster: params.Cluster,
 LinodeClient: linodeClient,
 LinodeCluster: params.LinodeCluster,
@@ -82,7 +82,7 @@ func NewClusterScope(ctx context.Context, apiKey string, params ClusterScopePara

 // ClusterScope defines the basic context for an actuator to operate upon.
 type ClusterScope struct {
-client K8sClient
+Client K8sClient
 PatchHelper *patch.Helper
 LinodeClient LinodeNodeBalancerClient
 Cluster *clusterv1.Cluster
cloud/scope/machine_test.go — 108 changes: 54 additions & 54 deletions
@@ -112,25 +112,25 @@ func TestValidateMachineScopeParams(t *testing.T) {
 func TestMachineScopeAddFinalizer(t *testing.T) {
 t.Parallel()

-NewTestSuite(mock.MockK8sClient{}).Run(context.Background(), t, Paths(
-Call("scheme 1", func(ctx context.Context, m Mock) {
-m.K8sClient.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme {
+NewTestSuite(t, mock.MockK8sClient{}).Run(Paths(
+Call("scheme 1", func(ctx context.Context, mck Mock) {
+mck.K8sClient.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme {
 s := runtime.NewScheme()
 infrav1alpha1.AddToScheme(s)
 return s
 })
 }),
 Either(
-Call("scheme 2", func(ctx context.Context, m Mock) {
-m.K8sClient.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme {
+Call("scheme 2", func(ctx context.Context, mck Mock) {
+mck.K8sClient.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme {
 s := runtime.NewScheme()
 infrav1alpha1.AddToScheme(s)
 return s
 })
 }),
-Result("has finalizer", func(ctx context.Context, m Mock) {
+Result("has finalizer", func(ctx context.Context, mck Mock) {
 mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{
-Client: m.K8sClient,
+Client: mck.K8sClient,
 Cluster: &clusterv1.Cluster{},
 Machine: &clusterv1.Machine{},
 LinodeCluster: &infrav1alpha1.LinodeCluster{},
@@ -147,13 +147,13 @@ func TestMachineScopeAddFinalizer(t *testing.T) {
 }),
 ),
 Either(
-Case(
-Call("able to patch", func(ctx context.Context, m Mock) {
-m.K8sClient.EXPECT().Patch(ctx, gomock.Any(), gomock.Any()).Return(nil)
+Path(
+Call("able to patch", func(ctx context.Context, mck Mock) {
+mck.K8sClient.EXPECT().Patch(ctx, gomock.Any(), gomock.Any()).Return(nil)
 }),
-Result("finalizer added", func(ctx context.Context, m Mock) {
+Result("finalizer added", func(ctx context.Context, mck Mock) {
 mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{
-Client: m.K8sClient,
+Client: mck.K8sClient,
 Cluster: &clusterv1.Cluster{},
 Machine: &clusterv1.Machine{},
 LinodeCluster: &infrav1alpha1.LinodeCluster{},
@@ -165,13 +165,13 @@ func TestMachineScopeAddFinalizer(t *testing.T) {
 assert.Equal(t, mScope.LinodeMachine.Finalizers[0], infrav1alpha1.GroupVersion.String())
 }),
 ),
-Case(
-Call("unable to patch", func(ctx context.Context, m Mock) {
-m.K8sClient.EXPECT().Patch(ctx, gomock.Any(), gomock.Any()).Return(errors.New("fail"))
+Path(
+Call("unable to patch", func(ctx context.Context, mck Mock) {
+mck.K8sClient.EXPECT().Patch(ctx, gomock.Any(), gomock.Any()).Return(errors.New("fail"))
 }),
-Result("error", func(ctx context.Context, m Mock) {
+Result("error", func(ctx context.Context, mck Mock) {
 mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{
-Client: m.K8sClient,
+Client: mck.K8sClient,
 Cluster: &clusterv1.Cluster{},
 Machine: &clusterv1.Machine{},
 LinodeCluster: &infrav1alpha1.LinodeCluster{},
@@ -189,16 +189,16 @@ func TestMachineScopeAddFinalizer(t *testing.T) {
 func TestNewMachineScope(t *testing.T) {
 t.Parallel()

-NewTestSuite(mock.MockK8sClient{}).Run(context.Background(), t, Paths(
+NewTestSuite(t, mock.MockK8sClient{}).Run(Paths(
 Either(
-Result("invalid params", func(ctx context.Context, m Mock) {
+Result("invalid params", func(ctx context.Context, mck Mock) {
 mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{})
 require.ErrorContains(t, err, "is required")
 assert.Nil(t, mScope)
 }),
-Result("no token", func(ctx context.Context, m Mock) {
+Result("no token", func(ctx context.Context, mck Mock) {
 mScope, err := NewMachineScope(ctx, "", MachineScopeParams{
-Client: m.K8sClient,
+Client: mck.K8sClient,
 Cluster: &clusterv1.Cluster{},
 Machine: &clusterv1.Machine{},
 LinodeCluster: &infrav1alpha1.LinodeCluster{},
@@ -207,13 +207,13 @@ func TestNewMachineScope(t *testing.T) {
 require.ErrorContains(t, err, "failed to create linode client")
 assert.Nil(t, mScope)
 }),
-Case(
-Call("no secret", func(ctx context.Context, m Mock) {
-m.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()).Return(apierrors.NewNotFound(schema.GroupResource{}, "example"))
+Path(
+Call("no secret", func(ctx context.Context, mck Mock) {
+mck.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()).Return(apierrors.NewNotFound(schema.GroupResource{}, "example"))
 }),
-Result("error", func(ctx context.Context, m Mock) {
+Result("error", func(ctx context.Context, mck Mock) {
 mScope, err := NewMachineScope(ctx, "", MachineScopeParams{
-Client: m.K8sClient,
+Client: mck.K8sClient,
 Cluster: &clusterv1.Cluster{},
 Machine: &clusterv1.Machine{},
 LinodeCluster: &infrav1alpha1.LinodeCluster{},
@@ -232,20 +232,20 @@ func TestNewMachineScope(t *testing.T) {
 ),
 ),
 Either(
-Call("valid scheme", func(ctx context.Context, m Mock) {
-m.K8sClient.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme {
+Call("valid scheme", func(ctx context.Context, mck Mock) {
+mck.K8sClient.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme {
 s := runtime.NewScheme()
 infrav1alpha1.AddToScheme(s)
 return s
 })
 }),
-Case(
-Call("invalid scheme", func(ctx context.Context, m Mock) {
-m.K8sClient.EXPECT().Scheme().Return(runtime.NewScheme())
+Path(
+Call("invalid scheme", func(ctx context.Context, mck Mock) {
+mck.K8sClient.EXPECT().Scheme().Return(runtime.NewScheme())
 }),
-Result("cannot init patch helper", func(ctx context.Context, m Mock) {
+Result("cannot init patch helper", func(ctx context.Context, mck Mock) {
 mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{
-Client: m.K8sClient,
+Client: mck.K8sClient,
 Cluster: &clusterv1.Cluster{},
 Machine: &clusterv1.Machine{},
 LinodeCluster: &infrav1alpha1.LinodeCluster{},
@@ -257,8 +257,8 @@ func TestNewMachineScope(t *testing.T) {
 ),
 ),
 Either(
-Call("credentials in secret", func(ctx context.Context, m Mock) {
-m.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()).
+Call("credentials in secret", func(ctx context.Context, mck Mock) {
+mck.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()).
 DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error {
 *obj = corev1.Secret{
 Data: map[string][]byte{
@@ -268,9 +268,9 @@ func TestNewMachineScope(t *testing.T) {
 return nil
 })
 }),
-Result("default credentials", func(ctx context.Context, m Mock) {
+Result("default credentials", func(ctx context.Context, mck Mock) {
 mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{
-Client: m.K8sClient,
+Client: mck.K8sClient,
 Cluster: &clusterv1.Cluster{},
 Machine: &clusterv1.Machine{},
 LinodeCluster: &infrav1alpha1.LinodeCluster{},
@@ -281,9 +281,9 @@ func TestNewMachineScope(t *testing.T) {
 }),
 ),
 Either(
-Result("credentials from LinodeMachine credentialsRef", func(ctx context.Context, m Mock) {
+Result("credentials from LinodeMachine credentialsRef", func(ctx context.Context, mck Mock) {
 mScope, err := NewMachineScope(ctx, "", MachineScopeParams{
-Client: m.K8sClient,
+Client: mck.K8sClient,
 Cluster: &clusterv1.Cluster{},
 Machine: &clusterv1.Machine{},
 LinodeCluster: &infrav1alpha1.LinodeCluster{},
@@ -299,9 +299,9 @@ func TestNewMachineScope(t *testing.T) {
 require.NoError(t, err)
 assert.NotNil(t, mScope)
 }),
-Result("credentials from LinodeCluster credentialsRef", func(ctx context.Context, m Mock) {
+Result("credentials from LinodeCluster credentialsRef", func(ctx context.Context, mck Mock) {
 mScope, err := NewMachineScope(ctx, "token", MachineScopeParams{
-Client: m.K8sClient,
+Client: mck.K8sClient,
 Cluster: &clusterv1.Cluster{},
 Machine: &clusterv1.Machine{},
 LinodeCluster: &infrav1alpha1.LinodeCluster{
@@ -324,18 +324,18 @@ func TestNewMachineScope(t *testing.T) {
 func TestMachineScopeGetBootstrapData(t *testing.T) {
 t.Parallel()

-NewTestSuite(mock.MockK8sClient{}).Run(context.Background(), t, Paths(
-Call("able to get secret", func(ctx context.Context, m Mock) {
-m.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()).
+NewTestSuite(t, mock.MockK8sClient{}).Run(Paths(
+Call("able to get secret", func(ctx context.Context, mck Mock) {
+mck.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()).
 DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error {
 secret := corev1.Secret{Data: map[string][]byte{"value": []byte("test-data")}}
 *obj = secret
 return nil
 })
 }),
-Result("success", func(ctx context.Context, m Mock) {
+Result("success", func(ctx context.Context, mck Mock) {
 mScope := MachineScope{
-Client: m.K8sClient,
+Client: mck.K8sClient,
 Machine: &clusterv1.Machine{
 Spec: clusterv1.MachineSpec{
 Bootstrap: clusterv1.Bootstrap{
@@ -351,20 +351,20 @@ func TestMachineScopeGetBootstrapData(t *testing.T) {
 assert.Equal(t, data, []byte("test-data"))
 }),
 Either(
-Call("unable to get secret", func(ctx context.Context, m Mock) {
-m.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()).
+Call("unable to get secret", func(ctx context.Context, mck Mock) {
+mck.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()).
 Return(apierrors.NewNotFound(schema.GroupResource{}, "test-data"))
 }),
-Call("secret is missing data", func(ctx context.Context, m Mock) {
-m.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()).
+Call("secret is missing data", func(ctx context.Context, mck Mock) {
+mck.K8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()).
 DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error {
 *obj = corev1.Secret{}
 return nil
 })
 }),
-Result("secret ref missing", func(ctx context.Context, m Mock) {
+Result("secret ref missing", func(ctx context.Context, mck Mock) {
 mScope := MachineScope{
-Client: m.K8sClient,
+Client: mck.K8sClient,
 Machine: &clusterv1.Machine{},
 LinodeMachine: &infrav1alpha1.LinodeMachine{},
 }
@@ -374,9 +374,9 @@ func TestMachineScopeGetBootstrapData(t *testing.T) {
 assert.Empty(t, data)
 }),
 ),
-Result("error", func(ctx context.Context, m Mock) {
+Result("error", func(ctx context.Context, mck Mock) {
 mScope := MachineScope{
-Client: m.K8sClient,
+Client: mck.K8sClient,
 Machine: &clusterv1.Machine{
 Spec: clusterv1.MachineSpec{
 Bootstrap: clusterv1.Bootstrap{
controller/linodecluster_controller.go — 1 change: 1 addition & 0 deletions
@@ -192,6 +192,7 @@ func (r *LinodeClusterReconciler) reconcileDelete(ctx context.Context, logger lo
 if clusterScope.LinodeCluster.Spec.Network.NodeBalancerID == nil {
 logger.Info("NodeBalancer ID is missing, nothing to do")
 controllerutil.RemoveFinalizer(clusterScope.LinodeCluster, infrav1alpha1.GroupVersion.String())
+r.Recorder.Event(clusterScope.LinodeCluster, corev1.EventTypeWarning, "NodeBalancerIDMissing", "NodeBalancer ID is missing, nothing to do")

 return nil
 }