From b7562e1eb58ba2fc3f7f6e4a1f2432b3c3f23b1b Mon Sep 17 00:00:00 2001 From: Thomas Kosiewski Date: Wed, 3 Jul 2024 10:06:01 +0200 Subject: [PATCH 1/3] lint(nilaway): Added nilaway configs to linter Signed-off-by: Thomas Kosiewski --- .custom-gcl.yml | 6 ++++++ .gitignore | 1 + .golangci.yml | 12 +++++++++++- .ignore | 3 +++ Justfile | 3 ++- 5 files changed, 23 insertions(+), 2 deletions(-) create mode 100644 .custom-gcl.yml diff --git a/.custom-gcl.yml b/.custom-gcl.yml new file mode 100644 index 000000000..294f3ff35 --- /dev/null +++ b/.custom-gcl.yml @@ -0,0 +1,6 @@ +# This has to be >= v1.57.0 for module plugin system support. +version: v1.59.1 +plugins: + - module: "go.uber.org/nilaway" + import: "go.uber.org/nilaway/cmd/gclplugin" + version: latest # Or a fixed version for reproducible builds. diff --git a/.gitignore b/.gitignore index a1cacf463..f46b71392 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,4 @@ profile.out *.test tests/__snapshot__ /licenses +custom-gcl diff --git a/.golangci.yml b/.golangci.yml index ba9f9e077..08fe14639 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -5,7 +5,6 @@ linters: disable-all: true enable: - asasalint - - tagalign - asciicheck - bidichk - decorder @@ -28,10 +27,12 @@ linters: - makezero - misspell - nakedret + - nilaway - promlinter - revive - staticcheck - stylecheck + - tagalign - typecheck - unconvert - unused @@ -86,6 +87,15 @@ linters-settings: - xml - form + custom: + nilaway: + type: "module" + description: Static analysis tool to detect potential nil panics in Go code. + settings: + # Settings must be a "map from string to string" to mimic command line flags: the keys are + # flag names and the values are the values to the particular flags. + include-pkgs: "github.com/loft-sh/vcluster" + issues: # Maximum issues count per one linter. Set to 0 to disable. Default is 50. 
max-issues-per-linter: 0 diff --git a/.ignore b/.ignore index 77ca94a30..50ee7cde9 100644 --- a/.ignore +++ b/.ignore @@ -1,2 +1,5 @@ !/.github/ /vendor/ +!.golangci.yml +!.goreleaser.yaml +!.custom-gcl.yml diff --git a/Justfile b/Justfile index 778bf2b2d..e6e8ca50a 100644 --- a/Justfile +++ b/Justfile @@ -25,7 +25,8 @@ release-snapshot: gen-license-report # Run golangci-lint for all packages lint *ARGS: - golangci-lint run {{ARGS}} + [ -f ./custom-gcl ] || golangci-lint custom + ./custom-gcl run {{ARGS}} # Check struct memory alignment and print potential improvements [no-exit-message] From e16b96ab0150a21b754ff8872e82323918cfb31b Mon Sep 17 00:00:00 2001 From: Thomas Kosiewski Date: Fri, 5 Jul 2024 17:23:49 +0200 Subject: [PATCH 2/3] lint(nilaway): Fixed nilaway errors Signed-off-by: Thomas Kosiewski --- .golangci.yml | 7 +- cmd/vclusterctl/cmd/disconnect.go | 14 +- cmd/vclusterctl/cmd/platform/add/cluster.go | 2 +- cmd/vclusterctl/cmd/platform/get/cluster.go | 14 +- cmd/vclusterctl/cmd/platform/get/secret.go | 7 +- cmd/vclusterctl/cmd/platform/reset.go | 9 +- cmd/vclusterctl/cmd/root.go | 38 +++--- config/config.go | 22 ++-- config/legacyconfig/config.go | 10 +- hack/compat-matrix/main.go | 3 + hack/docs/main.go | 16 ++- pkg/certs/cert_list.go | 6 +- pkg/certs/certs.go | 5 +- pkg/certs/ensure.go | 16 ++- pkg/certs/kubeconfig.go | 21 ++- pkg/certs/util.go | 6 +- pkg/cli/config/config.go | 39 +++--- pkg/cli/connect_helm.go | 68 +++++++--- pkg/cli/connect_platform.go | 18 ++- pkg/cli/create_helm.go | 6 +- pkg/cli/delete_helm.go | 11 +- pkg/cli/find/find.go | 13 +- pkg/cli/flags/flags.go | 4 +- pkg/cli/localkubernetes/configure.go | 41 ++++-- pkg/cli/start/docker.go | 2 +- pkg/cli/start/success.go | 8 ++ pkg/controllers/generic/export_syncer.go | 12 +- pkg/controllers/generic/import_syncer.go | 5 + pkg/controllers/generic/patcher.go | 16 ++- pkg/controllers/register.go | 13 +- .../resources/configmaps/syncer.go | 6 +- pkg/controllers/resources/endpoints/syncer.go | 
7 +- pkg/controllers/resources/events/syncer.go | 43 ++++--- .../resources/ingresses/util/util.go | 18 +-- .../resources/namespaces/translate.go | 7 + .../resources/networkpolicies/translate.go | 29 +++-- pkg/controllers/resources/nodes/syncer.go | 4 +- pkg/controllers/resources/nodes/translate.go | 22 ++-- .../resources/persistentvolumes/translate.go | 8 ++ .../resources/pods/conditions_test.go | 3 + .../resources/pods/ephemeral_containers.go | 2 +- pkg/controllers/resources/pods/translate.go | 7 +- .../pods/translate/sa_token_secret.go | 24 +++- pkg/controllers/resources/secrets/syncer.go | 5 +- pkg/controllers/resources/services/syncer.go | 5 + .../volumesnapshotcontents/syncer.go | 2 +- .../volumesnapshotcontents/translate.go | 6 +- pkg/controllers/syncer/syncer.go | 2 +- pkg/controllers/syncer/syncer_test.go | 9 +- pkg/controllers/syncer/testing/context.go | 3 + pkg/etcd/util.go | 13 +- pkg/k3s/k3s.go | 8 ++ pkg/lifecycle/lifecycle.go | 7 +- pkg/patches/operation.go | 5 +- pkg/patches/patch_test.go | 10 +- pkg/patches/patch_types.go | 27 ++-- pkg/platform/backup/backup.go | 25 +++- pkg/platform/client.go | 2 +- pkg/platform/clihelper/clihelper.go | 47 +++++++ pkg/platform/helper.go | 52 ++++++-- pkg/platform/secret.go | 7 +- pkg/plugin/v1/remote/plugin.pb.go | 35 ++--- pkg/plugin/v1/remote/plugin_grpc.pb.go | 1 + pkg/plugin/v2/plugin.go | 4 +- pkg/plugin/v2/pluginv2/pluginv2.pb.go | 13 +- pkg/plugin/v2/pluginv2/pluginv2_grpc.pb.go | 1 + pkg/server/server.go | 1 + pkg/setup/config.go | 8 ++ pkg/setup/controller_context.go | 57 ++++++--- pkg/setup/controllers.go | 24 +++- pkg/setup/initialize.go | 4 + pkg/telemetry/helpers.go | 10 +- pkg/upgrade/upgrade_test.go | 4 +- pkg/util/clihelper/clihelper.go | 16 ++- pkg/util/commandwriter/commandwriter.go | 4 +- pkg/util/kubeconfig/kubeconfig.go | 73 ++++++----- pkg/util/pluginhookclient/client.go | 4 + pkg/util/portforward/portforward.go | 120 +++++++++++------- pkg/util/servicecidr/servicecidr.go | 8 ++ 
pkg/util/translate/single_namespace.go | 3 + pkg/util/unstructuredhelper/write.go | 42 ++++++ pkg/util/websocketproxy/websocketproxy.go | 6 +- test/e2e/servicesync/servicesync.go | 4 +- test/e2e/webhook/utiils.go | 6 +- test/framework/kubectlcmd.go | 4 +- 85 files changed, 943 insertions(+), 376 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 08fe14639..601f4c2f8 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -28,8 +28,10 @@ linters: - misspell - nakedret - nilaway + - nilnil - promlinter - revive + - sloglint - staticcheck - stylecheck - tagalign @@ -41,7 +43,6 @@ linters: # next linters to be enabled: # - prealloc - # - nilnil # - errchkjson # - gocritic @@ -82,6 +83,7 @@ linters-settings: tagalign: order: + - protobuf - json - yaml - xml @@ -95,6 +97,9 @@ linters-settings: # Settings must be a "map from string to string" to mimic command line flags: the keys are # flag names and the values are the values to the particular flags. include-pkgs: "github.com/loft-sh/vcluster" + exclude-pkgs: "github.com/loft-sh/vcluster/test" + experimental-anonymous-function: "true" + # experimental-struct-init: "true" issues: # Maximum issues count per one linter. Set to 0 to disable. Default is 50. 
diff --git a/cmd/vclusterctl/cmd/disconnect.go b/cmd/vclusterctl/cmd/disconnect.go index 0bb6e9c88..fc46f2274 100644 --- a/cmd/vclusterctl/cmd/disconnect.go +++ b/cmd/vclusterctl/cmd/disconnect.go @@ -1,6 +1,7 @@ package cmd import ( + "errors" "fmt" "github.com/loft-sh/log" @@ -52,9 +53,14 @@ vcluster disconnect // Run executes the functionality func (cmd *DisconnectCmd) Run() error { - rawConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{ + clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{ CurrentContext: cmd.Context, - }).RawConfig() + }) + if clientConfig == nil { + return errors.New("nil clientConfig") + } + + rawConfig, err := clientConfig.RawConfig() if err != nil { return err } @@ -102,6 +108,10 @@ func (cmd *DisconnectCmd) Run() error { } func (cmd *DisconnectCmd) selectContext(kubeConfig *clientcmdapi.Config, currentContext string) (string, error) { + if kubeConfig == nil { + return "", errors.New("nil kubeConfig") + } + availableContexts := []string{} for context := range kubeConfig.Contexts { if context != currentContext { diff --git a/cmd/vclusterctl/cmd/platform/add/cluster.go b/cmd/vclusterctl/cmd/platform/add/cluster.go index 81a5e008f..f166529c3 100644 --- a/cmd/vclusterctl/cmd/platform/add/cluster.go +++ b/cmd/vclusterctl/cmd/platform/add/cluster.go @@ -253,7 +253,7 @@ func (cmd *ClusterCmd) Run(ctx context.Context, args []string) error { return false, err } - return clusterInstance.Status.Phase == storagev1.ClusterStatusPhaseInitialized, nil + return clusterInstance != nil && clusterInstance.Status.Phase == storagev1.ClusterStatusPhaseInitialized, nil }) if waitErr != nil { return fmt.Errorf("get cluster: %w", waitErr) diff --git a/cmd/vclusterctl/cmd/platform/get/cluster.go b/cmd/vclusterctl/cmd/platform/get/cluster.go index bafae35dc..4684a4e5f 100644 --- 
a/cmd/vclusterctl/cmd/platform/get/cluster.go +++ b/cmd/vclusterctl/cmd/platform/get/cluster.go @@ -20,9 +20,7 @@ import ( "k8s.io/client-go/tools/clientcmd/api" ) -var ( - ErrNotLoftContext = errors.New("current context is not a loft context, but predefined var LOFT_CLUSTER is used") -) +var ErrNotLoftContext = errors.New("current context is not a loft context, but predefined var LOFT_CLUSTER is used") type clusterCmd struct { *flags.GlobalFlags @@ -92,7 +90,10 @@ func (c *clusterCmd) Run(ctx context.Context, _ []string) error { return err } - _, err = os.Stdout.Write([]byte(spaceInstance.Spec.ClusterRef.Cluster)) + if spaceInstance != nil { + _, err = os.Stdout.Write([]byte(spaceInstance.Spec.ClusterRef.Cluster)) + } + return err } @@ -117,7 +118,10 @@ func (c *clusterCmd) Run(ctx context.Context, _ []string) error { return err } - _, err = os.Stdout.Write([]byte(virtualClusterInstance.Spec.ClusterRef.Cluster)) + if virtualClusterInstance != nil { + _, err = os.Stdout.Write([]byte(virtualClusterInstance.Spec.ClusterRef.Cluster)) + } + return err } diff --git a/cmd/vclusterctl/cmd/platform/get/secret.go b/cmd/vclusterctl/cmd/platform/get/secret.go index 8d623787f..bca1a1e6e 100644 --- a/cmd/vclusterctl/cmd/platform/get/secret.go +++ b/cmd/vclusterctl/cmd/platform/get/secret.go @@ -203,9 +203,14 @@ func (cmd *SecretCmd) Run(ctx context.Context, args []string) error { keyNames = append(keyNames, k) } + defaultValue := "" + if len(keyNames) > 0 { + defaultValue = keyNames[0] + } + keyName, err = cmd.log.Question(&survey.QuestionOptions{ Question: "Please select a secret key to read", - DefaultValue: keyNames[0], + DefaultValue: defaultValue, Options: keyNames, }) if err != nil { diff --git a/cmd/vclusterctl/cmd/platform/reset.go b/cmd/vclusterctl/cmd/platform/reset.go index 370fbeb08..61a9cd352 100644 --- a/cmd/vclusterctl/cmd/platform/reset.go +++ b/cmd/vclusterctl/cmd/platform/reset.go @@ -22,13 +22,12 @@ import ( type PasswordCmd struct { *flags.GlobalFlags + Log 
log.Logger User string Password string Create bool Force bool - - Log log.Logger } func NewResetCmd(globalFlags *flags.GlobalFlags) *cobra.Command { @@ -127,6 +126,9 @@ func (cmd *PasswordCmd) Run() error { return err } } + if user == nil { + return errors.New("could not obtain user") + } // check if user had a password before if user.Spec.PasswordRef == nil || user.Spec.PasswordRef.SecretName == "" || user.Spec.PasswordRef.SecretNamespace == "" || user.Spec.PasswordRef.Key == "" { @@ -184,6 +186,9 @@ func (cmd *PasswordCmd) Run() error { return errors.Wrap(err, "create password secret") } } else { + if passwordSecret == nil { + passwordSecret = &corev1.Secret{} + } if passwordSecret.Data == nil { passwordSecret.Data = map[string][]byte{} } diff --git a/cmd/vclusterctl/cmd/root.go b/cmd/vclusterctl/cmd/root.go index be9d45788..01034347f 100644 --- a/cmd/vclusterctl/cmd/root.go +++ b/cmd/vclusterctl/cmd/root.go @@ -27,7 +27,7 @@ import ( ) // NewRootCmd returns a new root command -func NewRootCmd(log log.Logger) *cobra.Command { +func NewRootCmd(log log.Logger, globalFlags *flags.GlobalFlags) *cobra.Command { return &cobra.Command{ Use: "vcluster", SilenceUsage: true, @@ -57,8 +57,6 @@ func NewRootCmd(log log.Logger) *cobra.Command { } } -var globalFlags *flags.GlobalFlags - // Execute adds all child commands to the root command and sets flags appropriately. // This is called by main.main(). It only needs to happen once to the rootCmd. 
func Execute() { @@ -69,16 +67,16 @@ func Execute() { // start command log := log.GetInstance() - rootCmd, err := BuildRoot(log) + rootCmd, globalFlags, err := BuildRoot(log) if err != nil { log.Fatalf("error building root: %+v\n", err) } // Execute command err = rootCmd.ExecuteContext(context.Background()) - recordAndFlush(err, log) + recordAndFlush(err, log, globalFlags) if err != nil { - if globalFlags.Debug { + if globalFlags != nil && globalFlags.Debug { log.Fatalf("%+v", err) } @@ -87,18 +85,20 @@ func Execute() { } // BuildRoot creates a new root command from the -func BuildRoot(log log.Logger) (*cobra.Command, error) { - rootCmd := NewRootCmd(log) +func BuildRoot(log log.Logger) (*cobra.Command, *flags.GlobalFlags, error) { + var globalFlags *flags.GlobalFlags + rootCmd := NewRootCmd(log, globalFlags) persistentFlags := rootCmd.PersistentFlags() globalFlags = flags.SetGlobalFlags(persistentFlags, log) + home, err := homedir.Dir() if err != nil { - return nil, err + return nil, nil, err } defaults, err := defaults.NewFromPath(filepath.Join(home, defaults.ConfigFolder), defaults.ConfigFile) if err != nil { log.Debugf("Error loading defaults: %v", err) - return nil, err + return nil, nil, err } // Set version for --version flag @@ -123,25 +123,25 @@ func BuildRoot(log log.Logger) (*cobra.Command, error) { // add platform commands platformCmd, err := cmdplatform.NewPlatformCmd(globalFlags) if err != nil { - return nil, fmt.Errorf("failed to create platform command: %w", err) + return nil, nil, fmt.Errorf("failed to create platform command: %w", err) } rootCmd.AddCommand(platformCmd) loginCmd, err := NewLoginCmd(globalFlags) if err != nil { - return nil, fmt.Errorf("failed to create login command: %w", err) + return nil, nil, fmt.Errorf("failed to create login command: %w", err) } rootCmd.AddCommand(loginCmd) logoutCmd, err := NewLogoutCmd(globalFlags) if err != nil { - return nil, fmt.Errorf("failed to create logout command: %w", err) + return nil, nil, 
fmt.Errorf("failed to create logout command: %w", err) } rootCmd.AddCommand(logoutCmd) uiCmd, err := NewUICmd(globalFlags) if err != nil { - return nil, fmt.Errorf("failed to create ui command: %w", err) + return nil, nil, fmt.Errorf("failed to create ui command: %w", err) } rootCmd.AddCommand(uiCmd) rootCmd.AddCommand(credits.NewCreditsCmd()) @@ -149,13 +149,17 @@ func BuildRoot(log log.Logger) (*cobra.Command, error) { // add completion command err = rootCmd.RegisterFlagCompletionFunc("namespace", completion.NewNamespaceCompletionFunc(rootCmd.Context())) if err != nil { - return rootCmd, fmt.Errorf("failed to register completion for namespace: %w", err) + return nil, nil, fmt.Errorf("failed to register completion for namespace: %w", err) } - return rootCmd, nil + return rootCmd, globalFlags, nil } -func recordAndFlush(err error, log log.Logger) { +func recordAndFlush(err error, log log.Logger, globalFlags *flags.GlobalFlags) { + if globalFlags == nil { + panic("empty global flags") + } + telemetry.CollectorCLI.RecordCLI(globalFlags.LoadedConfig(log), platform.Self, err) telemetry.CollectorCLI.Flush() } diff --git a/config/config.go b/config/config.go index ca5c5f9c4..eb804b014 100644 --- a/config/config.go +++ b/config/config.go @@ -1226,31 +1226,31 @@ type VolumeClaim struct { // VolumeMount describes a mounting of a Volume within a container. type VolumeMount struct { // This must match the Name of a Volume. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + Name string `protobuf:"bytes,1,opt,name=name" json:"name"` // Mounted read-only if true, read-write otherwise (false or unspecified). // Defaults to false. - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` + ReadOnly bool `protobuf:"varint,2,opt,name=readOnly" json:"readOnly,omitempty"` // Path within the container at which the volume should be mounted. Must // not contain ':'. 
- MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"` + MountPath string `protobuf:"bytes,3,opt,name=mountPath" json:"mountPath"` // Path within the volume from which the container's volume should be mounted. // Defaults to "" (volume's root). - SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"` + SubPath string `protobuf:"bytes,4,opt,name=subPath" json:"subPath,omitempty"` // mountPropagation determines how mounts are propagated from the host // to container and the other way around. // When not set, MountPropagationNone is used. // This field is beta in 1.10. - MountPropagation *string `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"` + MountPropagation *string `protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode" json:"mountPropagation,omitempty"` // Expanded path within the volume from which the container's volume should be mounted. // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. // Defaults to "" (volume's root). // SubPathExpr and SubPath are mutually exclusive. - SubPathExpr string `json:"subPathExpr,omitempty" protobuf:"bytes,6,opt,name=subPathExpr"` + SubPathExpr string `protobuf:"bytes,6,opt,name=subPathExpr" json:"subPathExpr,omitempty"` } type ControlPlaneScheduling struct { @@ -1461,7 +1461,7 @@ type MutatingWebhookConfiguration struct { type MutatingWebhook struct { // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. // Allowed values are "Never" and "IfNeeded". 
- ReinvocationPolicy *string `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` + ReinvocationPolicy *string `protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType" json:"reinvocationPolicy,omitempty"` ValidatingWebhook `json:",inline"` } @@ -1963,16 +1963,16 @@ type DenyRule struct { type RuleWithVerbs struct { // APIGroups is the API groups the resources belong to. '*' is all groups. - APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` + APIGroups []string `protobuf:"bytes,1,rep,name=apiGroups" json:"apiGroups,omitempty"` // APIVersions is the API versions the resources belong to. '*' is all versions. - APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` + APIVersions []string `protobuf:"bytes,2,rep,name=apiVersions" json:"apiVersions,omitempty"` // Resources is a list of resources this rule applies to. - Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` + Resources []string `protobuf:"bytes,3,rep,name=resources" json:"resources,omitempty"` // Scope specifies the scope of this rule. - Scope *string `json:"scope,omitempty" protobuf:"bytes,4,rep,name=scope"` + Scope *string `protobuf:"bytes,4,rep,name=scope" json:"scope,omitempty"` // Verb is the kube verb associated with the request for API requests, not the http verb. This includes things like list and watch. // For non-resource requests, this is the lowercase http verb. diff --git a/config/legacyconfig/config.go b/config/legacyconfig/config.go index d3cae0ca3..6a404889d 100644 --- a/config/legacyconfig/config.go +++ b/config/legacyconfig/config.go @@ -267,22 +267,22 @@ type RBACRoleValues struct { type RBACRule struct { // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. 
- Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + Verbs []string `protobuf:"bytes,1,rep,name=verbs" json:"verbs"` // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. // +optional - APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` + APIGroups []string `protobuf:"bytes,2,rep,name=apiGroups" json:"apiGroups,omitempty"` // Resources is a list of resources this rule applies to. '*' represents all resources. // +optional - Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` + Resources []string `protobuf:"bytes,3,rep,name=resources" json:"resources,omitempty"` // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. // +optional - ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` + ResourceNames []string `protobuf:"bytes,4,rep,name=resourceNames" json:"resourceNames,omitempty"` // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. 
// +optional - NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,5,rep,name=nonResourceURLs"` + NonResourceURLs []string `protobuf:"bytes,5,rep,name=nonResourceURLs" json:"nonResourceURLs,omitempty"` } type PDBValues struct { diff --git a/hack/compat-matrix/main.go b/hack/compat-matrix/main.go index f6af0938e..30213a29c 100644 --- a/hack/compat-matrix/main.go +++ b/hack/compat-matrix/main.go @@ -120,6 +120,9 @@ func updateTableWithDistro(distroName string, versionMap map[string]string, know for hostVersion, issueList := range issues { for vclusterAPI, issueDesc := range issueList { + if issues[hostVersion] == nil { + issues[hostVersion] = make(map[string]string, 0) + } issues[hostVersion][removeRegistry(vclusterAPI)] = issueDesc if removeRegistry(vclusterAPI) != vclusterAPI { // avoids removing valid entries diff --git a/hack/docs/main.go b/hack/docs/main.go index 6bea99ffd..495e99738 100755 --- a/hack/docs/main.go +++ b/hack/docs/main.go @@ -14,13 +14,15 @@ import ( "github.com/spf13/cobra/doc" ) -const cliDocsDir = "./docs/pages/cli" -const headerTemplate = `--- +const ( + cliDocsDir = "./docs/pages/cli" + headerTemplate = `--- title: "%s --help" sidebar_label: %s --- ` +) const proHeaderTemplate = `--- title: "%[1]s --help" @@ -70,7 +72,7 @@ func main() { return strings.ToLower(base) + ".md" } - rootCmd, err := cmd.BuildRoot(logger) + rootCmd, _, err := cmd.BuildRoot(logger) if err != nil { logger.Fatal(err) } @@ -81,7 +83,15 @@ func main() { } err = filepath.Walk(cliDocsDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + stat, err := os.Stat(path) + if err != nil { + return err + } + if stat.IsDir() { return nil } diff --git a/pkg/certs/cert_list.go b/pkg/certs/cert_list.go index 48bee2df3..2a900598a 100644 --- a/pkg/certs/cert_list.go +++ b/pkg/certs/cert_list.go @@ -66,7 +66,6 @@ func (k *KubeadmCert) CreateFromCA(ic *InitConfiguration, caCert *x509.Certifica key, cfg, ) - if err != nil { 
return errors.Wrapf(err, "failed to write or validate certificate %q", k.Name) } @@ -407,7 +406,10 @@ func makeAltNamesMutator(f func(*InitConfiguration) (*certutil.AltNames, error)) if err != nil { return err } - cc.AltNames = *altNames + if altNames != nil { + cc.AltNames = *altNames + } + return nil } } diff --git a/pkg/certs/certs.go b/pkg/certs/certs.go index 92b30abc2..764f39d84 100644 --- a/pkg/certs/certs.go +++ b/pkg/certs/certs.go @@ -59,7 +59,7 @@ func CreatePKIAssets(cfg *InitConfiguration) error { klog.Infof("Valid certificates and keys now exist in %q", cfg.CertificatesDir) // Service accounts are not x509 certs, so handled separately - return CreateServiceAccountKeyAndPublicKeyFiles(cfg.CertificatesDir, cfg.ClusterConfiguration.PublicKeyAlgorithm()) + return CreateServiceAccountKeyAndPublicKeyFiles(cfg.CertificatesDir, cfg.PublicKeyAlgorithm()) } // CreateServiceAccountKeyAndPublicKeyFiles creates new public/private key files for signing service account users. @@ -82,6 +82,9 @@ func CreateServiceAccountKeyAndPublicKeyFiles(certsDir string, keyType x509.Publ if err != nil { return err } + if key == nil { + return errors.New("no private key generated") + } // Write .key and .pub files to disk klog.Infof("[certs] Generating %q key and public key", ServiceAccountKeyBaseName) diff --git a/pkg/certs/ensure.go b/pkg/certs/ensure.go index e3581d100..01b44bbd2 100644 --- a/pkg/certs/ensure.go +++ b/pkg/certs/ensure.go @@ -32,6 +32,10 @@ func EnsureCerts( etcdSans []string, options *config.VirtualClusterConfig, ) error { + if currentNamespaceClient == nil { + return errors.New("nil currentNamespaceClient") + } + // we create a certificate for up to 20 etcd replicas, this should be sufficient for most use cases. Eventually we probably // want to update this to the actual etcd number, but for now this is the easiest way to allow up and downscaling without // regenerating certificates. 
@@ -110,8 +114,8 @@ func EnsureCerts( return fmt.Errorf("get vcluster service: %w", err) } // client doesn't populate typemeta - controlPlaneService.TypeMeta.APIVersion = "v1" - controlPlaneService.TypeMeta.Kind = "Service" + controlPlaneService.APIVersion = "v1" + controlPlaneService.Kind = "Service" ownerRef = append(ownerRef, metav1.OwnerReference{ APIVersion: "v1", @@ -291,7 +295,15 @@ func updateKubeconfigToLocalhost(config *clientcmdapi.Config) bool { // not sure what that would do in case of multiple clusters, // but this is not expected AFAIU for k, v := range config.Clusters { + if v == nil { + continue + } + if v.Server != "https://127.0.0.1:6443" { + if config.Clusters[k] == nil { + config.Clusters[k] = &clientcmdapi.Cluster{} + } + config.Clusters[k].Server = "https://127.0.0.1:6443" updated = true } diff --git a/pkg/certs/kubeconfig.go b/pkg/certs/kubeconfig.go index dc3ebc341..ed04ab4b1 100644 --- a/pkg/certs/kubeconfig.go +++ b/pkg/certs/kubeconfig.go @@ -182,6 +182,13 @@ func newClientCertConfigFromKubeConfigSpec(spec *kubeConfigSpec, notAfter *time. // validateKubeConfig check if the kubeconfig file exist and has the expected CA and server URL func validateKubeConfig(outDir, filename string, config *clientcmdapi.Config) error { + if config == nil { + return errors.New("nil config") + } + if config.Contexts == nil || config.Clusters == nil { + return errors.New("config contains unexpected nil fields") + } + kubeConfigFilePath := filepath.Join(outDir, filename) if _, err := os.Stat(kubeConfigFilePath); err != nil { @@ -211,8 +218,18 @@ func validateKubeConfig(outDir, filename string, config *clientcmdapi.Config) er // Make sure the compared CAs are whitespace-trimmed. The function clientcmd.LoadFromFile() just decodes // the base64 CA and places it raw in the v1.Config object. In case the user has extra whitespace // in the CA they used to create a kubeconfig this comparison to a generated v1.Config will otherwise fail. 
- caCurrent := bytes.TrimSpace(currentConfig.Clusters[currentCluster].CertificateAuthorityData) - caExpected := bytes.TrimSpace(config.Clusters[expectedCluster].CertificateAuthorityData) + currentConfigCurrentCluster, ok := currentConfig.Clusters[currentCluster] + if !ok { + return errors.New("current cluster not found in current configs") + } + + configExpectedCluster, ok := config.Clusters[expectedCluster] + if !ok { + return errors.New("expected cluster not found") + } + + caCurrent := bytes.TrimSpace(currentConfigCurrentCluster.CertificateAuthorityData) + caExpected := bytes.TrimSpace(configExpectedCluster.CertificateAuthorityData) // If the current CA cert on disk doesn't match the expected CA cert, error out because we have a file, but it's stale if !bytes.Equal(caCurrent, caExpected) { diff --git a/pkg/certs/util.go b/pkg/certs/util.go index 3f456ea34..5a7588c67 100644 --- a/pkg/certs/util.go +++ b/pkg/certs/util.go @@ -310,7 +310,7 @@ func parseAPIEndpoint(localEndpoint *APIEndpoint) (net.IP, string, error) { } // parse the AdvertiseAddress - var ip = net.ParseIP(localEndpoint.AdvertiseAddress) + ip := net.ParseIP(localEndpoint.AdvertiseAddress) if ip == nil { return nil, "", errors.Errorf("invalid value `%s` given for api.advertiseAddress", localEndpoint.AdvertiseAddress) } @@ -555,6 +555,10 @@ func GeneratePrivateKey(keyType x509.PublicKeyAlgorithm) (crypto.Signer, error) // NewSignedCert creates a signed certificate using the given CA certificate and key func NewSignedCert(cfg *CertConfig, key crypto.Signer, caCert *x509.Certificate, caKey crypto.Signer, isCA bool) (*x509.Certificate, error) { + if key == nil { + return nil, errors.New("missing private key") + } + serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64)) if err != nil { return nil, err diff --git a/pkg/cli/config/config.go b/pkg/cli/config/config.go index badcc6527..bb29fed96 100644 --- a/pkg/cli/config/config.go +++ b/pkg/cli/config/config.go @@ -6,7 +6,6 @@ 
import ( "io" "os" "path/filepath" - "sync" "github.com/loft-sh/log" homedir "github.com/mitchellh/go-homedir" @@ -21,10 +20,7 @@ const ( PlatformDriver DriverType = "platform" ) -var ( - singleConfig *CLI - singleConfigOnce sync.Once -) +var singleConfig *CLI // New creates a new default config func New() *CLI { @@ -44,22 +40,35 @@ func New() *CLI { } func (c *CLI) Save() error { - return Write(c.path, c) + path := "" + if c != nil { + path = c.path + } else { + var err error + path, err = DefaultFilePath() + if err != nil { + return err + } + } + + return Write(path, c) } // Read returns the current config by trying to read it from the given config path. // It returns a new default config if there have been any errors during the read. func Read(path string, log log.Logger) *CLI { - singleConfigOnce.Do(func() { - if singleConfig == nil { - cfg, err := readOrNewConfig(path) - if err != nil { - log.Debugf("Failed to load local configuration file: %v", err) - } - cfg.path = path - singleConfig = cfg + if singleConfig == nil { + var err error + singleConfig, err = readOrNewConfig(path) + if err != nil { + log.Debugf("Failed to load local configuration file: %v", err) } - }) + } + if singleConfig == nil { + singleConfig = New() + } + + singleConfig.path = path return singleConfig } diff --git a/pkg/cli/connect_helm.go b/pkg/cli/connect_helm.go index 5d13101b8..de71286f0 100644 --- a/pkg/cli/connect_helm.go +++ b/pkg/cli/connect_helm.go @@ -2,6 +2,7 @@ package cli import ( "context" + "errors" "fmt" "io" "os" @@ -137,6 +138,10 @@ func (cmd *connectHelm) connect(ctx context.Context, vCluster *find.VCluster, co } func writeKubeConfig(kubeConfig *clientcmdapi.Config, vClusterName string, options *ConnectOptions, globalFlags *flags.GlobalFlags, portForwarding bool, log log.Logger) error { + if kubeConfig == nil { + return errors.New("nil kubeconfig") + } + // write kube config to buffer out, err := clientcmd.Write(*kubeConfig) if err != nil { @@ -154,11 +159,17 @@ func 
writeKubeConfig(kubeConfig *clientcmdapi.Config, vClusterName string, optio for _, c := range kubeConfig.Clusters { clusterConfig = c } + if clusterConfig == nil { + return errors.New("nil clusterConfig") + } var authConfig *clientcmdapi.AuthInfo for _, a := range kubeConfig.AuthInfos { authConfig = a } + if authConfig == nil { + return errors.New("nil authConfig") + } err = clihelper.UpdateKubeConfig(options.KubeConfigContextName, clusterConfig, authConfig, true) if err != nil { @@ -336,10 +347,14 @@ func (cmd *connectHelm) getVClusterKubeConfig(ctx context.Context, vclusterName // find out vcluster server port port := "8443" - for k := range kubeConfig.Clusters { + for _, cluster := range kubeConfig.Clusters { + if cluster == nil { + continue + } + if cmd.Insecure { - kubeConfig.Clusters[k].CertificateAuthorityData = nil - kubeConfig.Clusters[k].InsecureSkipTLSVerify = true + cluster.CertificateAuthorityData = nil + cluster.InsecureSkipTLSVerify = true } if cmd.Server != "" { @@ -347,16 +362,16 @@ func (cmd *connectHelm) getVClusterKubeConfig(ctx context.Context, vclusterName cmd.Server = "https://" + cmd.Server } - kubeConfig.Clusters[k].Server = cmd.Server + cluster.Server = cmd.Server } else { - splitted := strings.Split(kubeConfig.Clusters[k].Server, ":") + splitted := strings.Split(cluster.Server, ":") if len(splitted) != 3 { - return nil, fmt.Errorf("unexpected server in kubeconfig: %s", kubeConfig.Clusters[k].Server) + return nil, fmt.Errorf("unexpected server in kubeconfig: %s", cluster.Server) } port = splitted[2] splitted[2] = strconv.Itoa(cmd.LocalPort) - kubeConfig.Clusters[k].Server = strings.Join(splitted, ":") + cluster.Server = strings.Join(splitted, ":") } } @@ -420,6 +435,9 @@ func (cmd *connectHelm) setServerIfExposed(ctx context.Context, vClusterName str return false, err } } + if service == nil { + return false, errors.New("nil service") + } // not a load balancer? 
Then don't wait if service.Spec.Type == corev1.ServiceTypeNodePort { @@ -467,6 +485,16 @@ func (cmd *connectHelm) setServerIfExposed(ctx context.Context, vClusterName str // the context name specified by the user. It cannot correctly handle kubeconfigs with multiple entries // for clusters, authInfos, contexts, but ideally this is pointed at a secret created by us. func (cmd *connectHelm) exchangeContextName(kubeConfig *clientcmdapi.Config, vclusterName string) error { + if cmd == nil { + return errors.New("nil connectHelm cmd") + } + if kubeConfig == nil { + return errors.New("nil kubeConfig") + } + if kubeConfig.Clusters == nil || kubeConfig.Contexts == nil || kubeConfig.AuthInfos == nil { + return errors.New("passed kubeconfig is missing required fields") + } + if cmd.KubeConfigContextName == "" { if vclusterName != "" { cmd.KubeConfigContextName = find.VClusterContextName(vclusterName, cmd.Namespace, cmd.rawConfig.CurrentContext) @@ -476,8 +504,8 @@ func (cmd *connectHelm) exchangeContextName(kubeConfig *clientcmdapi.Config, vcl } // pick the last specified cluster (there should ideally be exactly one) - for k := range kubeConfig.Clusters { - kubeConfig.Clusters[cmd.KubeConfigContextName] = kubeConfig.Clusters[k] + for k, cluster := range kubeConfig.Clusters { + kubeConfig.Clusters[cmd.KubeConfigContextName] = cluster // delete the rest if k != cmd.KubeConfigContextName { delete(kubeConfig.Clusters, k) @@ -486,8 +514,11 @@ func (cmd *connectHelm) exchangeContextName(kubeConfig *clientcmdapi.Config, vcl } // pick the last specified context (there should ideally be exactly one) - for k := range kubeConfig.Contexts { - ctx := kubeConfig.Contexts[k] + for k, ctx := range kubeConfig.Contexts { + if ctx == nil { + continue + } + ctx.Cluster = cmd.KubeConfigContextName ctx.AuthInfo = cmd.KubeConfigContextName kubeConfig.Contexts[cmd.KubeConfigContextName] = ctx @@ -499,8 +530,12 @@ func (cmd *connectHelm) exchangeContextName(kubeConfig *clientcmdapi.Config, vcl } // 
pick the last specified authinfo (there should ideally be exactly one) - for k := range kubeConfig.AuthInfos { - kubeConfig.AuthInfos[cmd.KubeConfigContextName] = kubeConfig.AuthInfos[k] + for k, authInfo := range kubeConfig.AuthInfos { + if authInfo == nil { + continue + } + + kubeConfig.AuthInfos[cmd.KubeConfigContextName] = authInfo // delete the rest if k != cmd.KubeConfigContextName { delete(kubeConfig.AuthInfos, k) @@ -581,8 +616,11 @@ func getLocalVClusterConfig(vKubeConfig clientcmdapi.Config, options *ConnectOpt // update vCluster server address in case of OSS vClusters only if options.LocalPort != 0 { - for k := range vKubeConfig.Clusters { - vKubeConfig.Clusters[k].Server = "https://localhost:" + strconv.Itoa(options.LocalPort) + for _, cluster := range vKubeConfig.Clusters { + if cluster == nil { + continue + } + cluster.Server = "https://localhost:" + strconv.Itoa(options.LocalPort) } } diff --git a/pkg/cli/connect_platform.go b/pkg/cli/connect_platform.go index 016213eb4..2055279b1 100644 --- a/pkg/cli/connect_platform.go +++ b/pkg/cli/connect_platform.go @@ -2,6 +2,7 @@ package cli import ( "context" + "errors" "fmt" "github.com/loft-sh/log" @@ -32,6 +33,9 @@ func ConnectPlatform(ctx context.Context, options *ConnectOptions, globalFlags * if err != nil { return fmt.Errorf("get platform vcluster %s: %w", vClusterName, err) } + if vCluster == nil { + return errors.New("empty vCluster") + } // create connect platform command cmd := connectPlatform{ @@ -53,10 +57,18 @@ func ConnectPlatform(ctx context.Context, options *ConnectOptions, globalFlags * } // wait for vCluster to become ready - vCluster.VirtualCluster, err = platform.WaitForVirtualClusterInstance(ctx, managementClient, vCluster.VirtualCluster.Namespace, vCluster.VirtualCluster.Name, true, log) + if vCluster.VirtualCluster == nil { + return errors.New("nil virtual cluster object") + } + + vc, err := platform.WaitForVirtualClusterInstance(ctx, managementClient, vCluster.VirtualCluster.Namespace, 
vCluster.VirtualCluster.Name, true, log) if err != nil { return err } + vCluster.VirtualCluster = vc + if vCluster.VirtualCluster == nil { + return errors.New("platform returned empty virtual cluster") + } // retrieve vCluster kube config kubeConfig, err := cmd.getVClusterKubeConfig(ctx, platformClient, vCluster) @@ -90,6 +102,10 @@ func (cmd *connectPlatform) validateProFlags() error { } func (cmd *connectPlatform) getVClusterKubeConfig(ctx context.Context, platformClient platform.Client, vCluster *platform.VirtualClusterInstanceProject) (*clientcmdapi.Config, error) { + if vCluster == nil || vCluster.Project == nil || vCluster.VirtualCluster == nil { + return nil, errors.New("invalid vcluster VirtualClusterInstanceProject object") + } + contextOptions, err := platform.CreateVirtualClusterInstanceOptions(ctx, platformClient, "", vCluster.Project.Name, vCluster.VirtualCluster, false) if err != nil { return nil, fmt.Errorf("prepare vCluster kube config: %w", err) diff --git a/pkg/cli/create_helm.go b/pkg/cli/create_helm.go index b4e074593..ff1dd4372 100644 --- a/pkg/cli/create_helm.go +++ b/pkg/cli/create_helm.go @@ -164,7 +164,7 @@ func CreateHelm(ctx context.Context, options *CreateOptions, globalFlags *flags. return err } // TODO Delete after vCluster 0.19.x resp. the old config format is out of support. - if isLegacyVCluster(release.Chart.Metadata.Version) { + if release != nil && release.Chart != nil && release.Chart.Metadata != nil && isLegacyVCluster(release.Chart.Metadata.Version) { // If we have a < v0.20 virtual cluster running we have to infer the distro from the current chart name. currentDistro := strings.TrimPrefix(release.Chart.Metadata.Name, "vcluster-") // If we are upgrading a vCluster < v0.20 the old k3s chart is the one without a prefix. 
@@ -379,6 +379,8 @@ func (cmd *createHelm) isLoftAgentDeployed(ctx context.Context) (bool, error) { }) if err != nil && !kerrors.IsNotFound(err) { return false, err + } else if podList == nil { + return false, errors.New("nil podList") } return len(podList.Items) > 0, nil @@ -427,7 +429,7 @@ func isLegacyConfig(values []byte) bool { // helmValuesYAML returns the extraValues from the helm release in yaml format. // If the extra values in the chart are nil it returns an empty string. func helmExtraValuesYAML(release *helm.Release) (string, error) { - if release.Config == nil { + if release == nil || release.Config == nil { return "", nil } extraValues, err := yaml.Marshal(release.Config) diff --git a/pkg/cli/delete_helm.go b/pkg/cli/delete_helm.go index 051364ce0..0af0cbfc9 100644 --- a/pkg/cli/delete_helm.go +++ b/pkg/cli/delete_helm.go @@ -30,6 +30,7 @@ const VirtualClusterServiceUIDLabel = "vcluster.loft.sh/service-uid" type DeleteOptions struct { Driver string + Project string Wait bool KeepPVC bool DeleteNamespace bool @@ -37,8 +38,6 @@ type DeleteOptions struct { DeleteConfigMap bool AutoDeleteNamespace bool IgnoreNotFound bool - - Project string } type deleteHelm struct { @@ -300,6 +299,10 @@ func (cmd *deleteHelm) prepare(vCluster *find.VCluster) error { } func deleteContext(kubeConfig *clientcmdapi.Config, kubeContext string, otherContext string) error { + if kubeConfig == nil || kubeConfig.Contexts == nil { + return nil + } + // Get context contextRaw, ok := kubeConfig.Contexts[kubeContext] if !ok { @@ -314,6 +317,10 @@ func deleteContext(kubeConfig *clientcmdapi.Config, kubeContext string, otherCon // Check if AuthInfo or Cluster is used by any other context for name, ctx := range kubeConfig.Contexts { + if ctx == nil { + continue + } + if name != kubeContext && ctx.AuthInfo == contextRaw.AuthInfo { removeAuthInfo = false } diff --git a/pkg/cli/find/find.go b/pkg/cli/find/find.go index 357e9fd65..0c8a89fe8 100644 --- a/pkg/cli/find/find.go +++ 
b/pkg/cli/find/find.go @@ -27,14 +27,13 @@ import ( const VirtualClusterSelector = "app=vcluster" type VCluster struct { - Name string - Namespace string - - Status Status + ClientFactory clientcmd.ClientConfig `json:"-"` Created metav1.Time + Name string + Namespace string + Status Status Context string Version string - ClientFactory clientcmd.ClientConfig `json:"-"` } type Status string @@ -54,6 +53,10 @@ func (e *VClusterNotFoundError) Error() string { } func SwitchContext(kubeConfig *clientcmdapi.Config, otherContext string) error { + if kubeConfig == nil { + return errors.New("nil kubeconfig") + } + kubeConfig.CurrentContext = otherContext return clientcmd.ModifyConfig(clientcmd.NewDefaultClientConfigLoadingRules(), *kubeConfig, false) } diff --git a/pkg/cli/flags/flags.go b/pkg/cli/flags/flags.go index f47c44031..5c4c50b4c 100644 --- a/pkg/cli/flags/flags.go +++ b/pkg/cli/flags/flags.go @@ -9,12 +9,12 @@ import ( // GlobalFlags is the flags that contains the global flags type GlobalFlags struct { - Silent bool - Debug bool Config string Context string Namespace string LogOutput string + Silent bool + Debug bool } func (g *GlobalFlags) LoadedConfig(log log.Logger) *config.CLI { diff --git a/pkg/cli/localkubernetes/configure.go b/pkg/cli/localkubernetes/configure.go index 0778d9f77..4d83a2b57 100644 --- a/pkg/cli/localkubernetes/configure.go +++ b/pkg/cli/localkubernetes/configure.go @@ -55,6 +55,10 @@ func ExposeLocal(ctx context.Context, vClusterName, vClusterNamespace string, ra } func CleanupLocal(vClusterName, vClusterNamespace string, rawConfig *clientcmdapi.Config, log log.Logger) error { + if rawConfig == nil { + return errors.New("nil rawConfig") + } + clusterType := DetectClusterType(rawConfig) switch clusterType { case ClusterTypeMinikube: @@ -74,6 +78,9 @@ func CleanupLocal(vClusterName, vClusterNamespace string, rawConfig *clientcmdap } func k3dProxy(ctx context.Context, vClusterName, vClusterNamespace string, rawConfig *clientcmdapi.Config, 
vRawConfig *clientcmdapi.Config, service *corev1.Service, localPort int, timeout time.Duration, log log.Logger) (string, error) { + if service == nil { + return "", errors.New("service is nil") + } if len(service.Spec.Ports) == 0 { return "", fmt.Errorf("service has %d ports (expected 1 port)", len(service.Spec.Ports)) } @@ -91,6 +98,9 @@ func k3dProxy(ctx context.Context, vClusterName, vClusterNamespace string, rawCo } func minikubeProxy(ctx context.Context, vClusterName, vClusterNamespace string, rawConfig *clientcmdapi.Config, vRawConfig *clientcmdapi.Config, service *corev1.Service, localPort int, timeout time.Duration, log log.Logger) (string, error) { + if service == nil { + return "", errors.New("nil service") + } if len(service.Spec.Ports) == 0 { return "", fmt.Errorf("service has %d ports (expected 1 port)", len(service.Spec.Ports)) } @@ -124,9 +134,13 @@ func minikubeProxy(ctx context.Context, vClusterName, vClusterNamespace string, // workaround for the fact that vcluster certificate is not made valid for the node IPs // but avoid modifying the passed config before the connection is tested testvConfig := vRawConfig.DeepCopy() - for k := range testvConfig.Clusters { - testvConfig.Clusters[k].CertificateAuthorityData = nil - testvConfig.Clusters[k].InsecureSkipTLSVerify = true + for _, cluster := range testvConfig.Clusters { + if cluster == nil { + continue + } + + cluster.CertificateAuthorityData = nil + cluster.InsecureSkipTLSVerify = true } // test local connection @@ -143,9 +157,13 @@ func minikubeProxy(ctx context.Context, vClusterName, vClusterNamespace string, } // now it's safe to modify the vRawConfig struct that was passed in as a pointer - for k := range vRawConfig.Clusters { - vRawConfig.Clusters[k].CertificateAuthorityData = nil - vRawConfig.Clusters[k].InsecureSkipTLSVerify = true + for _, cluster := range vRawConfig.Clusters { + if cluster == nil { + continue + } + + cluster.CertificateAuthorityData = nil + cluster.InsecureSkipTLSVerify = 
true } return server, nil @@ -189,6 +207,9 @@ func cleanupProxy(vClusterName, vClusterNamespace string, rawConfig *clientcmdap } func kindProxy(ctx context.Context, vClusterName, vClusterNamespace string, rawConfig *clientcmdapi.Config, vRawConfig *clientcmdapi.Config, service *corev1.Service, localPort int, timeout time.Duration, log log.Logger) (string, error) { + if service == nil { + return "", errors.New("nil service") + } if len(service.Spec.Ports) == 0 { return "", fmt.Errorf("service has %d ports (expected 1 port)", len(service.Spec.Ports)) } @@ -378,8 +399,12 @@ func IsDockerInstalledAndUpAndRunning() bool { func testConnectionWithServer(ctx context.Context, vRawConfig *clientcmdapi.Config, server string) error { vRawConfig = vRawConfig.DeepCopy() - for k := range vRawConfig.Clusters { - vRawConfig.Clusters[k].Server = server + for _, cluster := range vRawConfig.Clusters { + if cluster == nil { + continue + } + + cluster.Server = server } restConfig, err := clientcmd.NewDefaultClientConfig(*vRawConfig, &clientcmd.ConfigOverrides{}).ClientConfig() diff --git a/pkg/cli/start/docker.go b/pkg/cli/start/docker.go index 2f7484fee..d533f313f 100644 --- a/pkg/cli/start/docker.go +++ b/pkg/cli/start/docker.go @@ -390,7 +390,7 @@ func (e *Error) Error() string { } var exitError *exec.ExitError - if errors.As(e.err, &exitError) && len(exitError.Stderr) > 0 { + if errors.As(e.err, &exitError) && exitError != nil && len(exitError.Stderr) > 0 { message += string(exitError.Stderr) + "\n" } diff --git a/pkg/cli/start/success.go b/pkg/cli/start/success.go index a0285cb35..9b6844b20 100644 --- a/pkg/cli/start/success.go +++ b/pkg/cli/start/success.go @@ -3,6 +3,7 @@ package start import ( "context" "crypto/tls" + "errors" "fmt" "net/http" "strings" @@ -100,6 +101,13 @@ func (l *LoftStarter) success(ctx context.Context) error { } func (l *LoftStarter) pingLoftRouter(ctx context.Context, loftPod *corev1.Pod) (string, error) { + if l == nil { + return "", errors.New("nil 
LoftStarter") + } + if l.KubeClient == nil { + return "", errors.New("nil KubeClient") + } + loftRouterSecret, err := l.KubeClient.CoreV1().Secrets(loftPod.Namespace).Get(ctx, clihelper.LoftRouterDomainSecret, metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { diff --git a/pkg/controllers/generic/export_syncer.go b/pkg/controllers/generic/export_syncer.go index 78632bca1..bb4df4624 100644 --- a/pkg/controllers/generic/export_syncer.go +++ b/pkg/controllers/generic/export_syncer.go @@ -168,7 +168,12 @@ func (f *exporter) SyncToHost(ctx *synccontext.SyncContext, vObj client.Object) pObj, err := f.patcher.ApplyPatches(ctx.Context, vObj, nil, f) if kerrors.IsConflict(err) { return ctrl.Result{Requeue: true}, nil - } else if err != nil { + } + if err != nil { + if err := IgnoreAcceptableErrors(err); err != nil { + return ctrl.Result{}, nil + } + f.EventRecorder().Eventf(vObj, "Warning", "SyncError", "Error syncing to physical cluster: %v", err) return ctrl.Result{}, fmt.Errorf("error applying patches: %w", err) } else if pObj == nil { @@ -253,6 +258,7 @@ func (f *exporter) Sync(ctx *synccontext.SyncContext, pObj client.Object, vObj c // apply patches pObj, err = f.patcher.ApplyPatches(ctx.Context, vObj, pObj, f) + err = IgnoreAcceptableErrors(err) if err != nil { // when invalid, auto delete and recreate to recover if kerrors.IsInvalid(err) && f.replaceWhenInvalid { @@ -301,6 +307,10 @@ func (f *exporter) Name() string { // TranslateMetadata converts the virtual object into a physical object func (f *exporter) TranslateMetadata(ctx context.Context, vObj client.Object) client.Object { pObj := f.NamespacedTranslator.TranslateMetadata(ctx, vObj) + if pObj == nil { + return nil + } + if pObj.GetAnnotations() == nil { pObj.SetAnnotations(map[string]string{translate.ControllerLabel: f.Name()}) } else { diff --git a/pkg/controllers/generic/import_syncer.go b/pkg/controllers/generic/import_syncer.go index 89b4828f2..4676c1f9f 100644 --- 
a/pkg/controllers/generic/import_syncer.go +++ b/pkg/controllers/generic/import_syncer.go @@ -197,6 +197,10 @@ func (s *importer) SyncToVirtual(ctx *synccontext.SyncContext, pObj client.Objec ctx.Log.Infof("Create virtual %s, since it is missing, but physical object %s/%s exists", s.gvk.Kind, pObj.GetNamespace(), pObj.GetName()) vObj, err := s.patcher.ApplyPatches(ctx.Context, pObj, nil, s) if err != nil { + if err := IgnoreAcceptableErrors(err); err != nil { + return ctrl.Result{}, nil + } + // TODO: add eventRecorder? // s.EventRecorder().Eventf(vObj, "Warning", "SyncError", "Error syncing to virtual cluster: %v", err) return ctrl.Result{}, fmt.Errorf("error applying patches: %w", err) @@ -310,6 +314,7 @@ func (s *importer) Sync(ctx *synccontext.SyncContext, pObj client.Object, vObj c // apply patches vObj, err = s.patcher.ApplyPatches(ctx.Context, pObj, vObj, s) + err = IgnoreAcceptableErrors(err) if err != nil { // when invalid, auto delete and recreate to recover if kerrors.IsInvalid(err) && s.replaceWhenInvalid { diff --git a/pkg/controllers/generic/patcher.go b/pkg/controllers/generic/patcher.go index 41a3bd04d..1b138e832 100644 --- a/pkg/controllers/generic/patcher.go +++ b/pkg/controllers/generic/patcher.go @@ -24,6 +24,14 @@ type ObjectPatcherAndMetadataTranslator interface { var ErrNoUpdateNeeded = errors.New("no update needed") +func IgnoreAcceptableErrors(err error) error { + if errors.Is(err, ErrNoUpdateNeeded) { + return nil + } + + return err +} + // ObjectPatcher is the heart of the export and import syncers. 
The following functions are executed based on the lifecycle: // During Creation: // * ServerSideApply with nil existingOtherObj @@ -88,10 +96,6 @@ func (s *Patcher) ApplyPatches(ctx context.Context, fromObj, toObj client.Object // apply patches on from object err = modifier.ServerSideApply(ctx, fromObj, toObjCopied, toObj) if err != nil { - if errors.Is(err, ErrNoUpdateNeeded) { - return nil, nil - } - return nil, fmt.Errorf("error applying patches: %w", err) } @@ -190,6 +194,10 @@ func (s *Patcher) ApplyReversePatches(ctx context.Context, fromObj, otherObj cli } func toUnstructured(obj client.Object) (*unstructured.Unstructured, error) { + if obj == nil { + return nil, errors.New("nil obj") + } + fromCopied, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj.DeepCopyObject()) if err != nil { return nil, err diff --git a/pkg/controllers/register.go b/pkg/controllers/register.go index 132087b73..1be4c87ed 100644 --- a/pkg/controllers/register.go +++ b/pkg/controllers/register.go @@ -105,11 +105,17 @@ func Create(ctx *config.ControllerContext) ([]syncertypes.Object, error) { } createdController, err := newSyncer(registerContext) + + name := "" + if createdController != nil { + name = createdController.Name() + } + if err != nil { - return nil, fmt.Errorf("register controller: %w", err) + return nil, fmt.Errorf("register %s controller: %w", name, err) } - loghelper.Infof("Start %s sync controller", createdController.Name()) + loghelper.Infof("Start %s sync controller", name) syncers = append(syncers, createdController) } @@ -257,6 +263,9 @@ func RegisterServiceSyncControllers(ctx *config.ControllerContext) error { if err != nil { return err } + if globalLocalManager == nil { + return errors.New("nil globalLocalManager") + } // start the manager go func() { diff --git a/pkg/controllers/resources/configmaps/syncer.go b/pkg/controllers/resources/configmaps/syncer.go index d8dfbcf5a..b47322cec 100644 --- a/pkg/controllers/resources/configmaps/syncer.go +++ 
b/pkg/controllers/resources/configmaps/syncer.go @@ -107,7 +107,11 @@ func (s *configMapSyncer) Sync(ctx *synccontext.SyncContext, pObj client.Object, if err != nil { return ctrl.Result{}, err } else if !used { - pConfigMap, _ := meta.Accessor(pObj) + pConfigMap, err := meta.Accessor(pObj) + if err != nil { + return reconcile.Result{}, err + } + ctx.Log.Infof("delete physical config map %s/%s, because it is not used anymore", pConfigMap.GetNamespace(), pConfigMap.GetName()) err = ctx.PhysicalClient.Delete(ctx.Context, pObj) if err != nil { diff --git a/pkg/controllers/resources/endpoints/syncer.go b/pkg/controllers/resources/endpoints/syncer.go index 993bcf3b0..118a978dc 100644 --- a/pkg/controllers/resources/endpoints/syncer.go +++ b/pkg/controllers/resources/endpoints/syncer.go @@ -42,8 +42,11 @@ var _ syncer.Starter = &endpointsSyncer{} func (s *endpointsSyncer) ReconcileStart(ctx *synccontext.SyncContext, req ctrl.Request) (bool, error) { if req.NamespacedName == specialservices.DefaultKubernetesSvcKey { return true, nil - } else if _, ok := specialservices.Default.SpecialServicesToSync()[req.NamespacedName]; ok { - return true, nil + } + if specialservices.Default != nil { + if _, ok := specialservices.Default.SpecialServicesToSync()[req.NamespacedName]; ok { + return true, nil + } } svc := &corev1.Service{} diff --git a/pkg/controllers/resources/events/syncer.go b/pkg/controllers/resources/events/syncer.go index bdb83f819..708f13027 100644 --- a/pkg/controllers/resources/events/syncer.go +++ b/pkg/controllers/resources/events/syncer.go @@ -2,6 +2,7 @@ package events import ( "context" + "errors" "fmt" "strings" @@ -63,9 +64,9 @@ func (s *eventSyncer) VirtualToHost(context.Context, types.NamespacedName, clien func (s *eventSyncer) HostToVirtual(ctx context.Context, req types.NamespacedName, pObj client.Object) types.NamespacedName { involvedObject, err := s.getInvolvedObject(ctx, pObj) if err != nil { - klog.Infof("Error retrieving involved object for %s/%s: 
%v", req.Namespace, req.Name, err) - return types.NamespacedName{} - } else if involvedObject == nil { + if err := IgnoreAcceptableErrors(err); err != nil { + klog.Infof("Error retrieving involved object for %s/%s: %v", req.Namespace, req.Name, err) + } return types.NamespacedName{} } @@ -96,9 +97,7 @@ func (s *eventSyncer) Sync(ctx *synccontext.SyncContext, pObj client.Object, vOb vOldEvent := vEvent.DeepCopy() vEvent, err := s.buildVirtualEvent(ctx.Context, pEvent) if err != nil { - return ctrl.Result{}, err - } else if vEvent == nil { - return ctrl.Result{}, nil + return ctrl.Result{}, IgnoreAcceptableErrors(err) } // reset metadata @@ -127,9 +126,7 @@ func (s *eventSyncer) SyncToVirtual(ctx *synccontext.SyncContext, pObj client.Ob // build the virtual event vObj, err := s.buildVirtualEvent(ctx.Context, pObj.(*corev1.Event)) if err != nil { - return ctrl.Result{}, err - } else if vObj == nil { - return ctrl.Result{}, nil + return ctrl.Result{}, IgnoreAcceptableErrors(err) } // make sure namespace is not being deleted @@ -161,8 +158,6 @@ func (s *eventSyncer) buildVirtualEvent(ctx context.Context, pEvent *corev1.Even involvedObject, err := s.getInvolvedObject(ctx, pEvent) if err != nil { return nil, err - } else if involvedObject == nil { - return nil, nil } // copy physical object @@ -194,20 +189,38 @@ func hostEventNameToVirtual(hostName string, hostInvolvedObjectName, virtualInvo return hostName } +var ( + ErrNilPhysicalObject = errors.New("events: nil pObject") + ErrKindNotAccepted = errors.New("events: kind not accepted") + ErrNotFound = errors.New("events: not found") +) + +func IgnoreAcceptableErrors(err error) error { + if errors.Is(err, ErrNilPhysicalObject) || + errors.Is(err, ErrKindNotAccepted) || + errors.Is(err, ErrNotFound) { + return nil + } + + return err +} + +// getInvolvedObject returns the related object from the vCluster. +// Alternatively returns an ErrNilPhysicalObject, ErrKindNotAccepted or ErrNotFound.
func (s *eventSyncer) getInvolvedObject(ctx context.Context, pObj client.Object) (metav1.Object, error) { if pObj == nil { - return nil, nil + return nil, ErrNilPhysicalObject } pEvent, ok := pObj.(*corev1.Event) if !ok { - return nil, fmt.Errorf("object is not of type event") + return nil, errors.New("object is not of type event") } // check if the involved object is accepted gvk := pEvent.InvolvedObject.GroupVersionKind() if !AcceptedKinds[gvk] { - return nil, nil + return nil, ErrKindNotAccepted } // create new virtual object @@ -223,7 +236,7 @@ func (s *eventSyncer) getInvolvedObject(ctx context.Context, pObj client.Object) return nil, err } - return nil, nil + return nil, fmt.Errorf("%w: %w", ErrNotFound, err) } // we found the related object diff --git a/pkg/controllers/resources/ingresses/util/util.go b/pkg/controllers/resources/ingresses/util/util.go index c8f1a507d..e891f7283 100644 --- a/pkg/controllers/resources/ingresses/util/util.go +++ b/pkg/controllers/resources/ingresses/util/util.go @@ -9,10 +9,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -const AlbConditionAnnotation = "alb.ingress.kubernetes.io/conditions" -const AlbActionsAnnotation = "alb.ingress.kubernetes.io/actions" -const ConditionSuffix = "/conditions." -const ActionsSuffix = "/actions." +const ( + AlbConditionAnnotation = "alb.ingress.kubernetes.io/conditions" + AlbActionsAnnotation = "alb.ingress.kubernetes.io/actions" + ConditionSuffix = "/conditions." + ActionsSuffix = "/actions." 
+) func getActionOrConditionValue(annotation, actionOrCondition string) string { i := strings.Index(annotation, actionOrCondition) @@ -24,14 +26,14 @@ func getActionOrConditionValue(annotation, actionOrCondition string) string { // ref https://github.com/kubernetes-sigs/aws-load-balancer-controller/blob/main/pkg/ingress/config_types.go type actionPayload struct { - Type string `json:"type,omitempty"` TargetGroupARN *string `json:"targetGroupARN,omitempty"` FixedResponseConfig map[string]interface{} `json:"fixedResponseConfig,omitempty"` + RedirectConfig map[string]interface{} `json:"redirectConfig,omitempty"` + Type string `json:"type,omitempty"` ForwardConfig struct { - TargetGroups []map[string]interface{} `json:"targetGroups,omitempty"` TargetGroupStickinessConfig map[string]interface{} `json:"targetGroupStickinessConfig,omitempty"` + TargetGroups []map[string]interface{} `json:"targetGroups,omitempty"` } `json:"forwardConfig,omitempty"` - RedirectConfig map[string]interface{} `json:"redirectConfig,omitempty"` } func ProcessAlbAnnotations(namespace string, k string, v string) (string, string) { @@ -46,7 +48,7 @@ func ProcessAlbAnnotations(namespace string, k string, v string) (string, string err := json.Unmarshal([]byte(v), &payload) if err != nil { klog.Errorf("Could not unmarshal payload: %v", err) - } else { + } else if payload != nil { for _, targetGroup := range payload.ForwardConfig.TargetGroups { if targetGroup["serviceName"] != nil { switch svcName := targetGroup["serviceName"].(type) { diff --git a/pkg/controllers/resources/namespaces/translate.go b/pkg/controllers/resources/namespaces/translate.go index 4ab8c56ce..060a87632 100644 --- a/pkg/controllers/resources/namespaces/translate.go +++ b/pkg/controllers/resources/namespaces/translate.go @@ -12,6 +12,10 @@ import ( func (s *namespaceSyncer) translate(ctx context.Context, vObj client.Object) *corev1.Namespace { newNamespace := s.TranslateMetadata(ctx, vObj).(*corev1.Namespace) + if 
newNamespace.Labels == nil { + newNamespace.Labels = map[string]string{} + } + // add user defined namespace labels for k, v := range s.namespaceLabels { newNamespace.Labels[k] = v @@ -24,6 +28,9 @@ func (s *namespaceSyncer) translateUpdate(ctx context.Context, pObj, vObj *corev var updated *corev1.Namespace _, updatedAnnotations, updatedLabels := s.TranslateMetadataUpdate(ctx, vObj, pObj) + if updatedLabels == nil { + updatedLabels = map[string]string{} + } // add user defined namespace labels for k, v := range s.namespaceLabels { updatedLabels[k] = v diff --git a/pkg/controllers/resources/networkpolicies/translate.go b/pkg/controllers/resources/networkpolicies/translate.go index c072a43f8..ade1e9779 100644 --- a/pkg/controllers/resources/networkpolicies/translate.go +++ b/pkg/controllers/resources/networkpolicies/translate.go @@ -12,17 +12,20 @@ import ( func (s *networkPolicySyncer) translate(ctx context.Context, vNetworkPolicy *networkingv1.NetworkPolicy) *networkingv1.NetworkPolicy { newNetworkPolicy := s.TranslateMetadata(ctx, vNetworkPolicy).(*networkingv1.NetworkPolicy) - newNetworkPolicy.Spec = *translateSpec(&vNetworkPolicy.Spec, vNetworkPolicy.GetNamespace()) + if spec := translateSpec(&vNetworkPolicy.Spec, vNetworkPolicy.GetNamespace()); spec != nil { + newNetworkPolicy.Spec = *spec + } return newNetworkPolicy } func (s *networkPolicySyncer) translateUpdate(ctx context.Context, pObj, vObj *networkingv1.NetworkPolicy) *networkingv1.NetworkPolicy { var updated *networkingv1.NetworkPolicy - translatedSpec := *translateSpec(&vObj.Spec, vObj.GetNamespace()) - if !equality.Semantic.DeepEqual(translatedSpec, pObj.Spec) { - updated = translator.NewIfNil(updated, pObj) - updated.Spec = translatedSpec + if translatedSpec := translateSpec(&vObj.Spec, vObj.GetNamespace()); translatedSpec != nil { + if !equality.Semantic.DeepEqual(translatedSpec, pObj.Spec) { + updated = translator.NewIfNil(updated, pObj) + updated.Spec = *translatedSpec + } } changed, 
translatedAnnotations, translatedLabels := s.TranslateMetadataUpdate(ctx, vObj, pObj) @@ -65,14 +68,16 @@ func translateSpec(spec *networkingv1.NetworkPolicySpec, namespace string) *netw panic("Multi-Namespace Mode not supported for network policies yet!") } - outSpec.PodSelector = *translate.Default.TranslateLabelSelector(&spec.PodSelector) - if outSpec.PodSelector.MatchLabels == nil { - outSpec.PodSelector.MatchLabels = map[string]string{} + if translatedLabelSelector := translate.Default.TranslateLabelSelector(&spec.PodSelector); translatedLabelSelector != nil { + outSpec.PodSelector = *translatedLabelSelector + if outSpec.PodSelector.MatchLabels == nil { + outSpec.PodSelector.MatchLabels = map[string]string{} + } + // add selector for namespace as NetworkPolicy podSelector applies to pods within it's namespace + outSpec.PodSelector.MatchLabels[translate.NamespaceLabel] = namespace + // add selector for the marker label to select only from pods belonging this vcluster instance + outSpec.PodSelector.MatchLabels[translate.MarkerLabel] = translate.VClusterName } - // add selector for namespace as NetworkPolicy podSelector applies to pods within it's namespace - outSpec.PodSelector.MatchLabels[translate.NamespaceLabel] = namespace - // add selector for the marker label to select only from pods belonging this vcluster instance - outSpec.PodSelector.MatchLabels[translate.MarkerLabel] = translate.VClusterName outSpec.PolicyTypes = spec.PolicyTypes return outSpec diff --git a/pkg/controllers/resources/nodes/syncer.go b/pkg/controllers/resources/nodes/syncer.go index 1110b3780..946d99524 100644 --- a/pkg/controllers/resources/nodes/syncer.go +++ b/pkg/controllers/resources/nodes/syncer.go @@ -275,10 +275,10 @@ func (s *nodeSyncer) Sync(ctx *synccontext.SyncContext, pObj client.Object, vObj return ctrl.Result{}, ctx.VirtualClient.Delete(ctx.Context, vObj) } - updatedVNode, err := s.translateUpdateStatus(ctx, pNode, vNode) + updatedVNode, statusChanged, err := 
s.translateUpdateStatus(ctx, pNode, vNode) if err != nil { return ctrl.Result{}, errors.Wrap(err, "update node status") - } else if updatedVNode != nil { + } else if statusChanged { ctx.Log.Infof("update virtual node %s, because status has changed", pNode.Name) translator.PrintChanges(vNode, updatedVNode, ctx.Log) err := ctx.VirtualClient.Status().Update(ctx.Context, updatedVNode) diff --git a/pkg/controllers/resources/nodes/translate.go b/pkg/controllers/resources/nodes/translate.go index 6debccabf..13ed686b1 100644 --- a/pkg/controllers/resources/nodes/translate.go +++ b/pkg/controllers/resources/nodes/translate.go @@ -2,12 +2,12 @@ package nodes import ( "encoding/json" + "fmt" "os" "github.com/loft-sh/vcluster/pkg/constants" synccontext "github.com/loft-sh/vcluster/pkg/controllers/syncer/context" "github.com/loft-sh/vcluster/pkg/controllers/syncer/translator" - "github.com/pkg/errors" "k8s.io/apimachinery/pkg/api/resource" "sigs.k8s.io/controller-runtime/pkg/client" @@ -18,9 +18,7 @@ import ( "k8s.io/klog/v2" ) -var ( - TaintsAnnotation = "vcluster.loft.sh/original-taints" -) +var TaintsAnnotation = "vcluster.loft.sh/original-taints" func (s *nodeSyncer) translateUpdateBackwards(pNode *corev1.Node, vNode *corev1.Node) *corev1.Node { var updated *corev1.Node @@ -129,7 +127,9 @@ func (s *nodeSyncer) translateUpdateBackwards(pNode *corev1.Node, vNode *corev1. return updated } -func (s *nodeSyncer) translateUpdateStatus(ctx *synccontext.SyncContext, pNode *corev1.Node, vNode *corev1.Node) (*corev1.Node, error) { +// translateUpdateStatus translates the node's status. +// Returns a Node object, a boolean indicating whether it changed or an error. 
+func (s *nodeSyncer) translateUpdateStatus(ctx *synccontext.SyncContext, pNode *corev1.Node, vNode *corev1.Node) (*corev1.Node, bool, error) { // translate node status first translatedStatus := pNode.Status.DeepCopy() if s.useFakeKubelets { @@ -153,7 +153,7 @@ func (s *nodeSyncer) translateUpdateStatus(ctx *synccontext.SyncContext, pNode * // create new service for this node nodeIP, err := s.nodeServiceProvider.GetNodeIP(ctx.Context, vNode.Name) if err != nil { - return nil, errors.Wrap(err, "get vNode IP") + return nil, false, fmt.Errorf("get vNode IP: %w", err) } newAddresses = append(newAddresses, corev1.NodeAddress{ @@ -256,14 +256,10 @@ func (s *nodeSyncer) translateUpdateStatus(ctx *synccontext.SyncContext, pNode * translatedStatus.Images = make([]corev1.ContainerImage, 0) } - // check if the status has changed - if !equality.Semantic.DeepEqual(vNode.Status, *translatedStatus) { - newNode := vNode.DeepCopy() - newNode.Status = *translatedStatus - return newNode, nil - } + newNode := vNode.DeepCopy() + newNode.Status = *translatedStatus - return nil, nil + return newNode, !equality.Semantic.DeepEqual(vNode.Status, *translatedStatus), nil } func mergeStrings(physical []string, virtual []string, oldPhysical []string) []string { diff --git a/pkg/controllers/resources/persistentvolumes/translate.go b/pkg/controllers/resources/persistentvolumes/translate.go index 25061cb88..9be073efc 100644 --- a/pkg/controllers/resources/persistentvolumes/translate.go +++ b/pkg/controllers/resources/persistentvolumes/translate.go @@ -33,6 +33,10 @@ func (s *persistentVolumeSyncer) translateBackwards(pPv *corev1.PersistentVolume vObj.UID = "" vObj.ManagedFields = nil if vPvc != nil { + if vObj.Spec.ClaimRef == nil { + vObj.Spec.ClaimRef = &corev1.ObjectReference{} + } + vObj.Spec.ClaimRef.ResourceVersion = vPvc.ResourceVersion vObj.Spec.ClaimRef.UID = vPvc.UID vObj.Spec.ClaimRef.Name = vPvc.Name @@ -55,6 +59,10 @@ func (s *persistentVolumeSyncer) translateUpdateBackwards(vPv 
*corev1.Persistent translatedSpec := *pPv.Spec.DeepCopy() isStorageClassCreatedOnVirtual, isClaimRefCreatedOnVirtual := false, false if vPvc != nil { + if translatedSpec.ClaimRef == nil { + translatedSpec.ClaimRef = &corev1.ObjectReference{} + } + translatedSpec.ClaimRef.ResourceVersion = vPvc.ResourceVersion translatedSpec.ClaimRef.UID = vPvc.UID translatedSpec.ClaimRef.Name = vPvc.Name diff --git a/pkg/controllers/resources/pods/conditions_test.go b/pkg/controllers/resources/pods/conditions_test.go index 5e4203222..1d04d9516 100644 --- a/pkg/controllers/resources/pods/conditions_test.go +++ b/pkg/controllers/resources/pods/conditions_test.go @@ -242,6 +242,9 @@ func TestUpdateConditions(t *testing.T) { VirtualClient: vClient, }, testCase.pPod, testCase.vPod) assert.NilError(t, err, "unexpected error in testCase %s", testCase.name) + if updated == nil { + t.Fatal("updated is empty") + } assert.DeepEqual(t, updated.Status.Conditions, testCase.expectedVirtualConditions) // check physical conditions diff --git a/pkg/controllers/resources/pods/ephemeral_containers.go b/pkg/controllers/resources/pods/ephemeral_containers.go index b4a097b0e..457553f19 100644 --- a/pkg/controllers/resources/pods/ephemeral_containers.go +++ b/pkg/controllers/resources/pods/ephemeral_containers.go @@ -44,7 +44,7 @@ func AddEphemeralContainer(ctx *synccontext.SyncContext, physicalClusterClient k if err != nil { // The apiserver will return a 404 when the EphemeralContainers feature is disabled because the `/ephemeralcontainers` subresource // is missing. Unlike the 404 returned by a missing physicalPod, the status details will be empty. 
- if serr, ok := lo.ErrorsAs[*kerrors.StatusError](err); ok && serr.Status().Reason == metav1.StatusReasonNotFound && serr.ErrStatus.Details.Name == "" { + if serr, ok := lo.ErrorsAs[*kerrors.StatusError](err); ok && serr != nil && serr.Status().Reason == metav1.StatusReasonNotFound && serr.ErrStatus.Details.Name == "" { return fmt.Errorf("ephemeral containers are disabled for this cluster (error from server: %w)", err) } // The Kind used for the /ephemeralcontainers subresource changed in 1.22. When presented with an unexpected diff --git a/pkg/controllers/resources/pods/translate.go b/pkg/controllers/resources/pods/translate.go index a61372b4f..c10a1b24c 100644 --- a/pkg/controllers/resources/pods/translate.go +++ b/pkg/controllers/resources/pods/translate.go @@ -2,6 +2,7 @@ package pods import ( "context" + "errors" "fmt" podtranslate "github.com/loft-sh/vcluster/pkg/controllers/resources/pods/translate" @@ -55,7 +56,7 @@ func (s *podSyncer) getK8sIPDNSIPServiceList(ctx *synccontext.SyncContext, vPod func (s *podSyncer) translateUpdate(ctx context.Context, pClient client.Client, pObj, vObj *corev1.Pod) (*corev1.Pod, error) { secret, err := podtranslate.GetSecretIfExists(ctx, pClient, vObj.Name, vObj.Namespace) - if err != nil { + if err := podtranslate.IgnoreAcceptableErrors(err); err != nil { return nil, err } else if secret != nil { // check if owner is vcluster service, if so, modify to pod as owner @@ -82,6 +83,10 @@ func (s *podSyncer) findKubernetesIP(ctx *synccontext.SyncContext) (string, erro } func (s *podSyncer) findKubernetesDNSIP(ctx *synccontext.SyncContext) (string, error) { + if specialservices.Default == nil { + return "", errors.New("specialservices default not initialized") + } + pClient, namespace := specialservices.Default.DNSNamespace(ctx) // first try to find the actual synced service, then fallback to a different if we have a suffix (only in the case of integrated coredns) diff --git 
a/pkg/controllers/resources/pods/translate/sa_token_secret.go b/pkg/controllers/resources/pods/translate/sa_token_secret.go
index e37a31871..4562f36d2 100644
--- a/pkg/controllers/resources/pods/translate/sa_token_secret.go
+++ b/pkg/controllers/resources/pods/translate/sa_token_secret.go
@@ -2,6 +2,7 @@ package translate
 
 import (
 	"context"
+	"errors"
 	"fmt"
 
 	"github.com/loft-sh/vcluster/pkg/util/translate"
@@ -24,6 +25,16 @@ func SecretNameFromPodName(podName, namespace string) string {
 	return translate.Default.PhysicalName(fmt.Sprintf("%s-sa-token", podName), namespace)
 }
 
+var ErrNotFound = errors.New("translate: not found")
+
+func IgnoreAcceptableErrors(err error) error {
+	if errors.Is(err, ErrNotFound) {
+		return nil
+	}
+
+	return err
+}
+
 func GetSecretIfExists(ctx context.Context, pClient client.Client, vPodName, vNamespace string) (*corev1.Secret, error) {
 	secret := &corev1.Secret{}
 	err := pClient.Get(ctx, types.NamespacedName{
@@ -32,7 +43,7 @@ func GetSecretIfExists(ctx context.Context, pClient client.Client, vPodName, vNa
 	}, secret)
 	if err != nil {
 		if kerrors.IsNotFound(err) {
-			return nil, nil
+			return nil, ErrNotFound
 		}
 
 		return nil, err
@@ -43,7 +54,7 @@ func GetSecretIfExists(ctx context.Context, pClient client.Client, vPodName, vNa
 
 func SATokenSecret(ctx context.Context, pClient client.Client, vPod *corev1.Pod, tokens map[string]string) error {
 	existingSecret, err := GetSecretIfExists(ctx, pClient, vPod.Name, vPod.Namespace)
-	if err != nil {
+	if err := IgnoreAcceptableErrors(err); err != nil {
 		return err
 	}
 
@@ -96,19 +107,18 @@ func SetPodAsOwner(ctx context.Context, pPod *corev1.Pod, pClient client.Client,
 		UID:        pPod.GetUID(),
 	}
 
-	owners := secret.GetOwnerReferences()
 	if translate.Owner != nil {
 		// check if the current owner is the vcluster service
-		for i, owner := range owners {
+		for i, owner := range secret.OwnerReferences {
 			if owner.UID == translate.Owner.GetUID() {
 				// path this with current pod as owner instead
-				
secret.ObjectMeta.OwnerReferences[i] = podOwnerReference + secret.OwnerReferences[i] = podOwnerReference break } } } else { // check if pod is already correctly set as one of the owners - for _, owner := range owners { + for _, owner := range secret.OwnerReferences { if equality.Semantic.DeepEqual(owner, podOwnerReference) { // no update needed return nil @@ -116,7 +126,7 @@ func SetPodAsOwner(ctx context.Context, pPod *corev1.Pod, pClient client.Client, } // pod not set as owner update accordingly - secret.ObjectMeta.OwnerReferences = append(secret.ObjectMeta.OwnerReferences, podOwnerReference) + secret.OwnerReferences = append(secret.OwnerReferences, podOwnerReference) } return pClient.Update(ctx, secret) diff --git a/pkg/controllers/resources/secrets/syncer.go b/pkg/controllers/resources/secrets/syncer.go index b36ff2442..838579b70 100644 --- a/pkg/controllers/resources/secrets/syncer.go +++ b/pkg/controllers/resources/secrets/syncer.go @@ -116,11 +116,10 @@ func (s *secretSyncer) Sync(ctx *synccontext.SyncContext, pObj client.Object, vO if err != nil { return ctrl.Result{}, err } else if !used { - pSecret, _ := meta.Accessor(pObj) - ctx.Log.Infof("delete physical secret %s/%s, because it is not used anymore", pSecret.GetNamespace(), pSecret.GetName()) + ctx.Log.Infof("delete physical secret %s/%s, because it is not used anymore", pObj.GetNamespace(), pObj.GetName()) err = ctx.PhysicalClient.Delete(ctx.Context, pObj) if err != nil { - ctx.Log.Infof("error deleting physical object %s/%s in physical cluster: %v", pSecret.GetNamespace(), pSecret.GetName(), err) + ctx.Log.Infof("error deleting physical object %s/%s in physical cluster: %v", pObj.GetNamespace(), pObj.GetName(), err) return ctrl.Result{}, err } diff --git a/pkg/controllers/resources/services/syncer.go b/pkg/controllers/resources/services/syncer.go index 3cc9a4740..fc938d477 100644 --- a/pkg/controllers/resources/services/syncer.go +++ b/pkg/controllers/resources/services/syncer.go @@ -2,6 +2,7 @@ 
package services import ( "context" + "errors" "time" "github.com/loft-sh/vcluster/pkg/controllers/syncer" @@ -158,6 +159,10 @@ var _ syncertypes.Starter = &serviceSyncer{} func (s *serviceSyncer) ReconcileStart(ctx *synccontext.SyncContext, req ctrl.Request) (bool, error) { // don't do anything for the kubernetes service + if specialservices.Default == nil { + return false, errors.New("specialservices default not initialized yet") + } + specialServices := specialservices.Default.SpecialServicesToSync() if svc, ok := specialServices[req.NamespacedName]; ok { return true, svc(ctx, ctx.CurrentNamespace, s.serviceName, req.NamespacedName, TranslateServicePorts) diff --git a/pkg/controllers/resources/volumesnapshots/volumesnapshotcontents/syncer.go b/pkg/controllers/resources/volumesnapshots/volumesnapshotcontents/syncer.go index cccacbad8..502eaad8c 100644 --- a/pkg/controllers/resources/volumesnapshots/volumesnapshotcontents/syncer.go +++ b/pkg/controllers/resources/volumesnapshots/volumesnapshotcontents/syncer.go @@ -219,7 +219,7 @@ func (s *volumeSnapshotContentSyncer) shouldSync(ctx context.Context, pObj *volu if !kerrors.IsNotFound(err) { return false, nil, err } else if translate.Default.IsManagedCluster(pObj) { - return true, nil, nil + return true, vVS, nil } return false, nil, nil } diff --git a/pkg/controllers/resources/volumesnapshots/volumesnapshotcontents/translate.go b/pkg/controllers/resources/volumesnapshots/volumesnapshotcontents/translate.go index e9d7a142e..4238ec35e 100644 --- a/pkg/controllers/resources/volumesnapshots/volumesnapshotcontents/translate.go +++ b/pkg/controllers/resources/volumesnapshots/volumesnapshotcontents/translate.go @@ -27,7 +27,9 @@ func (s *volumeSnapshotContentSyncer) translateBackwards(pVSC *volumesnapshotv1. 
vObj.UID = "" vObj.ManagedFields = nil - vObj.Spec.VolumeSnapshotRef = translateVolumeSnapshotRefBackwards(&vObj.Spec.VolumeSnapshotRef, vVS) + if vVS != nil { + vObj.Spec.VolumeSnapshotRef = translateVolumeSnapshotRefBackwards(&vObj.Spec.VolumeSnapshotRef, vVS) + } if vObj.Annotations == nil { vObj.Annotations = map[string]string{} @@ -52,7 +54,7 @@ func (s *volumeSnapshotContentSyncer) translateUpdateBackwards(pVSC, vVSC *volum updated.Finalizers = pCopy.Finalizers } - //TODO: consider syncing certain annotations, e.g.: + // TODO: consider syncing certain annotations, e.g.: // "snapshot.storage.kubernetes.io/volumesnapshot-being-deleted" or // "snapshot.storage.kubernetes.io/volumesnapshot-being-created" diff --git a/pkg/controllers/syncer/syncer.go b/pkg/controllers/syncer/syncer.go index 68c853333..112cc81f7 100644 --- a/pkg/controllers/syncer/syncer.go +++ b/pkg/controllers/syncer/syncer.go @@ -120,7 +120,7 @@ func (r *SyncController) Reconcile(ctx context.Context, origReq ctrl.Request) (_ // check what function we should call if vObj != nil && pObj == nil { return r.syncer.SyncToHost(syncContext, vObj) - } else if vObj != nil { + } else if vObj != nil && pObj != nil { // make sure the object uid matches pAnnotations := pObj.GetAnnotations() if !r.options.DisableUIDDeletion && pAnnotations != nil && pAnnotations[translate.UIDAnnotation] != "" && pAnnotations[translate.UIDAnnotation] != string(vObj.GetUID()) { diff --git a/pkg/controllers/syncer/syncer_test.go b/pkg/controllers/syncer/syncer_test.go index 22583e5c5..b06536433 100644 --- a/pkg/controllers/syncer/syncer_test.go +++ b/pkg/controllers/syncer/syncer_test.go @@ -2,6 +2,7 @@ package syncer import ( "context" + "errors" "sort" "testing" @@ -39,6 +40,7 @@ func (s *mockSyncer) naiveTranslateCreate(ctx *synccontext.SyncContext, vObj cli pObj := s.TranslateMetadata(ctx.Context, vObj) return pObj } + func (s *mockSyncer) naiveTranslateUpdate(ctx *synccontext.SyncContext, vObj client.Object, pObj 
client.Object) client.Object { _, updatedAnnotations, updatedLabels := s.TranslateMetadataUpdate(ctx.Context, vObj, pObj) newPObj := pObj.DeepCopyObject().(client.Object) @@ -49,7 +51,12 @@ func (s *mockSyncer) naiveTranslateUpdate(ctx *synccontext.SyncContext, vObj cli // SyncToHost is called when a virtual object was created and needs to be synced down to the physical cluster func (s *mockSyncer) SyncToHost(ctx *synccontext.SyncContext, vObj client.Object) (ctrl.Result, error) { - return s.SyncToHostCreate(ctx, vObj, s.naiveTranslateCreate(ctx, vObj)) + pObj := s.naiveTranslateCreate(ctx, vObj) + if pObj == nil { + return ctrl.Result{}, errors.New("naive translate create failed") + } + + return s.SyncToHostCreate(ctx, vObj, pObj) } // Sync is called to sync a virtual object with a physical object diff --git a/pkg/controllers/syncer/testing/context.go b/pkg/controllers/syncer/testing/context.go index eee290827..479e1a077 100644 --- a/pkg/controllers/syncer/testing/context.go +++ b/pkg/controllers/syncer/testing/context.go @@ -33,6 +33,9 @@ const ( func FakeStartSyncer(t *testing.T, ctx *synccontext.RegisterContext, create func(ctx *synccontext.RegisterContext) (syncer.Object, error)) (*synccontext.SyncContext, syncer.Object) { object, err := create(ctx) assert.NilError(t, err) + if object == nil { + t.Fatal("object is nil") + } // run register indices registerer, ok := object.(syncer.IndicesRegisterer) diff --git a/pkg/etcd/util.go b/pkg/etcd/util.go index 91e6cf7a8..cb0990419 100644 --- a/pkg/etcd/util.go +++ b/pkg/etcd/util.go @@ -104,11 +104,16 @@ func getClientConfig(ctx context.Context, certificates *Certificates, endpoints PermitWithoutStream: true, } - var err error - if strings.HasPrefix(endpoints[0], "https://") && certificates != nil { - config.TLS, err = toTLSConfig(certificates) + if len(endpoints) > 0 { + if strings.HasPrefix(endpoints[0], "https://") && certificates != nil { + var err error + if config.TLS, err = toTLSConfig(certificates); err != nil 
{ + return nil, err + } + } } - return config, err + + return config, nil } func toTLSConfig(certificates *Certificates) (*tls.Config, error) { diff --git a/pkg/k3s/k3s.go b/pkg/k3s/k3s.go index 6e1e88dac..97d3c95e3 100644 --- a/pkg/k3s/k3s.go +++ b/pkg/k3s/k3s.go @@ -2,6 +2,7 @@ package k3s import ( "context" + "errors" "fmt" "os" "os/exec" @@ -110,6 +111,13 @@ func StartK3S(ctx context.Context, vConfig *config.VirtualClusterConfig, service } func EnsureK3SToken(ctx context.Context, currentNamespaceClient kubernetes.Interface, currentNamespace, vClusterName string, options *config.VirtualClusterConfig) (string, error) { + if currentNamespaceClient == nil { + return "", errors.New("nil currentNamespaceClient") + } + if options == nil { + return "", errors.New("nil options") + } + // check if token is set externally if options.ControlPlane.Distro.K3S.Token != "" { return options.ControlPlane.Distro.K3S.Token, nil diff --git a/pkg/lifecycle/lifecycle.go b/pkg/lifecycle/lifecycle.go index bb9cc90ef..8868e6244 100644 --- a/pkg/lifecycle/lifecycle.go +++ b/pkg/lifecycle/lifecycle.go @@ -2,6 +2,7 @@ package lifecycle import ( "context" + "fmt" "strconv" "time" @@ -65,7 +66,6 @@ func DeletePods(ctx context.Context, kubeClient *kubernetes.Clientset, labelSele log.Infof("Delete %d vcluster pods", len(list.Items)) for _, item := range list.Items { err = kubeClient.CoreV1().Pods(namespace).Delete(ctx, item.Name, metav1.DeleteOptions{}) - if err != nil { if kerrors.IsNotFound(err) { continue @@ -86,7 +86,10 @@ func DeleteMultiNamespaceVClusterWorkloads(ctx context.Context, client *kubernet }), }) if err != nil && !kerrors.IsForbidden(err) { - return errors.Wrap(err, "list namespaces") + return fmt.Errorf("list namespaces: %w", err) + } + if namespaces == nil { + return errors.New("list namespaces: nil result") } // delete all pods inside the above returned namespaces diff --git a/pkg/patches/operation.go b/pkg/patches/operation.go index a79e69d07..43464fea3 100644 --- 
a/pkg/patches/operation.go +++ b/pkg/patches/operation.go @@ -17,6 +17,9 @@ func Find(doc *yaml.Node, predicate func(*yaml.Node) bool) *yaml.Node { if predicate(doc) { return doc } + if doc == nil { + return nil + } for _, content := range doc.Content { if found := Find(content, predicate); found != nil { @@ -53,7 +56,7 @@ func removeProperty(parent *yaml.Node, child *yaml.Node) []*yaml.Node { } func removeChild(parent *yaml.Node, child *yaml.Node) []*yaml.Node { - var remaining []*yaml.Node + remaining := make([]*yaml.Node, 0) for _, current := range parent.Content { if child == current { continue diff --git a/pkg/patches/patch_test.go b/pkg/patches/patch_test.go index 03f88210e..43fe66d4a 100644 --- a/pkg/patches/patch_test.go +++ b/pkg/patches/patch_test.go @@ -454,9 +454,11 @@ func (f *fakeNameResolver) TranslateLabelKey(key string) (string, error) { return key, nil } +var ErrNilSelector = errors.New("fake: nil selector") + func (f *fakeNameResolver) TranslateLabelExpressionsSelector(selector *metav1.LabelSelector) (*metav1.LabelSelector, error) { if selector == nil { - return nil, nil + return nil, ErrNilSelector } if selector.MatchLabels == nil { @@ -468,7 +470,7 @@ func (f *fakeNameResolver) TranslateLabelExpressionsSelector(selector *metav1.La func (f *fakeNameResolver) TranslateLabelSelector(selector map[string]string) (map[string]string, error) { if selector == nil { - return nil, nil + return nil, ErrNilSelector } selector["test"] = "test" return selector, nil @@ -507,7 +509,7 @@ func (r *fakeVirtualToHostNameResolver) TranslateLabelKey(key string) (string, e func (r *fakeVirtualToHostNameResolver) TranslateLabelExpressionsSelector(selector *metav1.LabelSelector) (*metav1.LabelSelector, error) { if selector == nil { - return nil, nil + return nil, ErrNilSelector } if selector.MatchLabels == nil { @@ -519,7 +521,7 @@ func (r *fakeVirtualToHostNameResolver) TranslateLabelExpressionsSelector(select func (r *fakeVirtualToHostNameResolver) 
TranslateLabelSelector(selector map[string]string) (map[string]string, error) { if selector == nil { - return nil, nil + return nil, ErrNilSelector } selector["test"] = "test" return selector, nil diff --git a/pkg/patches/patch_types.go b/pkg/patches/patch_types.go index aaec4d451..4b4514e13 100644 --- a/pkg/patches/patch_types.go +++ b/pkg/patches/patch_types.go @@ -60,8 +60,9 @@ func CopyFromObject(obj1, obj2 *yaml.Node, patch *config.Patch) error { }, }) } else { - parent := Find(obj1, ContainsChild(m)) - removeChild(parent, m) + if parent := Find(obj1, ContainsChild(m)); parent != nil { + removeChild(parent, m) + } } } @@ -71,18 +72,22 @@ func CopyFromObject(obj1, obj2 *yaml.Node, patch *config.Patch) error { func Remove(obj1 *yaml.Node, patch *config.Patch) error { matches, err := FindMatches(obj1, patch.Path) if err != nil { - return errors.Wrap(err, "find matches") + return fmt.Errorf("find matches: %w", err) } for _, m := range matches { validated, err := ValidateAllConditions(obj1, m, patch.Conditions) if err != nil { - return errors.Wrap(err, "validate conditions") + return fmt.Errorf("validate conditions: %w", err) } else if !validated { continue } parent := Find(obj1, ContainsChild(m)) + if parent == nil { + continue + } + switch parent.Kind { case yaml.MappingNode: parent.Content = removeProperty(parent, m) @@ -172,7 +177,6 @@ func RewriteName(obj1 *yaml.Node, patch *config.Patch, resolver NameResolver) er case yaml.SequenceNode: for _, subNode := range m.Content { err = ProcessRewrite(subNode, patch, resolver) - if err != nil { return err } @@ -196,7 +200,6 @@ func ProcessRewrite(obj *yaml.Node, patch *config.Patch, resolver NameResolver) if patch.NamespacePath != "" { namespace, err = GetNamespace(obj, patch) - if err != nil { return err } @@ -212,7 +215,6 @@ func ProcessRewrite(obj *yaml.Node, patch *config.Patch, resolver NameResolver) continue } err = ValidateAndTranslateName(obj, nameMatch, patch, resolver, namespace) - if err != nil { return err 
} @@ -230,7 +232,6 @@ func ProcessRewrite(obj *yaml.Node, patch *config.Patch, resolver NameResolver) continue } err = ValidateAndTranslateNamespace(obj, namespaceMatch, patch, resolver) - if err != nil { return err } @@ -473,10 +474,11 @@ func createPath(obj1 *yaml.Node, path string, value *yaml.Node) error { // check if we expect an array or map as parent for _, match := range matches { - parent := Find(obj1, ContainsChild(match)) switch match.Kind { case yaml.ScalarNode: - parent.Content = AddChildAtIndex(parent, match, value) + if parent := Find(obj1, ContainsChild(match)); parent != nil { + parent.Content = AddChildAtIndex(parent, match, value) + } case yaml.MappingNode: match.Content = append(match.Content, &yaml.Node{ Kind: yaml.ScalarNode, @@ -545,10 +547,11 @@ func createMappingNode(path string, child *yaml.Node) *yaml.Node { } func AddNode(obj1 *yaml.Node, match *yaml.Node, value *yaml.Node) { - parent := Find(obj1, ContainsChild(match)) switch match.Kind { case yaml.ScalarNode: - parent.Content = AddChildAtIndex(parent, match, value) + if parent := Find(obj1, ContainsChild(match)); parent != nil { + parent.Content = AddChildAtIndex(parent, match, value) + } case yaml.MappingNode: match.Content = append(match.Content, value.Content[0].Content...) 
case yaml.SequenceNode: diff --git a/pkg/platform/backup/backup.go b/pkg/platform/backup/backup.go index 5aec4f398..c57ee03d3 100644 --- a/pkg/platform/backup/backup.go +++ b/pkg/platform/backup/backup.go @@ -2,6 +2,7 @@ package backup import ( "context" + "fmt" "strings" "github.com/ghodss/yaml" @@ -331,10 +332,14 @@ func clusters(ctx context.Context, client clientpkg.Client) ([]runtime.Object, e if u.Spec.Config.SecretName != "" { secret, err := getSecret(ctx, client, u.Spec.Config.SecretNamespace, u.Spec.Config.SecretName) if err != nil { - return nil, errors.Wrap(err, "get cluster secret") - } else if secret != nil { - retList = append(retList, secret) + if !errors.Is(err, ErrMissingName) { + return nil, fmt.Errorf("get cluster secret: %w", err) + } + + continue } + + retList = append(retList, secret) } } @@ -583,7 +588,9 @@ func users(ctx context.Context, client clientpkg.Client) ([]runtime.Object, erro if u.Spec.PasswordRef != nil { secret, err := getSecret(ctx, client, u.Spec.PasswordRef.SecretNamespace, u.Spec.PasswordRef.SecretName) if err != nil { - return nil, errors.Wrap(err, "get user secret") + if !errors.Is(err, ErrMissingName) { + return nil, fmt.Errorf("get user secret: %w", err) + } } else if secret != nil { retList = append(retList, secret) } @@ -591,7 +598,9 @@ func users(ctx context.Context, client clientpkg.Client) ([]runtime.Object, erro if u.Spec.CodesRef != nil { secret, err := getSecret(ctx, client, u.Spec.CodesRef.SecretNamespace, u.Spec.CodesRef.SecretName) if err != nil { - return nil, errors.Wrap(err, "get user secret") + if !errors.Is(err, ErrMissingName) { + return nil, fmt.Errorf("get user secret: %w", err) + } } else if secret != nil { retList = append(retList, secret) } @@ -611,9 +620,11 @@ func isProjectSecret(secret corev1.Secret) bool { return false } +var ErrMissingName = errors.New("backup: missing namespace or name") + func getSecret(ctx context.Context, client clientpkg.Client, namespace, name string) (*corev1.Secret, 
error) { if namespace == "" || name == "" { - return nil, nil + return nil, ErrMissingName } secret := &corev1.Secret{} @@ -627,7 +638,7 @@ func getSecret(ctx context.Context, client clientpkg.Client, namespace, name str err = resetMetadata(client.Scheme(), secret) if err != nil { - return nil, errors.Wrap(err, "reset metadata secret") + return nil, fmt.Errorf("reset metadata secret: %w", err) } return secret, nil diff --git a/pkg/platform/client.go b/pkg/platform/client.go index 929f41ec1..5115ac70b 100644 --- a/pkg/platform/client.go +++ b/pkg/platform/client.go @@ -340,7 +340,7 @@ func (c *client) LoginWithAccessKey(host, accessKey string, insecure bool) error _, err = managementClient.Loft().ManagementV1().Selves().Create(context.TODO(), &managementv1.Self{}, metav1.CreateOptions{}) if err != nil { var urlError *url.Error - if errors.As(err, &urlError) { + if errors.As(err, &urlError) && urlError != nil { var err x509.UnknownAuthorityError if errors.As(urlError.Err, &err) { return fmt.Errorf("unsafe login endpoint '%s', if you wish to login into an insecure loft endpoint run with the '--insecure' flag", c.config.Platform.Host) diff --git a/pkg/platform/clihelper/clihelper.go b/pkg/platform/clihelper/clihelper.go index 2074d3187..9db52adb0 100644 --- a/pkg/platform/clihelper/clihelper.go +++ b/pkg/platform/clihelper/clihelper.go @@ -135,6 +135,10 @@ func GetProKubeConfig(options kubeconfig.ContextOptions) (*clientcmdapi.Config, } func GetLoftIngressHost(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (string, error) { + if kubeClient == nil { + return "", errors.New("nil kubeClient") + } + ingress, err := kubeClient.NetworkingV1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) if err != nil { ingress, err := kubeClient.NetworkingV1beta1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) @@ -235,6 +239,16 @@ func WaitForReadyLoftPod(ctx context.Context, kubeClient kubernetes.Interface, n } func 
StartPortForwarding(ctx context.Context, config *rest.Config, client kubernetes.Interface, pod *corev1.Pod, localPort string, log log.Logger) (chan struct{}, error) { + if config == nil { + return nil, errors.New("nil config") + } + if client == nil { + return nil, errors.New("nil client") + } + if pod == nil { + return nil, errors.New("nil pod") + } + log.WriteString(logrus.InfoLevel, "\n") log.Infof("Starting port-forwarding to the %s pod", product.DisplayName()) execRequest := client.CoreV1().RESTClient().Post(). @@ -292,6 +306,10 @@ func StartPortForwarding(ctx context.Context, config *rest.Config, client kubern } func GetLoftDefaultPassword(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (string, error) { + if kubeClient == nil { + return "", errors.New("nil kubeClient") + } + loftNamespace, err := kubeClient.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { @@ -412,6 +430,10 @@ func EnterHostNameQuestion(log log.Logger) (string, error) { } func IsLoftAlreadyInstalled(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (bool, error) { + if kubeClient == nil { + return false, errors.New("nil kubeClient") + } + _, err := kubeClient.AppsV1().Deployments(namespace).Get(ctx, defaultDeploymentName, metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { @@ -425,6 +447,13 @@ func IsLoftAlreadyInstalled(ctx context.Context, kubeClient kubernetes.Interface } func UninstallLoft(ctx context.Context, kubeClient kubernetes.Interface, restConfig *rest.Config, kubeContext, namespace string, log log.Logger) error { + if kubeClient == nil { + return errors.New("nil kubeClient") + } + if restConfig == nil { + return errors.New("nil restConfig") + } + log.Infof("Uninstalling %s...", product.DisplayName()) releaseName := defaultReleaseName deploy, err := kubeClient.AppsV1().Deployments(namespace).Get(ctx, defaultDeploymentName, metav1.GetOptions{}) @@ -537,6 +566,10 @@ 
func deleteUser(ctx context.Context, restConfig *rest.Config, name string) error } func EnsureIngressController(ctx context.Context, kubeClient kubernetes.Interface, kubeContext string, log log.Logger) error { + if kubeClient == nil { + return errors.New("nil kubeClient") + } + // first create an ingress controller const ( YesOption = "Yes" @@ -591,6 +624,9 @@ func EnsureIngressController(ctx context.Context, kubeClient kubernetes.Interfac if len(list.Items) == 1 { secret := list.Items[0] originalSecret := secret.DeepCopy() + if secret.Labels == nil { + secret.Labels = map[string]string{} + } secret.Labels["loft.sh/app"] = "true" if secret.Annotations == nil { secret.Annotations = map[string]string{} @@ -720,6 +756,13 @@ func getHelmWorkdir(chartName string) (string, error) { // Makes sure that admin user and password secret exists // Returns (true, nil) if everything is correct but password is different from parameter `password` func EnsureAdminPassword(ctx context.Context, kubeClient kubernetes.Interface, restConfig *rest.Config, password string, log log.Logger) (bool, error) { + if restConfig == nil { + return false, errors.New("nil kubeClient") + } + if kubeClient == nil { + return false, errors.New("nil kubeClient") + } + loftClient, err := loftclientset.NewForConfig(restConfig) if err != nil { return false, err @@ -796,6 +839,10 @@ func EnsureAdminPassword(ctx context.Context, kubeClient kubernetes.Interface, r } func IsLoftInstalledLocally(ctx context.Context, kubeClient kubernetes.Interface, namespace string) bool { + if kubeClient == nil { + panic("nil kubeClient") + } + _, err := kubeClient.NetworkingV1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) if err != nil && !kerrors.IsNotFound(err) { _, err = kubeClient.NetworkingV1beta1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) diff --git a/pkg/platform/helper.go b/pkg/platform/helper.go index 260ecb636..d603aa7a0 100644 --- a/pkg/platform/helper.go +++ 
b/pkg/platform/helper.go @@ -208,6 +208,12 @@ func SelectVirtualClusterInstance(ctx context.Context, client Client, virtualClu // get unformatted options var optionsUnformatted [][]string for _, virtualCluster := range virtualClusters { + if virtualCluster == nil || + virtualCluster.VirtualCluster == nil || + virtualCluster.Project == nil { + continue + } + optionsUnformatted = append(optionsUnformatted, []string{"vcluster: " + clihelper.GetDisplayName(virtualCluster.VirtualCluster.Name, virtualCluster.VirtualCluster.Spec.DisplayName), "Project: " + clihelper.GetDisplayName(virtualCluster.Project.Name, virtualCluster.Project.Spec.DisplayName)}) } @@ -218,7 +224,12 @@ func SelectVirtualClusterInstance(ctx context.Context, client Client, virtualClu } return "", "", "", "", fmt.Errorf("couldn't find a virtual cluster you have access to") } else if len(virtualClusters) == 1 { - return "", virtualClusters[0].Project.Name, "", virtualClusters[0].VirtualCluster.Name, nil + vc := virtualClusters[0] + if vc.Project == nil || vc.VirtualCluster == nil { + return "", "", "", "", errors.New("virtual cluster instance object is missing project or virtual cluster infos") + } + + return "", vc.Project.Name, "", vc.VirtualCluster.Name, nil } questionOptions := formatOptions("%s | %s", optionsUnformatted) @@ -233,11 +244,18 @@ func SelectVirtualClusterInstance(ctx context.Context, client Client, virtualClu for idx, s := range questionOptions { if s == selectedOption { - return "", virtualClusters[idx].Project.Name, "", virtualClusters[idx].VirtualCluster.Name, nil + vc := virtualClusters[idx] + if vc.Project == nil { + return "", "", "", "", errors.New("nil project") + } + if vc.VirtualCluster == nil { + return "", "", "", "", errors.New("nil virtual cluster") + } + return "", vc.Project.Name, "", vc.VirtualCluster.Name, nil } } - return "", "", "", "", fmt.Errorf("couldn't find answer") + return "", "", "", "", errors.New("couldn't find answer") } func SelectSpaceInstance(ctx 
context.Context, client Client, spaceName, projectName string, log log.Logger) (string, string, string, error) { @@ -541,8 +559,8 @@ func GetSpaceInstances(ctx context.Context, client Client) ([]*SpaceInstanceProj } type ProjectProjectSecret struct { - ProjectSecret managementv1.ProjectSecret Project string + ProjectSecret managementv1.ProjectSecret } func GetProjectSecrets(ctx context.Context, managementClient kube.Interface, projectNames ...string) ([]*ProjectProjectSecret, error) { @@ -675,7 +693,12 @@ func CreateVirtualClusterInstanceOptions(ctx context.Context, client Client, con // get server for _, val := range kubeConfig.Clusters { - contextOptions.Server = val.Server + if val != nil { + contextOptions.Server = val.Server + } + } + if contextOptions.Server == "" { + return kubeconfig.ContextOptions{}, errors.New("could not determine server url") } contextOptions.InsecureSkipTLSVerify = true @@ -1233,13 +1256,18 @@ func getCertificateAndKeyDataFromKubeConfig(config string) (string, string, erro return "", "", err } - return string(apiCfg.AuthInfos["vcluster"].ClientCertificateData), string(apiCfg.AuthInfos["vcluster"].ClientKeyData), nil + authInfo, ok := apiCfg.AuthInfos["vcluster"] + if !ok || authInfo == nil { + return "", "", errors.New("couldn't find vcluster auth infos") + } + + return string(authInfo.ClientCertificateData), string(authInfo.ClientKeyData), nil } -func getVirtualClusterInstanceAccessConfig(ctx context.Context, client Client, virtualClusterInstance *managementv1.VirtualClusterInstance) (*clientcmdapi.Config, error) { +func getVirtualClusterInstanceAccessConfig(ctx context.Context, client Client, virtualClusterInstance *managementv1.VirtualClusterInstance) (clientcmdapi.Config, error) { managementClient, err := client.Management() if err != nil { - return nil, err + return clientcmdapi.Config{}, err } kubeConfig, err := managementClient.Loft().ManagementV1().VirtualClusterInstances(virtualClusterInstance.Namespace).GetKubeConfig( @@ 
-1251,21 +1279,21 @@ func getVirtualClusterInstanceAccessConfig(ctx context.Context, client Client, v metav1.CreateOptions{}, ) if err != nil { - return nil, err + return clientcmdapi.Config{}, err } // parse kube config string clientCfg, err := clientcmd.NewClientConfigFromBytes([]byte(kubeConfig.Status.KubeConfig)) if err != nil { - return nil, err + return clientcmdapi.Config{}, err } apiCfg, err := clientCfg.RawConfig() if err != nil { - return nil, err + return clientcmdapi.Config{}, err } - return &apiCfg, nil + return apiCfg, nil } func findProjectCluster(ctx context.Context, client Client, projectName, clusterName string) (*managementv1.Cluster, error) { diff --git a/pkg/platform/secret.go b/pkg/platform/secret.go index a64627baa..82d21efad 100644 --- a/pkg/platform/secret.go +++ b/pkg/platform/secret.go @@ -2,6 +2,7 @@ package platform import ( "context" + "errors" "fmt" "reflect" "strconv" @@ -67,11 +68,15 @@ func ApplyPlatformSecret( } return nil - } else if reflect.DeepEqual(keySecret.Data, payload) { + } else if keySecret != nil && reflect.DeepEqual(keySecret.Data, payload) { // no update needed, just return return nil } + if keySecret == nil { + return errors.New("nil keySecret") + } + // create the patch patch := ctrlclient.MergeFrom(keySecret.DeepCopy()) keySecret.Data = payload diff --git a/pkg/plugin/v1/remote/plugin.pb.go b/pkg/plugin/v1/remote/plugin.pb.go index e7566c492..223757690 100644 --- a/pkg/plugin/v1/remote/plugin.pb.go +++ b/pkg/plugin/v1/remote/plugin.pb.go @@ -9,10 +9,11 @@ package remote import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) const ( @@ -27,9 +28,9 @@ type RegisterPluginRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Version string 
`protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` ClientHooks []*ClientHook `protobuf:"bytes,4,rep,name=clientHooks,proto3" json:"clientHooks,omitempty"` } @@ -184,9 +185,9 @@ type MutateRequest struct { unknownFields protoimpl.UnknownFields ApiVersion string `protobuf:"bytes,1,opt,name=apiVersion,proto3" json:"apiVersion,omitempty"` - Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` - Object string `protobuf:"bytes,3,opt,name=object,proto3" json:"object,omitempty"` - Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"` + Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` + Object string `protobuf:"bytes,3,opt,name=object,proto3" json:"object,omitempty"` + Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"` } func (x *MutateRequest) Reset() { @@ -254,7 +255,7 @@ type MutateResult struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Object string `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` + Object string `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` Mutated bool `protobuf:"varint,2,opt,name=mutated,proto3" json:"mutated,omitempty"` } @@ -310,7 +311,7 @@ type LeaderInfo struct { unknownFields protoimpl.UnknownFields Leader bool `protobuf:"varint,1,opt,name=leader,proto3" json:"leader,omitempty"` - RunID string `protobuf:"bytes,2,opt,name=runID,proto3" json:"runID,omitempty"` + RunID string `protobuf:"bytes,2,opt,name=runID,proto3" json:"runID,omitempty"` } func (x 
*LeaderInfo) Reset() { @@ -365,8 +366,8 @@ type ClientHook struct { unknownFields protoimpl.UnknownFields ApiVersion string `protobuf:"bytes,1,opt,name=apiVersion,proto3" json:"apiVersion,omitempty"` - Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` - Types []string `protobuf:"bytes,3,rep,name=types,proto3" json:"types,omitempty"` + Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` + Types []string `protobuf:"bytes,3,rep,name=types,proto3" json:"types,omitempty"` } func (x *ClientHook) Reset() { @@ -427,12 +428,12 @@ type Context struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - VirtualClusterConfig string `protobuf:"bytes,1,opt,name=virtualClusterConfig,proto3" json:"virtualClusterConfig,omitempty"` + VirtualClusterConfig string `protobuf:"bytes,1,opt,name=virtualClusterConfig,proto3" json:"virtualClusterConfig,omitempty"` PhysicalClusterConfig string `protobuf:"bytes,2,opt,name=physicalClusterConfig,proto3" json:"physicalClusterConfig,omitempty"` - SyncerConfig string `protobuf:"bytes,3,opt,name=syncerConfig,proto3" json:"syncerConfig,omitempty"` - TargetNamespace string `protobuf:"bytes,4,opt,name=targetNamespace,proto3" json:"targetNamespace,omitempty"` - CurrentNamespace string `protobuf:"bytes,5,opt,name=currentNamespace,proto3" json:"currentNamespace,omitempty"` - Options string `protobuf:"bytes,6,opt,name=options,proto3" json:"options,omitempty"` + SyncerConfig string `protobuf:"bytes,3,opt,name=syncerConfig,proto3" json:"syncerConfig,omitempty"` + TargetNamespace string `protobuf:"bytes,4,opt,name=targetNamespace,proto3" json:"targetNamespace,omitempty"` + CurrentNamespace string `protobuf:"bytes,5,opt,name=currentNamespace,proto3" json:"currentNamespace,omitempty"` + Options string `protobuf:"bytes,6,opt,name=options,proto3" json:"options,omitempty"` } func (x *Context) Reset() { diff --git a/pkg/plugin/v1/remote/plugin_grpc.pb.go b/pkg/plugin/v1/remote/plugin_grpc.pb.go 
index d4f827a76..2228dc966 100644 --- a/pkg/plugin/v1/remote/plugin_grpc.pb.go +++ b/pkg/plugin/v1/remote/plugin_grpc.pb.go @@ -8,6 +8,7 @@ package remote import ( context "context" + grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" diff --git a/pkg/plugin/v2/plugin.go b/pkg/plugin/v2/plugin.go index 410bf5b34..44ffe32f8 100644 --- a/pkg/plugin/v2/plugin.go +++ b/pkg/plugin/v2/plugin.go @@ -108,7 +108,6 @@ func (m *Manager) Start( for _, vClusterPlugin := range m.Plugins { // build the start request initRequest, err := m.buildInitRequest(filepath.Dir(vClusterPlugin.Path), syncerConfig, vConfig, port) - if err != nil { return fmt.Errorf("build start request: %w", err) } @@ -571,6 +570,9 @@ func (m *Manager) registerNonResourceURL(port int, interceptorsInfos Interceptor if nonResourceURL == "" { continue } + if m.NonResourceInterceptorsPorts[nonResourceURL] == nil { + m.NonResourceInterceptorsPorts[nonResourceURL] = make(map[string]portHandlerName, 0) + } for _, v := range interceptorsInfos.Verbs { if _, ok := m.NonResourceInterceptorsPorts[nonResourceURL][v]; ok { return fmt.Errorf("error while loading the plugins, multiple interceptor plugins are registered for the same non resource url %s and verb %s", nonResourceURL, v) diff --git a/pkg/plugin/v2/pluginv2/pluginv2.pb.go b/pkg/plugin/v2/pluginv2/pluginv2.pb.go index b576c72aa..cb0152bd7 100644 --- a/pkg/plugin/v2/pluginv2/pluginv2.pb.go +++ b/pkg/plugin/v2/pluginv2/pluginv2.pb.go @@ -9,10 +9,11 @@ package pluginv2 import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) const ( @@ -350,9 +351,9 @@ type Mutate_Request struct { unknownFields protoimpl.UnknownFields ApiVersion string `protobuf:"bytes,1,opt,name=apiVersion,proto3" 
json:"apiVersion,omitempty"` - Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` - Object string `protobuf:"bytes,3,opt,name=object,proto3" json:"object,omitempty"` - Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"` + Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` + Object string `protobuf:"bytes,3,opt,name=object,proto3" json:"object,omitempty"` + Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"` } func (x *Mutate_Request) Reset() { @@ -420,7 +421,7 @@ type Mutate_Response struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Object string `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` + Object string `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` Mutated bool `protobuf:"varint,2,opt,name=mutated,proto3" json:"mutated,omitempty"` } diff --git a/pkg/plugin/v2/pluginv2/pluginv2_grpc.pb.go b/pkg/plugin/v2/pluginv2/pluginv2_grpc.pb.go index d623cc7fe..0f35e44de 100644 --- a/pkg/plugin/v2/pluginv2/pluginv2_grpc.pb.go +++ b/pkg/plugin/v2/pluginv2/pluginv2_grpc.pb.go @@ -8,6 +8,7 @@ package pluginv2 import ( context "context" + grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" diff --git a/pkg/server/server.go b/pkg/server/server.go index 8302c0f32..752205ff2 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -530,5 +530,6 @@ func setGlobalDefaults(config *rest.Config) *rest.Config { type emptyConfigProvider struct{} func (e *emptyConfigProvider) ConfigFor(_ string) (io.Reader, error) { + //nolint:nilnil return nil, nil } diff --git a/pkg/setup/config.go b/pkg/setup/config.go index 5c63b6b1c..ca1f510c4 100644 --- a/pkg/setup/config.go +++ b/pkg/setup/config.go @@ -192,6 +192,10 @@ func updateSecretAnnotations(ctx context.Context, client kubernetes.Interface, n // SetGlobalOwner fetches the owning service and populates in 
translate.Owner if: the vcluster is configured to setOwner is, // and if the currentNamespace == targetNamespace (because cross namespace owner refs don't work). func SetGlobalOwner(ctx context.Context, vConfig *config.VirtualClusterConfig) error { + if vConfig == nil { + return errors.New("nil vConfig") + } + if !vConfig.Experimental.SyncSettings.SetOwner { return nil } @@ -207,6 +211,10 @@ func SetGlobalOwner(ctx context.Context, vConfig *config.VirtualClusterConfig) e return nil } + if vConfig.WorkloadClient == nil { + return errors.New("nil WorkloadClient") + } + service, err := vConfig.WorkloadClient.CoreV1().Services(vConfig.WorkloadNamespace).Get(ctx, vConfig.WorkloadService, metav1.GetOptions{}) if err != nil { return errors.Wrap(err, "get vcluster service") diff --git a/pkg/setup/controller_context.go b/pkg/setup/controller_context.go index cee6e84ab..6e40b936f 100644 --- a/pkg/setup/controller_context.go +++ b/pkg/setup/controller_context.go @@ -141,6 +141,9 @@ func loadVirtualConfig(ctx context.Context, options *config.VirtualClusterConfig if err != nil { return nil, nil, err } + if clientConfig == nil { + return nil, nil, errors.New("nil clientConfig") + } virtualClusterConfig, err := clientConfig.ClientConfig() if err != nil { @@ -216,45 +219,53 @@ func CreateVClusterKubeConfig(config *clientcmdapi.Config, options *config.Virtu config = config.DeepCopy() // exchange kube config server & resolve certificate - for i := range config.Clusters { + for _, cluster := range config.Clusters { + if cluster == nil { + continue + } + // fill in data - if config.Clusters[i].CertificateAuthorityData == nil && config.Clusters[i].CertificateAuthority != "" { - o, err := os.ReadFile(config.Clusters[i].CertificateAuthority) + if cluster.CertificateAuthorityData == nil && cluster.CertificateAuthority != "" { + o, err := os.ReadFile(cluster.CertificateAuthority) if err != nil { return nil, err } - config.Clusters[i].CertificateAuthority = "" - 
config.Clusters[i].CertificateAuthorityData = o + cluster.CertificateAuthority = "" + cluster.CertificateAuthorityData = o } if options.ExportKubeConfig.Server != "" { - config.Clusters[i].Server = options.ExportKubeConfig.Server + cluster.Server = options.ExportKubeConfig.Server } else { - config.Clusters[i].Server = fmt.Sprintf("https://localhost:%d", options.ControlPlane.Proxy.Port) + cluster.Server = fmt.Sprintf("https://localhost:%d", options.ControlPlane.Proxy.Port) } } // resolve auth info cert & key - for i := range config.AuthInfos { + for _, authInfo := range config.AuthInfos { + if authInfo == nil { + continue + } + // fill in data - if config.AuthInfos[i].ClientCertificateData == nil && config.AuthInfos[i].ClientCertificate != "" { - o, err := os.ReadFile(config.AuthInfos[i].ClientCertificate) + if authInfo.ClientCertificateData == nil && authInfo.ClientCertificate != "" { + o, err := os.ReadFile(authInfo.ClientCertificate) if err != nil { return nil, err } - config.AuthInfos[i].ClientCertificate = "" - config.AuthInfos[i].ClientCertificateData = o + authInfo.ClientCertificate = "" + authInfo.ClientCertificateData = o } - if config.AuthInfos[i].ClientKeyData == nil && config.AuthInfos[i].ClientKey != "" { - o, err := os.ReadFile(config.AuthInfos[i].ClientKey) + if authInfo.ClientKeyData == nil && authInfo.ClientKey != "" { + o, err := os.ReadFile(authInfo.ClientKey) if err != nil { return nil, err } - config.AuthInfos[i].ClientKey = "" - config.AuthInfos[i].ClientKeyData = o + authInfo.ClientKey = "" + authInfo.ClientKeyData = o } } @@ -268,6 +279,13 @@ func initControllerContext( virtualRawConfig *clientcmdapi.Config, vClusterOptions *config.VirtualClusterConfig, ) (*config.ControllerContext, error) { + if localManager == nil { + return nil, errors.New("nil localManager") + } + if virtualManager == nil { + return nil, errors.New("nil virtualManager") + } + stopChan := make(<-chan struct{}) // get virtual cluster version @@ -313,6 +331,13 @@ func 
initControllerContext( } func newCurrentNamespaceClient(ctx context.Context, localManager ctrl.Manager, options *config.VirtualClusterConfig) (client.Client, error) { + if localManager == nil { + return nil, errors.New("nil localManager") + } + if options == nil { + return nil, errors.New("nil options") + } + var err error // currentNamespaceCache is needed for tasks such as finding out fake kubelet ips diff --git a/pkg/setup/controllers.go b/pkg/setup/controllers.go index 4e9116288..781384aec 100644 --- a/pkg/setup/controllers.go +++ b/pkg/setup/controllers.go @@ -252,8 +252,12 @@ func WriteKubeConfigToSecret(ctx context.Context, currentNamespace string, curre if options.ExportKubeConfig.Context != "" { syncerConfig.CurrentContext = options.ExportKubeConfig.Context // update authInfo - for k := range syncerConfig.AuthInfos { - syncerConfig.AuthInfos[syncerConfig.CurrentContext] = syncerConfig.AuthInfos[k] + for k, authInfo := range syncerConfig.AuthInfos { + if authInfo == nil { + continue + } + + syncerConfig.AuthInfos[syncerConfig.CurrentContext] = authInfo if k != syncerConfig.CurrentContext { delete(syncerConfig.AuthInfos, k) } @@ -261,8 +265,12 @@ func WriteKubeConfigToSecret(ctx context.Context, currentNamespace string, curre } // update cluster - for k := range syncerConfig.Clusters { - syncerConfig.Clusters[syncerConfig.CurrentContext] = syncerConfig.Clusters[k] + for k, cluster := range syncerConfig.Clusters { + if cluster == nil { + continue + } + + syncerConfig.Clusters[syncerConfig.CurrentContext] = cluster if k != syncerConfig.CurrentContext { delete(syncerConfig.Clusters, k) } @@ -270,8 +278,12 @@ func WriteKubeConfigToSecret(ctx context.Context, currentNamespace string, curre } // update context - for k := range syncerConfig.Contexts { - tmpCtx := syncerConfig.Contexts[k] + for k, context := range syncerConfig.Contexts { + if context == nil { + continue + } + + tmpCtx := context tmpCtx.Cluster = syncerConfig.CurrentContext tmpCtx.AuthInfo = 
syncerConfig.CurrentContext syncerConfig.Contexts[syncerConfig.CurrentContext] = tmpCtx diff --git a/pkg/setup/initialize.go b/pkg/setup/initialize.go index e68e00a1b..20f79e33f 100644 --- a/pkg/setup/initialize.go +++ b/pkg/setup/initialize.go @@ -281,6 +281,10 @@ func UpdateSecretWithK0sCerts( currentNamespaceClient kubernetes.Interface, currentNamespace, vClusterName string, ) error { + if currentNamespaceClient == nil { + return errors.New("nil currentNamespaceClient") + } + // wait for k0s to generate the secrets for us files, err := waitForK0sFiles(ctx, "/data/k0s/pki") if err != nil { diff --git a/pkg/telemetry/helpers.go b/pkg/telemetry/helpers.go index 227ce3c2a..931f70adf 100644 --- a/pkg/telemetry/helpers.go +++ b/pkg/telemetry/helpers.go @@ -16,9 +16,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -var ( - SyncerVersion = "dev" -) +var SyncerVersion = "dev" // getVClusterID provides instance ID based on the UID of the service func getVClusterID(ctx context.Context, hostClient kubernetes.Interface, vClusterNamespace, vClusterService string) (string, error) { @@ -89,7 +87,7 @@ func toKubernetesVersion(vi *version.Info) *KubernetesVersion { // GetPlatformUserID returns the loft instance id func GetPlatformUserID(cliConfig *config.CLI, self *managementv1.Self) string { - if cliConfig.TelemetryDisabled || self == nil { + if cliConfig == nil || cliConfig.TelemetryDisabled || self == nil { return "" } platformID := self.Status.Subject @@ -101,7 +99,7 @@ func GetPlatformUserID(cliConfig *config.CLI, self *managementv1.Self) string { // GetPlatformInstanceID returns the loft instance id func GetPlatformInstanceID(cliConfig *config.CLI, self *managementv1.Self) string { - if cliConfig.TelemetryDisabled || self == nil { + if cliConfig == nil || cliConfig.TelemetryDisabled || self == nil { return "" } @@ -111,7 +109,7 @@ func GetPlatformInstanceID(cliConfig *config.CLI, self *managementv1.Self) strin // GetMachineID retrieves machine ID and encodes it 
together with users $HOME path and // extra key to protect privacy. Returns a hex-encoded string. func GetMachineID(cliConfig *config.CLI) string { - if cliConfig.TelemetryDisabled { + if cliConfig == nil || cliConfig.TelemetryDisabled { return "" } diff --git a/pkg/upgrade/upgrade_test.go b/pkg/upgrade/upgrade_test.go index 3c0df6fb5..a45f520b1 100644 --- a/pkg/upgrade/upgrade_test.go +++ b/pkg/upgrade/upgrade_test.go @@ -59,7 +59,9 @@ func TestUpgrade(t *testing.T) { if err != nil { t.Fatalf("Error creating temporary log file: %v", err) } - *(os.Stderr) = *logFile + if logFile != nil { + *(os.Stderr) = *logFile + } latest, found, err := selfupdate.DetectLatest(githubSlug) if err != nil { diff --git a/pkg/util/clihelper/clihelper.go b/pkg/util/clihelper/clihelper.go index 025747e9f..6c909cb36 100644 --- a/pkg/util/clihelper/clihelper.go +++ b/pkg/util/clihelper/clihelper.go @@ -91,7 +91,10 @@ func GetKubeConfig(ctx context.Context, kubeClient *kubernetes.Clientset, vclust return true, nil }) if err != nil { - return nil, errors.Wrap(err, "wait for vcluster") + return nil, fmt.Errorf("wait for vcluster: %w", err) + } + if kubeConfig == nil { + return nil, errors.New("nil kubeConfig") } return kubeConfig, nil @@ -126,6 +129,16 @@ func UpdateKubeConfig(contextName string, cluster *clientcmdapi.Cluster, authInf return err } + if config.Clusters == nil { + config.Clusters = map[string]*clientcmdapi.Cluster{} + } + if config.AuthInfos == nil { + config.AuthInfos = map[string]*clientcmdapi.AuthInfo{} + } + if config.Contexts == nil { + config.Contexts = map[string]*clientcmdapi.Context{} + } + config.Clusters[contextName] = cluster config.AuthInfos[contextName] = authInfo @@ -162,7 +175,6 @@ func checkPort(port int) (status bool, err error) { // Try to create a server with the port server, err := net.Listen("tcp", host) - // if it fails then the port is likely taken if err != nil { return false, err diff --git a/pkg/util/commandwriter/commandwriter.go 
b/pkg/util/commandwriter/commandwriter.go index 99458c42d..c02c9b7b8 100644 --- a/pkg/util/commandwriter/commandwriter.go +++ b/pkg/util/commandwriter/commandwriter.go @@ -92,7 +92,9 @@ func (c *commandWriter) Writer() io.Writer { } func (c *commandWriter) Close() { - _ = c.writer.Close() + if c.writer != nil { + _ = c.writer.Close() + } } func (c *commandWriter) CloseAndWait(_ context.Context, _ error) { diff --git a/pkg/util/kubeconfig/kubeconfig.go b/pkg/util/kubeconfig/kubeconfig.go index e9e567182..a3a09d2ac 100644 --- a/pkg/util/kubeconfig/kubeconfig.go +++ b/pkg/util/kubeconfig/kubeconfig.go @@ -121,62 +121,66 @@ func ConvertRestConfigToClientConfig(config *rest.Config) (clientcmd.ClientConfi AuthInfo: contextName, }, } + + cluster := &clientcmdapi.Cluster{ + Server: config.Host, + InsecureSkipTLSVerify: config.Insecure, + CertificateAuthorityData: config.CAData, + CertificateAuthority: config.CAFile, + } kubeConfig.Clusters = map[string]*clientcmdapi.Cluster{ - contextName: { - Server: config.Host, - InsecureSkipTLSVerify: config.Insecure, - CertificateAuthorityData: config.CAData, - CertificateAuthority: config.CAFile, - }, + contextName: cluster, + } + + authInfo := &clientcmdapi.AuthInfo{ + Token: config.BearerToken, + TokenFile: config.BearerTokenFile, + Impersonate: config.Impersonate.UserName, + ImpersonateGroups: config.Impersonate.Groups, + ImpersonateUserExtra: config.Impersonate.Extra, + ClientCertificate: config.CertFile, + ClientCertificateData: config.CertData, + ClientKey: config.KeyFile, + ClientKeyData: config.KeyData, + Username: config.Username, + Password: config.Password, + AuthProvider: config.AuthProvider, + Exec: config.ExecProvider, } kubeConfig.AuthInfos = map[string]*clientcmdapi.AuthInfo{ - contextName: { - Token: config.BearerToken, - TokenFile: config.BearerTokenFile, - Impersonate: config.Impersonate.UserName, - ImpersonateGroups: config.Impersonate.Groups, - ImpersonateUserExtra: config.Impersonate.Extra, - ClientCertificate: 
config.CertFile, - ClientCertificateData: config.CertData, - ClientKey: config.KeyFile, - ClientKeyData: config.KeyData, - Username: config.Username, - Password: config.Password, - AuthProvider: config.AuthProvider, - Exec: config.ExecProvider, - }, + contextName: authInfo, } kubeConfig.CurrentContext = contextName // resolve certificate - if kubeConfig.Clusters[contextName].CertificateAuthorityData == nil && kubeConfig.Clusters[contextName].CertificateAuthority != "" { - o, err := os.ReadFile(kubeConfig.Clusters[contextName].CertificateAuthority) + if cluster.CertificateAuthorityData == nil && cluster.CertificateAuthority != "" { + o, err := os.ReadFile(cluster.CertificateAuthority) if err != nil { return nil, err } - kubeConfig.Clusters[contextName].CertificateAuthority = "" - kubeConfig.Clusters[contextName].CertificateAuthorityData = o + cluster.CertificateAuthority = "" + cluster.CertificateAuthorityData = o } // fill in data - if kubeConfig.AuthInfos[contextName].ClientCertificateData == nil && kubeConfig.AuthInfos[contextName].ClientCertificate != "" { - o, err := os.ReadFile(kubeConfig.AuthInfos[contextName].ClientCertificate) + if authInfo.ClientCertificateData == nil && authInfo.ClientCertificate != "" { + o, err := os.ReadFile(authInfo.ClientCertificate) if err != nil { return nil, err } - kubeConfig.AuthInfos[contextName].ClientCertificate = "" - kubeConfig.AuthInfos[contextName].ClientCertificateData = o + authInfo.ClientCertificate = "" + authInfo.ClientCertificateData = o } - if kubeConfig.AuthInfos[contextName].ClientKeyData == nil && kubeConfig.AuthInfos[contextName].ClientKey != "" { - o, err := os.ReadFile(kubeConfig.AuthInfos[contextName].ClientKey) + if authInfo.ClientKeyData == nil && authInfo.ClientKey != "" { + o, err := os.ReadFile(authInfo.ClientKey) if err != nil { return nil, err } - kubeConfig.AuthInfos[contextName].ClientKey = "" - kubeConfig.AuthInfos[contextName].ClientKeyData = o + authInfo.ClientKey = "" + authInfo.ClientKeyData = 
o } return clientcmd.NewDefaultClientConfig(*kubeConfig, &clientcmd.ConfigOverrides{}), nil @@ -220,6 +224,9 @@ func resolveExecCredentials(restConfig *rest.Config) error { } execProvider := restConfig.ExecProvider + if execProvider == nil { + return errors.New("exec provider is missing") + } if execProvider.ProvideClusterInfo { var err error cred.Spec.Cluster, err = rest.ConfigToExecCluster(restConfig) diff --git a/pkg/util/pluginhookclient/client.go b/pkg/util/pluginhookclient/client.go index 414122335..7c3068248 100644 --- a/pkg/util/pluginhookclient/client.go +++ b/pkg/util/pluginhookclient/client.go @@ -45,6 +45,10 @@ func NewPluginClient(virtual bool, delegate client.NewClientFunc) client.NewClie } func wrapClient(virtual bool, innerClient client.Client) client.Client { + if innerClient == nil { + panic("nil innerClient") + } + suffix := "Physical" if virtual { suffix = "Virtual" diff --git a/pkg/util/portforward/portforward.go b/pkg/util/portforward/portforward.go index ac37cf1d5..27933d8f8 100644 --- a/pkg/util/portforward/portforward.go +++ b/pkg/util/portforward/portforward.go @@ -196,7 +196,9 @@ func (pf *PortForwarder) raiseError(err error) { } }() - _ = pf.streamConn.Close() + if pf.streamConn != nil { + _ = pf.streamConn.Close() + } } func (pf *PortForwarder) NumConnections() int64 { @@ -246,10 +248,15 @@ func (pf *PortForwarder) forward(ctx context.Context) error { close(pf.Ready) } + var streamConnCloseChan <-chan bool + if pf.streamConn != nil { + streamConnCloseChan = pf.streamConn.CloseChan() + } + // wait for interrupt or conn closure select { case <-pf.stopChan: - case <-pf.streamConn.CloseChan(): + case <-streamConnCloseChan: pf.raiseError(errors.New("lost connection to pod")) } @@ -318,8 +325,13 @@ func (pf *PortForwarder) getListener(protocol string, hostname string, port *For // the background. 
func (pf *PortForwarder) waitForConnection(ctx context.Context, listener net.Listener, port ForwardedPort) { for { + var closeChan <-chan bool + if pf.streamConn != nil { + closeChan = pf.streamConn.CloseChan() + } + select { - case <-pf.streamConn.CloseChan(): + case <-closeChan: return default: conn, err := listener.Accept() @@ -362,63 +374,74 @@ func (pf *PortForwarder) handleConnection(ctx context.Context, conn net.Conn, po headers.Set(corev1.StreamType, corev1.StreamTypeError) headers.Set(corev1.PortHeader, fmt.Sprintf("%d", port.Remote)) headers.Set(corev1.PortForwardRequestIDHeader, strconv.Itoa(requestID)) - errorStream, err := pf.streamConn.CreateStream(headers) - if err != nil { - pf.raiseError(fmt.Errorf("error creating error stream for port %d -> %d: %w", port.Local, port.Remote, err)) - return - } - // we're not writing to this stream - errorStream.Close() errorChan := make(chan error) - go func() { - message, err := io.ReadAll(errorStream) - switch { - case err != nil: - errorChan <- fmt.Errorf("error reading from error stream for port %d -> %d: %w", port.Local, port.Remote, err) - case len(message) > 0: - errorChan <- fmt.Errorf("an error occurred forwarding %d -> %d: %v", port.Local, port.Remote, string(message)) + if pf.streamConn != nil { + errorStream, err := pf.streamConn.CreateStream(headers) + if err != nil { + pf.raiseError(fmt.Errorf("error creating error stream for port %d -> %d: %w", port.Local, port.Remote, err)) + return } - close(errorChan) - }() - - // create data stream - headers.Set(corev1.StreamType, corev1.StreamTypeData) - dataStream, err := pf.streamConn.CreateStream(headers) - if err != nil { - pf.raiseError(fmt.Errorf("error creating forwarding stream for port %d -> %d: %w", port.Local, port.Remote, err)) - return + // we're not writing to this stream + errorStream.Close() + + go func() { + message, err := io.ReadAll(errorStream) + switch { + case err != nil: + errorChan <- fmt.Errorf("error reading from error stream for port 
%d -> %d: %w", port.Local, port.Remote, err) + case len(message) > 0: + errorChan <- fmt.Errorf("an error occurred forwarding %d -> %d: %v", port.Local, port.Remote, string(message)) + } + close(errorChan) + }() + } else { + go func() { + errorChan <- errors.New("no streamConn available") + close(errorChan) + }() } + // create data stream localError := make(chan struct{}) remoteDone := make(chan struct{}) + if pf.streamConn != nil { + headers.Set(corev1.StreamType, corev1.StreamTypeData) + dataStream, err := pf.streamConn.CreateStream(headers) + if err != nil { + pf.raiseError(fmt.Errorf("error creating forwarding stream for port %d -> %d: %w", port.Local, port.Remote, err)) + return + } - go func() { - // Copy from the remote side to the local port. - if _, err := io.Copy(conn, dataStream); err != nil && !strings.Contains(err.Error(), "use of closed network connection") { - if logger := klog.FromContext(ctx).V(4); logger.Enabled() { - logger.Error(err, "error copying from remote stream to local connection", "debug", true) + go func() { + // Copy from the remote side to the local port. + if _, err := io.Copy(conn, dataStream); err != nil && !strings.Contains(err.Error(), "use of closed network connection") { + if logger := klog.FromContext(ctx).V(4); logger.Enabled() { + logger.Error(err, "error copying from remote stream to local connection", "debug", true) + } } - } - // inform the select below that the remote copy is done - close(remoteDone) - }() + // inform the select below that the remote copy is done + close(remoteDone) + }() - go func() { - // inform server we're not sending any more data after copy unblocks - defer dataStream.Close() + go func() { + // inform server we're not sending any more data after copy unblocks + defer dataStream.Close() - // Copy from the local port to the remote side. 
- if _, err := io.Copy(dataStream, conn); err != nil && !strings.Contains(err.Error(), "use of closed network connection") { - if logger := klog.FromContext(ctx).V(4); logger.Enabled() { - logger.Error(err, "error copying from local connection to remote stream", "debug", true) - } + // Copy from the local port to the remote side. + if _, err := io.Copy(dataStream, conn); err != nil && !strings.Contains(err.Error(), "use of closed network connection") { + if logger := klog.FromContext(ctx).V(4); logger.Enabled() { + logger.Error(err, "error copying from local connection to remote stream", "debug", true) + } - // break out of the select below without waiting for the other copy to finish - close(localError) - } - }() + // break out of the select below without waiting for the other copy to finish + close(localError) + } + }() + } else { + close(localError) + } // wait for either a local->remote error or for copying from remote->local to finish select { @@ -427,8 +450,7 @@ func (pf *PortForwarder) handleConnection(ctx context.Context, conn net.Conn, po } // always expect something on errorChan (it may be nil) - err = <-errorChan - if err != nil { + if err := <-errorChan; err != nil { // Fail for errors like container not running or No such container if strings.Contains(err.Error(), "container") { pf.raiseError(err) diff --git a/pkg/util/servicecidr/servicecidr.go b/pkg/util/servicecidr/servicecidr.go index 2caab4e82..1bf6e7e52 100644 --- a/pkg/util/servicecidr/servicecidr.go +++ b/pkg/util/servicecidr/servicecidr.go @@ -2,6 +2,7 @@ package servicecidr import ( "context" + "errors" "fmt" "net" "strings" @@ -29,6 +30,10 @@ func GetServiceCIDR(ctx context.Context, client kubernetes.Interface, namespace return ipv4CIDR, fmt.Sprintf("failed to find IPv6 service CIDR, will use IPv4 service CIDR. 
Error details: %v", ipv6Err) } + if client == nil { + panic("client is nil") + } + // Both IPv4 and IPv6 are configured, we need to find out which one is the default policy := corev1.IPFamilyPolicyPreferDualStack testService, err := client.CoreV1().Services(namespace).Create(ctx, &corev1.Service{ @@ -73,6 +78,9 @@ func getServiceCIDR(ctx context.Context, client kubernetes.Interface, namespace // https://www.ietf.org/rfc/rfc3849.txt clusterIP = "2001:DB8::1" } + if client == nil { + return "", errors.New("nil client") + } _, err := client.CoreV1().Services(namespace).Create(ctx, &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-service-", diff --git a/pkg/util/translate/single_namespace.go b/pkg/util/translate/single_namespace.go index a99d29413..31d2a234f 100644 --- a/pkg/util/translate/single_namespace.go +++ b/pkg/util/translate/single_namespace.go @@ -229,6 +229,9 @@ func (s *singleNamespace) ApplyLabels(src client.Object, dest client.Object, syn if dest != nil { pObjLabels := dest.GetLabels() if pObjLabels != nil && pObjLabels[ControllerLabel] != "" { + if newLabels == nil { + newLabels = make(map[string]string) + } newLabels[ControllerLabel] = pObjLabels[ControllerLabel] } } diff --git a/pkg/util/unstructuredhelper/write.go b/pkg/util/unstructuredhelper/write.go index 211130a28..afef69e80 100644 --- a/pkg/util/unstructuredhelper/write.go +++ b/pkg/util/unstructuredhelper/write.go @@ -9,12 +9,22 @@ import ( ) func ValueFrom[T any](path string, from ReadMap) (T, bool) { + if from == nil { + var ret T + return ret, false + } + pathSplitted := strings.Split(path, ".") // traverse to target targetMap := from for i := 0; i < len(pathSplitted)-1; i++ { targetMap = targetMap.Map(pathSplitted[i]) + + if targetMap == nil { + var ret T + return ret, false + } } // check if map has latest path @@ -37,6 +47,10 @@ func ValueFrom[T any](path string, from ReadMap) (T, bool) { type TranslateFn[T any] func(in T) T func Set[T any](path string, from ReadMap, to 
WriteMap, translate TranslateFn[T]) { + if from == nil || to == nil { + return + } + pathSplitted := strings.Split(path, ".") // traverse to target @@ -45,6 +59,10 @@ func Set[T any](path string, from ReadMap, to WriteMap, translate TranslateFn[T] for i := 0; i < len(pathSplitted)-1; i++ { targetMap = targetMap.Map(pathSplitted[i]) toMap = toMap.Map(pathSplitted[i]) + + if targetMap == nil || toMap == nil { + return + } } // check if map has latest path @@ -68,8 +86,16 @@ func TranslateArray[T any](path string, from ReadMap, to WriteMap, translate Tra // traverse to target targetMap := from + if targetMap == nil { + return + } + for i := 0; i < len(pathSplitted)-1; i++ { targetMap = targetMap.Map(pathSplitted[i]) + + if targetMap == nil { + return + } } // check if map has latest path @@ -81,6 +107,10 @@ func TranslateArray[T any](path string, from ReadMap, to WriteMap, translate Tra // to map toMap := to for i := 0; i < len(pathSplitted)-1; i++ { + if toMap == nil { + return + } + toMap = toMap.Map(pathSplitted[i]) } @@ -96,12 +126,20 @@ func TranslateArray[T any](path string, from ReadMap, to WriteMap, translate Tra } func Translate[T any](path string, from ReadMap, to WriteMap, translate TranslateFn[T]) { + if from == nil || to == nil { + return + } + pathSplitted := strings.Split(path, ".") // traverse to target targetMap := from for i := 0; i < len(pathSplitted)-1; i++ { targetMap = targetMap.Map(pathSplitted[i]) + + if targetMap == nil { + return + } } // check if map has latest path @@ -114,6 +152,10 @@ func Translate[T any](path string, from ReadMap, to WriteMap, translate Translat toMap := to for i := 0; i < len(pathSplitted)-1; i++ { toMap = toMap.Map(pathSplitted[i]) + + if toMap == nil { + return + } } // try to convert target value diff --git a/pkg/util/websocketproxy/websocketproxy.go b/pkg/util/websocketproxy/websocketproxy.go index a181c689f..253aef25d 100644 --- a/pkg/util/websocketproxy/websocketproxy.go +++ 
b/pkg/util/websocketproxy/websocketproxy.go @@ -191,7 +191,7 @@ func (w *WebsocketProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) { msgType, msg, err := src.ReadMessage() if err != nil { m := websocket.FormatCloseMessage(websocket.CloseNormalClosure, fmt.Sprintf("%v", err)) - if e, ok := lo.ErrorsAs[*websocket.CloseError](err); ok { + if e, ok := lo.ErrorsAs[*websocket.CloseError](err); ok && e != nil { if e.Code != websocket.CloseNoStatusReceived { m = websocket.FormatCloseMessage(e.Code, e.Text) } @@ -234,7 +234,9 @@ func (w *WebsocketProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) { case err = <-errBackend: message = "websocketproxy: Error when copying from client to backend: %v" } - if e, ok := lo.ErrorsAs[*websocket.CloseError](err); !ok || e.Code == websocket.CloseAbnormalClosure { + + var closeError *websocket.CloseError + if ok := errors.As(err, &closeError); !ok || (closeError != nil && closeError.Code == websocket.CloseAbnormalClosure) { log.Printf(message, err) } } diff --git a/test/e2e/servicesync/servicesync.go b/test/e2e/servicesync/servicesync.go index d02a9643f..877f0979b 100644 --- a/test/e2e/servicesync/servicesync.go +++ b/test/e2e/servicesync/servicesync.go @@ -16,9 +16,7 @@ import ( ) var _ = ginkgo.Describe("map services from host to virtual cluster and vice versa", func() { - var ( - f *framework.Framework - ) + var f *framework.Framework ginkgo.JustBeforeEach(func() { // use default framework diff --git a/test/e2e/webhook/utiils.go b/test/e2e/webhook/utiils.go index 51c9e3f74..252d5f6a9 100644 --- a/test/e2e/webhook/utiils.go +++ b/test/e2e/webhook/utiils.go @@ -23,7 +23,7 @@ import ( "crypto/x509" "crypto/x509/pkix" "encoding/pem" - "fmt" + "errors" "math" "math/big" "time" @@ -58,10 +58,10 @@ func NewSignedCert(cfg *certutil.Config, key crypto.Signer, caCert *x509.Certifi return nil, err } if len(cfg.CommonName) == 0 { - return nil, fmt.Errorf("must specify a CommonName") + return nil, errors.New("must specify a 
CommonName") } if len(cfg.Usages) == 0 { - return nil, fmt.Errorf("must specify at least one ExtKeyUsage") + return nil, errors.New("must specify at least one ExtKeyUsage") } certTmpl := x509.Certificate{ diff --git a/test/framework/kubectlcmd.go b/test/framework/kubectlcmd.go index e2b129699..b1e50295a 100644 --- a/test/framework/kubectlcmd.go +++ b/test/framework/kubectlcmd.go @@ -67,7 +67,7 @@ func (tk *TestKubeconfig) KubectlCmd(args ...string) *exec.Cmd { kubectlArgs := append(defaultArgs, args...) // We allow users to specify path to kubectl, so you can test either "kubectl" or "cluster/kubectl.sh" - //and so on. + // and so on. cmd := exec.Command(tk.KubectlPath, kubectlArgs...) // caller will invoke this and wait on it. @@ -167,7 +167,7 @@ func (b KubectlBuilder) ExecWithFullOutput() (string, string, error) { select { case err := <-errCh: if err != nil { - var rc = 127 + rc := 127 if ee, ok := lo.ErrorsAs[*exec.ExitError](err); ok { rc = ee.Sys().(syscall.WaitStatus).ExitStatus() log.GetInstance().Infof("rc: %d", rc) From 9eb6b62a71a43c741caa138b485d9414472d2445 Mon Sep 17 00:00:00 2001 From: Thomas Kosiewski Date: Tue, 9 Jul 2024 11:32:00 +0200 Subject: [PATCH 3/3] lint(nilaway): Updated GitHub Action Signed-off-by: Thomas Kosiewski --- .github/workflows/lint.yaml | 8 +++++++- Justfile | 1 + cmd/vclusterctl/cmd/root.go | 16 ++++++++++++---- .../resources/ingresses/syncer_test.go | 2 +- 4 files changed, 21 insertions(+), 6 deletions(-) diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index 3a66b10c5..bec61e6cf 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -47,5 +47,11 @@ jobs: exit 1 fi + - name: Install golangci-lint + run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.59.1 + + - name: Build custom golangci-lint + run: golangci-lint custom + - name: Run golangci-lint - uses: golangci/golangci-lint-action@v6 + run: ./custom-gcl run diff --git a/Justfile b/Justfile index 
e6e8ca50a..c77995b6a 100644 --- a/Justfile +++ b/Justfile @@ -26,6 +26,7 @@ release-snapshot: gen-license-report # Run golangci-lint for all packages lint *ARGS: [ -f ./custom-gcl ] || golangci-lint custom + ./custom-gcl cache clean ./custom-gcl run {{ARGS}} # Check struct memory alignment and print potential improvements diff --git a/cmd/vclusterctl/cmd/root.go b/cmd/vclusterctl/cmd/root.go index 01034347f..a18801bd6 100644 --- a/cmd/vclusterctl/cmd/root.go +++ b/cmd/vclusterctl/cmd/root.go @@ -2,6 +2,7 @@ package cmd import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -27,13 +28,17 @@ import ( ) // NewRootCmd returns a new root command -func NewRootCmd(log log.Logger, globalFlags *flags.GlobalFlags) *cobra.Command { +func NewRootCmd(log log.Logger) *cobra.Command { return &cobra.Command{ Use: "vcluster", SilenceUsage: true, SilenceErrors: true, Short: "Welcome to vcluster!", - PersistentPreRun: func(_ *cobra.Command, _ []string) { + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + if globalFlags == nil { + return errors.New("nil globalFlags") + } + if globalFlags.Config == "" { var err error globalFlags.Config, err = config.DefaultFilePath() @@ -52,6 +57,8 @@ func NewRootCmd(log log.Logger, globalFlags *flags.GlobalFlags) *cobra.Command { } else { log.SetLevel(logrus.InfoLevel) } + + return nil }, Long: `vcluster root command`, } @@ -84,10 +91,11 @@ func Execute() { } } +var globalFlags *flags.GlobalFlags + // BuildRoot creates a new root command from the func BuildRoot(log log.Logger) (*cobra.Command, *flags.GlobalFlags, error) { - var globalFlags *flags.GlobalFlags - rootCmd := NewRootCmd(log, globalFlags) + rootCmd := NewRootCmd(log) persistentFlags := rootCmd.PersistentFlags() globalFlags = flags.SetGlobalFlags(persistentFlags, log) diff --git a/pkg/controllers/resources/ingresses/syncer_test.go b/pkg/controllers/resources/ingresses/syncer_test.go index ad52f0b37..cee918cce 100644 --- a/pkg/controllers/resources/ingresses/syncer_test.go 
+++ b/pkg/controllers/resources/ingresses/syncer_test.go @@ -413,7 +413,7 @@ func TestSync(t *testing.T) { "vcluster.loft.sh/object-namespace": baseIngress.Namespace, translate.UIDAnnotation: "", "alb.ingress.kubernetes.io/actions.testservice-x-test-x-suffix": `{"forwardConfig":{"targetGroups":[{"serviceName":"nginx-service-x-test-x-suffix","servicePort":"80","weight":100}]}}`, - "alb.ingress.kubernetes.io/actions.ssl-redirect-x-test-x-suffix": `{"type":"redirect","forwardConfig":{},"redirectConfig":{"Port":"443","Protocol":"HTTPS","StatusCode":"HTTP_301"}}`, + "alb.ingress.kubernetes.io/actions.ssl-redirect-x-test-x-suffix": `{"redirectConfig":{"Port":"443","Protocol":"HTTPS","StatusCode":"HTTP_301"},"type":"redirect","forwardConfig":{}}`, }, }, },