diff --git a/cmd/vcluster/cmd/debug/debug.go b/cmd/vcluster/cmd/debug/debug.go
new file mode 100644
index 000000000..8b647b944
--- /dev/null
+++ b/cmd/vcluster/cmd/debug/debug.go
@@ -0,0 +1,23 @@
+package debug
+
+import (
+	"github.com/loft-sh/vcluster/cmd/vcluster/cmd/debug/etcd"
+	"github.com/loft-sh/vcluster/cmd/vcluster/cmd/debug/mappings"
+	"github.com/spf13/cobra"
+)
+
+func NewDebugCmd() *cobra.Command {
+	debugCmd := &cobra.Command{
+		Use:   "debug",
+		Short: "vCluster debug subcommand",
+		Long: `#######################################################
+################### vcluster debug ####################
+#######################################################
+	`,
+		Args: cobra.NoArgs,
+	}
+
+	debugCmd.AddCommand(mappings.NewMappingsCmd())
+	debugCmd.AddCommand(etcd.NewEtcdCmd())
+	return debugCmd
+}
diff --git a/cmd/vcluster/cmd/debug/etcd/etcd.go b/cmd/vcluster/cmd/debug/etcd/etcd.go
new file mode 100644
index 000000000..1844a8eb2
--- /dev/null
+++ b/cmd/vcluster/cmd/debug/etcd/etcd.go
@@ -0,0 +1,20 @@
+package etcd
+
+import (
+	"github.com/spf13/cobra"
+)
+
+func NewEtcdCmd() *cobra.Command {
+	debugCmd := &cobra.Command{
+		Use:   "etcd",
+		Short: "vCluster etcd subcommand",
+		Long: `#######################################################
+############### vcluster debug etcd ###############
+#######################################################
+	`,
+		Args: cobra.NoArgs,
+	}
+
+	debugCmd.AddCommand(NewKeysCommand())
+	return debugCmd
+}
diff --git a/cmd/vcluster/cmd/debug/etcd/keys.go b/cmd/vcluster/cmd/debug/etcd/keys.go
new file mode 100644
index 000000000..ff986b392
--- /dev/null
+++ b/cmd/vcluster/cmd/debug/etcd/keys.go
@@ -0,0 +1,61 @@
+package etcd
+
+import (
+	"context"
+	"fmt"
+	"os"
+
+	"github.com/loft-sh/vcluster/pkg/config"
+	"github.com/loft-sh/vcluster/pkg/constants"
+	"github.com/loft-sh/vcluster/pkg/etcd"
+	"github.com/spf13/cobra"
+)
+
+type KeysOptions struct {
+	Config string
+
+	Prefix string
+}
+
+func NewKeysCommand() *cobra.Command {
+	options := &KeysOptions{}
+	cmd := &cobra.Command{
+		Use:   "keys",
+		Short: "Dump the vCluster etcd stored keys",
+		Args:  cobra.NoArgs,
+		RunE: func(cobraCommand *cobra.Command, _ []string) (err error) {
+			return ExecuteKeys(cobraCommand.Context(), options)
+		},
+	}
+
+	cmd.Flags().StringVar(&options.Config, "config", constants.DefaultVClusterConfigLocation, "The path where to find the vCluster config to load")
+	cmd.Flags().StringVar(&options.Prefix, "prefix", "/", "The prefix to use for listing the keys")
+	return cmd
+}
+
+func ExecuteKeys(ctx context.Context, options *KeysOptions) error {
+	// parse vCluster config
+	vConfig, err := config.ParseConfig(options.Config, os.Getenv("VCLUSTER_NAME"), nil)
+	if err != nil {
+		return err
+	}
+
+	// create new etcd client
+	etcdClient, err := etcd.NewFromConfig(ctx, vConfig)
+	if err != nil {
+		return err
+	}
+
+	// list keys under the given prefix
+	keyValues, err := etcdClient.List(ctx, options.Prefix, 0)
+	if err != nil {
+		return err
+	}
+
+	// print keys
+	for _, keyValue := range keyValues {
+		fmt.Println(string(keyValue.Key))
+	}
+
+	return nil
+}
diff --git a/cmd/vcluster/cmd/debug/mappings/add.go b/cmd/vcluster/cmd/debug/mappings/add.go
new file mode 100644
index 000000000..e66f03f44
--- /dev/null
+++ b/cmd/vcluster/cmd/debug/mappings/add.go
@@ -0,0 +1,118 @@
+package mappings
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/loft-sh/vcluster/pkg/config"
+	"github.com/loft-sh/vcluster/pkg/constants"
+	"github.com/loft-sh/vcluster/pkg/etcd"
+ "github.com/loft-sh/vcluster/pkg/mappings/store" + "github.com/loft-sh/vcluster/pkg/syncer/synccontext" + "github.com/spf13/cobra" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" +) + +type AddOptions struct { + Config string + + APIVersion string + Kind string + + Host string + Virtual string +} + +func NewAddCommand() *cobra.Command { + options := &AddOptions{} + cmd := &cobra.Command{ + Use: "add", + Short: "Adds a custom mapping to the vCluster stored mappings", + RunE: func(cobraCommand *cobra.Command, _ []string) (err error) { + return ExecuteSave(cobraCommand.Context(), options) + }, + } + + cmd.Flags().StringVar(&options.Config, "config", constants.DefaultVClusterConfigLocation, "The path where to find the vCluster config to load") + cmd.Flags().StringVar(&options.Kind, "kind", "", "The Kind of the object") + cmd.Flags().StringVar(&options.APIVersion, "api-version", "", "The APIVersion of the object") + cmd.Flags().StringVar(&options.Host, "host", "", "The host object in the form of namespace/name") + cmd.Flags().StringVar(&options.Virtual, "virtual", "", "The virtual object in the form of namespace/name") + + return cmd +} +func ExecuteSave(ctx context.Context, options *AddOptions) error { + nameMapping, etcdBackend, err := parseMappingAndClient(ctx, options.Config, options.Kind, options.APIVersion, options.Virtual, options.Host) + if err != nil { + return err + } + + err = etcdBackend.Save(ctx, &store.Mapping{ + NameMapping: nameMapping, + }) + if err != nil { + return fmt.Errorf("error saving %s: %w", nameMapping.String(), err) + } + + klog.FromContext(ctx).Info("Successfully added name mapping to store", "mapping", nameMapping.String()) + return nil +} + +func parseMappingAndClient(ctx context.Context, configPath, kind, apiVersion, virtual, host string) (synccontext.NameMapping, store.Backend, error) { + if kind == "" || apiVersion == "" || virtual == "" || host == "" { + return synccontext.NameMapping{}, nil, fmt.Errorf("make sure to specify --kind, --api-version, --host and --virtual") + } + + // parse group version + groupVersion, err := schema.ParseGroupVersion(apiVersion) + if err != nil { + return synccontext.NameMapping{}, nil, fmt.Errorf("parse group version: %w", err) + } + + // parse host + hostName := types.NamespacedName{Name: host} + if strings.Contains(host, "/") { + namespaceName := strings.SplitN(host, "/", 2) + hostName.Namespace = namespaceName[0] + hostName.Name = namespaceName[1] + } + + // parse virtual + virtualName := types.NamespacedName{Name: virtual} + if strings.Contains(virtual, "/") { + namespaceName := strings.SplitN(virtual, "/", 2) + virtualName.Namespace = namespaceName[0] + virtualName.Name = namespaceName[1] + } + + // build name mapping + nameMapping := synccontext.NameMapping{ + GroupVersionKind: schema.GroupVersionKind{ + Group: groupVersion.Group, + Version: groupVersion.Version, + Kind: kind, + }, + VirtualName: virtualName, + HostName: hostName, + } + + // parse vCluster config + vConfig, err := config.ParseConfig(configPath, os.Getenv("VCLUSTER_NAME"), nil) + if err != nil { + return synccontext.NameMapping{}, nil, err + } + + // create new etcd client + etcdClient, err := etcd.NewFromConfig(ctx, vConfig) + if err != nil { + return synccontext.NameMapping{}, nil, err + } + + // create new etcd backend & list mappings + etcdBackend := store.NewEtcdBackend(etcdClient) + return nameMapping, etcdBackend, nil +} diff --git a/cmd/vcluster/cmd/debug/mappings/clear.go 
b/cmd/vcluster/cmd/debug/mappings/clear.go
new file mode 100644
index 000000000..9a5acb66f
--- /dev/null
+++ b/cmd/vcluster/cmd/debug/mappings/clear.go
@@ -0,0 +1,64 @@
+package mappings
+
+import (
+	"context"
+	"fmt"
+	"os"
+
+	"github.com/loft-sh/vcluster/pkg/config"
+	"github.com/loft-sh/vcluster/pkg/constants"
+	"github.com/loft-sh/vcluster/pkg/etcd"
+	"github.com/loft-sh/vcluster/pkg/mappings/store"
+	"github.com/spf13/cobra"
+	"k8s.io/klog/v2"
+)
+
+type ClearOptions struct {
+	Config string
+}
+
+func NewClearCommand() *cobra.Command {
+	options := &ClearOptions{}
+	cmd := &cobra.Command{
+		Use:   "clear",
+		Short: "Empty the vCluster stored mappings",
+		Args:  cobra.NoArgs,
+		RunE: func(cobraCommand *cobra.Command, _ []string) (err error) {
+			return ExecuteClear(cobraCommand.Context(), options)
+		},
+	}
+
+	cmd.Flags().StringVar(&options.Config, "config", constants.DefaultVClusterConfigLocation, "The path where to find the vCluster config to load")
+	return cmd
+}
+func ExecuteClear(ctx context.Context, options *ClearOptions) error {
+	// parse vCluster config
+	vConfig, err := config.ParseConfig(options.Config, os.Getenv("VCLUSTER_NAME"), nil)
+	if err != nil {
+		return err
+	}
+
+	// create new etcd client
+	etcdClient, err := etcd.NewFromConfig(ctx, vConfig)
+	if err != nil {
+		return err
+	}
+
+	// create new etcd backend & list mappings
+	etcdBackend := store.NewEtcdBackend(etcdClient)
+	mappings, err := etcdBackend.List(ctx)
+	if err != nil {
+		return fmt.Errorf("list mappings: %w", err)
+	}
+
+	// delete all mappings
+	for _, mapping := range mappings {
+		klog.FromContext(ctx).Info("Delete mapping", "mapping", mapping.String())
+		err = etcdBackend.Delete(ctx, mapping)
+		if err != nil {
+			return fmt.Errorf("delete mapping %s: %w", mapping.String(), err)
+		}
+	}
+
+	return nil
+}
diff --git a/cmd/vcluster/cmd/debug/mappings/delete.go b/cmd/vcluster/cmd/debug/mappings/delete.go
new file mode 100644
index 000000000..ee69606c5
--- /dev/null
+++ b/cmd/vcluster/cmd/debug/mappings/delete.go
@@ -0,0 +1,56 @@
+package mappings
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/loft-sh/vcluster/pkg/constants"
+	"github.com/loft-sh/vcluster/pkg/mappings/store"
+	"github.com/spf13/cobra"
+	"k8s.io/klog/v2"
+)
+
+type DeleteOptions struct {
+	Config string
+
+	APIVersion string
+	Kind       string
+
+	Host    string
+	Virtual string
+}
+
+func NewDeleteCommand() *cobra.Command {
+	options := &DeleteOptions{}
+	cmd := &cobra.Command{
+		Use:   "delete",
+		Short: "Deletes a custom mapping from the vCluster stored mappings",
+		RunE: func(cobraCommand *cobra.Command, _ []string) (err error) {
+			return ExecuteDelete(cobraCommand.Context(), options)
+		},
+	}
+
+	cmd.Flags().StringVar(&options.Config, "config", constants.DefaultVClusterConfigLocation, "The path where to find the vCluster config to load")
+	cmd.Flags().StringVar(&options.Kind, "kind", "", "The Kind of the object")
+	cmd.Flags().StringVar(&options.APIVersion, "api-version", "", "The APIVersion of the object")
+	cmd.Flags().StringVar(&options.Host, "host", "", "The host object in the form of namespace/name")
+	cmd.Flags().StringVar(&options.Virtual, "virtual", "", "The virtual object in the form of namespace/name")
+
+	return cmd
+}
+func ExecuteDelete(ctx context.Context, options *DeleteOptions) error {
+	nameMapping, etcdBackend, err := parseMappingAndClient(ctx, options.Config, options.Kind, options.APIVersion, options.Virtual, options.Host)
+	if err != nil {
+		return err
+	}
+
+	err = etcdBackend.Delete(ctx, &store.Mapping{
+		NameMapping: nameMapping,
+	})
+	if err != nil {
+		return fmt.Errorf("error deleting %s: %w", nameMapping.String(), err)
+	}
+
+	klog.FromContext(ctx).Info("Successfully deleted name mapping from store", "mapping", nameMapping.String())
+	return nil
+}
diff --git a/cmd/vcluster/cmd/debug/mappings/list.go b/cmd/vcluster/cmd/debug/mappings/list.go
new file mode 100644
index 000000000..affa2d0ae
--- /dev/null
+++ b/cmd/vcluster/cmd/debug/mappings/list.go
@@ -0,0 +1,78 @@
+package mappings
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+
+	"github.com/loft-sh/vcluster/pkg/config"
+	"github.com/loft-sh/vcluster/pkg/constants"
+	"github.com/loft-sh/vcluster/pkg/etcd"
+	"github.com/loft-sh/vcluster/pkg/mappings/store"
+	"github.com/spf13/cobra"
+)
+
+type ListOptions struct {
+	Config string
+
+	Kind string
+}
+
+func NewListCommand() *cobra.Command {
+	options := &ListOptions{}
+	cmd := &cobra.Command{
+		Use:   "list",
+		Short: "Dump the vCluster stored mappings",
+		Args:  cobra.NoArgs,
+		RunE: func(cobraCommand *cobra.Command, _ []string) (err error) {
+			return ExecuteList(cobraCommand.Context(), options)
+		},
+	}
+
+	cmd.Flags().StringVar(&options.Config, "config", constants.DefaultVClusterConfigLocation, "The path where to find the vCluster config to load")
+	cmd.Flags().StringVar(&options.Kind, "kind", "", "The kind of objects to list")
+	return cmd
+}
+
+func ExecuteList(ctx context.Context, options *ListOptions) error {
+	// parse vCluster config
+	vConfig, err := config.ParseConfig(options.Config, os.Getenv("VCLUSTER_NAME"), nil)
+	if err != nil {
+		return err
+	}
+
+	// create new etcd client
+	etcdClient, err := etcd.NewFromConfig(ctx, vConfig)
+	if err != nil {
+		return err
+	}
+
+	// create new etcd backend & list mappings
+	mappings, err := store.NewEtcdBackend(etcdClient).List(ctx)
+	if err != nil {
+		return fmt.Errorf("list mappings: %w", err)
+	}
+
+	// filter if kind is specified
+	if options.Kind != "" {
+		newMappings := make([]*store.Mapping, 0, len(mappings))
+		for _, mapping := range mappings {
+			if mapping.Kind != options.Kind {
+				continue
+			}
+
+			newMappings = append(newMappings, mapping)
+		}
+		mappings = newMappings
+	}
+
+	// print mappings
+	raw, err := json.MarshalIndent(mappings, "", " ")
+	if err != nil {
+		return fmt.Errorf("marshal mappings: %w", err)
+	}
+
+	fmt.Println(string(raw))
+	return nil
+}
diff --git a/cmd/vcluster/cmd/debug/mappings/mappings.go b/cmd/vcluster/cmd/debug/mappings/mappings.go
new file mode 100644
index 000000000..ed5661f52
--- /dev/null
+++ b/cmd/vcluster/cmd/debug/mappings/mappings.go
@@ -0,0 +1,23 @@
+package mappings
+
+import (
+	"github.com/spf13/cobra"
+)
+
+func NewMappingsCmd() *cobra.Command {
+	debugCmd := &cobra.Command{
+		Use:   "mappings",
+		Short: "vCluster mappings subcommand",
+		Long: `#######################################################
+############### vcluster debug mappings ###############
+#######################################################
+	`,
+		Args: cobra.NoArgs,
+	}
+
+	debugCmd.AddCommand(NewListCommand())
+	debugCmd.AddCommand(NewClearCommand())
+	debugCmd.AddCommand(NewAddCommand())
+	debugCmd.AddCommand(NewDeleteCommand())
+	return debugCmd
+}
diff --git a/cmd/vcluster/cmd/root.go b/cmd/vcluster/cmd/root.go
index 90d9f9c63..71e9ca92b 100644
--- a/cmd/vcluster/cmd/root.go
+++ b/cmd/vcluster/cmd/root.go
@@ -6,6 +6,7 @@ import (
 	"github.com/go-logr/logr"
 	loftlogr "github.com/loft-sh/log/logr"
+	"github.com/loft-sh/vcluster/cmd/vcluster/cmd/debug"
 	"github.com/spf13/cobra"
 	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
@@
-57,5 +58,6 @@ func BuildRoot() *cobra.Command { // add top level commands rootCmd.AddCommand(NewStartCommand()) rootCmd.AddCommand(NewCpCommand()) + rootCmd.AddCommand(debug.NewDebugCmd()) return rootCmd } diff --git a/cmd/vcluster/cmd/start.go b/cmd/vcluster/cmd/start.go index 9022531df..b55b3565e 100644 --- a/cmd/vcluster/cmd/start.go +++ b/cmd/vcluster/cmd/start.go @@ -7,6 +7,7 @@ import ( "runtime/debug" "github.com/loft-sh/vcluster/pkg/config" + "github.com/loft-sh/vcluster/pkg/constants" "github.com/loft-sh/vcluster/pkg/integrations" "github.com/loft-sh/vcluster/pkg/leaderelection" "github.com/loft-sh/vcluster/pkg/plugin" @@ -38,7 +39,7 @@ func NewStartCommand() *cobra.Command { }, } - cmd.Flags().StringVar(&startOptions.Config, "config", "/var/vcluster/config.yaml", "The path where to find the vCluster config to load") + cmd.Flags().StringVar(&startOptions.Config, "config", constants.DefaultVClusterConfigLocation, "The path where to find the vCluster config to load") // Should only be used for development cmd.Flags().StringArrayVar(&startOptions.SetValues, "set", []string{}, "Set values for the config. E.g. --set 'exportKubeConfig.secret.name=my-name'") diff --git a/go.mod b/go.mod index e912202c3..c7434b64d 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,6 @@ require ( github.com/loft-sh/utils v0.0.29 github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d github.com/mitchellh/go-homedir v1.1.0 - github.com/moby/locker v1.0.1 github.com/moby/term v0.5.0 github.com/olekukonko/tablewriter v0.0.5 github.com/onsi/ginkgo/v2 v2.17.2 @@ -45,7 +44,6 @@ require ( golang.org/x/sync v0.7.0 google.golang.org/grpc v1.64.0 google.golang.org/protobuf v1.34.1 - gopkg.in/square/go-jose.v2 v2.6.0 gopkg.in/yaml.v3 v3.0.1 gotest.tools v2.2.0+incompatible gotest.tools/v3 v3.5.1 @@ -176,7 +174,7 @@ require ( github.com/tcnksm/go-gitconfig v0.1.2 // indirect github.com/ulikunitz/xz v0.5.11 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.etcd.io/etcd/api/v3 v3.5.14 // indirect + go.etcd.io/etcd/api/v3 v3.5.14 go.etcd.io/etcd/client/pkg/v3 v3.5.14 // indirect go.etcd.io/etcd/client/v3 v3.5.14 go.mongodb.org/mongo-driver v1.10.0 // indirect diff --git a/go.sum b/go.sum index efd1b41d2..cb3984369 100644 --- a/go.sum +++ b/go.sum @@ -374,8 +374,6 @@ github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= -github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= @@ -813,8 +811,6 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= -gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= -gopkg.in/square/go-jose.v2 v2.6.0/go.mod 
h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go index 6598737a4..a6f813278 100644 --- a/pkg/constants/constants.go +++ b/pkg/constants/constants.go @@ -4,4 +4,7 @@ const ( K8sKineEndpoint = "unix:///data/kine.sock" K3sKineEndpoint = "unix:///data/server/kine.sock" K0sKineEndpoint = "unix:///run/k0s/kine/kine.sock:2379" + + // DefaultVClusterConfigLocation is the default location of the vCluster config within the container + DefaultVClusterConfigLocation = "/var/vcluster/config.yaml" ) diff --git a/pkg/constants/indices.go b/pkg/constants/indices.go index d43ac9c3f..a9a38ae2d 100644 --- a/pkg/constants/indices.go +++ b/pkg/constants/indices.go @@ -3,12 +3,9 @@ package constants import "time" const ( - IndexByPhysicalName = "IndexByPhysicalName" - IndexByVirtualName = "IndexByVirtualName" IndexByAssigned = "IndexByAssigned" IndexByIngressSecret = "IndexByIngressSecret" IndexByPodSecret = "IndexByPodSecret" - IndexByConfigMap = "IndexByConfigMap" // IndexByHostName is used to map rewritten hostnames(advertised as node addresses) to nodenames IndexByHostName = "IndexByHostName" diff --git a/pkg/controllers/generic/export_patcher.go b/pkg/controllers/generic/export_patcher.go index 069cc59ca..61f65cd05 100644 --- a/pkg/controllers/generic/export_patcher.go +++ b/pkg/controllers/generic/export_patcher.go @@ -27,7 +27,7 @@ func (e *exportPatcher) ServerSideApply(ctx *synccontext.SyncContext, fromObj, d syncContext: ctx, namespace: fromObj.GetNamespace(), - targetNamespace: translate.Default.HostNamespace(fromObj.GetNamespace()), + targetNamespace: translate.Default.HostNamespace(ctx, fromObj.GetNamespace()), }) } @@ -58,13 +58,13 @@ func (r *virtualToHostNameResolver) TranslateNameWithNamespace(name string, name } return types.NamespacedName{ - Namespace: translate.Default.HostNamespace(namespace), - Name: translate.Default.HostName(name, ns), + Namespace: translate.Default.HostNamespace(r.syncContext, namespace), + Name: translate.Default.HostName(r.syncContext, name, ns), } }), nil } - return translate.Default.HostName(name, namespace), nil + return translate.Default.HostName(r.syncContext, name, namespace), nil } func (r *virtualToHostNameResolver) TranslateLabelExpressionsSelector(selector *metav1.LabelSelector) (*metav1.LabelSelector, error) { @@ -72,7 +72,7 @@ func (r *virtualToHostNameResolver) TranslateLabelExpressionsSelector(selector * } func (r *virtualToHostNameResolver) TranslateLabelKey(key string) (string, error) { - return translate.Default.HostLabel(r.syncContext, key), nil + return translate.Default.HostLabel(r.syncContext, key, ""), nil } func (r *virtualToHostNameResolver) TranslateLabelSelector(selector map[string]string) (map[string]string, error) { @@ -80,11 +80,11 @@ func (r *virtualToHostNameResolver) TranslateLabelSelector(selector map[string]s MatchLabels: selector, } - return metav1.LabelSelectorAsMap(translate.HostLabelSelector(r.syncContext, labelSelector)) + return metav1.LabelSelectorAsMap(translate.HostLabelSelector(r.syncContext, labelSelector, "")) } func (r *virtualToHostNameResolver) TranslateNamespaceRef(namespace string) (string, error) { - return translate.Default.HostNamespace(namespace), nil + return 
translate.Default.HostNamespace(r.syncContext, namespace), nil } func validateExportConfig(config *vclusterconfig.Export) error { diff --git a/pkg/controllers/generic/import_patcher.go b/pkg/controllers/generic/import_patcher.go index b42442e16..8591e1085 100644 --- a/pkg/controllers/generic/import_patcher.go +++ b/pkg/controllers/generic/import_patcher.go @@ -1,15 +1,12 @@ package generic import ( - "context" "regexp" vclusterconfig "github.com/loft-sh/vcluster/config" - "github.com/loft-sh/vcluster/pkg/constants" + "github.com/loft-sh/vcluster/pkg/mappings" "github.com/loft-sh/vcluster/pkg/patches" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" - "github.com/loft-sh/vcluster/pkg/util/clienthelper" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -22,7 +19,9 @@ type importPatcher struct { var _ ObjectPatcher = &importPatcher{} func (s *importPatcher) ServerSideApply(ctx *synccontext.SyncContext, _, destObj, sourceObj client.Object) error { - return patches.ApplyPatches(destObj, sourceObj, s.config.Patches, s.config.ReversePatches, &hostToVirtualImportNameResolver{virtualClient: s.virtualClient, ctx: ctx}) + return patches.ApplyPatches(destObj, sourceObj, s.config.Patches, s.config.ReversePatches, &hostToVirtualImportNameResolver{ + syncContext: ctx, + }) } func (s *importPatcher) ReverseUpdate(ctx *synccontext.SyncContext, destObj, sourceObj client.Object) error { @@ -33,8 +32,7 @@ func (s *importPatcher) ReverseUpdate(ctx *synccontext.SyncContext, destObj, sou } type hostToVirtualImportNameResolver struct { - virtualClient client.Client - ctx context.Context + syncContext *synccontext.SyncContext } func (r *hostToVirtualImportNameResolver) TranslateName(name string, _ *regexp.Regexp, _ string) (string, error) { @@ -58,10 +56,6 @@ func (r *hostToVirtualImportNameResolver) TranslateLabelSelector(selector map[st } func (r *hostToVirtualImportNameResolver) TranslateNamespaceRef(namespace string) (string, error) { - vNamespace := (&corev1.Namespace{}).DeepCopyObject().(client.Object) - err := clienthelper.GetByIndex(r.ctx, r.virtualClient, vNamespace, constants.IndexByPhysicalName, namespace) - if err != nil { - return "", err - } - return vNamespace.GetName(), nil + vNamespace := mappings.HostToVirtual(r.syncContext, namespace, "", nil, mappings.Namespaces()) + return vNamespace.Name, nil } diff --git a/pkg/controllers/generic/import_syncer.go b/pkg/controllers/generic/import_syncer.go index 85f4986be..fb076102f 100644 --- a/pkg/controllers/generic/import_syncer.go +++ b/pkg/controllers/generic/import_syncer.go @@ -7,6 +7,7 @@ import ( "time" vclusterconfig "github.com/loft-sh/vcluster/config" + "github.com/loft-sh/vcluster/pkg/mappings" "github.com/loft-sh/vcluster/pkg/scheme" "github.com/loft-sh/vcluster/pkg/syncer" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" @@ -15,9 +16,7 @@ import ( "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - "github.com/loft-sh/vcluster/pkg/constants" "github.com/loft-sh/vcluster/pkg/log" - "github.com/loft-sh/vcluster/pkg/util/clienthelper" "github.com/loft-sh/vcluster/pkg/util/translate" corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" @@ -97,9 +96,8 @@ func createImporter(ctx *synccontext.RegisterContext, config *vclusterconfig.Imp name: controllerID, syncerOptions: &syncertypes.Options{ - DisableUIDDeletion: true, - IsClusterScopedCRD: isClusterScoped, - HasStatusSubresource: hasStatusSubresource, + DisableUIDDeletion: true, 
+ IsClusterScopedCRD: isClusterScoped, }, }, nil } @@ -143,6 +141,10 @@ func (s *importer) Syncer() syncertypes.Sync[client.Object] { return syncer.ToGenericSyncer[*unstructured.Unstructured](s) } +func (s *importer) Migrate(_ *synccontext.RegisterContext, _ synccontext.Mapper) error { + return nil +} + func (s *importer) SyncToVirtual(ctx *synccontext.SyncContext, event *synccontext.SyncToVirtualEvent[*unstructured.Unstructured]) (ctrl.Result, error) { // check if annotation is already present pAnnotations := event.Host.GetAnnotations() @@ -344,7 +346,7 @@ func (s *importer) isVirtualManaged(vObj client.Object) bool { return vObj.GetAnnotations() != nil && vObj.GetAnnotations()[translate.ControllerLabel] != "" && vObj.GetAnnotations()[translate.ControllerLabel] == s.Name() } -func (s *importer) IsManaged(_ *synccontext.SyncContext, pObj client.Object) (bool, error) { +func (s *importer) IsManaged(ctx *synccontext.SyncContext, pObj client.Object) (bool, error) { if s.syncerOptions.IsClusterScopedCRD { return true, nil } @@ -353,7 +355,7 @@ func (s *importer) IsManaged(_ *synccontext.SyncContext, pObj client.Object) (bo } // check if the pObj belong to a namespace managed by this vcluster - if !translate.Default.IsTargetedNamespace(pObj.GetNamespace()) { + if !translate.Default.IsTargetedNamespace(ctx, pObj.GetNamespace()) { return false, nil } @@ -371,7 +373,7 @@ func (s *importer) VirtualToHost(ctx *synccontext.SyncContext, req types.Namespa return s.virtualToHost(ctx, req, vObj) } - return types.NamespacedName{Name: translate.Default.HostName(req.Name, req.Namespace), Namespace: translate.Default.HostNamespace(req.Namespace)} + return types.NamespacedName{Name: translate.Default.HostName(ctx, req.Name, req.Namespace), Namespace: translate.Default.HostNamespace(ctx, req.Namespace)} } func (s *importer) HostToVirtual(ctx *synccontext.SyncContext, req types.NamespacedName, pObj client.Object) types.NamespacedName { @@ -383,13 +385,12 @@ func (s *importer) HostToVirtual(ctx *synccontext.SyncContext, req types.Namespa // in multi-namespace mode we just query the target namespace if !translate.Default.SingleNamespaceTarget() { - vNamespace := &corev1.Namespace{} - err := clienthelper.GetByIndex(ctx, s.virtualClient, vNamespace, constants.IndexByPhysicalName, req.Namespace) - if err != nil { + vNamespace := mappings.HostToVirtual(ctx, req.Namespace, "", nil, mappings.Namespaces()) + if vNamespace.Name == "" { return types.NamespacedName{} } - return types.NamespacedName{Name: req.Name, Namespace: vNamespace.GetName()} + return types.NamespacedName{Name: req.Name, Namespace: vNamespace.Name} } // this is a little bit more tricky diff --git a/pkg/controllers/resources/configmaps/syncer.go b/pkg/controllers/resources/configmaps/syncer.go index 1d79a4eb3..d851a68e9 100644 --- a/pkg/controllers/resources/configmaps/syncer.go +++ b/pkg/controllers/resources/configmaps/syncer.go @@ -1,9 +1,7 @@ package configmaps import ( - "context" "fmt" - "strings" "github.com/loft-sh/vcluster/pkg/constants" "github.com/loft-sh/vcluster/pkg/mappings" @@ -17,10 +15,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -49,20 +47,12 @@ func (s *configMapSyncer) Syncer() 
syncertypes.Sync[client.Object] { return syncer.ToGenericSyncer[*corev1.ConfigMap](s) } -var _ syncertypes.IndicesRegisterer = &configMapSyncer{} - -func (s *configMapSyncer) RegisterIndices(ctx *synccontext.RegisterContext) error { - // index pods by their used config maps - return ctx.VirtualManager.GetFieldIndexer().IndexField(ctx, &corev1.Pod{}, constants.IndexByConfigMap, func(rawObj client.Object) []string { - pod := rawObj.(*corev1.Pod) - return configNamesFromPod(pod) - }) -} - var _ syncertypes.ControllerModifier = &configMapSyncer{} -func (s *configMapSyncer) ModifyController(_ *synccontext.RegisterContext, builder *builder.Builder) (*builder.Builder, error) { - return builder.Watches(&corev1.Pod{}, handler.EnqueueRequestsFromMapFunc(mapPods)), nil +func (s *configMapSyncer) ModifyController(ctx *synccontext.RegisterContext, builder *builder.Builder) (*builder.Builder, error) { + return builder.WatchesRawSource(ctx.Mappings.Store().Watch(s.GroupVersionKind(), func(nameMapping synccontext.NameMapping, queue workqueue.RateLimitingInterface) { + queue.Add(reconcile.Request{NamespacedName: nameMapping.VirtualName}) + })), nil } func (s *configMapSyncer) SyncToHost(ctx *synccontext.SyncContext, event *synccontext.SyncToHostEvent[*corev1.ConfigMap]) (ctrl.Result, error) { @@ -135,34 +125,14 @@ func (s *configMapSyncer) isConfigMapUsed(ctx *synccontext.SyncContext, vObj run return true, nil } - podList := &corev1.PodList{} - err := ctx.VirtualClient.List(ctx, podList, client.MatchingFields{constants.IndexByConfigMap: configMap.Namespace + "/" + configMap.Name}) - if err != nil { - return false, err - } - - return len(podList.Items) > 0, nil -} - -func mapPods(_ context.Context, obj client.Object) []reconcile.Request { - pod, ok := obj.(*corev1.Pod) - if !ok { - return nil - } - - requests := []reconcile.Request{} - names := configNamesFromPod(pod) - for _, name := range names { - splitted := strings.Split(name, "/") - if len(splitted) == 2 { - requests = append(requests, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Namespace: splitted[0], - Name: splitted[1], - }, - }) - } - } + // retrieve references for config map + references := ctx.Mappings.Store().ReferencesTo(ctx, synccontext.Object{ + GroupVersionKind: s.GroupVersionKind(), + NamespacedName: types.NamespacedName{ + Namespace: configMap.Namespace, + Name: configMap.Name, + }, + }) - return requests + return len(references) > 0, nil } diff --git a/pkg/controllers/resources/configmaps/syncer_test.go b/pkg/controllers/resources/configmaps/syncer_test.go index 2dbd582ce..e5ecc6883 100644 --- a/pkg/controllers/resources/configmaps/syncer_test.go +++ b/pkg/controllers/resources/configmaps/syncer_test.go @@ -1,7 +1,6 @@ package configmaps import ( - "context" "testing" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" @@ -29,7 +28,7 @@ func TestSync(t *testing.T) { } syncedConfigMap := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: translate.Default.HostName(baseConfigMap.Name, baseConfigMap.Namespace), + Name: translate.Default.HostName(nil, baseConfigMap.Name, baseConfigMap.Namespace), Namespace: "test", Annotations: map[string]string{ translate.NameAnnotation: baseConfigMap.Name, @@ -138,48 +137,3 @@ func TestSync(t *testing.T) { }, }) } - -func TestMapping(t *testing.T) { - // test pod - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "test", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "test", - Env: []corev1.EnvVar{ - { - Name: "test", - 
ValueFrom: &corev1.EnvVarSource{ - ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "a", - }, - }, - }, - }, - }, - }, - }, - Volumes: []corev1.Volume{ - { - Name: "test", - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "b", - }, - }, - }, - }, - }, - }, - } - requests := mapPods(context.Background(), pod) - if len(requests) != 2 || requests[0].Name != "a" || requests[0].Namespace != "test" || requests[1].Name != "b" || requests[1].Namespace != "test" { - t.Fatalf("Wrong pod requests returned: %#+v", requests) - } -} diff --git a/pkg/controllers/resources/configmaps/util.go b/pkg/controllers/resources/configmaps/util.go deleted file mode 100644 index c33e47f76..000000000 --- a/pkg/controllers/resources/configmaps/util.go +++ /dev/null @@ -1,62 +0,0 @@ -package configmaps - -import ( - "github.com/loft-sh/vcluster/pkg/util/translate" - corev1 "k8s.io/api/core/v1" -) - -func configNamesFromPod(pod *corev1.Pod) []string { - configMaps := []string{} - for _, c := range pod.Spec.Containers { - configMaps = append(configMaps, configNamesFromContainer(pod.Namespace, &c)...) - } - for _, c := range pod.Spec.InitContainers { - configMaps = append(configMaps, configNamesFromContainer(pod.Namespace, &c)...) - } - for _, c := range pod.Spec.EphemeralContainers { - configMaps = append(configMaps, configNamesFromEphemeralContainer(pod.Namespace, &c)...) - } - for i := range pod.Spec.Volumes { - if pod.Spec.Volumes[i].ConfigMap != nil { - configMaps = append(configMaps, pod.Namespace+"/"+pod.Spec.Volumes[i].ConfigMap.Name) - } - if pod.Spec.Volumes[i].Projected != nil { - for j := range pod.Spec.Volumes[i].Projected.Sources { - if pod.Spec.Volumes[i].Projected.Sources[j].ConfigMap != nil { - configMaps = append(configMaps, pod.Namespace+"/"+pod.Spec.Volumes[i].Projected.Sources[j].ConfigMap.Name) - } - } - } - } - return translate.UniqueSlice(configMaps) -} - -func configNamesFromContainer(namespace string, container *corev1.Container) []string { - configNames := []string{} - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name != "" { - configNames = append(configNames, namespace+"/"+env.ValueFrom.ConfigMapKeyRef.Name) - } - } - for _, from := range container.EnvFrom { - if from.ConfigMapRef != nil && from.ConfigMapRef.Name != "" { - configNames = append(configNames, namespace+"/"+from.ConfigMapRef.Name) - } - } - return configNames -} - -func configNamesFromEphemeralContainer(namespace string, container *corev1.EphemeralContainer) []string { - configNames := []string{} - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name != "" { - configNames = append(configNames, namespace+"/"+env.ValueFrom.ConfigMapKeyRef.Name) - } - } - for _, from := range container.EnvFrom { - if from.ConfigMapRef != nil && from.ConfigMapRef.Name != "" { - configNames = append(configNames, namespace+"/"+from.ConfigMapRef.Name) - } - } - return configNames -} diff --git a/pkg/controllers/resources/csidrivers/syncer.go b/pkg/controllers/resources/csidrivers/syncer.go index e6598489e..825683f2a 100644 --- a/pkg/controllers/resources/csidrivers/syncer.go +++ b/pkg/controllers/resources/csidrivers/syncer.go @@ -3,7 +3,7 @@ package csidrivers import ( "fmt" - "github.com/loft-sh/vcluster/pkg/mappings" + 
"github.com/loft-sh/vcluster/pkg/mappings/generic" "github.com/loft-sh/vcluster/pkg/patcher" "github.com/loft-sh/vcluster/pkg/syncer" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" @@ -16,8 +16,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func New(ctx *synccontext.RegisterContext) (syncertypes.Object, error) { - mapper, err := ctx.Mappings.ByGVK(mappings.CSIDrivers()) +func New(_ *synccontext.RegisterContext) (syncertypes.Object, error) { + mapper, err := generic.NewMirrorMapper(&storagev1.CSIDriver{}) if err != nil { return nil, err } diff --git a/pkg/controllers/resources/csinodes/syncer.go b/pkg/controllers/resources/csinodes/syncer.go index 527116ac0..f47fa1d42 100644 --- a/pkg/controllers/resources/csinodes/syncer.go +++ b/pkg/controllers/resources/csinodes/syncer.go @@ -3,7 +3,7 @@ package csinodes import ( "fmt" - "github.com/loft-sh/vcluster/pkg/mappings" + "github.com/loft-sh/vcluster/pkg/mappings/generic" "github.com/loft-sh/vcluster/pkg/patcher" "github.com/loft-sh/vcluster/pkg/syncer" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" @@ -19,7 +19,7 @@ import ( ) func New(ctx *synccontext.RegisterContext) (syncertypes.Object, error) { - mapper, err := ctx.Mappings.ByGVK(mappings.CSINodes()) + mapper, err := generic.NewMirrorMapper(&storagev1.CSINode{}) if err != nil { return nil, err } diff --git a/pkg/mappings/resources/csistoragecapacities.go b/pkg/controllers/resources/csistoragecapacities/mapper.go similarity index 53% rename from pkg/mappings/resources/csistoragecapacities.go rename to pkg/controllers/resources/csistoragecapacities/mapper.go index 72d26deee..c22d3232c 100644 --- a/pkg/mappings/resources/csistoragecapacities.go +++ b/pkg/controllers/resources/csistoragecapacities/mapper.go @@ -1,9 +1,10 @@ -package resources +package csistoragecapacities import ( - "github.com/loft-sh/vcluster/pkg/constants" + "fmt" + + "github.com/loft-sh/vcluster/pkg/mappings/generic" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" - "github.com/loft-sh/vcluster/pkg/util/clienthelper" "github.com/loft-sh/vcluster/pkg/util/translate" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -12,23 +13,45 @@ import ( ) func CreateCSIStorageCapacitiesMapper(ctx *synccontext.RegisterContext) (synccontext.Mapper, error) { - s := &csiStorageCapacitiesMapper{ + return generic.WithRecorder(&csiStorageCapacitiesMapper{ physicalClient: ctx.PhysicalManager.GetClient(), - } - err := ctx.PhysicalManager.GetFieldIndexer().IndexField(ctx, &storagev1.CSIStorageCapacity{}, constants.IndexByVirtualName, func(rawObj client.Object) []string { - return []string{s.HostToVirtual(ctx.ToSyncContext("csi storage capacity mapper"), types.NamespacedName{Name: rawObj.GetName(), Namespace: rawObj.GetNamespace()}, rawObj).Name} - }) - if err != nil { - return nil, err - } - - return s, nil + }), nil } type csiStorageCapacitiesMapper struct { physicalClient client.Client } +func (s *csiStorageCapacitiesMapper) Migrate(ctx *synccontext.RegisterContext, mapper synccontext.Mapper) error { + list := &storagev1.CSIStorageCapacityList{} + err := ctx.VirtualManager.GetClient().List(ctx, list) + if err != nil { + return fmt.Errorf("error listing csi storage capacities: %w", err) + } + + for _, val := range list.Items { + item := &val + + // this will try to translate and record the mapping + vName := types.NamespacedName{Name: item.Name, Namespace: item.Namespace} + pName := mapper.VirtualToHost(ctx.ToSyncContext("migrate-"+item.Kind), vName, item) + if pName.Name != "" { + 
nameMapping := synccontext.NameMapping{ + GroupVersionKind: s.GroupVersionKind(), + VirtualName: vName, + HostName: pName, + } + + err = ctx.Mappings.Store().RecordAndSaveReference(ctx, nameMapping, nameMapping) + if err != nil { + return fmt.Errorf("error saving reference in store: %w", err) + } + } + } + + return nil +} + func (s *csiStorageCapacitiesMapper) GroupVersionKind() schema.GroupVersionKind { return storagev1.SchemeGroupVersion.WithKind("CSIStorageCapacity") } @@ -37,7 +60,7 @@ func (s *csiStorageCapacitiesMapper) HostToVirtual(_ *synccontext.SyncContext, r return types.NamespacedName{Name: translate.SafeConcatName(req.Name, "x", req.Namespace), Namespace: "kube-system"} } -func (s *csiStorageCapacitiesMapper) VirtualToHost(ctx *synccontext.SyncContext, req types.NamespacedName, vObj client.Object) types.NamespacedName { +func (s *csiStorageCapacitiesMapper) VirtualToHost(_ *synccontext.SyncContext, _ types.NamespacedName, vObj client.Object) types.NamespacedName { // if the virtual object is annotated with the physical name and namespace, return that if vObj != nil { vAnnotations := vObj.GetAnnotations() @@ -49,17 +72,7 @@ func (s *csiStorageCapacitiesMapper) VirtualToHost(ctx *synccontext.SyncContext, } } - sc := &storagev1.CSIStorageCapacity{} - pObj := sc.DeepCopyObject().(client.Object) - err := clienthelper.GetByIndex(ctx, s.physicalClient, pObj, constants.IndexByVirtualName, req.Name) - if err != nil { - return types.NamespacedName{} - } - - return types.NamespacedName{ - Namespace: pObj.GetNamespace(), - Name: pObj.GetName(), - } + return types.NamespacedName{} } func (s *csiStorageCapacitiesMapper) IsManaged(*synccontext.SyncContext, client.Object) (bool, error) { diff --git a/pkg/controllers/resources/csistoragecapacities/syncer.go b/pkg/controllers/resources/csistoragecapacities/syncer.go index 772cd68a3..4839c89ac 100644 --- a/pkg/controllers/resources/csistoragecapacities/syncer.go +++ b/pkg/controllers/resources/csistoragecapacities/syncer.go @@ -4,7 +4,6 @@ import ( "context" "fmt" - "github.com/loft-sh/vcluster/pkg/mappings" "github.com/loft-sh/vcluster/pkg/patcher" "github.com/loft-sh/vcluster/pkg/syncer" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" @@ -24,7 +23,7 @@ import ( ) func New(ctx *synccontext.RegisterContext) (syncertypes.Object, error) { - mapper, err := ctx.Mappings.ByGVK(mappings.CSIStorageCapacities()) + mapper, err := CreateCSIStorageCapacitiesMapper(ctx) if err != nil { return nil, err } diff --git a/pkg/controllers/resources/csistoragecapacities/translate.go b/pkg/controllers/resources/csistoragecapacities/translate.go index eaff94c50..0b5057b72 100644 --- a/pkg/controllers/resources/csistoragecapacities/translate.go +++ b/pkg/controllers/resources/csistoragecapacities/translate.go @@ -7,6 +7,7 @@ import ( corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -60,7 +61,7 @@ func (s *csistoragecapacitySyncer) translateBackwards(ctx *synccontext.SyncConte // TranslateMetadata translates the object's metadata func (s *csistoragecapacitySyncer) virtualMetadata(ctx *synccontext.SyncContext, pObj *storagev1.CSIStorageCapacity) *storagev1.CSIStorageCapacity { - vObj := translate.CopyObjectWithName(pObj, mappings.HostToVirtual(ctx, pObj.GetName(), pObj.GetNamespace(), pObj, mappings.CSIStorageCapacities()), false) + vObj := translate.CopyObjectWithName(pObj, s.HostToVirtual(ctx, types.NamespacedName{Name: 
pObj.Name, Namespace: pObj.Namespace}, pObj), false) vObj.SetAnnotations(translate.HostAnnotations(pObj, nil)) vObj.SetLabels(translate.HostLabels(ctx, pObj, nil)) return vObj diff --git a/pkg/controllers/resources/endpoints/syncer_test.go b/pkg/controllers/resources/endpoints/syncer_test.go index e858230f1..d7a0921d9 100644 --- a/pkg/controllers/resources/endpoints/syncer_test.go +++ b/pkg/controllers/resources/endpoints/syncer_test.go @@ -48,13 +48,13 @@ func TestExistingEndpoints(t *testing.T) { } pEndpoints := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ - Name: translate.Default.HostName(vEndpoints.Name, vEndpoints.Namespace), + Name: translate.Default.HostName(nil, vEndpoints.Name, vEndpoints.Namespace), Namespace: "test", }, } pService := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: translate.Default.HostName(vEndpoints.Name, vEndpoints.Namespace), + Name: translate.Default.HostName(nil, vEndpoints.Name, vEndpoints.Namespace), Namespace: "test", Annotations: map[string]string{ translate.NameAnnotation: vEndpoints.Name, @@ -69,7 +69,7 @@ func TestExistingEndpoints(t *testing.T) { } expectedEndpoints := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ - Name: translate.Default.HostName(vEndpoints.Name, vEndpoints.Namespace), + Name: translate.Default.HostName(nil, vEndpoints.Name, vEndpoints.Namespace), Namespace: "test", Annotations: map[string]string{ translate.NameAnnotation: vEndpoints.Name, @@ -148,7 +148,7 @@ func TestSync(t *testing.T) { } syncedEndpoints := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ - Name: translate.Default.HostName(baseEndpoints.Name, baseEndpoints.Namespace), + Name: translate.Default.HostName(nil, baseEndpoints.Name, baseEndpoints.Namespace), Namespace: "test", Annotations: map[string]string{ translate.NameAnnotation: baseEndpoints.Name, diff --git a/pkg/controllers/resources/events/syncer.go b/pkg/controllers/resources/events/syncer.go index defd70584..3d83e646e 100644 --- a/pkg/controllers/resources/events/syncer.go +++ b/pkg/controllers/resources/events/syncer.go @@ -50,9 +50,22 @@ func (s *eventSyncer) Syncer() syncertypes.Sync[client.Object] { return syncer.ToGenericSyncer[*corev1.Event](s) } -func (s *eventSyncer) SyncToHost(_ *synccontext.SyncContext, _ *synccontext.SyncToHostEvent[*corev1.Event]) (ctrl.Result, error) { - // this should never happen since we ignore virtual events and don't handle objects we can't find - panic("unimplemented") +var _ syncertypes.OptionsProvider = &eventSyncer{} + +func (s *eventSyncer) Options() *syncertypes.Options { + return &syncertypes.Options{ + SkipMappingsRecording: true, + } +} + +func (s *eventSyncer) SyncToHost(ctx *synccontext.SyncContext, event *synccontext.SyncToHostEvent[*corev1.Event]) (ctrl.Result, error) { + // check if delete event + if event.IsDelete() { + return syncer.DeleteVirtualObject(ctx, event.Virtual, "host event was deleted") + } + + // just ignore, Kubernetes will clean them up + return ctrl.Result{}, nil } func (s *eventSyncer) Sync(ctx *synccontext.SyncContext, event *synccontext.SyncEvent[*corev1.Event]) (_ ctrl.Result, retErr error) { diff --git a/pkg/controllers/resources/events/syncer_test.go b/pkg/controllers/resources/events/syncer_test.go index c3483e802..ee6fc10ee 100644 --- a/pkg/controllers/resources/events/syncer_test.go +++ b/pkg/controllers/resources/events/syncer_test.go @@ -34,7 +34,7 @@ func TestSync(t *testing.T) { } pPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: translate.Default.HostName(vPod.Name, vPod.Namespace), + Name: 
translate.Default.HostName(nil, vPod.Name, vPod.Namespace), Namespace: syncertesting.DefaultTestTargetNamespace, }, } diff --git a/pkg/controllers/resources/ingressclasses/syncer.go b/pkg/controllers/resources/ingressclasses/syncer.go index f784cb56a..7e57382bb 100644 --- a/pkg/controllers/resources/ingressclasses/syncer.go +++ b/pkg/controllers/resources/ingressclasses/syncer.go @@ -3,7 +3,7 @@ package ingressclasses import ( "fmt" - "github.com/loft-sh/vcluster/pkg/mappings" + "github.com/loft-sh/vcluster/pkg/mappings/generic" "github.com/loft-sh/vcluster/pkg/patcher" "github.com/loft-sh/vcluster/pkg/syncer" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" @@ -16,8 +16,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func New(ctx *synccontext.RegisterContext) (syncertypes.Object, error) { - mapper, err := ctx.Mappings.ByGVK(mappings.IngressClasses()) +func New(_ *synccontext.RegisterContext) (syncertypes.Object, error) { + mapper, err := generic.NewMirrorMapper(&networkingv1.IngressClass{}) if err != nil { return nil, err } diff --git a/pkg/controllers/resources/ingresses/syncer.go b/pkg/controllers/resources/ingresses/syncer.go index f0fc48b5c..6adf9d956 100644 --- a/pkg/controllers/resources/ingresses/syncer.go +++ b/pkg/controllers/resources/ingresses/syncer.go @@ -2,7 +2,6 @@ package ingresses import ( "fmt" - "strings" "github.com/loft-sh/vcluster/pkg/mappings" "github.com/loft-sh/vcluster/pkg/patcher" @@ -10,7 +9,6 @@ import ( "github.com/loft-sh/vcluster/pkg/syncer/synccontext" "github.com/loft-sh/vcluster/pkg/syncer/translator" syncertypes "github.com/loft-sh/vcluster/pkg/syncer/types" - "github.com/loft-sh/vcluster/pkg/util/translate" networkingv1 "k8s.io/api/networking/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" ctrl "sigs.k8s.io/controller-runtime" @@ -80,49 +78,3 @@ func (s *ingressSyncer) SyncToVirtual(ctx *synccontext.SyncContext, event *syncc // virtual object is not here anymore, so we delete return syncer.DeleteHostObject(ctx, event.Host, "virtual object was deleted") } - -func SecretNamesFromIngress(ctx *synccontext.SyncContext, ingress *networkingv1.Ingress) []string { - secrets := []string{} - _, extraSecrets := translateIngressAnnotations(ctx, ingress.Annotations, ingress.Namespace) - secrets = append(secrets, extraSecrets...) 
- for _, tls := range ingress.Spec.TLS { - if tls.SecretName != "" { - secrets = append(secrets, ingress.Namespace+"/"+tls.SecretName) - } - } - return translate.UniqueSlice(secrets) -} - -var TranslateAnnotations = map[string]bool{ - "nginx.ingress.kubernetes.io/auth-secret": true, - "nginx.ingress.kubernetes.io/auth-tls-secret": true, - "nginx.ingress.kubernetes.io/proxy-ssl-secret": true, -} - -func translateIngressAnnotations(ctx *synccontext.SyncContext, annotations map[string]string, ingressNamespace string) (map[string]string, []string) { - foundSecrets := []string{} - newAnnotations := map[string]string{} - for k, v := range annotations { - if !TranslateAnnotations[k] { - newAnnotations[k] = v - continue - } - - splitted := strings.Split(annotations[k], "/") - if len(splitted) == 1 { // If value is only "secret" - secret := splitted[0] - foundSecrets = append(foundSecrets, ingressNamespace+"/"+secret) - newAnnotations[k] = mappings.VirtualToHostName(ctx, secret, ingressNamespace, mappings.Secrets()) - } else if len(splitted) == 2 { // If value is "namespace/secret" - namespace := splitted[0] - secret := splitted[1] - foundSecrets = append(foundSecrets, namespace+"/"+secret) - pName := mappings.VirtualToHost(ctx, secret, namespace, mappings.Secrets()) - newAnnotations[k] = pName.Namespace + "/" + pName.Name - } else { - newAnnotations[k] = v - } - } - - return newAnnotations, foundSecrets -} diff --git a/pkg/controllers/resources/ingresses/syncer_test.go b/pkg/controllers/resources/ingresses/syncer_test.go index 0dfc73089..9d3828565 100644 --- a/pkg/controllers/resources/ingresses/syncer_test.go +++ b/pkg/controllers/resources/ingresses/syncer_test.go @@ -3,8 +3,10 @@ package ingresses import ( "testing" + "github.com/loft-sh/vcluster/pkg/config" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" syncertesting "github.com/loft-sh/vcluster/pkg/syncer/testing" + testingutil "github.com/loft-sh/vcluster/pkg/util/testing" "gotest.tools/assert" "k8s.io/apimachinery/pkg/types" @@ -56,10 +58,10 @@ func TestSync(t *testing.T) { pBaseSpec := networkingv1.IngressSpec{ DefaultBackend: &networkingv1.IngressBackend{ Service: &networkingv1.IngressServiceBackend{ - Name: translate.Default.HostName("testservice", "test"), + Name: translate.Default.HostName(nil, "testservice", "test"), }, Resource: &corev1.TypedLocalObjectReference{ - Name: translate.Default.HostName("testbackendresource", "test"), + Name: translate.Default.HostName(nil, "testbackendresource", "test"), }, }, Rules: []networkingv1.IngressRule{ @@ -70,10 +72,10 @@ func TestSync(t *testing.T) { { Backend: networkingv1.IngressBackend{ Service: &networkingv1.IngressServiceBackend{ - Name: translate.Default.HostName("testbackendservice", "test"), + Name: translate.Default.HostName(nil, "testbackendservice", "test"), }, Resource: &corev1.TypedLocalObjectReference{ - Name: translate.Default.HostName("testbackendresource", "test"), + Name: translate.Default.HostName(nil, "testbackendresource", "test"), }, }, }, @@ -84,7 +86,7 @@ func TestSync(t *testing.T) { }, TLS: []networkingv1.IngressTLS{ { - SecretName: translate.Default.HostName("testtlssecret", "test"), + SecretName: translate.Default.HostName(nil, "testtlssecret", "test"), }, }, } @@ -103,7 +105,7 @@ func TestSync(t *testing.T) { Namespace: "test", } pObjectMeta := metav1.ObjectMeta{ - Name: translate.Default.HostName("testingress", "test"), + Name: translate.Default.HostName(nil, "testingress", "test"), Namespace: "test", Annotations: map[string]string{ translate.NameAnnotation: 
vObjectMeta.Name, @@ -157,7 +159,10 @@ func TestSync(t *testing.T) { Status: changedIngressStatus, } - syncertesting.RunTests(t, []*syncertesting.SyncTest{ + syncertesting.RunTestsWithContext(t, func(vConfig *config.VirtualClusterConfig, pClient *testingutil.FakeIndexClient, vClient *testingutil.FakeIndexClient) *synccontext.RegisterContext { + vConfig.Sync.ToHost.Ingresses.Enabled = true + return syncertesting.NewFakeRegisterContext(vConfig, pClient, vClient) + }, []*syncertesting.SyncTest{ { Name: "Create forward", InitialVirtualState: []runtime.Object{baseIngress.DeepCopy()}, @@ -333,8 +338,8 @@ func TestSync(t *testing.T) { Namespace: createdIngress.Namespace, Labels: createdIngress.Labels, Annotations: map[string]string{ - "nginx.ingress.kubernetes.io/auth-secret": translate.Default.HostName("my-secret", baseIngress.Namespace), - "nginx.ingress.kubernetes.io/auth-tls-secret": createdIngress.Namespace + "/" + translate.Default.HostName("my-secret", baseIngress.Namespace), + "nginx.ingress.kubernetes.io/auth-secret": translate.Default.HostName(nil, "my-secret", baseIngress.Namespace), + "nginx.ingress.kubernetes.io/auth-tls-secret": createdIngress.Namespace + "/" + translate.Default.HostName(nil, "my-secret", baseIngress.Namespace), "vcluster.loft.sh/managed-annotations": "nginx.ingress.kubernetes.io/auth-secret\nnginx.ingress.kubernetes.io/auth-tls-secret", "vcluster.loft.sh/object-name": baseIngress.Name, "vcluster.loft.sh/object-namespace": baseIngress.Namespace, @@ -410,7 +415,7 @@ func TestSync(t *testing.T) { Labels: createdIngress.Labels, Annotations: map[string]string{ "vcluster.loft.sh/managed-annotations": "alb.ingress.kubernetes.io/actions.ssl-redirect-x-test-x-suffix\nalb.ingress.kubernetes.io/actions.testservice-x-test-x-suffix\nnginx.ingress.kubernetes.io/auth-secret", - "nginx.ingress.kubernetes.io/auth-secret": translate.Default.HostName("my-secret", baseIngress.Namespace), + "nginx.ingress.kubernetes.io/auth-secret": translate.Default.HostName(nil, "my-secret", baseIngress.Namespace), "vcluster.loft.sh/object-name": baseIngress.Name, "vcluster.loft.sh/object-namespace": baseIngress.Namespace, translate.UIDAnnotation: "", diff --git a/pkg/controllers/resources/ingresses/translate.go b/pkg/controllers/resources/ingresses/translate.go index 7c53955cc..c11d53c79 100644 --- a/pkg/controllers/resources/ingresses/translate.go +++ b/pkg/controllers/resources/ingresses/translate.go @@ -5,6 +5,7 @@ import ( "strings" "github.com/loft-sh/vcluster/pkg/mappings" + "github.com/loft-sh/vcluster/pkg/mappings/resources" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" "github.com/loft-sh/vcluster/pkg/util/translate" networkingv1 "k8s.io/api/networking/v1" @@ -23,19 +24,19 @@ const ( func (s *ingressSyncer) translate(ctx *synccontext.SyncContext, vIngress *networkingv1.Ingress) (*networkingv1.Ingress, error) { newIngress := s.TranslateMetadata(ctx, vIngress).(*networkingv1.Ingress) newIngress.Spec = *translateSpec(ctx, vIngress.Namespace, &vIngress.Spec) - newIngress.Annotations, _ = translateIngressAnnotations(ctx, newIngress.Annotations, vIngress.Namespace) + newIngress.Annotations, _ = resources.TranslateIngressAnnotations(ctx, newIngress.Annotations, vIngress.Namespace) return newIngress, nil } func (s *ingressSyncer) TranslateMetadata(ctx *synccontext.SyncContext, vObj client.Object) client.Object { ingress := vObj.(*networkingv1.Ingress).DeepCopy() - updateAnnotations(ingress) + updateAnnotations(ctx, ingress) return translate.HostMetadata(ctx, vObj, s.VirtualToHost(ctx, 
types.NamespacedName{Name: vObj.GetName(), Namespace: vObj.GetNamespace()}, vObj)) } func (s *ingressSyncer) TranslateMetadataUpdate(ctx *synccontext.SyncContext, vObj client.Object, pObj client.Object) (annotations map[string]string, labels map[string]string) { vIngress := vObj.(*networkingv1.Ingress).DeepCopy() - updateAnnotations(vIngress) + updateAnnotations(ctx, vIngress) return translate.HostAnnotations(vIngress, pObj), translate.HostLabels(ctx, vIngress, pObj) } @@ -44,7 +45,7 @@ func (s *ingressSyncer) translateUpdate(ctx *synccontext.SyncContext, pObj, vObj var translatedAnnotations map[string]string translatedAnnotations, pObj.Labels = s.TranslateMetadataUpdate(ctx, vObj, pObj) - translatedAnnotations, _ = translateIngressAnnotations(ctx, translatedAnnotations, vObj.Namespace) + translatedAnnotations, _ = resources.TranslateIngressAnnotations(ctx, translatedAnnotations, vObj.Namespace) pObj.Annotations = translatedAnnotations } @@ -55,7 +56,7 @@ func translateSpec(ctx *synccontext.SyncContext, namespace string, vIngressSpec retSpec.DefaultBackend.Service.Name = mappings.VirtualToHostName(ctx, retSpec.DefaultBackend.Service.Name, namespace, mappings.Services()) } if retSpec.DefaultBackend.Resource != nil { - retSpec.DefaultBackend.Resource.Name = translate.Default.HostName(retSpec.DefaultBackend.Resource.Name, namespace) + retSpec.DefaultBackend.Resource.Name = translate.Default.HostName(ctx, retSpec.DefaultBackend.Resource.Name, namespace) } } @@ -66,7 +67,7 @@ func translateSpec(ctx *synccontext.SyncContext, namespace string, vIngressSpec retSpec.Rules[i].HTTP.Paths[j].Backend.Service.Name = mappings.VirtualToHostName(ctx, retSpec.Rules[i].HTTP.Paths[j].Backend.Service.Name, namespace, mappings.Services()) } if path.Backend.Resource != nil { - retSpec.Rules[i].HTTP.Paths[j].Backend.Resource.Name = translate.Default.HostName(retSpec.Rules[i].HTTP.Paths[j].Backend.Resource.Name, namespace) + retSpec.Rules[i].HTTP.Paths[j].Backend.Resource.Name = translate.Default.HostName(ctx, retSpec.Rules[i].HTTP.Paths[j].Backend.Resource.Name, namespace) } } } @@ -101,12 +102,12 @@ type actionPayload struct { } `json:"forwardConfig,omitempty"` } -func processAlbAnnotations(namespace string, k string, v string) (string, string) { +func processAlbAnnotations(ctx *synccontext.SyncContext, namespace string, k string, v string) (string, string) { if strings.HasPrefix(k, AlbActionsAnnotation) { // change k action := getActionOrConditionValue(k, ActionsSuffix) if !strings.Contains(k, "x-"+namespace+"-x") { - k = strings.Replace(k, action, translate.Default.HostName(action, namespace), 1) + k = strings.Replace(k, action, translate.Default.HostName(ctx, action, namespace), 1) } // change v var payload *actionPayload @@ -120,7 +121,7 @@ func processAlbAnnotations(namespace string, k string, v string) (string, string case string: if svcName != "" { if !strings.Contains(svcName, "x-"+namespace+"-x") { - targetGroup["serviceName"] = translate.Default.HostName(svcName, namespace) + targetGroup["serviceName"] = translate.Default.HostName(ctx, svcName, namespace) } else { targetGroup["serviceName"] = svcName } @@ -138,16 +139,16 @@ func processAlbAnnotations(namespace string, k string, v string) (string, string if strings.HasPrefix(k, AlbConditionAnnotation) { condition := getActionOrConditionValue(k, ConditionSuffix) if !strings.Contains(k, "x-"+namespace+"-x") { - k = strings.Replace(k, condition, translate.Default.HostName(condition, namespace), 1) + k = strings.Replace(k, condition, 
translate.Default.HostName(ctx, condition, namespace), 1) } } return k, v } -func updateAnnotations(ingress *networkingv1.Ingress) { +func updateAnnotations(ctx *synccontext.SyncContext, ingress *networkingv1.Ingress) { for k, v := range ingress.Annotations { delete(ingress.Annotations, k) - k, v = processAlbAnnotations(ingress.Namespace, k, v) + k, v = processAlbAnnotations(ctx, ingress.Namespace, k, v) ingress.Annotations[k] = v } } diff --git a/pkg/controllers/resources/networkpolicies/syncer.go b/pkg/controllers/resources/networkpolicies/syncer.go index d48173a47..194576ff9 100644 --- a/pkg/controllers/resources/networkpolicies/syncer.go +++ b/pkg/controllers/resources/networkpolicies/syncer.go @@ -3,12 +3,13 @@ package networkpolicies import ( "fmt" - "github.com/loft-sh/vcluster/pkg/mappings" + "github.com/loft-sh/vcluster/pkg/mappings/generic" "github.com/loft-sh/vcluster/pkg/patcher" "github.com/loft-sh/vcluster/pkg/syncer" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" "github.com/loft-sh/vcluster/pkg/syncer/translator" syncertypes "github.com/loft-sh/vcluster/pkg/syncer/types" + "github.com/loft-sh/vcluster/pkg/util/translate" networkingv1 "k8s.io/api/networking/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" ctrl "sigs.k8s.io/controller-runtime" @@ -16,7 +17,7 @@ import ( ) func New(ctx *synccontext.RegisterContext) (syncertypes.Object, error) { - mapper, err := ctx.Mappings.ByGVK(mappings.NetworkPolicies()) + mapper, err := generic.NewMapper(ctx, &networkingv1.NetworkPolicy{}, translate.Default.HostName) if err != nil { return nil, err } diff --git a/pkg/controllers/resources/networkpolicies/syncer_test.go b/pkg/controllers/resources/networkpolicies/syncer_test.go index dcb14e203..bdedba7cd 100644 --- a/pkg/controllers/resources/networkpolicies/syncer_test.go +++ b/pkg/controllers/resources/networkpolicies/syncer_test.go @@ -49,13 +49,13 @@ func TestSync(t *testing.T) { pBaseSpec := networkingv1.NetworkPolicySpec{ PodSelector: metav1.LabelSelector{ MatchLabels: map[string]string{ - translate.Default.HostLabel(nil, "mykey"): "mylabel", - translate.NamespaceLabel: vObjectMeta.Namespace, - translate.MarkerLabel: translate.VClusterName, + translate.Default.HostLabel(nil, "mykey", ""): "mylabel", + translate.NamespaceLabel: vObjectMeta.Namespace, + translate.MarkerLabel: translate.VClusterName, }, MatchExpressions: []metav1.LabelSelectorRequirement{ { - Key: translate.Default.HostLabel(nil, "secondkey"), + Key: translate.Default.HostLabel(nil, "secondkey", ""), Operator: metav1.LabelSelectorOpIn, Values: []string{"label-A", "label-B"}, }, @@ -63,7 +63,7 @@ func TestSync(t *testing.T) { }, } pObjectMeta := metav1.ObjectMeta{ - Name: translate.Default.HostName("testnetworkpolicy", "test"), + Name: translate.Default.HostName(nil, "testnetworkpolicy", "test"), Namespace: "test", Annotations: map[string]string{ translate.NameAnnotation: vObjectMeta.Name, @@ -124,9 +124,9 @@ func TestSync(t *testing.T) { Ports: somePorts, From: []networkingv1.NetworkPolicyPeer{{PodSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ - translate.Default.HostLabel(nil, "random-key"): "value", - translate.MarkerLabel: translate.VClusterName, - translate.NamespaceLabel: vnetworkPolicyWithPodSelectorNoNs.GetNamespace(), + translate.Default.HostLabel(nil, "random-key", ""): "value", + translate.MarkerLabel: translate.VClusterName, + translate.NamespaceLabel: vnetworkPolicyWithPodSelectorNoNs.GetNamespace(), }, MatchExpressions: []metav1.LabelSelectorRequirement{}, }}}, @@ -200,7 +200,7 @@ 
func TestSync(t *testing.T) { }, MatchExpressions: []metav1.LabelSelectorRequirement{ { - Key: translate.Default.HostLabel(nil, "pod-expr-key"), + Key: translate.Default.HostLabel(nil, "pod-expr-key", ""), Operator: metav1.LabelSelectorOpExists, Values: []string{"some-pod-key"}, }, diff --git a/pkg/controllers/resources/networkpolicies/translate.go b/pkg/controllers/resources/networkpolicies/translate.go index 0652b8c90..b7df6d584 100644 --- a/pkg/controllers/resources/networkpolicies/translate.go +++ b/pkg/controllers/resources/networkpolicies/translate.go @@ -54,7 +54,7 @@ func translateSpec(ctx *synccontext.SyncContext, spec *networkingv1.NetworkPolic panic("Multi-Namespace Mode not supported for network policies yet!") } - if translatedLabelSelector := translate.HostLabelSelector(ctx, &spec.PodSelector); translatedLabelSelector != nil { + if translatedLabelSelector := translate.HostLabelSelector(ctx, &spec.PodSelector, namespace); translatedLabelSelector != nil { outSpec.PodSelector = *translatedLabelSelector if outSpec.PodSelector.MatchLabels == nil { outSpec.PodSelector.MatchLabels = map[string]string{} @@ -76,7 +76,7 @@ func translateNetworkPolicyPeers(ctx *synccontext.SyncContext, peers []networkin out := []networkingv1.NetworkPolicyPeer{} for _, peer := range peers { newPeer := networkingv1.NetworkPolicyPeer{ - PodSelector: translate.HostLabelSelector(ctx, peer.PodSelector), + PodSelector: translate.HostLabelSelector(ctx, peer.PodSelector, namespace), NamespaceSelector: nil, // must be set to nil as all vcluster pods are in the same host namespace as the NetworkPolicy } if peer.IPBlock == nil { diff --git a/pkg/controllers/resources/persistentvolumeclaims/syncer_test.go b/pkg/controllers/resources/persistentvolumeclaims/syncer_test.go index 2866bcdb1..aacbac769 100644 --- a/pkg/controllers/resources/persistentvolumeclaims/syncer_test.go +++ b/pkg/controllers/resources/persistentvolumeclaims/syncer_test.go @@ -26,7 +26,7 @@ func TestSync(t *testing.T) { Namespace: "testns", } pObjectMeta := metav1.ObjectMeta{ - Name: translate.Default.HostName("testpvc", "testns"), + Name: translate.Default.HostName(nil, "testpvc", "testns"), Namespace: "test", Annotations: map[string]string{ translate.NameAnnotation: vObjectMeta.Name, diff --git a/pkg/controllers/resources/persistentvolumes/syncer.go b/pkg/controllers/resources/persistentvolumes/syncer.go index da162ee28..e1e42029f 100644 --- a/pkg/controllers/resources/persistentvolumes/syncer.go +++ b/pkg/controllers/resources/persistentvolumes/syncer.go @@ -250,7 +250,7 @@ func (s *persistentVolumeSyncer) shouldSync(ctx *synccontext.SyncContext, pObj * return true, nil, nil } - return translate.Default.IsTargetedNamespace(pObj.Spec.ClaimRef.Namespace) && pObj.Spec.PersistentVolumeReclaimPolicy == corev1.PersistentVolumeReclaimRetain, nil, nil + return translate.Default.IsTargetedNamespace(ctx, pObj.Spec.ClaimRef.Namespace) && pObj.Spec.PersistentVolumeReclaimPolicy == corev1.PersistentVolumeReclaimRetain, nil, nil } vPvc := &corev1.PersistentVolumeClaim{} @@ -262,7 +262,7 @@ func (s *persistentVolumeSyncer) shouldSync(ctx *synccontext.SyncContext, pObj * return true, nil, nil } - return translate.Default.IsTargetedNamespace(pObj.Spec.ClaimRef.Namespace) && pObj.Spec.PersistentVolumeReclaimPolicy == corev1.PersistentVolumeReclaimRetain, nil, nil + return translate.Default.IsTargetedNamespace(ctx, pObj.Spec.ClaimRef.Namespace) && pObj.Spec.PersistentVolumeReclaimPolicy == corev1.PersistentVolumeReclaimRetain, nil, nil } return true, vPvc, nil 
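// --- Illustrative aside (not part of the patch) -----------------------------------
// The recurring change in the hunks above is that the translate helpers now take the
// sync context explicitly (the tests pass nil) and that the label helpers additionally
// take the virtual namespace. A minimal sketch of the new call pattern follows; the
// helper signatures are assumed from this diff, not checked against the package, and
// hostNameAndSelector is a hypothetical function used only for illustration.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/loft-sh/vcluster/pkg/syncer/synccontext"
	"github.com/loft-sh/vcluster/pkg/util/translate"
)

// hostNameAndSelector shows how callers now thread the sync context and the
// virtual namespace through the naming and selector helpers.
func hostNameAndSelector(ctx *synccontext.SyncContext, name, namespace string, selector *metav1.LabelSelector) (string, *metav1.LabelSelector) {
	// previously: translate.Default.HostName(name, namespace)
	hostName := translate.Default.HostName(ctx, name, namespace)

	// previously: translate.HostLabelSelector(ctx, selector) — the namespace is now explicit
	hostSelector := translate.HostLabelSelector(ctx, selector, namespace)

	return hostName, hostSelector
}
// -----------------------------------------------------------------------------------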
diff --git a/pkg/controllers/resources/persistentvolumes/syncer_test.go b/pkg/controllers/resources/persistentvolumes/syncer_test.go index 2f52dc563..bea02abd5 100644 --- a/pkg/controllers/resources/persistentvolumes/syncer_test.go +++ b/pkg/controllers/resources/persistentvolumes/syncer_test.go @@ -32,7 +32,7 @@ func TestSync(t *testing.T) { }, } basePPvcReference := &corev1.ObjectReference{ - Name: translate.Default.HostName("testpvc", "test"), + Name: translate.Default.HostName(nil, "testpvc", "test"), Namespace: "test", ResourceVersion: syncertesting.FakeClientResourceVersion, } diff --git a/pkg/controllers/resources/poddisruptionbudgets/syncer.go b/pkg/controllers/resources/poddisruptionbudgets/syncer.go index 8be209b75..8d42c810b 100644 --- a/pkg/controllers/resources/poddisruptionbudgets/syncer.go +++ b/pkg/controllers/resources/poddisruptionbudgets/syncer.go @@ -3,12 +3,13 @@ package poddisruptionbudgets import ( "fmt" - "github.com/loft-sh/vcluster/pkg/mappings" + "github.com/loft-sh/vcluster/pkg/mappings/generic" "github.com/loft-sh/vcluster/pkg/patcher" "github.com/loft-sh/vcluster/pkg/syncer" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" "github.com/loft-sh/vcluster/pkg/syncer/translator" syncertypes "github.com/loft-sh/vcluster/pkg/syncer/types" + "github.com/loft-sh/vcluster/pkg/util/translate" policyv1 "k8s.io/api/policy/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" ctrl "sigs.k8s.io/controller-runtime" @@ -16,7 +17,7 @@ import ( ) func New(ctx *synccontext.RegisterContext) (syncertypes.Object, error) { - mapper, err := ctx.Mappings.ByGVK(mappings.PodDisruptionBudgets()) + mapper, err := generic.NewMapper(ctx, &policyv1.PodDisruptionBudget{}, translate.Default.HostName) if err != nil { return nil, err } diff --git a/pkg/controllers/resources/poddisruptionbudgets/syncer_test.go b/pkg/controllers/resources/poddisruptionbudgets/syncer_test.go index ccd8b36b3..c7f3eadc9 100644 --- a/pkg/controllers/resources/poddisruptionbudgets/syncer_test.go +++ b/pkg/controllers/resources/poddisruptionbudgets/syncer_test.go @@ -24,7 +24,7 @@ func TestSync(t *testing.T) { ResourceVersion: syncertesting.FakeClientResourceVersion, } pObjectMeta := metav1.ObjectMeta{ - Name: translate.Default.HostName("testPDB", vObjectMeta.Namespace), + Name: translate.Default.HostName(nil, "testPDB", vObjectMeta.Namespace), Namespace: "test", Annotations: map[string]string{ translate.NameAnnotation: vObjectMeta.Name, @@ -75,7 +75,7 @@ func TestSync(t *testing.T) { ObjectMeta: hostClusterSyncedPDB.ObjectMeta, Spec: policyv1.PodDisruptionBudgetSpec{ MaxUnavailable: vclusterUpdatedSelectorPDB.Spec.MaxUnavailable, - Selector: translate.HostLabelSelector(nil, vclusterUpdatedSelectorPDB.Spec.Selector), + Selector: translate.HostLabelSelector(nil, vclusterUpdatedSelectorPDB.Spec.Selector, ""), }, } diff --git a/pkg/controllers/resources/poddisruptionbudgets/translate.go b/pkg/controllers/resources/poddisruptionbudgets/translate.go index a9503e1e9..5354cb740 100644 --- a/pkg/controllers/resources/poddisruptionbudgets/translate.go +++ b/pkg/controllers/resources/poddisruptionbudgets/translate.go @@ -9,7 +9,7 @@ import ( func (s *pdbSyncer) translate(ctx *synccontext.SyncContext, vObj *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget { newPDB := translate.HostMetadata(ctx, vObj, s.VirtualToHost(ctx, types.NamespacedName{Name: vObj.GetName(), Namespace: vObj.GetNamespace()}, vObj)) - newPDB.Spec.Selector = translate.HostLabelSelector(ctx, newPDB.Spec.Selector) + newPDB.Spec.Selector = 
translate.HostLabelSelector(ctx, newPDB.Spec.Selector, vObj.Namespace) return newPDB } @@ -18,5 +18,5 @@ func (s *pdbSyncer) translateUpdate(ctx *synccontext.SyncContext, pObj, vObj *po pObj.Labels = translate.HostLabels(ctx, vObj, pObj) pObj.Spec.MaxUnavailable = vObj.Spec.MaxUnavailable pObj.Spec.MinAvailable = vObj.Spec.MinAvailable - pObj.Spec.Selector = translate.HostLabelSelector(ctx, vObj.Spec.Selector) + pObj.Spec.Selector = translate.HostLabelSelector(ctx, vObj.Spec.Selector, vObj.Namespace) } diff --git a/pkg/controllers/resources/pods/syncer.go b/pkg/controllers/resources/pods/syncer.go index 58222bbfe..847ce65ab 100644 --- a/pkg/controllers/resources/pods/syncer.go +++ b/pkg/controllers/resources/pods/syncer.go @@ -6,6 +6,7 @@ import ( "reflect" "time" + "github.com/loft-sh/vcluster/pkg/controllers/resources/pods/token" "github.com/loft-sh/vcluster/pkg/mappings" "github.com/loft-sh/vcluster/pkg/patcher" "github.com/loft-sh/vcluster/pkg/syncer" @@ -337,7 +338,7 @@ func (s *podSyncer) Sync(ctx *synccontext.SyncContext, event *synccontext.SyncEv }() // update the virtual pod if the spec has changed - err = s.podTranslator.Diff(ctx, event.Virtual, event.Host) + err = s.podTranslator.Diff(ctx, event) if err != nil { return ctrl.Result{}, err } @@ -346,17 +347,25 @@ func (s *podSyncer) Sync(ctx *synccontext.SyncContext, event *synccontext.SyncEv } func (s *podSyncer) SyncToVirtual(ctx *synccontext.SyncContext, event *synccontext.SyncToVirtualEvent[*corev1.Pod]) (_ ctrl.Result, retErr error) { - // virtual object is not here anymore, so we delete - return syncer.DeleteHostObject(ctx, event.Host, "virtual object was deleted") + if event.IsDelete() || event.Host.DeletionTimestamp != nil { + // virtual object is not here anymore, so we delete + return syncer.DeleteHostObject(ctx, event.Host, "virtual object was deleted") + } + + return ctrl.Result{}, nil } func setSATokenSecretAsOwner(ctx *synccontext.SyncContext, pClient client.Client, vObj, pObj *corev1.Pod) error { - secret, err := translatepods.GetSecretIfExists(ctx, pClient, vObj.Name, vObj.Namespace) - if err := translatepods.IgnoreAcceptableErrors(err); err != nil { + if !ctx.Config.Sync.ToHost.Pods.UseSecretsForSATokens { + return nil + } + + secret, err := token.GetSecretIfExists(ctx, pClient, vObj.Name, vObj.Namespace) + if err := token.IgnoreAcceptableErrors(err); err != nil { return err } else if secret != nil { // check if owner is vCluster service, if so, modify to pod as owner - err := translatepods.SetPodAsOwner(ctx, pObj, pClient, secret) + err := token.SetPodAsOwner(ctx, pObj, pClient, secret) if err != nil { return err } diff --git a/pkg/controllers/resources/pods/syncer_test.go b/pkg/controllers/resources/pods/syncer_test.go index a85dd4f70..1b51dcbea 100644 --- a/pkg/controllers/resources/pods/syncer_test.go +++ b/pkg/controllers/resources/pods/syncer_test.go @@ -20,11 +20,6 @@ import ( ) var ( - PodLogsVolumeName = "pod-logs" - LogsVolumeName = "logs" - KubeletPodVolumeName = "kubelet-pods" - HostpathPodName = "test-hostpaths" - pVclusterService = corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: syncertesting.DefaultTestVClusterServiceName, @@ -36,7 +31,7 @@ var ( } pDNSService = corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: translate.Default.HostName("kube-dns", "kube-system"), + Name: translate.Default.HostName(nil, "kube-dns", "kube-system"), Namespace: syncertesting.DefaultTestTargetNamespace, }, Spec: corev1.ServiceSpec{ @@ -59,7 +54,7 @@ func TestSyncTable(t *testing.T) { Namespace: 
vNamespace.Name, } pObjectMeta := metav1.ObjectMeta{ - Name: translate.Default.HostName("testpod", "testns"), + Name: translate.Default.HostName(nil, "testpod", "testns"), Namespace: "test", Annotations: map[string]string{ podtranslate.ClusterAutoScalerAnnotation: "false", @@ -344,7 +339,7 @@ func TestSync(t *testing.T) { translate.VClusterName = syncertesting.DefaultTestVClusterName pDNSService := corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: translate.Default.HostName("kube-dns", "kube-system"), + Name: translate.Default.HostName(nil, "kube-dns", "kube-system"), Namespace: syncertesting.DefaultTestTargetNamespace, }, Spec: corev1.ServiceSpec{ @@ -361,7 +356,7 @@ func TestSync(t *testing.T) { Namespace: vNamespace.Name, } pObjectMeta := metav1.ObjectMeta{ - Name: translate.Default.HostName("testpod", "testns"), + Name: translate.Default.HostName(nil, "testpod", "testns"), Namespace: "test", Annotations: map[string]string{ podtranslate.ClusterAutoScalerAnnotation: "false", @@ -462,7 +457,7 @@ func TestSync(t *testing.T) { hostToContainer := corev1.MountPropagationHostToContainer pHostPathPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: translate.Default.HostName(vHostPathPod.Name, syncertesting.DefaultTestCurrentNamespace), + Name: translate.Default.HostName(nil, vHostPathPod.Name, syncertesting.DefaultTestCurrentNamespace), Namespace: syncertesting.DefaultTestTargetNamespace, Annotations: map[string]string{ diff --git a/pkg/controllers/resources/pods/translate/sa_token_secret.go b/pkg/controllers/resources/pods/token/sa_token_secret.go similarity index 87% rename from pkg/controllers/resources/pods/translate/sa_token_secret.go rename to pkg/controllers/resources/pods/token/sa_token_secret.go index 05ba769ee..863ca7452 100644 --- a/pkg/controllers/resources/pods/translate/sa_token_secret.go +++ b/pkg/controllers/resources/pods/token/sa_token_secret.go @@ -1,4 +1,4 @@ -package translate +package token import ( "context" @@ -23,8 +23,8 @@ const ( var PodServiceAccountTokenSecretName string -func SecretNameFromPodName(ctx *synccontext.SyncContext, podName, namespace string) string { - return mappings.VirtualToHostName(ctx, fmt.Sprintf("%s-sa-token", podName), namespace, mappings.Secrets()) +func SecretNameFromPodName(ctx *synccontext.SyncContext, podName, namespace string) types.NamespacedName { + return mappings.VirtualToHost(ctx, fmt.Sprintf("%s-sa-token", podName), namespace, mappings.Secrets()) } var ErrNotFound = errors.New("translate: not found") @@ -38,10 +38,11 @@ func IgnoreAcceptableErrors(err error) error { } func GetSecretIfExists(ctx *synccontext.SyncContext, pClient client.Client, vPodName, vNamespace string) (*corev1.Secret, error) { + secretName := SecretNameFromPodName(ctx, vPodName, vNamespace) secret := &corev1.Secret{} err := pClient.Get(ctx, types.NamespacedName{ - Name: SecretNameFromPodName(ctx, vPodName, vNamespace), - Namespace: translate.Default.HostNamespace(vNamespace), + Name: secretName.Name, + Namespace: secretName.Namespace, }, secret) if err != nil { if kerrors.IsNotFound(err) { @@ -69,10 +70,11 @@ func SATokenSecret(ctx *synccontext.SyncContext, pClient client.Client, vPod *co } // create to secret with the given token + secretName := SecretNameFromPodName(ctx, vPod.Name, vPod.Namespace) secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: SecretNameFromPodName(ctx, vPod.Name, vPod.Namespace), - Namespace: translate.Default.HostNamespace(vPod.Namespace), + Name: secretName.Name, + Namespace: secretName.Namespace, Annotations: 
map[string]string{ translate.SkipBackSyncInMultiNamespaceMode: "true", diff --git a/pkg/controllers/resources/pods/translate/conditions.go b/pkg/controllers/resources/pods/translate/conditions.go deleted file mode 100644 index 33615e57e..000000000 --- a/pkg/controllers/resources/pods/translate/conditions.go +++ /dev/null @@ -1,65 +0,0 @@ -package translate - -import ( - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" -) - -var coreConditions = map[string]bool{ - string(corev1.PodReady): true, - string(corev1.ContainersReady): true, - string(corev1.PodInitialized): true, - string(corev1.PodScheduled): true, - "PodReadyToStartContainers": true, -} - -// updateConditions adds/updates new/old conditions in the physical Pod -func updateConditions(pPod, vPod *corev1.Pod, oldVPodStatus *corev1.PodStatus) { - // check if newConditions need to be added. - for _, vCondition := range oldVPodStatus.Conditions { - if isCustomCondition(vCondition) { - found := false - for index, pCondition := range pPod.Status.Conditions { - // found condition in pPod with same type, updating foundCondition - if vCondition.Type == pCondition.Type { - found = true - if !equality.Semantic.DeepEqual(pCondition, vCondition) { - pPod.Status.Conditions[index] = vCondition - } - break - } - } - if !found { - pPod.Status.Conditions = append(pPod.Status.Conditions, vCondition) - } - } - } - - // don't sync custom conditions up - newConditions := []corev1.PodCondition{} - for _, pCondition := range pPod.Status.Conditions { - if isCustomCondition(pCondition) { - found := false - for _, vCondition := range oldVPodStatus.Conditions { - if pCondition.Type == vCondition.Type { - found = true - break - } - } - if !found { - // don't sync custom conditions we don't have on the virtual pod - continue - } - } - - newConditions = append(newConditions, pCondition) - } - - vPod.Status.Conditions = newConditions -} - -// Check for custom condition -func isCustomCondition(condition corev1.PodCondition) bool { - // if not a default condition, we assume it's a custom condition - return !coreConditions[string(condition.Type)] -} diff --git a/pkg/controllers/resources/pods/translate/conditions_test.go b/pkg/controllers/resources/pods/translate/conditions_test.go deleted file mode 100644 index 105c535f2..000000000 --- a/pkg/controllers/resources/pods/translate/conditions_test.go +++ /dev/null @@ -1,233 +0,0 @@ -package translate - -import ( - "fmt" - "testing" - - "gotest.tools/assert" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type conditionsTestCase struct { - name string - - pPod *corev1.Pod - vPod *corev1.Pod - - expectedPhysicalConditions []corev1.PodCondition - expectedVirtualConditions []corev1.PodCondition -} - -func TestUpdateConditions(t *testing.T) { - testCases := []conditionsTestCase{ - { - name: "simple", - - pPod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ptest", - Namespace: "ptest", - }, - Status: corev1.PodStatus{ - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodScheduled, - Status: "False", - Reason: "my-reason", - }, - }, - }, - }, - vPod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vtest", - Namespace: "vtest", - }, - }, - - expectedPhysicalConditions: []corev1.PodCondition{ - { - Type: corev1.PodScheduled, - Status: "False", - Reason: "my-reason", - }, - }, - - expectedVirtualConditions: []corev1.PodCondition{ - { - Type: corev1.PodScheduled, - Status: "False", - Reason: "my-reason", - }, - }, - }, - { - name: 
"keep-custom-vcondition", - - pPod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ptest", - Namespace: "ptest", - }, - Status: corev1.PodStatus{ - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodScheduled, - Status: "False", - Reason: "my-reason", - }, - }, - }, - }, - - vPod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vtest", - Namespace: "vtest", - }, - Status: corev1.PodStatus{ - Conditions: []corev1.PodCondition{ - { - Type: "custom", - Status: "True", - }, - }, - }, - }, - - expectedPhysicalConditions: []corev1.PodCondition{ - { - Type: corev1.PodScheduled, - Status: "False", - Reason: "my-reason", - }, - { - Type: "custom", - Status: "True", - }, - }, - - expectedVirtualConditions: []corev1.PodCondition{ - { - Type: corev1.PodScheduled, - Status: "False", - Reason: "my-reason", - }, - { - Type: "custom", - Status: "True", - }, - }, - }, - { - name: "dont-sync-custom-condition-up", - - pPod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ptest", - Namespace: "ptest", - }, - Status: corev1.PodStatus{ - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodScheduled, - Status: "False", - Reason: "my-reason", - }, - { - Type: "custom", - Status: "True", - }, - }, - }, - }, - - vPod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vtest", - Namespace: "vtest", - }, - Status: corev1.PodStatus{ - Conditions: []corev1.PodCondition{}, - }, - }, - - expectedPhysicalConditions: []corev1.PodCondition{ - { - Type: corev1.PodScheduled, - Status: "False", - Reason: "my-reason", - }, - { - Type: "custom", - Status: "True", - }, - }, - - expectedVirtualConditions: []corev1.PodCondition{ - { - Type: corev1.PodScheduled, - Status: "False", - Reason: "my-reason", - }, - }, - }, - { - name: "update-custom-condition", - - pPod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ptest", - Namespace: "ptest", - }, - Status: corev1.PodStatus{ - Conditions: []corev1.PodCondition{ - { - Type: "custom", - Status: "False", - Reason: "my-reason", - }, - }, - }, - }, - - vPod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "vtest", - Namespace: "vtest", - }, - Status: corev1.PodStatus{ - Conditions: []corev1.PodCondition{ - { - Type: "custom", - Status: "True", - }, - }, - }, - }, - - expectedPhysicalConditions: []corev1.PodCondition{ - { - Type: "custom", - Status: "True", - }, - }, - - expectedVirtualConditions: []corev1.PodCondition{ - { - Type: "custom", - Status: "True", - }, - }, - }, - } - - for _, testCase := range testCases { - fmt.Println(testCase.name) - - updateConditions(testCase.pPod, testCase.vPod, testCase.vPod.Status.DeepCopy()) - assert.DeepEqual(t, testCase.vPod.Status.Conditions, testCase.expectedVirtualConditions) - assert.DeepEqual(t, testCase.pPod.Status.Conditions, testCase.expectedPhysicalConditions) - } -} diff --git a/pkg/controllers/resources/pods/translate/diff.go b/pkg/controllers/resources/pods/translate/diff.go index 5771172a6..105b1c9c7 100644 --- a/pkg/controllers/resources/pods/translate/diff.go +++ b/pkg/controllers/resources/pods/translate/diff.go @@ -11,12 +11,15 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func (t *translator) Diff(ctx *synccontext.SyncContext, vPod, pPod *corev1.Pod) error { +func (t *translator) Diff(ctx *synccontext.SyncContext, event *synccontext.SyncEvent[*corev1.Pod]) error { + // sync conditions + event.TargetObject().Status.Conditions = event.SourceObject().Status.Conditions + // has status changed? 
- oldVPodStatus := vPod.Status.DeepCopy() + vPod := event.Virtual + pPod := event.Host vPod.Status = *pPod.Status.DeepCopy() stripInjectedSidecarContainers(vPod, pPod) - updateConditions(pPod, vPod, oldVPodStatus) // get Namespace resource in order to have access to its labels vNamespace := &corev1.Namespace{} @@ -29,7 +32,8 @@ func (t *translator) Diff(ctx *synccontext.SyncContext, vPod, pPod *corev1.Pod) t.calcSpecDiff(pPod, vPod) // check annotations - updatedAnnotations, updatedLabels := translate.HostAnnotations(vPod, pPod, getExcludedAnnotations(pPod)...), translate.HostLabels(ctx, vPod, pPod) + updatedAnnotations := translate.HostAnnotations(vPod, pPod, getExcludedAnnotations(pPod)...) + updatedLabels := translate.HostLabels(ctx, vPod, pPod) if updatedAnnotations == nil { updatedAnnotations = map[string]string{} } @@ -52,6 +56,7 @@ func (t *translator) Diff(ctx *synccontext.SyncContext, vPod, pPod *corev1.Pod) delete(updatedAnnotations, OwnerReferences) delete(updatedAnnotations, OwnerSetKind) } + // check pod and namespace labels for k, v := range vNamespace.GetLabels() { updatedLabels[translate.HostLabelNamespace(k)] = v @@ -71,7 +76,7 @@ func getExcludedAnnotations(pPod *corev1.Pod) []string { if source.DownwardAPI != nil { for _, item := range source.DownwardAPI.Items { if item.FieldRef != nil { - // check if its a label we have to rewrite + // check if it's a label we have to rewrite annotationsMatch := FieldPathAnnotationRegEx.FindStringSubmatch(item.FieldRef.FieldPath) if len(annotationsMatch) == 2 { if strings.HasPrefix(annotationsMatch[1], ServiceAccountTokenAnnotation) { diff --git a/pkg/controllers/resources/pods/translate/translator.go b/pkg/controllers/resources/pods/translate/translator.go index cd1d298c7..2af9a59a7 100644 --- a/pkg/controllers/resources/pods/translate/translator.go +++ b/pkg/controllers/resources/pods/translate/translator.go @@ -10,6 +10,7 @@ import ( "strconv" "strings" + satoken "github.com/loft-sh/vcluster/pkg/controllers/resources/pods/token" "github.com/loft-sh/vcluster/pkg/mappings" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" "github.com/loft-sh/vcluster/pkg/util/loghelper" @@ -51,7 +52,7 @@ var ( type Translator interface { Translate(ctx *synccontext.SyncContext, vPod *corev1.Pod, services []*corev1.Service, dnsIP string, kubeIP string) (*corev1.Pod, error) - Diff(ctx *synccontext.SyncContext, vPod, pPod *corev1.Pod) error + Diff(ctx *synccontext.SyncContext, event *synccontext.SyncEvent[*corev1.Pod]) error TranslateContainerEnv(ctx *synccontext.SyncContext, envVar []corev1.EnvVar, envFrom []corev1.EnvFromSource, vPod *corev1.Pod, serviceEnvMap map[string]string) ([]corev1.EnvVar, []corev1.EnvFromSource, error) } @@ -404,7 +405,7 @@ func (t *translator) translateVolumes(ctx *synccontext.SyncContext, pPod *corev1 } if pPod.Spec.Volumes[i].DownwardAPI != nil { for j := range pPod.Spec.Volumes[i].DownwardAPI.Items { - translateFieldRef(ctx, pPod.Spec.Volumes[i].DownwardAPI.Items[j].FieldRef) + translateFieldRef(ctx, pPod.Spec.Volumes[i].DownwardAPI.Items[j].FieldRef, vPod.Namespace) } } if pPod.Spec.Volumes[i].ISCSI != nil && pPod.Spec.Volumes[i].ISCSI.SecretRef != nil { @@ -441,7 +442,7 @@ func (t *translator) translateVolumes(ctx *synccontext.SyncContext, pPod *corev1 // create the service account token holder secret if necessary if len(tokenSecrets) > 0 { - err := SATokenSecret(ctx, t.pClient, vPod, tokenSecrets) + err := satoken.SATokenSecret(ctx, t.pClient, vPod, tokenSecrets) if err != nil { return fmt.Errorf("create sa token 
secret: %w", err) } @@ -470,7 +471,7 @@ func (t *translator) translateProjectedVolume( } if projectedVolume.Sources[i].DownwardAPI != nil { for j := range projectedVolume.Sources[i].DownwardAPI.Items { - translateFieldRef(ctx, projectedVolume.Sources[i].DownwardAPI.Items[j].FieldRef) + translateFieldRef(ctx, projectedVolume.Sources[i].DownwardAPI.Items[j].FieldRef, vPod.Namespace) } } if projectedVolume.Sources[i].ServiceAccountToken != nil { @@ -521,7 +522,7 @@ func (t *translator) translateProjectedVolume( // rewrite projected volume to use sources as secret projectedVolume.Sources[i].Secret = &corev1.SecretProjection{ LocalObjectReference: corev1.LocalObjectReference{ - Name: SecretNameFromPodName(ctx, vPod.Name, vPod.Namespace), + Name: satoken.SecretNameFromPodName(ctx, vPod.Name, vPod.Namespace).Name, }, Items: []corev1.KeyToPath{ { @@ -569,7 +570,7 @@ func (t *translator) translateProjectedVolume( return nil } -func translateFieldRef(ctx *synccontext.SyncContext, fieldSelector *corev1.ObjectFieldSelector) { +func translateFieldRef(ctx *synccontext.SyncContext, fieldSelector *corev1.ObjectFieldSelector, vNamespace string) { if fieldSelector == nil { return } @@ -577,7 +578,7 @@ func translateFieldRef(ctx *synccontext.SyncContext, fieldSelector *corev1.Objec // check if its a label we have to rewrite labelsMatch := FieldPathLabelRegEx.FindStringSubmatch(fieldSelector.FieldPath) if len(labelsMatch) == 2 { - fieldSelector.FieldPath = "metadata.labels['" + translate.Default.HostLabel(ctx, labelsMatch[1]) + "']" + fieldSelector.FieldPath = "metadata.labels['" + translate.Default.HostLabel(ctx, labelsMatch[1], vNamespace) + "']" return } @@ -598,7 +599,7 @@ func translateFieldRef(ctx *synccontext.SyncContext, fieldSelector *corev1.Objec func (t *translator) TranslateContainerEnv(ctx *synccontext.SyncContext, envVar []corev1.EnvVar, envFrom []corev1.EnvFromSource, vPod *corev1.Pod, serviceEnvMap map[string]string) ([]corev1.EnvVar, []corev1.EnvFromSource, error) { envNameMap := make(map[string]struct{}) for j, env := range envVar { - translateDownwardAPI(ctx, &envVar[j]) + translateDownwardAPI(ctx, &envVar[j], vPod.Namespace) if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name != "" { envVar[j].ValueFrom.ConfigMapKeyRef.Name = mappings.VirtualToHostName(ctx, envVar[j].ValueFrom.ConfigMapKeyRef.Name, vPod.Namespace, mappings.ConfigMaps()) } @@ -639,14 +640,14 @@ func (t *translator) TranslateContainerEnv(ctx *synccontext.SyncContext, envVar return envVar, envFrom, nil } -func translateDownwardAPI(ctx *synccontext.SyncContext, env *corev1.EnvVar) { +func translateDownwardAPI(ctx *synccontext.SyncContext, env *corev1.EnvVar, vNamespace string) { if env.ValueFrom == nil { return } if env.ValueFrom.FieldRef == nil { return } - translateFieldRef(ctx, env.ValueFrom.FieldRef) + translateFieldRef(ctx, env.ValueFrom.FieldRef, vNamespace) } func (t *translator) translateDNSConfig(pPod *corev1.Pod, vPod *corev1.Pod, nameServer string) { @@ -747,7 +748,7 @@ func (t *translator) translatePodAffinityTerm(ctx *synccontext.SyncContext, vPod // We never select pods that are not in the vcluster namespace on the host, so we will // omit Namespaces and namespaceSelector here newAffinityTerm := corev1.PodAffinityTerm{ - LabelSelector: translate.HostLabelSelector(ctx, term.LabelSelector), + LabelSelector: translate.HostLabelSelector(ctx, term.LabelSelector, vPod.Namespace), TopologyKey: term.TopologyKey, } @@ -804,7 +805,7 @@ func (t *translator) 
translatePodAffinityTerm(ctx *synccontext.SyncContext, vPod func translateTopologySpreadConstraints(ctx *synccontext.SyncContext, vPod *corev1.Pod, pPod *corev1.Pod) { for i := range pPod.Spec.TopologySpreadConstraints { - pPod.Spec.TopologySpreadConstraints[i].LabelSelector = translate.HostLabelSelector(ctx, pPod.Spec.TopologySpreadConstraints[i].LabelSelector) + pPod.Spec.TopologySpreadConstraints[i].LabelSelector = translate.HostLabelSelector(ctx, pPod.Spec.TopologySpreadConstraints[i].LabelSelector, vPod.Namespace) // make sure we only select pods in the current namespace if pPod.Spec.TopologySpreadConstraints[i].LabelSelector != nil { diff --git a/pkg/controllers/resources/pods/translate/translator_test.go b/pkg/controllers/resources/pods/translate/translator_test.go index e0f8b5e36..a7b791824 100644 --- a/pkg/controllers/resources/pods/translate/translator_test.go +++ b/pkg/controllers/resources/pods/translate/translator_test.go @@ -28,7 +28,7 @@ func TestPodAffinityTermsTranslation(t *testing.T) { } basicSelectorTranslatedWithMarker := &metav1.LabelSelector{MatchLabels: map[string]string{}} for k, v := range basicSelector.MatchLabels { - basicSelectorTranslatedWithMarker.MatchLabels[translate.Default.HostLabel(nil, k)] = v + basicSelectorTranslatedWithMarker.MatchLabels[translate.Default.HostLabel(nil, k, "")] = v } basicSelectorTranslatedWithMarker.MatchLabels[translate.MarkerLabel] = translate.VClusterName @@ -199,7 +199,7 @@ func TestVolumeTranslation(t *testing.T) { Name: "eph-vol", VolumeSource: corev1.VolumeSource{ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: translate.Default.HostName("pod-name-eph-vol", "test-ns"), + ClaimName: translate.Default.HostName(nil, "pod-name-eph-vol", "test-ns"), }, Ephemeral: nil, }, diff --git a/pkg/controllers/resources/pods/util.go b/pkg/controllers/resources/pods/util.go deleted file mode 100644 index 21026b5df..000000000 --- a/pkg/controllers/resources/pods/util.go +++ /dev/null @@ -1,108 +0,0 @@ -package pods - -import ( - "github.com/loft-sh/vcluster/pkg/syncer/synccontext" - "github.com/loft-sh/vcluster/pkg/util/translate" - - podtranslate "github.com/loft-sh/vcluster/pkg/controllers/resources/pods/translate" - - corev1 "k8s.io/api/core/v1" -) - -func SecretNamesFromPod(ctx *synccontext.SyncContext, pod *corev1.Pod) []string { - secrets := []string{} - for _, c := range pod.Spec.Containers { - secrets = append(secrets, SecretNamesFromContainer(pod.Namespace, &c)...) - } - for _, c := range pod.Spec.InitContainers { - secrets = append(secrets, SecretNamesFromContainer(pod.Namespace, &c)...) - } - for _, c := range pod.Spec.EphemeralContainers { - secrets = append(secrets, SecretNamesFromEphemeralContainer(pod.Namespace, &c)...) - } - for i := range pod.Spec.ImagePullSecrets { - secrets = append(secrets, pod.Namespace+"/"+pod.Spec.ImagePullSecrets[i].Name) - } - secrets = append(secrets, SecretNamesFromVolumes(ctx, pod)...) 
- return translate.UniqueSlice(secrets) -} - -func SecretNamesFromVolumes(ctx *synccontext.SyncContext, pod *corev1.Pod) []string { - secrets := []string{} - for i := range pod.Spec.Volumes { - if pod.Spec.Volumes[i].Secret != nil { - secrets = append(secrets, pod.Namespace+"/"+pod.Spec.Volumes[i].Secret.SecretName) - } - if pod.Spec.Volumes[i].Projected != nil { - for j := range pod.Spec.Volumes[i].Projected.Sources { - if pod.Spec.Volumes[i].Projected.Sources[j].Secret != nil { - secrets = append(secrets, pod.Namespace+"/"+pod.Spec.Volumes[i].Projected.Sources[j].Secret.Name) - } - - // check if projected volume source is a serviceaccount and in such a case - // we re-write it as a secret too, handle accordingly - if pod.Spec.Volumes[i].Projected.Sources[j].ServiceAccountToken != nil { - secrets = append(secrets, pod.Namespace+"/"+podtranslate.SecretNameFromPodName(ctx, pod.Name, pod.Namespace)) - } - } - } - if pod.Spec.Volumes[i].ISCSI != nil && pod.Spec.Volumes[i].ISCSI.SecretRef != nil { - secrets = append(secrets, pod.Namespace+"/"+pod.Spec.Volumes[i].ISCSI.SecretRef.Name) - } - if pod.Spec.Volumes[i].RBD != nil && pod.Spec.Volumes[i].RBD.SecretRef != nil { - secrets = append(secrets, pod.Namespace+"/"+pod.Spec.Volumes[i].RBD.SecretRef.Name) - } - if pod.Spec.Volumes[i].FlexVolume != nil && pod.Spec.Volumes[i].FlexVolume.SecretRef != nil { - secrets = append(secrets, pod.Namespace+"/"+pod.Spec.Volumes[i].FlexVolume.SecretRef.Name) - } - if pod.Spec.Volumes[i].Cinder != nil && pod.Spec.Volumes[i].Cinder.SecretRef != nil { - secrets = append(secrets, pod.Namespace+"/"+pod.Spec.Volumes[i].Cinder.SecretRef.Name) - } - if pod.Spec.Volumes[i].CephFS != nil && pod.Spec.Volumes[i].CephFS.SecretRef != nil { - secrets = append(secrets, pod.Namespace+"/"+pod.Spec.Volumes[i].CephFS.SecretRef.Name) - } - if pod.Spec.Volumes[i].AzureFile != nil && pod.Spec.Volumes[i].AzureFile.SecretName != "" { - secrets = append(secrets, pod.Namespace+"/"+pod.Spec.Volumes[i].AzureFile.SecretName) - } - if pod.Spec.Volumes[i].ScaleIO != nil && pod.Spec.Volumes[i].ScaleIO.SecretRef != nil { - secrets = append(secrets, pod.Namespace+"/"+pod.Spec.Volumes[i].ScaleIO.SecretRef.Name) - } - if pod.Spec.Volumes[i].StorageOS != nil && pod.Spec.Volumes[i].StorageOS.SecretRef != nil { - secrets = append(secrets, pod.Namespace+"/"+pod.Spec.Volumes[i].StorageOS.SecretRef.Name) - } - if pod.Spec.Volumes[i].CSI != nil && pod.Spec.Volumes[i].CSI.NodePublishSecretRef != nil { - secrets = append(secrets, pod.Namespace+"/"+pod.Spec.Volumes[i].CSI.NodePublishSecretRef.Name) - } - } - return secrets -} - -func SecretNamesFromContainer(namespace string, container *corev1.Container) []string { - secrets := []string{} - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name != "" { - secrets = append(secrets, namespace+"/"+env.ValueFrom.SecretKeyRef.Name) - } - } - for _, from := range container.EnvFrom { - if from.SecretRef != nil && from.SecretRef.Name != "" { - secrets = append(secrets, namespace+"/"+from.SecretRef.Name) - } - } - return secrets -} - -func SecretNamesFromEphemeralContainer(namespace string, container *corev1.EphemeralContainer) []string { - secrets := []string{} - for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name != "" { - secrets = append(secrets, namespace+"/"+env.ValueFrom.SecretKeyRef.Name) - } - } - for _, from := range container.EnvFrom { - if 
from.SecretRef != nil && from.SecretRef.Name != "" { - secrets = append(secrets, namespace+"/"+from.SecretRef.Name) - } - } - return secrets -} diff --git a/pkg/controllers/resources/secrets/syncer.go b/pkg/controllers/resources/secrets/syncer.go index 8849a3aa0..7fb200a83 100644 --- a/pkg/controllers/resources/secrets/syncer.go +++ b/pkg/controllers/resources/secrets/syncer.go @@ -1,10 +1,9 @@ package secrets import ( - "context" "fmt" - "strings" + "github.com/loft-sh/vcluster/pkg/constants" "github.com/loft-sh/vcluster/pkg/mappings" "github.com/loft-sh/vcluster/pkg/patcher" "github.com/loft-sh/vcluster/pkg/syncer" @@ -12,20 +11,12 @@ import ( "github.com/loft-sh/vcluster/pkg/syncer/translator" syncertypes "github.com/loft-sh/vcluster/pkg/syncer/types" "github.com/loft-sh/vcluster/pkg/util/translate" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/handler" - - "github.com/loft-sh/vcluster/pkg/constants" - "github.com/loft-sh/vcluster/pkg/controllers/resources/ingresses" - "github.com/loft-sh/vcluster/pkg/controllers/resources/pods" - "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -63,39 +54,11 @@ func (s *secretSyncer) Syncer() syncertypes.Sync[client.Object] { return syncer.ToGenericSyncer[*corev1.Secret](s) } -var _ syncertypes.IndicesRegisterer = &secretSyncer{} - -func (s *secretSyncer) RegisterIndices(ctx *synccontext.RegisterContext) error { - if ctx.Config.Sync.ToHost.Ingresses.Enabled { - err := ctx.VirtualManager.GetFieldIndexer().IndexField(ctx, &networkingv1.Ingress{}, constants.IndexByIngressSecret, func(rawObj client.Object) []string { - return ingresses.SecretNamesFromIngress(ctx.ToSyncContext("secret-indexer"), rawObj.(*networkingv1.Ingress)) - }) - if err != nil { - return err - } - } - - err := ctx.VirtualManager.GetFieldIndexer().IndexField(ctx, &corev1.Pod{}, constants.IndexByPodSecret, func(rawObj client.Object) []string { - return pods.SecretNamesFromPod(ctx.ToSyncContext("secret-indexer"), rawObj.(*corev1.Pod)) - }) - if err != nil { - return err - } - - return nil -} - var _ syncertypes.ControllerModifier = &secretSyncer{} -func (s *secretSyncer) ModifyController(registerCtx *synccontext.RegisterContext, builder *builder.Builder) (*builder.Builder, error) { - if s.includeIngresses { - builder = builder.Watches(&networkingv1.Ingress{}, handler.EnqueueRequestsFromMapFunc(func(_ context.Context, object client.Object) []reconcile.Request { - return mapIngresses(registerCtx.ToSyncContext("secret-syncer"), object) - })) - } - - return builder.Watches(&corev1.Pod{}, handler.EnqueueRequestsFromMapFunc(func(_ context.Context, object client.Object) []reconcile.Request { - return mapPods(registerCtx.ToSyncContext("secret-syncer"), object) +func (s *secretSyncer) ModifyController(ctx *synccontext.RegisterContext, builder *builder.Builder) (*builder.Builder, error) { + return builder.WatchesRawSource(ctx.Mappings.Store().Watch(s.GroupVersionKind(), func(nameMapping synccontext.NameMapping, queue workqueue.RateLimitingInterface) { + queue.Add(reconcile.Request{NamespacedName: 
nameMapping.VirtualName}) })), nil } @@ -166,38 +129,36 @@ func (s *secretSyncer) Sync(ctx *synccontext.SyncContext, event *synccontext.Syn } func (s *secretSyncer) SyncToVirtual(ctx *synccontext.SyncContext, event *synccontext.SyncToVirtualEvent[*corev1.Secret]) (_ ctrl.Result, retErr error) { - // virtual object is not here anymore, so we delete - return syncer.DeleteHostObject(ctx, event.Host, "virtual object was deleted") -} - -func (s *secretSyncer) isSecretUsed(ctx *synccontext.SyncContext, vObj runtime.Object) (bool, error) { - secret, ok := vObj.(*corev1.Secret) - if !ok || secret == nil { - return false, fmt.Errorf("%#v is not a secret", vObj) - } else if secret.Annotations != nil && secret.Annotations[constants.SyncResourceAnnotation] == "true" { - return true, nil + if event.IsDelete() { + // virtual object is not here anymore, so we delete + return syncer.DeleteHostObject(ctx, event.Host, "virtual object was deleted") } - isUsed, err := isSecretUsedByPods(ctx, ctx.VirtualClient, secret.Namespace+"/"+secret.Name) + vObj := translate.VirtualMetadata(ctx, event.Host, s.HostToVirtual(ctx, types.NamespacedName{Name: event.Host.Name, Namespace: event.Host.Namespace}, event.Host)) + isUsed, err := s.isSecretUsed(ctx, vObj) if err != nil { - return false, errors.Wrap(err, "is secret used by pods") + return ctrl.Result{}, err + } else if !isUsed { + return syncer.DeleteHostObject(ctx, event.Host, "virtual secret is not used anymore") } - if isUsed { + + return syncer.CreateVirtualObject(ctx, event.Host, vObj, s.EventRecorder()) +} + +func (s *secretSyncer) isSecretUsed(ctx *synccontext.SyncContext, secret *corev1.Secret) (bool, error) { + if secret.Annotations[constants.SyncResourceAnnotation] == "true" { return true, nil } - // check if we also sync ingresses - if s.includeIngresses { - ingressesList := &networkingv1.IngressList{} - err := ctx.VirtualClient.List(ctx, ingressesList, client.MatchingFields{constants.IndexByIngressSecret: secret.Namespace + "/" + secret.Name}) - if err != nil { - return false, err - } - - isUsed = meta.LenList(ingressesList) > 0 - if isUsed { - return true, nil - } + // if other objects reference this secret we sync it + if len(ctx.Mappings.Store().ReferencesTo(ctx, synccontext.Object{ + GroupVersionKind: s.GroupVersionKind(), + NamespacedName: types.NamespacedName{ + Namespace: secret.Namespace, + Name: secret.Name, + }, + })) > 0 { + return true, nil } if s.syncAllSecrets { @@ -206,59 +167,3 @@ func (s *secretSyncer) isSecretUsed(ctx *synccontext.SyncContext, vObj runtime.O return false, nil } - -func mapIngresses(ctx *synccontext.SyncContext, obj client.Object) []reconcile.Request { - ingress, ok := obj.(*networkingv1.Ingress) - if !ok { - return nil - } - - requests := []reconcile.Request{} - names := ingresses.SecretNamesFromIngress(ctx, ingress) - for _, name := range names { - splitted := strings.Split(name, "/") - if len(splitted) == 2 { - requests = append(requests, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Namespace: splitted[0], - Name: splitted[1], - }, - }) - } - } - - return requests -} - -func mapPods(ctx *synccontext.SyncContext, obj client.Object) []reconcile.Request { - pod, ok := obj.(*corev1.Pod) - if !ok { - return nil - } - - requests := []reconcile.Request{} - names := pods.SecretNamesFromPod(ctx, pod) - for _, name := range names { - splitted := strings.Split(name, "/") - if len(splitted) == 2 { - requests = append(requests, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Namespace: splitted[0], - 
Name: splitted[1], - }, - }) - } - } - - return requests -} - -func isSecretUsedByPods(ctx context.Context, vClient client.Client, secretName string) (bool, error) { - podList := &corev1.PodList{} - err := vClient.List(ctx, podList, client.MatchingFields{constants.IndexByPodSecret: secretName}) - if err != nil { - return false, err - } - - return meta.LenList(podList) > 0, nil -} diff --git a/pkg/controllers/resources/secrets/syncer_test.go b/pkg/controllers/resources/secrets/syncer_test.go index 1e9e32b22..2d4262811 100644 --- a/pkg/controllers/resources/secrets/syncer_test.go +++ b/pkg/controllers/resources/secrets/syncer_test.go @@ -3,15 +3,12 @@ package secrets import ( "testing" - "github.com/loft-sh/vcluster/pkg/scheme" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" generictesting "github.com/loft-sh/vcluster/pkg/syncer/testing" syncer "github.com/loft-sh/vcluster/pkg/syncer/types" - testingutil "github.com/loft-sh/vcluster/pkg/util/testing" "github.com/loft-sh/vcluster/pkg/util/translate" "gotest.tools/assert" corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -43,7 +40,7 @@ func TestSync(t *testing.T) { } syncedSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: translate.Default.HostName(baseSecret.Name, baseSecret.Namespace), + Name: translate.Default.HostName(nil, baseSecret.Name, baseSecret.Namespace), Namespace: "test", Annotations: map[string]string{ translate.NameAnnotation: baseSecret.Name, @@ -153,73 +150,3 @@ func TestSync(t *testing.T) { }, }) } - -func TestMapping(t *testing.T) { - // test ingress - ingress := &networkingv1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "test", - }, - Spec: networkingv1.IngressSpec{ - TLS: []networkingv1.IngressTLS{ - { - SecretName: "a", - }, - { - SecretName: "b", - }, - }, - }, - } - - // test ingress mapping - pClient := testingutil.NewFakeClient(scheme.Scheme) - vClient := testingutil.NewFakeClient(scheme.Scheme) - registerCtx := generictesting.NewFakeRegisterContext(generictesting.NewFakeConfig(), pClient, vClient) - requests := mapIngresses(registerCtx.ToSyncContext("ingresses"), ingress) - if len(requests) != 2 || requests[0].Name != "a" || requests[0].Namespace != "test" || requests[1].Name != "b" || requests[1].Namespace != "test" { - t.Fatalf("Wrong secret requests returned: %#+v", requests) - } - - // test pod - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "test", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "test", - Env: []corev1.EnvVar{ - { - Name: "test", - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "a", - }, - }, - }, - }, - }, - }, - }, - Volumes: []corev1.Volume{ - { - Name: "test", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: "b", - }, - }, - }, - }, - }, - } - requests = mapPods(registerCtx.ToSyncContext("pods"), pod) - if len(requests) != 2 || requests[0].Name != "a" || requests[0].Namespace != "test" || requests[1].Name != "b" || requests[1].Namespace != "test" { - t.Fatalf("Wrong pod requests returned: %#+v", requests) - } -} diff --git a/pkg/controllers/resources/serviceaccounts/syncer_test.go b/pkg/controllers/resources/serviceaccounts/syncer_test.go index 9c49422aa..e5f6c3a3c 100644 --- 
a/pkg/controllers/resources/serviceaccounts/syncer_test.go +++ b/pkg/controllers/resources/serviceaccounts/syncer_test.go @@ -3,8 +3,10 @@ package serviceaccounts import ( "testing" + "github.com/loft-sh/vcluster/pkg/config" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" syncertesting "github.com/loft-sh/vcluster/pkg/syncer/testing" + testingutil "github.com/loft-sh/vcluster/pkg/util/testing" "github.com/loft-sh/vcluster/pkg/util/translate" "gotest.tools/assert" corev1 "k8s.io/api/core/v1" @@ -35,7 +37,7 @@ func TestSync(t *testing.T) { } pSA := &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ - Name: translate.Default.HostName(vSA.Name, vSA.Namespace), + Name: translate.Default.HostName(nil, vSA.Name, vSA.Namespace), Namespace: "test", Annotations: map[string]string{ "test": "test", @@ -52,7 +54,10 @@ func TestSync(t *testing.T) { AutomountServiceAccountToken: &[]bool{false}[0], } - syncertesting.RunTests(t, []*syncertesting.SyncTest{ + syncertesting.RunTestsWithContext(t, func(vConfig *config.VirtualClusterConfig, pClient *testingutil.FakeIndexClient, vClient *testingutil.FakeIndexClient) *synccontext.RegisterContext { + vConfig.Sync.ToHost.ServiceAccounts.Enabled = true + return syncertesting.NewFakeRegisterContext(vConfig, pClient, vClient) + }, []*syncertesting.SyncTest{ { Name: "ServiceAccount sync", InitialVirtualState: []runtime.Object{ diff --git a/pkg/controllers/resources/services/syncer_test.go b/pkg/controllers/resources/services/syncer_test.go index 0afcdb6aa..ae5d87ab1 100644 --- a/pkg/controllers/resources/services/syncer_test.go +++ b/pkg/controllers/resources/services/syncer_test.go @@ -25,7 +25,7 @@ func TestSync(t *testing.T) { Namespace: "testns", } pObjectMeta := metav1.ObjectMeta{ - Name: translate.Default.HostName("testservice", "testns"), + Name: translate.Default.HostName(nil, "testservice", "testns"), Namespace: "test", Annotations: map[string]string{ translate.NameAnnotation: vObjectMeta.Name, @@ -297,9 +297,9 @@ func TestSync(t *testing.T) { ObjectMeta: pObjectMeta, Spec: corev1.ServiceSpec{ Selector: map[string]string{ - translate.Default.HostLabel(nil, selectorKey): vServiceNodePortFromExternal.Spec.Selector[selectorKey], - translate.NamespaceLabel: vServiceNodePortFromExternal.Namespace, - translate.MarkerLabel: translate.VClusterName, + translate.Default.HostLabel(nil, selectorKey, ""): vServiceNodePortFromExternal.Spec.Selector[selectorKey], + translate.NamespaceLabel: vServiceNodePortFromExternal.Namespace, + translate.MarkerLabel: translate.VClusterName, }, Type: corev1.ServiceTypeNodePort, Ports: vServiceNodePortFromExternal.Spec.Ports, diff --git a/pkg/controllers/resources/volumesnapshots/volumesnapshotcontents/syncer_test.go b/pkg/controllers/resources/volumesnapshots/volumesnapshotcontents/syncer_test.go index 497180fa3..0e012754d 100644 --- a/pkg/controllers/resources/volumesnapshots/volumesnapshotcontents/syncer_test.go +++ b/pkg/controllers/resources/volumesnapshots/volumesnapshotcontents/syncer_test.go @@ -41,7 +41,7 @@ func TestSync(t *testing.T) { } pVolumeSnapshot := &volumesnapshotv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ - Name: translate.Default.HostName(vVolumeSnapshot.Name, vVolumeSnapshot.Namespace), + Name: translate.Default.HostName(nil, vVolumeSnapshot.Name, vVolumeSnapshot.Namespace), Namespace: targetNamespace, }, } @@ -79,7 +79,7 @@ func TestSync(t *testing.T) { Spec: *vPreProvisioned.Spec.DeepCopy(), } pPreProvisioned.Spec.VolumeSnapshotRef = corev1.ObjectReference{ - Name: 
translate.Default.HostName(vPreProvisioned.Spec.VolumeSnapshotRef.Name, vPreProvisioned.Spec.VolumeSnapshotRef.Namespace), + Name: translate.Default.HostName(nil, vPreProvisioned.Spec.VolumeSnapshotRef.Name, vPreProvisioned.Spec.VolumeSnapshotRef.Namespace), Namespace: targetNamespace, } @@ -91,7 +91,7 @@ func TestSync(t *testing.T) { ObjectMeta: pDynamicObjectMeta, Spec: volumesnapshotv1.VolumeSnapshotContentSpec{ VolumeSnapshotRef: corev1.ObjectReference{ - Name: translate.Default.HostName(vVolumeSnapshot.Name, vVolumeSnapshot.Namespace), + Name: translate.Default.HostName(nil, vVolumeSnapshot.Name, vVolumeSnapshot.Namespace), Namespace: targetNamespace, }, DeletionPolicy: volumesnapshotv1.VolumeSnapshotContentDelete, diff --git a/pkg/controllers/resources/volumesnapshots/volumesnapshots/syncer_test.go b/pkg/controllers/resources/volumesnapshots/volumesnapshots/syncer_test.go index f49039ccf..354138190 100644 --- a/pkg/controllers/resources/volumesnapshots/volumesnapshots/syncer_test.go +++ b/pkg/controllers/resources/volumesnapshots/volumesnapshots/syncer_test.go @@ -56,7 +56,7 @@ func TestSync(t *testing.T) { } pObjectMeta := metav1.ObjectMeta{ - Name: translate.Default.HostName(vObjectMeta.Name, vObjectMeta.Namespace), + Name: translate.Default.HostName(nil, vObjectMeta.Name, vObjectMeta.Namespace), Namespace: targetNamespace, ResourceVersion: "1234", Annotations: map[string]string{ @@ -74,7 +74,7 @@ func TestSync(t *testing.T) { ObjectMeta: pObjectMeta, Spec: volumesnapshotv1.VolumeSnapshotSpec{ Source: volumesnapshotv1.VolumeSnapshotSource{ - PersistentVolumeClaimName: ptr.To(translate.Default.HostName(*vPVSourceSnapshot.Spec.Source.PersistentVolumeClaimName, vObjectMeta.Namespace)), + PersistentVolumeClaimName: ptr.To(translate.Default.HostName(nil, *vPVSourceSnapshot.Spec.Source.PersistentVolumeClaimName, vObjectMeta.Namespace)), }, VolumeSnapshotClassName: vPVSourceSnapshot.Spec.VolumeSnapshotClassName, }, diff --git a/pkg/etcd/client.go b/pkg/etcd/client.go index 320a7f549..c72f9c0a5 100644 --- a/pkg/etcd/client.go +++ b/pkg/etcd/client.go @@ -23,6 +23,7 @@ var ( type Client interface { List(ctx context.Context, key string, rev int) ([]Value, error) + Watch(ctx context.Context, key string, rev int) clientv3.WatchChan Get(ctx context.Context, key string) (Value, error) Put(ctx context.Context, key string, value []byte) error Create(ctx context.Context, key string, value []byte) error @@ -51,6 +52,13 @@ func NewFromConfig(ctx context.Context, vConfig *config.VirtualClusterConfig) (C ServerCert: "/data/pki/apiserver-etcd-client.crt", ServerKey: "/data/pki/apiserver-etcd-client.key", } + if vConfig.Distro() == vconfig.K0SDistro { + etcdCertificates = &Certificates{ + CaCert: "/data/k0s/pki/etcd/ca.crt", + ServerCert: "/data/k0s/pki/apiserver-etcd-client.crt", + ServerKey: "/data/k0s/pki/apiserver-etcd-client.key", + } + } if vConfig.ControlPlane.BackingStore.Etcd.Embedded.Enabled { etcdEndpoints = "https://127.0.0.1:2379" @@ -94,6 +102,10 @@ func New(ctx context.Context, certificates *Certificates, endpoints ...string) ( }, nil } +func (c *client) Watch(ctx context.Context, key string, rev int) clientv3.WatchChan { + return c.c.Watch(ctx, key, clientv3.WithPrefix(), clientv3.WithRev(int64(rev))) +} + func (c *client) List(ctx context.Context, key string, rev int) ([]Value, error) { resp, err := c.c.Get(ctx, key, clientv3.WithPrefix(), clientv3.WithRev(int64(rev))) if err != nil { @@ -131,7 +143,7 @@ func (c *client) Get(ctx context.Context, key string) (Value, error) { func (c 
*client) Put(ctx context.Context, key string, value []byte) error { val, err := c.Get(ctx, key) - if err != nil { + if err != nil && !errors.Is(err, ErrNotFound) { return err } if val.Revision == 0 { diff --git a/pkg/etcd/util.go b/pkg/etcd/util.go index 6a6187f8c..9440ad29f 100644 --- a/pkg/etcd/util.go +++ b/pkg/etcd/util.go @@ -27,7 +27,7 @@ type Certificates struct { func WaitForEtcd(parentCtx context.Context, certificates *Certificates, endpoints ...string) error { var err error waitErr := wait.PollUntilContextTimeout(parentCtx, time.Second, waitForClientTimeout, true, func(ctx context.Context) (bool, error) { - etcdClient, err := GetEtcdClient(parentCtx, certificates, endpoints...) + etcdClient, err := GetEtcdClient(ctx, certificates, endpoints...) if err == nil { defer func() { _ = etcdClient.Close() @@ -39,7 +39,7 @@ func WaitForEtcd(parentCtx context.Context, certificates *Certificates, endpoint } } - klog.Infof("Couldn't connect to embedded etcd (will retry in a second): %v", err) + klog.Infof("Couldn't connect to etcd (will retry in a second): %v", err) return false, nil }) if waitErr != nil { @@ -67,14 +67,11 @@ func GetEtcdClient(ctx context.Context, certificates *Certificates, endpoints .. // If no endpoints are provided, getEndpoints is called to provide defaults. func getClientConfig(ctx context.Context, certificates *Certificates, endpoints ...string) (*clientv3.Config, error) { config := &clientv3.Config{ - Endpoints: endpoints, - Context: ctx, - DialTimeout: 2 * time.Second, - DialKeepAliveTime: 30 * time.Second, - DialKeepAliveTimeout: 10 * time.Second, - AutoSyncInterval: 10 * time.Second, - Logger: zap.L().Named("etcd-client"), - PermitWithoutStream: true, + Endpoints: endpoints, + Context: ctx, + DialTimeout: 5 * time.Second, + + Logger: zap.L().Named("etcd-client"), } if len(endpoints) > 0 { diff --git a/pkg/integrations/metricsserver/metricsserver.go b/pkg/integrations/metricsserver/metricsserver.go index ed497dc5f..366879c8c 100644 --- a/pkg/integrations/metricsserver/metricsserver.go +++ b/pkg/integrations/metricsserver/metricsserver.go @@ -133,7 +133,7 @@ func handleMetricsServerProxyRequest( ) { syncContext := ctx.ToSyncContext("metrics-proxy") splitted := strings.Split(req.URL.Path, "/") - err := translateLabelSelectors(syncContext, req) + err := translateLabelSelectors(syncContext, req, info.Namespace) if err != nil { klog.Infof("error translating label selectors %v", err) requestpkg.FailWithStatus(w, req, http.StatusInternalServerError, err) @@ -166,7 +166,7 @@ func handleMetricsServerProxyRequest( if info.Resource == PodResource && info.Verb == RequestVerbList { // check if its a list request across all namespaces if info.Namespace != "" { - splitted[5] = translate.Default.HostNamespace(info.Namespace) + splitted[5] = translate.Default.HostNamespace(syncContext, info.Namespace) } else if translate.Default.SingleNamespaceTarget() { // limit to current namespace in host cluster splitted = append(splitted[:4], append([]string{"namespaces", ctx.Config.WorkloadTargetNamespace}, splitted[4:]...)...) 
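// --- Illustrative aside (not part of the patch) -----------------------------------
// pkg/etcd/client.go above adds a Watch method to the etcd Client interface that
// returns a clientv3.WatchChan and treats the key as a prefix. A rough sketch of how
// it could be consumed; the setup mirrors NewFromConfig as it appears in this diff,
// and the details should be read as assumptions rather than code from the patch.
package example

import (
	"context"
	"fmt"

	"github.com/loft-sh/vcluster/pkg/config"
	"github.com/loft-sh/vcluster/pkg/etcd"
)

func watchPrefix(ctx context.Context, vConfig *config.VirtualClusterConfig, prefix string) error {
	etcdClient, err := etcd.NewFromConfig(ctx, vConfig)
	if err != nil {
		return err
	}

	// Watch wraps the key with clientv3.WithPrefix() and clientv3.WithRev(rev) internally.
	for watchResponse := range etcdClient.Watch(ctx, prefix, 0) {
		if err := watchResponse.Err(); err != nil {
			return err
		}
		for _, event := range watchResponse.Events {
			fmt.Println(event.Type, string(event.Kv.Key))
		}
	}

	return nil
}
// -----------------------------------------------------------------------------------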
@@ -472,7 +472,7 @@ func getVirtualNodes(ctx context.Context, vClient client.Client) ([]corev1.Node, return nodeList.Items, nil } -func translateLabelSelectors(ctx *synccontext.SyncContext, req *http.Request) error { +func translateLabelSelectors(ctx *synccontext.SyncContext, req *http.Request, namespace string) error { translatedSelectors := make(map[string]string) query := req.URL.Query() @@ -484,7 +484,7 @@ func translateLabelSelectors(ctx *synccontext.SyncContext, req *http.Request) er } for k, v := range selectors { - translatedKey := translate.Default.HostLabel(ctx, k) + translatedKey := translate.Default.HostLabel(ctx, k, namespace) translatedSelectors[translatedKey] = v } } diff --git a/pkg/mappings/generic/mapper.go b/pkg/mappings/generic/mapper.go index aab28c6a2..01a7bf51b 100644 --- a/pkg/mappings/generic/mapper.go +++ b/pkg/mappings/generic/mapper.go @@ -3,12 +3,12 @@ package generic import ( "fmt" - "github.com/loft-sh/vcluster/pkg/constants" "github.com/loft-sh/vcluster/pkg/scheme" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" - "github.com/loft-sh/vcluster/pkg/util/clienthelper" "github.com/loft-sh/vcluster/pkg/util/translate" - kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -16,45 +16,45 @@ import ( ) // PhysicalNameWithObjectFunc is a definition to translate a name that also optionally expects a vObj -type PhysicalNameWithObjectFunc func(vName, vNamespace string, vObj client.Object) string +type PhysicalNameWithObjectFunc func(ctx *synccontext.SyncContext, vName, vNamespace string, vObj client.Object) string // PhysicalNameFunc is a definition to translate a name -type PhysicalNameFunc func(vName, vNamespace string) string +type PhysicalNameFunc func(ctx *synccontext.SyncContext, vName, vNamespace string) string // NewMapper creates a new mapper with a custom physical name func -func NewMapper(ctx *synccontext.RegisterContext, obj client.Object, translateName PhysicalNameFunc, options ...MapperOption) (synccontext.Mapper, error) { - return NewMapperWithObject(ctx, obj, func(vName, vNamespace string, _ client.Object) string { - return translateName(vName, vNamespace) - }, options...) 
+func NewMapper(ctx *synccontext.RegisterContext, obj client.Object, translateName PhysicalNameFunc) (synccontext.Mapper, error) { + return NewMapperWithObject(ctx, obj, func(ctx *synccontext.SyncContext, vName, vNamespace string, _ client.Object) string { + return translateName(ctx, vName, vNamespace) + }) } // NewMapperWithObject creates a new mapper with a custom physical name func -func NewMapperWithObject(ctx *synccontext.RegisterContext, obj client.Object, translateName PhysicalNameWithObjectFunc, options ...MapperOption) (synccontext.Mapper, error) { +func NewMapperWithObject(ctx *synccontext.RegisterContext, obj client.Object, translateName PhysicalNameWithObjectFunc) (synccontext.Mapper, error) { + return newMapper(ctx, obj, true, translateName) +} + +// NewMapperWithoutRecorder creates a new mapper without a recorder, i.e. mappings are not automatically stored in the mappings store +func NewMapperWithoutRecorder(ctx *synccontext.RegisterContext, obj client.Object, translateName PhysicalNameWithObjectFunc) (synccontext.Mapper, error) { + return newMapper(ctx, obj, false, translateName) +} + +// newMapper creates a new mapper and optionally wraps it with a recorder that stores mappings in the mappings store +func newMapper(ctx *synccontext.RegisterContext, obj client.Object, recorder bool, translateName PhysicalNameWithObjectFunc) (synccontext.Mapper, error) { gvk, err := apiutil.GVKForObject(obj, scheme.Scheme) if err != nil { return nil, fmt.Errorf("retrieve GVK for object failed: %w", err) } - mapperOptions := getOptions(options...) - if !mapperOptions.SkipIndex { - err = ctx.VirtualManager.GetFieldIndexer().IndexField(ctx, obj.DeepCopyObject().(client.Object), constants.IndexByPhysicalName, func(rawObj client.Object) []string { - if rawObj.GetNamespace() != "" { - return []string{translate.Default.HostNamespace(rawObj.GetNamespace()) + "/" + translateName(rawObj.GetName(), rawObj.GetNamespace(), rawObj)} - } - - return []string{translateName(rawObj.GetName(), rawObj.GetNamespace(), rawObj)} - }) - if err != nil { - return nil, fmt.Errorf("index field: %w", err) - } - } - - return &mapper{ + var retMapper synccontext.Mapper = &mapper{ translateName: translateName, virtualClient: ctx.VirtualManager.GetClient(), obj: obj, gvk: gvk, - }, nil + } + if recorder { + retMapper = WithRecorder(retMapper) + } + return retMapper, nil } type mapper struct { @@ -69,45 +69,95 @@ func (n *mapper) GroupVersionKind() schema.GroupVersionKind { return n.gvk } -func (n *mapper) VirtualToHost(_ *synccontext.SyncContext, req types.NamespacedName, vObj client.Object) types.NamespacedName { - return types.NamespacedName{ - Namespace: translate.Default.HostNamespace(req.Namespace), - Name: n.translateName(req.Name, req.Namespace, vObj), +func (n *mapper) Migrate(ctx *synccontext.RegisterContext, mapper synccontext.Mapper) error { + gvk := mapper.GroupVersionKind() + listGvk := schema.GroupVersionKind{ + Group: gvk.Group, + Version: gvk.Version, + Kind: gvk.Kind + "List", } -} -func (n *mapper) HostToVirtual(ctx *synccontext.SyncContext, req types.NamespacedName, pObj client.Object) types.NamespacedName { - if pObj != nil { - pAnnotations := pObj.GetAnnotations() - if pAnnotations != nil && pAnnotations[translate.NameAnnotation] != "" { - return types.NamespacedName{ - Namespace: pAnnotations[translate.NamespaceAnnotation], - Name: pAnnotations[translate.NameAnnotation], - } + list, err := scheme.Scheme.New(listGvk) + if err != nil { + if !runtime.IsNotRegisteredError(err) { + return fmt.Errorf("migrate create object list %s: %w", listGvk.String(), err) } + +
list = &unstructured.UnstructuredList{} } - key := req.Name - if req.Namespace != "" { - key = req.Namespace + "/" + req.Name + uList, ok := list.(*unstructured.UnstructuredList) + if ok { + uList.SetKind(listGvk.Kind) + uList.SetAPIVersion(listGvk.GroupVersion().String()) } - vObj := n.obj.DeepCopyObject().(client.Object) - err := clienthelper.GetByIndex(ctx, n.virtualClient, vObj, constants.IndexByPhysicalName, key) + // it's safe to list here without namespace as this will just list all items in the cache + err = ctx.VirtualManager.GetClient().List(ctx, list.(client.ObjectList)) if err != nil { - if !kerrors.IsNotFound(err) && !kerrors.IsConflict(err) { - panic(err.Error()) + return fmt.Errorf("error listing %s: %w", listGvk.String(), err) + } + + items, err := meta.ExtractList(list) + if err != nil { + return fmt.Errorf("extract list %s: %w", listGvk.String(), err) + } + + for _, item := range items { + clientObject, ok := item.(client.Object) + if !ok { + continue } - return types.NamespacedName{} + vName := types.NamespacedName{Name: clientObject.GetName(), Namespace: clientObject.GetNamespace()} + pName := mapper.VirtualToHost(ctx.ToSyncContext("migrate-"+listGvk.Kind), vName, clientObject) + if pName.Name != "" { + nameMapping := synccontext.NameMapping{ + GroupVersionKind: n.gvk, + VirtualName: vName, + HostName: pName, + } + + err = ctx.Mappings.Store().RecordAndSaveReference(ctx, nameMapping, nameMapping) + if err != nil { + return fmt.Errorf("error saving reference in store: %w", err) + } + } + } + + return nil +} + +func (n *mapper) VirtualToHost(ctx *synccontext.SyncContext, req types.NamespacedName, vObj client.Object) (retName types.NamespacedName) { + pNamespace := req.Namespace + if pNamespace != "" { + pNamespace = translate.Default.HostNamespace(ctx, pNamespace) } return types.NamespacedName{ - Namespace: vObj.GetNamespace(), - Name: vObj.GetName(), + Namespace: pNamespace, + Name: n.translateName(ctx, req.Name, req.Namespace, vObj), } } +func (n *mapper) HostToVirtual(_ *synccontext.SyncContext, _ types.NamespacedName, pObj client.Object) (retName types.NamespacedName) { + if pObj != nil { + pAnnotations := pObj.GetAnnotations() + if pAnnotations[translate.NameAnnotation] != "" { + // check if kind matches + gvk, ok := pAnnotations[translate.KindAnnotation] + if !ok || n.gvk.String() == gvk { + return types.NamespacedName{ + Namespace: pAnnotations[translate.NamespaceAnnotation], + Name: pAnnotations[translate.NameAnnotation], + } + } + } + } + + return types.NamespacedName{} +} + func (n *mapper) IsManaged(ctx *synccontext.SyncContext, pObj client.Object) (bool, error) { return translate.Default.IsManaged(ctx, pObj), nil } diff --git a/pkg/mappings/generic/mirror.go b/pkg/mappings/generic/mirror.go index b208b1560..581d0a358 100644 --- a/pkg/mappings/generic/mirror.go +++ b/pkg/mappings/generic/mirror.go @@ -31,10 +31,14 @@ func (n *mirrorMapper) GroupVersionKind() schema.GroupVersionKind { return n.gvk } -func (n *mirrorMapper) VirtualToHost(_ *synccontext.SyncContext, req types.NamespacedName, _ client.Object) types.NamespacedName { +func (n *mirrorMapper) Migrate(_ *synccontext.RegisterContext, _ synccontext.Mapper) error { + return nil +} + +func (n *mirrorMapper) VirtualToHost(ctx *synccontext.SyncContext, req types.NamespacedName, _ client.Object) (retName types.NamespacedName) { pNamespace := req.Namespace if pNamespace != "" { - pNamespace = translate.Default.HostNamespace(pNamespace) + pNamespace = translate.Default.HostNamespace(ctx, pNamespace) } return 
types.NamespacedName{ @@ -43,7 +47,7 @@ func (n *mirrorMapper) VirtualToHost(_ *synccontext.SyncContext, req types.Names } } -func (n *mirrorMapper) HostToVirtual(_ *synccontext.SyncContext, req types.NamespacedName, pObj client.Object) types.NamespacedName { +func (n *mirrorMapper) HostToVirtual(_ *synccontext.SyncContext, req types.NamespacedName, pObj client.Object) (retName types.NamespacedName) { if pObj != nil { pAnnotations := pObj.GetAnnotations() if pAnnotations != nil && pAnnotations[translate.NameAnnotation] != "" { @@ -54,13 +58,9 @@ func (n *mirrorMapper) HostToVirtual(_ *synccontext.SyncContext, req types.Names } } - // if a namespace is requested we need to return early here - if req.Namespace != "" { - return types.NamespacedName{} - } - return types.NamespacedName{ - Name: req.Name, + Name: req.Name, + Namespace: "", // this is intentionally empty } } diff --git a/pkg/mappings/generic/options.go b/pkg/mappings/generic/options.go deleted file mode 100644 index 7d8a30c37..000000000 --- a/pkg/mappings/generic/options.go +++ /dev/null @@ -1,21 +0,0 @@ -package generic - -type MapperOptions struct { - SkipIndex bool -} - -type MapperOption func(options *MapperOptions) - -func SkipIndex() MapperOption { - return func(options *MapperOptions) { - options.SkipIndex = true - } -} - -func getOptions(options ...MapperOption) *MapperOptions { - newOptions := &MapperOptions{} - for _, option := range options { - option(newOptions) - } - return newOptions -} diff --git a/pkg/mappings/generic/recorder.go b/pkg/mappings/generic/recorder.go new file mode 100644 index 000000000..c95598aeb --- /dev/null +++ b/pkg/mappings/generic/recorder.go @@ -0,0 +1,121 @@ +package generic + +import ( + "github.com/loft-sh/vcluster/pkg/syncer/synccontext" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func WithRecorder(mapper synccontext.Mapper) synccontext.Mapper { + return &recorder{ + Mapper: mapper, + } +} + +type recorder struct { + synccontext.Mapper +} + +func (n *recorder) VirtualToHost(ctx *synccontext.SyncContext, req types.NamespacedName, vObj client.Object) (retName types.NamespacedName) { + defer func() { + err := RecordMapping(ctx, retName, req, n.GroupVersionKind()) + if err != nil { + klog.FromContext(ctx).Error(err, "record name mapping", "host", retName, "virtual", req) + retName = types.NamespacedName{} + } + }() + + // check store first + pName, ok := VirtualToHostFromStore(ctx, req, n.GroupVersionKind()) + if ok { + return pName + } + + return n.Mapper.VirtualToHost(ctx, req, vObj) +} + +func (n *recorder) HostToVirtual(ctx *synccontext.SyncContext, req types.NamespacedName, pObj client.Object) (retName types.NamespacedName) { + defer func() { + err := RecordMapping(ctx, req, retName, n.GroupVersionKind()) + if err != nil { + klog.FromContext(ctx).Error(err, "record name mapping", "host", req, "virtual", retName) + retName = types.NamespacedName{} + } + }() + + // check store first + vName, ok := HostToVirtualFromStore(ctx, req, n.GroupVersionKind()) + if ok { + return vName + } + + return n.Mapper.HostToVirtual(ctx, req, pObj) +} + +func (n *recorder) IsManaged(ctx *synccontext.SyncContext, pObj client.Object) (bool, error) { + if ctx != nil && ctx.Mappings != nil && ctx.Mappings.Store() != nil { + _, ok := ctx.Mappings.Store().HostToVirtualName(ctx, synccontext.Object{ + GroupVersionKind: n.GroupVersionKind(), + NamespacedName: types.NamespacedName{ + Name: pObj.GetName(), + 
Namespace: pObj.GetNamespace(), + }, + }) + if ok { + return true, nil + } + } + + return n.Mapper.IsManaged(ctx, pObj) +} + +func RecordMapping(ctx *synccontext.SyncContext, pName, vName types.NamespacedName, gvk schema.GroupVersionKind) error { + if pName.Name == "" || vName.Name == "" { + return nil + } + + if ctx != nil && ctx.Mappings != nil && ctx.Mappings.Store() != nil { + // check if we have the owning object in the context + belongsTo, ok := synccontext.MappingFrom(ctx) + if !ok { + return nil + } + + // record the reference + err := ctx.Mappings.Store().RecordReference(ctx, synccontext.NameMapping{ + GroupVersionKind: gvk, + + HostName: pName, + VirtualName: vName, + }, belongsTo) + if err != nil { + return err + } + } + + return nil +} + +func HostToVirtualFromStore(ctx *synccontext.SyncContext, req types.NamespacedName, gvk schema.GroupVersionKind) (types.NamespacedName, bool) { + if ctx == nil || ctx.Mappings == nil || ctx.Mappings.Store() == nil { + return types.NamespacedName{}, false + } + + return ctx.Mappings.Store().HostToVirtualName(ctx, synccontext.Object{ + GroupVersionKind: gvk, + NamespacedName: req, + }) +} + +func VirtualToHostFromStore(ctx *synccontext.SyncContext, req types.NamespacedName, gvk schema.GroupVersionKind) (types.NamespacedName, bool) { + if ctx == nil || ctx.Mappings == nil || ctx.Mappings.Store() == nil { + return types.NamespacedName{}, false + } + + return ctx.Mappings.Store().VirtualToHostName(ctx, synccontext.Object{ + GroupVersionKind: gvk, + NamespacedName: req, + }) +} diff --git a/pkg/mappings/generic/recorder_test.go b/pkg/mappings/generic/recorder_test.go new file mode 100644 index 000000000..c4e220cae --- /dev/null +++ b/pkg/mappings/generic/recorder_test.go @@ -0,0 +1,135 @@ +package generic + +import ( + "context" + "testing" + + "github.com/loft-sh/vcluster/pkg/mappings" + "github.com/loft-sh/vcluster/pkg/mappings/store" + "github.com/loft-sh/vcluster/pkg/syncer/synccontext" + "gotest.tools/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestRecorder(t *testing.T) { + gvk := corev1.SchemeGroupVersion.WithKind("Secret") + storeBackend := store.NewMemoryBackend() + mappingsStore, err := store.NewStore(context.TODO(), nil, nil, storeBackend) + assert.NilError(t, err) + + // check recording + syncContext := &synccontext.SyncContext{ + Context: context.TODO(), + Mappings: mappings.NewMappingsRegistry(mappingsStore), + } + + // create mapper + recorderMapper := WithRecorder(&fakeMapper{gvk: gvk}) + + // record mapping + vTest := types.NamespacedName{ + Name: "test", + Namespace: "test", + } + pTestOther := types.NamespacedName{ + Name: "other", + Namespace: "other", + } + hTest := recorderMapper.VirtualToHost(syncContext, vTest, nil) + assert.Equal(t, vTest, hTest) + + // check it was not added to store + _, ok := mappingsStore.VirtualToHostName(syncContext.Context, synccontext.Object{ + GroupVersionKind: gvk, + NamespacedName: vTest, + }) + assert.Equal(t, ok, false) + + // add conflicting mapping + conflictingMapping := synccontext.NameMapping{ + GroupVersionKind: gvk, + VirtualName: vTest, + HostName: pTestOther, + } + err = mappingsStore.RecordReference(syncContext.Context, conflictingMapping, conflictingMapping) + assert.NilError(t, err) + + // check that mapping is empty + syncContext.Context = synccontext.WithMapping(syncContext.Context, synccontext.NameMapping{ + 
GroupVersionKind: gvk, + VirtualName: vTest, + }) + retTest := recorderMapper.HostToVirtual(syncContext, vTest, nil) + assert.Equal(t, retTest, types.NamespacedName{}) + + // check that mapping is expected + retTest = recorderMapper.HostToVirtual(syncContext, pTestOther, nil) + assert.Equal(t, retTest, vTest) + + // add another mapping + vTest = types.NamespacedName{ + Name: "test123", + Namespace: "test123", + } + retTest = recorderMapper.HostToVirtual(syncContext, vTest, nil) + assert.Equal(t, retTest, vTest) + retTest = recorderMapper.VirtualToHost(syncContext, vTest, nil) + assert.Equal(t, retTest, vTest) + + // try to record other mapping + conflictingMapping = synccontext.NameMapping{ + GroupVersionKind: gvk, + HostName: retTest, + VirtualName: pTestOther, + } + err = mappingsStore.RecordReference(syncContext.Context, conflictingMapping, conflictingMapping) + assert.ErrorContains(t, err, "there is already another name mapping") + + // check if managed 1 + isManaged, err := recorderMapper.IsManaged(syncContext, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: vTest.Name, + Namespace: vTest.Namespace, + }, + }) + assert.NilError(t, err) + assert.Equal(t, isManaged, true) + + // check if managed 2 + isManaged, err = recorderMapper.IsManaged(syncContext, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: vTest.Name, + Namespace: vTest.Namespace + "-other", + }, + }) + assert.NilError(t, err) + assert.Equal(t, isManaged, false) +} + +var _ synccontext.Mapper = &fakeMapper{} + +type fakeMapper struct { + gvk schema.GroupVersionKind +} + +func (f *fakeMapper) Migrate(_ *synccontext.RegisterContext, _ synccontext.Mapper) error { + return nil +} + +func (f *fakeMapper) GroupVersionKind() schema.GroupVersionKind { return f.gvk } + +func (f *fakeMapper) VirtualToHost(_ *synccontext.SyncContext, req types.NamespacedName, _ client.Object) types.NamespacedName { + return req +} + +func (f *fakeMapper) HostToVirtual(_ *synccontext.SyncContext, req types.NamespacedName, _ client.Object) types.NamespacedName { + return req +} + +func (f *fakeMapper) IsManaged(_ *synccontext.SyncContext, _ client.Object) (bool, error) { + return false, nil +} diff --git a/pkg/mappings/registry.go b/pkg/mappings/registry.go index 4d7ac9041..b28ed7ade 100644 --- a/pkg/mappings/registry.go +++ b/pkg/mappings/registry.go @@ -2,13 +2,13 @@ package mappings import ( "fmt" + "maps" "sync" volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" - policyv1 "k8s.io/api/policy/v1" schedulingv1 "k8s.io/api/scheduling/v1" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -16,16 +16,31 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func NewMappingsRegistry() synccontext.MappingsRegistry { +func NewMappingsRegistry(store synccontext.MappingsStore) synccontext.MappingsRegistry { return &Registry{ mappers: map[schema.GroupVersionKind]synccontext.Mapper{}, + + store: store, } } type Registry struct { mappers map[schema.GroupVersionKind]synccontext.Mapper - m sync.Mutex + store synccontext.MappingsStore + + m sync.RWMutex +} + +func (m *Registry) Store() synccontext.MappingsStore { + return m.store +} + +func (m *Registry) List() map[schema.GroupVersionKind]synccontext.Mapper { + m.m.RLock() + defer m.m.RUnlock() + + return maps.Clone(m.mappers) } func (m *Registry) AddMapper(mapper synccontext.Mapper) error { @@ -37,16 
+52,16 @@ func (m *Registry) AddMapper(mapper synccontext.Mapper) error { } func (m *Registry) Has(gvk schema.GroupVersionKind) bool { - m.m.Lock() - defer m.m.Unlock() + m.m.RLock() + defer m.m.RUnlock() _, ok := m.mappers[gvk] return ok } func (m *Registry) ByGVK(gvk schema.GroupVersionKind) (synccontext.Mapper, error) { - m.m.Lock() - defer m.m.Unlock() + m.m.RLock() + defer m.m.RUnlock() mapper, ok := m.mappers[gvk] if !ok { @@ -56,34 +71,14 @@ func (m *Registry) ByGVK(gvk schema.GroupVersionKind) (synccontext.Mapper, error return mapper, nil } -func CSIDrivers() schema.GroupVersionKind { - return storagev1.SchemeGroupVersion.WithKind("CSIDriver") -} - -func CSINodes() schema.GroupVersionKind { - return storagev1.SchemeGroupVersion.WithKind("CSINode") -} - -func CSIStorageCapacities() schema.GroupVersionKind { - return storagev1.SchemeGroupVersion.WithKind("CSIStorageCapacity") -} - func VolumeSnapshotContents() schema.GroupVersionKind { return volumesnapshotv1.SchemeGroupVersion.WithKind("VolumeSnapshotContent") } -func NetworkPolicies() schema.GroupVersionKind { - return networkingv1.SchemeGroupVersion.WithKind("NetworkPolicy") -} - func Nodes() schema.GroupVersionKind { return corev1.SchemeGroupVersion.WithKind("Node") } -func PodDisruptionBudgets() schema.GroupVersionKind { - return policyv1.SchemeGroupVersion.WithKind("PodDisruptionBudget") -} - func VolumeSnapshots() schema.GroupVersionKind { return volumesnapshotv1.SchemeGroupVersion.WithKind("VolumeSnapshot") } @@ -128,10 +123,6 @@ func StorageClasses() schema.GroupVersionKind { return storagev1.SchemeGroupVersion.WithKind("StorageClass") } -func IngressClasses() schema.GroupVersionKind { - return networkingv1.SchemeGroupVersion.WithKind("IngressClass") -} - func Namespaces() schema.GroupVersionKind { return corev1.SchemeGroupVersion.WithKind("Namespace") } diff --git a/pkg/mappings/resources/configmaps.go b/pkg/mappings/resources/configmaps.go index 15cbb5430..553bffe00 100644 --- a/pkg/mappings/resources/configmaps.go +++ b/pkg/mappings/resources/configmaps.go @@ -1,35 +1,29 @@ package resources import ( - "github.com/loft-sh/vcluster/pkg/constants" + "fmt" + + "github.com/loft-sh/vcluster/pkg/mappings" "github.com/loft-sh/vcluster/pkg/mappings/generic" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" "github.com/loft-sh/vcluster/pkg/util/translate" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" ) func CreateConfigMapsMapper(ctx *synccontext.RegisterContext) (synccontext.Mapper, error) { - mapper, err := generic.NewMapper(ctx, &corev1.ConfigMap{}, translate.Default.HostName, generic.SkipIndex()) - if err != nil { - return nil, err - } - - err = ctx.VirtualManager.GetFieldIndexer().IndexField(ctx, &corev1.ConfigMap{}, constants.IndexByPhysicalName, func(rawObj client.Object) []string { - if !translate.Default.SingleNamespaceTarget() && rawObj.GetName() == "kube-root-ca.crt" { - return []string{translate.Default.HostNamespace(rawObj.GetNamespace()) + "/" + translate.SafeConcatName("vcluster", "kube-root-ca.crt", "x", translate.VClusterName)} - } - - return []string{translate.Default.HostNamespace(rawObj.GetNamespace()) + "/" + translate.Default.HostName(rawObj.GetName(), rawObj.GetNamespace())} + mapper, err := generic.NewMapperWithoutRecorder(ctx, &corev1.ConfigMap{}, func(ctx *synccontext.SyncContext, vName, vNamespace string, _ client.Object) string { + return translate.Default.HostName(ctx, vName, vNamespace) }) if err != nil { return nil, err } 
- return &configMapsMapper{ + return generic.WithRecorder(&configMapsMapper{ Mapper: mapper, - }, nil + }), nil } type configMapsMapper struct { @@ -37,26 +31,131 @@ type configMapsMapper struct { } func (s *configMapsMapper) VirtualToHost(ctx *synccontext.SyncContext, req types.NamespacedName, vObj client.Object) types.NamespacedName { - if !translate.Default.SingleNamespaceTarget() && req.Name == "kube-root-ca.crt" { + pName := s.Mapper.VirtualToHost(ctx, req, vObj) + if pName.Name == "kube-root-ca.crt" { return types.NamespacedName{ Name: translate.SafeConcatName("vcluster", "kube-root-ca.crt", "x", translate.VClusterName), - Namespace: s.Mapper.VirtualToHost(ctx, req, vObj).Namespace, + Namespace: pName.Namespace, } } - return s.Mapper.VirtualToHost(ctx, req, vObj) + return pName } func (s *configMapsMapper) HostToVirtual(ctx *synccontext.SyncContext, req types.NamespacedName, pObj client.Object) types.NamespacedName { - if !translate.Default.SingleNamespaceTarget() && req.Name == translate.SafeConcatName("vcluster", "kube-root-ca.crt", "x", translate.VClusterName) { + // ignore kube-root-ca.crt from host + if req.Name == "kube-root-ca.crt" { + return types.NamespacedName{} + } + + // translate the special kube-root-ca.crt back + if req.Name == translate.SafeConcatName("vcluster", "kube-root-ca.crt", "x", translate.VClusterName) { return types.NamespacedName{ Name: "kube-root-ca.crt", Namespace: s.Mapper.HostToVirtual(ctx, req, pObj).Namespace, } - } else if !translate.Default.SingleNamespaceTarget() && req.Name == "kube-root-ca.crt" { - // ignore kube-root-ca.crt from host - return types.NamespacedName{} } return s.Mapper.HostToVirtual(ctx, req, pObj) } + +func (s *configMapsMapper) Migrate(ctx *synccontext.RegisterContext, mapper synccontext.Mapper) error { + // make sure we migrate pods first + podsMapper, err := ctx.Mappings.ByGVK(mappings.Pods()) + if err != nil { + return err + } + + // pods mapper + err = podsMapper.Migrate(ctx, podsMapper) + if err != nil { + return fmt.Errorf("migrate pods: %w", err) + } + + // list all pods and map them by their used configmaps + list := &corev1.PodList{} + err = ctx.VirtualManager.GetClient().List(ctx, list) + if err != nil { + return fmt.Errorf("error listing pods: %w", err) + } + + for _, val := range list.Items { + item := &val + + // this will try to translate and record the mapping + for _, configMap := range configMapsFromPod(item) { + pName := mapper.VirtualToHost(ctx.ToSyncContext("migrate-pod"), configMap, nil) + if pName.Name != "" { + err = ctx.Mappings.Store().RecordAndSaveReference(ctx, synccontext.NameMapping{ + GroupVersionKind: mappings.ConfigMaps(), + VirtualName: configMap, + HostName: pName, + }, synccontext.NameMapping{ + GroupVersionKind: mappings.Pods(), + VirtualName: types.NamespacedName{Name: item.Name, Namespace: item.Namespace}, + }) + if err != nil { + klog.FromContext(ctx).Error(err, "record config map reference on pod") + } + } + } + } + + return s.Mapper.Migrate(ctx, mapper) +} + +func configMapsFromPod(pod *corev1.Pod) []types.NamespacedName { + configMaps := []types.NamespacedName{} + for _, c := range pod.Spec.Containers { + configMaps = append(configMaps, configNamesFromContainer(pod.Namespace, &c)...) + } + for _, c := range pod.Spec.InitContainers { + configMaps = append(configMaps, configNamesFromContainer(pod.Namespace, &c)...) + } + for _, c := range pod.Spec.EphemeralContainers { + configMaps = append(configMaps, configNamesFromEphemeralContainer(pod.Namespace, &c)...)
+ } + for i := range pod.Spec.Volumes { + if pod.Spec.Volumes[i].ConfigMap != nil { + configMaps = append(configMaps, types.NamespacedName{Namespace: pod.Namespace, Name: pod.Spec.Volumes[i].ConfigMap.Name}) + } + if pod.Spec.Volumes[i].Projected != nil { + for j := range pod.Spec.Volumes[i].Projected.Sources { + if pod.Spec.Volumes[i].Projected.Sources[j].ConfigMap != nil { + configMaps = append(configMaps, types.NamespacedName{Namespace: pod.Namespace, Name: pod.Spec.Volumes[i].Projected.Sources[j].ConfigMap.Name}) + } + } + } + } + return configMaps +} + +func configNamesFromContainer(namespace string, container *corev1.Container) []types.NamespacedName { + configNames := []types.NamespacedName{} + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name != "" { + configNames = append(configNames, types.NamespacedName{Namespace: namespace, Name: env.ValueFrom.ConfigMapKeyRef.Name}) + } + } + for _, from := range container.EnvFrom { + if from.ConfigMapRef != nil && from.ConfigMapRef.Name != "" { + configNames = append(configNames, types.NamespacedName{Namespace: namespace, Name: from.ConfigMapRef.Name}) + } + } + return configNames +} + +func configNamesFromEphemeralContainer(namespace string, container *corev1.EphemeralContainer) []types.NamespacedName { + configNames := []types.NamespacedName{} + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name != "" { + configNames = append(configNames, types.NamespacedName{Namespace: namespace, Name: env.ValueFrom.ConfigMapKeyRef.Name}) + } + } + for _, from := range container.EnvFrom { + if from.ConfigMapRef != nil && from.ConfigMapRef.Name != "" { + configNames = append(configNames, types.NamespacedName{Namespace: namespace, Name: from.ConfigMapRef.Name}) + } + } + return configNames +} diff --git a/pkg/mappings/resources/configmaps_test.go b/pkg/mappings/resources/configmaps_test.go new file mode 100644 index 000000000..7a92fae88 --- /dev/null +++ b/pkg/mappings/resources/configmaps_test.go @@ -0,0 +1,53 @@ +package resources + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestConfigMapsMapping(t *testing.T) { + // test pod + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + Env: []corev1.EnvVar{ + { + Name: "test", + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "a", + }, + }, + }, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "test", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "b", + }, + }, + }, + }, + }, + }, + } + requests := configMapsFromPod(pod) + if len(requests) != 2 || requests[0].Name != "a" || requests[0].Namespace != "test" || requests[1].Name != "b" || requests[1].Namespace != "test" { + t.Fatalf("Wrong pod requests returned: %#+v", requests) + } +} diff --git a/pkg/mappings/resources/csidrivers.go b/pkg/mappings/resources/csidrivers.go deleted file mode 100644 index 10132f306..000000000 --- a/pkg/mappings/resources/csidrivers.go +++ /dev/null @@ -1,11 +0,0 @@ -package resources - -import ( - "github.com/loft-sh/vcluster/pkg/mappings/generic" - 
"github.com/loft-sh/vcluster/pkg/syncer/synccontext" - storagev1 "k8s.io/api/storage/v1" -) - -func CreateCSIDriversMapper(_ *synccontext.RegisterContext) (synccontext.Mapper, error) { - return generic.NewMirrorMapper(&storagev1.CSIDriver{}) -} diff --git a/pkg/mappings/resources/csinodes.go b/pkg/mappings/resources/csinodes.go deleted file mode 100644 index 69d67103a..000000000 --- a/pkg/mappings/resources/csinodes.go +++ /dev/null @@ -1,11 +0,0 @@ -package resources - -import ( - "github.com/loft-sh/vcluster/pkg/mappings/generic" - "github.com/loft-sh/vcluster/pkg/syncer/synccontext" - storagev1 "k8s.io/api/storage/v1" -) - -func CreateCSINodesMapper(_ *synccontext.RegisterContext) (synccontext.Mapper, error) { - return generic.NewMirrorMapper(&storagev1.CSINode{}) -} diff --git a/pkg/mappings/resources/endpoints.go b/pkg/mappings/resources/endpoints.go index ec4cc560e..4f3b77808 100644 --- a/pkg/mappings/resources/endpoints.go +++ b/pkg/mappings/resources/endpoints.go @@ -5,8 +5,43 @@ import ( "github.com/loft-sh/vcluster/pkg/syncer/synccontext" "github.com/loft-sh/vcluster/pkg/util/translate" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" ) func CreateEndpointsMapper(ctx *synccontext.RegisterContext) (synccontext.Mapper, error) { - return generic.NewMapper(ctx, &corev1.Endpoints{}, translate.Default.HostName) + mapper, err := generic.NewMapper(ctx, &corev1.Endpoints{}, translate.Default.HostName) + if err != nil { + return nil, err + } + + return &endpointsMapper{ + Mapper: mapper, + }, nil +} + +type endpointsMapper struct { + synccontext.Mapper +} + +func (s *endpointsMapper) VirtualToHost(ctx *synccontext.SyncContext, req types.NamespacedName, vObj client.Object) types.NamespacedName { + if req.Name == "kubernetes" && req.Namespace == "default" { + return types.NamespacedName{ + Name: translate.VClusterName, + Namespace: ctx.CurrentNamespace, + } + } + + return s.Mapper.VirtualToHost(ctx, req, vObj) +} + +func (s *endpointsMapper) HostToVirtual(ctx *synccontext.SyncContext, req types.NamespacedName, pObj client.Object) types.NamespacedName { + if req.Name == translate.VClusterName && req.Namespace == ctx.CurrentNamespace { + return types.NamespacedName{ + Name: "kubernetes", + Namespace: "default", + } + } + + return s.Mapper.HostToVirtual(ctx, req, pObj) } diff --git a/pkg/mappings/resources/events.go b/pkg/mappings/resources/events.go index 6dfedbca2..27c9fe5b5 100644 --- a/pkg/mappings/resources/events.go +++ b/pkg/mappings/resources/events.go @@ -30,6 +30,10 @@ func CreateEventsMapper(_ *synccontext.RegisterContext) (synccontext.Mapper, err type eventMapper struct{} +func (s *eventMapper) Migrate(_ *synccontext.RegisterContext, _ synccontext.Mapper) error { + return nil +} + func (s *eventMapper) GroupVersionKind() schema.GroupVersionKind { return corev1.SchemeGroupVersion.WithKind("Event") } diff --git a/pkg/mappings/resources/ingressclasses.go b/pkg/mappings/resources/ingressclasses.go deleted file mode 100644 index 37b7547ad..000000000 --- a/pkg/mappings/resources/ingressclasses.go +++ /dev/null @@ -1,11 +0,0 @@ -package resources - -import ( - "github.com/loft-sh/vcluster/pkg/mappings/generic" - "github.com/loft-sh/vcluster/pkg/syncer/synccontext" - networkingv1 "k8s.io/api/networking/v1" -) - -func CreateIngressClassesMapper(_ *synccontext.RegisterContext) (synccontext.Mapper, error) { - return generic.NewMirrorMapper(&networkingv1.IngressClass{}) -} diff --git a/pkg/mappings/resources/namespaces.go 
b/pkg/mappings/resources/namespaces.go index 55f405e59..a52966106 100644 --- a/pkg/mappings/resources/namespaces.go +++ b/pkg/mappings/resources/namespaces.go @@ -8,7 +8,7 @@ import ( ) func CreateNamespacesMapper(ctx *synccontext.RegisterContext) (synccontext.Mapper, error) { - return generic.NewMapper(ctx, &corev1.Namespace{}, func(vName, _ string) string { - return translate.Default.HostNamespace(vName) + return generic.NewMapper(ctx, &corev1.Namespace{}, func(ctx *synccontext.SyncContext, vName, _ string) string { + return translate.Default.HostNamespace(ctx, vName) }) } diff --git a/pkg/mappings/resources/networkpolicies.go b/pkg/mappings/resources/networkpolicies.go deleted file mode 100644 index 3d9a4c5cb..000000000 --- a/pkg/mappings/resources/networkpolicies.go +++ /dev/null @@ -1,12 +0,0 @@ -package resources - -import ( - "github.com/loft-sh/vcluster/pkg/mappings/generic" - "github.com/loft-sh/vcluster/pkg/syncer/synccontext" - "github.com/loft-sh/vcluster/pkg/util/translate" - networkingv1 "k8s.io/api/networking/v1" -) - -func CreateNetworkPoliciesMapper(ctx *synccontext.RegisterContext) (synccontext.Mapper, error) { - return generic.NewMapper(ctx, &networkingv1.NetworkPolicy{}, translate.Default.HostName) -} diff --git a/pkg/mappings/resources/persistentvolumes.go b/pkg/mappings/resources/persistentvolumes.go index 3f06e2b2d..ce6871426 100644 --- a/pkg/mappings/resources/persistentvolumes.go +++ b/pkg/mappings/resources/persistentvolumes.go @@ -10,7 +10,11 @@ import ( ) func CreatePersistentVolumesMapper(ctx *synccontext.RegisterContext) (synccontext.Mapper, error) { - return generic.NewMapperWithObject(ctx, &corev1.PersistentVolume{}, func(name, _ string, vObj client.Object) string { + if !ctx.Config.Sync.ToHost.PersistentVolumes.Enabled { + return generic.NewMirrorMapper(&corev1.PersistentVolume{}) + } + + return generic.NewMapperWithObject(ctx, &corev1.PersistentVolume{}, func(_ *synccontext.SyncContext, name, _ string, vObj client.Object) string { if vObj == nil { return name } diff --git a/pkg/mappings/resources/poddisruptionbudgets.go b/pkg/mappings/resources/poddisruptionbudgets.go deleted file mode 100644 index ee9764994..000000000 --- a/pkg/mappings/resources/poddisruptionbudgets.go +++ /dev/null @@ -1,12 +0,0 @@ -package resources - -import ( - "github.com/loft-sh/vcluster/pkg/mappings/generic" - "github.com/loft-sh/vcluster/pkg/syncer/synccontext" - "github.com/loft-sh/vcluster/pkg/util/translate" - policyv1 "k8s.io/api/policy/v1" -) - -func CreatePodDisruptionBudgetsMapper(ctx *synccontext.RegisterContext) (synccontext.Mapper, error) { - return generic.NewMapper(ctx, &policyv1.PodDisruptionBudget{}, translate.Default.HostName) -} diff --git a/pkg/mappings/resources/priorityclasses.go b/pkg/mappings/resources/priorityclasses.go index b9987cc27..3d70f77a8 100644 --- a/pkg/mappings/resources/priorityclasses.go +++ b/pkg/mappings/resources/priorityclasses.go @@ -12,7 +12,7 @@ func CreatePriorityClassesMapper(ctx *synccontext.RegisterContext) (synccontext. 
return generic.NewMirrorMapper(&schedulingv1.PriorityClass{}) } - return generic.NewMapper(ctx, &schedulingv1.PriorityClass{}, func(vName, _ string) string { + return generic.NewMapper(ctx, &schedulingv1.PriorityClass{}, func(_ *synccontext.SyncContext, vName, _ string) string { // we have to prefix with vCluster as system is reserved return translate.Default.HostNameCluster(vName) }) diff --git a/pkg/mappings/resources/register.go b/pkg/mappings/resources/register.go index 7311be27d..def3b73ef 100644 --- a/pkg/mappings/resources/register.go +++ b/pkg/mappings/resources/register.go @@ -16,21 +16,15 @@ func getMappers(ctx *synccontext.RegisterContext) []BuildMapper { return append([]BuildMapper{ CreateSecretsMapper, CreateConfigMapsMapper, - isEnabled(ctx.Config.Sync.FromHost.CSINodes.Enabled == "true", CreateCSINodesMapper), - isEnabled(ctx.Config.Sync.FromHost.CSIDrivers.Enabled == "true", CreateCSIDriversMapper), - isEnabled(ctx.Config.Sync.FromHost.CSIStorageCapacities.Enabled == "true", CreateCSIStorageCapacitiesMapper), CreateEndpointsMapper, CreateEventsMapper, - CreateIngressClassesMapper, - CreateIngressesMapper, - CreateNamespacesMapper, - CreateNetworkPoliciesMapper, + isEnabled(ctx.Config.Sync.ToHost.Ingresses.Enabled, CreateIngressesMapper), + isEnabled(ctx.Config.Experimental.MultiNamespaceMode.Enabled, CreateNamespacesMapper), CreateNodesMapper, CreatePersistentVolumeClaimsMapper, - CreateServiceAccountsMapper, + isEnabled(ctx.Config.Sync.ToHost.ServiceAccounts.Enabled, CreateServiceAccountsMapper), CreateServiceMapper, - CreatePriorityClassesMapper, - CreatePodDisruptionBudgetsMapper, + isEnabled(ctx.Config.Sync.ToHost.PriorityClasses.Enabled, CreatePriorityClassesMapper), CreatePersistentVolumesMapper, CreatePodsMapper, CreateStorageClassesMapper, diff --git a/pkg/mappings/resources/secrets.go b/pkg/mappings/resources/secrets.go index f7bc920cd..e205d2f59 100644 --- a/pkg/mappings/resources/secrets.go +++ b/pkg/mappings/resources/secrets.go @@ -1,12 +1,256 @@ package resources import ( + "fmt" + "strings" + + podtranslate "github.com/loft-sh/vcluster/pkg/controllers/resources/pods/token" + "github.com/loft-sh/vcluster/pkg/mappings" "github.com/loft-sh/vcluster/pkg/mappings/generic" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" "github.com/loft-sh/vcluster/pkg/util/translate" corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" ) func CreateSecretsMapper(ctx *synccontext.RegisterContext) (synccontext.Mapper, error) { - return generic.NewMapper(ctx, &corev1.Secret{}, translate.Default.HostName) + mapper, err := generic.NewMapper(ctx, &corev1.Secret{}, translate.Default.HostName) + if err != nil { + return nil, err + } + + return &secretsMapper{ + Mapper: mapper, + }, nil +} + +type secretsMapper struct { + synccontext.Mapper +} + +func (s *secretsMapper) Migrate(ctx *synccontext.RegisterContext, mapper synccontext.Mapper) error { + // make sure we migrate pods first + podsMapper, err := ctx.Mappings.ByGVK(mappings.Pods()) + if err != nil { + return err + } + + // pods mapper + err = podsMapper.Migrate(ctx, podsMapper) + if err != nil { + return fmt.Errorf("migrate pods: %w", err) + } + + // list all pods and map them by their used secrets + list := &corev1.PodList{} + err = ctx.VirtualManager.GetClient().List(ctx, list) + if err != nil { + return fmt.Errorf("error listing pods: %w", err) + } + + for _, val := range list.Items { + item := &val + + // this will try to translate and record the mapping + syncContext := ctx.ToSyncContext("migrate-pod") + for _, secret := range secretNamesFromPod(syncContext, item) { + pName := mapper.VirtualToHost(syncContext, secret, nil) + if pName.Name != "" { + err = ctx.Mappings.Store().RecordAndSaveReference(ctx, synccontext.NameMapping{ + GroupVersionKind: mappings.Secrets(), + VirtualName: secret, + HostName: pName, + }, synccontext.NameMapping{ + GroupVersionKind: mappings.Pods(), + VirtualName: types.NamespacedName{Name: item.Name, Namespace: item.Namespace}, + }) + if err != nil { + klog.FromContext(ctx).Error(err, "record secret reference on pod") + } + } + } + } + + // check if ingress sync is enabled + if ctx.Config.Sync.ToHost.Ingresses.Enabled { + // list all ingresses and map them by their used secrets + list := &networkingv1.IngressList{} + err = ctx.VirtualManager.GetClient().List(ctx, list) + if err != nil { + return fmt.Errorf("error listing ingresses: %w", err) + } + + for _, val := range list.Items { + item := &val + + // this will try to translate and record the mapping + syncContext := ctx.ToSyncContext("migrate-ingress") + for _, secret := range secretNamesFromIngress(syncContext, item) { + pName := mapper.VirtualToHost(syncContext, secret, nil) + if pName.Name != "" { + err = ctx.Mappings.Store().RecordAndSaveReference(ctx, synccontext.NameMapping{ + GroupVersionKind: mappings.Secrets(), + VirtualName: secret, + HostName: pName, + }, synccontext.NameMapping{ + GroupVersionKind: mappings.Pods(), + VirtualName: types.NamespacedName{Name: item.Name, Namespace: item.Namespace}, + }) + if err != nil { + klog.FromContext(ctx).Error(err, "record secret reference on ingress") + } + } + } + } + } + + return s.Mapper.Migrate(ctx, mapper) +} + +var TranslateAnnotations = map[string]bool{ + "nginx.ingress.kubernetes.io/auth-secret": true, + "nginx.ingress.kubernetes.io/auth-tls-secret": true, + "nginx.ingress.kubernetes.io/proxy-ssl-secret": true, +} + +func TranslateIngressAnnotations(ctx *synccontext.SyncContext, annotations map[string]string, ingressNamespace string) (map[string]string, []types.NamespacedName) { + foundSecrets := []types.NamespacedName{} + newAnnotations := map[string]string{} + for k, v := range annotations { + if !TranslateAnnotations[k] { + newAnnotations[k] = v + continue + } + + splitted := strings.Split(annotations[k], "/") + if len(splitted) == 1 { // If value is only "secret" + secret := splitted[0] + foundSecrets = append(foundSecrets, types.NamespacedName{Namespace: ingressNamespace, Name: secret}) + newAnnotations[k] = mappings.VirtualToHostName(ctx, secret, ingressNamespace, mappings.Secrets()) + } else if len(splitted) == 2 { // If value is "namespace/secret" + namespace := splitted[0] + secret := splitted[1] + foundSecrets = append(foundSecrets, types.NamespacedName{Namespace: namespace, Name: secret}) + pName := mappings.VirtualToHost(ctx, secret, namespace, mappings.Secrets()) + newAnnotations[k] = pName.Namespace + "/" + pName.Name + } else { + newAnnotations[k] = v + } + } + + return newAnnotations, foundSecrets +} + +func secretNamesFromIngress(ctx *synccontext.SyncContext, ingress *networkingv1.Ingress) []types.NamespacedName { + secrets := []types.NamespacedName{} + _, extraSecrets := TranslateIngressAnnotations(ctx, ingress.Annotations, ingress.Namespace) + secrets = append(secrets, extraSecrets...)
+ for _, tls := range ingress.Spec.TLS { + if tls.SecretName != "" { + secrets = append(secrets, types.NamespacedName{Namespace: ingress.Namespace, Name: tls.SecretName}) + } + } + return secrets +} + +func secretNamesFromPod(ctx *synccontext.SyncContext, pod *corev1.Pod) []types.NamespacedName { + secrets := []types.NamespacedName{} + for _, c := range pod.Spec.Containers { + secrets = append(secrets, secretNamesFromContainer(pod.Namespace, &c)...) + } + for _, c := range pod.Spec.InitContainers { + secrets = append(secrets, secretNamesFromContainer(pod.Namespace, &c)...) + } + for _, c := range pod.Spec.EphemeralContainers { + secrets = append(secrets, secretNamesFromEphemeralContainer(pod.Namespace, &c)...) + } + for i := range pod.Spec.ImagePullSecrets { + secrets = append(secrets, types.NamespacedName{Namespace: pod.Namespace, Name: pod.Spec.ImagePullSecrets[i].Name}) + } + secrets = append(secrets, secretNamesFromVolumes(ctx, pod)...) + return secrets +} + +func secretNamesFromVolumes(ctx *synccontext.SyncContext, pod *corev1.Pod) []types.NamespacedName { + secrets := []types.NamespacedName{} + for i := range pod.Spec.Volumes { + if pod.Spec.Volumes[i].Secret != nil { + secrets = append(secrets, types.NamespacedName{Namespace: pod.Namespace, Name: pod.Spec.Volumes[i].Secret.SecretName}) + } + if pod.Spec.Volumes[i].Projected != nil { + for j := range pod.Spec.Volumes[i].Projected.Sources { + if pod.Spec.Volumes[i].Projected.Sources[j].Secret != nil { + secrets = append(secrets, types.NamespacedName{Namespace: pod.Namespace, Name: pod.Spec.Volumes[i].Projected.Sources[j].Secret.Name}) + } + + // check if projected volume source is a serviceaccount and in such a case + // we re-write it as a secret too, handle accordingly + if ctx != nil && ctx.Config != nil && ctx.Config.Sync.ToHost.Pods.UseSecretsForSATokens { + if pod.Spec.Volumes[i].Projected.Sources[j].ServiceAccountToken != nil { + secrets = append(secrets, podtranslate.SecretNameFromPodName(ctx, pod.Name, pod.Namespace)) + } + } + } + } + if pod.Spec.Volumes[i].ISCSI != nil && pod.Spec.Volumes[i].ISCSI.SecretRef != nil { + secrets = append(secrets, types.NamespacedName{Namespace: pod.Namespace, Name: pod.Spec.Volumes[i].ISCSI.SecretRef.Name}) + } + if pod.Spec.Volumes[i].RBD != nil && pod.Spec.Volumes[i].RBD.SecretRef != nil { + secrets = append(secrets, types.NamespacedName{Namespace: pod.Namespace, Name: pod.Spec.Volumes[i].RBD.SecretRef.Name}) + } + if pod.Spec.Volumes[i].FlexVolume != nil && pod.Spec.Volumes[i].FlexVolume.SecretRef != nil { + secrets = append(secrets, types.NamespacedName{Namespace: pod.Namespace, Name: pod.Spec.Volumes[i].FlexVolume.SecretRef.Name}) + } + if pod.Spec.Volumes[i].Cinder != nil && pod.Spec.Volumes[i].Cinder.SecretRef != nil { + secrets = append(secrets, types.NamespacedName{Namespace: pod.Namespace, Name: pod.Spec.Volumes[i].Cinder.SecretRef.Name}) + } + if pod.Spec.Volumes[i].CephFS != nil && pod.Spec.Volumes[i].CephFS.SecretRef != nil { + secrets = append(secrets, types.NamespacedName{Namespace: pod.Namespace, Name: pod.Spec.Volumes[i].CephFS.SecretRef.Name}) + } + if pod.Spec.Volumes[i].AzureFile != nil && pod.Spec.Volumes[i].AzureFile.SecretName != "" { + secrets = append(secrets, types.NamespacedName{Namespace: pod.Namespace, Name: pod.Spec.Volumes[i].AzureFile.SecretName}) + } + if pod.Spec.Volumes[i].ScaleIO != nil && pod.Spec.Volumes[i].ScaleIO.SecretRef != nil { + secrets = append(secrets, types.NamespacedName{Namespace: pod.Namespace, Name: 
pod.Spec.Volumes[i].ScaleIO.SecretRef.Name}) + } + if pod.Spec.Volumes[i].StorageOS != nil && pod.Spec.Volumes[i].StorageOS.SecretRef != nil { + secrets = append(secrets, types.NamespacedName{Namespace: pod.Namespace, Name: pod.Spec.Volumes[i].StorageOS.SecretRef.Name}) + } + if pod.Spec.Volumes[i].CSI != nil && pod.Spec.Volumes[i].CSI.NodePublishSecretRef != nil { + secrets = append(secrets, types.NamespacedName{Namespace: pod.Namespace, Name: pod.Spec.Volumes[i].CSI.NodePublishSecretRef.Name}) + } + } + return secrets +} + +func secretNamesFromContainer(namespace string, container *corev1.Container) []types.NamespacedName { + secrets := []types.NamespacedName{} + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name != "" { + secrets = append(secrets, types.NamespacedName{Namespace: namespace, Name: env.ValueFrom.SecretKeyRef.Name}) + } + } + for _, from := range container.EnvFrom { + if from.SecretRef != nil && from.SecretRef.Name != "" { + secrets = append(secrets, types.NamespacedName{Namespace: namespace, Name: from.SecretRef.Name}) + } + } + return secrets +} + +func secretNamesFromEphemeralContainer(namespace string, container *corev1.EphemeralContainer) []types.NamespacedName { + secrets := []types.NamespacedName{} + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name != "" { + secrets = append(secrets, types.NamespacedName{Namespace: namespace, Name: env.ValueFrom.SecretKeyRef.Name}) + } + } + for _, from := range container.EnvFrom { + if from.SecretRef != nil && from.SecretRef.Name != "" { + secrets = append(secrets, types.NamespacedName{Namespace: namespace, Name: from.SecretRef.Name}) + } + } + return secrets } diff --git a/pkg/mappings/resources/secrets_test.go b/pkg/mappings/resources/secrets_test.go new file mode 100644 index 000000000..5eb6d6384 --- /dev/null +++ b/pkg/mappings/resources/secrets_test.go @@ -0,0 +1,76 @@ +package resources + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestSecretMappings(t *testing.T) { + // test ingress + ingress := &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: networkingv1.IngressSpec{ + TLS: []networkingv1.IngressTLS{ + { + SecretName: "a", + }, + { + SecretName: "b", + }, + }, + }, + } + + // test ingress mapping + requests := secretNamesFromIngress(nil, ingress) + if len(requests) != 2 || requests[0].Name != "a" || requests[0].Namespace != "test" || requests[1].Name != "b" || requests[1].Namespace != "test" { + t.Fatalf("Wrong secret requests returned: %#+v", requests) + } + + // test pod + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + Env: []corev1.EnvVar{ + { + Name: "test", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "a", + }, + }, + }, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "test", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "b", + }, + }, + }, + }, + }, + } + requests = secretNamesFromPod(nil, pod) + if len(requests) != 2 || requests[0].Name != "a" || requests[0].Namespace != "test" || requests[1].Name != "b" || 
requests[1].Namespace != "test" { + t.Fatalf("Wrong pod requests returned: %#+v", requests) + } +} diff --git a/pkg/mappings/resources/services.go b/pkg/mappings/resources/services.go index cc2a1ad93..16be9bb39 100644 --- a/pkg/mappings/resources/services.go +++ b/pkg/mappings/resources/services.go @@ -5,8 +5,43 @@ import ( "github.com/loft-sh/vcluster/pkg/syncer/synccontext" "github.com/loft-sh/vcluster/pkg/util/translate" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" ) func CreateServiceMapper(ctx *synccontext.RegisterContext) (synccontext.Mapper, error) { - return generic.NewMapper(ctx, &corev1.Service{}, translate.Default.HostName) + mapper, err := generic.NewMapper(ctx, &corev1.Service{}, translate.Default.HostName) + if err != nil { + return nil, err + } + + return &servicesMapper{ + Mapper: mapper, + }, nil +} + +type servicesMapper struct { + synccontext.Mapper +} + +func (s *servicesMapper) VirtualToHost(ctx *synccontext.SyncContext, req types.NamespacedName, vObj client.Object) types.NamespacedName { + if req.Name == "kubernetes" && req.Namespace == "default" { + return types.NamespacedName{ + Name: translate.VClusterName, + Namespace: ctx.CurrentNamespace, + } + } + + return s.Mapper.VirtualToHost(ctx, req, vObj) +} + +func (s *servicesMapper) HostToVirtual(ctx *synccontext.SyncContext, req types.NamespacedName, pObj client.Object) types.NamespacedName { + if req.Name == translate.VClusterName && req.Namespace == ctx.CurrentNamespace { + return types.NamespacedName{ + Name: "kubernetes", + Namespace: "default", + } + } + + return s.Mapper.HostToVirtual(ctx, req, pObj) } diff --git a/pkg/mappings/resources/storageclasses.go b/pkg/mappings/resources/storageclasses.go index 8ec975e9d..87dd246f3 100644 --- a/pkg/mappings/resources/storageclasses.go +++ b/pkg/mappings/resources/storageclasses.go @@ -12,7 +12,7 @@ func CreateStorageClassesMapper(ctx *synccontext.RegisterContext) (synccontext.M return generic.NewMirrorMapper(&storagev1.StorageClass{}) } - return generic.NewMapper(ctx, &storagev1.StorageClass{}, func(name, _ string) string { + return generic.NewMapper(ctx, &storagev1.StorageClass{}, func(_ *synccontext.SyncContext, name, _ string) string { return translate.Default.HostNameCluster(name) }) } diff --git a/pkg/mappings/resources/volumesnapshotcontents.go b/pkg/mappings/resources/volumesnapshotcontents.go index ac9a8b8f1..fa3b4393e 100644 --- a/pkg/mappings/resources/volumesnapshotcontents.go +++ b/pkg/mappings/resources/volumesnapshotcontents.go @@ -25,7 +25,7 @@ func CreateVolumeSnapshotContentsMapper(ctx *synccontext.RegisterContext) (syncc return nil, err } - return generic.NewMapperWithObject(ctx, &volumesnapshotv1.VolumeSnapshotContent{}, func(name, _ string, vObj client.Object) string { + return generic.NewMapperWithObject(ctx, &volumesnapshotv1.VolumeSnapshotContent{}, func(_ *synccontext.SyncContext, name, _ string, vObj client.Object) string { if vObj == nil { return name } diff --git a/pkg/mappings/store/backend.go b/pkg/mappings/store/backend.go new file mode 100644 index 000000000..ac35fea75 --- /dev/null +++ b/pkg/mappings/store/backend.go @@ -0,0 +1,35 @@ +package store + +import "context" + +type Backend interface { + // List retrieves all saved mappings + List(ctx context.Context) ([]*Mapping, error) + + // Watch lists and watches for new mappings + Watch(ctx context.Context) <-chan BackendWatchResponse + + // Save saves the given mapping + Save(ctx context.Context, mapping *Mapping) error + + // 
Delete removes the given mapping + Delete(ctx context.Context, mapping *Mapping) error +} + +type BackendWatchResponse struct { + Events []*BackendWatchEvent + + Err error +} + +type BackendWatchEvent struct { + Type BackendWatchEventType + Mapping *Mapping +} + +type BackendWatchEventType string + +const ( + BackendWatchEventTypeUpdate BackendWatchEventType = "Update" + BackendWatchEventTypeDelete BackendWatchEventType = "Delete" +) diff --git a/pkg/mappings/store/etcd_backend.go b/pkg/mappings/store/etcd_backend.go new file mode 100644 index 000000000..c553a6775 --- /dev/null +++ b/pkg/mappings/store/etcd_backend.go @@ -0,0 +1,114 @@ +package store + +import ( + "context" + "encoding/json" + "fmt" + "path" + "strings" + + "github.com/loft-sh/vcluster/pkg/etcd" + "go.etcd.io/etcd/api/v3/mvccpb" + "k8s.io/klog/v2" +) + +var mappingsPrefix = "/vcluster/mappings/" + +func NewEtcdBackend(etcdClient etcd.Client) Backend { + return &etcdBackend{ + etcdClient: etcdClient, + } +} + +type etcdBackend struct { + etcdClient etcd.Client +} + +func (m *etcdBackend) List(ctx context.Context) ([]*Mapping, error) { + mappings, err := m.etcdClient.List(ctx, mappingsPrefix, 0) + if err != nil { + return nil, fmt.Errorf("list mappings: %w", err) + } + + retMappings := make([]*Mapping, 0, len(mappings)) + for _, kv := range mappings { + retMapping := &Mapping{} + err = json.Unmarshal(kv.Data, retMapping) + if err != nil { + return nil, fmt.Errorf("parse mapping %s: %w", string(kv.Key), err) + } + + retMappings = append(retMappings, retMapping) + } + + return retMappings, nil +} + +func (m *etcdBackend) Watch(ctx context.Context) <-chan BackendWatchResponse { + responseChan := make(chan BackendWatchResponse) + watchChan := m.etcdClient.Watch(ctx, mappingsPrefix, 0) + go func() { + defer close(responseChan) + + for event := range watchChan { + if event.Canceled { + responseChan <- BackendWatchResponse{ + Err: event.Err(), + } + } else if len(event.Events) > 0 { + retEvents := make([]*BackendWatchEvent, 0, len(event.Events)) + for _, singleEvent := range event.Events { + var eventType BackendWatchEventType + if singleEvent.Type == mvccpb.PUT { + eventType = BackendWatchEventTypeUpdate + } else if singleEvent.Type == mvccpb.DELETE { + eventType = BackendWatchEventTypeDelete + } else { + continue + } + + // parse mapping + retMapping := &Mapping{} + err := json.Unmarshal(singleEvent.Kv.Value, retMapping) + if err != nil { + klog.FromContext(ctx).Info("Error decoding event", "key", string(singleEvent.Kv.Key), "error", err.Error()) + continue + } + + retEvents = append(retEvents, &BackendWatchEvent{ + Type: eventType, + Mapping: retMapping, + }) + } + + responseChan <- BackendWatchResponse{ + Events: retEvents, + } + } + } + }() + + return responseChan +} + +func (m *etcdBackend) Save(ctx context.Context, mapping *Mapping) error { + mappingBytes, err := json.Marshal(mapping) + if err != nil { + return err + } + + return m.etcdClient.Put(ctx, mappingToKey(mapping), mappingBytes) +} + +func (m *etcdBackend) Delete(ctx context.Context, mapping *Mapping) error { + return m.etcdClient.Delete(ctx, mappingToKey(mapping), 0) +} + +func mappingToKey(mapping *Mapping) string { + nameNamespace := mapping.VirtualName.Name + if mapping.VirtualName.Namespace != "" { + nameNamespace = mapping.VirtualName.Namespace + "/" + nameNamespace + } + + return path.Join(mappingsPrefix, mapping.GroupVersion().String(), strings.ToLower(mapping.Kind), nameNamespace) +} diff --git a/pkg/mappings/store/mapping.go b/pkg/mappings/store/mapping.go new file
mode 100644 index 000000000..a1686f071 --- /dev/null +++ b/pkg/mappings/store/mapping.go @@ -0,0 +1,17 @@ +package store + +import "github.com/loft-sh/vcluster/pkg/syncer/synccontext" + +type Mapping struct { + synccontext.NameMapping `json:",inline"` + + Labels []synccontext.LabelMapping `json:"labels,omitempty"` + LabelsCluster []synccontext.LabelMapping `json:"labelsCluster,omitempty"` + References []synccontext.NameMapping `json:"references,omitempty"` + + changed bool `json:"-"` +} + +func (m Mapping) String() string { + return m.NameMapping.String() +} diff --git a/pkg/mappings/store/memory_backend.go b/pkg/mappings/store/memory_backend.go new file mode 100644 index 000000000..a9589abc2 --- /dev/null +++ b/pkg/mappings/store/memory_backend.go @@ -0,0 +1,109 @@ +package store + +import ( + "context" + "sync" + + "github.com/loft-sh/vcluster/pkg/syncer/synccontext" +) + +func NewMemoryBackend(mappings ...*Mapping) Backend { + internalMap := map[synccontext.NameMapping]*Mapping{} + for _, m := range mappings { + internalMap[m.NameMapping] = m + } + + return &memoryBackend{ + mappings: internalMap, + } +} + +type memoryBackend struct { + m sync.Mutex + + mappings map[synccontext.NameMapping]*Mapping + + watches []chan BackendWatchResponse +} + +func (m *memoryBackend) List(_ context.Context) ([]*Mapping, error) { + m.m.Lock() + defer m.m.Unlock() + + retMappings := make([]*Mapping, 0, len(m.mappings)) + for _, mapping := range m.mappings { + retMappings = append(retMappings, mapping) + } + + return retMappings, nil +} + +func (m *memoryBackend) Watch(ctx context.Context) <-chan BackendWatchResponse { + m.m.Lock() + defer m.m.Unlock() + + watchChan := make(chan BackendWatchResponse) + m.watches = append(m.watches, watchChan) + go func() { + <-ctx.Done() + + m.m.Lock() + defer m.m.Unlock() + + // remove chan + close(watchChan) + + // remove from slice + newWatches := make([]chan BackendWatchResponse, 0, len(m.watches)-1) + for _, watch := range m.watches { + if watch != watchChan { + newWatches = append(newWatches, watch) + } + } + m.watches = newWatches + }() + + return watchChan +} + +func (m *memoryBackend) Save(_ context.Context, mapping *Mapping) error { + m.m.Lock() + defer m.m.Unlock() + + m.mappings[mapping.NameMapping] = mapping + for _, watchChan := range m.watches { + go func(watchChan chan BackendWatchResponse) { + watchChan <- BackendWatchResponse{ + Events: []*BackendWatchEvent{ + { + Type: BackendWatchEventTypeUpdate, + Mapping: mapping, + }, + }, + } + }(watchChan) + } + + return nil +} + +func (m *memoryBackend) Delete(_ context.Context, mapping *Mapping) error { + m.m.Lock() + defer m.m.Unlock() + + delete(m.mappings, mapping.NameMapping) + for _, watchChan := range m.watches { + go func(watchChan chan BackendWatchResponse) { + watchChan <- BackendWatchResponse{ + Events: []*BackendWatchEvent{ + { + Type: BackendWatchEventTypeDelete, + Mapping: mapping, + }, + }, + } + }(watchChan) + } + + return nil +} diff --git a/pkg/mappings/store/store.go b/pkg/mappings/store/store.go new file mode 100644 index 000000000..9129c0129 --- /dev/null +++ b/pkg/mappings/store/store.go @@ -0,0 +1,658 @@ +package store + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/loft-sh/vcluster/pkg/scheme" + "github.com/loft-sh/vcluster/pkg/syncer/synccontext" + kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + 
"k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +const GarbageCollectionInterval = time.Minute * 3 + +func NewStore(ctx context.Context, cachedVirtualClient, cachedHostClient client.Client, backend Backend) (synccontext.MappingsStore, error) { + store := &Store{ + backend: backend, + + cachedVirtualClient: cachedVirtualClient, + cachedHostClient: cachedHostClient, + + mappings: make(map[synccontext.NameMapping]*Mapping), + + hostToVirtualName: make(map[synccontext.Object]lookupName), + virtualToHostName: make(map[synccontext.Object]lookupName), + hostToVirtualLabel: make(map[string]lookupLabel), + hostToVirtualLabelCluster: make(map[string]lookupLabel), + + watches: make(map[schema.GroupVersionKind][]*watcher), + } + + // retrieve initial mappings from backend + err := store.start(ctx) + if err != nil { + return nil, fmt.Errorf("start store: %w", err) + } + + return store, nil +} + +type Store struct { + m sync.RWMutex + + cachedVirtualClient client.Client + cachedHostClient client.Client + + backend Backend + mappings map[synccontext.NameMapping]*Mapping + + // maps Object -> Object + hostToVirtualName map[synccontext.Object]lookupName + virtualToHostName map[synccontext.Object]lookupName + + // maps Label -> Label + hostToVirtualLabel map[string]lookupLabel + hostToVirtualLabelCluster map[string]lookupLabel + + watches map[schema.GroupVersionKind][]*watcher +} + +type lookupName struct { + Object synccontext.Object + + Mappings []*Mapping +} + +type lookupLabel struct { + Label string + + Mappings []*Mapping +} + +func (s *Store) Watch(gvk schema.GroupVersionKind, addQueueFn synccontext.AddQueueFunc) source.Source { + s.m.Lock() + defer s.m.Unlock() + + w := &watcher{ + addQueueFn: addQueueFn, + } + + s.watches[gvk] = append(s.watches[gvk], w) + return w +} + +func (s *Store) StartGarbageCollection(ctx context.Context) { + go func() { + wait.Until(func() { + s.garbageCollectMappings(ctx) + }, GarbageCollectionInterval, ctx.Done()) + }() +} + +func (s *Store) garbageCollectMappings(ctx context.Context) { + s.m.Lock() + defer s.m.Unlock() + + startTime := time.Now() + klog.FromContext(ctx).V(1).Info("Start mappings garbage collection") + defer func() { + klog.FromContext(ctx).V(1).Info("Garbage collection done", "took", time.Since(startTime).String()) + }() + + for _, mapping := range s.mappings { + err := s.garbageCollectMapping(ctx, mapping) + if err != nil { + klog.FromContext(ctx).Error(err, "Garbage collect mapping", "mapping", mapping.String()) + } + } +} + +func (s *Store) garbageCollectMapping(ctx context.Context, mapping *Mapping) error { + // build the object we can query + obj, err := scheme.Scheme.New(mapping.GroupVersionKind) + if err != nil { + if !runtime.IsNotRegisteredError(err) { + return fmt.Errorf("create object: %w", err) + } + + obj = &unstructured.Unstructured{} + } + + uList, ok := obj.(*unstructured.Unstructured) + if ok { + uList.SetKind(mapping.GroupVersionKind.Kind) + uList.SetAPIVersion(mapping.GroupVersionKind.GroupVersion().String()) + } + + // check if virtual object exists + virtualExists := true + err = s.cachedVirtualClient.Get(ctx, types.NamespacedName{Name: mapping.VirtualName.Name, Namespace: mapping.VirtualName.Namespace}, obj.DeepCopyObject().(client.Object)) + if err != nil { + if !kerrors.IsNotFound(err) { + // TODO: filter out other allowed errors here could be Forbidden, Type not found etc. 
+      klog.FromContext(ctx).Info("Error retrieving virtual object", "virtualObject", mapping.Virtual().String(), "error", err.Error())
+    }
+
+    virtualExists = false
+  }
+
+  // check if host object exists
+  hostExists := true
+  err = s.cachedHostClient.Get(ctx, types.NamespacedName{Name: mapping.HostName.Name, Namespace: mapping.HostName.Namespace}, obj.DeepCopyObject().(client.Object))
+  if err != nil {
+    if !kerrors.IsNotFound(err) {
+      // TODO: filter out other allowed errors here could be Forbidden, Type not found etc.
+      klog.FromContext(ctx).Info("Error retrieving host object", "hostObject", mapping.Host().String(), "error", err.Error())
+    }
+
+    hostExists = false
+  }
+
+  // remove the mapping only if both the virtual and the host object are gone
+  if virtualExists || hostExists {
+    return nil
+  }
+
+  // remove mapping from backend
+  err = s.backend.Delete(ctx, mapping)
+  if err != nil {
+    return fmt.Errorf("remove mapping from backend: %w", err)
+  }
+
+  klog.FromContext(ctx).Info("Remove mapping as both virtual and host were not found", "mapping", mapping.String())
+  s.removeMapping(mapping)
+  return nil
+}
+
+func (s *Store) start(ctx context.Context) error {
+  s.m.Lock()
+  defer s.m.Unlock()
+
+  mappings, err := s.backend.List(ctx)
+  if err != nil {
+    return fmt.Errorf("list mappings: %w", err)
+  }
+
+  for _, mapping := range mappings {
+    oldMapping, ok := s.mappings[mapping.NameMapping]
+    if ok {
+      s.removeMapping(oldMapping)
+    }
+
+    klog.FromContext(ctx).V(1).Info("Add mapping", "mapping", mapping.String())
+    s.addMapping(mapping)
+  }
+
+  // watch the backend for changes and keep the in-memory lookup maps in sync
+  go func() {
+    wait.Until(func() {
+      for watchEvent := range s.backend.Watch(ctx) {
+        s.handleEvent(ctx, watchEvent)
+      }
+
+      klog.FromContext(ctx).Info("mapping store watch has ended")
+    }, time.Second, ctx.Done())
+  }()
+
+  return nil
+}
+
+func (s *Store) handleEvent(ctx context.Context, watchEvent BackendWatchResponse) {
+  s.m.Lock()
+  defer s.m.Unlock()
+
+  if watchEvent.Err != nil {
+    klog.FromContext(ctx).Error(watchEvent.Err, "watch err in mappings store")
+    return
+  }
+
+  for _, event := range watchEvent.Events {
+    klog.FromContext(ctx).V(1).Info("mapping store received event", "type", event.Type, "mapping", event.Mapping.String())
+
+    // remove mapping in any case
+    oldMapping, ok := s.mappings[event.Mapping.NameMapping]
+    if ok {
+      s.removeMapping(oldMapping)
+    }
+
+    // re-add mapping if it's an update
+    if event.Type == BackendWatchEventTypeUpdate {
+      s.addMapping(event.Mapping)
+    }
+  }
+}
+
+func (s *Store) HostToVirtualLabel(_ context.Context, pLabel string) (string, bool) {
+  s.m.RLock()
+  defer s.m.RUnlock()
+
+  vObjLookup, ok := s.hostToVirtualLabel[pLabel]
+  return vObjLookup.Label, ok
+}
+
+func (s *Store) HostToVirtualLabelCluster(_ context.Context, pLabel string) (string, bool) {
+  s.m.RLock()
+  defer s.m.RUnlock()
+
+  vObjLookup, ok := s.hostToVirtualLabelCluster[pLabel]
+  return vObjLookup.Label, ok
+}
+
+func (s *Store) HasHostObject(ctx context.Context, pObj synccontext.Object) bool {
+  _, ok := s.HostToVirtualName(ctx, pObj)
+  return ok
+}
+
+func (s *Store) HostToVirtualName(_ context.Context, pObj synccontext.Object) (types.NamespacedName, bool) {
+  s.m.RLock()
+  defer s.m.RUnlock()
+
+  vObjLookup, ok := s.hostToVirtualName[pObj]
+  return vObjLookup.Object.NamespacedName, ok
+}
+
+func (s *Store) HasVirtualObject(ctx context.Context, vObj synccontext.Object) bool {
+  _, ok := s.VirtualToHostName(ctx, vObj)
+  return ok
+}
+
+func (s *Store) VirtualToHostName(_ context.Context, vObj synccontext.Object) (types.NamespacedName, bool) {
+  s.m.RLock()
+  defer s.m.RUnlock()
+
+  pObjLookup, ok
:= s.virtualToHostName[vObj] + return pObjLookup.Object.NamespacedName, ok +} + +func (s *Store) RecordAndSaveReference(ctx context.Context, nameMapping, belongsTo synccontext.NameMapping) error { + err := s.RecordReference(ctx, nameMapping, belongsTo) + if err != nil { + return err + } + + return s.SaveMapping(ctx, belongsTo) +} + +func (s *Store) RecordReference(ctx context.Context, nameMapping, belongsTo synccontext.NameMapping) error { + // we don't record incomplete mappings + if nameMapping.Host().Empty() || nameMapping.Virtual().Empty() { + return nil + } + + s.m.Lock() + defer s.m.Unlock() + + // check if there is already a conflicting mapping + err := s.checkNameConflict(nameMapping) + if err != nil { + return err + } + + // check if there is already a mapping + mapping, ok := s.findMapping(belongsTo) + if !ok { + s.createMapping(ctx, nameMapping, belongsTo) + return nil + } + + // check if we need to add mapping + if mapping.NameMapping.Equals(nameMapping) { + return nil + } + + // check if reference already exists + for _, reference := range mapping.References { + if reference.Equals(nameMapping) { + return nil + } + } + + // add mapping + mapping.changed = true + klog.FromContext(ctx).Info("Add name mapping", "host", nameMapping.Host().String(), "virtual", nameMapping.Virtual().String(), "owner", mapping.Virtual().String()) + mapping.References = append(mapping.References, nameMapping) + + // add to lookup maps + s.addNameToMaps(mapping, nameMapping.Virtual(), nameMapping.Host()) + dispatchAll(s.watches[nameMapping.GroupVersionKind], nameMapping) + return nil +} + +func (s *Store) RecordLabel(ctx context.Context, labelMapping synccontext.LabelMapping, belongsTo synccontext.NameMapping) error { + // we don't record incomplete mappings + if labelMapping.Host == "" || labelMapping.Virtual == "" { + return nil + } + + s.m.Lock() + defer s.m.Unlock() + + // check if there is already a conflicting mapping + err := s.checkLabelConflict(labelMapping) + if err != nil { + return err + } + + // check if there is already a mapping + mapping, ok := s.findMapping(belongsTo) + if !ok { + return nil + } + + // check if reference already exists + for _, label := range mapping.Labels { + if label.Equals(labelMapping) { + return nil + } + } + + // add mapping + mapping.changed = true + klog.FromContext(ctx).V(1).Info("Add label mapping", "host", labelMapping.Host, "virtual", labelMapping.Virtual, "owner", mapping.Virtual().String()) + mapping.Labels = append(mapping.Labels, labelMapping) + + // add to lookup maps + s.addLabelToMaps(mapping, labelMapping.Virtual, labelMapping.Host) + return nil +} + +func (s *Store) RecordLabelCluster(ctx context.Context, labelMapping synccontext.LabelMapping, belongsTo synccontext.NameMapping) error { + // we don't record incomplete mappings + if labelMapping.Host == "" || labelMapping.Virtual == "" { + return nil + } + + s.m.Lock() + defer s.m.Unlock() + + // check if there is already a conflicting mapping + err := s.checkLabelClusterConflict(labelMapping) + if err != nil { + return err + } + + // check if there is already a mapping + mapping, ok := s.findMapping(belongsTo) + if !ok { + return nil + } + + // check if reference already exists + for _, label := range mapping.LabelsCluster { + if label.Equals(labelMapping) { + return nil + } + } + + // add mapping + mapping.changed = true + klog.FromContext(ctx).V(1).Info("Add cluster-scoped label mapping", "host", labelMapping.Host, "virtual", labelMapping.Virtual, "owner", mapping.Virtual().String()) + 
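// the label is kept on the owning mapping so it gets persisted together with it on SaveMapping
+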
mapping.LabelsCluster = append(mapping.LabelsCluster, labelMapping) + + // add to lookup maps + s.addLabelClusterToMaps(mapping, labelMapping.Virtual, labelMapping.Host) + return nil +} + +func (s *Store) SaveMapping(ctx context.Context, nameMapping synccontext.NameMapping) error { + // we ignore empty mappings here + if nameMapping.Empty() { + return nil + } + + s.m.Lock() + defer s.m.Unlock() + + // check if there is already a mapping + mapping, ok := s.findMapping(nameMapping) + if !ok { + return nil + } else if !mapping.changed { + return nil + } + + // save mapping + klog.FromContext(ctx).Info("Save object mappings in store", "mapping", mapping.String()) + err := s.backend.Save(ctx, mapping) + if err != nil { + return fmt.Errorf("save mapping %s: %w", mapping.NameMapping.String(), err) + } + + mapping.changed = false + return nil +} + +func (s *Store) ReferencesTo(ctx context.Context, vObj synccontext.Object) []synccontext.NameMapping { + // we ignore empty mappings here + if vObj.Empty() { + return nil + } + + s.m.Lock() + defer s.m.Unlock() + + hostNameLookup, ok := s.virtualToHostName[vObj] + if !ok { + return nil + } + + // loop over references and exclude owner mapping + nameMapping := synccontext.NameMapping{ + GroupVersionKind: vObj.GroupVersionKind, + VirtualName: vObj.NamespacedName, + HostName: hostNameLookup.Object.NamespacedName, + } + retReferences := []synccontext.NameMapping{} + for _, reference := range hostNameLookup.Mappings { + if reference.Equals(nameMapping) { + continue + } + + retReferences = append(retReferences, reference.NameMapping) + } + + klog.FromContext(ctx).V(1).Info("Found references for object", "object", vObj.String(), "references", len(retReferences)) + return retReferences +} + +func (s *Store) findMapping(mapping synccontext.NameMapping) (*Mapping, bool) { + // check if the mapping is empty + if mapping.Empty() { + return nil, false + } + + // get objects + vObj, pObj := mapping.Virtual(), mapping.Host() + if vObj.Empty() { + // try to find by pObj + vObjLookup, ok := s.hostToVirtualName[pObj] + if !ok { + return nil, false + } + + vObj = vObjLookup.Object + } else if pObj.Empty() { + // try to find by vObj + pObjLookup, ok := s.virtualToHostName[vObj] + if !ok { + return nil, false + } + + pObj = pObjLookup.Object + } + + // just check for the mapping + retMapping, ok := s.mappings[synccontext.NameMapping{ + GroupVersionKind: mapping.GroupVersionKind, + VirtualName: vObj.NamespacedName, + HostName: pObj.NamespacedName, + }] + return retMapping, ok +} + +func (s *Store) createMapping(ctx context.Context, nameMapping, belongsTo synccontext.NameMapping) { + // check if we should add a new mapping + if belongsTo.Empty() { + return + } + + // check what object is empty + pObj, vObj := belongsTo.Host(), belongsTo.Virtual() + if vObj.Empty() || pObj.Empty() { + // check if the name mapping matches + if nameMapping.GroupVersionKind.String() != belongsTo.GroupVersionKind.String() { + klog.FromContext(ctx).Info("Cannot create name mapping, because owner mapping is incomplete and does not match group version kind", "owner", belongsTo.String(), "nameMapping", nameMapping.String()) + return + } + + // try to find missing virtual or host object + if vObj.Empty() && pObj.Equals(nameMapping.Host()) { + vObj = nameMapping.Virtual() + } else if pObj.Empty() && vObj.Equals(nameMapping.Virtual()) { + pObj = nameMapping.Host() + } else { + return + } + } + + // create new mapping + newMapping := &Mapping{ + NameMapping: synccontext.NameMapping{ + GroupVersionKind: 
belongsTo.GroupVersionKind, + VirtualName: vObj.NamespacedName, + HostName: pObj.NamespacedName, + }, + + changed: true, + } + + // add to lookup maps + klog.FromContext(ctx).Info("Create name mapping", "host", newMapping.NameMapping.Host().String(), "virtual", newMapping.NameMapping.Virtual().String(), "nameMapping", nameMapping.String(), "belongsTo", belongsTo.String()) + s.addMapping(newMapping) +} + +func (s *Store) checkNameConflict(nameMapping synccontext.NameMapping) error { + // check if the mapping is conflicting + vName, ok := s.hostToVirtualName[nameMapping.Host()] + if ok && !vName.Object.Equals(nameMapping.Virtual()) { + return fmt.Errorf("there is already another name mapping %s -> %s that conflicts with %s -> %s", nameMapping.Host().String(), vName.Object.String(), nameMapping.Host().String(), nameMapping.Virtual().String()) + } + + // check the other way around + pName, ok := s.virtualToHostName[nameMapping.Virtual()] + if ok && !pName.Object.Equals(nameMapping.Host()) { + return fmt.Errorf("there is already another name mapping %s -> %s that conflicts with %s -> %s", nameMapping.Virtual().String(), pName.Object.String(), nameMapping.Virtual().String(), nameMapping.Host().String()) + } + + return nil +} + +func (s *Store) checkLabelConflict(labelMapping synccontext.LabelMapping) error { + // check if the mapping is conflicting + vLabel, ok := s.hostToVirtualLabel[labelMapping.Host] + if ok && vLabel.Label != labelMapping.Virtual { + return fmt.Errorf("there is already another label mapping %s -> %s that conflicts with %s -> %s", labelMapping.Host, vLabel.Label, labelMapping.Host, labelMapping.Virtual) + } + + // check the other way around + pLabel, ok := s.hostToVirtualLabel[labelMapping.Virtual] + if ok && pLabel.Label != labelMapping.Host { + return fmt.Errorf("there is already another label mapping %s -> %s that conflicts with %s -> %s", labelMapping.Virtual, pLabel.Label, labelMapping.Virtual, labelMapping.Host) + } + + return nil +} + +func (s *Store) checkLabelClusterConflict(labelMapping synccontext.LabelMapping) error { + // check if the mapping is conflicting + vLabel, ok := s.hostToVirtualLabelCluster[labelMapping.Host] + if ok && vLabel.Label != labelMapping.Virtual { + return fmt.Errorf("there is already another cluster-scoped label mapping %s -> %s that conflicts with %s -> %s", labelMapping.Host, vLabel.Label, labelMapping.Host, labelMapping.Virtual) + } + + // check the other way around + pLabel, ok := s.hostToVirtualLabelCluster[labelMapping.Virtual] + if ok && pLabel.Label != labelMapping.Host { + return fmt.Errorf("there is already another cluster-scoped label mapping %s -> %s that conflicts with %s -> %s", labelMapping.Virtual, pLabel.Label, labelMapping.Virtual, labelMapping.Host) + } + + return nil +} + +func (s *Store) removeLabelFromMaps(mapping *Mapping, _, pLabel string) { + removeMappingFromLabelMap(s.hostToVirtualLabel, mapping, pLabel) +} + +func (s *Store) removeLabelClusterFromMaps(mapping *Mapping, _, pLabel string) { + removeMappingFromLabelMap(s.hostToVirtualLabelCluster, mapping, pLabel) +} + +func (s *Store) removeNameFromMaps(mapping *Mapping, vObj, pObj synccontext.Object) { + removeMappingFromNameMap(s.hostToVirtualName, mapping, pObj) + removeMappingFromNameMap(s.virtualToHostName, mapping, vObj) +} + +func (s *Store) addLabelToMaps(mapping *Mapping, vLabel, pLabel string) { + addMappingToLabelMap(s.hostToVirtualLabel, mapping, pLabel, vLabel) +} + +func (s *Store) addLabelClusterToMaps(mapping *Mapping, vLabel, pLabel string) { + 
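// cluster-scoped labels are tracked in their own lookup map, separate from namespaced label mappings
+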
addMappingToLabelMap(s.hostToVirtualLabelCluster, mapping, pLabel, vLabel)
+}
+
+func (s *Store) addNameToMaps(mapping *Mapping, vObj, pObj synccontext.Object) {
+  addMappingToNameMap(s.hostToVirtualName, mapping, pObj, vObj)
+  addMappingToNameMap(s.virtualToHostName, mapping, vObj, pObj)
+}
+
+func (s *Store) addMapping(mapping *Mapping) {
+  s.mappings[mapping.NameMapping] = mapping
+  s.addNameToMaps(mapping, mapping.Virtual(), mapping.Host())
+  dispatchAll(s.watches[mapping.GroupVersionKind], mapping.NameMapping)
+
+  // add references
+  for _, reference := range mapping.References {
+    s.addNameToMaps(mapping, reference.Virtual(), reference.Host())
+    dispatchAll(s.watches[reference.GroupVersionKind], reference)
+  }
+
+  // add labels
+  for _, label := range mapping.Labels {
+    s.addLabelToMaps(mapping, label.Virtual, label.Host)
+  }
+
+  // add cluster-scoped labels
+  for _, label := range mapping.LabelsCluster {
+    s.addLabelClusterToMaps(mapping, label.Virtual, label.Host)
+  }
+}
+
+func (s *Store) removeMapping(mapping *Mapping) {
+  delete(s.mappings, mapping.NameMapping)
+  // remove the mapping itself from the name lookup maps, mirroring addMapping
+  s.removeNameFromMaps(mapping, mapping.Virtual(), mapping.Host())
+  dispatchAll(s.watches[mapping.GroupVersionKind], mapping.NameMapping)
+
+  // delete references
+  for _, reference := range mapping.References {
+    s.removeNameFromMaps(mapping, reference.Virtual(), reference.Host())
+    dispatchAll(s.watches[reference.GroupVersionKind], reference)
+  }
+
+  // delete labels
+  for _, label := range mapping.Labels {
+    s.removeLabelFromMaps(mapping, label.Virtual, label.Host)
+  }
+
+  // delete cluster-scoped labels
+  for _, label := range mapping.LabelsCluster {
+    s.removeLabelClusterFromMaps(mapping, label.Virtual, label.Host)
+  }
+}
diff --git a/pkg/mappings/store/store_test.go b/pkg/mappings/store/store_test.go
new file mode 100644
index 000000000..9fd3a5a56
--- /dev/null
+++ b/pkg/mappings/store/store_test.go
@@ -0,0 +1,154 @@
+package store
+
+import (
+  "context"
+  "testing"
+
+  "github.com/loft-sh/vcluster/pkg/scheme"
+  "github.com/loft-sh/vcluster/pkg/syncer/synccontext"
+  testingutil "github.com/loft-sh/vcluster/pkg/util/testing"
+  "gotest.tools/v3/assert"
+  corev1 "k8s.io/api/core/v1"
+  "k8s.io/apimachinery/pkg/types"
+)
+
+func TestStore(t *testing.T) {
+  genericStore, err := NewStore(context.TODO(), testingutil.NewFakeClient(scheme.Scheme), testingutil.NewFakeClient(scheme.Scheme), NewMemoryBackend())
+  assert.NilError(t, err)
+
+  store, ok := genericStore.(*Store)
+  assert.Equal(t, true, ok)
+
+  gvk := corev1.SchemeGroupVersion.WithKind("Secret")
+  virtualName := types.NamespacedName{
+    Name:      "virtual-name",
+    Namespace: "virtual-namespace",
+  }
+  hostName := types.NamespacedName{
+    Name:      "host-name",
+    Namespace: "host-namespace",
+  }
+  labelMapping := synccontext.LabelMapping{
+    Virtual: "virtual-label",
+    Host:    "host-label",
+  }
+
+  baseCtx := context.TODO()
+  baseMapping := synccontext.NameMapping{
+    GroupVersionKind: gvk,
+    VirtualName:      virtualName,
+  }
+
+  // record reference
+  err = store.RecordReference(baseCtx, synccontext.NameMapping{
+    GroupVersionKind: gvk,
+    HostName:         hostName,
+    VirtualName:      virtualName,
+  }, baseMapping)
+  assert.NilError(t, err)
+
+  // virtual -> host
+  translatedHostName, ok := store.VirtualToHostName(baseCtx, synccontext.Object{
+    GroupVersionKind: gvk,
+    NamespacedName:   virtualName,
+  })
+  assert.Equal(t, true, ok)
+  assert.Equal(t, hostName, translatedHostName)
+
+  // host -> virtual
+  translatedVirtualName, ok := store.HostToVirtualName(baseCtx, synccontext.Object{
+    GroupVersionKind: gvk,
+    NamespacedName:   hostName,
+  })
+  assert.Equal(t, true, ok)
+
assert.Equal(t, virtualName, translatedVirtualName) + + // virtual -> host + _, ok = store.HostToVirtualName(baseCtx, synccontext.Object{ + GroupVersionKind: gvk, + }) + assert.Equal(t, false, ok) + + // check inner structure of store + assert.Equal(t, 1, len(store.mappings)) + assert.Equal(t, 0, len(store.hostToVirtualLabel)) + assert.Equal(t, 0, len(store.hostToVirtualLabelCluster)) + assert.Equal(t, 1, len(store.hostToVirtualName)) + assert.Equal(t, 1, len(store.virtualToHostName)) + + // make sure the mapping is not added + nameMapping := synccontext.NameMapping{ + GroupVersionKind: gvk, + HostName: hostName, + VirtualName: virtualName, + } + err = store.RecordReference(baseCtx, nameMapping, baseMapping) + assert.NilError(t, err) + assert.Equal(t, 1, len(store.mappings)) + assert.Equal(t, 0, len(store.hostToVirtualLabel)) + assert.Equal(t, 0, len(store.hostToVirtualLabelCluster)) + assert.Equal(t, 1, len(store.hostToVirtualName)) + assert.Equal(t, 1, len(store.virtualToHostName)) + + // validate mapping itself + mapping, ok := store.mappings[nameMapping] + assert.Equal(t, true, ok) + assert.Equal(t, 0, len(mapping.References)) + assert.Equal(t, 0, len(mapping.Labels)) + assert.Equal(t, 0, len(mapping.LabelsCluster)) + + // map label + err = store.RecordLabel(baseCtx, labelMapping, baseMapping) + assert.NilError(t, err) + + // check mappings + virtualLabel, ok := store.HostToVirtualLabel(baseCtx, labelMapping.Host) + assert.Equal(t, true, ok) + assert.Equal(t, virtualLabel, labelMapping.Virtual) + + // validate mapping itself + mapping, ok = store.mappings[nameMapping] + assert.Equal(t, true, ok) + assert.Equal(t, 0, len(mapping.References)) + assert.Equal(t, 1, len(mapping.Labels)) + assert.Equal(t, 0, len(mapping.LabelsCluster)) + + // garbage collect mapping + store.garbageCollectMappings(context.TODO()) + _, ok = store.mappings[nameMapping] + assert.Equal(t, false, ok) +} + +func TestRecordMapping(t *testing.T) { + genericStore, err := NewStore(context.TODO(), testingutil.NewFakeClient(scheme.Scheme), testingutil.NewFakeClient(scheme.Scheme), NewMemoryBackend()) + assert.NilError(t, err) + + store, ok := genericStore.(*Store) + assert.Equal(t, true, ok) + + baseCtx := context.TODO() + + gvk := corev1.SchemeGroupVersion.WithKind("ConfigMap") + virtual := types.NamespacedName{ + Namespace: "default", + Name: "kube-root-ca.crt", + } + host := types.NamespacedName{ + Namespace: "vcluster-namespace", + Name: "kube-root-ca.crt", + } + host2 := types.NamespacedName{ + Namespace: "vcluster-namespace", + Name: "vcluster-kube-root-ca.crt-x-vcluster", + } + err = store.RecordReference(baseCtx, synccontext.NameMapping{ + GroupVersionKind: gvk, + VirtualName: virtual, + HostName: host2, + }, synccontext.NameMapping{ + GroupVersionKind: gvk, + HostName: host, + }) + assert.NilError(t, err) + assert.Equal(t, 0, len(store.mappings)) +} diff --git a/pkg/mappings/store/util.go b/pkg/mappings/store/util.go new file mode 100644 index 000000000..e484e4af5 --- /dev/null +++ b/pkg/mappings/store/util.go @@ -0,0 +1,71 @@ +package store + +import "github.com/loft-sh/vcluster/pkg/syncer/synccontext" + +func addMappingToLabelMap(lookupMap map[string]lookupLabel, mapping *Mapping, key, other string) { + newLookupLabel, ok := lookupMap[key] + if !ok { + newLookupLabel = lookupLabel{ + Label: other, + } + } + + newLookupLabel.Mappings = append(newLookupLabel.Mappings, mapping) + lookupMap[key] = newLookupLabel +} + +func removeMappingFromNameMap(lookupMap map[synccontext.Object]lookupName, mapping *Mapping, key 
synccontext.Object) { + newLookupName, ok := lookupMap[key] + if !ok { + return + } + + // remove from mappings + newMappings := []*Mapping{} + for _, otherMapping := range newLookupName.Mappings { + if otherMapping.String() != mapping.String() { + newMappings = append(newMappings, otherMapping) + } + } + if len(newMappings) == 0 { + delete(lookupMap, key) + return + } + + newLookupName.Mappings = newMappings + lookupMap[key] = newLookupName +} + +func removeMappingFromLabelMap(lookupMap map[string]lookupLabel, mapping *Mapping, key string) { + newLookupLabel, ok := lookupMap[key] + if !ok { + return + } + + // remove from mappings + newMappings := []*Mapping{} + for _, otherMapping := range newLookupLabel.Mappings { + if otherMapping.String() != mapping.String() { + newMappings = append(newMappings, otherMapping) + } + } + if len(newMappings) == 0 { + delete(lookupMap, key) + return + } + + newLookupLabel.Mappings = newMappings + lookupMap[key] = newLookupLabel +} + +func addMappingToNameMap(lookupMap map[synccontext.Object]lookupName, mapping *Mapping, key, other synccontext.Object) { + newLookupName, ok := lookupMap[key] + if !ok { + newLookupName = lookupName{ + Object: other, + } + } + + newLookupName.Mappings = append(newLookupName.Mappings, mapping) + lookupMap[key] = newLookupName +} diff --git a/pkg/mappings/store/watcher.go b/pkg/mappings/store/watcher.go new file mode 100644 index 000000000..b03414b2b --- /dev/null +++ b/pkg/mappings/store/watcher.go @@ -0,0 +1,47 @@ +package store + +import ( + "context" + "sync" + + "github.com/loft-sh/vcluster/pkg/syncer/synccontext" + "k8s.io/client-go/util/workqueue" +) + +type watcher struct { + m sync.Mutex + + addQueueFn synccontext.AddQueueFunc + queue workqueue.RateLimitingInterface +} + +func (w *watcher) Dispatch(nameMapping synccontext.NameMapping) { + w.m.Lock() + defer w.m.Unlock() + + if w.queue == nil { + return + } + + w.addQueueFn(nameMapping, w.queue) +} + +func (w *watcher) Start(_ context.Context, queue workqueue.RateLimitingInterface) error { + w.m.Lock() + defer w.m.Unlock() + + w.queue = queue + return nil +} + +func dispatchAll(watches []*watcher, nameMapping synccontext.NameMapping) { + if len(watches) == 0 { + return + } + + go func() { + for _, watch := range watches { + watch.Dispatch(nameMapping) + } + }() +} diff --git a/pkg/patches/patch_test.go b/pkg/patches/patch_test.go index 8a0a9ed96..273d9d136 100644 --- a/pkg/patches/patch_test.go +++ b/pkg/patches/patch_test.go @@ -496,11 +496,11 @@ func (r *fakeVirtualToHostNameResolver) TranslateNameWithNamespace(name string, if ns == "" { ns = namespace } - return types.NamespacedName{Namespace: r.targetNamespace, Name: translate.Default.HostName(name, ns)} + return types.NamespacedName{Namespace: r.targetNamespace, Name: translate.Default.HostName(nil, name, ns)} }), nil } - return translate.Default.HostName(name, namespace), nil + return translate.Default.HostName(nil, name, namespace), nil } func (r *fakeVirtualToHostNameResolver) TranslateLabelKey(key string) (string, error) { diff --git a/pkg/serviceaccount/claims.go b/pkg/serviceaccount/claims.go deleted file mode 100644 index cc18c0f50..000000000 --- a/pkg/serviceaccount/claims.go +++ /dev/null @@ -1,207 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package serviceaccount - -import ( - "context" - "errors" - "fmt" - "time" - - corev1 "k8s.io/api/core/v1" - - "gopkg.in/square/go-jose.v2/jwt" - "k8s.io/apiserver/pkg/audit" - "k8s.io/klog/v2" - - apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" -) - -const ( - // Injected bound service account token expiration which triggers monitoring of its time-bound feature. - WarnOnlyBoundTokenExpirationSeconds = 60*60 + 7 - - // Extended expiration for those modifed tokens involved in safe rollout if time-bound feature. - ExpirationExtensionSeconds = 24 * 365 * 60 * 60 -) - -// time.Now stubbed out to allow testing -var now = time.Now - -type privateClaims struct { - Kubernetes kubernetes `json:"kubernetes.io,omitempty"` -} - -type kubernetes struct { - Namespace string `json:"namespace,omitempty"` - Svcacct ref `json:"serviceaccount,omitempty"` - Pod *ref `json:"pod,omitempty"` - Secret *ref `json:"secret,omitempty"` - WarnAfter jwt.NumericDate `json:"warnafter,omitempty"` -} - -type ref struct { - Name string `json:"name,omitempty"` - UID string `json:"uid,omitempty"` -} - -func Claims(sa corev1.ServiceAccount, pod *corev1.Pod, secret *corev1.Secret, expirationSeconds, warnafter int64, audience []string) (*jwt.Claims, interface{}) { - now := now() - sc := &jwt.Claims{ - Subject: apiserverserviceaccount.MakeUsername(sa.Namespace, sa.Name), - Audience: jwt.Audience(audience), - IssuedAt: jwt.NewNumericDate(now), - NotBefore: jwt.NewNumericDate(now), - Expiry: jwt.NewNumericDate(now.Add(time.Duration(expirationSeconds) * time.Second)), - } - pc := &privateClaims{ - Kubernetes: kubernetes{ - Namespace: sa.Namespace, - Svcacct: ref{ - Name: sa.Name, - UID: string(sa.UID), - }, - }, - } - switch { - case pod != nil: - pc.Kubernetes.Pod = &ref{ - Name: pod.Name, - UID: string(pod.UID), - } - case secret != nil: - pc.Kubernetes.Secret = &ref{ - Name: secret.Name, - UID: string(secret.UID), - } - } - - if warnafter != 0 { - pc.Kubernetes.WarnAfter = *jwt.NewNumericDate(now.Add(time.Duration(warnafter) * time.Second)) - } - - return sc, pc -} - -func NewValidator(getter TokenGetter) Validator { - return &validator{ - getter: getter, - } -} - -type validator struct { - getter TokenGetter -} - -var _ = Validator(&validator{}) - -func (v *validator) Validate(ctx context.Context, _ string, public *jwt.Claims, privateObj interface{}) (*apiserverserviceaccount.ServiceAccountInfo, error) { - private, ok := privateObj.(*privateClaims) - if !ok { - klog.Errorf("jwt validator expected private claim of type *privateClaims but got: %T", privateObj) - return nil, errors.New("token could not be validated") - } - nowTime := now() - err := public.Validate(jwt.Expected{ - Time: nowTime, - }) - if err != nil { - if errors.Is(err, jwt.ErrExpired) { - return nil, errors.New("token has expired") - } - - klog.Errorf("unexpected validation error: %T", err) - return nil, errors.New("token could not be validated") - } - - // consider things deleted prior to now()-leeway to be invalid - invalidIfDeletedBefore := nowTime.Add(-jwt.DefaultLeeway) - namespace := private.Kubernetes.Namespace - saref := 
private.Kubernetes.Svcacct - podref := private.Kubernetes.Pod - secref := private.Kubernetes.Secret - // Make sure service account still exists (name and UID) - serviceAccount, err := v.getter.GetServiceAccount(namespace, saref.Name) - if err != nil { - klog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, saref.Name, err) - return nil, err - } - if serviceAccount.DeletionTimestamp != nil && serviceAccount.DeletionTimestamp.Time.Before(invalidIfDeletedBefore) { - klog.V(4).Infof("Service account has been deleted %s/%s", namespace, saref.Name) - return nil, fmt.Errorf("ServiceAccount %s/%s has been deleted", namespace, saref.Name) - } - if string(serviceAccount.UID) != saref.UID { - klog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, saref.Name, string(serviceAccount.UID), saref.UID) - return nil, fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, saref.UID) - } - - if secref != nil { - // Make sure token hasn't been invalidated by deletion of the secret - secret, err := v.getter.GetSecret(namespace, secref.Name) - if err != nil { - klog.V(4).Infof("Could not retrieve bound secret %s/%s for service account %s/%s: %v", namespace, secref.Name, namespace, saref.Name, err) - return nil, errors.New("token has been invalidated") - } - if secret.DeletionTimestamp != nil && secret.DeletionTimestamp.Time.Before(invalidIfDeletedBefore) { - klog.V(4).Infof("Bound secret is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secref.Name, namespace, saref.Name) - return nil, errors.New("token has been invalidated") - } - if secref.UID != string(secret.UID) { - klog.V(4).Infof("Secret UID no longer matches %s/%s: %q != %q", namespace, secref.Name, string(secret.UID), secref.UID) - return nil, fmt.Errorf("secret UID (%s) does not match claim (%s)", secret.UID, secref.UID) - } - } - - var podName, podUID string - if podref != nil { - // Make sure token hasn't been invalidated by deletion of the pod - pod, err := v.getter.GetPod(namespace, podref.Name) - if err != nil { - klog.V(4).Infof("Could not retrieve bound pod %s/%s for service account %s/%s: %v", namespace, podref.Name, namespace, saref.Name, err) - return nil, errors.New("token has been invalidated") - } - if pod.DeletionTimestamp != nil && pod.DeletionTimestamp.Time.Before(invalidIfDeletedBefore) { - klog.V(4).Infof("Bound pod is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, podref.Name, namespace, saref.Name) - return nil, errors.New("token has been invalidated") - } - if podref.UID != string(pod.UID) { - klog.V(4).Infof("Pod UID no longer matches %s/%s: %q != %q", namespace, podref.Name, string(pod.UID), podref.UID) - return nil, fmt.Errorf("pod UID (%s) does not match claim (%s)", pod.UID, podref.UID) - } - podName = podref.Name - podUID = podref.UID - } - - // Check special 'warnafter' field for projected service account token transition. 
- warnafter := private.Kubernetes.WarnAfter - if warnafter != 0 { - if nowTime.After(warnafter.Time()) { - secondsAfterWarn := nowTime.Unix() - warnafter.Time().Unix() - auditInfo := fmt.Sprintf("subject: %s, seconds after warning threshold: %d", public.Subject, secondsAfterWarn) - audit.AddAuditAnnotation(ctx, "authentication.k8s.io/stale-token", auditInfo) - } - } - - return &apiserverserviceaccount.ServiceAccountInfo{ - Namespace: private.Kubernetes.Namespace, - Name: private.Kubernetes.Svcacct.Name, - UID: private.Kubernetes.Svcacct.UID, - PodName: podName, - PodUID: podUID, - }, nil -} - -func (v *validator) NewPrivateClaims() interface{} { - return &privateClaims{} -} diff --git a/pkg/serviceaccount/jwt.go b/pkg/serviceaccount/jwt.go deleted file mode 100644 index cb92e85fd..000000000 --- a/pkg/serviceaccount/jwt.go +++ /dev/null @@ -1,344 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package serviceaccount - -import ( - "context" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "crypto/x509" - "encoding/base64" - "encoding/json" - "fmt" - "strings" - - jose "gopkg.in/square/go-jose.v2" - "gopkg.in/square/go-jose.v2/jwt" - - corev1 "k8s.io/api/core/v1" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apiserver/pkg/audit" - "k8s.io/apiserver/pkg/authentication/authenticator" - apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" -) - -// TokenGetter defines functions to retrieve a named service account and secret -type TokenGetter interface { - GetServiceAccount(namespace, name string) (*corev1.ServiceAccount, error) - GetPod(namespace, name string) (*corev1.Pod, error) - GetSecret(namespace, name string) (*corev1.Secret, error) -} - -type TokenGenerator interface { - // GenerateToken generates a token which will identify the given - // ServiceAccount. privateClaims is an interface that will be - // serialized into the JWT payload JSON encoding at the root level of - // the payload object. Public claims take precedent over private - // claims i.e. if both claims and privateClaims have an "exp" field, - // the value in claims will be used. - GenerateToken(claims *jwt.Claims, privateClaims interface{}) (string, error) -} - -// JWTTokenGenerator returns a TokenGenerator that generates signed JWT tokens, using the given privateKey. -// privateKey is a PEM-encoded byte array of a private RSA key. 
-func JWTTokenGenerator(iss string, privateKey interface{}) (TokenGenerator, error) { - var signer jose.Signer - var err error - switch pk := privateKey.(type) { - case *rsa.PrivateKey: - signer, err = signerFromRSAPrivateKey(pk) - if err != nil { - return nil, fmt.Errorf("could not generate signer for RSA keypair: %w", err) - } - case *ecdsa.PrivateKey: - signer, err = signerFromECDSAPrivateKey(pk) - if err != nil { - return nil, fmt.Errorf("could not generate signer for ECDSA keypair: %w", err) - } - case jose.OpaqueSigner: - signer, err = signerFromOpaqueSigner(pk) - if err != nil { - return nil, fmt.Errorf("could not generate signer for OpaqueSigner: %w", err) - } - default: - return nil, fmt.Errorf("unknown private key type %T, must be *rsa.PrivateKey, *ecdsa.PrivateKey, or jose.OpaqueSigner", privateKey) - } - - return &jwtTokenGenerator{ - iss: iss, - signer: signer, - }, nil -} - -// keyIDFromPublicKey derives a key ID non-reversibly from a public key. -// -// The Key ID is field on a given on JWTs and JWKs that help relying parties -// pick the correct key for verification when the identity party advertises -// multiple keys. -// -// Making the derivation non-reversible makes it impossible for someone to -// accidentally obtain the real key from the key ID and use it for token -// validation. -func keyIDFromPublicKey(publicKey interface{}) (string, error) { - publicKeyDERBytes, err := x509.MarshalPKIXPublicKey(publicKey) - if err != nil { - return "", fmt.Errorf("failed to serialize public key to DER format: %w", err) - } - - hasher := crypto.SHA256.New() - _, _ = hasher.Write(publicKeyDERBytes) - publicKeyDERHash := hasher.Sum(nil) - - keyID := base64.RawURLEncoding.EncodeToString(publicKeyDERHash) - - return keyID, nil -} - -func signerFromRSAPrivateKey(keyPair *rsa.PrivateKey) (jose.Signer, error) { - keyID, err := keyIDFromPublicKey(&keyPair.PublicKey) - if err != nil { - return nil, fmt.Errorf("failed to derive keyID: %w", err) - } - - // IMPORTANT: If this function is updated to support additional key sizes, - // algorithmForPublicKey in serviceaccount/openidmetadata.go must also be - // updated to support the same key sizes. Today we only support RS256. - - // Wrap the RSA keypair in a JOSE JWK with the designated key ID. - privateJWK := &jose.JSONWebKey{ - Algorithm: string(jose.RS256), - Key: keyPair, - KeyID: keyID, - Use: "sig", - } - - signer, err := jose.NewSigner( - jose.SigningKey{ - Algorithm: jose.RS256, - Key: privateJWK, - }, - nil, - ) - - if err != nil { - return nil, fmt.Errorf("failed to create signer: %w", err) - } - - return signer, nil -} - -func signerFromECDSAPrivateKey(keyPair *ecdsa.PrivateKey) (jose.Signer, error) { - var alg jose.SignatureAlgorithm - switch keyPair.Curve { - case elliptic.P256(): - alg = jose.ES256 - case elliptic.P384(): - alg = jose.ES384 - case elliptic.P521(): - alg = jose.ES512 - default: - return nil, fmt.Errorf("unknown private key curve, must be 256, 384, or 521") - } - - keyID, err := keyIDFromPublicKey(&keyPair.PublicKey) - if err != nil { - return nil, fmt.Errorf("failed to derive keyID: %w", err) - } - - // Wrap the ECDSA keypair in a JOSE JWK with the designated key ID. 
- privateJWK := &jose.JSONWebKey{ - Algorithm: string(alg), - Key: keyPair, - KeyID: keyID, - Use: "sig", - } - - signer, err := jose.NewSigner( - jose.SigningKey{ - Algorithm: alg, - Key: privateJWK, - }, - nil, - ) - if err != nil { - return nil, fmt.Errorf("failed to create signer: %w", err) - } - - return signer, nil -} - -func signerFromOpaqueSigner(opaqueSigner jose.OpaqueSigner) (jose.Signer, error) { - alg := jose.SignatureAlgorithm(opaqueSigner.Public().Algorithm) - - signer, err := jose.NewSigner( - jose.SigningKey{ - Algorithm: alg, - Key: &jose.JSONWebKey{ - Algorithm: string(alg), - Key: opaqueSigner, - KeyID: opaqueSigner.Public().KeyID, - Use: "sig", - }, - }, - nil, - ) - if err != nil { - return nil, fmt.Errorf("failed to create signer: %w", err) - } - - return signer, nil -} - -type jwtTokenGenerator struct { - iss string - signer jose.Signer -} - -func (j *jwtTokenGenerator) GenerateToken(claims *jwt.Claims, privateClaims interface{}) (string, error) { - // claims are applied in reverse precedence - return jwt.Signed(j.signer). - Claims(privateClaims). - Claims(claims). - Claims(&jwt.Claims{ - Issuer: j.iss, - }). - CompactSerialize() -} - -// JWTTokenAuthenticator authenticates tokens as JWT tokens produced by JWTTokenGenerator -// Token signatures are verified using each of the given public keys until one works (allowing key rotation) -// If lookup is true, the service account and secret referenced as claims inside the token are retrieved and verified with the provided ServiceAccountTokenGetter -func JWTTokenAuthenticator(issuers []string, keys []interface{}, implicitAuds authenticator.Audiences, validator Validator) authenticator.Token { - issuersMap := make(map[string]bool) - for _, issuer := range issuers { - issuersMap[issuer] = true - } - return &jwtTokenAuthenticator{ - issuers: issuersMap, - keys: keys, - implicitAuds: implicitAuds, - validator: validator, - } -} - -type jwtTokenAuthenticator struct { - issuers map[string]bool - keys []interface{} - validator Validator - implicitAuds authenticator.Audiences -} - -// Validator is called by the JWT token authenticator to apply domain specific -// validation to a token and extract user information. -type Validator interface { - // Validate validates a token and returns user information or an error. - // Validator can assume that the issuer and signature of a token are already - // verified when this function is called. - Validate(ctx context.Context, tokenData string, public *jwt.Claims, private interface{}) (*apiserverserviceaccount.ServiceAccountInfo, error) - // NewPrivateClaims returns a struct that the authenticator should - // deserialize the JWT payload into. The authenticator may then pass this - // struct back to the Validator as the 'private' argument to a Validate() - // call. This struct should contain fields for any private claims that the - // Validator requires to validate the JWT. - NewPrivateClaims() interface{} -} - -func (j *jwtTokenAuthenticator) AuthenticateToken(ctx context.Context, tokenData string) (*authenticator.Response, bool, error) { - if !j.hasCorrectIssuer(tokenData) { - return nil, false, nil - } - - tok, err := jwt.ParseSigned(tokenData) - if err != nil { - return nil, false, nil - } - - public := &jwt.Claims{} - private := j.validator.NewPrivateClaims() - - // TODO: Pick the key that has the same key ID as `tok`, if one exists. 
- var ( - found bool - errlist []error - ) - for _, key := range j.keys { - if err := tok.Claims(key, public, private); err != nil { - errlist = append(errlist, err) - continue - } - found = true - break - } - - if !found { - return nil, false, utilerrors.NewAggregate(errlist) - } - - tokenAudiences := authenticator.Audiences(public.Audience) - if len(tokenAudiences) == 0 { - // only apiserver audiences are allowed for legacy tokens - audit.AddAuditAnnotation(ctx, "authentication.k8s.io/legacy-token", public.Subject) - tokenAudiences = j.implicitAuds - } - - requestedAudiences, ok := authenticator.AudiencesFrom(ctx) - if !ok { - // default to apiserver audiences - requestedAudiences = j.implicitAuds - } - - auds := tokenAudiences.Intersect(requestedAudiences) - if len(auds) == 0 && len(j.implicitAuds) != 0 { - return nil, false, fmt.Errorf("token audiences %q is invalid for the target audiences %q", tokenAudiences, requestedAudiences) - } - - // If we get here, we have a token with a recognized signature and - // issuer string. - sa, err := j.validator.Validate(ctx, tokenData, public, private) - if err != nil { - return nil, false, err - } - - return &authenticator.Response{ - User: sa.UserInfo(), - Audiences: auds, - }, true, nil -} - -// hasCorrectIssuer returns true if tokenData is a valid JWT in compact -// serialization format and the "iss" claim matches the iss field of this token -// authenticator, and otherwise returns false. -// -// Note: go-jose currently does not allow access to unverified JWS payloads. -// See https://github.com/square/go-jose/issues/169 -func (j *jwtTokenAuthenticator) hasCorrectIssuer(tokenData string) bool { - parts := strings.Split(tokenData, ".") - if len(parts) != 3 { - return false - } - payload, err := base64.RawURLEncoding.DecodeString(parts[1]) - if err != nil { - return false - } - claims := struct { - // WARNING: this JWT is not verified. Do not trust these claims. - Issuer string `json:"iss"` - }{} - if err := json.Unmarshal(payload, &claims); err != nil { - return false - } - return j.issuers[claims.Issuer] -} diff --git a/pkg/setup/controller_context.go b/pkg/setup/controller_context.go index 903cbb327..775a68d48 100644 --- a/pkg/setup/controller_context.go +++ b/pkg/setup/controller_context.go @@ -8,13 +8,16 @@ import ( "github.com/loft-sh/vcluster/pkg/config" "github.com/loft-sh/vcluster/pkg/controllers/resources/nodes" + "github.com/loft-sh/vcluster/pkg/etcd" "github.com/loft-sh/vcluster/pkg/mappings" + "github.com/loft-sh/vcluster/pkg/mappings/store" "github.com/loft-sh/vcluster/pkg/plugin" "github.com/loft-sh/vcluster/pkg/pro" "github.com/loft-sh/vcluster/pkg/scheme" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" "github.com/loft-sh/vcluster/pkg/telemetry" "github.com/loft-sh/vcluster/pkg/util/blockingcacheclient" + translatepro "github.com/loft-sh/vcluster/pkg/util/translate/pro" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" @@ -108,6 +111,8 @@ func getLocalCacheOptions(options *config.VirtualClusterConfig) cache.Options { defaultNamespaces := make(map[string]cache.Config) if !options.Experimental.MultiNamespaceMode.Enabled { defaultNamespaces[options.WorkloadTargetNamespace] = cache.Config{} + + translatepro.AddMappingsToCache(defaultNamespaces) } // do we need access to another namespace to export the kubeconfig ? 
// we will need access to all the objects that the vcluster usually has access to @@ -318,6 +323,16 @@ func initControllerContext( return nil, err } + etcdClient, err := etcd.NewFromConfig(ctx, vClusterOptions) + if err != nil { + return nil, fmt.Errorf("create etcd client: %w", err) + } + + mappingStore, err := store.NewStore(ctx, virtualManager.GetClient(), localManager.GetClient(), store.NewEtcdBackend(etcdClient)) + if err != nil { + return nil, fmt.Errorf("start mapping store: %w", err) + } + return &synccontext.ControllerContext{ Context: ctx, LocalManager: localManager, @@ -327,7 +342,7 @@ func initControllerContext( WorkloadNamespaceClient: currentNamespaceClient, - Mappings: mappings.NewMappingsRegistry(), + Mappings: mappings.NewMappingsRegistry(mappingStore), StopChan: stopChan, Config: vClusterOptions, diff --git a/pkg/setup/controllers.go b/pkg/setup/controllers.go index f0f17a7a0..e961ef6e5 100644 --- a/pkg/setup/controllers.go +++ b/pkg/setup/controllers.go @@ -21,6 +21,7 @@ import ( appsv1 "k8s.io/api/apps/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" @@ -101,6 +102,12 @@ func StartControllers(controllerContext *synccontext.ControllerContext, syncers // if not noop syncer if !controllerContext.Config.Experimental.SyncSettings.DisableSync { + // migrate mappers + err = MigrateMappers(controllerContext.ToRegisterContext(), syncers) + if err != nil { + return err + } + // make sure the kubernetes service is synced err = SyncKubernetesService(controllerContext) if err != nil { @@ -143,6 +150,10 @@ func StartControllers(controllerContext *synccontext.ControllerContext, syncers return fmt.Errorf("plugin set leader: %w", err) } + // start mappings store garbage collection + controllerContext.Mappings.Store().StartGarbageCollection(controllerContext.Context) + + // we are done here klog.FromContext(controllerContext).Info("Successfully started vCluster controllers") return nil } @@ -254,3 +265,33 @@ func WriteKubeConfigToSecret(ctx context.Context, currentNamespace string, curre // write the default Secret return kubeconfig.WriteKubeConfig(ctx, currentNamespaceClient, kubeconfig.GetDefaultSecretName(translate.VClusterName), currentNamespace, syncerConfig, options.Experimental.IsolatedControlPlane.KubeConfig != "") } + +func MigrateMappers(ctx *synccontext.RegisterContext, syncers []syncertypes.Object) error { + mappers := ctx.Mappings.List() + done := map[schema.GroupVersionKind]bool{} + + // migrate mappers + for _, mapper := range mappers { + done[mapper.GroupVersionKind()] = true + err := mapper.Migrate(ctx, mapper) + if err != nil { + return fmt.Errorf("migrate mapper %s: %w", mapper.GroupVersionKind().String(), err) + } + } + + // migrate syncers + for _, syncer := range syncers { + mapper, ok := syncer.(synccontext.Mapper) + if !ok || done[mapper.GroupVersionKind()] { + continue + } + + done[mapper.GroupVersionKind()] = true + err := mapper.Migrate(ctx, mapper) + if err != nil { + return fmt.Errorf("migrate syncer mapper %s: %w", mapper.GroupVersionKind().String(), err) + } + } + + return nil +} diff --git a/pkg/setup/initialize.go b/pkg/setup/initialize.go index bec5e466d..12cb0a049 100644 --- a/pkg/setup/initialize.go +++ b/pkg/setup/initialize.go @@ -195,17 +195,14 @@ func initialize(ctx context.Context, parentCtx context.Context, options *config. 
go func() { // we need to run this with the parent ctx as otherwise this context will be cancelled by the wait // loop in Initialize - var err error - if distro == vclusterconfig.K8SDistro { - err = k8s.StartK8S( - parentCtx, - serviceCIDR, - options.ControlPlane.Distro.K8S.APIServer, - options.ControlPlane.Distro.K8S.ControllerManager, - options.ControlPlane.Distro.K8S.Scheduler, - options, - ) - } + err := k8s.StartK8S( + parentCtx, + serviceCIDR, + options.ControlPlane.Distro.K8S.APIServer, + options.ControlPlane.Distro.K8S.ControllerManager, + options.ControlPlane.Distro.K8S.Scheduler, + options, + ) if err != nil { klog.Fatalf("Error running k8s: %v", err) } diff --git a/pkg/specialservices/resolver.go b/pkg/specialservices/resolver.go index 4142b2768..c219122ea 100644 --- a/pkg/specialservices/resolver.go +++ b/pkg/specialservices/resolver.go @@ -40,7 +40,7 @@ type NameserverFinder struct { } func (f *NameserverFinder) DNSNamespace(ctx *synccontext.SyncContext) (client.Client, string) { - return ctx.PhysicalClient, translate.Default.HostNamespace(DefaultKubeDNSServiceNamespace) + return ctx.PhysicalClient, translate.Default.HostNamespace(ctx, DefaultKubeDNSServiceNamespace) } func (f *NameserverFinder) SpecialServicesToSync() map[types.NamespacedName]SpecialServiceSyncer { diff --git a/pkg/syncer/fake_syncer.go b/pkg/syncer/fake_syncer.go index f09fc8d65..ef94e6186 100644 --- a/pkg/syncer/fake_syncer.go +++ b/pkg/syncer/fake_syncer.go @@ -7,9 +7,8 @@ import ( "github.com/loft-sh/vcluster/pkg/constants" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" syncertypes "github.com/loft-sh/vcluster/pkg/syncer/types" - "github.com/loft-sh/vcluster/pkg/util/translate" - "github.com/loft-sh/vcluster/pkg/util/loghelper" + "github.com/loft-sh/vcluster/pkg/util/translate" kerrors "k8s.io/apimachinery/pkg/api/errors" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" diff --git a/pkg/syncer/synccontext/context.go b/pkg/syncer/synccontext/context.go index dc2b25151..2f0aeeaff 100644 --- a/pkg/syncer/synccontext/context.go +++ b/pkg/syncer/synccontext/context.go @@ -38,22 +38,6 @@ type ControllerContext struct { Mappings MappingsRegistry } -type SyncContext struct { - context.Context - - Log loghelper.Logger - - Config *config.VirtualClusterConfig - - PhysicalClient client.Client - VirtualClient client.Client - - Mappings MappingsRegistry - - CurrentNamespace string - CurrentNamespaceClient client.Client -} - type RegisterContext struct { context.Context diff --git a/pkg/syncer/synccontext/mapper.go b/pkg/syncer/synccontext/mapper.go index 71f61315e..11729bd1a 100644 --- a/pkg/syncer/synccontext/mapper.go +++ b/pkg/syncer/synccontext/mapper.go @@ -1,9 +1,17 @@ package synccontext import ( + "context" + "fmt" + "strings" + + "github.com/loft-sh/vcluster/pkg/scheme" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/source" ) // MappingsRegistry holds different mappings @@ -11,15 +19,75 @@ type MappingsRegistry interface { // ByGVK retrieves a mapper by GroupVersionKind. ByGVK(gvk schema.GroupVersionKind) (Mapper, error) + // List retrieves all mappers as a map + List() map[schema.GroupVersionKind]Mapper + // Has checks if the store contains a mapper with the given GroupVersionKind. Has(gvk schema.GroupVersionKind) bool // AddMapper adds the given mapper to the store. 
 	AddMapper(mapper Mapper) error
+
+	// Store returns the mapping store of the registry
+	Store() MappingsStore
+}
+
+type AddQueueFunc func(nameMapping NameMapping, queue workqueue.RateLimitingInterface)
+
+// MappingsStore holds logic to store and retrieve mappings
+type MappingsStore interface {
+	// Watch builds a source that can be used in a controller to watch on changes within the store for a given
+	// GroupVersionKind.
+	Watch(gvk schema.GroupVersionKind, addQueueFn AddQueueFunc) source.Source
+
+	// StartGarbageCollection starts the mapping store garbage collection
+	StartGarbageCollection(ctx context.Context)
+
+	// HasHostObject checks if the store has a mapping for the host object
+	HasHostObject(ctx context.Context, pObj Object) bool
+
+	// HasVirtualObject checks if the store has a mapping for the virtual object
+	HasVirtualObject(ctx context.Context, vObj Object) bool
+
+	// RecordAndSaveReference records a reference mapping and directly saves it
+	RecordAndSaveReference(ctx context.Context, nameMapping, belongsTo NameMapping) error
+
+	// RecordReference records a reference mapping
+	RecordReference(ctx context.Context, nameMapping, belongsTo NameMapping) error
+
+	// RecordLabel records a label mapping in the store
+	RecordLabel(ctx context.Context, labelMapping LabelMapping, belongsTo NameMapping) error
+
+	// RecordLabelCluster records a label mapping for a cluster scoped object in the store
+	RecordLabelCluster(ctx context.Context, labelMapping LabelMapping, belongsTo NameMapping) error
+
+	// SaveMapping saves the mapping in the backing store
+	SaveMapping(ctx context.Context, mapping NameMapping) error
+
+	// ReferencesTo retrieves all known references to this object
+	ReferencesTo(ctx context.Context, vObj Object) []NameMapping
+
+	// HostToVirtualName maps the given host object to the virtual name if found within the store
+	HostToVirtualName(ctx context.Context, pObj Object) (types.NamespacedName, bool)
+
+	// VirtualToHostName maps the given virtual object to the host name if found within the store
+	VirtualToHostName(ctx context.Context, vObj Object) (types.NamespacedName, bool)
+
+	// HostToVirtualLabel maps the given host label to the virtual label if found within the store
+	HostToVirtualLabel(ctx context.Context, pLabel string) (string, bool)
+
+	// HostToVirtualLabelCluster maps the given host label of a cluster scoped object to the virtual label if found within the store
+	HostToVirtualLabelCluster(ctx context.Context, pLabel string) (string, bool)
 }
 
 // Mapper holds the mapping logic for an object
 type Mapper interface {
+	// Migrate is called right before the controllers are started and should be used to
+	// validate that the mappings are initialized correctly in the store. The mapper is
+	// passed as an argument because we want underlying structs to retrieve the name from
+	// the topmost struct that implements the mapping, as overriding methods within
+	// embedded structs is not possible in Go.
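For illustration, a minimal sketch of how a mapper could use the Migrate hook described above to pre-populate the store from existing virtual objects. The exampleSecretMapper type and its listing logic are hypothetical; the store, registry, and translate helpers are the ones introduced in this diff, and ToSyncContext is assumed to return a *synccontext.SyncContext as used in the testing helpers further below.

package example

import (
	"fmt"

	"github.com/loft-sh/vcluster/pkg/syncer/synccontext"
	"github.com/loft-sh/vcluster/pkg/util/translate"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
)

// exampleSecretMapper is a placeholder type; embedding synccontext.Mapper only keeps
// the sketch compact, a real mapper would implement the full interface itself.
type exampleSecretMapper struct {
	synccontext.Mapper
}

// Migrate walks the existing virtual secrets and saves their name mappings so that
// references recorded before a restart are known to the store again.
func (m *exampleSecretMapper) Migrate(ctx *synccontext.RegisterContext, self synccontext.Mapper) error {
	syncCtx := ctx.ToSyncContext("migrate-secrets")

	vList := &corev1.SecretList{}
	if err := ctx.VirtualManager.GetClient().List(ctx, vList); err != nil {
		return fmt.Errorf("list virtual secrets: %w", err)
	}

	for i := range vList.Items {
		vObj := &vList.Items[i]

		// translate the virtual name to the host name using the context-aware helpers
		nameMapping := synccontext.NameMapping{
			GroupVersionKind: self.GroupVersionKind(),
			VirtualName:      types.NamespacedName{Namespace: vObj.GetNamespace(), Name: vObj.GetName()},
			HostName: types.NamespacedName{
				Namespace: translate.Default.HostNamespace(syncCtx, vObj.GetNamespace()),
				Name:      translate.Default.HostName(syncCtx, vObj.GetName(), vObj.GetNamespace()),
			},
		}

		// persist the mapping in the backing store
		if err := ctx.Mappings.Store().SaveMapping(ctx, nameMapping); err != nil {
			return fmt.Errorf("save mapping %s: %w", nameMapping.String(), err)
		}
	}

	return nil
}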
+ Migrate(ctx *RegisterContext, mapper Mapper) error + // GroupVersionKind retrieves the group version kind GroupVersionKind() schema.GroupVersionKind @@ -32,3 +100,115 @@ type Mapper interface { // IsManaged determines if a physical object is managed by the vCluster IsManaged(ctx *SyncContext, pObj client.Object) (bool, error) } + +type Object struct { + schema.GroupVersionKind + types.NamespacedName +} + +func (o Object) Equals(other Object) bool { + return o.String() == other.String() +} + +func (o Object) Empty() bool { + return o.Name == "" +} + +func (o Object) String() string { + return strings.Join([]string{ + o.GroupVersionKind.String(), + o.NamespacedName.String(), + }, ";") +} + +func NewNameMappingFrom(pObj, vObj client.Object) (NameMapping, error) { + if pObj == nil && vObj == nil { + return NameMapping{}, nil + } + + nameMapping := NameMapping{} + if pObj != nil && pObj.GetName() != "" { + gvk, err := apiutil.GVKForObject(pObj, scheme.Scheme) + if err != nil { + return NameMapping{}, err + } + + nameMapping.GroupVersionKind = gvk + nameMapping.HostName = types.NamespacedName{ + Namespace: pObj.GetNamespace(), + Name: pObj.GetName(), + } + } + + if vObj != nil && vObj.GetName() != "" { + gvk, err := apiutil.GVKForObject(vObj, scheme.Scheme) + if err != nil { + return NameMapping{}, err + } + + if !nameMapping.Empty() && gvk.String() != nameMapping.GroupVersionKind.String() { + return NameMapping{}, fmt.Errorf("mapping GVK is different %s != %s", gvk.String(), nameMapping.GroupVersionKind.String()) + } + + nameMapping.GroupVersionKind = gvk + nameMapping.VirtualName = types.NamespacedName{ + Namespace: vObj.GetNamespace(), + Name: vObj.GetName(), + } + } + + return nameMapping, nil +} + +type NameMapping struct { + schema.GroupVersionKind + + VirtualName types.NamespacedName + HostName types.NamespacedName +} + +func (n NameMapping) Equals(other NameMapping) bool { + return n.Host().Equals(other.Host()) && n.Virtual().Equals(other.Virtual()) +} + +func (n NameMapping) Empty() bool { + return n.Host().Empty() && n.Virtual().Empty() +} + +func (n NameMapping) Virtual() Object { + return Object{ + GroupVersionKind: n.GroupVersionKind, + NamespacedName: n.VirtualName, + } +} + +func (n NameMapping) Host() Object { + return Object{ + GroupVersionKind: n.GroupVersionKind, + NamespacedName: n.HostName, + } +} + +func (n NameMapping) String() string { + return strings.Join([]string{ + n.GroupVersionKind.String(), + n.VirtualName.String(), + n.HostName.String(), + }, ";") +} + +type LabelMapping struct { + Virtual string + Host string +} + +func (l LabelMapping) Equals(other LabelMapping) bool { + return l.Host == other.Host && l.Virtual == other.Virtual +} + +func (l LabelMapping) String() string { + return strings.Join([]string{ + l.Virtual, + l.Host, + }, ";") +} diff --git a/pkg/syncer/synccontext/sync_context.go b/pkg/syncer/synccontext/sync_context.go new file mode 100644 index 000000000..9e75357e8 --- /dev/null +++ b/pkg/syncer/synccontext/sync_context.go @@ -0,0 +1,76 @@ +package synccontext + +import ( + "context" + "fmt" + + "github.com/loft-sh/vcluster/pkg/config" + "github.com/loft-sh/vcluster/pkg/util/loghelper" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type SyncContext struct { + context.Context + + Log loghelper.Logger + + Config *config.VirtualClusterConfig + + PhysicalClient client.Client + VirtualClient client.Client + + Mappings MappingsRegistry + + CurrentNamespace string + CurrentNamespaceClient client.Client +} + +func (s *SyncContext) Close() error { + 
if s.Mappings != nil && s.Mappings.Store() != nil { + // check if we have the owning object in the context + belongsTo, ok := MappingFrom(s.Context) + if !ok { + return nil + } + + // save the mapping in the store + err := s.Mappings.Store().SaveMapping(s, belongsTo) + if err != nil { + return fmt.Errorf("save mapping: %w", err) + } + } + + return nil +} + +type syncContextMappingType int + +const mappingKey syncContextMappingType = iota + +// WithMappingFromObjects adds the mapping to the context +func WithMappingFromObjects(ctx context.Context, pObj, vObj client.Object) (context.Context, error) { + nameMapping, err := NewNameMappingFrom(pObj, vObj) + if err != nil { + return nil, err + } + + return WithMapping(ctx, nameMapping), nil +} + +// WithMapping adds the mapping to the context +func WithMapping(ctx context.Context, nameMapping NameMapping) context.Context { + if nameMapping.Empty() { + return ctx + } + + return context.WithValue(ctx, mappingKey, nameMapping) +} + +// MappingFrom returns the value of the original request path key on the ctx +func MappingFrom(ctx context.Context) (NameMapping, bool) { + info, ok := ctx.Value(mappingKey).(NameMapping) + if info.Empty() { + return NameMapping{}, false + } + return info, ok +} diff --git a/pkg/syncer/syncer.go b/pkg/syncer/syncer.go index 365c78ddb..0f8ed9522 100644 --- a/pkg/syncer/syncer.go +++ b/pkg/syncer/syncer.go @@ -2,6 +2,7 @@ package syncer import ( "context" + "errors" "fmt" "strings" "time" @@ -11,13 +12,13 @@ import ( "github.com/loft-sh/vcluster/pkg/scheme" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" syncertypes "github.com/loft-sh/vcluster/pkg/syncer/types" + "github.com/loft-sh/vcluster/pkg/util/fifolocker" "github.com/loft-sh/vcluster/pkg/util/translate" - "github.com/moby/locker" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - controller2 "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/source" "github.com/loft-sh/vcluster/pkg/util/loghelper" @@ -60,7 +61,7 @@ func NewSyncController(ctx *synccontext.RegisterContext, syncer syncertypes.Sync virtualClient: ctx.VirtualManager.GetClient(), options: options, - locker: locker.New(), + locker: fifolocker.New(), }, nil } @@ -93,7 +94,7 @@ type SyncController struct { virtualClient client.Client options *syncertypes.Options - locker *locker.Locker + locker *fifolocker.Locker } func (r *SyncController) newSyncContext(ctx context.Context, logName string) *synccontext.SyncContext { @@ -109,7 +110,7 @@ func (r *SyncController) newSyncContext(ctx context.Context, logName string) *sy } } -func (r *SyncController) Reconcile(ctx context.Context, origReq ctrl.Request) (_ ctrl.Result, err error) { +func (r *SyncController) Reconcile(ctx context.Context, origReq ctrl.Request) (_ ctrl.Result, retErr error) { // extract if this was a delete request origReq, syncEventType := fromDeleteRequest(origReq) @@ -121,6 +122,11 @@ func (r *SyncController) Reconcile(ctx context.Context, origReq ctrl.Request) (_ // create sync context syncContext := r.newSyncContext(ctx, origReq.Name) + defer func() { + if err := syncContext.Close(); err != nil { + retErr = errors.Join(retErr, err) + } + }() // if host request we need to find the virtual object vReq, pReq, err := r.extractRequest(syncContext, origReq) @@ -134,6 +140,8 @@ func (r *SyncController) Reconcile(ctx context.Context, origReq ctrl.Request) (_ // reconciling on the 
same object in parallel as this could // happen if a host event and virtual event are queued at the // same time. + // + // This is FIFO, we use a special mutex for this (fifomu.Mutex) r.locker.Lock(vReq.String()) defer func() { _ = r.locker.Unlock(vReq.String()) @@ -155,11 +163,19 @@ func (r *SyncController) Reconcile(ctx context.Context, origReq ctrl.Request) (_ return ctrl.Result{}, err } + // add mapping to context + if !r.options.SkipMappingsRecording { + syncContext.Context, err = synccontext.WithMappingFromObjects(syncContext.Context, pObj, vObj) + if err != nil { + return ctrl.Result{}, err + } + } + // check what function we should call if vObj != nil && pObj != nil { // make sure the object uid matches pAnnotations := pObj.GetAnnotations() - if !r.options.DisableUIDDeletion && pAnnotations != nil && pAnnotations[translate.UIDAnnotation] != "" && pAnnotations[translate.UIDAnnotation] != string(vObj.GetUID()) { + if !r.options.DisableUIDDeletion && pAnnotations[translate.UIDAnnotation] != "" && pAnnotations[translate.UIDAnnotation] != string(vObj.GetUID()) { // requeue if object is already being deleted if pObj.GetDeletionTimestamp() != nil { return ctrl.Result{RequeueAfter: time.Second}, nil @@ -397,6 +413,7 @@ func (r *SyncController) enqueueVirtual(ctx context.Context, obj client.Object, Name: obj.GetName(), }, })) + return } @@ -443,6 +460,7 @@ func (r *SyncController) enqueuePhysical(ctx context.Context, obj client.Object, Name: obj.GetName(), }, }))) + return } @@ -455,10 +473,10 @@ func (r *SyncController) enqueuePhysical(ctx context.Context, obj client.Object, })) } -func (r *SyncController) Register(ctx *synccontext.RegisterContext) error { +func (r *SyncController) Build(ctx *synccontext.RegisterContext) (controller.Controller, error) { // build the basic controller - controller := ctrl.NewControllerManagedBy(ctx.VirtualManager). - WithOptions(controller2.Options{ + controllerBuilder := ctrl.NewControllerManagedBy(ctx.VirtualManager). + WithOptions(controller.Options{ MaxConcurrentReconciles: 10, CacheSyncTimeout: constants.DefaultCacheSyncTimeout, }). 
@@ -470,13 +488,39 @@ func (r *SyncController) Register(ctx *synccontext.RegisterContext) error { modifier, isControllerModifier := r.syncer.(syncertypes.ControllerModifier) if isControllerModifier { var err error - controller, err = modifier.ModifyController(ctx, controller) + controllerBuilder, err = modifier.ModifyController(ctx, controllerBuilder) if err != nil { - return err + return nil, err + } + } + + return controllerBuilder.Build(r) +} + +func (r *SyncController) Register(ctx *synccontext.RegisterContext) error { + _, err := r.Build(ctx) + return err +} + +func CreateVirtualObject(ctx *synccontext.SyncContext, pObj, vObj client.Object, eventRecorder record.EventRecorder) (ctrl.Result, error) { + gvk, err := apiutil.GVKForObject(vObj, scheme.Scheme) + if err != nil { + return ctrl.Result{}, fmt.Errorf("gvk for object: %w", err) + } + + ctx.Log.Infof("create virtual %s %s/%s", gvk.Kind, vObj.GetNamespace(), vObj.GetName()) + err = ctx.VirtualClient.Create(ctx, vObj) + if err != nil { + if kerrors.IsNotFound(err) { + ctx.Log.Debugf("error syncing %s %s/%s to virtual cluster: %v", gvk.Kind, pObj.GetNamespace(), pObj.GetName(), err) + return ctrl.Result{RequeueAfter: time.Second}, nil } + ctx.Log.Infof("error syncing %s %s/%s to virtual cluster: %v", gvk.Kind, pObj.GetNamespace(), pObj.GetName(), err) + eventRecorder.Eventf(vObj, "Warning", "SyncError", "Error syncing to virtual cluster: %v", err) + return ctrl.Result{}, err } - return controller.Complete(r) + return ctrl.Result{}, nil } func CreateHostObject(ctx *synccontext.SyncContext, vObj, pObj client.Object, eventRecorder record.EventRecorder) (ctrl.Result, error) { diff --git a/pkg/syncer/syncer_test.go b/pkg/syncer/syncer_test.go index 50c4f8017..2c49b7e2d 100644 --- a/pkg/syncer/syncer_test.go +++ b/pkg/syncer/syncer_test.go @@ -3,27 +3,34 @@ package syncer import ( "context" "errors" + "fmt" "sort" + "sync" "testing" + "time" "github.com/loft-sh/vcluster/pkg/mappings" + "github.com/loft-sh/vcluster/pkg/patcher" "github.com/loft-sh/vcluster/pkg/scheme" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" syncertesting "github.com/loft-sh/vcluster/pkg/syncer/testing" "github.com/loft-sh/vcluster/pkg/syncer/translator" syncertypes "github.com/loft-sh/vcluster/pkg/syncer/types" + "github.com/loft-sh/vcluster/pkg/util/fifolocker" "github.com/loft-sh/vcluster/pkg/util/loghelper" testingutil "github.com/loft-sh/vcluster/pkg/util/testing" "github.com/loft-sh/vcluster/pkg/util/translate" - "github.com/moby/locker" "gotest.tools/v3/assert" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" ) // named mock instead of fake because there's a real "fake" syncer that syncs fake objects @@ -31,7 +38,7 @@ type mockSyncer struct { syncertypes.GenericTranslator } -func NewMockSyncer(ctx *synccontext.RegisterContext) (syncertypes.Object, error) { +func NewMockSyncer(ctx *synccontext.RegisterContext) (syncertypes.Syncer, error) { mapper, err := ctx.Mappings.ByGVK(mappings.Secrets()) if err != nil { return nil, err @@ -42,18 +49,13 @@ func NewMockSyncer(ctx *synccontext.RegisterContext) (syncertypes.Object, error) }, nil } -func (s *mockSyncer) naiveTranslateCreate(ctx *synccontext.SyncContext, vObj 
client.Object) client.Object { - pObj := translate.HostMetadata(ctx, vObj, s.VirtualToHost(ctx, types.NamespacedName{Name: vObj.GetName(), Namespace: vObj.GetNamespace()}, vObj)) - return pObj -} - func (s *mockSyncer) Syncer() syncertypes.Sync[client.Object] { return ToGenericSyncer[*corev1.Secret](s) } // SyncToHost is called when a virtual object was created and needs to be synced down to the physical cluster func (s *mockSyncer) SyncToHost(ctx *synccontext.SyncContext, event *synccontext.SyncToHostEvent[*corev1.Secret]) (ctrl.Result, error) { - pObj := s.naiveTranslateCreate(ctx, event.Virtual) + pObj := translate.HostMetadata(ctx, event.Virtual, s.VirtualToHost(ctx, types.NamespacedName{Name: event.Virtual.GetName(), Namespace: event.Virtual.GetNamespace()}, event.Virtual)) if pObj == nil { return ctrl.Result{}, errors.New("naive translate create failed") } @@ -62,11 +64,24 @@ func (s *mockSyncer) SyncToHost(ctx *synccontext.SyncContext, event *synccontext } // Sync is called to sync a virtual object with a physical object -func (s *mockSyncer) Sync(ctx *synccontext.SyncContext, event *synccontext.SyncEvent[*corev1.Secret]) (ctrl.Result, error) { - newPObj := event.Host.DeepCopyObject().(client.Object) - newPObj.SetAnnotations(translate.HostAnnotations(event.Virtual, event.Host)) - newPObj.SetLabels(translate.HostLabels(ctx, event.Virtual, event.Host)) - return ctrl.Result{}, ctx.VirtualClient.Update(ctx, newPObj) +func (s *mockSyncer) Sync(ctx *synccontext.SyncContext, event *synccontext.SyncEvent[*corev1.Secret]) (_ ctrl.Result, retErr error) { + patch, err := patcher.NewSyncerPatcher(ctx, event.Host, event.Virtual) + if err != nil { + return ctrl.Result{}, fmt.Errorf("new syncer patcher: %w", err) + } + defer func() { + if err := patch.Patch(ctx, event.Host, event.Virtual); err != nil { + retErr = utilerrors.NewAggregate([]error{retErr, err}) + } + }() + + event.Host.Annotations = translate.HostAnnotations(event.Virtual, event.Host) + event.Host.Labels = translate.HostLabels(ctx, event.Virtual, event.Host) + + // check data + event.TargetObject().Data = event.SourceObject().Data + + return ctrl.Result{}, nil } func (s *mockSyncer) SyncToVirtual(ctx *synccontext.SyncContext, event *synccontext.SyncToVirtualEvent[*corev1.Secret]) (_ ctrl.Result, retErr error) { @@ -77,24 +92,198 @@ func (s *mockSyncer) SyncToVirtual(ctx *synccontext.SyncContext, event *synccont var _ syncertypes.Syncer = &mockSyncer{} var ( - vclusterNamespace = "test" - namespaceInVclusterA = "default" + namespaceInVClusterA = "default" ) +type fakeSource struct { + m sync.Mutex + + queue workqueue.RateLimitingInterface +} + +func (f *fakeSource) String() string { + return "fake-source" +} + +func (f *fakeSource) Add(request reconcile.Request) { + f.m.Lock() + defer f.m.Unlock() + + f.queue.Add(request) +} + +func (f *fakeSource) Start(_ context.Context, queue workqueue.RateLimitingInterface) error { + f.m.Lock() + defer f.m.Unlock() + + f.queue = queue + return nil +} + +func TestController(t *testing.T) { + translator := translate.NewSingleNamespaceTranslator(syncertesting.DefaultTestTargetNamespace) + + type testCase struct { + Name string + + EnqueueObjs []types.NamespacedName + + InitialPhysicalState []runtime.Object + InitialVirtualState []runtime.Object + + ExpectedPhysicalState map[schema.GroupVersionKind][]runtime.Object + ExpectedVirtualState map[schema.GroupVersionKind][]runtime.Object + + Compare syncertesting.Compare + } + + testCases := []testCase{ + { + Name: "should sync down", + + EnqueueObjs: 
[]types.NamespacedName{ + {Name: "a", Namespace: namespaceInVClusterA}, + }, + + InitialVirtualState: []runtime.Object{ + // secret that might be created by ingress controller or cert managers + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "a", + Namespace: namespaceInVClusterA, + UID: "123", + }, + }, + }, + + InitialPhysicalState: []runtime.Object{ + // secret that might be created by ingress controller or cert managers + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "a", + Namespace: syncertesting.DefaultTestTargetNamespace, + UID: "123", + }, + }, + }, + + ExpectedVirtualState: map[schema.GroupVersionKind][]runtime.Object{ + // existing secret should remain + corev1.SchemeGroupVersion.WithKind("Secret"): { + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "a", + Namespace: namespaceInVClusterA, + UID: "123", + }, + }, + }, + }, + + ExpectedPhysicalState: map[schema.GroupVersionKind][]runtime.Object{ + // existing secret should remain + corev1.SchemeGroupVersion.WithKind("Secret"): { + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "a", + Namespace: syncertesting.DefaultTestTargetNamespace, + UID: "123", + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: translator.HostName(nil, "a", namespaceInVClusterA), + Namespace: syncertesting.DefaultTestTargetNamespace, + Annotations: map[string]string{ + translate.NameAnnotation: "a", + translate.NamespaceAnnotation: namespaceInVClusterA, + translate.UIDAnnotation: "123", + translate.KindAnnotation: corev1.SchemeGroupVersion.WithKind("Secret").String(), + }, + Labels: map[string]string{ + translate.NamespaceLabel: namespaceInVClusterA, + }, + }, + }, + }, + }, + }, + } + + for i, tc := range testCases { + t.Logf("running test #%d: %s", i, tc.Name) + + // setup mocks + ctx := context.Background() + pClient := testingutil.NewFakeClient(scheme.Scheme, tc.InitialPhysicalState...) + vClient := testingutil.NewFakeClient(scheme.Scheme, tc.InitialVirtualState...) 
+ + fakeContext := syncertesting.NewFakeRegisterContext(syncertesting.NewFakeConfig(), pClient, vClient) + syncer, err := NewMockSyncer(fakeContext) + assert.NilError(t, err) + + syncController, err := NewSyncController(fakeContext, syncer) + assert.NilError(t, err) + + genericController, err := syncController.Build(fakeContext) + assert.NilError(t, err) + + source := &fakeSource{} + err = genericController.Watch(source) + assert.NilError(t, err) + + go func() { + err = genericController.Start(fakeContext) + assert.NilError(t, err) + }() + + time.Sleep(time.Second) + + // execute + for _, req := range tc.EnqueueObjs { + source.Add(ctrl.Request{NamespacedName: req}) + } + + time.Sleep(time.Second) + + // assert expected result + // Compare states + if tc.ExpectedPhysicalState != nil { + for gvk, objs := range tc.ExpectedPhysicalState { + err := syncertesting.CompareObjs(ctx, t, tc.Name+" physical state", fakeContext.PhysicalManager.GetClient(), gvk, scheme.Scheme, objs, tc.Compare) + if err != nil { + t.Fatalf("%s - Physical State mismatch: %v", tc.Name, err) + } + } + } + if tc.ExpectedVirtualState != nil { + for gvk, objs := range tc.ExpectedVirtualState { + err := syncertesting.CompareObjs(ctx, t, tc.Name+" virtual state", fakeContext.VirtualManager.GetClient(), gvk, scheme.Scheme, objs, tc.Compare) + if err != nil { + t.Fatalf("%s - Virtual State mismatch: %v", tc.Name, err) + } + } + } + } +} + func TestReconcile(t *testing.T) { - translator := translate.NewSingleNamespaceTranslator(vclusterNamespace) + translator := translate.NewSingleNamespaceTranslator(syncertesting.DefaultTestTargetNamespace) type testCase struct { Name string Focus bool - Syncer func(ctx *synccontext.RegisterContext) (syncertypes.Object, error) + Syncer func(ctx *synccontext.RegisterContext) (syncertypes.Syncer, error) EnqueueObjs []types.NamespacedName InitialPhysicalState []runtime.Object InitialVirtualState []runtime.Object + CreatePhysicalObjects []client.Object + CreateVirtualObjects []client.Object + ExpectedPhysicalState map[schema.GroupVersionKind][]runtime.Object ExpectedVirtualState map[schema.GroupVersionKind][]runtime.Object @@ -110,7 +299,7 @@ func TestReconcile(t *testing.T) { Syncer: NewMockSyncer, EnqueueObjs: []types.NamespacedName{ - {Name: "a", Namespace: namespaceInVclusterA}, + {Name: "a", Namespace: namespaceInVClusterA}, }, InitialVirtualState: []runtime.Object{ @@ -118,7 +307,7 @@ func TestReconcile(t *testing.T) { &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "a", - Namespace: namespaceInVclusterA, + Namespace: namespaceInVClusterA, UID: "123", }, }, @@ -129,7 +318,7 @@ func TestReconcile(t *testing.T) { &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "a", - Namespace: vclusterNamespace, + Namespace: syncertesting.DefaultTestTargetNamespace, UID: "123", }, }, @@ -141,7 +330,7 @@ func TestReconcile(t *testing.T) { &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "a", - Namespace: namespaceInVclusterA, + Namespace: namespaceInVClusterA, UID: "123", }, }, @@ -154,22 +343,22 @@ func TestReconcile(t *testing.T) { &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "a", - Namespace: vclusterNamespace, + Namespace: syncertesting.DefaultTestTargetNamespace, UID: "123", }, }, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: translator.HostName("a", namespaceInVclusterA), - Namespace: vclusterNamespace, + Name: translator.HostName(nil, "a", namespaceInVClusterA), + Namespace: syncertesting.DefaultTestTargetNamespace, Annotations: map[string]string{ translate.NameAnnotation: 
"a", - translate.NamespaceAnnotation: namespaceInVclusterA, + translate.NamespaceAnnotation: namespaceInVClusterA, translate.UIDAnnotation: "123", translate.KindAnnotation: corev1.SchemeGroupVersion.WithKind("Secret").String(), }, Labels: map[string]string{ - translate.NamespaceLabel: namespaceInVclusterA, + translate.NamespaceLabel: namespaceInVClusterA, }, }, }, @@ -179,11 +368,11 @@ func TestReconcile(t *testing.T) { shouldErr: false, }, { - Name: "should fail to sync down when object of desired name already exists", + Name: "should adopt object of desired name when already exists", Syncer: NewMockSyncer, EnqueueObjs: []types.NamespacedName{ - {Name: "a", Namespace: namespaceInVclusterA}, + {Name: "a", Namespace: namespaceInVClusterA}, }, InitialVirtualState: []runtime.Object{ @@ -191,7 +380,7 @@ func TestReconcile(t *testing.T) { &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "a", - Namespace: namespaceInVclusterA, + Namespace: namespaceInVClusterA, UID: "123", }, }, @@ -201,8 +390,8 @@ func TestReconcile(t *testing.T) { // existing object doesn't have annotations/labels indicating it is owned, but has the name of the synced object &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: translate.Default.HostName("a", namespaceInVclusterA), - Namespace: vclusterNamespace, + Name: translator.HostName(nil, "a", namespaceInVClusterA), + Namespace: syncertesting.DefaultTestTargetNamespace, Annotations: map[string]string{ "app": "existing", }, @@ -222,7 +411,84 @@ func TestReconcile(t *testing.T) { &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "a", - Namespace: namespaceInVclusterA, + Namespace: namespaceInVClusterA, + UID: "123", + }, + }, + }, + }, + + ExpectedPhysicalState: map[schema.GroupVersionKind][]runtime.Object{ + // existing secret should remain + corev1.SchemeGroupVersion.WithKind("Secret"): { + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: translator.HostName(nil, "a", namespaceInVClusterA), + Namespace: syncertesting.DefaultTestTargetNamespace, + Annotations: map[string]string{ + "app": "existing", + translate.NameAnnotation: "a", + translate.NamespaceAnnotation: namespaceInVClusterA, + translate.UIDAnnotation: "123", + translate.KindAnnotation: corev1.SchemeGroupVersion.WithKind("Secret").String(), + }, + Labels: map[string]string{ + "app": "existing", + translate.NamespaceLabel: namespaceInVClusterA, + }, + }, + }, + }, + }, + }, + { + Name: "should not adopt virtual object", + Syncer: NewMockSyncer, + + EnqueueObjs: []types.NamespacedName{ + toHostRequest(reconcile.Request{ + NamespacedName: types.NamespacedName{Name: "abc", Namespace: syncertesting.DefaultTestTargetNamespace}, + }).NamespacedName, + }, + + CreateVirtualObjects: []client.Object{ + // secret that might be created by ingress controller or cert managers + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "abc", + Namespace: namespaceInVClusterA, + UID: "123", + }, + }, + }, + + CreatePhysicalObjects: []client.Object{ + // existing object doesn't have annotations/labels indicating it is owned, but has the name of the synced object + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "abc", + Namespace: syncertesting.DefaultTestTargetNamespace, + Annotations: map[string]string{ + translate.NameAnnotation: "abc", + translate.NamespaceAnnotation: namespaceInVClusterA, + }, + Labels: map[string]string{ + translate.MarkerLabel: translate.VClusterName, + }, + }, + Data: map[string][]byte{ + "datakey1": []byte("datavalue1"), + }, + }, + }, + + ExpectedVirtualState: 
map[schema.GroupVersionKind][]runtime.Object{ + // existing secret should remain + corev1.SchemeGroupVersion.WithKind("Secret"): { + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "abc", + Namespace: namespaceInVClusterA, UID: "123", }, }, @@ -234,13 +500,14 @@ func TestReconcile(t *testing.T) { corev1.SchemeGroupVersion.WithKind("Secret"): { &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: translator.HostName("a", namespaceInVclusterA), - Namespace: vclusterNamespace, + Name: "abc", + Namespace: syncertesting.DefaultTestTargetNamespace, Annotations: map[string]string{ - "app": "existing", + translate.NameAnnotation: "abc", + translate.NamespaceAnnotation: namespaceInVClusterA, }, Labels: map[string]string{ - "app": "existing", + translate.MarkerLabel: translate.VClusterName, }, }, Data: map[string][]byte{ @@ -249,9 +516,6 @@ func TestReconcile(t *testing.T) { }, }, }, - - shouldErr: true, - errMsg: "conflict: cannot sync virtual object default/a as unmanaged physical object test/a-x-default-x-suffix exists with desired name", }, } sort.SliceStable(testCases, func(i, j int) bool { @@ -280,9 +544,8 @@ func TestReconcile(t *testing.T) { vClient := testingutil.NewFakeClient(scheme.Scheme, tc.InitialVirtualState...) fakeContext := syncertesting.NewFakeRegisterContext(syncertesting.NewFakeConfig(), pClient, vClient) - syncerImpl, err := tc.Syncer(fakeContext) + syncer, err := tc.Syncer(fakeContext) assert.NilError(t, err) - syncer := syncerImpl.(syncertypes.Syncer) controller := &SyncController{ syncer: syncer, @@ -301,7 +564,17 @@ func TestReconcile(t *testing.T) { virtualClient: vClient, options: options, - locker: locker.New(), + locker: fifolocker.New(), + } + + // create objects + for _, pObj := range tc.CreatePhysicalObjects { + err = fakeContext.PhysicalManager.GetClient().Create(ctx, pObj) + assert.NilError(t, err) + } + for _, vObj := range tc.CreateVirtualObjects { + err = fakeContext.VirtualManager.GetClient().Create(ctx, vObj) + assert.NilError(t, err) } // execute diff --git a/pkg/syncer/testing/context.go b/pkg/syncer/testing/context.go index 599121925..1fc39394d 100644 --- a/pkg/syncer/testing/context.go +++ b/pkg/syncer/testing/context.go @@ -8,6 +8,7 @@ import ( "github.com/loft-sh/vcluster/pkg/config" "github.com/loft-sh/vcluster/pkg/mappings" "github.com/loft-sh/vcluster/pkg/mappings/resources" + "github.com/loft-sh/vcluster/pkg/mappings/store" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" syncer "github.com/loft-sh/vcluster/pkg/syncer/types" "github.com/loft-sh/vcluster/pkg/util" @@ -48,21 +49,32 @@ func FakeStartSyncer(t *testing.T, ctx *synccontext.RegisterContext, create func assert.NilError(t, err) } + // run migrate + mapper, ok := object.(synccontext.Mapper) + if ok { + err := mapper.Migrate(ctx, mapper) + assert.NilError(t, err) + } + syncCtx := ctx.ToSyncContext(object.Name()) syncCtx.Log = loghelper.NewFromExisting(log.NewLog(0), object.Name()) return syncCtx, object } func NewFakeRegisterContext(vConfig *config.VirtualClusterConfig, pClient *testingutil.FakeIndexClient, vClient *testingutil.FakeIndexClient) *synccontext.RegisterContext { + ctx := context.Background() + mappingsStore, _ := store.NewStore(ctx, vClient, pClient, store.NewMemoryBackend()) + + // create register context translate.Default = translate.NewSingleNamespaceTranslator(DefaultTestTargetNamespace) registerCtx := &synccontext.RegisterContext{ - Context: context.Background(), + Context: ctx, Config: vConfig, CurrentNamespace: DefaultTestCurrentNamespace, 
CurrentNamespaceClient: pClient, VirtualManager: newFakeManager(vClient), PhysicalManager: newFakeManager(pClient), - Mappings: mappings.NewMappingsRegistry(), + Mappings: mappings.NewMappingsRegistry(mappingsStore), } // make sure we do not ensure any CRDs @@ -70,7 +82,15 @@ func NewFakeRegisterContext(vConfig *config.VirtualClusterConfig, pClient *testi return nil } + // register & migrate mappers resources.MustRegisterMappings(registerCtx) + for _, mapper := range registerCtx.Mappings.List() { + err := mapper.Migrate(registerCtx, mapper) + if err != nil { + panic(err) + } + } + return registerCtx } diff --git a/pkg/syncer/testing/manager.go b/pkg/syncer/testing/manager.go index 388252299..70e2e6c3d 100644 --- a/pkg/syncer/testing/manager.go +++ b/pkg/syncer/testing/manager.go @@ -3,13 +3,16 @@ package testing import ( "context" "net/http" + "time" "github.com/go-logr/logr" "github.com/loft-sh/vcluster/pkg/util/log" testingutil "github.com/loft-sh/vcluster/pkg/util/testing" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/rest" + toolscache "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" @@ -28,7 +31,7 @@ type fakeManager struct { client *testingutil.FakeIndexClient } -func (f *fakeManager) SetFields(interface{}) error { return nil } +func (f *fakeManager) SetFields(_ interface{}) error { return nil } func (f *fakeManager) GetConfig() *rest.Config { return &rest.Config{Host: "unit-test-client"} } @@ -38,7 +41,7 @@ func (f *fakeManager) GetClient() client.Client { return f.client } func (f *fakeManager) GetFieldIndexer() client.FieldIndexer { return f.client } -func (f *fakeManager) GetCache() cache.Cache { return nil } +func (f *fakeManager) GetCache() cache.Cache { return &fakeCache{FakeIndexClient: f.client} } func (f *fakeManager) GetEventRecorderFor(string) record.EventRecorder { return &fakeEventBroadcaster{} @@ -75,3 +78,59 @@ func (f *fakeManager) GetHTTPClient() *http.Client { func (f *fakeManager) AddMetricsServerExtraHandler(_ string, _ http.Handler) error { return nil } + +type fakeCache struct { + *testingutil.FakeIndexClient +} + +func (f *fakeCache) GetInformer(_ context.Context, _ client.Object, _ ...cache.InformerGetOption) (cache.Informer, error) { + return &fakeInformer{}, nil +} + +func (f *fakeCache) GetInformerForKind(_ context.Context, _ schema.GroupVersionKind, _ ...cache.InformerGetOption) (cache.Informer, error) { + return &fakeInformer{}, nil +} + +func (f *fakeCache) RemoveInformer(_ context.Context, _ client.Object) error { + return nil +} + +func (f *fakeCache) Start(_ context.Context) error { + return nil +} + +func (f *fakeCache) WaitForCacheSync(_ context.Context) bool { + return true +} + +func (f *fakeCache) IndexField(ctx context.Context, obj client.Object, key string, extractValue client.IndexerFunc) error { + return f.FakeIndexClient.IndexField(ctx, obj, key, extractValue) +} + +type fakeInformer struct{} + +func (f *fakeInformer) AddEventHandler(_ toolscache.ResourceEventHandler) (toolscache.ResourceEventHandlerRegistration, error) { + //nolint:nilnil + return nil, nil +} + +func (f *fakeInformer) AddEventHandlerWithResyncPeriod(_ toolscache.ResourceEventHandler, _ time.Duration) (toolscache.ResourceEventHandlerRegistration, error) { + //nolint:nilnil + return nil, nil +} + +func (f *fakeInformer) RemoveEventHandler(_ toolscache.ResourceEventHandlerRegistration) error { + 
return nil +} + +func (f *fakeInformer) AddIndexers(_ toolscache.Indexers) error { + return nil +} + +func (f *fakeInformer) HasSynced() bool { + return true +} + +func (f *fakeInformer) IsStopped() bool { + return false +} diff --git a/pkg/syncer/types/syncer.go b/pkg/syncer/types/syncer.go index a34881c9f..fd530e6cc 100644 --- a/pkg/syncer/types/syncer.go +++ b/pkg/syncer/types/syncer.go @@ -71,8 +71,9 @@ type Options struct { // and virtual doesn't match anymore. DisableUIDDeletion bool - IsClusterScopedCRD bool - HasStatusSubresource bool + IsClusterScopedCRD bool + + SkipMappingsRecording bool } type OptionsProvider interface { diff --git a/vendor/github.com/moby/locker/locker.go b/pkg/util/fifolocker/locker.go similarity index 93% rename from vendor/github.com/moby/locker/locker.go rename to pkg/util/fifolocker/locker.go index 0b22ddfab..69c6b2f25 100644 --- a/vendor/github.com/moby/locker/locker.go +++ b/pkg/util/fifolocker/locker.go @@ -10,13 +10,17 @@ If a lock with a given name does not exist when `Lock` is called, one is created. Lock references are automatically cleaned up on `Unlock` if nothing else is waiting for the lock. + +CHANGED BY LOFT: We exchanged the default sync.Mutex with fifomu.Mutex to account for problems */ -package locker +package fifolocker import ( "errors" "sync" "sync/atomic" + + "github.com/loft-sh/vcluster/pkg/util/fifomu" ) // ErrNoSuchLock is returned when the requested lock does not exist @@ -30,7 +34,7 @@ type Locker struct { // lockCtr is used by Locker to represent a lock with a given name. type lockCtr struct { - mu sync.Mutex + mu fifomu.Mutex // waiters is the number of waiters waiting to acquire the lock // this is int32 instead of uint32 so we can add `-1` in `dec()` waiters int32 diff --git a/pkg/util/fifomu/fifomu.go b/pkg/util/fifomu/fifomu.go new file mode 100644 index 000000000..713405be7 --- /dev/null +++ b/pkg/util/fifomu/fifomu.go @@ -0,0 +1,165 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fifomu provides a Mutex whose Lock method returns the lock to +// callers in FIFO call order. This is in contrast to sync.Mutex, where +// a single goroutine can repeatedly lock and unlock and relock the mutex +// without handing off to other lock waiter goroutines (that is, until after +// a 1ms starvation threshold, at which point sync.Mutex enters a FIFO +// "starvation mode" for those starved waiters, but that's too late for some +// use cases). +// +// fifomu.Mutex implements the exported methods of sync.Mutex and thus is +// a drop-in replacement (and by extension also implements sync.Locker). +// It also provides a bonus context-aware Mutex.LockContext method. +// +// Note: unless you need the FIFO behavior, you should prefer sync.Mutex. +// For typical workloads, its "greedy-relock" behavior requires less goroutine +// switching and yields better performance. +package fifomu + +import ( + "context" + "sync" +) + +var _ sync.Locker = (*Mutex)(nil) + +// Mutex is a mutual exclusion lock whose Lock method returns +// the lock to callers in FIFO call order. +// +// A Mutex must not be copied after first use. +// +// The zero value for a Mutex is an unlocked mutex. +// +// Mutex implements the same methodset as sync.Mutex, so it can +// be used as a drop-in replacement. It implements an additional +// method Mutex.LockContext, which provides context-aware locking. 
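As a usage note on the vendored-and-renamed locker: a small, self-contained sketch of the keyed FIFO locking pattern the sync controller relies on. The key and printed output are illustrative; the fifolocker API (New, Lock, Unlock) is the one shown in this diff.

package main

import (
	"fmt"
	"sync"

	"github.com/loft-sh/vcluster/pkg/util/fifolocker"
)

func main() {
	l := fifolocker.New()
	key := "default/my-secret" // one named lock per reconciled object

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()

			// waiters on the same key are granted the lock in the order they called Lock,
			// because the underlying mutex is now fifomu.Mutex instead of sync.Mutex
			l.Lock(key)
			defer func() { _ = l.Unlock(key) }()

			fmt.Println("reconciling", key, "from goroutine", i)
		}(i)
	}
	wg.Wait()
}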
+type Mutex struct { + waiters list[waiter] + cur int64 + mu sync.Mutex +} + +// Lock locks m. +// +// If the lock is already in use, the calling goroutine +// blocks until the mutex is available. +func (m *Mutex) Lock() { + m.mu.Lock() + if m.cur <= 0 && m.waiters.len == 0 { + m.cur++ + m.mu.Unlock() + return + } + + w := waiterPool.Get().(waiter) //nolint:errcheck + m.waiters.pushBack(w) + m.mu.Unlock() + + <-w + waiterPool.Put(w) +} + +// LockContext locks m. +// +// If the lock is already in use, the calling goroutine +// blocks until the mutex is available or ctx is done. +// +// On failure, LockContext returns context.Cause(ctx) and +// leaves the mutex unchanged. +// +// If ctx is already done, LockContext may still succeed without blocking. +func (m *Mutex) LockContext(ctx context.Context) error { + m.mu.Lock() + if m.cur <= 0 && m.waiters.len == 0 { + m.cur++ + m.mu.Unlock() + return nil + } + + w := waiterPool.Get().(waiter) //nolint:errcheck + elem := m.waiters.pushBackElem(w) + m.mu.Unlock() + + select { + case <-ctx.Done(): + err := context.Cause(ctx) + m.mu.Lock() + select { + case <-w: + // Acquired the lock after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancellation. + err = nil + waiterPool.Put(w) + default: + isFront := m.waiters.front() == elem + m.waiters.remove(elem) + // If we're at the front and there's extra tokens left, + // notify other waiters. + if isFront && m.cur < 1 { + m.notifyWaiters() + } + } + m.mu.Unlock() + return err + + case <-w: + waiterPool.Put(w) + return nil + } +} + +// TryLock tries to lock m and reports whether it succeeded. +func (m *Mutex) TryLock() bool { + m.mu.Lock() + success := m.cur <= 0 && m.waiters.len == 0 + if success { + m.cur++ + } + m.mu.Unlock() + return success +} + +// Unlock unlocks m. +// It is a run-time error if m is not locked on entry to Unlock. +// +// A locked Mutex is not associated with a particular goroutine. +// It is allowed for one goroutine to lock a Mutex and then +// arrange for another goroutine to unlock it. +func (m *Mutex) Unlock() { + m.mu.Lock() + m.cur-- + if m.cur < 0 { + m.mu.Unlock() + panic("sync: unlock of unlocked mutex") + } + m.notifyWaiters() + m.mu.Unlock() +} + +func (m *Mutex) notifyWaiters() { + for { + next := m.waiters.front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value + if m.cur > 0 { + // Anti-starvation measure: we could keep going, but under load + // that could cause starvation for large requests; instead, we leave + // all remaining waiters blocked. + break + } + + m.cur++ + m.waiters.remove(next) + w <- struct{}{} + } +} + +var waiterPool = sync.Pool{New: func() any { return waiter(make(chan struct{})) }} + +type waiter chan struct{} diff --git a/pkg/util/fifomu/fifomu_test.go b/pkg/util/fifomu/fifomu_test.go new file mode 100644 index 000000000..5dbfd1bb4 --- /dev/null +++ b/pkg/util/fifomu/fifomu_test.go @@ -0,0 +1,135 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package fifomu_test + +import ( + "runtime" + "sync" + "testing" + "time" + + "github.com/loft-sh/vcluster/pkg/util/fifomu" +) + +// Acknowledgement: Much of the test code in this file is +// copied from stdlib sync/mutex_test.go. + +// mutexer is the exported methodset of sync.Mutex. 
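The context-aware LockContext method mentioned in the package comment can be exercised as follows; a minimal sketch assuming only the fifomu API added above (Lock, Unlock, LockContext).

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/loft-sh/vcluster/pkg/util/fifomu"
)

func main() {
	var mu fifomu.Mutex // the zero value is an unlocked mutex
	mu.Lock()

	// a second acquirer gives up after 100ms instead of blocking forever
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	if err := mu.LockContext(ctx); err != nil {
		fmt.Println("could not acquire lock:", err) // context deadline exceeded
	}

	mu.Unlock()
}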
+type mutexer interface { + sync.Locker + TryLock() bool +} + +var ( + _ mutexer = (*fifomu.Mutex)(nil) + _ mutexer = (*sync.Mutex)(nil) +) + +// newMu is a function that returns a new mutexer. +// We set it to newFifoMu, newStdlibMu or newSemaphoreMu +// for benchmarking. +var newMu = newFifoMu + +func newFifoMu() mutexer { + return &fifomu.Mutex{} +} + +func HammerMutex(m mutexer, loops int, cdone chan bool) { + for i := 0; i < loops; i++ { + if i%3 == 0 { + if m.TryLock() { + m.Unlock() + } + continue + } + m.Lock() + m.Unlock() //nolint:staticcheck + } + cdone <- true +} + +func TestMutex(t *testing.T) { + if n := runtime.SetMutexProfileFraction(1); n != 0 { + t.Logf("got mutexrate %d expected 0", n) + } + defer runtime.SetMutexProfileFraction(0) + + m := newMu() + + m.Lock() + if m.TryLock() { + t.Fatalf("TryLock succeeded with mutex locked") + } + m.Unlock() + if !m.TryLock() { + t.Fatalf("TryLock failed with mutex unlocked") + } + m.Unlock() + + c := make(chan bool) + for i := 0; i < 10; i++ { + go HammerMutex(m, 1000, c) + } + for i := 0; i < 10; i++ { + <-c + } +} + +func TestMutexMisuse(t *testing.T) { + t.Run("Mutex.Unlock", func(t *testing.T) { + defer func() { + if r := recover(); r == nil { + t.Errorf("Expected panic due to Unlock of unlocked mutex") + } + }() + + mu := newMu() + mu.Unlock() + }) + + t.Run("Mutex.Unlock2", func(t *testing.T) { + defer func() { + if r := recover(); r == nil { + t.Errorf("Expected panic due to Unlock of unlocked mutex") + } + }() + + mu := newMu() + mu.Lock() + mu.Unlock() //nolint:staticcheck + mu.Unlock() + }) +} + +func TestMutexFairness(t *testing.T) { + mu := newMu() + stop := make(chan bool) + defer close(stop) + go func() { + for { + mu.Lock() + time.Sleep(100 * time.Microsecond) + mu.Unlock() + select { + case <-stop: + return + default: + } + } + }() + done := make(chan bool, 1) + go func() { + for i := 0; i < 10; i++ { + time.Sleep(100 * time.Microsecond) + mu.Lock() + mu.Unlock() //nolint:staticcheck + } + done <- true + }() + select { + case <-done: + case <-time.After(10 * time.Second): + t.Fatalf("can't acquire mutex in 10 seconds") + } +} diff --git a/pkg/util/fifomu/list.go b/pkg/util/fifomu/list.go new file mode 100644 index 000000000..2902c4cb7 --- /dev/null +++ b/pkg/util/fifomu/list.go @@ -0,0 +1,84 @@ +package fifomu + +import ( + "sync" +) + +var elementPool = sync.Pool{New: func() any { return new(element[waiter]) }} + +// list is a doubly-linked list of type T. +type list[T any] struct { + root element[T] + len uint +} + +func (l *list[T]) lazyInit() { + if l.root.next == nil { + l.root.next = &l.root + l.root.prev = &l.root + l.len = 0 + } +} + +// front returns the first element of list l or nil. +func (l *list[T]) front() *element[T] { + if l.len == 0 { + return nil + } + + return l.root.next +} + +// pushBackElem inserts a new element e with value v at +// the back of list l and returns e. +func (l *list[T]) pushBackElem(v T) *element[T] { + l.lazyInit() + + e := elementPool.Get().(*element[T]) //nolint:errcheck + e.Value = v + l.insert(e, l.root.prev) + return e +} + +// pushBack inserts a new element e with value v at +// the back of list l. +func (l *list[T]) pushBack(v T) { + l.lazyInit() + + e := elementPool.Get().(*element[T]) //nolint:errcheck + e.Value = v + l.insert(e, l.root.prev) +} + +// remove removes e from l if e is an element of list l. 
+func (l *list[T]) remove(e *element[T]) { + if e.list == l { + e.prev.next = e.next + e.next.prev = e.prev + e.next = nil // avoid memory leaks + e.prev = nil // avoid memory leaks + e.list = nil + l.len-- + } + + elementPool.Put(e) +} + +// insert inserts e after at. +func (l *list[T]) insert(e, at *element[T]) { + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e + e.list = l + l.len++ +} + +// element is a node of a linked list. +type element[T any] struct { + next, prev *element[T] + + list *list[T] + + Value T +} diff --git a/pkg/util/translate/multi_namespace.go b/pkg/util/translate/multi_namespace.go index d264aec4c..3f4ba53c6 100644 --- a/pkg/util/translate/multi_namespace.go +++ b/pkg/util/translate/multi_namespace.go @@ -8,6 +8,7 @@ import ( "github.com/loft-sh/vcluster/pkg/scheme" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" + "github.com/loft-sh/vcluster/pkg/util/translate/pro" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) @@ -29,12 +30,12 @@ func (s *multiNamespace) SingleNamespaceTarget() bool { } // HostName returns the physical name of the name / namespace resource -func (s *multiNamespace) HostName(name, _ string) string { +func (s *multiNamespace) HostName(_ *synccontext.SyncContext, name, _ string) string { return name } // HostNameShort returns the short physical name of the name / namespace resource -func (s *multiNamespace) HostNameShort(name, _ string) string { +func (s *multiNamespace) HostNameShort(_ *synccontext.SyncContext, name, _ string) string { return name } @@ -45,7 +46,7 @@ func (s *multiNamespace) HostNameCluster(name string) string { return SafeConcatName("vcluster", name, "x", s.currentNamespace, "x", VClusterName) } -func (s *multiNamespace) IsManaged(_ *synccontext.SyncContext, pObj client.Object) bool { +func (s *multiNamespace) IsManaged(ctx *synccontext.SyncContext, pObj client.Object) bool { // check if cluster scoped object if pObj.GetNamespace() == "" { return pObj.GetLabels()[MarkerLabel] == s.MarkerLabelCluster() @@ -55,7 +56,7 @@ func (s *multiNamespace) IsManaged(_ *synccontext.SyncContext, pObj client.Objec // If obj is not in the synced namespace OR // If object-name annotation is not set OR // If object-name annotation is different from actual name - if !s.IsTargetedNamespace(pObj.GetNamespace()) || pObj.GetAnnotations()[NameAnnotation] == "" { + if !s.IsTargetedNamespace(ctx, pObj.GetNamespace()) || pObj.GetAnnotations()[NameAnnotation] == "" { return false } else if pObj.GetAnnotations()[KindAnnotation] != "" { gvk, err := apiutil.GVKForObject(pObj, scheme.Scheme) @@ -67,15 +68,23 @@ func (s *multiNamespace) IsManaged(_ *synccontext.SyncContext, pObj client.Objec return true } -func (s *multiNamespace) IsTargetedNamespace(ns string) bool { - return strings.HasPrefix(ns, s.getNamespacePrefix()) && strings.HasSuffix(ns, getNamespaceSuffix(s.currentNamespace, VClusterName)) +func (s *multiNamespace) IsTargetedNamespace(ctx *synccontext.SyncContext, pNamespace string) bool { + if _, ok := pro.HostNamespaceMatchesMapping(ctx, pNamespace); ok { + return true + } + + return strings.HasPrefix(pNamespace, s.getNamespacePrefix()) && strings.HasSuffix(pNamespace, getNamespaceSuffix(s.currentNamespace, VClusterName)) } func (s *multiNamespace) getNamespacePrefix() string { return "vcluster" } -func (s *multiNamespace) HostNamespace(vNamespace string) string { +func (s *multiNamespace) HostNamespace(ctx *synccontext.SyncContext, vNamespace string) string { + if pNamespace, 
ok := pro.VirtualNamespaceMatchesMapping(ctx, vNamespace); ok { + return pNamespace + } + return hostNamespace(s.currentNamespace, vNamespace, s.getNamespacePrefix(), VClusterName) } @@ -93,16 +102,46 @@ func (s *multiNamespace) MarkerLabelCluster() string { return SafeConcatName(s.currentNamespace, "x", VClusterName) } -func (s *multiNamespace) HostLabelCluster(ctx *synccontext.SyncContext, key string) string { +func (s *multiNamespace) VirtualLabelCluster(ctx *synccontext.SyncContext, pLabel string) (retLabel string, found bool) { + if keyMatchesSyncedLabels(ctx, pLabel) { + return pLabel, true + } else if !strings.HasPrefix(pLabel, LabelPrefix) { + return pLabel, true + } + + defer func() { + recordLabelCluster(ctx, retLabel, pLabel) + }() + + // check if the label is within the store + if ctx != nil && ctx.Mappings != nil && ctx.Mappings.Store() != nil { + vLabel, ok := ctx.Mappings.Store().HostToVirtualLabelCluster(ctx, pLabel) + if ok { + return vLabel, true + } + } + + return "", false +} + +func (s *multiNamespace) HostLabelCluster(ctx *synccontext.SyncContext, key string) (retLabel string) { if keyMatchesSyncedLabels(ctx, key) { return key } + defer func() { + recordLabelCluster(ctx, key, retLabel) + }() + return hostLabelCluster(key, s.currentNamespace) } -func (s *multiNamespace) HostLabel(_ *synccontext.SyncContext, key string) string { - return key +func (s *multiNamespace) VirtualLabel(_ *synccontext.SyncContext, pLabel, _ string) (string, bool) { + return pLabel, true +} + +func (s *multiNamespace) HostLabel(_ *synccontext.SyncContext, vLabel, _ string) string { + return vLabel } func hostLabelCluster(key, vClusterNamespace string) string { diff --git a/pkg/util/translate/pro/namespace_mappings.go b/pkg/util/translate/pro/namespace_mappings.go new file mode 100644 index 000000000..8a9e9c9b0 --- /dev/null +++ b/pkg/util/translate/pro/namespace_mappings.go @@ -0,0 +1,18 @@ +package pro + +import ( + "github.com/loft-sh/vcluster/pkg/syncer/synccontext" + "sigs.k8s.io/controller-runtime/pkg/cache" +) + +var HostNamespaceMatchesMapping = func(_ *synccontext.SyncContext, _ string) (string, bool) { + return "", false +} + +var VirtualNamespaceMatchesMapping = func(_ *synccontext.SyncContext, _ string) (string, bool) { + return "", false +} + +var AddMappingsToCache = func(_ map[string]cache.Config) { + +} diff --git a/pkg/util/translate/single_namespace.go b/pkg/util/translate/single_namespace.go index 2aae566cf..15fa78c11 100644 --- a/pkg/util/translate/single_namespace.go +++ b/pkg/util/translate/single_namespace.go @@ -9,6 +9,7 @@ import ( "github.com/loft-sh/vcluster/pkg/scheme" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" "github.com/loft-sh/vcluster/pkg/util/base36" + "github.com/loft-sh/vcluster/pkg/util/translate/pro" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" @@ -31,17 +32,25 @@ func (s *singleNamespace) SingleNamespaceTarget() bool { return true } -func (s *singleNamespace) HostName(name, namespace string) string { - return SingleNamespaceHostName(name, namespace, VClusterName) +func (s *singleNamespace) HostName(ctx *synccontext.SyncContext, vName, vNamespace string) string { + if vName == "" { + return "" + } else if _, ok := pro.VirtualNamespaceMatchesMapping(ctx, vNamespace); ok { + return vName + } + + return SingleNamespaceHostName(vName, vNamespace, VClusterName) } -func (s *singleNamespace) HostNameShort(name, namespace string) string { - if name == "" { +func (s *singleNamespace) 
HostNameShort(ctx *synccontext.SyncContext, vName, vNamespace string) string { + if vName == "" { return "" + } else if _, ok := pro.VirtualNamespaceMatchesMapping(ctx, vNamespace); ok { + return vName } // we use base36 to avoid as much conflicts as possible - digest := sha256.Sum256([]byte(strings.Join([]string{name, "x", namespace, "x", VClusterName}, "-"))) + digest := sha256.Sum256([]byte(strings.Join([]string{vName, "x", vNamespace, "x", VClusterName}, "-"))) return base36.EncodeBytes(digest[:])[0:10] } @@ -70,9 +79,12 @@ func (s *singleNamespace) IsManaged(ctx *synccontext.SyncContext, pObj client.Ob } // is object not in our target namespace? - if !s.IsTargetedNamespace(pObj.GetNamespace()) { + if !s.IsTargetedNamespace(ctx, pObj.GetNamespace()) { return false - } else if pObj.GetLabels()[MarkerLabel] != VClusterName { + } + + // if host namespace is mapped, we don't check for marker label + if _, ok := pro.HostNamespaceMatchesMapping(ctx, pObj.GetNamespace()); !ok && pObj.GetLabels()[MarkerLabel] != VClusterName { return false } @@ -82,8 +94,9 @@ func (s *singleNamespace) IsManaged(ctx *synccontext.SyncContext, pObj client.Ob gvk, err := apiutil.GVKForObject(pObj, scheme.Scheme) if err == nil { // check if the name annotation is correct - if pObj.GetAnnotations()[NameAnnotation] == "" || - (ctx.Mappings.Has(gvk) && pObj.GetName() != mappings.VirtualToHostName(ctx, pObj.GetAnnotations()[NameAnnotation], pObj.GetAnnotations()[NamespaceAnnotation], gvk)) { + if pObj.GetAnnotations()[NameAnnotation] == "" { + return false + } else if ctx.Mappings.Has(gvk) && pObj.GetName() != mappings.VirtualToHostName(ctx, pObj.GetAnnotations()[NameAnnotation], pObj.GetAnnotations()[NamespaceAnnotation], gvk) { klog.FromContext(ctx).V(1).Info("Host object doesn't match, because name annotations is wrong", "object", pObj.GetName(), "kind", gvk.String(), @@ -91,6 +104,7 @@ func (s *singleNamespace) IsManaged(ctx *synccontext.SyncContext, pObj client.Ob "expectedName", mappings.VirtualToHostName(ctx, pObj.GetAnnotations()[NameAnnotation], pObj.GetAnnotations()[NamespaceAnnotation], gvk), "nameAnnotation", pObj.GetAnnotations()[NamespaceAnnotation]+"/"+pObj.GetAnnotations()[NameAnnotation], ) + return false } @@ -108,28 +122,131 @@ func (s *singleNamespace) IsManaged(ctx *synccontext.SyncContext, pObj client.Ob return true } -func (s *singleNamespace) IsTargetedNamespace(ns string) bool { - return ns == s.targetNamespace +func (s *singleNamespace) IsTargetedNamespace(ctx *synccontext.SyncContext, pNamespace string) bool { + if _, ok := pro.HostNamespaceMatchesMapping(ctx, pNamespace); ok { + return true + } + + return pNamespace == s.targetNamespace } -func (s *singleNamespace) HostNamespace(_ string) string { +func (s *singleNamespace) HostNamespace(ctx *synccontext.SyncContext, vNamespace string) string { + if pNamespace, ok := pro.VirtualNamespaceMatchesMapping(ctx, vNamespace); ok { + return pNamespace + } + return s.targetNamespace } -func (s *singleNamespace) HostLabelCluster(ctx *synccontext.SyncContext, key string) string { +func (s *singleNamespace) VirtualLabelCluster(ctx *synccontext.SyncContext, pLabel string) (retLabel string, found bool) { + if keyMatchesSyncedLabels(ctx, pLabel) { + return pLabel, true + } else if !strings.HasPrefix(pLabel, LabelPrefix) { + return pLabel, true + } + + defer func() { + recordLabelCluster(ctx, retLabel, pLabel) + }() + + // check if the label is within the store + if ctx != nil && ctx.Mappings != nil && ctx.Mappings.Store() != nil { + vLabel, ok := 
ctx.Mappings.Store().HostToVirtualLabelCluster(ctx, pLabel) + if ok { + return vLabel, true + } + } + + return "", false +} + +func (s *singleNamespace) HostLabelCluster(ctx *synccontext.SyncContext, key string) (retLabel string) { if keyMatchesSyncedLabels(ctx, key) { return key } + defer func() { + recordLabelCluster(ctx, key, retLabel) + }() + return hostLabelCluster(key, s.targetNamespace) } -func (s *singleNamespace) HostLabel(ctx *synccontext.SyncContext, key string) string { - if keyMatchesSyncedLabels(ctx, key) { - return key +func (s *singleNamespace) VirtualLabel(ctx *synccontext.SyncContext, pLabel, vNamespace string) (retLabel string, found bool) { + if _, ok := pro.VirtualNamespaceMatchesMapping(ctx, vNamespace); ok { + return pLabel, true + } else if keyMatchesSyncedLabels(ctx, pLabel) { + return pLabel, true + } else if !strings.HasPrefix(pLabel, LabelPrefix) { + return pLabel, true + } + + defer func() { + recordLabel(ctx, retLabel, pLabel) + }() + + // check if the label is within the store + if ctx != nil && ctx.Mappings != nil && ctx.Mappings.Store() != nil { + vLabel, ok := ctx.Mappings.Store().HostToVirtualLabel(ctx, pLabel) + if ok { + return vLabel, true + } + } + + return "", false +} + +func (s *singleNamespace) HostLabel(ctx *synccontext.SyncContext, vLabel, vNamespace string) (retLabel string) { + if _, ok := pro.VirtualNamespaceMatchesMapping(ctx, vNamespace); ok { + return vLabel + } else if keyMatchesSyncedLabels(ctx, vLabel) { + return vLabel + } + + // record rewriting + defer func() { + recordLabel(ctx, vLabel, retLabel) + }() + + return convertLabelKeyWithPrefix(LabelPrefix, vLabel) +} + +func recordLabel(ctx *synccontext.SyncContext, virtual, host string) { + if ctx != nil && ctx.Mappings != nil && ctx.Mappings.Store() != nil { + // check if we have the owning object in the context + belongsTo, ok := synccontext.MappingFrom(ctx) + if !ok { + return + } + + // record the mapping + err := ctx.Mappings.Store().RecordLabel(ctx, synccontext.LabelMapping{ + Virtual: virtual, + Host: host, + }, belongsTo) + if err != nil { + klog.FromContext(ctx).Error(err, "record label mapping", "host", host, "virtual", virtual) + } } +} + +func recordLabelCluster(ctx *synccontext.SyncContext, virtual, host string) { + if ctx != nil && ctx.Mappings != nil && ctx.Mappings.Store() != nil { + // check if we have the owning object in the context + belongsTo, ok := synccontext.MappingFrom(ctx) + if !ok { + return + } - return convertLabelKeyWithPrefix(LabelPrefix, key) + // record the mapping + err := ctx.Mappings.Store().RecordLabelCluster(ctx, synccontext.LabelMapping{ + Virtual: virtual, + Host: host, + }, belongsTo) + if err != nil { + klog.FromContext(ctx).Error(err, "record label mapping", "host", host, "virtual", virtual) + } + } } func keyMatchesSyncedLabels(ctx *synccontext.SyncContext, key string) bool { diff --git a/pkg/util/translate/translate.go b/pkg/util/translate/translate.go index 4f19f8d41..7da188579 100644 --- a/pkg/util/translate/translate.go +++ b/pkg/util/translate/translate.go @@ -5,6 +5,7 @@ import ( "crypto/sha256" "encoding/hex" "fmt" + "maps" "math" "sort" "strings" @@ -12,6 +13,7 @@ import ( "github.com/loft-sh/vcluster/pkg/scheme" "github.com/loft-sh/vcluster/pkg/syncer/synccontext" + "github.com/loft-sh/vcluster/pkg/util/translate/pro" "github.com/pkg/errors" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsv1clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" @@ -28,20 +30,6 @@ import ( 
"sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -var ( - NamespaceLabel = "vcluster.loft.sh/namespace" - MarkerLabel = "vcluster.loft.sh/managed-by" - LabelPrefix = "vcluster.loft.sh/label" - NamespaceLabelPrefix = "vcluster.loft.sh/ns-label" - ControllerLabel = "vcluster.loft.sh/controlled-by" - - // VClusterName is the vcluster name, usually set at start time - VClusterName = "suffix" - - ManagedAnnotationsAnnotation = "vcluster.loft.sh/managed-annotations" - ManagedLabelsAnnotation = "vcluster.loft.sh/managed-labels" -) - const ( SkipBackSyncInMultiNamespaceMode = "vcluster.loft.sh/skip-backsync" ) @@ -73,14 +61,85 @@ func HostMetadata[T client.Object](ctx *synccontext.SyncContext, vObj T, name ty return pObj } +func VirtualMetadata[T client.Object](ctx *synccontext.SyncContext, pObj T, name types.NamespacedName, excludedAnnotations ...string) T { + vObj := CopyObjectWithName(pObj, name, false) + vObj.SetAnnotations(VirtualAnnotations(pObj, nil, excludedAnnotations...)) + vObj.SetLabels(VirtualLabels(ctx, pObj, nil, vObj.GetNamespace())) + return vObj +} + +func VirtualLabelsMap(ctx *synccontext.SyncContext, pLabels, vLabels map[string]string, vNamespace string, excluded ...string) map[string]string { + if pLabels == nil { + return nil + } else if _, ok := pro.VirtualNamespaceMatchesMapping(ctx, vNamespace); ok || !Default.SingleNamespaceTarget() { + retMap := map[string]string{} + maps.Copy(retMap, pLabels) + return retMap + } + + excluded = append(excluded, MarkerLabel, NamespaceLabel, ControllerLabel) + retLabels := copyMaps(pLabels, vLabels, func(key string) bool { + return exists(excluded, key) || strings.HasPrefix(key, NamespaceLabelPrefix) + }) + + // try to translate back + for key, value := range retLabels { + delete(retLabels, key) + vKey, ok := Default.VirtualLabel(ctx, key, vNamespace) + if ok { + retLabels[vKey] = value + } + } + + return retLabels +} + +func VirtualAnnotations(pObj, vObj client.Object, excluded ...string) map[string]string { + excluded = append(excluded, NameAnnotation, UIDAnnotation, KindAnnotation, NamespaceAnnotation, ManagedAnnotationsAnnotation, ManagedLabelsAnnotation) + var toAnnotations map[string]string + if vObj != nil { + toAnnotations = vObj.GetAnnotations() + } + + return copyMaps(pObj.GetAnnotations(), toAnnotations, func(key string) bool { + return exists(excluded, key) + }) +} + +func copyMaps(fromMap, toMap map[string]string, excludeKey func(string) bool) map[string]string { + retMap := map[string]string{} + for k, v := range fromMap { + if excludeKey != nil && excludeKey(k) { + continue + } + + retMap[k] = v + } + + for key := range toMap { + if excludeKey != nil && excludeKey(key) { + value, ok := toMap[key] + if ok { + retMap[key] = value + } + } + } + + return retMap +} + func HostLabelsMap(ctx *synccontext.SyncContext, vLabels, pLabels map[string]string, vNamespace string) map[string]string { if vLabels == nil { return nil + } else if _, ok := pro.VirtualNamespaceMatchesMapping(ctx, vNamespace); ok || !Default.SingleNamespaceTarget() { + retMap := map[string]string{} + maps.Copy(retMap, vLabels) + return retMap } newLabels := map[string]string{} for k, v := range vLabels { - newLabels[Default.HostLabel(ctx, k)] = v + newLabels[Default.HostLabel(ctx, k, vNamespace)] = v } newLabels[MarkerLabel] = VClusterName @@ -95,9 +154,55 @@ func HostLabelsMap(ctx *synccontext.SyncContext, vLabels, pLabels map[string]str newLabels[ControllerLabel] = pLabels[ControllerLabel] } + // add already existing labels back + for k, v := range pLabels { 
+ if strings.HasPrefix(k, "vcluster.loft.sh/") { + continue + } + + _, ok := newLabels[k] + if !ok { + newLabels[k] = v + } + } + return newLabels } +func VirtualLabelsMapCluster(ctx *synccontext.SyncContext, pLabels, vLabels map[string]string, excluded ...string) map[string]string { + if pLabels == nil { + return nil + } + + excluded = append(excluded, MarkerLabel, ControllerLabel) + retLabels := copyMaps(pLabels, vLabels, func(key string) bool { + return exists(excluded, key) || strings.HasPrefix(key, NamespaceLabelPrefix) + }) + + // try to translate back + for key, value := range retLabels { + delete(retLabels, key) + vKey, ok := Default.VirtualLabelCluster(ctx, key) + if ok { + retLabels[vKey] = value + } + } + + // add already existing labels back + for k, v := range pLabels { + if strings.HasPrefix(k, "vcluster.loft.sh/") { + continue + } + + _, ok := retLabels[k] + if !ok { + retLabels[k] = v + } + } + + return retLabels +} + func HostLabelsMapCluster(ctx *synccontext.SyncContext, vLabels, pLabels map[string]string) map[string]string { newLabels := map[string]string{} for k, v := range vLabels { @@ -110,8 +215,55 @@ func HostLabelsMapCluster(ctx *synccontext.SyncContext, vLabels, pLabels map[str return newLabels } -func HostLabelSelector(ctx *synccontext.SyncContext, labelSelector *metav1.LabelSelector) *metav1.LabelSelector { - return hostLabelSelector(ctx, labelSelector, Default.HostLabel) +func VirtualLabelSelector(ctx *synccontext.SyncContext, labelSelector *metav1.LabelSelector, vNamespace string) *metav1.LabelSelector { + return virtualLabelSelector(ctx, labelSelector, func(ctx *synccontext.SyncContext, key string) (string, bool) { + return Default.VirtualLabel(ctx, key, vNamespace) + }) +} + +func VirtualLabelSelectorCluster(ctx *synccontext.SyncContext, labelSelector *metav1.LabelSelector) *metav1.LabelSelector { + return virtualLabelSelector(ctx, labelSelector, Default.VirtualLabelCluster) +} + +type vLabelFunc func(ctx *synccontext.SyncContext, key string) (string, bool) + +func virtualLabelSelector(ctx *synccontext.SyncContext, labelSelector *metav1.LabelSelector, labelFunc vLabelFunc) *metav1.LabelSelector { + if labelSelector == nil { + return nil + } + + newLabelSelector := &metav1.LabelSelector{} + if labelSelector.MatchLabels != nil { + newLabelSelector.MatchLabels = map[string]string{} + for k, v := range labelSelector.MatchLabels { + pLabel, ok := labelFunc(ctx, k) + if !ok { + pLabel = k + } + + newLabelSelector.MatchLabels[pLabel] = v + } + } + for _, r := range labelSelector.MatchExpressions { + pLabel, ok := labelFunc(ctx, r.Key) + if !ok { + pLabel = r.Key + } + + newLabelSelector.MatchExpressions = append(newLabelSelector.MatchExpressions, metav1.LabelSelectorRequirement{ + Key: pLabel, + Operator: r.Operator, + Values: r.Values, + }) + } + + return newLabelSelector +} + +func HostLabelSelector(ctx *synccontext.SyncContext, labelSelector *metav1.LabelSelector, vNamespace string) *metav1.LabelSelector { + return hostLabelSelector(ctx, labelSelector, func(ctx *synccontext.SyncContext, key string) string { + return Default.HostLabel(ctx, key, vNamespace) + }) } func HostLabelSelectorCluster(ctx *synccontext.SyncContext, labelSelector *metav1.LabelSelector) *metav1.LabelSelector { @@ -143,6 +295,22 @@ func hostLabelSelector(ctx *synccontext.SyncContext, labelSelector *metav1.Label return newLabelSelector } +func VirtualLabels(ctx *synccontext.SyncContext, pObj, vObj client.Object, vNamespace string) map[string]string { + pLabels := pObj.GetLabels() + if pLabels 
== nil { + pLabels = map[string]string{} + } + var vLabels map[string]string + if vObj != nil { + vLabels = vObj.GetLabels() + } + if pObj.GetNamespace() == "" { + return VirtualLabelsMapCluster(ctx, pLabels, vLabels) + } + + return VirtualLabelsMap(ctx, pLabels, vLabels, vNamespace) +} + func HostLabels(ctx *synccontext.SyncContext, vObj, pObj client.Object) map[string]string { vLabels := vObj.GetLabels() if vLabels == nil { @@ -218,21 +386,6 @@ func SafeConcatName(name ...string) string { return fullPath } -func UniqueSlice(stringSlice []string) []string { - keys := make(map[string]bool) - list := []string{} - for _, entry := range stringSlice { - if entry == "" { - continue - } - if _, value := keys[entry]; !value { - keys[entry] = true - list = append(list, entry) - } - } - return list -} - func Split(s, sep string) (string, string) { parts := strings.SplitN(s, sep, 2) return strings.TrimSpace(parts[0]), strings.TrimSpace(safeIndex(parts, 1)) diff --git a/pkg/util/translate/translate_test.go b/pkg/util/translate/translate_test.go new file mode 100644 index 000000000..a73233e04 --- /dev/null +++ b/pkg/util/translate/translate_test.go @@ -0,0 +1,268 @@ +package translate + +import ( + "context" + "testing" + + "github.com/loft-sh/vcluster/pkg/mappings" + "github.com/loft-sh/vcluster/pkg/mappings/store" + "github.com/loft-sh/vcluster/pkg/syncer/synccontext" + "gotest.tools/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +func TestAnnotations(t *testing.T) { + vObj := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "test": "test", + }, + }, + } + pObj := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "test": "test", + }, + }, + } + + pObj.Annotations = HostAnnotations(vObj, pObj) + assert.DeepEqual(t, map[string]string{ + "test": "test", + ManagedAnnotationsAnnotation: "test", + KindAnnotation: corev1.SchemeGroupVersion.WithKind("Secret").String(), + NameAnnotation: "", + UIDAnnotation: "", + }, pObj.Annotations) + + pObj.Annotations["other"] = "other" + vObj.Annotations = VirtualAnnotations(pObj, vObj) + assert.DeepEqual(t, map[string]string{ + "other": "other", + "test": "test", + }, vObj.Annotations) + + pObj.Annotations = HostAnnotations(vObj, pObj) + assert.DeepEqual(t, map[string]string{ + "test": "test", + "other": "other", + ManagedAnnotationsAnnotation: "other\ntest", + KindAnnotation: corev1.SchemeGroupVersion.WithKind("Secret").String(), + NameAnnotation: "", + UIDAnnotation: "", + }, pObj.Annotations) +} + +func TestLabelsMapCluster(t *testing.T) { + backend := store.NewMemoryBackend() + mappingsStore, err := store.NewStore(context.TODO(), nil, nil, backend) + assert.NilError(t, err) + + ownerMapping := synccontext.NameMapping{ + GroupVersionKind: corev1.SchemeGroupVersion.WithKind("PersistentVolume"), + VirtualName: types.NamespacedName{ + Name: "test", + }, + HostName: types.NamespacedName{ + Name: "test", + }, + } + err = mappingsStore.RecordReference(context.TODO(), ownerMapping, ownerMapping) + assert.NilError(t, err) + + syncContext := &synccontext.SyncContext{ + Context: synccontext.WithMapping(context.TODO(), ownerMapping), + Mappings: mappings.NewMappingsRegistry(mappingsStore), + } + pMap := HostLabelsMapCluster(syncContext, map[string]string{ + "test": "test", + "test123": "test123", + }, nil) + assert.DeepEqual(t, map[string]string{ + "vcluster.loft.sh/label--x-suffix-x-9f86d08188": "test", + 
"vcluster.loft.sh/label--x-suffix-x-ecd71870d1": "test123", + MarkerLabel: "-x-suffix", + }, pMap) + + pMap["other"] = "other" + + vMap := VirtualLabelsMapCluster(syncContext, pMap, nil) + assert.DeepEqual(t, map[string]string{ + "test": "test", + "test123": "test123", + "other": "other", + }, vMap) + + pMap = HostLabelsMapCluster(syncContext, vMap, pMap) + assert.DeepEqual(t, map[string]string{ + "vcluster.loft.sh/label--x-suffix-x-9f86d08188": "test", + "vcluster.loft.sh/label--x-suffix-x-ecd71870d1": "test123", + "vcluster.loft.sh/label--x-suffix-x-d9298a10d1": "other", + MarkerLabel: "-x-suffix", + }, pMap) +} + +func TestLabelsMap(t *testing.T) { + backend := store.NewMemoryBackend() + mappingsStore, err := store.NewStore(context.TODO(), nil, nil, backend) + assert.NilError(t, err) + + ownerMapping := synccontext.NameMapping{ + GroupVersionKind: corev1.SchemeGroupVersion.WithKind("Secret"), + VirtualName: types.NamespacedName{ + Name: "test", + Namespace: "test", + }, + HostName: types.NamespacedName{ + Name: "test", + Namespace: "test", + }, + } + err = mappingsStore.RecordReference(context.TODO(), ownerMapping, ownerMapping) + assert.NilError(t, err) + + syncContext := &synccontext.SyncContext{ + Context: synccontext.WithMapping(context.TODO(), ownerMapping), + Mappings: mappings.NewMappingsRegistry(mappingsStore), + } + pMap := HostLabelsMap(syncContext, map[string]string{ + "test": "test", + "test123": "test123", + }, nil, "test") + assert.DeepEqual(t, map[string]string{ + "vcluster.loft.sh/label-suffix-x-9f86d08188": "test", + "vcluster.loft.sh/label-suffix-x-ecd71870d1": "test123", + MarkerLabel: VClusterName, + NamespaceLabel: "test", + }, pMap) + + pMap["other"] = "other" + + vMap := VirtualLabelsMap(syncContext, pMap, nil, "") + assert.DeepEqual(t, map[string]string{ + "test": "test", + "test123": "test123", + "other": "other", + }, vMap) + + pMap = HostLabelsMap(syncContext, vMap, pMap, "test") + assert.DeepEqual(t, map[string]string{ + "vcluster.loft.sh/label-suffix-x-9f86d08188": "test", + "vcluster.loft.sh/label-suffix-x-ecd71870d1": "test123", + "vcluster.loft.sh/label-suffix-x-d9298a10d1": "other", + MarkerLabel: VClusterName, + NamespaceLabel: "test", + "other": "other", + }, pMap) +} + +func TestLabelSelector(t *testing.T) { + backend := store.NewMemoryBackend() + mappingsStore, err := store.NewStore(context.TODO(), nil, nil, backend) + assert.NilError(t, err) + + ownerMapping := synccontext.NameMapping{ + GroupVersionKind: corev1.SchemeGroupVersion.WithKind("Secret"), + VirtualName: types.NamespacedName{ + Name: "test", + Namespace: "test", + }, + HostName: types.NamespacedName{ + Name: "test", + Namespace: "test", + }, + } + err = mappingsStore.RecordReference(context.TODO(), ownerMapping, ownerMapping) + assert.NilError(t, err) + + syncContext := &synccontext.SyncContext{ + Context: synccontext.WithMapping(context.TODO(), ownerMapping), + Mappings: mappings.NewMappingsRegistry(mappingsStore), + } + + pMap := HostLabelSelector(syncContext, &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + "test123": "test123", + }, + }, "") + assert.DeepEqual(t, &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "vcluster.loft.sh/label-suffix-x-9f86d08188": "test", + "vcluster.loft.sh/label-suffix-x-ecd71870d1": "test123", + }, + }, pMap) + + vMap := VirtualLabelSelector(syncContext, pMap, "") + assert.DeepEqual(t, &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + "test123": "test123", + }, + }, vMap) + + pMap = 
HostLabelSelector(syncContext, vMap, "") + assert.DeepEqual(t, &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "vcluster.loft.sh/label-suffix-x-9f86d08188": "test", + "vcluster.loft.sh/label-suffix-x-ecd71870d1": "test123", + }, + }, pMap) +} + +func TestLabelSelectorCluster(t *testing.T) { + backend := store.NewMemoryBackend() + mappingsStore, err := store.NewStore(context.TODO(), nil, nil, backend) + assert.NilError(t, err) + + ownerMapping := synccontext.NameMapping{ + GroupVersionKind: corev1.SchemeGroupVersion.WithKind("Secret"), + VirtualName: types.NamespacedName{ + Name: "test", + Namespace: "test", + }, + HostName: types.NamespacedName{ + Name: "test", + Namespace: "test", + }, + } + err = mappingsStore.RecordReference(context.TODO(), ownerMapping, ownerMapping) + assert.NilError(t, err) + + syncContext := &synccontext.SyncContext{ + Context: synccontext.WithMapping(context.TODO(), ownerMapping), + Mappings: mappings.NewMappingsRegistry(mappingsStore), + } + + pMap := HostLabelSelectorCluster(syncContext, &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + "test123": "test123", + }, + }) + assert.DeepEqual(t, &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "vcluster.loft.sh/label--x-suffix-x-9f86d08188": "test", + "vcluster.loft.sh/label--x-suffix-x-ecd71870d1": "test123", + }, + }, pMap) + + vMap := VirtualLabelSelectorCluster(syncContext, pMap) + assert.DeepEqual(t, &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "test": "test", + "test123": "test123", + }, + }, vMap) + + pMap = HostLabelSelectorCluster(syncContext, vMap) + assert.DeepEqual(t, &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "vcluster.loft.sh/label--x-suffix-x-9f86d08188": "test", + "vcluster.loft.sh/label--x-suffix-x-ecd71870d1": "test123", + }, + }, pMap) +} diff --git a/pkg/util/translate/types.go b/pkg/util/translate/types.go index a4375f234..426eb9498 100644 --- a/pkg/util/translate/types.go +++ b/pkg/util/translate/types.go @@ -12,6 +12,20 @@ var ( KindAnnotation = "vcluster.loft.sh/object-kind" ) +var ( + NamespaceLabel = "vcluster.loft.sh/namespace" + MarkerLabel = "vcluster.loft.sh/managed-by" + LabelPrefix = "vcluster.loft.sh/label" + NamespaceLabelPrefix = "vcluster.loft.sh/ns-label" + ControllerLabel = "vcluster.loft.sh/controlled-by" + + // VClusterName is the vcluster name, usually set at start time + VClusterName = "suffix" + + ManagedAnnotationsAnnotation = "vcluster.loft.sh/managed-annotations" + ManagedLabelsAnnotation = "vcluster.loft.sh/managed-labels" +) + var Default Translator = &singleNamespace{} type Translator interface { @@ -22,27 +36,33 @@ type Translator interface { IsManaged(ctx *synccontext.SyncContext, pObj client.Object) bool // IsTargetedNamespace checks if the provided namespace is a sync target for vcluster - IsTargetedNamespace(namespace string) bool + IsTargetedNamespace(ctx *synccontext.SyncContext, namespace string) bool // MarkerLabelCluster returns the marker label for the cluster scoped object MarkerLabelCluster() string // HostName returns the host name for a virtual cluster object - HostName(vName, vNamespace string) string + HostName(ctx *synccontext.SyncContext, vName, vNamespace string) string // HostNameShort returns the short host name for a virtual cluster object - HostNameShort(vName, vNamespace string) string + HostNameShort(ctx *synccontext.SyncContext, vName, vNamespace string) string // HostNameCluster returns the host name for a cluster scoped // virtual cluster object 
HostNameCluster(vName string) string // HostNamespace returns the host namespace for a virtual cluster object - HostNamespace(vNamespace string) string + HostNamespace(ctx *synccontext.SyncContext, vNamespace string) string + + // HostLabel translates a single label from virtual to host for a namespace scoped resource + HostLabel(ctx *synccontext.SyncContext, vLabel, vNamespace string) string + + // VirtualLabel translates a single label from host to virtual for a namespace scoped resource + VirtualLabel(ctx *synccontext.SyncContext, pLabel, vNamespace string) (string, bool) - // HostLabel translates a single label for a namespace scoped resource - HostLabel(ctx *synccontext.SyncContext, label string) string + // HostLabelCluster translates a single label from virtual to host for a cluster scoped resource + HostLabelCluster(ctx *synccontext.SyncContext, vLabel string) string - // HostLabelCluster translates a single label for a namespace scoped resource - HostLabelCluster(ctx *synccontext.SyncContext, label string) string + // VirtualLabelCluster translates a single label from host to virtual for a cluster scoped resource + VirtualLabelCluster(ctx *synccontext.SyncContext, pLabel string) (string, bool) } diff --git a/test/e2e/servicesync/servicesync.go b/test/e2e/servicesync/servicesync.go index d85a3a580..a1e49d6f8 100644 --- a/test/e2e/servicesync/servicesync.go +++ b/test/e2e/servicesync/servicesync.go @@ -162,7 +162,7 @@ func testMapping(ctx context.Context, fromClient kubernetes.Interface, fromNames framework.ExpectEqual(len(toService.Spec.Selector), 3) framework.ExpectEqual(toService.Spec.Selector[translate.NamespaceLabel], fromNamespace) framework.ExpectEqual(toService.Spec.Selector[translate.MarkerLabel], translate.VClusterName) - framework.ExpectEqual(toService.Spec.Selector[translate.Default.HostLabel(nil, "test")], "test") + framework.ExpectEqual(toService.Spec.Selector[translate.Default.HostLabel(nil, "test", "")], "test") } // check service deletion diff --git a/test/e2e/syncer/pods/pods.go b/test/e2e/syncer/pods/pods.go index 22c6b949c..350825dd9 100644 --- a/test/e2e/syncer/pods/pods.go +++ b/test/e2e/syncer/pods/pods.go @@ -7,7 +7,7 @@ import ( "strings" "time" - podtranslate "github.com/loft-sh/vcluster/pkg/controllers/resources/pods/translate" + podtranslate "github.com/loft-sh/vcluster/pkg/controllers/resources/pods/token" "github.com/loft-sh/vcluster/pkg/util/podhelper" "github.com/loft-sh/vcluster/pkg/util/random" "github.com/loft-sh/vcluster/pkg/util/translate" @@ -77,7 +77,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { // get current status vpod, err := f.VClusterClient.CoreV1().Pods(ns).Get(f.Context, podName, metav1.GetOptions{}) framework.ExpectNoError(err) - pod, err := f.HostClient.CoreV1().Pods(translate.Default.HostNamespace(ns)).Get(f.Context, translate.Default.HostName(podName, ns), metav1.GetOptions{}) + pod, err := f.HostClient.CoreV1().Pods(translate.Default.HostNamespace(nil, ns)).Get(f.Context, translate.Default.HostName(nil, podName, ns), metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(vpod.Status, pod.Status) @@ -135,7 +135,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { // get current status vpod, err := f.VClusterClient.CoreV1().Pods(ns).Get(f.Context, podName, metav1.GetOptions{}) framework.ExpectNoError(err) - 
pod, err := f.HostClient.CoreV1().Pods(translate.Default.HostNamespace(nil, ns)).Get(f.Context, translate.Default.HostName(nil, podName, ns), metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(vpod.Status, pod.Status) @@ -264,13 +264,13 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { framework.ExpectNoError(err, "A pod created in the vcluster is expected to be in the Running phase eventually.") // execute a command in a pod to retrieve env var value - stdout, stderr, err := podhelper.ExecBuffered(f.Context, f.HostConfig, translate.Default.HostNamespace(ns), translate.Default.HostName(pod.Name, pod.Namespace), testingContainerName, []string{"sh", "-c", "echo $" + envVarName}, nil) + stdout, stderr, err := podhelper.ExecBuffered(f.Context, f.HostConfig, translate.Default.HostNamespace(nil, ns), translate.Default.HostName(nil, pod.Name, pod.Namespace), testingContainerName, []string{"sh", "-c", "echo $" + envVarName}, nil) framework.ExpectNoError(err) framework.ExpectEqual(string(stdout), cmKeyValue+"\n") // echo adds \n in the end framework.ExpectEqual(string(stderr), "") // execute a command in a pod to retrieve file content - stdout, stderr, err = podhelper.ExecBuffered(f.Context, f.HostConfig, translate.Default.HostNamespace(ns), translate.Default.HostName(pod.Name, pod.Namespace), testingContainerName, []string{"cat", filePath + "/" + fileName}, nil) + stdout, stderr, err = podhelper.ExecBuffered(f.Context, f.HostConfig, translate.Default.HostNamespace(nil, ns), translate.Default.HostName(nil, pod.Name, pod.Namespace), testingContainerName, []string{"cat", filePath + "/" + fileName}, nil) framework.ExpectNoError(err) framework.ExpectEqual(string(stdout), cmKeyValue) framework.ExpectEqual(string(stderr), "") @@ -347,13 +347,13 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { framework.ExpectNoError(err, "A pod created in the vcluster is expected to be in the Running phase eventually.") // execute a command in a pod to retrieve env var value - stdout, stderr, err := podhelper.ExecBuffered(f.Context, f.HostConfig, translate.Default.HostNamespace(ns), translate.Default.HostName(pod.Name, pod.Namespace), testingContainerName, []string{"sh", "-c", "echo $" + envVarName}, nil) + stdout, stderr, err := podhelper.ExecBuffered(f.Context, f.HostConfig, translate.Default.HostNamespace(nil, ns), translate.Default.HostName(nil, pod.Name, pod.Namespace), testingContainerName, []string{"sh", "-c", "echo $" + envVarName}, nil) framework.ExpectNoError(err) framework.ExpectEqual(string(stdout), secretKeyValue+"\n") // echo adds \n in the end framework.ExpectEqual(string(stderr), "") // execute a command in a pod to retrieve file content - stdout, stderr, err = podhelper.ExecBuffered(f.Context, f.HostConfig, translate.Default.HostNamespace(ns), translate.Default.HostName(pod.Name, pod.Namespace), testingContainerName, []string{"cat", filePath + "/" + fileName}, nil) + stdout, stderr, err = podhelper.ExecBuffered(f.Context, f.HostConfig, translate.Default.HostNamespace(nil, ns), translate.Default.HostName(nil, pod.Name, pod.Namespace), testingContainerName, []string{"cat", filePath + "/" + fileName}, nil) framework.ExpectNoError(err) framework.ExpectEqual(string(stdout), secretKeyValue) framework.ExpectEqual(string(stderr), "") @@ -423,17 +423,17 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { framework.ExpectNoError(err, "A pod created in the vcluster is expected to be in the Running phase eventually.") 
// execute a command in a pod to retrieve env var value - stdout, stderr, err := podhelper.ExecBuffered(f.Context, f.HostConfig, translate.Default.HostNamespace(ns), translate.Default.HostName(pod.Name, pod.Namespace), testingContainerName, []string{"sh", "-c", "echo $HELLO_WORLD"}, nil) + stdout, stderr, err := podhelper.ExecBuffered(f.Context, f.HostConfig, translate.Default.HostNamespace(nil, ns), translate.Default.HostName(nil, pod.Name, pod.Namespace), testingContainerName, []string{"sh", "-c", "echo $HELLO_WORLD"}, nil) framework.ExpectNoError(err) framework.ExpectEqual(string(stdout), "Hello World\n", "Dependent environment variable is expected to have its value based on the referenced environment variable(s)") // echo adds \n in the end framework.ExpectEqual(string(stderr), "") - stdout, stderr, err = podhelper.ExecBuffered(f.Context, f.HostConfig, translate.Default.HostNamespace(ns), translate.Default.HostName(pod.Name, pod.Namespace), testingContainerName, []string{"sh", "-c", "echo $ESCAPED_VAR"}, nil) + stdout, stderr, err = podhelper.ExecBuffered(f.Context, f.HostConfig, translate.Default.HostNamespace(nil, ns), translate.Default.HostName(nil, pod.Name, pod.Namespace), testingContainerName, []string{"sh", "-c", "echo $ESCAPED_VAR"}, nil) framework.ExpectNoError(err) framework.ExpectEqual(string(stdout), "$(FIRST)\n", "The double '$' symbol should be escaped") // echo adds \n in the end framework.ExpectEqual(string(stderr), "") - stdout, stderr, err = podhelper.ExecBuffered(f.Context, f.HostConfig, translate.Default.HostNamespace(ns), translate.Default.HostName(pod.Name, pod.Namespace), testingContainerName, []string{"sh", "-c", "echo $MY_SERVICE"}, nil) + stdout, stderr, err = podhelper.ExecBuffered(f.Context, f.HostConfig, translate.Default.HostNamespace(nil, ns), translate.Default.HostName(nil, pod.Name, pod.Namespace), testingContainerName, []string{"sh", "-c", "echo $MY_SERVICE"}, nil) framework.ExpectNoError(err) framework.ExpectMatchRegexp(string(stdout), fmt.Sprintf("^%s://%s:%d\n$", myProtocol, ipRegExp, svcPort), "Service host and port environment variables should be resolved in a dependent environment variable") framework.ExpectEqual(string(stderr), "") @@ -460,7 +460,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { framework.ExpectNoError(err, "A pod created in the vcluster is expected to be in the Running phase eventually.") // get current physical Pod resource - pPod, err := f.HostClient.CoreV1().Pods(translate.Default.HostNamespace(ns)).Get(f.Context, translate.Default.HostName(pod.Name, pod.Namespace), metav1.GetOptions{}) + pPod, err := f.HostClient.CoreV1().Pods(translate.Default.HostNamespace(nil, ns)).Get(f.Context, translate.Default.HostName(nil, pod.Name, pod.Namespace), metav1.GetOptions{}) framework.ExpectNoError(err) pKey := translate.HostLabelNamespace(initialNsLabelKey) framework.ExpectHaveKey(pPod.GetLabels(), pKey) @@ -483,7 +483,7 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { } updated = true } - pPod, err = f.HostClient.CoreV1().Pods(translate.Default.HostNamespace(ns)).Get(ctx, translate.Default.HostName(pod.Name, pod.Namespace), metav1.GetOptions{}) + pPod, err = f.HostClient.CoreV1().Pods(translate.Default.HostNamespace(nil, ns)).Get(ctx, translate.Default.HostName(nil, pod.Name, pod.Namespace), metav1.GetOptions{}) framework.ExpectNoError(err) pKey = translate.HostLabelNamespace(additionalLabelKey) if value, ok := pPod.GetLabels()[pKey]; ok { @@ -519,7 +519,7 @@ var _ = 
ginkgo.Describe("Pods are running in the host cluster", func() { framework.ExpectNoError(err, "A pod created in the vcluster is expected to be in the Running phase eventually.") // get current physical Pod resource - pPod, err := f.HostClient.CoreV1().Pods(translate.Default.HostNamespace(ns)).Get(f.Context, translate.Default.HostName(pod.Name, pod.Namespace), metav1.GetOptions{}) + pPod, err := f.HostClient.CoreV1().Pods(translate.Default.HostNamespace(nil, ns)).Get(f.Context, translate.Default.HostName(nil, pod.Name, pod.Namespace), metav1.GetOptions{}) framework.ExpectNoError(err) // make sure service account token annotation is not present @@ -527,8 +527,8 @@ var _ = ginkgo.Describe("Pods are running in the host cluster", func() { framework.ExpectEqual(ok, false, "service account token annotation should not be present") // make sure the secret is created in host cluster - secretName := translate.Default.HostName(fmt.Sprintf("%s-sa-token", pod.Name), ns) - _, err = f.HostClient.CoreV1().Secrets(translate.Default.HostNamespace(ns)).Get(f.Context, secretName, metav1.GetOptions{}) + secretName := translate.Default.HostName(nil, fmt.Sprintf("%s-sa-token", pod.Name), ns) + _, err = f.HostClient.CoreV1().Secrets(translate.Default.HostNamespace(nil, ns)).Get(f.Context, secretName, metav1.GetOptions{}) framework.ExpectNoError(err) // make sure the project volume for path 'token' is now using a secret instead of service account diff --git a/test/e2e/syncer/pvc/pvc.go b/test/e2e/syncer/pvc/pvc.go index 933bf19f5..88e1674b4 100644 --- a/test/e2e/syncer/pvc/pvc.go +++ b/test/e2e/syncer/pvc/pvc.go @@ -105,7 +105,7 @@ var _ = ginkgo.Describe("Persistent volume synced from host cluster", func() { vpvc, err := f.VClusterClient.CoreV1().PersistentVolumeClaims(ns).Get(f.Context, pvcName, metav1.GetOptions{}) framework.ExpectNoError(err) - pvc, err := f.HostClient.CoreV1().PersistentVolumeClaims(translate.Default.HostNamespace(ns)).Get(f.Context, translate.Default.HostName(pvcName, ns), metav1.GetOptions{}) + pvc, err := f.HostClient.CoreV1().PersistentVolumeClaims(translate.Default.HostNamespace(nil, ns)).Get(f.Context, translate.Default.HostName(nil, pvcName, ns), metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(vpvc.Status, pvc.Status) diff --git a/test/e2e/syncer/services/services.go b/test/e2e/syncer/services/services.go index 037447464..4f66e5e34 100644 --- a/test/e2e/syncer/services/services.go +++ b/test/e2e/syncer/services/services.go @@ -72,7 +72,7 @@ var _ = ginkgo.Describe("Services are created as expected", func() { framework.ExpectNoError(err) // get physical service - pService, err := f.HostClient.CoreV1().Services(translate.Default.HostNamespace(ns)).Get(f.Context, translate.Default.HostName(vService.Name, vService.Namespace), metav1.GetOptions{}) + pService, err := f.HostClient.CoreV1().Services(translate.Default.HostNamespace(nil, ns)).Get(f.Context, translate.Default.HostName(nil, vService.Name, vService.Namespace), metav1.GetOptions{}) framework.ExpectNoError(err) // check node ports are the same @@ -108,7 +108,7 @@ var _ = ginkgo.Describe("Services are created as expected", func() { _, err = f.VClusterClient.CoreV1().Services(ns).Get(f.Context, service.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - _, err = f.HostClient.CoreV1().Services(translate.Default.HostNamespace(ns)).Get(f.Context, translate.Default.HostName(service.Name, service.Namespace), metav1.GetOptions{}) + _, err = f.HostClient.CoreV1().Services(translate.Default.HostNamespace(nil, 
ns)).Get(f.Context, translate.Default.HostName(nil, service.Name, service.Namespace), metav1.GetOptions{}) framework.ExpectNoError(err) }) diff --git a/test/e2e_plugin/plugin/plugin.go b/test/e2e_plugin/plugin/plugin.go index 24919a2ea..d84d3473b 100644 --- a/test/e2e_plugin/plugin/plugin.go +++ b/test/e2e_plugin/plugin/plugin.go @@ -57,7 +57,7 @@ var _ = ginkgo.Describe("plugin", func() { // wait for service to become synced hostService := &corev1.Service{} gomega.Eventually(func() bool { - err := f.HostCRClient.Get(f.Context, types.NamespacedName{Name: translate.Default.HostName(service.Name, service.Namespace), Namespace: f.VclusterNamespace}, hostService) + err := f.HostCRClient.Get(f.Context, types.NamespacedName{Name: translate.Default.HostName(nil, service.Name, service.Namespace), Namespace: f.VclusterNamespace}, hostService) return err == nil }). WithPolling(pollingInterval). diff --git a/test/e2e_scheduler/scheduler/scheduler.go b/test/e2e_scheduler/scheduler/scheduler.go index 0be269d5a..2481fa866 100644 --- a/test/e2e_scheduler/scheduler/scheduler.go +++ b/test/e2e_scheduler/scheduler/scheduler.go @@ -10,6 +10,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" ) var _ = ginkgo.Describe("Scheduler sync", func() { @@ -20,12 +21,18 @@ var _ = ginkgo.Describe("Scheduler sync", func() { framework.ExpectNoError(err) for _, vnode := range virtualNodes.Items { + origNode := vnode.DeepCopy() vnode.Spec.Taints = append(vnode.Spec.Taints, corev1.Taint{ Key: "key1", Value: "value1", Effect: corev1.TaintEffectNoSchedule, }) - _, err = f.VClusterClient.CoreV1().Nodes().Update(f.Context, &vnode, metav1.UpdateOptions{}) + + patch := client.MergeFrom(origNode) + patchBytes, err := patch.Data(&vnode) + framework.ExpectNoError(err) + + _, err = f.VClusterClient.CoreV1().Nodes().Patch(f.Context, vnode.Name, patch.Type(), patchBytes, metav1.PatchOptions{}) framework.ExpectNoError(err) } diff --git a/test/framework/util.go b/test/framework/util.go index 14691fc47..d6cc10362 100644 --- a/test/framework/util.go +++ b/test/framework/util.go @@ -19,7 +19,7 @@ import ( func (f *Framework) WaitForPodRunning(podName string, ns string) error { return wait.PollUntilContextTimeout(f.Context, time.Second*5, PollTimeout, true, func(ctx context.Context) (bool, error) { - pod, err := f.HostClient.CoreV1().Pods(translate.Default.HostNamespace(ns)).Get(ctx, translate.Default.HostName(podName, ns), metav1.GetOptions{}) + pod, err := f.HostClient.CoreV1().Pods(translate.Default.HostNamespace(nil, ns)).Get(ctx, translate.Default.HostName(nil, podName, ns), metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { return false, nil @@ -45,7 +45,7 @@ func (f *Framework) WaitForPodRunning(podName string, ns string) error { func (f *Framework) WaitForPodToComeUpWithReadinessConditions(podName string, ns string) error { return wait.PollUntilContextTimeout(f.Context, time.Second, PollTimeout, true, func(ctx context.Context) (bool, error) { - pod, err := f.HostClient.CoreV1().Pods(translate.Default.HostNamespace(ns)).Get(ctx, translate.Default.HostName(podName, ns), metav1.GetOptions{}) + pod, err := f.HostClient.CoreV1().Pods(translate.Default.HostNamespace(nil, ns)).Get(ctx, translate.Default.HostName(nil, podName, ns), metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { return false, nil @@ -64,7 +64,7 @@ func (f *Framework) WaitForPodToComeUpWithReadinessConditions(podName string, 
ns func (f *Framework) WaitForPodToComeUpWithEphemeralContainers(podName string, ns string) error { return wait.PollUntilContextTimeout(f.Context, time.Second, PollTimeout, true, func(ctx context.Context) (bool, error) { - pod, err := f.HostClient.CoreV1().Pods(translate.Default.HostNamespace(ns)).Get(ctx, translate.Default.HostName(podName, ns), metav1.GetOptions{}) + pod, err := f.HostClient.CoreV1().Pods(translate.Default.HostNamespace(nil, ns)).Get(ctx, translate.Default.HostName(nil, podName, ns), metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { return false, nil @@ -84,7 +84,7 @@ func (f *Framework) WaitForPodToComeUpWithEphemeralContainers(podName string, ns func (f *Framework) WaitForPersistentVolumeClaimBound(pvcName, ns string) error { return wait.PollUntilContextTimeout(f.Context, time.Second, PollTimeout, true, func(ctx context.Context) (bool, error) { - pvc, err := f.HostClient.CoreV1().PersistentVolumeClaims(translate.Default.HostNamespace(ns)).Get(ctx, translate.Default.HostName(pvcName, ns), metav1.GetOptions{}) + pvc, err := f.HostClient.CoreV1().PersistentVolumeClaims(translate.Default.HostNamespace(nil, ns)).Get(ctx, translate.Default.HostName(nil, pvcName, ns), metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { return false, nil @@ -143,7 +143,7 @@ func (f *Framework) WaitForServiceAccount(saName string, ns string) error { func (f *Framework) WaitForService(serviceName string, ns string) error { return wait.PollUntilContextTimeout(f.Context, time.Second, PollTimeout, true, func(ctx context.Context) (bool, error) { - _, err := f.HostClient.CoreV1().Services(translate.Default.HostNamespace(ns)).Get(ctx, translate.Default.HostName(serviceName, ns), metav1.GetOptions{}) + _, err := f.HostClient.CoreV1().Services(translate.Default.HostNamespace(nil, ns)).Get(ctx, translate.Default.HostName(nil, serviceName, ns), metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { return false, nil @@ -186,7 +186,7 @@ func (f *Framework) WaitForServiceInSyncerCache(serviceName string, ns string) e } // Check for annotation - pService, err := f.HostClient.CoreV1().Services(translate.Default.HostNamespace(ns)).Get(ctx, translate.Default.HostName(serviceName, ns), metav1.GetOptions{}) + pService, err := f.HostClient.CoreV1().Services(translate.Default.HostNamespace(nil, ns)).Get(ctx, translate.Default.HostName(nil, serviceName, ns), metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { return false, nil diff --git a/vendor/github.com/moby/locker/LICENSE b/vendor/github.com/moby/locker/LICENSE deleted file mode 100644 index 2e0ec1dcf..000000000 --- a/vendor/github.com/moby/locker/LICENSE +++ /dev/null @@ -1,190 +0,0 @@ - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2018 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/moby/locker/README.md b/vendor/github.com/moby/locker/README.md deleted file mode 100644 index a0852f0f8..000000000 --- a/vendor/github.com/moby/locker/README.md +++ /dev/null @@ -1,65 +0,0 @@ -Locker -===== - -locker provides a mechanism for creating finer-grained locking to help -free up more global locks to handle other tasks. - -The implementation looks close to a sync.Mutex, however, the user must provide a -reference to use to refer to the underlying lock when locking and unlocking, -and unlock may generate an error. - -If a lock with a given name does not exist when `Lock` is called, one is -created. 
-Lock references are automatically cleaned up on `Unlock` if nothing else is -waiting for the lock. - - -## Usage - -```go -package important - -import ( - "sync" - "time" - - "github.com/moby/locker" -) - -type important struct { - locks *locker.Locker - data map[string]interface{} - mu sync.Mutex -} - -func (i *important) Get(name string) interface{} { - i.locks.Lock(name) - defer i.locks.Unlock(name) - return i.data[name] -} - -func (i *important) Create(name string, data interface{}) { - i.locks.Lock(name) - defer i.locks.Unlock(name) - - i.createImportant(data) - - i.mu.Lock() - i.data[name] = data - i.mu.Unlock() -} - -func (i *important) createImportant(data interface{}) { - time.Sleep(10 * time.Second) -} -``` - -For functions dealing with a given name, always lock at the beginning of the -function (or before doing anything with the underlying state), this ensures any -other function that is dealing with the same name will block. - -When needing to modify the underlying data, use the global lock to ensure nothing -else is modifying it at the same time. -Since name lock is already in place, no reads will occur while the modification -is being performed. - diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go deleted file mode 100644 index a7828345f..000000000 --- a/vendor/golang.org/x/crypto/ed25519/ed25519.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ed25519 implements the Ed25519 signature algorithm. See -// https://ed25519.cr.yp.to/. -// -// These functions are also compatible with the “Ed25519” function defined in -// RFC 8032. However, unlike RFC 8032's formulation, this package's private key -// representation includes a public key suffix to make multiple signing -// operations with the same key more efficient. This package refers to the RFC -// 8032 private key as the “seed”. -// -// Beginning with Go 1.13, the functionality of this package was moved to the -// standard library as crypto/ed25519. This package only acts as a compatibility -// wrapper. -package ed25519 - -import ( - "crypto/ed25519" - "io" -) - -const ( - // PublicKeySize is the size, in bytes, of public keys as used in this package. - PublicKeySize = 32 - // PrivateKeySize is the size, in bytes, of private keys as used in this package. - PrivateKeySize = 64 - // SignatureSize is the size, in bytes, of signatures generated and verified by this package. - SignatureSize = 64 - // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. - SeedSize = 32 -) - -// PublicKey is the type of Ed25519 public keys. -// -// This type is an alias for crypto/ed25519's PublicKey type. -// See the crypto/ed25519 package for the methods on this type. -type PublicKey = ed25519.PublicKey - -// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. -// -// This type is an alias for crypto/ed25519's PrivateKey type. -// See the crypto/ed25519 package for the methods on this type. -type PrivateKey = ed25519.PrivateKey - -// GenerateKey generates a public/private key pair using entropy from rand. -// If rand is nil, crypto/rand.Reader will be used. -func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - return ed25519.GenerateKey(rand) -} - -// NewKeyFromSeed calculates a private key from a seed.
It will panic if -// len(seed) is not SeedSize. This function is provided for interoperability -// with RFC 8032. RFC 8032's private keys correspond to seeds in this -// package. -func NewKeyFromSeed(seed []byte) PrivateKey { - return ed25519.NewKeyFromSeed(seed) -} - -// Sign signs the message with privateKey and returns a signature. It will -// panic if len(privateKey) is not PrivateKeySize. -func Sign(privateKey PrivateKey, message []byte) []byte { - return ed25519.Sign(privateKey, message) -} - -// Verify reports whether sig is a valid signature of message by publicKey. It -// will panic if len(publicKey) is not PublicKeySize. -func Verify(publicKey PublicKey, message, sig []byte) bool { - return ed25519.Verify(publicKey, message, sig) -} diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go deleted file mode 100644 index 904b57e01..000000000 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC -2898 / PKCS #5 v2.0. - -A key derivation function is useful when encrypting data based on a password -or any other not-fully-random data. It uses a pseudorandom function to derive -a secure encryption key based on the password. - -While v2.0 of the standard defines only one pseudorandom function to use, -HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved -Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To -choose, you can pass the `New` functions from the different SHA packages to -pbkdf2.Key. -*/ -package pbkdf2 // import "golang.org/x/crypto/pbkdf2" - -import ( - "crypto/hmac" - "hash" -) - -// Key derives a key from the password, salt and iteration count, returning a -// []byte of length keylen that can be used as cryptographic key. The key is -// derived based on the method described as PBKDF2 with the HMAC variant using -// the supplied hash function. -// -// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you -// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by -// doing: -// -// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) -// -// Remember to get a good random salt. At least 8 bytes is recommended by the -// RFC. -// -// Using a higher iteration count will increase the cost of an exhaustive -// search but will also make derivation proportionally slower. -func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { - prf := hmac.New(h, password) - hashLen := prf.Size() - numBlocks := (keyLen + hashLen - 1) / hashLen - - var buf [4]byte - dk := make([]byte, 0, numBlocks*hashLen) - U := make([]byte, hashLen) - for block := 1; block <= numBlocks; block++ { - // N.B.: || means concatenation, ^ means XOR - // for each block T_i = U_1 ^ U_2 ^ ... 
^ U_iter - // U_1 = PRF(password, salt || uint(i)) - prf.Reset() - prf.Write(salt) - buf[0] = byte(block >> 24) - buf[1] = byte(block >> 16) - buf[2] = byte(block >> 8) - buf[3] = byte(block) - prf.Write(buf[:4]) - dk = prf.Sum(dk) - T := dk[len(dk)-hashLen:] - copy(U, T) - - // U_n = PRF(password, U_(n-1)) - for n := 2; n <= iter; n++ { - prf.Reset() - prf.Write(U) - U = U[:0] - U = prf.Sum(U) - for x := range U { - T[x] ^= U[x] - } - } - } - return dk[:keyLen] -} diff --git a/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc b/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc deleted file mode 100644 index 730e569b0..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc +++ /dev/null @@ -1 +0,0 @@ -'|Ê&{tÄU|gGê(ìCy=+¨œòcû:u:/pœ#~žü["±4¤!­nÙAªDK<ŠufÿhÅa¿Â:ºü¸¡´B/£Ø¤¹¤ò_hÎÛSãT*wÌx¼¯¹-ç|àÀÓƒÑÄäóÌ㣗A$$â6£ÁâG)8nÏpûÆË¡3ÌšœoïÏvŽB–3¿­]xÝ“Ó2l§G•|qRÞ¯ ö2 5R–Ó×Ç$´ñ½Yè¡ÞÝ™l‘Ë«yAI"ÛŒ˜®íû¹¼kÄ|Kåþ[9ÆâÒå=°úÿŸñ|@S•3 ó#æx?¾V„,¾‚SÆÝõœwPíogÒ6&V6 ©D.dBŠ 7 \ No newline at end of file diff --git a/vendor/gopkg.in/square/go-jose.v2/.gitignore b/vendor/gopkg.in/square/go-jose.v2/.gitignore deleted file mode 100644 index 95a851586..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -*~ -.*.swp -*.out -*.test -*.pem -*.cov -jose-util/jose-util -jose-util.t.err \ No newline at end of file diff --git a/vendor/gopkg.in/square/go-jose.v2/.travis.yml b/vendor/gopkg.in/square/go-jose.v2/.travis.yml deleted file mode 100644 index 391b99a40..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/.travis.yml +++ /dev/null @@ -1,45 +0,0 @@ -language: go - -sudo: false - -matrix: - fast_finish: true - allow_failures: - - go: tip - -go: -- '1.14.x' -- '1.15.x' -- tip - -go_import_path: gopkg.in/square/go-jose.v2 - -before_script: -- export PATH=$HOME/.local/bin:$PATH - -before_install: -# Install encrypted gitcookies to get around bandwidth-limits -# that is causing Travis-CI builds to fail. For more info, see -# https://github.com/golang/go/issues/12933 -- openssl aes-256-cbc -K $encrypted_1528c3c2cafd_key -iv $encrypted_1528c3c2cafd_iv -in .gitcookies.sh.enc -out .gitcookies.sh -d || true -- bash .gitcookies.sh || true -- go get github.com/wadey/gocovmerge -- go get github.com/mattn/goveralls -- go get github.com/stretchr/testify/assert -- go get github.com/stretchr/testify/require -- go get github.com/google/go-cmp/cmp -- go get golang.org/x/tools/cmd/cover || true -- go get code.google.com/p/go.tools/cmd/cover || true -- pip install cram --user - -script: -- go test . -v -covermode=count -coverprofile=profile.cov -- go test ./cipher -v -covermode=count -coverprofile=cipher/profile.cov -- go test ./jwt -v -covermode=count -coverprofile=jwt/profile.cov -- go test ./json -v # no coverage for forked encoding/json package -- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util -- cd .. - -after_success: -- gocovmerge *.cov */*.cov > merged.coverprofile -- $HOME/gopath/bin/goveralls -coverprofile merged.coverprofile -service=travis-ci diff --git a/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md b/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md deleted file mode 100644 index 3305db0f6..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md +++ /dev/null @@ -1,10 +0,0 @@ -Serious about security -====================== - -Square recognizes the important contributions the security research community -can make. We therefore encourage reporting security issues with the code -contained in this repository. 
- -If you believe you have discovered a security vulnerability, please follow the -guidelines at . - diff --git a/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md b/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md deleted file mode 100644 index 61b183651..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md +++ /dev/null @@ -1,14 +0,0 @@ -# Contributing - -If you would like to contribute code to go-jose you can do so through GitHub by -forking the repository and sending a pull request. - -When submitting code, please make every effort to follow existing conventions -and style in order to keep the code as readable as possible. Please also make -sure all tests pass by running `go test`, and format your code with `go fmt`. -We also recommend using `golint` and `errcheck`. - -Before your code can be accepted into the project you must also sign the -[Individual Contributor License Agreement][1]. - - [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1 diff --git a/vendor/gopkg.in/square/go-jose.v2/LICENSE b/vendor/gopkg.in/square/go-jose.v2/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/gopkg.in/square/go-jose.v2/README.md b/vendor/gopkg.in/square/go-jose.v2/README.md deleted file mode 100644 index 1791bfa8f..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/README.md +++ /dev/null @@ -1,118 +0,0 @@ -# Go JOSE - -[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) -[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v2) -[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE) -[![build](https://travis-ci.org/square/go-jose.svg?branch=v2)](https://travis-ci.org/square/go-jose) -[![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=v2)](https://coveralls.io/r/square/go-jose) - -Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. 
This includes support for JSON Web Encryption, -JSON Web Signature, and JSON Web Token standards. - -**Disclaimer**: This library contains encryption software that is subject to -the U.S. Export Administration Regulations. You may not export, re-export, -transfer or download this code or any part of it in violation of any United -States law, directive or regulation. In particular this software may not be -exported or re-exported in any form or on any media to Iran, North Sudan, -Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any -US maintained blocked list. - -## Overview - -The implementation follows the -[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516), -[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and -[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519). -Tables of supported algorithms are shown below. The library supports both -the compact and full serialization formats, and has optional support for -multiple recipients. It also comes with a small command-line utility -([`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util)) -for dealing with JOSE messages in a shell. - -**Note**: We use a forked version of the `encoding/json` package from the Go -standard library which uses case-sensitive matching for member names (instead -of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). -This is to avoid differences in interpretation of messages between go-jose and -libraries in other languages. - -### Versions - -We use [gopkg.in](https://gopkg.in) for versioning. - -[Version 2](https://gopkg.in/square/go-jose.v2) -([branch](https://github.com/square/go-jose/tree/v2), -[doc](https://godoc.org/gopkg.in/square/go-jose.v2)) is the current version: - - import "gopkg.in/square/go-jose.v2" - -The old `v1` branch ([go-jose.v1](https://gopkg.in/square/go-jose.v1)) will -still receive backported bug fixes and security fixes, but otherwise -development is frozen. All new feature development takes place on the `v2` -branch. Version 2 also contains additional sub-packages such as the -[jwt](https://godoc.org/gopkg.in/square/go-jose.v2/jwt) implementation -contributed by [@shaxbee](https://github.com/shaxbee). - -### Supported algorithms - -See below for a table of supported algorithms. Algorithm identifiers match -the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) -standard where possible. The Godoc reference has a list of constants. - - Key encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSA-PKCS#1v1.5 | RSA1_5 - RSA-OAEP | RSA-OAEP, RSA-OAEP-256 - AES key wrap | A128KW, A192KW, A256KW - AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW - ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW - ECDH-ES (direct) | ECDH-ES1 - Direct encryption | dir1 - -1. Not supported in multi-recipient mode - - Signing / MAC | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 - RSASSA-PSS | PS256, PS384, PS512 - HMAC | HS256, HS384, HS512 - ECDSA | ES256, ES384, ES512 - Ed25519 | EdDSA2 - -2. 
Only available in version 2 of the package - - Content encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 - AES-GCM | A128GCM, A192GCM, A256GCM - - Compression | Algorithm identifiers(s) - :------------------------- | ------------------------------- - DEFLATE (RFC 1951) | DEF - -### Supported key types - -See below for a table of supported key types. These are understood by the -library, and can be passed to corresponding functions such as `NewEncrypter` or -`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which -allows attaching a key id. - - Algorithm(s) | Corresponding types - :------------------------- | ------------------------------- - RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey) - ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey) - EdDSA1 | [ed25519.PublicKey](https://godoc.org/golang.org/x/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/golang.org/x/crypto/ed25519#PrivateKey) - AES, HMAC | []byte - -1. Only available in version 2 of the package - -## Examples - -[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) -[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v2) - -Examples can be found in the Godoc -reference for this package. The -[`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util) -subdirectory also contains a small command-line utility which might be useful -as an example. diff --git a/vendor/gopkg.in/square/go-jose.v2/asymmetric.go b/vendor/gopkg.in/square/go-jose.v2/asymmetric.go deleted file mode 100644 index b69aa0369..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/asymmetric.go +++ /dev/null @@ -1,592 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package jose - -import ( - "crypto" - "crypto/aes" - "crypto/ecdsa" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "errors" - "fmt" - "math/big" - - "golang.org/x/crypto/ed25519" - josecipher "gopkg.in/square/go-jose.v2/cipher" - "gopkg.in/square/go-jose.v2/json" -) - -// A generic RSA-based encrypter/verifier -type rsaEncrypterVerifier struct { - publicKey *rsa.PublicKey -} - -// A generic RSA-based decrypter/signer -type rsaDecrypterSigner struct { - privateKey *rsa.PrivateKey -} - -// A generic EC-based encrypter/verifier -type ecEncrypterVerifier struct { - publicKey *ecdsa.PublicKey -} - -type edEncrypterVerifier struct { - publicKey ed25519.PublicKey -} - -// A key generator for ECDH-ES -type ecKeyGenerator struct { - size int - algID string - publicKey *ecdsa.PublicKey -} - -// A generic EC-based decrypter/signer -type ecDecrypterSigner struct { - privateKey *ecdsa.PrivateKey -} - -type edDecrypterSigner struct { - privateKey ed25519.PrivateKey -} - -// newRSARecipient creates recipientKeyInfo based on the given key. -func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case RSA1_5, RSA_OAEP, RSA_OAEP_256: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - if publicKey == nil { - return recipientKeyInfo{}, errors.New("invalid public key") - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &rsaEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newRSASigner creates a recipientSigInfo based on the given key. -func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch sigAlg { - case RS256, RS384, RS512, PS256, PS384, PS512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: staticPublicKey(&JSONWebKey{ - Key: privateKey.Public(), - }), - signer: &rsaDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) { - if sigAlg != EdDSA { - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: staticPublicKey(&JSONWebKey{ - Key: privateKey.Public(), - }), - signer: &edDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// newECDHRecipient creates recipientKeyInfo based on the given key. -func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { - return recipientKeyInfo{}, errors.New("invalid public key") - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &ecEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newECDSASigner creates a recipientSigInfo based on the given key. 
-func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch sigAlg { - case ES256, ES384, ES512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: staticPublicKey(&JSONWebKey{ - Key: privateKey.Public(), - }), - signer: &ecDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// Encrypt the given payload and update the object. -func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - encryptedKey, err := ctx.encrypt(cek, alg) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: encryptedKey, - header: &rawHeader{}, - }, nil -} - -// Encrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). -func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) { - switch alg { - case RSA1_5: - return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek) - case RSA_OAEP: - return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{}) - case RSA_OAEP_256: - return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Decrypt the given payload and return the content encryption key. -func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator) -} - -// Decrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). -func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) { - // Note: The random reader on decrypt operations is only used for blinding, - // so stubbing is meanlingless (hence the direct use of rand.Reader). - switch alg { - case RSA1_5: - defer func() { - // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload - // because of an index out of bounds error, which we want to ignore. - // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover() - // only exists for preventing crashes with unpatched versions. - // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k - // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33 - _ = recover() - }() - - // Perform some input validation. - keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 - if keyBytes != len(jek) { - // Input size is incorrect, the encrypted payload should always match - // the size of the public modulus (e.g. using a 2048 bit key will - // produce 256 bytes of output). Reject this since it's invalid input. - return nil, ErrCryptoFailure - } - - cek, _, err := generator.genKey() - if err != nil { - return nil, ErrCryptoFailure - } - - // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to - // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing - // the Million Message Attack on Cryptographic Message Syntax". We are - // therefore deliberately ignoring errors here. 
- _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) - - return cek, nil - case RSA_OAEP: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - case RSA_OAEP_256: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Sign the given payload -func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return Signature{}, ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - var out []byte - var err error - - switch alg { - case RS256, RS384, RS512: - out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) - case PS256, PS384, PS512: - out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthEqualsHash, - }) - } - - if err != nil { - return Signature{}, err - } - - return Signature{ - Signature: out, - protected: &rawHeader{}, - }, nil -} - -// Verify the given payload -func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - switch alg { - case RS256, RS384, RS512: - return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) - case PS256, PS384, PS512: - return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) - } - - return ErrUnsupportedAlgorithm -} - -// Encrypt the given payload and update the object. -func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - switch alg { - case ECDH_ES: - // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. 
- return recipientInfo{ - header: &rawHeader{}, - }, nil - case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientInfo{}, ErrUnsupportedAlgorithm - } - - generator := ecKeyGenerator{ - algID: string(alg), - publicKey: ctx.publicKey, - } - - switch alg { - case ECDH_ES_A128KW: - generator.size = 16 - case ECDH_ES_A192KW: - generator.size = 24 - case ECDH_ES_A256KW: - generator.size = 32 - } - - kek, header, err := generator.genKey() - if err != nil { - return recipientInfo{}, err - } - - block, err := aes.NewCipher(kek) - if err != nil { - return recipientInfo{}, err - } - - jek, err := josecipher.KeyWrap(block, cek) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: jek, - header: &header, - }, nil -} - -// Get key size for EC key generator -func (ctx ecKeyGenerator) keySize() int { - return ctx.size -} - -// Get a content encryption key for ECDH-ES -func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { - priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader) - if err != nil { - return nil, rawHeader{}, err - } - - out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) - - b, err := json.Marshal(&JSONWebKey{ - Key: &priv.PublicKey, - }) - if err != nil { - return nil, nil, err - } - - headers := rawHeader{ - headerEPK: makeRawMessage(b), - } - - return out, headers, nil -} - -// Decrypt the given payload and return the content encryption key. -func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - epk, err := headers.getEPK() - if err != nil { - return nil, errors.New("square/go-jose: invalid epk header") - } - if epk == nil { - return nil, errors.New("square/go-jose: missing epk header") - } - - publicKey, ok := epk.Key.(*ecdsa.PublicKey) - if publicKey == nil || !ok { - return nil, errors.New("square/go-jose: invalid epk header") - } - - if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { - return nil, errors.New("square/go-jose: invalid public key in epk header") - } - - apuData, err := headers.getAPU() - if err != nil { - return nil, errors.New("square/go-jose: invalid apu header") - } - apvData, err := headers.getAPV() - if err != nil { - return nil, errors.New("square/go-jose: invalid apv header") - } - - deriveKey := func(algID string, size int) []byte { - return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size) - } - - var keySize int - - algorithm := headers.getAlgorithm() - switch algorithm { - case ECDH_ES: - // ECDH-ES uses direct key agreement, no key unwrapping necessary. 
- return deriveKey(string(headers.getEncryption()), generator.keySize()), nil - case ECDH_ES_A128KW: - keySize = 16 - case ECDH_ES_A192KW: - keySize = 24 - case ECDH_ES_A256KW: - keySize = 32 - default: - return nil, ErrUnsupportedAlgorithm - } - - key := deriveKey(string(algorithm), keySize) - block, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - return josecipher.KeyUnwrap(block, recipient.encryptedKey) -} - -func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - if alg != EdDSA { - return Signature{}, ErrUnsupportedAlgorithm - } - - sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0)) - if err != nil { - return Signature{}, err - } - - return Signature{ - Signature: sig, - protected: &rawHeader{}, - }, nil -} - -func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - if alg != EdDSA { - return ErrUnsupportedAlgorithm - } - ok := ed25519.Verify(ctx.publicKey, payload, signature) - if !ok { - return errors.New("square/go-jose: ed25519 signature failed to verify") - } - return nil -} - -// Sign the given payload -func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - var expectedBitSize int - var hash crypto.Hash - - switch alg { - case ES256: - expectedBitSize = 256 - hash = crypto.SHA256 - case ES384: - expectedBitSize = 384 - hash = crypto.SHA384 - case ES512: - expectedBitSize = 521 - hash = crypto.SHA512 - } - - curveBits := ctx.privateKey.Curve.Params().BitSize - if expectedBitSize != curveBits { - return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed) - if err != nil { - return Signature{}, err - } - - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes++ - } - - // We serialize the outputs (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. - rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...) 
- - return Signature{ - Signature: out, - protected: &rawHeader{}, - }, nil -} - -// Verify the given payload -func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - var keySize int - var hash crypto.Hash - - switch alg { - case ES256: - keySize = 32 - hash = crypto.SHA256 - case ES384: - keySize = 48 - hash = crypto.SHA384 - case ES512: - keySize = 66 - hash = crypto.SHA512 - default: - return ErrUnsupportedAlgorithm - } - - if len(signature) != 2*keySize { - return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - r := big.NewInt(0).SetBytes(signature[:keySize]) - s := big.NewInt(0).SetBytes(signature[keySize:]) - - match := ecdsa.Verify(ctx.publicKey, hashed, r, s) - if !match { - return errors.New("square/go-jose: ecdsa signature failed to verify") - } - - return nil -} diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go b/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go deleted file mode 100644 index f6465c041..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go +++ /dev/null @@ -1,196 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto/cipher" - "crypto/hmac" - "crypto/sha256" - "crypto/sha512" - "crypto/subtle" - "encoding/binary" - "errors" - "hash" -) - -const ( - nonceBytes = 16 -) - -// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. -func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { - keySize := len(key) / 2 - integrityKey := key[:keySize] - encryptionKey := key[keySize:] - - blockCipher, err := newBlockCipher(encryptionKey) - if err != nil { - return nil, err - } - - var hash func() hash.Hash - switch keySize { - case 16: - hash = sha256.New - case 24: - hash = sha512.New384 - case 32: - hash = sha512.New - } - - return &cbcAEAD{ - hash: hash, - blockCipher: blockCipher, - authtagBytes: keySize, - integrityKey: integrityKey, - }, nil -} - -// An AEAD based on CBC+HMAC -type cbcAEAD struct { - hash func() hash.Hash - authtagBytes int - integrityKey []byte - blockCipher cipher.Block -} - -func (ctx *cbcAEAD) NonceSize() int { - return nonceBytes -} - -func (ctx *cbcAEAD) Overhead() int { - // Maximum overhead is block size (for padding) plus auth tag length, where - // the length of the auth tag is equivalent to the key size. - return ctx.blockCipher.BlockSize() + ctx.authtagBytes -} - -// Seal encrypts and authenticates the plaintext. -func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { - // Output buffer -- must take care not to mangle plaintext input. 
- ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)] - copy(ciphertext, plaintext) - ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) - - cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) - - cbc.CryptBlocks(ciphertext, ciphertext) - authtag := ctx.computeAuthTag(data, nonce, ciphertext) - - ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag))) - copy(out, ciphertext) - copy(out[len(ciphertext):], authtag) - - return ret -} - -// Open decrypts and authenticates the ciphertext. -func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { - if len(ciphertext) < ctx.authtagBytes { - return nil, errors.New("square/go-jose: invalid ciphertext (too short)") - } - - offset := len(ciphertext) - ctx.authtagBytes - expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) - match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) - if match != 1 { - return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)") - } - - cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) - - // Make copy of ciphertext buffer, don't want to modify in place - buffer := append([]byte{}, []byte(ciphertext[:offset])...) - - if len(buffer)%ctx.blockCipher.BlockSize() > 0 { - return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)") - } - - cbc.CryptBlocks(buffer, buffer) - - // Remove padding - plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) - if err != nil { - return nil, err - } - - ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext))) - copy(out, plaintext) - - return ret, nil -} - -// Compute an authentication tag -func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { - buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8) - n := 0 - n += copy(buffer, aad) - n += copy(buffer[n:], nonce) - n += copy(buffer[n:], ciphertext) - binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8) - - // According to documentation, Write() on hash.Hash never fails. - hmac := hmac.New(ctx.hash, ctx.integrityKey) - _, _ = hmac.Write(buffer) - - return hmac.Sum(nil)[:ctx.authtagBytes] -} - -// resize ensures that the given slice has a capacity of at least n bytes. -// If the capacity of the slice is less than n, a new slice is allocated -// and the existing data will be copied. 
-func resize(in []byte, n uint64) (head, tail []byte) { - if uint64(cap(in)) >= n { - head = in[:n] - } else { - head = make([]byte, n) - copy(head, in) - } - - tail = head[len(in):] - return -} - -// Apply padding -func padBuffer(buffer []byte, blockSize int) []byte { - missing := blockSize - (len(buffer) % blockSize) - ret, out := resize(buffer, uint64(len(buffer))+uint64(missing)) - padding := bytes.Repeat([]byte{byte(missing)}, missing) - copy(out, padding) - return ret -} - -// Remove padding -func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { - if len(buffer)%blockSize != 0 { - return nil, errors.New("square/go-jose: invalid padding") - } - - last := buffer[len(buffer)-1] - count := int(last) - - if count == 0 || count > blockSize || count > len(buffer) { - return nil, errors.New("square/go-jose: invalid padding") - } - - padding := bytes.Repeat([]byte{last}, count) - if !bytes.HasSuffix(buffer, padding) { - return nil, errors.New("square/go-jose: invalid padding") - } - - return buffer[:len(buffer)-count], nil -} diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go b/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go deleted file mode 100644 index f62c3bdba..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go +++ /dev/null @@ -1,75 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto" - "encoding/binary" - "hash" - "io" -) - -type concatKDF struct { - z, info []byte - i uint32 - cache []byte - hasher hash.Hash -} - -// NewConcatKDF builds a KDF reader based on the given inputs. -func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { - buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo))) - n := 0 - n += copy(buffer, algID) - n += copy(buffer[n:], ptyUInfo) - n += copy(buffer[n:], ptyVInfo) - n += copy(buffer[n:], supPubInfo) - copy(buffer[n:], supPrivInfo) - - hasher := hash.New() - - return &concatKDF{ - z: z, - info: buffer, - hasher: hasher, - cache: []byte{}, - i: 1, - } -} - -func (ctx *concatKDF) Read(out []byte) (int, error) { - copied := copy(out, ctx.cache) - ctx.cache = ctx.cache[copied:] - - for copied < len(out) { - ctx.hasher.Reset() - - // Write on a hash.Hash never fails - _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) - _, _ = ctx.hasher.Write(ctx.z) - _, _ = ctx.hasher.Write(ctx.info) - - hash := ctx.hasher.Sum(nil) - chunkCopied := copy(out[copied:], hash) - copied += chunkCopied - ctx.cache = hash[chunkCopied:] - - ctx.i++ - } - - return copied, nil -} diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go b/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go deleted file mode 100644 index 093c64674..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go +++ /dev/null @@ -1,86 +0,0 @@ -/*- - * Copyright 2014 Square Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "encoding/binary" -) - -// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA. -// It is an error to call this function with a private/public key that are not on the same -// curve. Callers must ensure that the keys are valid before calling this function. Output -// size may be at most 1<<16 bytes (64 KiB). -func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { - if size > 1<<16 { - panic("ECDH-ES output size too large, must be less than or equal to 1<<16") - } - - // algId, partyUInfo, partyVInfo inputs must be prefixed with the length - algID := lengthPrefixed([]byte(alg)) - ptyUInfo := lengthPrefixed(apuData) - ptyVInfo := lengthPrefixed(apvData) - - // suppPubInfo is the encoded length of the output size in bits - supPubInfo := make([]byte, 4) - binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8) - - if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) { - panic("public key not on same curve as private key") - } - - z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes()) - zBytes := z.Bytes() - - // Note that calling z.Bytes() on a big.Int may strip leading zero bytes from - // the returned byte array. This can lead to a problem where zBytes will be - // shorter than expected which breaks the key derivation. Therefore we must pad - // to the full length of the expected coordinate here before calling the KDF. - octSize := dSize(priv.Curve) - if len(zBytes) != octSize { - zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...) - } - - reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{}) - key := make([]byte, size) - - // Read on the KDF will never fail - _, _ = reader.Read(key) - - return key -} - -// dSize returns the size in octets for a coordinate on a elliptic curve. -func dSize(curve elliptic.Curve) int { - order := curve.Params().P - bitLen := order.BitLen() - size := bitLen / 8 - if bitLen%8 != 0 { - size++ - } - return size -} - -func lengthPrefixed(data []byte) []byte { - out := make([]byte, len(data)+4) - binary.BigEndian.PutUint32(out, uint32(len(data))) - copy(out[4:], data) - return out -} diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go b/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go deleted file mode 100644 index 1d36d5015..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go +++ /dev/null @@ -1,109 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto/cipher" - "crypto/subtle" - "encoding/binary" - "errors" -) - -var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} - -// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. -func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { - if len(cek)%8 != 0 { - return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") - } - - n := len(cek) / 8 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], cek[i*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer, defaultIV) - - for t := 0; t < 6*n; t++ { - copy(buffer[8:], r[t%n]) - - block.Encrypt(buffer, buffer) - - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] - } - copy(r[t%n], buffer[8:]) - } - - out := make([]byte, (n+1)*8) - copy(out, buffer[:8]) - for i := range r { - copy(out[(i+1)*8:], r[i]) - } - - return out, nil -} - -// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. -func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { - if len(ciphertext)%8 != 0 { - return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") - } - - n := (len(ciphertext) / 8) - 1 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], ciphertext[(i+1)*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer[:8], ciphertext[:8]) - - for t := 6*n - 1; t >= 0; t-- { - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] - } - copy(buffer[8:], r[t%n]) - - block.Decrypt(buffer, buffer) - - copy(r[t%n], buffer[8:]) - } - - if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { - return nil, errors.New("square/go-jose: failed to unwrap key") - } - - out := make([]byte, n*8) - for i := range r { - copy(out[i*8:], r[i]) - } - - return out, nil -} diff --git a/vendor/gopkg.in/square/go-jose.v2/crypter.go b/vendor/gopkg.in/square/go-jose.v2/crypter.go deleted file mode 100644 index be7433e28..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/crypter.go +++ /dev/null @@ -1,542 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package jose - -import ( - "crypto/ecdsa" - "crypto/rsa" - "errors" - "fmt" - "reflect" - - "gopkg.in/square/go-jose.v2/json" -) - -// Encrypter represents an encrypter which produces an encrypted JWE object. -type Encrypter interface { - Encrypt(plaintext []byte) (*JSONWebEncryption, error) - EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error) - Options() EncrypterOptions -} - -// A generic content cipher -type contentCipher interface { - keySize() int - encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) - decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) -} - -// A key generator (for generating/getting a CEK) -type keyGenerator interface { - keySize() int - genKey() ([]byte, rawHeader, error) -} - -// A generic key encrypter -type keyEncrypter interface { - encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key -} - -// A generic key decrypter -type keyDecrypter interface { - decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key -} - -// A generic encrypter based on the given key encrypter and content cipher. -type genericEncrypter struct { - contentAlg ContentEncryption - compressionAlg CompressionAlgorithm - cipher contentCipher - recipients []recipientKeyInfo - keyGenerator keyGenerator - extraHeaders map[HeaderKey]interface{} -} - -type recipientKeyInfo struct { - keyID string - keyAlg KeyAlgorithm - keyEncrypter keyEncrypter -} - -// EncrypterOptions represents options that can be set on new encrypters. -type EncrypterOptions struct { - Compression CompressionAlgorithm - - // Optional map of additional keys to be inserted into the protected header - // of a JWS object. Some specifications which make use of JWS like to insert - // additional values here. All values must be JSON-serializable. - ExtraHeaders map[HeaderKey]interface{} -} - -// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it -// if necessary. It returns itself and so can be used in a fluent style. -func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { - if eo.ExtraHeaders == nil { - eo.ExtraHeaders = map[HeaderKey]interface{}{} - } - eo.ExtraHeaders[k] = v - return eo -} - -// WithContentType adds a content type ("cty") header and returns the updated -// EncrypterOptions. -func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions { - return eo.WithHeader(HeaderContentType, contentType) -} - -// WithType adds a type ("typ") header and returns the updated EncrypterOptions. -func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { - return eo.WithHeader(HeaderType, typ) -} - -// Recipient represents an algorithm/key to encrypt messages to. -// -// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used -// on the password-based encryption algorithms PBES2-HS256+A128KW, -// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe -// default of 100000 will be used for the count and a 128-bit random salt will -// be generated. 
-type Recipient struct { - Algorithm KeyAlgorithm - Key interface{} - KeyID string - PBES2Count int - PBES2Salt []byte -} - -// NewEncrypter creates an appropriate encrypter based on the key type -func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) { - encrypter := &genericEncrypter{ - contentAlg: enc, - recipients: []recipientKeyInfo{}, - cipher: getContentCipher(enc), - } - if opts != nil { - encrypter.compressionAlg = opts.Compression - encrypter.extraHeaders = opts.ExtraHeaders - } - - if encrypter.cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - - var keyID string - var rawKey interface{} - switch encryptionKey := rcpt.Key.(type) { - case JSONWebKey: - keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key - case *JSONWebKey: - keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key - case OpaqueKeyEncrypter: - keyID, rawKey = encryptionKey.KeyID(), encryptionKey - default: - rawKey = encryptionKey - } - - switch rcpt.Algorithm { - case DIRECT: - // Direct encryption mode must be treated differently - if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { - return nil, ErrUnsupportedKeyType - } - if encrypter.cipher.keySize() != len(rawKey.([]byte)) { - return nil, ErrInvalidKeySize - } - encrypter.keyGenerator = staticKeyGenerator{ - key: rawKey.([]byte), - } - recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte)) - recipientInfo.keyID = keyID - if rcpt.KeyID != "" { - recipientInfo.keyID = rcpt.KeyID - } - encrypter.recipients = []recipientKeyInfo{recipientInfo} - return encrypter, nil - case ECDH_ES: - // ECDH-ES (w/o key wrapping) is similar to DIRECT mode - typeOf := reflect.TypeOf(rawKey) - if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { - return nil, ErrUnsupportedKeyType - } - encrypter.keyGenerator = ecKeyGenerator{ - size: encrypter.cipher.keySize(), - algID: string(enc), - publicKey: rawKey.(*ecdsa.PublicKey), - } - recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey)) - recipientInfo.keyID = keyID - if rcpt.KeyID != "" { - recipientInfo.keyID = rcpt.KeyID - } - encrypter.recipients = []recipientKeyInfo{recipientInfo} - return encrypter, nil - default: - // Can just add a standard recipient - encrypter.keyGenerator = randomKeyGenerator{ - size: encrypter.cipher.keySize(), - } - err := encrypter.addRecipient(rcpt) - return encrypter, err - } -} - -// NewMultiEncrypter creates a multi-encrypter based on the given parameters -func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) { - cipher := getContentCipher(enc) - - if cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - if rcpts == nil || len(rcpts) == 0 { - return nil, fmt.Errorf("square/go-jose: recipients is nil or empty") - } - - encrypter := &genericEncrypter{ - contentAlg: enc, - recipients: []recipientKeyInfo{}, - cipher: cipher, - keyGenerator: randomKeyGenerator{ - size: cipher.keySize(), - }, - } - - if opts != nil { - encrypter.compressionAlg = opts.Compression - encrypter.extraHeaders = opts.ExtraHeaders - } - - for _, recipient := range rcpts { - err := encrypter.addRecipient(recipient) - if err != nil { - return nil, err - } - } - - return encrypter, nil -} - -func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) { - var recipientInfo recipientKeyInfo - - switch recipient.Algorithm { - case DIRECT, ECDH_ES: - return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm) - 
} - - recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key) - if recipient.KeyID != "" { - recipientInfo.keyID = recipient.KeyID - } - - switch recipient.Algorithm { - case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW: - if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok { - sr.p2c = recipient.PBES2Count - sr.p2s = recipient.PBES2Salt - } - } - - if err == nil { - ctx.recipients = append(ctx.recipients, recipientInfo) - } - return err -} - -func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { - switch encryptionKey := encryptionKey.(type) { - case *rsa.PublicKey: - return newRSARecipient(alg, encryptionKey) - case *ecdsa.PublicKey: - return newECDHRecipient(alg, encryptionKey) - case []byte: - return newSymmetricRecipient(alg, encryptionKey) - case string: - return newSymmetricRecipient(alg, []byte(encryptionKey)) - case *JSONWebKey: - recipient, err := makeJWERecipient(alg, encryptionKey.Key) - recipient.keyID = encryptionKey.KeyID - return recipient, err - } - if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok { - return newOpaqueKeyEncrypter(alg, encrypter) - } - return recipientKeyInfo{}, ErrUnsupportedKeyType -} - -// newDecrypter creates an appropriate decrypter based on the key type -func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { - switch decryptionKey := decryptionKey.(type) { - case *rsa.PrivateKey: - return &rsaDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case *ecdsa.PrivateKey: - return &ecDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case []byte: - return &symmetricKeyCipher{ - key: decryptionKey, - }, nil - case string: - return &symmetricKeyCipher{ - key: []byte(decryptionKey), - }, nil - case JSONWebKey: - return newDecrypter(decryptionKey.Key) - case *JSONWebKey: - return newDecrypter(decryptionKey.Key) - } - if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok { - return &opaqueKeyDecrypter{decrypter: okd}, nil - } - return nil, ErrUnsupportedKeyType -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) { - return ctx.EncryptWithAuthData(plaintext, nil) -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) { - obj := &JSONWebEncryption{} - obj.aad = aad - - obj.protected = &rawHeader{} - err := obj.protected.set(headerEncryption, ctx.contentAlg) - if err != nil { - return nil, err - } - - obj.recipients = make([]recipientInfo, len(ctx.recipients)) - - if len(ctx.recipients) == 0 { - return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to") - } - - cek, headers, err := ctx.keyGenerator.genKey() - if err != nil { - return nil, err - } - - obj.protected.merge(&headers) - - for i, info := range ctx.recipients { - recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) - if err != nil { - return nil, err - } - - err = recipient.header.set(headerAlgorithm, info.keyAlg) - if err != nil { - return nil, err - } - - if info.keyID != "" { - err = recipient.header.set(headerKeyID, info.keyID) - if err != nil { - return nil, err - } - } - obj.recipients[i] = recipient - } - - if len(ctx.recipients) == 1 { - // Move per-recipient headers into main protected header if there's - // only a single recipient. 
- obj.protected.merge(obj.recipients[0].header) - obj.recipients[0].header = nil - } - - if ctx.compressionAlg != NONE { - plaintext, err = compress(ctx.compressionAlg, plaintext) - if err != nil { - return nil, err - } - - err = obj.protected.set(headerCompression, ctx.compressionAlg) - if err != nil { - return nil, err - } - } - - for k, v := range ctx.extraHeaders { - b, err := json.Marshal(v) - if err != nil { - return nil, err - } - (*obj.protected)[k] = makeRawMessage(b) - } - - authData := obj.computeAuthData() - parts, err := ctx.cipher.encrypt(cek, authData, plaintext) - if err != nil { - return nil, err - } - - obj.iv = parts.iv - obj.ciphertext = parts.ciphertext - obj.tag = parts.tag - - return obj, nil -} - -func (ctx *genericEncrypter) Options() EncrypterOptions { - return EncrypterOptions{ - Compression: ctx.compressionAlg, - ExtraHeaders: ctx.extraHeaders, - } -} - -// Decrypt and validate the object and return the plaintext. Note that this -// function does not support multi-recipient, if you desire multi-recipient -// decryption use DecryptMulti instead. -func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { - headers := obj.mergedHeaders(nil) - - if len(obj.recipients) > 1 { - return nil, errors.New("square/go-jose: too many recipients in payload; expecting only one") - } - - critical, err := headers.getCritical() - if err != nil { - return nil, fmt.Errorf("square/go-jose: invalid crit header") - } - - if len(critical) > 0 { - return nil, fmt.Errorf("square/go-jose: unsupported crit header") - } - - decrypter, err := newDecrypter(decryptionKey) - if err != nil { - return nil, err - } - - cipher := getContentCipher(headers.getEncryption()) - if cipher == nil { - return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.getEncryption())) - } - - generator := randomKeyGenerator{ - size: cipher.keySize(), - } - - parts := &aeadParts{ - iv: obj.iv, - ciphertext: obj.ciphertext, - tag: obj.tag, - } - - authData := obj.computeAuthData() - - var plaintext []byte - recipient := obj.recipients[0] - recipientHeaders := obj.mergedHeaders(&recipient) - - cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) - if err == nil { - // Found a valid CEK -- let's try to decrypt. - plaintext, err = cipher.decrypt(cek, authData, parts) - } - - if plaintext == nil { - return nil, ErrCryptoFailure - } - - // The "zip" header parameter may only be present in the protected header. - if comp := obj.protected.getCompression(); comp != "" { - plaintext, err = decompress(comp, plaintext) - } - - return plaintext, err -} - -// DecryptMulti decrypts and validates the object and returns the plaintexts, -// with support for multiple recipients. It returns the index of the recipient -// for which the decryption was successful, the merged headers for that recipient, -// and the plaintext. 
-func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { - globalHeaders := obj.mergedHeaders(nil) - - critical, err := globalHeaders.getCritical() - if err != nil { - return -1, Header{}, nil, fmt.Errorf("square/go-jose: invalid crit header") - } - - if len(critical) > 0 { - return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported crit header") - } - - decrypter, err := newDecrypter(decryptionKey) - if err != nil { - return -1, Header{}, nil, err - } - - encryption := globalHeaders.getEncryption() - cipher := getContentCipher(encryption) - if cipher == nil { - return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(encryption)) - } - - generator := randomKeyGenerator{ - size: cipher.keySize(), - } - - parts := &aeadParts{ - iv: obj.iv, - ciphertext: obj.ciphertext, - tag: obj.tag, - } - - authData := obj.computeAuthData() - - index := -1 - var plaintext []byte - var headers rawHeader - - for i, recipient := range obj.recipients { - recipientHeaders := obj.mergedHeaders(&recipient) - - cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) - if err == nil { - // Found a valid CEK -- let's try to decrypt. - plaintext, err = cipher.decrypt(cek, authData, parts) - if err == nil { - index = i - headers = recipientHeaders - break - } - } - } - - if plaintext == nil || err != nil { - return -1, Header{}, nil, ErrCryptoFailure - } - - // The "zip" header parameter may only be present in the protected header. - if comp := obj.protected.getCompression(); comp != "" { - plaintext, err = decompress(comp, plaintext) - } - - sanitized, err := headers.sanitized() - if err != nil { - return -1, Header{}, nil, fmt.Errorf("square/go-jose: failed to sanitize header: %v", err) - } - - return index, sanitized, plaintext, err -} diff --git a/vendor/gopkg.in/square/go-jose.v2/doc.go b/vendor/gopkg.in/square/go-jose.v2/doc.go deleted file mode 100644 index dd1387f3f..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/doc.go +++ /dev/null @@ -1,27 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - -Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. It implements encryption and signing based on -the JSON Web Encryption and JSON Web Signature standards, with optional JSON -Web Token support available in a sub-package. The library supports both the -compact and full serialization formats, and has optional support for multiple -recipients. - -*/ -package jose diff --git a/vendor/gopkg.in/square/go-jose.v2/encoding.go b/vendor/gopkg.in/square/go-jose.v2/encoding.go deleted file mode 100644 index 70f7385c4..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/encoding.go +++ /dev/null @@ -1,185 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "compress/flate" - "encoding/base64" - "encoding/binary" - "io" - "math/big" - "strings" - "unicode" - - "gopkg.in/square/go-jose.v2/json" -) - -// Helper function to serialize known-good objects. -// Precondition: value is not a nil pointer. -func mustSerializeJSON(value interface{}) []byte { - out, err := json.Marshal(value) - if err != nil { - panic(err) - } - // We never want to serialize the top-level value "null," since it's not a - // valid JOSE message. But if a caller passes in a nil pointer to this method, - // MarshalJSON will happily serialize it as the top-level value "null". If - // that value is then embedded in another operation, for instance by being - // base64-encoded and fed as input to a signing algorithm - // (https://github.com/square/go-jose/issues/22), the result will be - // incorrect. Because this method is intended for known-good objects, and a nil - // pointer is not a known-good object, we are free to panic in this case. - // Note: It's not possible to directly check whether the data pointed at by an - // interface is a nil pointer, so we do this hacky workaround. - // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I - if string(out) == "null" { - panic("Tried to serialize a nil pointer.") - } - return out -} - -// Strip all newlines and whitespace -func stripWhitespace(data string) string { - buf := strings.Builder{} - buf.Grow(len(data)) - for _, r := range data { - if !unicode.IsSpace(r) { - buf.WriteRune(r) - } - } - return buf.String() -} - -// Perform compression based on algorithm -func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return deflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// Perform decompression based on algorithm -func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return inflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// Compress with DEFLATE -func deflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - - // Writing to byte buffer, err is always nil - writer, _ := flate.NewWriter(output, 1) - _, _ = io.Copy(writer, bytes.NewBuffer(input)) - - err := writer.Close() - return output.Bytes(), err -} - -// Decompress with DEFLATE -func inflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - reader := flate.NewReader(bytes.NewBuffer(input)) - - _, err := io.Copy(output, reader) - if err != nil { - return nil, err - } - - err = reader.Close() - return output.Bytes(), err -} - -// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. 
-type byteBuffer struct { - data []byte -} - -func newBuffer(data []byte) *byteBuffer { - if data == nil { - return nil - } - return &byteBuffer{ - data: data, - } -} - -func newFixedSizeBuffer(data []byte, length int) *byteBuffer { - if len(data) > length { - panic("square/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") - } - pad := make([]byte, length-len(data)) - return newBuffer(append(pad, data...)) -} - -func newBufferFromInt(num uint64) *byteBuffer { - data := make([]byte, 8) - binary.BigEndian.PutUint64(data, num) - return newBuffer(bytes.TrimLeft(data, "\x00")) -} - -func (b *byteBuffer) MarshalJSON() ([]byte, error) { - return json.Marshal(b.base64()) -} - -func (b *byteBuffer) UnmarshalJSON(data []byte) error { - var encoded string - err := json.Unmarshal(data, &encoded) - if err != nil { - return err - } - - if encoded == "" { - return nil - } - - decoded, err := base64.RawURLEncoding.DecodeString(encoded) - if err != nil { - return err - } - - *b = *newBuffer(decoded) - - return nil -} - -func (b *byteBuffer) base64() string { - return base64.RawURLEncoding.EncodeToString(b.data) -} - -func (b *byteBuffer) bytes() []byte { - // Handling nil here allows us to transparently handle nil slices when serializing. - if b == nil { - return nil - } - return b.data -} - -func (b byteBuffer) bigInt() *big.Int { - return new(big.Int).SetBytes(b.data) -} - -func (b byteBuffer) toInt() int { - return int(b.bigInt().Int64()) -} diff --git a/vendor/gopkg.in/square/go-jose.v2/json/LICENSE b/vendor/gopkg.in/square/go-jose.v2/json/LICENSE deleted file mode 100644 index 744875676..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/json/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
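
For reference (not part of the diff itself), here is a minimal standalone sketch of the unpadded, url-safe base64 round trip that the removed byteBuffer helpers in encoding.go perform; the sample bytes and variable names are illustrative only, and only standard-library calls already used by the deleted code appear.

package main

import (
	"bytes"
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

func main() {
	// Encode raw bytes the way byteBuffer.base64() does: unpadded, URL-safe base64.
	raw := []byte{0xde, 0xad, 0xbe, 0xef}
	encoded := base64.RawURLEncoding.EncodeToString(raw)
	fmt.Println(encoded) // 3q2-7w

	// Decode again, mirroring byteBuffer.UnmarshalJSON after the JSON string is unquoted.
	decoded, err := base64.RawURLEncoding.DecodeString(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(raw, decoded)) // true

	// newBufferFromInt stores integers big-endian with leading zero bytes trimmed.
	num := make([]byte, 8)
	binary.BigEndian.PutUint64(num, 42)
	fmt.Printf("%x\n", bytes.TrimLeft(num, "\x00")) // 2a
}
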
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/README.md b/vendor/gopkg.in/square/go-jose.v2/json/README.md deleted file mode 100644 index 86de5e558..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/json/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Safe JSON - -This repository contains a fork of the `encoding/json` package from Go 1.6. - -The following changes were made: - -* Object deserialization uses case-sensitive member name matching instead of - [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). - This is to avoid differences in the interpretation of JOSE messages between - go-jose and libraries written in other languages. -* When deserializing a JSON object, we check for duplicate keys and reject the - input whenever we detect a duplicate. Rather than trying to work with malformed - data, we prefer to reject it right away. diff --git a/vendor/gopkg.in/square/go-jose.v2/json/decode.go b/vendor/gopkg.in/square/go-jose.v2/json/decode.go deleted file mode 100644 index 4dbc4146c..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/json/decode.go +++ /dev/null @@ -1,1217 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Represents JSON data structure using native Go types: booleans, floats, -// strings, arrays, and maps. - -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "errors" - "fmt" - "math" - "reflect" - "runtime" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" -) - -// Unmarshal parses the JSON-encoded data and stores the result -// in the value pointed to by v. -// -// Unmarshal uses the inverse of the encodings that -// Marshal uses, allocating maps, slices, and pointers as necessary, -// with the following additional rules: -// -// To unmarshal JSON into a pointer, Unmarshal first handles the case of -// the JSON being the JSON literal null. In that case, Unmarshal sets -// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into -// the value pointed at by the pointer. If the pointer is nil, Unmarshal -// allocates a new value for it to point to. -// -// To unmarshal JSON into a struct, Unmarshal matches incoming object -// keys to the keys used by Marshal (either the struct field name or its tag), -// preferring an exact match but also accepting a case-insensitive match. -// Unmarshal will only set exported fields of the struct. -// -// To unmarshal JSON into an interface value, -// Unmarshal stores one of these in the interface value: -// -// bool, for JSON booleans -// float64, for JSON numbers -// string, for JSON strings -// []interface{}, for JSON arrays -// map[string]interface{}, for JSON objects -// nil for JSON null -// -// To unmarshal a JSON array into a slice, Unmarshal resets the slice length -// to zero and then appends each element to the slice. -// As a special case, to unmarshal an empty JSON array into a slice, -// Unmarshal replaces the slice with a new empty slice. -// -// To unmarshal a JSON array into a Go array, Unmarshal decodes -// JSON array elements into corresponding Go array elements. -// If the Go array is smaller than the JSON array, -// the additional JSON array elements are discarded. -// If the JSON array is smaller than the Go array, -// the additional Go array elements are set to zero values. 
-// -// To unmarshal a JSON object into a string-keyed map, Unmarshal first -// establishes a map to use, If the map is nil, Unmarshal allocates a new map. -// Otherwise Unmarshal reuses the existing map, keeping existing entries. -// Unmarshal then stores key-value pairs from the JSON object into the map. -// -// If a JSON value is not appropriate for a given target type, -// or if a JSON number overflows the target type, Unmarshal -// skips that field and completes the unmarshaling as best it can. -// If no more serious errors are encountered, Unmarshal returns -// an UnmarshalTypeError describing the earliest such error. -// -// The JSON null value unmarshals into an interface, map, pointer, or slice -// by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect -// on the value and produces no error. -// -// When unmarshaling quoted strings, invalid UTF-8 or -// invalid UTF-16 surrogate pairs are not treated as an error. -// Instead, they are replaced by the Unicode replacement -// character U+FFFD. -// -func Unmarshal(data []byte, v interface{}) error { - // Check for well-formedness. - // Avoids filling out half a data structure - // before discovering a JSON syntax error. - var d decodeState - err := checkValid(data, &d.scan) - if err != nil { - return err - } - - d.init(data) - return d.unmarshal(v) -} - -// Unmarshaler is the interface implemented by objects -// that can unmarshal a JSON description of themselves. -// The input can be assumed to be a valid encoding of -// a JSON value. UnmarshalJSON must copy the JSON data -// if it wishes to retain the data after returning. -type Unmarshaler interface { - UnmarshalJSON([]byte) error -} - -// An UnmarshalTypeError describes a JSON value that was -// not appropriate for a value of a specific Go type. -type UnmarshalTypeError struct { - Value string // description of JSON value - "bool", "array", "number -5" - Type reflect.Type // type of Go value it could not be assigned to - Offset int64 // error occurred after reading Offset bytes -} - -func (e *UnmarshalTypeError) Error() string { - return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() -} - -// An UnmarshalFieldError describes a JSON object key that -// led to an unexported (and therefore unwritable) struct field. -// (No longer used; kept for compatibility.) -type UnmarshalFieldError struct { - Key string - Type reflect.Type - Field reflect.StructField -} - -func (e *UnmarshalFieldError) Error() string { - return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() -} - -// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. -// (The argument to Unmarshal must be a non-nil pointer.) 
-type InvalidUnmarshalError struct { - Type reflect.Type -} - -func (e *InvalidUnmarshalError) Error() string { - if e.Type == nil { - return "json: Unmarshal(nil)" - } - - if e.Type.Kind() != reflect.Ptr { - return "json: Unmarshal(non-pointer " + e.Type.String() + ")" - } - return "json: Unmarshal(nil " + e.Type.String() + ")" -} - -func (d *decodeState) unmarshal(v interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return &InvalidUnmarshalError{reflect.TypeOf(v)} - } - - d.scan.reset() - // We decode rv not rv.Elem because the Unmarshaler interface - // test must be applied at the top level of the value. - d.value(rv) - return d.savedError -} - -// A Number represents a JSON number literal. -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -// isValidNumber reports whether s is a valid JSON number literal. -func isValidNumber(s string) bool { - // This function implements the JSON numbers grammar. - // See https://tools.ietf.org/html/rfc7159#section-6 - // and http://json.org/number.gif - - if s == "" { - return false - } - - // Optional - - if s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - - // Digits - switch { - default: - return false - - case s[0] == '0': - s = s[1:] - - case '1' <= s[0] && s[0] <= '9': - s = s[1:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // . followed by 1 or more digits. - if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { - s = s[2:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // e or E followed by an optional - or + and - // 1 or more digits. - if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { - s = s[1:] - if s[0] == '+' || s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // Make sure we are at the end. - return s == "" -} - -type NumberUnmarshalType int - -const ( - // unmarshal a JSON number into an interface{} as a float64 - UnmarshalFloat NumberUnmarshalType = iota - // unmarshal a JSON number into an interface{} as a `json.Number` - UnmarshalJSONNumber - // unmarshal a JSON number into an interface{} as a int64 - // if value is an integer otherwise float64 - UnmarshalIntOrFloat -) - -// decodeState represents the state while decoding a JSON value. -type decodeState struct { - data []byte - off int // read offset in data - scan scanner - nextscan scanner // for calls to nextValue - savedError error - numberType NumberUnmarshalType -} - -// errPhase is used for errors that should not happen unless -// there is a bug in the JSON decoder or something is editing -// the data slice while the decoder executes. -var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") - -func (d *decodeState) init(data []byte) *decodeState { - d.data = data - d.off = 0 - d.savedError = nil - return d -} - -// error aborts the decoding by panicking with err. 
-func (d *decodeState) error(err error) { - panic(err) -} - -// saveError saves the first err it is called with, -// for reporting at the end of the unmarshal. -func (d *decodeState) saveError(err error) { - if d.savedError == nil { - d.savedError = err - } -} - -// next cuts off and returns the next full JSON value in d.data[d.off:]. -// The next value is known to be an object or array, not a literal. -func (d *decodeState) next() []byte { - c := d.data[d.off] - item, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // Our scanner has seen the opening brace/bracket - // and thinks we're still in the middle of the object. - // invent a closing brace/bracket to get it out. - if c == '{' { - d.scan.step(&d.scan, '}') - } else { - d.scan.step(&d.scan, ']') - } - - return item -} - -// scanWhile processes bytes in d.data[d.off:] until it -// receives a scan code not equal to op. -// It updates d.off and returns the new scan code. -func (d *decodeState) scanWhile(op int) int { - var newOp int - for { - if d.off >= len(d.data) { - newOp = d.scan.eof() - d.off = len(d.data) + 1 // mark processed EOF with len+1 - } else { - c := d.data[d.off] - d.off++ - newOp = d.scan.step(&d.scan, c) - } - if newOp != op { - break - } - } - return newOp -} - -// value decodes a JSON value from d.data[d.off:] into the value. -// it updates d.off to point past the decoded value. -func (d *decodeState) value(v reflect.Value) { - if !v.IsValid() { - _, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // d.scan thinks we're still at the beginning of the item. - // Feed in an empty string - the shortest, simplest value - - // so that it knows we got to the end of the value. - if d.scan.redo { - // rewind. - d.scan.redo = false - d.scan.step = stateBeginValue - } - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - - n := len(d.scan.parseState) - if n > 0 && d.scan.parseState[n-1] == parseObjectKey { - // d.scan thinks we just read an object key; finish the object - d.scan.step(&d.scan, ':') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '}') - } - - return - } - - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(v) - - case scanBeginObject: - d.object(v) - - case scanBeginLiteral: - d.literal(v) - } -} - -type unquotedValue struct{} - -// valueQuoted is like value but decodes a -// quoted string literal or literal null into an interface value. -// If it finds anything other than a quoted string literal or null, -// valueQuoted returns unquotedValue{}. -func (d *decodeState) valueQuoted() interface{} { - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(reflect.Value{}) - - case scanBeginObject: - d.object(reflect.Value{}) - - case scanBeginLiteral: - switch v := d.literalInterface().(type) { - case nil, string: - return v - } - } - return unquotedValue{} -} - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. 
-func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// array consumes an array from d.data[d.off-1:], decoding into the value v. -// the first byte of the array ('[') has been read already. -func (d *decodeState) array(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - } - - v = pv - - // Check type of target. - switch v.Kind() { - case reflect.Interface: - if v.NumMethod() == 0 { - // Decoding into nil interface? Switch to non-reflect code. - v.Set(reflect.ValueOf(d.arrayInterface())) - return - } - // Otherwise it's invalid. - fallthrough - default: - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - case reflect.Array: - case reflect.Slice: - break - } - - i := 0 - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - // Get element of array, growing if necessary. - if v.Kind() == reflect.Slice { - // Grow slice if necessary - if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) - } - if i >= v.Len() { - v.SetLen(i + 1) - } - } - - if i < v.Len() { - // Decode into element. - d.value(v.Index(i)) - } else { - // Ran out of fixed array: skip. - d.value(reflect.Value{}) - } - i++ - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - - if i < v.Len() { - if v.Kind() == reflect.Array { - // Array. Zero the rest. - z := reflect.Zero(v.Type().Elem()) - for ; i < v.Len(); i++ { - v.Index(i).Set(z) - } - } else { - v.SetLen(i) - } - } - if i == 0 && v.Kind() == reflect.Slice { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } -} - -var nullLiteral = []byte("null") - -// object consumes an object from d.data[d.off-1:], decoding into the value v. -// the first byte ('{') of the object has been read already. -func (d *decodeState) object(v reflect.Value) { - // Check for unmarshaler. 
- u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. - if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - v.Set(reflect.ValueOf(d.objectInterface())) - return - } - - // Check type of target: struct or map[string]T - switch v.Kind() { - case reflect.Map: - // map must have string kind - t := v.Type() - if t.Key().Kind() != reflect.String { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - if v.IsNil() { - v.Set(reflect.MakeMap(t)) - } - case reflect.Struct: - - default: - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - - var mapElem reflect.Value - keys := map[string]bool{} - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Check for duplicate keys. - _, ok = keys[key] - if !ok { - keys[key] = true - } else { - d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) - } - - // Figure out field corresponding to key. - var subv reflect.Value - destring := false // whether the value is wrapped in a string to be decoded first - - if v.Kind() == reflect.Map { - elemType := v.Type().Elem() - if !mapElem.IsValid() { - mapElem = reflect.New(elemType).Elem() - } else { - mapElem.Set(reflect.Zero(elemType)) - } - subv = mapElem - } else { - var f *field - fields := cachedTypeFields(v.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, []byte(key)) { - f = ff - break - } - } - if f != nil { - subv = v - destring = f.quoted - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - if destring { - switch qv := d.valueQuoted().(type) { - case nil: - d.literalStore(nullLiteral, subv, false) - case string: - d.literalStore([]byte(qv), subv, true) - default: - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) - } - } else { - d.value(subv) - } - - // Write value back to map; - // if using struct, subv points into struct already. - if v.Kind() == reflect.Map { - kv := reflect.ValueOf(key).Convert(v.Type().Key()) - v.SetMapIndex(kv, subv) - } - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } -} - -// literal consumes a literal from d.data[d.off-1:], decoding into the value v. -// The first byte of the literal has been read already -// (that's how the caller knows it's a literal). 
-func (d *decodeState) literal(v reflect.Value) { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - - d.literalStore(d.data[start:d.off], v, false) -} - -// convertNumber converts the number literal s to a float64, int64 or a Number -// depending on d.numberDecodeType. -func (d *decodeState) convertNumber(s string) (interface{}, error) { - switch d.numberType { - - case UnmarshalJSONNumber: - return Number(s), nil - case UnmarshalIntOrFloat: - v, err := strconv.ParseInt(s, 10, 64) - if err == nil { - return v, nil - } - - // tries to parse integer number in scientific notation - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} - } - - // if it has no decimal value use int64 - if fi, fd := math.Modf(f); fd == 0.0 { - return int64(fi), nil - } - return f, nil - default: - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} - } - return f, nil - } - -} - -var numberType = reflect.TypeOf(Number("")) - -// literalStore decodes a literal stored in item into v. -// -// fromQuoted indicates whether this literal came from unwrapping a -// string from the ",string" struct tag option. this is used only to -// produce more helpful error messages. -func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { - // Check for unmarshaler. - if len(item) == 0 { - //Empty string given - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - return - } - wantptr := item[0] == 'n' // null - u, ut, pv := d.indirect(v, wantptr) - if u != nil { - err := u.UnmarshalJSON(item) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - if item[0] != '"' { - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - return - } - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - err := ut.UnmarshalText(s) - if err != nil { - d.error(err) - } - return - } - - v = pv - - switch c := item[0]; c { - case 'n': // null - switch v.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - v.Set(reflect.Zero(v.Type())) - // otherwise, ignore null for primitives/string - } - case 't', 'f': // true, false - value := c == 't' - switch v.Kind() { - default: - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - case reflect.Bool: - v.SetBool(value) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(value)) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - } - - case '"': // string - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - switch v.Kind() { - default: - d.saveError(&UnmarshalTypeError{"string", v.Type(), 
int64(d.off)}) - case reflect.Slice: - if v.Type().Elem().Kind() != reflect.Uint8 { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - break - } - b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) - n, err := base64.StdEncoding.Decode(b, s) - if err != nil { - d.saveError(err) - break - } - v.SetBytes(b[:n]) - case reflect.String: - v.SetString(string(s)) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(string(s))) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - } - - default: // number - if c != '-' && (c < '0' || c > '9') { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - s := string(item) - switch v.Kind() { - default: - if v.Kind() == reflect.String && v.Type() == numberType { - v.SetString(s) - if !isValidNumber(s) { - d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) - } - break - } - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - } - case reflect.Interface: - n, err := d.convertNumber(s) - if err != nil { - d.saveError(err) - break - } - if v.NumMethod() != 0 { - d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - break - } - v.Set(reflect.ValueOf(n)) - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, err := strconv.ParseInt(s, 10, 64) - if err != nil || v.OverflowInt(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetInt(n) - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(s, 10, 64) - if err != nil || v.OverflowUint(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetUint(n) - - case reflect.Float32, reflect.Float64: - n, err := strconv.ParseFloat(s, v.Type().Bits()) - if err != nil || v.OverflowFloat(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetFloat(n) - } - } -} - -// The xxxInterface routines build up a value to be stored -// in an empty interface. They are not strictly necessary, -// but they avoid the weight of reflection in this common case. - -// valueInterface is like value but returns interface{} -func (d *decodeState) valueInterface() interface{} { - switch d.scanWhile(scanSkipSpace) { - default: - d.error(errPhase) - panic("unreachable") - case scanBeginArray: - return d.arrayInterface() - case scanBeginObject: - return d.objectInterface() - case scanBeginLiteral: - return d.literalInterface() - } -} - -// arrayInterface is like array but returns []interface{}. -func (d *decodeState) arrayInterface() []interface{} { - var v = make([]interface{}, 0) - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - v = append(v, d.valueInterface()) - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - return v -} - -// objectInterface is like object but returns map[string]interface{}. 
-func (d *decodeState) objectInterface() map[string]interface{} { - m := make(map[string]interface{}) - keys := map[string]bool{} - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read string key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Check for duplicate keys. - _, ok = keys[key] - if !ok { - keys[key] = true - } else { - d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - m[key] = d.valueInterface() - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } - return m -} - -// literalInterface is like literal but returns an interface value. -func (d *decodeState) literalInterface() interface{} { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - item := d.data[start:d.off] - - switch c := item[0]; c { - case 'n': // null - return nil - - case 't', 'f': // true, false - return c == 't' - - case '"': // string - s, ok := unquote(item) - if !ok { - d.error(errPhase) - } - return s - - default: // number - if c != '-' && (c < '0' || c > '9') { - d.error(errPhase) - } - n, err := d.convertNumber(string(item)) - if err != nil { - d.saveError(err) - } - return n - } -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - r, err := strconv.ParseUint(string(s[2:6]), 16, 64) - if err != nil { - return -1 - } - return rune(r) -} - -// unquote converts a quoted JSON string literal s into an actual string t. -// The rules are different than for Go, so cannot use strconv.Unquote. -func unquote(s []byte) (t string, ok bool) { - s, ok = unquoteBytes(s) - t = string(s) - return -} - -func unquoteBytes(s []byte) (t []byte, ok bool) { - if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { - return - } - s = s[1 : len(s)-1] - - // Check for unusual characters. If there are none, - // then no unquoting is needed, so return a slice of the - // original bytes. - r := 0 - for r < len(s) { - c := s[r] - if c == '\\' || c == '"' || c < ' ' { - break - } - if c < utf8.RuneSelf { - r++ - continue - } - rr, size := utf8.DecodeRune(s[r:]) - if rr == utf8.RuneError && size == 1 { - break - } - r += size - } - if r == len(s) { - return s, true - } - - b := make([]byte, len(s)+2*utf8.UTFMax) - w := copy(b, s[0:r]) - for r < len(s) { - // Out of room? Can only happen if s is full of - // malformed UTF-8 and we're replacing each - // byte with RuneError. 
- if w >= len(b)-2*utf8.UTFMax { - nb := make([]byte, (len(b)+utf8.UTFMax)*2) - copy(nb, b[0:w]) - b = nb - } - switch c := s[r]; { - case c == '\\': - r++ - if r >= len(s) { - return - } - switch s[r] { - default: - return - case '"', '\\', '/', '\'': - b[w] = s[r] - r++ - w++ - case 'b': - b[w] = '\b' - r++ - w++ - case 'f': - b[w] = '\f' - r++ - w++ - case 'n': - b[w] = '\n' - r++ - w++ - case 'r': - b[w] = '\r' - r++ - w++ - case 't': - b[w] = '\t' - r++ - w++ - case 'u': - r-- - rr := getu4(s[r:]) - if rr < 0 { - return - } - r += 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(s[r:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - // A valid pair; consume. - r += 6 - w += utf8.EncodeRune(b[w:], dec) - break - } - // Invalid surrogate; fall back to replacement rune. - rr = unicode.ReplacementChar - } - w += utf8.EncodeRune(b[w:], rr) - } - - // Quote, control characters are invalid. - case c == '"', c < ' ': - return - - // ASCII - case c < utf8.RuneSelf: - b[w] = c - r++ - w++ - - // Coerce to well-formed UTF-8. - default: - rr, size := utf8.DecodeRune(s[r:]) - r += size - w += utf8.EncodeRune(b[w:], rr) - } - } - return b[0:w], true -} diff --git a/vendor/gopkg.in/square/go-jose.v2/json/encode.go b/vendor/gopkg.in/square/go-jose.v2/json/encode.go deleted file mode 100644 index 1dae8bb7c..000000000 --- a/vendor/gopkg.in/square/go-jose.v2/json/encode.go +++ /dev/null @@ -1,1197 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package json implements encoding and decoding of JSON objects as defined in -// RFC 4627. The mapping between JSON objects and Go values is described -// in the documentation for the Marshal and Unmarshal functions. -// -// See "JSON and Go" for an introduction to this package: -// https://golang.org/doc/articles/json_and_go.html -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "fmt" - "math" - "reflect" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// Marshal returns the JSON encoding of v. -// -// Marshal traverses the value v recursively. -// If an encountered value implements the Marshaler interface -// and is not a nil pointer, Marshal calls its MarshalJSON method -// to produce JSON. If no MarshalJSON method is present but the -// value implements encoding.TextMarshaler instead, Marshal calls -// its MarshalText method. -// The nil pointer exception is not strictly necessary -// but mimics a similar, necessary exception in the behavior of -// UnmarshalJSON. -// -// Otherwise, Marshal uses the following type-dependent default encodings: -// -// Boolean values encode as JSON booleans. -// -// Floating point, integer, and Number values encode as JSON numbers. -// -// String values encode as JSON strings coerced to valid UTF-8, -// replacing invalid bytes with the Unicode replacement rune. -// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" -// to keep some browsers from misinterpreting JSON output as HTML. -// Ampersand "&" is also escaped to "\u0026" for the same reason. -// -// Array and slice values encode as JSON arrays, except that -// []byte encodes as a base64-encoded string, and a nil slice -// encodes as the null JSON object. -// -// Struct values encode as JSON objects. 
Each exported struct field -// becomes a member of the object unless -// - the field's tag is "-", or -// - the field is empty and its tag specifies the "omitempty" option. -// The empty values are false, 0, any -// nil pointer or interface value, and any array, slice, map, or string of -// length zero. The object's default key string is the struct field name -// but can be specified in the struct field's tag value. The "json" key in -// the struct field's tag value is the key name, followed by an optional comma -// and options. Examples: -// -// // Field is ignored by this package. -// Field int `json:"-"` -// -// // Field appears in JSON as key "myName". -// Field int `json:"myName"` -// -// // Field appears in JSON as key "myName" and -// // the field is omitted from the object if its value is empty, -// // as defined above. -// Field int `json:"myName,omitempty"` -// -// // Field appears in JSON as key "Field" (the default), but -// // the field is skipped if empty. -// // Note the leading comma. -// Field int `json:",omitempty"` -// -// The "string" option signals that a field is stored as JSON inside a -// JSON-encoded string. It applies only to fields of string, floating point, -// integer, or boolean types. This extra level of encoding is sometimes used -// when communicating with JavaScript programs: -// -// Int64String int64 `json:",string"` -// -// The key name will be used if it's a non-empty string consisting of -// only Unicode letters, digits, dollar signs, percent signs, hyphens, -// underscores and slashes. -// -// Anonymous struct fields are usually marshaled as if their inner exported fields -// were fields in the outer struct, subject to the usual Go visibility rules amended -// as described in the next paragraph. -// An anonymous struct field with a name given in its JSON tag is treated as -// having that name, rather than being anonymous. -// An anonymous struct field of interface type is treated the same as having -// that type as its name, rather than being anonymous. -// -// The Go visibility rules for struct fields are amended for JSON when -// deciding which field to marshal or unmarshal. If there are -// multiple fields at the same level, and that level is the least -// nested (and would therefore be the nesting level selected by the -// usual Go rules), the following extra rules apply: -// -// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, -// even if there are multiple untagged fields that would otherwise conflict. -// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. -// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. -// -// Handling of anonymous struct fields is new in Go 1.1. -// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of -// an anonymous struct field in both current and earlier versions, give the field -// a JSON tag of "-". -// -// Map values encode as JSON objects. -// The map's key type must be string; the map keys are used as JSON object -// keys, subject to the UTF-8 coercion described for string values above. -// -// Pointer values encode as the value pointed to. -// A nil pointer encodes as the null JSON object. -// -// Interface values encode as the value contained in the interface. -// A nil interface value encodes as the null JSON object. -// -// Channel, complex, and function values cannot be encoded in JSON. -// Attempting to encode such a value causes Marshal to return -// an UnsupportedTypeError. 
-// -// JSON cannot represent cyclic data structures and Marshal does not -// handle them. Passing cyclic structures to Marshal will result in -// an infinite recursion. -// -func Marshal(v interface{}) ([]byte, error) { - e := &encodeState{} - err := e.marshal(v) - if err != nil { - return nil, err - } - return e.Bytes(), nil -} - -// MarshalIndent is like Marshal but applies Indent to format the output. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - b, err := Marshal(v) - if err != nil { - return nil, err - } - var buf bytes.Buffer - err = Indent(&buf, b, prefix, indent) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 -// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 -// so that the JSON will be safe to embed inside HTML