From 2e389b41e35217539917a617177d7e4a155829e0 Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Fri, 5 Sep 2025 16:20:39 +0200 Subject: [PATCH 01/12] implement support for APIExportEndpointSlices On-behalf-of: @SAP christoph.mewes@sap.com --- cmd/api-syncagent/kcp.go | 293 ++++++++++++++++++ cmd/api-syncagent/main.go | 129 +++----- cmd/api-syncagent/options.go | 22 +- internal/controller/syncmanager/controller.go | 82 +++-- 4 files changed, 405 insertions(+), 121 deletions(-) create mode 100644 cmd/api-syncagent/kcp.go diff --git a/cmd/api-syncagent/kcp.go b/cmd/api-syncagent/kcp.go new file mode 100644 index 0000000..b023b61 --- /dev/null +++ b/cmd/api-syncagent/kcp.go @@ -0,0 +1,293 @@ +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "context" + "errors" + "fmt" + "regexp" + + "github.com/kcp-dev/api-syncagent/internal/kcp" + "github.com/kcp-dev/logicalcluster/v3" + + kcpdevv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" + kcpdevcore "github.com/kcp-dev/kcp/sdk/apis/core" + kcpdevcorev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" + + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/cache" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" +) + +// The agent has two potentially different kcp clusters: +// +// endpointCluster - this is where the source of the virtual workspace URLs +// live, i.e. where the APIExport/EndpointSlice. +// managedCluster - this is where the APIExport and APIResourceSchemas +// exist that are meant to be reconciled. +// +// The managedCluster always exists, the endpointCluster only if the workspace +// for the virtual workspace source is different from the managed cluster. + +// setupEndpointKcpCluster sets up a plain, non-kcp-aware ctrl-runtime Cluster object +// that is solvely used to watch whichever object holds the virtual workspace URLs, +// either the APIExport or the APIExportEndpointSlice. 
+func setupEndpointKcpCluster(endpoint *syncEndpoint) (cluster.Cluster, error) { + // no need for a dedicated endpoint cluster + if endpoint.APIExport.Cluster == endpoint.EndpointSlice.Cluster { + return nil, nil + } + + scheme := runtime.NewScheme() + + if err := kcpdevv1alpha1.AddToScheme(scheme); err != nil { + return nil, fmt.Errorf("failed to register scheme %s: %w", kcpdevv1alpha1.SchemeGroupVersion, err) + } + + if err := kcpdevcorev1alpha1.AddToScheme(scheme); err != nil { + return nil, fmt.Errorf("failed to register scheme %s: %w", kcpdevcorev1alpha1.SchemeGroupVersion, err) + } + + // RBAC in kcp might be very tight and might not allow to list/watch all objects; + // restrict the cache's selectors accordingly so we can still make use of caching. + byObject := map[ctrlruntimeclient.Object]cache.ByObject{ + &kcpdevv1alpha1.APIExportEndpointSlice{}: { + Field: fields.SelectorFromSet(fields.Set{"metadata.name": endpoint.EndpointSlice.Name}), + }, + } + + return cluster.New(endpoint.EndpointSlice.Config, func(o *cluster.Options) { + o.Scheme = scheme + o.Cache = cache.Options{ + Scheme: scheme, + ByObject: byObject, + } + }) +} + +// setupManagedKcpCluster sets up a plain, non-kcp-aware ctrl-runtime Cluster object +// that is solvely used to manage the APIExport and APIResourceSchemas. +func setupManagedKcpCluster(endpoint *syncEndpoint) (cluster.Cluster, error) { + scheme := runtime.NewScheme() + + if err := kcpdevv1alpha1.AddToScheme(scheme); err != nil { + return nil, fmt.Errorf("failed to register scheme %s: %w", kcpdevv1alpha1.SchemeGroupVersion, err) + } + + if err := kcpdevcorev1alpha1.AddToScheme(scheme); err != nil { + return nil, fmt.Errorf("failed to register scheme %s: %w", kcpdevcorev1alpha1.SchemeGroupVersion, err) + } + + // RBAC in kcp might be very tight and might not allow to list/watch all objects; + // restrict the cache's selectors accordingly so we can still make use of caching. 
+ byObject := map[ctrlruntimeclient.Object]cache.ByObject{ + &kcpdevv1alpha1.APIExport{}: { + Field: fields.SelectorFromSet(fields.Set{"metadata.name": endpoint.APIExport.Name}), + }, + } + + return cluster.New(endpoint.APIExport.Config, func(o *cluster.Options) { + o.Scheme = scheme + o.Cache = cache.Options{ + Scheme: scheme, + ByObject: byObject, + } + }) +} + +type qualifiedCluster struct { + Cluster logicalcluster.Name + Path logicalcluster.Path + Config *rest.Config +} + +type qualifiedAPIExport struct { + *kcpdevv1alpha1.APIExport + qualifiedCluster +} + +type qualifiedAPIExportEndpointSlice struct { + *kcpdevv1alpha1.APIExportEndpointSlice + qualifiedCluster +} + +type syncEndpoint struct { + APIExport qualifiedAPIExport + EndpointSlice *qualifiedAPIExportEndpointSlice +} + +// resolveSyncEndpoint takes the user provided (usually via CLI flags) APIExportEndpointSliceRef and +// APIExportRef and resolves, returning a consistent SyncEndpoint. The initialRestConfig must point +// to the cluster where either of the two objects reside (i.e. if the APIExportRef is given, it +// must point to the cluster where the APIExport lives, and vice versa for the endpoint slice; +// however the endpoint slice references an APIExport in potentially another cluster, and for this +// case the initialRestConfig will be rewritten accordingly). 
+func resolveSyncEndpoint(ctx context.Context, initialRestConfig *rest.Config, endpointSliceRef string, apiExportRef string) (*syncEndpoint, error) { + // construct temporary, uncached client + scheme := runtime.NewScheme() + if err := kcpdevcorev1alpha1.AddToScheme(scheme); err != nil { + return nil, fmt.Errorf("failed to register scheme %s: %w", kcpdevcorev1alpha1.SchemeGroupVersion, err) + } + if err := kcpdevv1alpha1.AddToScheme(scheme); err != nil { + return nil, fmt.Errorf("failed to register scheme %s: %w", kcpdevv1alpha1.SchemeGroupVersion, err) + } + + clientOpts := ctrlruntimeclient.Options{Scheme: scheme} + client, err := ctrlruntimeclient.New(initialRestConfig, clientOpts) + if err != nil { + return nil, fmt.Errorf("failed to create service reader: %w", err) + } + + se := &syncEndpoint{} + + // When an endpoint ref is given, both the APIExportEndpointSlice and the APIExport must exist. + if endpointSliceRef != "" { + endpointSlice, err := resolveAPIExportEndpointSlice(ctx, client, endpointSliceRef) + if err != nil { + return nil, fmt.Errorf("failed to resolve APIExportEndpointSlice: %w", err) + } + endpointSlice.Config = initialRestConfig + + // find the APIExport referenced not by the user (can't: both ref parameters to this function + // are mutually exclusive), but in the APIExportEndpointSlice. 
+ restConfig, err := retargetRestConfig(initialRestConfig, endpointSlice.Spec.APIExport.Path) + if err != nil { + return nil, fmt.Errorf("failed to re-target the given kubeconfig to cluster %q: %w", endpointSlice.Spec.APIExport.Path, err) + } + + client, err := ctrlruntimeclient.New(restConfig, clientOpts) + if err != nil { + return nil, fmt.Errorf("failed to create service reader: %w", err) + } + + apiExport, err := resolveAPIExport(ctx, client, endpointSlice.Spec.APIExport.Name) + if err != nil { + return nil, fmt.Errorf("failed to resolve APIExport: %w", err) + } + apiExport.Config = restConfig + + se.APIExport = apiExport + se.EndpointSlice = &endpointSlice + } else { // if an export ref is given, the endpoint slice is optional (for compat with kcp <0.28) + apiExport, err := resolveAPIExport(ctx, client, apiExportRef) + if err != nil { + return nil, fmt.Errorf("failed to resolve APIExport: %w", err) + } + apiExport.Config = initialRestConfig + + se.APIExport = apiExport + + // try to find an endpoint slice in the same workspace with the same name as the APIExport + endpointSlice, err := resolveAPIExportEndpointSlice(ctx, client, apiExportRef) + if ctrlruntimeclient.IgnoreNotFound(err) != nil { + return nil, fmt.Errorf("failed to resolve APIExportEndpointSlice: %w", err) + } else if err == nil { + apiExport.Config = initialRestConfig + se.EndpointSlice = &endpointSlice + } + } + + return se, nil +} + +func resolveAPIExportEndpointSlice(ctx context.Context, client ctrlruntimeclient.Client, ref string) (qualifiedAPIExportEndpointSlice, error) { + endpointSlice := &kcpdevv1alpha1.APIExportEndpointSlice{} + key := types.NamespacedName{Name: ref} + if err := client.Get(ctx, key, endpointSlice); err != nil { + return qualifiedAPIExportEndpointSlice{}, fmt.Errorf("failed to get APIExportEndpointSlice %q: %w", ref, err) + } + + lcName, lcPath, err := resolveCurrentCluster(ctx, client) + if err != nil { + return qualifiedAPIExportEndpointSlice{}, fmt.Errorf("failed to 
resolve APIExportEndpointSlice cluster: %w", err) + } + + return qualifiedAPIExportEndpointSlice{ + APIExportEndpointSlice: endpointSlice, + qualifiedCluster: qualifiedCluster{ + Cluster: lcName, + Path: lcPath, + }, + }, nil +} + +func resolveAPIExport(ctx context.Context, client ctrlruntimeclient.Client, ref string) (qualifiedAPIExport, error) { + apiExport := &kcpdevv1alpha1.APIExport{} + key := types.NamespacedName{Name: ref} + if err := client.Get(ctx, key, apiExport); err != nil { + return qualifiedAPIExport{}, fmt.Errorf("failed to get APIExport %q: %w", ref, err) + } + + lcName, lcPath, err := resolveCurrentCluster(ctx, client) + if err != nil { + return qualifiedAPIExport{}, fmt.Errorf("failed to resolve APIExport cluster: %w", err) + } + + return qualifiedAPIExport{ + APIExport: apiExport, + qualifiedCluster: qualifiedCluster{ + Cluster: lcName, + Path: lcPath, + }, + }, nil +} + +func resolveCurrentCluster(ctx context.Context, client ctrlruntimeclient.Client) (logicalcluster.Name, logicalcluster.Path, error) { + lc := &kcpdevcorev1alpha1.LogicalCluster{} + if err := client.Get(ctx, types.NamespacedName{Name: kcp.IdentityClusterName}, lc); err != nil { + return "", logicalcluster.None, fmt.Errorf("failed to resolve current workspace: %w", err) + } + + lcName := logicalcluster.From(lc) + lcPath := logicalcluster.NewPath(lc.Annotations[kcpdevcore.LogicalClusterPathAnnotationKey]) + + return lcName, lcPath, nil +} + +var clusterFinder = regexp.MustCompile(`/clusters/([^/]+)`) + +func retargetRestConfig(cfg *rest.Config, destination string) (*rest.Config, error) { + // no change desired (use current cluster implicitly) + if destination == "" { + return cfg, nil + } + + matches := clusterFinder.FindAllStringSubmatch(cfg.Host, -1) + if len(matches) == 0 { + return nil, errors.New("URL must point to a cluster/workspace") + } + if len(matches) > 1 { + return nil, errors.New("invalid URL: URL contains more than one cluster path") + } + + current := matches[0][1] + 
if current == destination { + return cfg, nil + } + + newCluster := fmt.Sprintf("/clusters/%s", destination) + + newConfig := rest.CopyConfig(cfg) + newConfig.Host = clusterFinder.ReplaceAllString(cfg.Host, newCluster) + + return newConfig, nil +} diff --git a/cmd/api-syncagent/main.go b/cmd/api-syncagent/main.go index a04c767..1d1f9db 100644 --- a/cmd/api-syncagent/main.go +++ b/cmd/api-syncagent/main.go @@ -25,7 +25,6 @@ import ( "strings" "github.com/go-logr/zapr" - "github.com/kcp-dev/logicalcluster/v3" "github.com/spf13/pflag" "go.uber.org/zap" reconcilerlog "k8c.io/reconciler/pkg/log" @@ -33,27 +32,17 @@ import ( "github.com/kcp-dev/api-syncagent/internal/controller/apiexport" "github.com/kcp-dev/api-syncagent/internal/controller/apiresourceschema" "github.com/kcp-dev/api-syncagent/internal/controller/syncmanager" - "github.com/kcp-dev/api-syncagent/internal/kcp" syncagentlog "github.com/kcp-dev/api-syncagent/internal/log" "github.com/kcp-dev/api-syncagent/internal/version" syncagentv1alpha1 "github.com/kcp-dev/api-syncagent/sdk/apis/syncagent/v1alpha1" - kcpdevv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" - kcpdevcore "github.com/kcp-dev/kcp/sdk/apis/core" - kcpdevcorev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" - corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ctrlruntime "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/cache" - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/cluster" ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" @@ -117,24 +106,42 @@ func run(ctx context.Context, log 
*zap.SugaredLogger, opts *Options) error { return fmt.Errorf("kcp kubeconfig does not point to a specific workspace") } - // We check if the APIExport exists and extract information we need to set up our kcpCluster. - apiExport, lcPath, lcName, err := resolveAPIExport(ctx, kcpRestConfig, opts.APIExportRef) + // We check if the APIExport/APIExportEndpointSlice exists and extract information we need to set up our kcpCluster. + endpoint, err := resolveSyncEndpoint(ctx, kcpRestConfig, opts.APIExportEndpointSliceRef, opts.APIExportRef) if err != nil { - return fmt.Errorf("failed to resolve APIExport: %w", err) + return fmt.Errorf("failed to resolve APIExport/EndpointSlice: %w", err) } - log.Infow("Resolved APIExport", "workspace", lcPath, "logicalcluster", lcName) + log.Infow("Resolved APIExport", "workspace", endpoint.APIExport.Path, "logicalcluster", endpoint.APIExport.Cluster) + + if s := endpoint.EndpointSlice; s != nil { + log.Infow("Using APIExportEndpointSlice", "workspace", s.Path, "logicalcluster", s.Cluster) + } - // init the "permanent" kcp cluster connection - kcpCluster, err := setupKcpCluster(kcpRestConfig, opts) + // init the "permanent" kcp cluster connections + + // always need the managedKcpCluster + managedKcpCluster, err := setupManagedKcpCluster(endpoint) if err != nil { - return fmt.Errorf("failed to initialize kcp cluster: %w", err) + return fmt.Errorf("failed to initialize managed kcp cluster: %w", err) } // start the kcp cluster caches when the manager boots up // (happens regardless of leader election status) - if err := mgr.Add(kcpCluster); err != nil { - return fmt.Errorf("failed to add kcp cluster runnable: %w", err) + if err := mgr.Add(managedKcpCluster); err != nil { + return fmt.Errorf("failed to add managed kcp cluster runnable: %w", err) + } + + // the endpoint cluster can be nil + endpointKcpCluster, err := setupEndpointKcpCluster(endpoint) + if err != nil { + return fmt.Errorf("failed to initialize endpoint kcp cluster: %w", err) + } 
+ + if endpointKcpCluster != nil { + if err := mgr.Add(endpointKcpCluster); err != nil { + return fmt.Errorf("failed to add endpoint kcp cluster runnable: %w", err) + } } startController := func(name string, creator func() error) error { @@ -151,13 +158,13 @@ func run(ctx context.Context, log *zap.SugaredLogger, opts *Options) error { } if err := startController("apiresourceschema", func() error { - return apiresourceschema.Add(mgr, kcpCluster, lcName, log, 4, opts.AgentName, opts.PublishedResourceSelector) + return apiresourceschema.Add(mgr, managedKcpCluster, endpoint.APIExport.Cluster, log, 4, opts.AgentName, opts.PublishedResourceSelector) }); err != nil { return err } if err := startController("apiexport", func() error { - return apiexport.Add(mgr, kcpCluster, lcName, log, opts.APIExportRef, opts.AgentName, opts.PublishedResourceSelector) + return apiexport.Add(mgr, managedKcpCluster, endpoint.APIExport.Cluster, log, opts.APIExportRef, opts.AgentName, opts.PublishedResourceSelector) }); err != nil { return err } @@ -165,7 +172,15 @@ func run(ctx context.Context, log *zap.SugaredLogger, opts *Options) error { // This controller is called "sync" because it makes the most sense to the users, even though internally the relevant // controller is the syncmanager (which in turn would start/stop the sync controllers). if err := startController("sync", func() error { - return syncmanager.Add(ctx, mgr, kcpCluster, kcpRestConfig, log, apiExport, opts.PublishedResourceSelector, opts.Namespace, opts.AgentName) + cluster := endpointKcpCluster + if cluster == nil { + cluster = managedKcpCluster + } + + // It doesn't matter which rest config we specify, as the URL will be overwritten with the + // virtual workspace URL anyway. 
+ + return syncmanager.Add(ctx, mgr, cluster, kcpRestConfig, log, endpoint.APIExport.APIExport, endpoint.EndpointSlice.APIExportEndpointSlice, opts.PublishedResourceSelector, opts.Namespace, opts.AgentName) }); err != nil { return err } @@ -221,74 +236,6 @@ func setupLocalManager(ctx context.Context, opts *Options) (manager.Manager, err return mgr, nil } -func resolveAPIExport(ctx context.Context, restConfig *rest.Config, apiExportRef string) (*kcpdevv1alpha1.APIExport, logicalcluster.Path, logicalcluster.Name, error) { - // construct temporary, uncached client - scheme := runtime.NewScheme() - if err := kcpdevcorev1alpha1.AddToScheme(scheme); err != nil { - return nil, logicalcluster.None, "", fmt.Errorf("failed to register scheme %s: %w", kcpdevcorev1alpha1.SchemeGroupVersion, err) - } - if err := kcpdevv1alpha1.AddToScheme(scheme); err != nil { - return nil, logicalcluster.None, "", fmt.Errorf("failed to register scheme %s: %w", kcpdevv1alpha1.SchemeGroupVersion, err) - } - - client, err := ctrlruntimeclient.New(restConfig, ctrlruntimeclient.Options{ - Scheme: scheme, - }) - if err != nil { - return nil, logicalcluster.None, "", fmt.Errorf("failed to create service reader: %w", err) - } - - apiExport := &kcpdevv1alpha1.APIExport{} - key := types.NamespacedName{Name: apiExportRef} - if err := client.Get(ctx, key, apiExport); err != nil { - return nil, logicalcluster.None, "", fmt.Errorf("failed to get APIExport %q: %w", apiExportRef, err) - } - - // kcp's controller-runtime fork always caches objects including their logicalcluster names. - // Our app technically doesn't care about workspaces / logical clusters, but we still need to - // supply the correct logicalcluster when querying for objects. - // We could take the cluster name from the Service itself, but since we want to log the nicer - // looking cluster _path_, we fetch the workspace's own logicalcluster object. 
- lc := &kcpdevcorev1alpha1.LogicalCluster{} - key = types.NamespacedName{Name: kcp.IdentityClusterName} - if err := client.Get(ctx, key, lc); err != nil { - return nil, logicalcluster.None, "", fmt.Errorf("failed to resolve current workspace: %w", err) - } - - lcName := logicalcluster.From(lc) - lcPath := logicalcluster.NewPath(lc.Annotations[kcpdevcore.LogicalClusterPathAnnotationKey]) - - return apiExport, lcPath, lcName, nil -} - -// setupKcpCluster sets up a plain, non-kcp-aware ctrl-runtime Cluster object -// that is solvely used to interact with the APIExport and APIResourceSchemas. -func setupKcpCluster(restConfig *rest.Config, opts *Options) (cluster.Cluster, error) { - scheme := runtime.NewScheme() - - if err := kcpdevv1alpha1.AddToScheme(scheme); err != nil { - return nil, fmt.Errorf("failed to register scheme %s: %w", kcpdevv1alpha1.SchemeGroupVersion, err) - } - - if err := kcpdevcorev1alpha1.AddToScheme(scheme); err != nil { - return nil, fmt.Errorf("failed to register scheme %s: %w", kcpdevcorev1alpha1.SchemeGroupVersion, err) - } - - return cluster.New(restConfig, func(o *cluster.Options) { - o.Scheme = scheme - // RBAC in kcp might be very tight and might not allow to list/watch all objects; - // restrict the cache's selectors accordingly so we can still make use of caching. 
- o.Cache = cache.Options{ - Scheme: scheme, - ByObject: map[ctrlruntimeclient.Object]cache.ByObject{ - &kcpdevv1alpha1.APIExport{}: { - Field: fields.SelectorFromSet(fields.Set{"metadata.name": opts.APIExportRef}), - }, - }, - } - }) -} - func loadKubeconfig(filename string) (*rest.Config, error) { loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() loadingRules.ExplicitPath = filename diff --git a/cmd/api-syncagent/options.go b/cmd/api-syncagent/options.go index 38165d6..9781cb1 100644 --- a/cmd/api-syncagent/options.go +++ b/cmd/api-syncagent/options.go @@ -56,9 +56,18 @@ type Options struct { // APIExportRef references the APIExport within a kcp workspace that this // Sync Agent should work with by name. The APIExport has to already exist, but it must not have - // pre-existing resource schemas configured. + // any pre-existing resource schemas configured, the agent will fill them in based on + // PublishedResources. + // + // Deprecated: Use APIExportEndpointSliceRef instead. If an APIExport is referenced, the agent + // will attempt to find and use an endpoint slice of the same name. APIExportRef string + // APIExportEndpointSliceRef references the APIExportEndpointSlice within a kcp workspace that this + // Sync Agent should work with by name. The agent will automatically manage the resource schemas + // in the APIExport referenced by this endpoint slice. 
+ APIExportEndpointSliceRef string + PublishedResourceSelectorString string PublishedResourceSelector labels.Selector @@ -87,7 +96,8 @@ func (o *Options) AddFlags(flags *pflag.FlagSet) { flags.StringVar(&o.KcpKubeconfig, "kcp-kubeconfig", o.KcpKubeconfig, "kubeconfig file of kcp") flags.StringVar(&o.Namespace, "namespace", o.Namespace, "Kubernetes namespace the Sync Agent is running in") flags.StringVar(&o.AgentName, "agent-name", o.AgentName, "name of this Sync Agent, must not be changed after the first run, can be left blank to auto-generate a name") - flags.StringVar(&o.APIExportRef, "apiexport-ref", o.APIExportRef, "name of the APIExport in kcp that this Sync Agent is powering") + flags.StringVar(&o.APIExportRef, "apiexport-ref", o.APIExportRef, "name of the APIExport in kcp that this Sync Agent is powering (deprecated, use --apiexportendpointslice-ref instead)") + flags.StringVar(&o.APIExportEndpointSliceRef, "apiexportendpointslice-ref", o.APIExportEndpointSliceRef, "name of the APIExportEndpointSlice in kcp that this Sync Agent is powering") flags.StringVar(&o.PublishedResourceSelectorString, "published-resource-selector", o.PublishedResourceSelectorString, "restrict this Sync Agent to only process PublishedResources matching this label selector (optional)") flags.BoolVar(&o.EnableLeaderElection, "enable-leader-election", o.EnableLeaderElection, "whether to perform leader election") flags.StringVar(&o.KubeconfigHostOverride, "kubeconfig-host-override", o.KubeconfigHostOverride, "override the host configured in the local kubeconfig") @@ -114,8 +124,12 @@ func (o *Options) Validate() error { } } - if len(o.APIExportRef) == 0 { - errs = append(errs, errors.New("--apiexport-ref is required")) + if len(o.APIExportRef) == 0 && len(o.APIExportEndpointSliceRef) == 0 { + errs = append(errs, errors.New("either --apiexportendpointslice-ref or --apiexport-ref is required")) + } + + if len(o.APIExportRef) != 0 && len(o.APIExportEndpointSliceRef) != 0 { + errs = 
append(errs, errors.New("--apiexportendpointslice-ref and --apiexport-ref are mutually exclusive")) } if len(o.KcpKubeconfig) == 0 { diff --git a/internal/controller/syncmanager/controller.go b/internal/controller/syncmanager/controller.go index cecfb84..c817c5c 100644 --- a/internal/controller/syncmanager/controller.go +++ b/internal/controller/syncmanager/controller.go @@ -18,6 +18,7 @@ package syncmanager import ( "context" + "errors" "fmt" "go.uber.org/zap" @@ -36,7 +37,6 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" @@ -74,7 +74,9 @@ type Reconciler struct { stateNamespace string agentName string - apiExport *kcpapisv1alpha1.APIExport + // only one of these two must be set + apiExport *kcpapisv1alpha1.APIExport + endpointSlice *kcpapisv1alpha1.APIExportEndpointSlice // URL for which the current vwCluster instance has been created vwURL string @@ -103,6 +105,8 @@ type syncWorker struct { } // Add creates a new controller and adds it to the given manager. +// +// Only one of apiExport and endpointSlice must be given. 
func Add( ctx context.Context, localManager manager.Manager, @@ -110,10 +114,18 @@ func Add( kcpRestConfig *rest.Config, log *zap.SugaredLogger, apiExport *kcpapisv1alpha1.APIExport, + endpointSlice *kcpapisv1alpha1.APIExportEndpointSlice, prFilter labels.Selector, stateNamespace string, agentName string, ) error { + if apiExport != nil && endpointSlice != nil { + return errors.New("this controller works either with APIExport or APIExportEndpointSlice, not both") + } + if apiExport == nil && endpointSlice == nil { + return errors.New("neither APIExport nor APIExportEndpointSlice provided") + } + discoveryClient, err := discovery.NewClient(localManager.GetConfig()) if err != nil { return fmt.Errorf("failed to create discovery client: %w", err) @@ -123,6 +135,7 @@ func Add( ctx: ctx, localManager: localManager, apiExport: apiExport, + endpointSlice: endpointSlice, kcpCluster: kcpCluster, kcpRestConfig: kcpRestConfig, log: log, @@ -134,19 +147,25 @@ func Add( syncWorkers: map[string]syncWorker{}, } - _, err = builder.ControllerManagedBy(localManager). + bldr := builder.ControllerManagedBy(localManager). Named(ControllerName). WithOptions(controller.Options{ // this controller is meant to control others, so we only want 1 thread MaxConcurrentReconciles: 1, }). - // Watch for changes to APIExport on the kcp side to start/restart the actual syncing controllers; - // the cache is already restricted by a fieldSelector in the main.go to respect the RBAC restrictions, - // so there is no need here to add an additional filter. - WatchesRawSource(source.Kind(kcpCluster.GetCache(), &kcpapisv1alpha1.APIExport{}, controllerutil.EnqueueConst[*kcpapisv1alpha1.APIExport]("dummy"))). // Watch for changes to the PublishedResources - Watches(&syncagentv1alpha1.PublishedResource{}, controllerutil.EnqueueConst[ctrlruntimeclient.Object]("dummy"), builder.WithPredicates(predicate.ByLabels(prFilter))). 
- Build(reconciler) + Watches(&syncagentv1alpha1.PublishedResource{}, controllerutil.EnqueueConst[ctrlruntimeclient.Object]("dummy"), builder.WithPredicates(predicate.ByLabels(prFilter))) + + // Watch for changes to APIExport/EndpointSlice on the kcp side to start/restart the actual syncing controllers; + // the cache is already restricted by a fieldSelector in the main.go to respect the RBAC restrictions, + // so there is no need here to add an additional filter. + if endpointSlice != nil { + bldr.WatchesRawSource(source.Kind(kcpCluster.GetCache(), &kcpapisv1alpha1.APIExportEndpointSlice{}, controllerutil.EnqueueConst[*kcpapisv1alpha1.APIExportEndpointSlice]("dummy"))) + } else { + bldr.WatchesRawSource(source.Kind(kcpCluster.GetCache(), &kcpapisv1alpha1.APIExport{}, controllerutil.EnqueueConst[*kcpapisv1alpha1.APIExport]("dummy"))) + } + + _, err = bldr.Build(reconciler) return err } @@ -155,30 +174,41 @@ func (r *Reconciler) Reconcile(ctx context.Context, _ reconcile.Request) (reconc log := r.log.Named(ControllerName) log.Debug("Processing") - key := types.NamespacedName{Name: r.apiExport.Name} + var err error - apiExport := &kcpapisv1alpha1.APIExport{} - if err := r.kcpCluster.GetClient().Get(ctx, key, apiExport); ctrlruntimeclient.IgnoreNotFound(err) != nil { - return reconcile.Result{}, fmt.Errorf("failed to retrieve APIExport: %w", err) - } + if r.endpointSlice != nil { + if err := r.kcpCluster.GetClient().Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(r.endpointSlice), r.endpointSlice); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to retrieve APIExportEndpointSlice: %w", err) + } - return reconcile.Result{}, r.reconcile(ctx, log, apiExport) -} + urls := r.endpointSlice.Status.APIExportEndpoints -func (r *Reconciler) reconcile(ctx context.Context, log *zap.SugaredLogger, apiExport *kcpapisv1alpha1.APIExport) error { - // We're not yet making use of APIEndpointSlices, as we don't even fully - // support a sharded kcp setup yet. 
Hence for now we're safe just using - // this deprecated VW URL. - //nolint:staticcheck - urls := apiExport.Status.VirtualWorkspaces + if len(urls) == 0 { + // the virtual workspace is not ready yet + log.Warn("APIExportEndpointSlice has no URLs.") + } else { + err = r.reconcile(ctx, log, urls[0].URL) + } + } else { + if err := r.kcpCluster.GetClient().Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(r.apiExport), r.apiExport); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to retrieve APIExport: %w", err) + } - // the virtual workspace is not ready yet - if len(urls) == 0 { - return nil + //nolint:staticcheck + urls := r.apiExport.Status.VirtualWorkspaces + + if len(urls) == 0 { + // the virtual workspace is not ready yet + log.Warn("APIExport has no virtual workspace URLs.") + } else { + err = r.reconcile(ctx, log, urls[0].URL) + } } - vwURL := urls[0].URL + return reconcile.Result{}, err +} +func (r *Reconciler) reconcile(ctx context.Context, log *zap.SugaredLogger, vwURL string) error { // if the VW URL changed, stop the manager and all sync controllers if r.vwURL != "" && vwURL != r.vwURL { r.shutdown(log) From 8b197c7278b38a7eb8749fb5cb8d2dfedc28da7b Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Fri, 5 Sep 2025 16:52:10 +0200 Subject: [PATCH 02/12] fixes On-behalf-of: @SAP christoph.mewes@sap.com --- cmd/api-syncagent/kcp.go | 5 +++-- cmd/api-syncagent/main.go | 9 ++++++++- test/utils/fixtures.go | 5 +++++ 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/cmd/api-syncagent/kcp.go b/cmd/api-syncagent/kcp.go index b023b61..3d376dd 100644 --- a/cmd/api-syncagent/kcp.go +++ b/cmd/api-syncagent/kcp.go @@ -22,9 +22,10 @@ import ( "fmt" "regexp" - "github.com/kcp-dev/api-syncagent/internal/kcp" "github.com/kcp-dev/logicalcluster/v3" + "github.com/kcp-dev/api-syncagent/internal/kcp" + kcpdevv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" kcpdevcore "github.com/kcp-dev/kcp/sdk/apis/core" kcpdevcorev1alpha1 
"github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" @@ -53,7 +54,7 @@ import ( // either the APIExport or the APIExportEndpointSlice. func setupEndpointKcpCluster(endpoint *syncEndpoint) (cluster.Cluster, error) { // no need for a dedicated endpoint cluster - if endpoint.APIExport.Cluster == endpoint.EndpointSlice.Cluster { + if endpoint.EndpointSlice == nil || endpoint.EndpointSlice.Cluster == endpoint.APIExport.Cluster { return nil, nil } diff --git a/cmd/api-syncagent/main.go b/cmd/api-syncagent/main.go index 1d1f9db..e69ca4d 100644 --- a/cmd/api-syncagent/main.go +++ b/cmd/api-syncagent/main.go @@ -36,6 +36,8 @@ import ( "github.com/kcp-dev/api-syncagent/internal/version" syncagentv1alpha1 "github.com/kcp-dev/api-syncagent/sdk/apis/syncagent/v1alpha1" + kcpdevv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" + corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" @@ -177,10 +179,15 @@ func run(ctx context.Context, log *zap.SugaredLogger, opts *Options) error { cluster = managedKcpCluster } + var endpointSlice *kcpdevv1alpha1.APIExportEndpointSlice + if endpoint.EndpointSlice != nil { + endpointSlice = endpoint.EndpointSlice.APIExportEndpointSlice + } + // It doesn't matter which rest config we specify, as the URL will be overwritten with the // virtual workspace URL anyway. 
- return syncmanager.Add(ctx, mgr, cluster, kcpRestConfig, log, endpoint.APIExport.APIExport, endpoint.EndpointSlice.APIExportEndpointSlice, opts.PublishedResourceSelector, opts.Namespace, opts.AgentName) + return syncmanager.Add(ctx, mgr, cluster, kcpRestConfig, log, endpoint.APIExport.APIExport, endpointSlice, opts.PublishedResourceSelector, opts.Namespace, opts.AgentName) }); err != nil { return err } diff --git a/test/utils/fixtures.go b/test/utils/fixtures.go index fa12d32..c091a3b 100644 --- a/test/utils/fixtures.go +++ b/test/utils/fixtures.go @@ -160,6 +160,11 @@ func CreateAPIExport(t *testing.T, ctx context.Context, client ctrlruntimeclient Resources: []string{"events"}, Verbs: []string{"get", "create", "update", "patch"}, }, + { + APIGroups: []string{"apis.kcp.io"}, + Resources: []string{"apiexportendpointslices"}, + Verbs: []string{"get", "list", "watch"}, + }, { APIGroups: []string{"apis.kcp.io"}, Resources: []string{"apiresourceschemas"}, From ff5c0399582c2ed6001524d672e3ca8c9962e5d5 Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Fri, 5 Sep 2025 17:16:53 +0200 Subject: [PATCH 03/12] it's now compatible with kcp 0.28 On-behalf-of: @SAP christoph.mewes@sap.com --- Makefile | 2 +- internal/controller/syncmanager/controller.go | 12 +----------- test/utils/fixtures.go | 12 ------------ 3 files changed, 2 insertions(+), 24 deletions(-) diff --git a/Makefile b/Makefile index 5fef658..f743511 100644 --- a/Makefile +++ b/Makefile @@ -98,7 +98,7 @@ $(YQ): yq_* KCP = _tools/kcp -KCP_VERSION = 0.27.1 +KCP_VERSION = 0.28.1 .PHONY: $(KCP) $(KCP): diff --git a/internal/controller/syncmanager/controller.go b/internal/controller/syncmanager/controller.go index c817c5c..60c3855 100644 --- a/internal/controller/syncmanager/controller.go +++ b/internal/controller/syncmanager/controller.go @@ -18,7 +18,6 @@ package syncmanager import ( "context" - "errors" "fmt" "go.uber.org/zap" @@ -74,7 +73,7 @@ type Reconciler struct { stateNamespace string agentName string - // 
only one of these two must be set + // endpointSlice is preferred over apiExport apiExport *kcpapisv1alpha1.APIExport endpointSlice *kcpapisv1alpha1.APIExportEndpointSlice @@ -105,8 +104,6 @@ type syncWorker struct { } // Add creates a new controller and adds it to the given manager. -// -// Only one of apiExport and endpointSlice must be given. func Add( ctx context.Context, localManager manager.Manager, @@ -119,13 +116,6 @@ func Add( stateNamespace string, agentName string, ) error { - if apiExport != nil && endpointSlice != nil { - return errors.New("this controller works either with APIExport or APIExportEndpointSlice, not both") - } - if apiExport == nil && endpointSlice == nil { - return errors.New("neither APIExport nor APIExportEndpointSlice provided") - } - discoveryClient, err := discovery.NewClient(localManager.GetConfig()) if err != nil { return fmt.Errorf("failed to create discovery client: %w", err) diff --git a/test/utils/fixtures.go b/test/utils/fixtures.go index c091a3b..b862295 100644 --- a/test/utils/fixtures.go +++ b/test/utils/fixtures.go @@ -129,18 +129,6 @@ func CreateAPIExport(t *testing.T, ctx context.Context, client ctrlruntimeclient t.Fatalf("Failed to create APIExport: %v", err) } - err := wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (done bool, err error) { - err = client.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(apiExport), apiExport) - if err != nil { - return false, err - } - - return conditions.IsTrue(apiExport, kcpapisv1alpha1.APIExportVirtualWorkspaceURLsReady), nil - }) - if err != nil { - t.Fatalf("Failed to wait for APIExport virtual workspace to become ready: %v", err) - } - // grant permissions to access/manage the APIExport if rbacSubject != nil { clusterRoleName := "api-syncagent" From cf919668c560e54472a7fdd69da0bfcb107b65db Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Fri, 5 Sep 2025 17:19:36 +0200 Subject: [PATCH 04/12] have one e2e job per kcp 
release On-behalf-of: @SAP christoph.mewes@sap.com --- .prow.yaml | 27 ++++++++++++++++++++++++++- Makefile | 2 +- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/.prow.yaml b/.prow.yaml index 88ecff1..7417b95 100644 --- a/.prow.yaml +++ b/.prow.yaml @@ -88,7 +88,7 @@ presubmits: memory: 4Gi cpu: 2 - - name: pull-api-syncagent-test-e2e + - name: pull-api-syncagent-test-e2e-kcp-0.27 always_run: true decorate: true clone_uri: "https://github.com/kcp-dev/api-syncagent" @@ -99,6 +99,31 @@ presubmits: - image: ghcr.io/kcp-dev/infra/build:1.24.3-1 command: - hack/ci/run-e2e-tests.sh + env: + - name: KCP_TAG + value: '0.27.1' + resources: + requests: + memory: 4Gi + cpu: 2 + # docker-in-docker needs privileged mode + securityContext: + privileged: true + + - name: pull-api-syncagent-test-e2e-kcp-0.28 + always_run: true + decorate: true + clone_uri: "https://github.com/kcp-dev/api-syncagent" + labels: + preset-goproxy: "true" + spec: + containers: + - image: ghcr.io/kcp-dev/infra/build:1.24.3-1 + command: + - hack/ci/run-e2e-tests.sh + env: + - name: KCP_TAG + value: '0.28.1' resources: requests: memory: 4Gi diff --git a/Makefile b/Makefile index f743511..4ccabc6 100644 --- a/Makefile +++ b/Makefile @@ -98,7 +98,7 @@ $(YQ): yq_* KCP = _tools/kcp -KCP_VERSION = 0.28.1 +KCP_VERSION ?= 0.28.1 .PHONY: $(KCP) $(KCP): From 9e57fe60d2e34ad8d1762bc5ecc6b766ff39099f Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Fri, 5 Sep 2025 17:19:46 +0200 Subject: [PATCH 05/12] there is no dind in these tests On-behalf-of: @SAP christoph.mewes@sap.com --- .prow.yaml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.prow.yaml b/.prow.yaml index 7417b95..d03b628 100644 --- a/.prow.yaml +++ b/.prow.yaml @@ -106,9 +106,6 @@ presubmits: requests: memory: 4Gi cpu: 2 - # docker-in-docker needs privileged mode - securityContext: - privileged: true - name: pull-api-syncagent-test-e2e-kcp-0.28 always_run: true @@ -128,6 +125,3 @@ presubmits: requests: memory: 4Gi cpu: 2 - # 
docker-in-docker needs privileged mode - securityContext: - privileged: true From 96b22f719916e8a44643f32d312a2374921b738e Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Fri, 5 Sep 2025 17:23:45 +0200 Subject: [PATCH 06/12] fix env in prowjobs On-behalf-of: @SAP christoph.mewes@sap.com --- .prow.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.prow.yaml b/.prow.yaml index d03b628..61e8bd3 100644 --- a/.prow.yaml +++ b/.prow.yaml @@ -100,7 +100,7 @@ presubmits: command: - hack/ci/run-e2e-tests.sh env: - - name: KCP_TAG + - name: KCP_VERSION value: '0.27.1' resources: requests: @@ -119,7 +119,7 @@ presubmits: command: - hack/ci/run-e2e-tests.sh env: - - name: KCP_TAG + - name: KCP_VERSION value: '0.28.1' resources: requests: From 66be880c7d21c664db2ba580d7830f7e2ba3a4d3 Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Fri, 5 Sep 2025 19:00:26 +0200 Subject: [PATCH 07/12] fix running with an AEES ref On-behalf-of: @SAP christoph.mewes@sap.com --- cmd/api-syncagent/main.go | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/cmd/api-syncagent/main.go b/cmd/api-syncagent/main.go index e69ca4d..708449a 100644 --- a/cmd/api-syncagent/main.go +++ b/cmd/api-syncagent/main.go @@ -85,11 +85,15 @@ func main() { func run(ctx context.Context, log *zap.SugaredLogger, opts *Options) error { v := version.NewAppVersion() - log.With( - "version", v.GitVersion, - "name", opts.AgentName, - "apiexport", opts.APIExportRef, - ).Info("Moin, I'm the kcp Sync Agent") + hello := log.With("version", v.GitVersion, "name", opts.AgentName) + + if opts.APIExportEndpointSliceRef != "" { + hello = hello.With("apiexportendpointslice", opts.APIExportEndpointSliceRef) + } else { + hello = hello.With("apiexport", opts.APIExportRef) + } + + hello.Info("Moin, I'm the kcp Sync Agent") // create the ctrl-runtime manager mgr, err := setupLocalManager(ctx, opts) @@ -114,10 +118,10 @@ func run(ctx context.Context, log *zap.SugaredLogger, 
opts *Options) error { return fmt.Errorf("failed to resolve APIExport/EndpointSlice: %w", err) } - log.Infow("Resolved APIExport", "workspace", endpoint.APIExport.Path, "logicalcluster", endpoint.APIExport.Cluster) + log.Infow("Resolved APIExport", "name", endpoint.APIExport.Name, "workspace", endpoint.APIExport.Path, "logicalcluster", endpoint.APIExport.Cluster) if s := endpoint.EndpointSlice; s != nil { - log.Infow("Using APIExportEndpointSlice", "workspace", s.Path, "logicalcluster", s.Cluster) + log.Infow("Using APIExportEndpointSlice", "name", endpoint.EndpointSlice.Name, "workspace", s.Path, "logicalcluster", s.Cluster) } // init the "permanent" kcp cluster connections @@ -166,7 +170,7 @@ func run(ctx context.Context, log *zap.SugaredLogger, opts *Options) error { } if err := startController("apiexport", func() error { - return apiexport.Add(mgr, managedKcpCluster, endpoint.APIExport.Cluster, log, opts.APIExportRef, opts.AgentName, opts.PublishedResourceSelector) + return apiexport.Add(mgr, managedKcpCluster, endpoint.APIExport.Cluster, log, endpoint.APIExport.Name, opts.AgentName, opts.PublishedResourceSelector) }); err != nil { return err } From 3a15f33060651cc9aee31a576010e087d84d8342 Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Fri, 5 Sep 2025 19:00:39 +0200 Subject: [PATCH 08/12] add basic e2e tests for AEES in the same cluster On-behalf-of: @SAP christoph.mewes@sap.com --- test/e2e/sync/apiexportendpointslice_test.go | 134 +++++++++++++++++++ test/utils/process.go | 25 +++- 2 files changed, 158 insertions(+), 1 deletion(-) create mode 100644 test/e2e/sync/apiexportendpointslice_test.go diff --git a/test/e2e/sync/apiexportendpointslice_test.go b/test/e2e/sync/apiexportendpointslice_test.go new file mode 100644 index 0000000..93316a5 --- /dev/null +++ b/test/e2e/sync/apiexportendpointslice_test.go @@ -0,0 +1,134 @@ +//go:build e2e + +/* +Copyright 2025 The KCP Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sync
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/go-logr/logr"
+	"github.com/kcp-dev/logicalcluster/v3"
+
+	syncagentv1alpha1 "github.com/kcp-dev/api-syncagent/sdk/apis/syncagent/v1alpha1"
+	"github.com/kcp-dev/api-syncagent/test/utils"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/wait"
+	ctrlruntime "sigs.k8s.io/controller-runtime"
+)
+
+// TestAPIExportEndpointSliceSameCluster is functionally equivalent to a simple sync test,
+// but is bootstrapping the agent using an AEES ref instead of an APIExport ref.
+func TestAPIExportEndpointSliceSameCluster(t *testing.T) { + const ( + apiExportName = "kcp.example.com" + kcpGroupName = "kcp.example.com" + orgWorkspace = "endpointslice-same-cluster" + ) + + ctx := t.Context() + ctrlruntime.SetLogger(logr.Discard()) + + // setup a test environment in kcp + orgKubconfig := utils.CreateOrganization(t, ctx, orgWorkspace, apiExportName) + + // start a service cluster + envtestKubeconfig, envtestClient, _ := utils.RunEnvtest(t, []string{ + "test/crds/crontab.yaml", + }) + + // publish Crontabs and Backups + t.Logf("Publishing CRDs…") + prCrontabs := &syncagentv1alpha1.PublishedResource{ + ObjectMeta: metav1.ObjectMeta{ + Name: "publish-crontabs", + }, + Spec: syncagentv1alpha1.PublishedResourceSpec{ + Resource: syncagentv1alpha1.SourceResourceDescriptor{ + APIGroup: "example.com", + Version: "v1", + Kind: "CronTab", + }, + // These rules make finding the local object easier, but should not be used in production. + Naming: &syncagentv1alpha1.ResourceNaming{ + Name: "{{ .Object.metadata.name }}", + Namespace: "synced-{{ .Object.metadata.namespace }}", + }, + Projection: &syncagentv1alpha1.ResourceProjection{ + Group: kcpGroupName, + }, + }, + } + + if err := envtestClient.Create(ctx, prCrontabs); err != nil { + t.Fatalf("Failed to create PublishedResource: %v", err) + } + + // start the agent in the background to update the APIExport with the CronTabs API; + // use the export's name because kcp created an endpoint slice of the same name + utils.RunEndpointSliceAgent(ctx, t, "bob", orgKubconfig, envtestKubeconfig, apiExportName) + + // wait until the API is available + kcpClusterClient := utils.GetKcpAdminClusterClient(t) + + teamClusterPath := logicalcluster.NewPath("root").Join(orgWorkspace).Join("team-1") + teamClient := kcpClusterClient.Cluster(teamClusterPath) + + utils.WaitForBoundAPI(t, ctx, teamClient, schema.GroupVersionResource{ + Group: kcpGroupName, + Version: "v1", + Resource: "crontabs", + }) + + // create a Crontab 
object in a team workspace + t.Log("Creating CronTab in kcp…") + crontab := utils.YAMLToUnstructured(t, ` +apiVersion: kcp.example.com/v1 +kind: CronTab +metadata: + namespace: default + name: my-crontab +spec: + cronSpec: '* * *' + image: ubuntu:latest +`) + + if err := teamClient.Create(ctx, crontab); err != nil { + t.Fatalf("Failed to create CronTab in kcp: %v", err) + } + + // wait for the agent to sync the object down into the service cluster + + t.Logf("Wait for CronTab to be synced…") + copy := &unstructured.Unstructured{} + copy.SetAPIVersion("example.com/v1") + copy.SetKind("CronTab") + + err := wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (done bool, err error) { + copyKey := types.NamespacedName{Namespace: "synced-default", Name: "my-crontab"} + return envtestClient.Get(ctx, copyKey, copy) == nil, nil + }) + if err != nil { + t.Fatalf("Failed to wait for object to be synced down: %v", err) + } +} diff --git a/test/utils/process.go b/test/utils/process.go index 7d00e94..eaea61c 100644 --- a/test/utils/process.go +++ b/test/utils/process.go @@ -68,6 +68,17 @@ func uniqueLogfile(t *testing.T, basename string) string { return fmt.Sprintf("%s_%02d.log", testName, counter) } +func RunEndpointSliceAgent( + ctx context.Context, + t *testing.T, + name string, + kcpKubeconfig string, + localKubeconfig string, + apiExportEndpointSlice string, +) context.CancelFunc { + return runAgent(ctx, t, name, kcpKubeconfig, localKubeconfig, "--apiexportendpointslice-ref", apiExportEndpointSlice) +} + func RunAgent( ctx context.Context, t *testing.T, @@ -75,6 +86,18 @@ func RunAgent( kcpKubeconfig string, localKubeconfig string, apiExport string, +) context.CancelFunc { + return runAgent(ctx, t, name, kcpKubeconfig, localKubeconfig, "--apiexport-ref", apiExport) +} + +func runAgent( + ctx context.Context, + t *testing.T, + name string, + kcpKubeconfig string, + localKubeconfig string, + refFlag string, + refValue 
string, ) context.CancelFunc { t.Helper() @@ -82,7 +105,7 @@ func RunAgent( args := []string{ "--agent-name", name, - "--apiexport-ref", apiExport, + refFlag, refValue, "--enable-leader-election=false", "--kubeconfig", localKubeconfig, "--kcp-kubeconfig", kcpKubeconfig, From a7188971af042653901ab4c5793170f679c0ebfc Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Fri, 5 Sep 2025 19:36:22 +0200 Subject: [PATCH 09/12] add testcase for AEES in a different workspace On-behalf-of: @SAP christoph.mewes@sap.com --- test/e2e/sync/apiexportendpointslice_test.go | 140 +++++++++++++++++++ 1 file changed, 140 insertions(+) diff --git a/test/e2e/sync/apiexportendpointslice_test.go b/test/e2e/sync/apiexportendpointslice_test.go index 93316a5..04c9fa2 100644 --- a/test/e2e/sync/apiexportendpointslice_test.go +++ b/test/e2e/sync/apiexportendpointslice_test.go @@ -20,6 +20,7 @@ package sync import ( "context" + "fmt" "testing" "time" @@ -29,6 +30,9 @@ import ( syncagentv1alpha1 "github.com/kcp-dev/api-syncagent/sdk/apis/syncagent/v1alpha1" "github.com/kcp-dev/api-syncagent/test/utils" + kcpdevv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" + + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -132,3 +136,139 @@ spec: t.Fatalf("Failed to wait for object to be synced down: %v", err) } } + +func TestAPIExportEndpointSliceDifferentCluster(t *testing.T) { + const ( + apiExportName = "kcp.example.com" + kcpGroupName = "kcp.example.com" + orgWorkspace = "endpointslice-different-cluster" + endpointWorkspace = "endpoint" + ) + + ctx := t.Context() + ctrlruntime.SetLogger(logr.Discard()) + + // setup a test environment in kcp + rootCluster := logicalcluster.NewPath("root") + utils.CreateOrganization(t, ctx, orgWorkspace, apiExportName) + + // create a custom AEES in a different cluster than the APIExport + kcpClusterClient := utils.GetKcpAdminClusterClient(t) + 
orgClient := kcpClusterClient.Cluster(rootCluster.Join(orgWorkspace)) + endpointClusterName := utils.CreateWorkspace(t, ctx, orgClient, endpointWorkspace) + endpointClient := kcpClusterClient.Cluster(endpointClusterName.Path()) + + endpointSlice := &kcpdevv1alpha1.APIExportEndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + Spec: kcpdevv1alpha1.APIExportEndpointSliceSpec{ + APIExport: kcpdevv1alpha1.ExportBindingReference{ + Path: rootCluster.Join(orgWorkspace).String(), + Name: apiExportName, + }, + }, + } + + t.Logf("Creating APIExportEndpointSlice %q…", endpointSlice.Name) + if err := endpointClient.Create(ctx, endpointSlice); err != nil { + t.Fatalf("Failed to create APIExportEndpointSlice: %v", err) + } + + agent := rbacv1.Subject{ + Kind: "User", + Name: "api-syncagent-e2e", + } + + utils.GrantWorkspaceAccess(t, ctx, endpointClient, agent, rbacv1.PolicyRule{ + APIGroups: []string{"core.kcp.io"}, + Resources: []string{"logicalclusters"}, + ResourceNames: []string{"cluster"}, + Verbs: []string{"get"}, + }, rbacv1.PolicyRule{ + APIGroups: []string{"apis.kcp.io"}, + Resources: []string{"apiexportendpointslices"}, + ResourceNames: []string{endpointSlice.Name}, + Verbs: []string{"get", "list", "watch"}, + }) + + endpointKubeconfig := utils.CreateKcpAgentKubeconfig(t, fmt.Sprintf("/clusters/%s", endpointClusterName)) + + // start a service cluster + envtestKubeconfig, envtestClient, _ := utils.RunEnvtest(t, []string{ + "test/crds/crontab.yaml", + }) + + // publish Crontabs and Backups + t.Logf("Publishing CRDs…") + prCrontabs := &syncagentv1alpha1.PublishedResource{ + ObjectMeta: metav1.ObjectMeta{ + Name: "publish-crontabs", + }, + Spec: syncagentv1alpha1.PublishedResourceSpec{ + Resource: syncagentv1alpha1.SourceResourceDescriptor{ + APIGroup: "example.com", + Version: "v1", + Kind: "CronTab", + }, + // These rules make finding the local object easier, but should not be used in production. 
+ Naming: &syncagentv1alpha1.ResourceNaming{ + Name: "{{ .Object.metadata.name }}", + Namespace: "synced-{{ .Object.metadata.namespace }}", + }, + Projection: &syncagentv1alpha1.ResourceProjection{ + Group: kcpGroupName, + }, + }, + } + + if err := envtestClient.Create(ctx, prCrontabs); err != nil { + t.Fatalf("Failed to create PublishedResource: %v", err) + } + + // start the agent in the background to update the APIExport with the CronTabs API + utils.RunEndpointSliceAgent(ctx, t, "bob", endpointKubeconfig, envtestKubeconfig, endpointSlice.Name) + + // wait until the API is available + + teamClusterPath := logicalcluster.NewPath("root").Join(orgWorkspace).Join("team-1") + teamClient := kcpClusterClient.Cluster(teamClusterPath) + + utils.WaitForBoundAPI(t, ctx, teamClient, schema.GroupVersionResource{ + Group: kcpGroupName, + Version: "v1", + Resource: "crontabs", + }) + + // create a Crontab object in a team workspace + t.Log("Creating CronTab in kcp…") + crontab := utils.YAMLToUnstructured(t, ` +apiVersion: kcp.example.com/v1 +kind: CronTab +metadata: + namespace: default + name: my-crontab +spec: + cronSpec: '* * *' + image: ubuntu:latest +`) + + if err := teamClient.Create(ctx, crontab); err != nil { + t.Fatalf("Failed to create CronTab in kcp: %v", err) + } + + // wait for the agent to sync the object down into the service cluster + + t.Logf("Wait for CronTab to be synced…") + copy := &unstructured.Unstructured{} + copy.SetAPIVersion("example.com/v1") + copy.SetKind("CronTab") + + err := wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (done bool, err error) { + copyKey := types.NamespacedName{Namespace: "synced-default", Name: "my-crontab"} + return envtestClient.Get(ctx, copyKey, copy) == nil, nil + }) + if err != nil { + t.Fatalf("Failed to wait for object to be synced down: %v", err) + } +} From 59e8fa7455e173b6d43d724b6039b0d650b259bb Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Mon, 15 Sep 
2025 15:59:06 +0200 Subject: [PATCH 10/12] ensure same-cluster test works in 0.27 where we have to manually create the AEES On-behalf-of: @SAP christoph.mewes@sap.com --- test/e2e/sync/apiexportendpointslice_test.go | 26 +++++++++++++++++--- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/test/e2e/sync/apiexportendpointslice_test.go b/test/e2e/sync/apiexportendpointslice_test.go index 04c9fa2..4d117d4 100644 --- a/test/e2e/sync/apiexportendpointslice_test.go +++ b/test/e2e/sync/apiexportendpointslice_test.go @@ -88,13 +88,31 @@ func TestAPIExportEndpointSliceSameCluster(t *testing.T) { t.Fatalf("Failed to create PublishedResource: %v", err) } + // In kcp 0.27, we have to manually create the AEES. To make this test work consistently with + // 0.27 and later versions, we simply always create one. + kcpClusterClient := utils.GetKcpAdminClusterClient(t) + orgClient := kcpClusterClient.Cluster(logicalcluster.NewPath("root").Join(orgWorkspace)) + + endpointSlice := &kcpdevv1alpha1.APIExportEndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + Spec: kcpdevv1alpha1.APIExportEndpointSliceSpec{ + APIExport: kcpdevv1alpha1.ExportBindingReference{ + Name: apiExportName, + }, + }, + } + + t.Logf("Creating APIExportEndpointSlice %q…", endpointSlice.Name) + if err := orgClient.Create(ctx, endpointSlice); err != nil { + t.Fatalf("Failed to create APIExportEndpointSlice: %v", err) + } + // start the agent in the background to update the APIExport with the CronTabs API; - // use the export's name because kcp created an endpoint slice of the same name - utils.RunEndpointSliceAgent(ctx, t, "bob", orgKubconfig, envtestKubeconfig, apiExportName) + utils.RunEndpointSliceAgent(ctx, t, "bob", orgKubconfig, envtestKubeconfig, endpointSlice.Name) // wait until the API is available - kcpClusterClient := utils.GetKcpAdminClusterClient(t) - teamClusterPath := logicalcluster.NewPath("root").Join(orgWorkspace).Join("team-1") teamClient := 
kcpClusterClient.Cluster(teamClusterPath) From a5eb1865404ea38fc94fab9b366003f6cda0990c Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Mon, 15 Sep 2025 16:21:50 +0200 Subject: [PATCH 11/12] update docs On-behalf-of: @SAP christoph.mewes@sap.com --- docs/content/getting-started.md | 143 ++++++++++++++++++++---- docs/content/publish-resources/index.md | 6 +- 2 files changed, 127 insertions(+), 22 deletions(-) diff --git a/docs/content/getting-started.md b/docs/content/getting-started.md index 447a3b8..7b63723 100644 --- a/docs/content/getting-started.md +++ b/docs/content/getting-started.md @@ -12,8 +12,8 @@ All that is necessary to run the Sync Agent is a running Kubernetes cluster (for ## APIExport Setup Before installing the Sync Agent it is necessary to create an `APIExport` on kcp. The `APIExport` should -be empty, because it is updated later by the Sync Agent, but it defines the new API group we're -introducing. An example file could look like this: +be empty, because it is updated later by the Sync Agent. The export's name is only important for +binding to it, the resources in it can use other API groups. An example file could look like this: ```yaml apiVersion: apis.kcp.io/v1alpha1 @@ -23,9 +23,10 @@ metadata: spec: {} ``` -Create a file with a similar content (you most likely want to change the name, as that is the API -group under which your published resources will be made available) and create it in a kcp workspace -of your choice: +The `APIExport` above might look like it is defining all resources for the `test.example.com` API +group, but in reality it might contain resource schemas like `v1.crontabs.initech.com`. 
+ +Create a file with a similar content and create it in a kcp workspace of your choice: ```sh # use the kcp kubeconfig @@ -65,6 +66,28 @@ $ kubectl create --filename apibinding.yaml apibinding/test.example.com created ``` +### Creating an Endpoint Slice + +Beginning with kcp 0.28, `APIExports` do not contain the list of URLs the Sync Agent would need to +connect to. Instead, kcp will create an `APIExportEndpointSlice` of the same name in the same +workspace as the `APIExport` it belongs to. Just as before, without any existing `APIBindings`, this +endpoint slice will not contain any URLs, so the step in the section above still applies in kcp 0.28+. + +In older kcp versions, no endpoint slice is automatically created by kcp, but admins are free to +create their own. + +The Sync Agent can work with either `APIExports` or `APIExportEndpointSlices`. Depending on your +situation and needs, start it with `--apiexport-ref` or `--apiexportendpointslice-ref`. When a +reference to an endpoint slice is configured, the agent will automatically determine the `APIExport` +the slice belongs to (and it will of course continue to manage the `APIResourceSchemas` in that +`APIExport`). + +!!! note + The endpoint slices can live in workspaces other than the one where the `APIExport` exists. Make + sure that the kcp-kubeconfig provided to the Sync Agent always points to the workspace that has + the configured element (i.e. if using `--apiexportendpointslice-ref`, the kubeconfig must point + to the workspace where the endpoint slice exists, even if the APIExport is somewhere else). + ## Sync Agent Installation The Sync Agent can be installed into any namespace, but in our example we are going with `kcp-system`. @@ -72,10 +95,11 @@ It doesn't necessarily have to live in the same Kubernetes cluster where it is s to, but that is the common setup. Ultimately the Sync Agent synchronizes data between two kube endpoints. 
-Now that the `APIExport` is created, switch to the Kubernetes cluster from which you wish to
-[publish resources](./publish-resources/index.md). You will need to ensure that a kubeconfig with access to
-the kcp workspace that the `APIExport` has been created in is stored as a `Secret` on this cluster.
-Make sure that the kubeconfig points to the right workspace (not necessarily the `root` workspace).
+Now that the `APIExport` (and optionally the `APIExportEndpointSlice`) is created, switch to the
+Kubernetes cluster from which you wish to [publish resources](./publish-resources/index.md). You will
+need to ensure that a kubeconfig with access to the kcp workspace that the referenced object (export
+or endpoint slice) has been created in is stored as a `Secret` on this cluster. Make sure that the
+kubeconfig points to the right workspace (not necessarily the `root` workspace).
 
 This can be done via a command like this:
 
@@ -88,13 +112,21 @@ $ kubectl create secret generic kcp-kubeconfig \
 ### Helm Chart Setup
 
 The Sync Agent is shipped as a Helm chart and to install it, the next step is preparing a `values.yaml`
-file for the Sync Agent Helm chart. We need to pass the target `APIExport`, a name for the Sync Agent
-itself and a reference to the kubeconfig secret we just created.
+file for the Sync Agent Helm chart. We need to pass the target `APIExport` or the target
+`APIExportEndpointSlice`, a name for the Sync Agent itself and a reference to the kubeconfig secret
+we just created.
 
 ```yaml
-# Required: the name of the APIExport in kcp that this Sync Agent is supposed to serve.
+# Either of the following two is required, and both fields are mutually exclusive:
+
+# the name of the APIExport in kcp that this Sync Agent is supposed to serve.
 apiExportName: test.example.com
 
+# -- or --
+
+# the name of the APIExportEndpointSlice in kcp that this Sync Agent is supposed to serve.
+# apiExportEndpointSliceName: test.example.com + # Required: This Agent's public name, used to signal ownership over locally synced objects. # This value must be a valid Kubernetes label value, see # https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set @@ -119,9 +151,11 @@ helm install kcp-api-syncagent kcp/api-syncagent \ --namespace kcp-system ``` -Two `kcp-api-syncagent` Pods should start in the `kcp-system` namespace. If they crash you will need to -identify the reason from container logs. A possible issue is that the provided kubeconfig does not -have permissions against the target kcp workspace. +Two `kcp-api-syncagent` Pods should start in the `kcp-system` namespace. If they crash you will need +to identify the reason from container logs. A possible issue is that the provided kubeconfig does not +have permissions against the target kcp workspace (note that if you have an `APIExportEndpointSlice` +in a different workspace than the `APIExport`, you will need to grant the Sync Agent permissions in +both workspaces). ### Service Cluster RBAC @@ -177,9 +211,11 @@ the RBAC rules that grant the Agent access. The Sync Agent needs to -* access the workspace of its `APIExport`, -* get the `LogicalCluster`, +* access the workspace of its `APIExport` or `APIExportEndpointSlice`, +* get the `LogicalCluster` (if export and endpoint slice are in different workspaces, then the agent + will need permission to fetch the logicalcluster in both of them), * manage its `APIExport`, +* watch the `APIExportEndpointSlice` if configured, * manage `APIResourceSchemas`, * create `events` for `APIExports` and * access the virtual workspace for its `APIExport`. 
@@ -201,6 +237,17 @@ rules: - cluster verbs: - get + # watch the APIExportEndpointSlice (optional, could also be in a different workspace) + - apiGroups: + - apis.kcp.io + resources: + - apiexportendpointslices + resourceNames: + - test.example.com + verbs: + - get + - list + - watch # manage its APIExport - apiGroups: - apis.kcp.io @@ -271,6 +318,64 @@ subjects: name: api-syncagent-mango ``` +If you indeed have different workspaces for the export and endpoint slice, apply the RBAC above as +mentioned in the workspace with the `APIExport`, and additionally apply the RBAC below in the +workspace where the `APIExportEndpointSlice` exists: + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: api-syncagent-mango +rules: + # get the LogicalCluster + - apiGroups: + - core.kcp.io + resources: + - logicalclusters + resourceNames: + - cluster + verbs: + - get + # watch the APIExportEndpointSlice + - apiGroups: + - apis.kcp.io + resources: + - apiexportendpointslices + resourceNames: + - test.example.com + verbs: + - get + - list + - watch + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: api-syncagent-mango:system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: api-syncagent-mango +subjects: + - kind: User + name: api-syncagent-mango + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: api-syncagent-mango:access +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kcp:workspace:access +subjects: + - kind: User + name: api-syncagent-mango +``` + ## Publish Resources Once the Sync Agent Pods are up and running, you should be able to follow the @@ -279,8 +384,8 @@ Once the Sync Agent Pods are up and running, you should be able to follow the ## Consume Service Once resources have been published through the Sync Agent, they can be consumed on the kcp side (i.e. 
-objects on kcp will be synced back and forth with the service cluster). Follow the
-guide to [consuming services](consuming-services.md).
+objects on kcp will be synced back and forth with the service cluster). Follow the guide to
+[consuming services](consuming-services.md).
 
 [kind]: https://github.com/kubernetes-sigs/kind
 [kcp]: https://kcp.io
diff --git a/docs/content/publish-resources/index.md b/docs/content/publish-resources/index.md
index f36f67f..3fc105a 100644
--- a/docs/content/publish-resources/index.md
+++ b/docs/content/publish-resources/index.md
@@ -2,8 +2,8 @@
 
 The guide describes the process of making a resource (usually defined by a CustomResourceDefinition)
 of one Kubernetes cluster (the "service cluster" or "local cluster") available for use in kcp. This
-involves setting up an `APIExport` and then installing the Sync Agent and defining
-`PublishedResources` in the local cluster.
+involves setting up an `APIExport`, potentially an `APIExportEndpointSlice`, and then installing the
+Sync Agent and defining `PublishedResources` in the local cluster.
 
 All of the documentation and API types are worded and named from the perspective of a service owner,
 the person(s) who own a service and want to make it available to consumers in kcp.
@@ -23,7 +23,7 @@ versions and even change API group, versions and names in transit (for example p
 the service cluster as v1beta1 within kcp). This process of changing the identity of a CRD is called
 "projection" in the agent.
 
-All published resources together form the APIExport. When a service is enabled in a workspace
+All published resources together form the `APIExport`. When a service is enabled in a workspace
 (i.e. it is bound to it), users can manage objects for the projected resources described by the
 published resources. These objects will be synced from the workspace onto the service cluster,
 where they are meant to be processed in whatever way the service owners desire.
 Any possible

From f3e6ee2fd732195b534cf8022e8a5318d09db1be Mon Sep 17 00:00:00 2001
From: Christoph Mewes
Date: Mon, 15 Sep 2025 19:02:39 +0200
Subject: [PATCH 12/12] work around inconsistency in kcp 0.27 API binding

On-behalf-of: @SAP christoph.mewes@sap.com
---
 test/e2e/sync/apiexportendpointslice_test.go | 13 +++++++++++++
 test/utils/utils.go                          | 20 ++++++++++++++++++++
 2 files changed, 33 insertions(+)

diff --git a/test/e2e/sync/apiexportendpointslice_test.go b/test/e2e/sync/apiexportendpointslice_test.go
index 4d117d4..b589ae1 100644
--- a/test/e2e/sync/apiexportendpointslice_test.go
+++ b/test/e2e/sync/apiexportendpointslice_test.go
@@ -122,6 +122,14 @@ func TestAPIExportEndpointSliceSameCluster(t *testing.T) {
 		Resource: "crontabs",
 	})
 
+	// In kcp 0.27, the binding's status is not perfectly in-sync with the actual APIs available in
+	// the workspace; since this is fixed in 0.28+ and we really care about the APIBinding, not necessarily
+	// the Kubernetes magic behind it, we keep the loop above but on 0.27 add a small synthetic delay.
+	// TODO: Remove this once we do not support kcp 0.27 anymore.
+	if utils.KCPMinor() < 28 {
+		time.Sleep(2 * time.Second)
+	}
+
 	// create a Crontab object in a team workspace
 	t.Log("Creating CronTab in kcp…")
 	crontab := utils.YAMLToUnstructured(t, `
@@ -258,6 +266,11 @@ func TestAPIExportEndpointSliceDifferentCluster(t *testing.T) {
 		Resource: "crontabs",
 	})
 
+	// TODO: Remove this once we do not support kcp 0.27 anymore.
+ if utils.KCPMinor() < 28 { + time.Sleep(2 * time.Second) + } + // create a Crontab object in a team workspace t.Log("Creating CronTab in kcp…") crontab := utils.YAMLToUnstructured(t, ` diff --git a/test/utils/utils.go b/test/utils/utils.go index b0c9a7c..5102d92 100644 --- a/test/utils/utils.go +++ b/test/utils/utils.go @@ -21,6 +21,7 @@ import ( "net/url" "os" "regexp" + "strconv" "strings" "testing" @@ -205,3 +206,22 @@ func YAMLToUnstructured(t *testing.T, data string) *unstructured.Unstructured { return ToUnstructured(t, obj) } + +func KCPMinor() int { + version := os.Getenv("KCP_VERSION") + if version == "" { + panic("No $KCP_VERSION environment variable defined.") + } + + parts := strings.SplitN(version, ".", 3) + if len(parts) != 3 { + panic("Invalid $KCP_VERSION, must be X.Y.Z.") + } + + minor, err := strconv.ParseInt(parts[1], 10, 32) + if err != nil { + panic(fmt.Sprintf("Invalid $KCP_VERSION: not parseable: %v", err)) + } + + return int(minor) +}