diff --git a/test/e2e/quick_start.go b/test/e2e/quick_start.go index 20a3beb63396..7e8c1163af1e 100644 --- a/test/e2e/quick_start.go +++ b/test/e2e/quick_start.go @@ -27,8 +27,11 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" + "sigs.k8s.io/cluster-api/test/e2e/internal/log" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" @@ -111,156 +114,183 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput) clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult ) - BeforeEach(func() { - Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName) - input = inputGetter() - Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName) - Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName) - Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName) - Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName) + for i := 0; i < 1; i++ { // TODO(review): debug-only repetition loop (was 20); it re-registers this spec and must be removed entirely before merge - Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion)) + BeforeEach(func() { + Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName) + input = inputGetter() + Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName) + Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName) + Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. 
input.BootstrapClusterProxy can't be nil when calling %s spec", specName) + Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName) - if input.ExtensionServiceNamespace != "" && input.ExtensionServiceName != "" { - if input.ExtensionConfigName == "" { - input.ExtensionConfigName = specName + Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion)) + + if input.ExtensionServiceNamespace != "" && input.ExtensionServiceName != "" { + if input.ExtensionConfigName == "" { + input.ExtensionConfigName = specName + } } - } - // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. - namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated) + // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. + namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated) - if input.DeployClusterClassInSeparateNamespace { - clusterClassNamespace = framework.CreateNamespace(ctx, framework.CreateNamespaceInput{Creator: input.BootstrapClusterProxy.GetClient(), Name: fmt.Sprintf("%s-clusterclass", namespace.Name)}, "40s", "10s") - Expect(clusterClassNamespace).ToNot(BeNil(), "Failed to create namespace") - } + if input.DeployClusterClassInSeparateNamespace { + clusterClassNamespace = framework.CreateNamespace(ctx, framework.CreateNamespaceInput{Creator: input.BootstrapClusterProxy.GetClient(), Name: fmt.Sprintf("%s-clusterclass", namespace.Name)}, "40s", "10s") + Expect(clusterClassNamespace).ToNot(BeNil(), "Failed to create namespace") + } - clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult) - }) + clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult) + }) - It("Should create a workload 
cluster", func() { - By("Creating a workload cluster") + It("Should create a workload cluster", func() { + By("Creating a workload cluster") - infrastructureProvider := clusterctl.DefaultInfrastructureProvider - if input.InfrastructureProvider != nil { - infrastructureProvider = *input.InfrastructureProvider - } + infrastructureProvider := clusterctl.DefaultInfrastructureProvider + if input.InfrastructureProvider != nil { + infrastructureProvider = *input.InfrastructureProvider + } - flavor := clusterctl.DefaultFlavor - if input.Flavor != nil { - flavor = *input.Flavor - } + flavor := clusterctl.DefaultFlavor + if input.Flavor != nil { + flavor = *input.Flavor + } - controlPlaneMachineCount := ptr.To[int64](1) - if input.ControlPlaneMachineCount != nil { - controlPlaneMachineCount = input.ControlPlaneMachineCount - } + controlPlaneMachineCount := ptr.To[int64](1) + if input.ControlPlaneMachineCount != nil { + controlPlaneMachineCount = input.ControlPlaneMachineCount + } - workerMachineCount := ptr.To[int64](1) - if input.WorkerMachineCount != nil { - workerMachineCount = input.WorkerMachineCount - } + workerMachineCount := ptr.To[int64](1) + if input.WorkerMachineCount != nil { + workerMachineCount = input.WorkerMachineCount + } - clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6)) - if input.ClusterName != nil { - clusterName = *input.ClusterName - } + clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6)) + if input.ClusterName != nil { + clusterName = *input.ClusterName + } + + if input.ExtensionServiceNamespace != "" && input.ExtensionServiceName != "" { + // NOTE: test extension is already deployed in the management cluster. 
If for any reason in future we want + // to make this test more self-contained this test should be modified in order to create an additional + // management cluster; also the E2E test configuration should be modified introducing something like + // optional:true allowing to define which providers should not be installed by default in + // a management cluster. + By("Deploy Test Extension ExtensionConfig") + + // In this test we are defaulting all handlers to non-blocking because we don't expect the handlers to block the + // cluster lifecycle by default. Setting defaultAllHandlersToBlocking to false enforces that the test-extension + // automatically creates the ConfigMap with non-blocking preloaded responses. + defaultAllHandlersToBlocking := false + // select on the current namespace + // This is necessary so in CI this test doesn't influence other tests by enabling lifecycle hooks + // in other test namespaces. + namespaces := []string{namespace.Name} + if input.DeployClusterClassInSeparateNamespace { + // Add the ClusterClass namespace, if the ClusterClass is deployed in a separate namespace. + namespaces = append(namespaces, clusterClassNamespace.Name) + } + extensionConfig := extensionConfig(input.ExtensionConfigName, input.ExtensionServiceNamespace, input.ExtensionServiceName, defaultAllHandlersToBlocking, namespaces...) + Expect(input.BootstrapClusterProxy.GetClient().Create(ctx, + extensionConfig)). + To(Succeed(), "Failed to create the ExtensionConfig") + } + + variables := map[string]string{ + // This is used to template the name of the ExtensionConfig into the ClusterClass. + "EXTENSION_CONFIG_NAME": input.ExtensionConfigName, + } + maps.Copy(variables, input.ClusterctlVariables) - if input.ExtensionServiceNamespace != "" && input.ExtensionServiceName != "" { - // NOTE: test extension is already deployed in the management cluster. 
If for any reason in future we want - // to make this test more self-contained this test should be modified in order to create an additional - // management cluster; also the E2E test configuration should be modified introducing something like - // optional:true allowing to define which providers should not be installed by default in - // a management cluster. - By("Deploy Test Extension ExtensionConfig") - - // In this test we are defaulting all handlers to non-blocking because we don't expect the handlers to block the - // cluster lifecycle by default. Setting defaultAllHandlersToBlocking to false enforces that the test-extension - // automatically creates the ConfigMap with non-blocking preloaded responses. - defaultAllHandlersToBlocking := false - // select on the current namespace - // This is necessary so in CI this test doesn't influence other tests by enabling lifecycle hooks - // in other test namespaces. - namespaces := []string{namespace.Name} if input.DeployClusterClassInSeparateNamespace { - // Add the ClusterClass namespace, if the ClusterClass is deployed in a separate namespace. - namespaces = append(namespaces, clusterClassNamespace.Name) + variables["CLUSTER_CLASS_NAMESPACE"] = clusterClassNamespace.Name + By("Creating a cluster referencing a ClusterClass from another namespace") } - extensionConfig := extensionConfig(input.ExtensionConfigName, input.ExtensionServiceNamespace, input.ExtensionServiceName, defaultAllHandlersToBlocking, namespaces...) - Expect(input.BootstrapClusterProxy.GetClient().Create(ctx, - extensionConfig)). - To(Succeed(), "Failed to create the ExtensionConfig") - } - variables := map[string]string{ - // This is used to template the name of the ExtensionConfig into the ClusterClass. 
- "EXTENSION_CONFIG_NAME": input.ExtensionConfigName, - } - maps.Copy(variables, input.ClusterctlVariables) + clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + ConfigCluster: clusterctl.ConfigClusterInput{ + LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), + ClusterctlConfigPath: input.ClusterctlConfigPath, + ClusterctlVariables: variables, + KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), + InfrastructureProvider: infrastructureProvider, + Flavor: flavor, + Namespace: namespace.Name, + ClusterName: clusterName, + KubernetesVersion: input.E2EConfig.MustGetVariable(KubernetesVersion), + ControlPlaneMachineCount: controlPlaneMachineCount, + WorkerMachineCount: workerMachineCount, + }, + ControlPlaneWaiters: input.ControlPlaneWaiters, + WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), + WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"), + WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), + PostMachinesProvisioned: func() { + if input.PostMachinesProvisioned != nil { + input.PostMachinesProvisioned(input.BootstrapClusterProxy, namespace.Name, clusterName) + } + }, + }, clusterResources) + + Byf("Verify Cluster Available condition is true") + framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{ + Getter: input.BootstrapClusterProxy.GetClient(), + Name: clusterResources.Cluster.Name, + Namespace: clusterResources.Cluster.Namespace, + }) + + Byf("Verify Machines Ready condition is true") + framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{ + Lister: input.BootstrapClusterProxy.GetClient(), + Name: clusterResources.Cluster.Name, + Namespace: clusterResources.Cluster.Namespace, + }) + + By("PASSED!") - if input.DeployClusterClassInSeparateNamespace { - 
variables["CLUSTER_CLASS_NAMESPACE"] = clusterClassNamespace.Name - By("Creating a cluster referencing a ClusterClass from another namespace") - } + }) - clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ - ClusterProxy: input.BootstrapClusterProxy, - ConfigCluster: clusterctl.ConfigClusterInput{ - LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), - ClusterctlConfigPath: input.ClusterctlConfigPath, - ClusterctlVariables: variables, - KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), - InfrastructureProvider: infrastructureProvider, - Flavor: flavor, - Namespace: namespace.Name, - ClusterName: clusterName, - KubernetesVersion: input.E2EConfig.MustGetVariable(KubernetesVersion), - ControlPlaneMachineCount: controlPlaneMachineCount, - WorkerMachineCount: workerMachineCount, - }, - ControlPlaneWaiters: input.ControlPlaneWaiters, - WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), - WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"), - WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), - PostMachinesProvisioned: func() { - if input.PostMachinesProvisioned != nil { - input.PostMachinesProvisioned(input.BootstrapClusterProxy, namespace.Name, clusterName) + AfterEach(func() { + // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. 
framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ClusterctlConfigPath, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) + if !input.SkipCleanup { + if input.ExtensionServiceNamespace != "" && input.ExtensionServiceName != "" { + Eventually(func() error { + return input.BootstrapClusterProxy.GetClient().Delete(ctx, extensionConfig(input.ExtensionConfigName, input.ExtensionServiceNamespace, input.ExtensionServiceName, true)) + }, 10*time.Second, 1*time.Second).Should(Succeed(), "Deleting ExtensionConfig failed") + } + if input.DeployClusterClassInSeparateNamespace { + deleteNamespaceWithRetries(ctx, framework.DeleteNamespaceInput{ + Deleter: input.BootstrapClusterProxy.GetClient(), + Name: clusterClassNamespace.Name, + }) + } - }, - }, clusterResources) - - Byf("Verify Cluster Available condition is true") - framework.VerifyClusterAvailable(ctx, framework.VerifyClusterAvailableInput{ - Getter: input.BootstrapClusterProxy.GetClient(), - Name: clusterResources.Cluster.Name, - Namespace: clusterResources.Cluster.Namespace, + } }) - Byf("Verify Machines Ready condition is true") - framework.VerifyMachinesReady(ctx, framework.VerifyMachinesReadyInput{ - Lister: input.BootstrapClusterProxy.GetClient(), - Name: clusterResources.Cluster.Name, - Namespace: clusterResources.Cluster.Namespace, - }) + } +} - By("PASSED!") - }) +// deleteNamespaceWithRetries deletes the namespace with the given name, retrying until the delete succeeds; a NotFound error is treated as success. 
+func deleteNamespaceWithRetries(ctx context.Context, input framework.DeleteNamespaceInput, intervals ...interface{}) { + Expect(ctx).NotTo(BeNil(), "ctx is required for deleteNamespaceWithRetries") + Expect(input.Deleter).NotTo(BeNil(), "input.Deleter is required for deleteNamespaceWithRetries") + Expect(input.Name).NotTo(BeEmpty(), "input.Name is required for deleteNamespaceWithRetries") + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: input.Name, + }, + } + log.Logf("Deleting namespace %s", input.Name) + Eventually(func() error { + err := input.Deleter.Delete(ctx, ns) + if err != nil && !errors.IsNotFound(err) { + return err - AfterEach(func() { - // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. - framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ClusterctlConfigPath, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) - if !input.SkipCleanup { - if input.ExtensionServiceNamespace != "" && input.ExtensionServiceName != "" { - Eventually(func() error { - return input.BootstrapClusterProxy.GetClient().Delete(ctx, extensionConfig(input.ExtensionConfigName, input.ExtensionServiceNamespace, input.ExtensionServiceName, true)) - }, 10*time.Second, 1*time.Second).Should(Succeed(), "Deleting ExtensionConfig failed") - } - if input.DeployClusterClassInSeparateNamespace { - framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{ - Deleter: input.BootstrapClusterProxy.GetClient(), - Name: clusterClassNamespace.Name, - }) - } } - }) + return nil + }, intervals...).Should(Succeed(), "Failed to delete namespace %s", input.Name) } diff --git a/test/e2e/quick_start_test.go b/test/e2e/quick_start_test.go index e9f0ef6ee5ad..3524e9ac46e0 100644 --- a/test/e2e/quick_start_test.go +++ b/test/e2e/quick_start_test.go @@ -29,7 +29,7 @@ import ( "sigs.k8s.io/cluster-api/test/framework/kubetest" ) -var _ = Describe("When following 
the Cluster API quick-start", func() { +var _ = Describe("When following the Cluster API quick-start", func() { QuickStartSpec(ctx, func() QuickStartSpecInput { return QuickStartSpecInput{ E2EConfig: e2eConfig, @@ -75,7 +75,7 @@ var _ = Describe("When following the Cluster API quick-start", func() { }) }) -var _ = Describe("When following the Cluster API quick-start with ClusterClass [PR-Blocking] [ClusterClass]", Label("PR-Blocking", "ClusterClass"), func() { +var _ = Describe("When following the Cluster API quick-start with ClusterClass [PR-Blocking] [ClusterClass]", Label("PR-Blocking", "ClusterClass"), func() { QuickStartSpec(ctx, func() QuickStartSpecInput { return QuickStartSpecInput{ E2EConfig: e2eConfig, diff --git a/test/framework/resourceversion_helpers.go b/test/framework/resourceversion_helpers.go index ee79889a8e68..819f792f8662 100644 --- a/test/framework/resourceversion_helpers.go +++ b/test/framework/resourceversion_helpers.go @@ -38,10 +38,10 @@ func ValidateResourceVersionStable(ctx context.Context, proxy ClusterProxy, name byf("Check resourceVersions are stable") var previousResourceVersions map[string]string var previousObjects map[string]client.Object Eventually(func(g Gomega) { objectsWithResourceVersion, objects, err := getObjectsWithResourceVersion(ctx, proxy, namespace, ownerGraphFilterFunction) g.Expect(err).ToNot(HaveOccurred()) defer func() { // Set current resourceVersions as previous resourceVersions for the next try. 
previousResourceVersions = objectsWithResourceVersion @@ -49,15 +65,48 @@ func ValidateResourceVersionStable(ctx context.Context, proxy ClusterProxy, name }() // This is intentionally failing on the first run. g.Expect(objectsWithResourceVersion).To(BeComparableTo(previousResourceVersions)) - }, 1*time.Minute, 15*time.Second).Should(Succeed(), "resourceVersions never became stable") + }, 2*time.Minute, 10*time.Second).MustPassRepeatedly(2).Should(Succeed(), "resourceVersions never became stable") // Verify resourceVersions are stable for a while. 
byf("Check resourceVersions remain stable") Consistently(func(g Gomega) { objectsWithResourceVersion, objects, err := getObjectsWithResourceVersion(ctx, proxy, namespace, ownerGraphFilterFunction) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(previousResourceVersions).To(BeComparableTo(objectsWithResourceVersion), printObjectDiff(previousObjects, objects)) }, 2*time.Minute, 15*time.Second).Should(Succeed(), "resourceVersions didn't stay stable") } func printObjectDiff(previousObjects, newObjects map[string]client.Object) func() string {