diff --git a/test/e2e/blockdevice/data_exports.go b/test/e2e/blockdevice/data_exports.go index acf58fc293..4495062657 100644 --- a/test/e2e/blockdevice/data_exports.go +++ b/test/e2e/blockdevice/data_exports.go @@ -59,11 +59,14 @@ const ( ) var _ = Describe("DataExports", label.Slow(), Label(precheck.PrecheckSVDM, precheck.PrecheckSnapshot), func() { - var f *framework.Framework - + var ( + f *framework.Framework + ctx context.Context + ) BeforeEach(func() { + ctx = context.Background() f = framework.NewFramework("data-exports") - moduleEnabled, err := checkStorageVolumeDataManagerEnabled() + moduleEnabled, err := checkStorageVolumeDataManagerEnabled(ctx) Expect(err).NotTo(HaveOccurred(), "Failed to get modules") if !moduleEnabled { Skip("Module 'storage-volume-data-manager' is disabled. Skipping all tests with using this module.") @@ -92,7 +95,7 @@ var _ = Describe("DataExports", label.Slow(), Label(precheck.PrecheckSVDM, prech vdbuilder.WithPersistentVolumeClaim(nil, ptr.To(resource.MustParse("51Mi"))), ) - err := f.CreateWithDeferredDeletion(context.Background(), vdRoot, vdData) + err := f.CreateWithDeferredDeletion(ctx, vdRoot, vdData) Expect(err).NotTo(HaveOccurred()) }) @@ -112,17 +115,17 @@ var _ = Describe("DataExports", label.Slow(), Label(precheck.PrecheckSVDM, prech vmbuilder.WithProvisioningUserData(object.UbuntuCloudInit), ) - err := f.CreateWithDeferredDeletion(context.Background(), vm) + err := f.CreateWithDeferredDeletion(ctx, vm) Expect(err).NotTo(HaveOccurred()) }) By("Waiting for VM agent to be ready", func() { - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(vm), framework.LongTimeout) }) By("Writing test data to the data disk", func() { - util.CreateBlockDeviceFilesystem(f, vm, v1alpha2.DiskDevice, vdData.Name, "ext4") - util.MountBlockDevice(f, vm, v1alpha2.DiskDevice, vdData.Name, mountPointData) + util.CreateBlockDeviceFilesystem(ctx, f, vm, v1alpha2.DiskDevice, vdData.Name, "ext4") + util.MountBlockDevice(ctx, f, vm, v1alpha2.DiskDevice, vdData.Name, mountPointData) util.WriteFile(f, vm, fileDataPath, testFileValue) util.UnmountBlockDevice(f, vm, mountPointData) }) @@ -134,11 +137,11 @@ var _ = Describe("DataExports", label.Slow(), Label(precheck.PrecheckSVDM, prech vmopbuilder.WithType(v1alpha2.VMOPTypeStop), vmopbuilder.WithVirtualMachine(vm.Name), ) - err := f.CreateWithDeferredDeletion(context.Background(), vmopStop) + err := f.CreateWithDeferredDeletion(ctx, vmopStop) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.VMOPPhaseCompleted), framework.LongTimeout, vmopStop) - util.UntilObjectPhase(string(v1alpha2.MachineStopped), framework.ShortTimeout, vm) + util.UntilObjectPhase(ctx, string(v1alpha2.VMOPPhaseCompleted), framework.LongTimeout, vmopStop) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineStopped), framework.ShortTimeout, vm) }) By("Creating snapshot of the data disk", func() { @@ -149,26 +152,26 @@ var _ = Describe("DataExports", label.Slow(), Label(precheck.PrecheckSVDM, prech vdsnapshotbuilder.WithRequiredConsistency(true), ) - err := f.CreateWithDeferredDeletion(context.Background(), vdSnapshot) + err := f.CreateWithDeferredDeletion(ctx, vdSnapshot) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.VirtualDiskSnapshotPhaseReady), framework.ShortTimeout, vdSnapshot) + util.UntilObjectPhase(ctx, string(v1alpha2.VirtualDiskSnapshotPhaseReady), framework.ShortTimeout, vdSnapshot) }) By("Exporting VirtualDisk 
to local file", func() { - exportData(f, "vd", vdData.Name, exportedDiskFile) + exportData(ctx, f, "vd", vdData.Name, exportedDiskFile) }) By("Exporting VirtualDiskSnapshot to local file", func() { - exportData(f, "vds", vdSnapshot.Name, exportedSnapshotFile) + exportData(ctx, f, "vds", vdSnapshot.Name, exportedSnapshotFile) }) By("Deleting the original data disk", func() { - err := f.Delete(context.Background(), vdData) + err := f.Delete(ctx, vdData) Expect(err).NotTo(HaveOccurred()) Eventually(func(g Gomega) { var vd v1alpha2.VirtualDisk - err := f.Clients.GenericClient().Get(context.Background(), types.NamespacedName{ + err := f.Clients.GenericClient().Get(ctx, types.NamespacedName{ Namespace: vdData.Namespace, Name: vdData.Name, }, &vd) @@ -178,31 +181,31 @@ var _ = Describe("DataExports", label.Slow(), Label(precheck.PrecheckSVDM, prech }) By("Creating disk from exported VirtualDisk", func() { - vdFromDiskExport = createUploadDisk(f, "vd-restored-from-disk") + vdFromDiskExport = createUploadDisk(ctx, f, "vd-restored-from-disk") }) By("Uploading exported disk image", func() { - uploadFile(f, vdFromDiskExport, exportedDiskFile) + uploadFile(ctx, f, vdFromDiskExport, exportedDiskFile) }) By("Waiting for disk from VirtualDisk export to be ready", func() { - util.UntilObjectPhase(util.GetExpectedDiskPhaseByVolumeBindingMode(), framework.LongTimeout, vdFromDiskExport) + util.UntilObjectPhase(ctx, util.GetExpectedDiskPhaseByVolumeBindingMode(), framework.LongTimeout, vdFromDiskExport) }) By("Creating disk from exported VirtualDiskSnapshot", func() { - vdFromSnapshotExport = createUploadDisk(f, "vd-restored-from-snapshot") + vdFromSnapshotExport = createUploadDisk(ctx, f, "vd-restored-from-snapshot") }) By("Uploading exported snapshot image", func() { - uploadFile(f, vdFromSnapshotExport, exportedSnapshotFile) + uploadFile(ctx, f, vdFromSnapshotExport, exportedSnapshotFile) }) By("Waiting for disk from snapshot export to be ready", func() { - util.UntilObjectPhase(util.GetExpectedDiskPhaseByVolumeBindingMode(), framework.LongTimeout, vdFromSnapshotExport) + util.UntilObjectPhase(ctx, util.GetExpectedDiskPhaseByVolumeBindingMode(), framework.LongTimeout, vdFromSnapshotExport) }) By("Attaching restored disks to VM", func() { - err := f.Clients.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(vm), vm) + err := f.Clients.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(vm), vm) Expect(err).NotTo(HaveOccurred()) vm.Spec.BlockDeviceRefs = []v1alpha2.BlockDeviceSpecRef{ @@ -211,24 +214,24 @@ var _ = Describe("DataExports", label.Slow(), Label(precheck.PrecheckSVDM, prech {Kind: v1alpha2.DiskDevice, Name: vdFromSnapshotExport.Name}, } - err = f.Clients.GenericClient().Update(context.Background(), vm) + err = f.Clients.GenericClient().Update(ctx, vm) Expect(err).NotTo(HaveOccurred()) }) By("Starting the VM", func() { - util.StartVirtualMachine(f, vm) - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.StartVirtualMachine(ctx, f, vm) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(vm), framework.LongTimeout) }) By("Verifying data on disk restored from VirtualDisk export", func() { - util.MountBlockDevice(f, vm, v1alpha2.DiskDevice, vdFromDiskExport.Name, mountPointData) + util.MountBlockDevice(ctx, f, vm, v1alpha2.DiskDevice, vdFromDiskExport.Name, mountPointData) restoredValue := util.ReadFile(f, vm, fileDataPath) Expect(restoredValue).To(Equal(testFileValue), "Data should match original") util.UnmountBlockDevice(f, vm, 
mountPointData) }) By("Verifying data on disk restored from VirtualDiskSnapshot export", func() { - util.MountBlockDevice(f, vm, v1alpha2.DiskDevice, vdFromSnapshotExport.Name, mountPointData) + util.MountBlockDevice(ctx, f, vm, v1alpha2.DiskDevice, vdFromSnapshotExport.Name, mountPointData) restoredValue := util.ReadFile(f, vm, fileDataPath) Expect(restoredValue).To(Equal(testFileValue), "Data should match original") util.UnmountBlockDevice(f, vm, mountPointData) @@ -244,12 +247,12 @@ func IsNFS() bool { return sc.Provisioner == framework.NFS } -func needPublishOption(f *framework.Framework) bool { +func needPublishOption(ctx context.Context, f *framework.Framework) bool { hostname, err := os.Hostname() Expect(err).NotTo(HaveOccurred(), "Failed to get hostname") var node corev1.Node err = f.Clients.GenericClient().Get( - context.Background(), + ctx, types.NamespacedName{Name: hostname}, &node, ) @@ -260,11 +263,11 @@ func needPublishOption(f *framework.Framework) bool { return false } -func exportData(f *framework.Framework, resourceType, name, outputFile string) { +func exportData(ctx context.Context, f *framework.Framework, resourceType, name, outputFile string) { opts := d8.DataExportOptions{ Namespace: f.Namespace().Name, OutputFile: outputFile, - Publish: needPublishOption(f), + Publish: needPublishOption(ctx, f), Timeout: framework.LongTimeout, Cleanup: true, } @@ -280,7 +283,7 @@ func exportData(f *framework.Framework, resourceType, name, outputFile string) { }) } -func createUploadDisk(f *framework.Framework, name string) *v1alpha2.VirtualDisk { +func createUploadDisk(ctx context.Context, f *framework.Framework, name string) *v1alpha2.VirtualDisk { vd := vdbuilder.New( vdbuilder.WithName(name), vdbuilder.WithNamespace(f.Namespace().Name), @@ -289,15 +292,15 @@ func createUploadDisk(f *framework.Framework, name string) *v1alpha2.VirtualDisk }), ) - err := f.CreateWithDeferredDeletion(context.Background(), vd) + err := f.CreateWithDeferredDeletion(ctx, vd) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.DiskWaitForUserUpload), framework.LongTimeout, vd) + util.UntilObjectPhase(ctx, string(v1alpha2.DiskWaitForUserUpload), framework.LongTimeout, vd) return vd } -func uploadFile(f *framework.Framework, vd *v1alpha2.VirtualDisk, filePath string) { - err := f.Clients.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(vd), vd) +func uploadFile(ctx context.Context, f *framework.Framework, vd *v1alpha2.VirtualDisk, filePath string) { + err := f.Clients.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(vd), vd) Expect(err).NotTo(HaveOccurred()) Expect(vd.Status.ImageUploadURLs).NotTo(BeNil(), "ImageUploadURLs should be set") Expect(vd.Status.ImageUploadURLs.External).NotTo(BeEmpty(), "External upload URL should be set") @@ -366,8 +369,8 @@ func handleUploadResponse(resp *http.Response) error { return fmt.Errorf("upload failed with status %d: %s", resp.StatusCode, body) } -func checkStorageVolumeDataManagerEnabled() (bool, error) { - sdnModule, err := framework.NewFramework("").GetModuleConfig(context.Background(), "storage-volume-data-manager") +func checkStorageVolumeDataManagerEnabled(ctx context.Context) (bool, error) { + sdnModule, err := framework.NewFramework("").GetModuleConfig(ctx, "storage-volume-data-manager") if err != nil { return false, err } diff --git a/test/e2e/blockdevice/importer_network_policy.go b/test/e2e/blockdevice/importer_network_policy.go index a9ad9c9138..326fda4af7 100644 --- 
a/test/e2e/blockdevice/importer_network_policy.go +++ b/test/e2e/blockdevice/importer_network_policy.go @@ -33,9 +33,12 @@ import ( var _ = Describe("ImporterNetworkPolicy", Label(precheck.NoPrecheck), func() { const testName = "importer-network-policy" - var f *framework.Framework - + var ( + f *framework.Framework + ctx context.Context + ) BeforeEach(func() { + ctx = context.Background() f = framework.NewFramework("") f.Before() DeferCleanup(f.After) @@ -44,37 +47,37 @@ var _ = Describe("ImporterNetworkPolicy", Label(precheck.NoPrecheck), func() { It("test network policy isolation for vi importer", func() { By("Create isolated project") project := object.NewIsolatedProject(testName, framework.NamespaceBasePrefix) - err := f.CreateWithDeferredDeletion(context.Background(), project) + err := f.CreateWithDeferredDeletion(ctx, project) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectState("Deployed", framework.ShortTimeout, project) + util.UntilObjectState(ctx, "Deployed", framework.ShortTimeout, project) By("Create virtual image") vi := object.NewGeneratedHTTPVIAlpineBIOS("vi-", project.Name) - err = f.CreateWithDeferredDeletion(context.Background(), vi) + err = f.CreateWithDeferredDeletion(ctx, vi) Expect(err).NotTo(HaveOccurred()) By("Check VI will be in ready phase") - util.UntilObjectPhase(string(v1alpha2.ImageReady), framework.LongTimeout, vi) + util.UntilObjectPhase(ctx, string(v1alpha2.ImageReady), framework.LongTimeout, vi) }) It("test network policy isolation for vd importer", func() { By("Create isolated project") project := object.NewIsolatedProject(testName, framework.NamespaceBasePrefix) - err := f.CreateWithDeferredDeletion(context.Background(), project) + err := f.CreateWithDeferredDeletion(ctx, project) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectState("Deployed", framework.ShortTimeout, project) + util.UntilObjectState(ctx, "Deployed", framework.ShortTimeout, project) By("Create virtual disk") vd := object.NewHTTPVDAlpineBIOS("vd", project.Name) - err = f.CreateWithDeferredDeletion(context.Background(), vd) + err = f.CreateWithDeferredDeletion(ctx, vd) Expect(err).NotTo(HaveOccurred()) By("Create virtual machine") vm := object.NewMinimalVM("vm-", project.Name, vmbuilder.WithDisks(vd)) - err = f.CreateWithDeferredDeletion(context.Background(), vm) + err = f.CreateWithDeferredDeletion(ctx, vm) Expect(err).NotTo(HaveOccurred()) By("Check VD will be in ready phase") - util.UntilObjectPhase(string(v1alpha2.DiskReady), framework.LongTimeout, vd) + util.UntilObjectPhase(ctx, string(v1alpha2.DiskReady), framework.LongTimeout, vd) }) }) diff --git a/test/e2e/blockdevice/virtual_disk_provisioning.go b/test/e2e/blockdevice/virtual_disk_provisioning.go index 7da097823b..c2f326667b 100644 --- a/test/e2e/blockdevice/virtual_disk_provisioning.go +++ b/test/e2e/blockdevice/virtual_disk_provisioning.go @@ -34,9 +34,12 @@ import ( ) var _ = Describe("VirtualDiskProvisioning", Label(precheck.NoPrecheck), func() { - var f *framework.Framework - + var ( + f *framework.Framework + ctx context.Context + ) BeforeEach(func() { + ctx = context.Background() f = framework.NewFramework("vd-provisioning") sc := framework.GetConfig().StorageClass.TemplateStorageClass if sc != nil && sc.Provisioner == framework.NFS { @@ -57,18 +60,18 @@ var _ = Describe("VirtualDiskProvisioning", Label(precheck.NoPrecheck), func() { By("Creating VirtualImage from precreated CVI", func() { vi = object.NewGeneratedVIFromCVI("vi-", f.Namespace().Name, object.PrecreatedCVIAlpineUEFI) - err := 
f.CreateWithDeferredDeletion(context.Background(), vi) + err := f.CreateWithDeferredDeletion(ctx, vi) Expect(err).NotTo(HaveOccurred()) }) By("Waiting for VirtualImage to be ready", func() { - util.UntilObjectPhase(string(v1alpha2.ImageReady), framework.LongTimeout, vi) + util.UntilObjectPhase(ctx, string(v1alpha2.ImageReady), framework.LongTimeout, vi) }) By("Creating VirtualDisk", func() { vd = object.NewVDFromVI("vd", f.Namespace().Name, vi, vdbuilder.WithSize(ptr.To(resource.MustParse("350Mi")))) - err := f.CreateWithDeferredDeletion(context.Background(), vd) + err := f.CreateWithDeferredDeletion(ctx, vd) Expect(err).NotTo(HaveOccurred()) }) @@ -80,12 +83,12 @@ var _ = Describe("VirtualDiskProvisioning", Label(precheck.NoPrecheck), func() { }, )) - err := f.CreateWithDeferredDeletion(context.Background(), vm) + err := f.CreateWithDeferredDeletion(ctx, vm) Expect(err).NotTo(HaveOccurred()) }) By("Waiting for VirtualDisk to be ready", func() { - util.UntilObjectPhase(string(v1alpha2.DiskReady), framework.LongTimeout, vd) + util.UntilObjectPhase(ctx, string(v1alpha2.DiskReady), framework.LongTimeout, vd) }) }) @@ -96,22 +99,22 @@ var _ = Describe("VirtualDiskProvisioning", Label(precheck.NoPrecheck), func() { ) By("Creating VirtualImage", func() { vi = object.NewGeneratedVIFromCVI("vi-", f.Namespace().Name, object.PrecreatedCVIAlpineUEFI) - err := f.CreateWithDeferredDeletion(context.Background(), vi) + err := f.CreateWithDeferredDeletion(ctx, vi) Expect(err).NotTo(HaveOccurred()) }) By("Waiting for VirtualImage to be ready", func() { - util.UntilObjectPhase(string(v1alpha2.ImageReady), framework.LongTimeout, vi) + util.UntilObjectPhase(ctx, string(v1alpha2.ImageReady), framework.LongTimeout, vi) }) By("Creating VirtualDisk", func() { vd = object.NewVDFromVI("vd", f.Namespace().Name, vi, vdbuilder.WithSize(ptr.To(resource.MustParse("350Mi")))) - err := f.CreateWithDeferredDeletion(context.Background(), vd) + err := f.CreateWithDeferredDeletion(ctx, vd) Expect(err).NotTo(HaveOccurred()) }) By("Waiting for VirtualDisk to be ready", func() { - util.UntilObjectPhase(string(v1alpha2.DiskReady), framework.LongTimeout, vd) + util.UntilObjectPhase(ctx, string(v1alpha2.DiskReady), framework.LongTimeout, vd) }) By("Creating VirtualMachine and waiting for VirtualMachine to be ready", func() { @@ -119,10 +122,10 @@ var _ = Describe("VirtualDiskProvisioning", Label(precheck.NoPrecheck), func() { Kind: v1alpha2.VirtualDiskKind, Name: vd.Name, })) - err := f.CreateWithDeferredDeletion(context.Background(), vm) + err := f.CreateWithDeferredDeletion(ctx, vm) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, vm) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, vm) }) }) @@ -131,12 +134,12 @@ var _ = Describe("VirtualDiskProvisioning", Label(precheck.NoPrecheck), func() { By("Creating VirtualDisk", func() { vd = object.NewVDFromCVI("vd", f.Namespace().Name, object.PrecreatedCVIAlpineBIOS, vdbuilder.WithSize(ptr.To(resource.MustParse("350Mi")))) - err := f.CreateWithDeferredDeletion(context.Background(), vd) + err := f.CreateWithDeferredDeletion(ctx, vd) Expect(err).NotTo(HaveOccurred()) }) By("Waiting for VirtualDisk to be ready", func() { - util.UntilObjectPhase(string(v1alpha2.DiskReady), framework.LongTimeout, vd) + util.UntilObjectPhase(ctx, string(v1alpha2.DiskReady), framework.LongTimeout, vd) }) By("Creating VirtualMachine and waiting for VirtualMachine to be ready", func() { @@ -144,10 +147,10 @@ var 
_ = Describe("VirtualDiskProvisioning", Label(precheck.NoPrecheck), func() { Kind: v1alpha2.VirtualDiskKind, Name: vd.Name, })) - err := f.CreateWithDeferredDeletion(context.Background(), vm) + err := f.CreateWithDeferredDeletion(ctx, vm) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, vm) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, vm) }) }) @@ -156,12 +159,12 @@ var _ = Describe("VirtualDiskProvisioning", Label(precheck.NoPrecheck), func() { By("Creating VirtualDisk", func() { vd = object.NewHTTPVDAlpineBIOS("vd", f.Namespace().Name, vdbuilder.WithSize(ptr.To(resource.MustParse("350Mi")))) - err := f.CreateWithDeferredDeletion(context.Background(), vd) + err := f.CreateWithDeferredDeletion(ctx, vd) Expect(err).NotTo(HaveOccurred()) }) By("Waiting for VirtualDisk to be ready", func() { - util.UntilObjectPhase(string(v1alpha2.DiskReady), framework.LongTimeout, vd) + util.UntilObjectPhase(ctx, string(v1alpha2.DiskReady), framework.LongTimeout, vd) }) By("Creating VirtualMachine and waiting for VirtualMachine to be ready", func() { @@ -169,10 +172,10 @@ var _ = Describe("VirtualDiskProvisioning", Label(precheck.NoPrecheck), func() { Kind: v1alpha2.VirtualDiskKind, Name: vd.Name, })) - err := f.CreateWithDeferredDeletion(context.Background(), vm) + err := f.CreateWithDeferredDeletion(ctx, vm) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, vm) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, vm) }) }) }) diff --git a/test/e2e/blockdevice/virtual_image_creation.go b/test/e2e/blockdevice/virtual_image_creation.go index a7a864a345..e0bc120762 100644 --- a/test/e2e/blockdevice/virtual_image_creation.go +++ b/test/e2e/blockdevice/virtual_image_creation.go @@ -39,9 +39,13 @@ import ( ) var _ = Describe("VirtualImageCreation", Label(precheck.PrecheckSnapshot), func() { - var f *framework.Framework + var ( + f *framework.Framework + ctx context.Context + ) BeforeEach(func() { + ctx = context.Background() f = framework.NewFramework("vi-creation") sc := framework.GetConfig().StorageClass.TemplateStorageClass if sc != nil && sc.Provisioner == framework.NFS { @@ -75,16 +79,16 @@ var _ = Describe("VirtualImageCreation", Label(precheck.PrecheckSnapshot), func( }, ), ) - err := f.CreateWithDeferredDeletion(context.Background(), vd) + err := f.CreateWithDeferredDeletion(ctx, vd) Expect(err).NotTo(HaveOccurred()) vm := object.NewMinimalVM("vm-", f.Namespace().Name, vmbuilder.WithBlockDeviceRefs(v1alpha2.BlockDeviceSpecRef{ Kind: v1alpha2.VirtualDiskKind, Name: vd.Name, })) - err = f.CreateWithDeferredDeletion(context.Background(), vm) + err = f.CreateWithDeferredDeletion(ctx, vm) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.DiskReady), framework.LongTimeout, vd) - err = f.Delete(context.Background(), vm) + util.UntilObjectPhase(ctx, string(v1alpha2.DiskReady), framework.LongTimeout, vd) + err = f.Delete(ctx, vm) Expect(err).NotTo(HaveOccurred()) }) @@ -95,9 +99,9 @@ var _ = Describe("VirtualImageCreation", Label(precheck.PrecheckSnapshot), func( vdsnapshotbuilder.WithVirtualDiskName(vd.Name), vdsnapshotbuilder.WithRequiredConsistency(true), ) - err := f.CreateWithDeferredDeletion(context.Background(), vdSnapshot) + err := f.CreateWithDeferredDeletion(ctx, vdSnapshot) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.VirtualDiskSnapshotPhaseReady), 
framework.ShortTimeout, vdSnapshot) + util.UntilObjectPhase(ctx, string(v1alpha2.VirtualDiskSnapshotPhaseReady), framework.ShortTimeout, vdSnapshot) }) By("Generating base cvis", func() { @@ -168,11 +172,11 @@ var _ = Describe("VirtualImageCreation", Label(precheck.PrecheckSnapshot), func( By("Creating base images", func() { for _, cvi := range baseCvis { - err := f.CreateWithDeferredDeletion(context.Background(), cvi) + err := f.CreateWithDeferredDeletion(ctx, cvi) Expect(err).NotTo(HaveOccurred()) } for _, vi := range baseVis { - err := f.CreateWithDeferredDeletion(context.Background(), vi) + err := f.CreateWithDeferredDeletion(ctx, vi) Expect(err).NotTo(HaveOccurred()) } }) @@ -241,12 +245,12 @@ var _ = Describe("VirtualImageCreation", Label(precheck.PrecheckSnapshot), func( By("Creating images", func() { for _, vi := range vis { - err := f.CreateWithDeferredDeletion(context.Background(), vi) + err := f.CreateWithDeferredDeletion(ctx, vi) Expect(err).NotTo(HaveOccurred()) } for _, cvi := range cvis { - err := f.CreateWithDeferredDeletion(context.Background(), cvi) + err := f.CreateWithDeferredDeletion(ctx, cvi) Expect(err).NotTo(HaveOccurred()) } }) @@ -263,7 +267,7 @@ var _ = Describe("VirtualImageCreation", Label(precheck.PrecheckSnapshot), func( for _, cvi := range cvis { objects = append(objects, cvi) } - util.UntilObjectPhase(string(v1alpha2.ImageReady), framework.LongTimeout, objects...) + util.UntilObjectPhase(ctx, string(v1alpha2.ImageReady), framework.LongTimeout, objects...) }) }) }) diff --git a/test/e2e/default_config.yaml b/test/e2e/default_config.yaml index c2eb826baf..246e4ee90d 100644 --- a/test/e2e/default_config.yaml +++ b/test/e2e/default_config.yaml @@ -1,6 +1,12 @@ namespaceSuffix: "e2e" +# isCleanupNeeded controls cleanup of resources created during test execution (VMs, VDs, namespaces, etc.) +# Enabled by default. Set to false to skip cleanup for debugging. isCleanupNeeded: true +# isPrecreatedCVICleanupNeeded controls cleanup of precreated ClusterVirtualImages shared across test runs +# Disabled by default: CVIs persist between runs for faster execution. +# Set to true to delete them after the suite. +isPrecreatedCVICleanupNeeded: false clusterTransport: kubeConfig: "" diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index a7bea2acef..be3911b5bd 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -17,6 +17,7 @@ limitations under the License. package e2e import ( + "context" "testing" . "github.com/onsi/ginkgo/v2" @@ -54,14 +55,12 @@ var _ = SynchronizedBeforeSuite(func() { // Load spec labels to determine which prechecks to run precheck.LoadSpecLabelsFromFile(precheck.LabelsFile, GinkgoLabelFilter()) // Run prechecks based on loaded labels - // Must run after resource initialization to avoid panic in SynchronizedAfterSuite precheck.Run(framework.NewFramework(""), GinkgoLabelFilter()) - - bootstrapPrecreatedCVIs() }, func() {}) var _ = SynchronizedAfterSuite(func() { - cleanupPrecreatedCVIs() + // Cleanup precreated CVIs if PRECREATED_CVI_CLEANUP=yes + precheck.CleanupPrecreatedCVIs(context.Background(), framework.NewFramework("")) }, func() { legacy.NewAfterAllProcessBody() controller.NewAfterAllProcessBody() diff --git a/test/e2e/internal/config/cleanup.go b/test/e2e/internal/config/cleanup.go index e0eca3a767..5e3478b1cb 100644 --- a/test/e2e/internal/config/cleanup.go +++ b/test/e2e/internal/config/cleanup.go @@ -16,11 +16,6 @@ limitations under the License. 
package config -import ( - "fmt" - "os" -) - // PrecreatedCVICleanupEnv defines an environment variable to explicitly enable deletion of precreated CVIs after the suite. // // By default, precreated CVIs are not deleted: they are shared across runs and may be reused. @@ -30,19 +25,3 @@ const PrecreatedCVICleanupEnv = "PRECREATED_CVI_CLEANUP" // PostCleanupEnv defines an environment variable used to explicitly request the deletion of created/used resources. // Valid values: "yes", "no", or "" (default = yes). const PostCleanupEnv = "POST_CLEANUP" - -func CheckPrecreatedCVICleanupOption() error { - env := os.Getenv(PrecreatedCVICleanupEnv) - switch env { - case "yes", "no", "": - return nil - default: - return fmt.Errorf(`invalid value for %s env: %q (allowed: "", "yes", "no")`, PrecreatedCVICleanupEnv, env) - } -} - -// IsPrecreatedCVICleanupNeeded returns true only when PRECREATED_CVI_CLEANUP is explicitly set to "yes". -// Default (unset, empty, or "no"): precreated CVIs are not deleted after the suite. -func IsPrecreatedCVICleanupNeeded() bool { - return os.Getenv(PrecreatedCVICleanupEnv) == "yes" -} diff --git a/test/e2e/internal/config/config.go b/test/e2e/internal/config/config.go index 5f54fc48de..e51880836f 100644 --- a/test/e2e/internal/config/config.go +++ b/test/e2e/internal/config/config.go @@ -76,7 +76,13 @@ type Config struct { LogFilter []string `yaml:"logFilter"` CleanupResources []string `yaml:"cleanupResources"` RegexpLogFilter []regexp.Regexp `yaml:"regexpLogFilter"` - IsCleanupNeeded bool `yaml:"isCleanupNeeded"` + // IsCleanupNeeded controls cleanup of resources created during test execution (VMs, VDs, namespaces, etc.). + // Enabled by default (POST_CLEANUP=yes or unset). Set to false to skip cleanup for debugging. + IsCleanupNeeded bool `yaml:"isCleanupNeeded"` + // IsPrecreatedCVICleanupNeeded controls cleanup of precreated ClusterVirtualImages that are shared across test runs. + // Disabled by default (PRECREATED_CVI_CLEANUP=no): CVIs persist between runs for faster execution. + // Set to true to delete them after the suite. + IsPrecreatedCVICleanupNeeded bool `yaml:"isPrecreatedCVICleanupNeeded"` StorageClass StorageClass } @@ -135,6 +141,10 @@ func (c *Config) setEnvs() error { if e, ok := os.LookupEnv(PostCleanupEnv); ok { c.IsCleanupNeeded = e != "no" } + // isPrecreatedCVICleanupNeeded: env var has priority over yaml config + if e, ok := os.LookupEnv("PRECREATED_CVI_CLEANUP"); ok { + c.IsPrecreatedCVICleanupNeeded = e == "yes" + } // ClusterTransport if e, ok := os.LookupEnv("E2E_CLUSTERTRANSPORT_KUBECONFIG"); ok { c.ClusterTransport.KubeConfig = e diff --git a/test/e2e/internal/object/const.go b/test/e2e/internal/object/const.go index aab2a08c39..9639843512 100644 --- a/test/e2e/internal/object/const.go +++ b/test/e2e/internal/object/const.go @@ -16,25 +16,7 @@ limitations under the License. 
package object -const imageBaseURL = "https://89d64382-20df-4581-8cc7-80df331f67fa.selstorage.ru" - const ( - ImageURLAlpineUEFI = imageBaseURL + "/alpine/alpine-3-23-3-uefi-base.qcow2" - ImageURLAlpineBIOS = imageBaseURL + "/alpine/alpine-3-23-3-bios-base.qcow2" - ImageURLAlpineUEFIPerf = imageBaseURL + "/alpine/alpine-3-21-uefi-perf.qcow2" - ImageURLAlpineBIOSPerf = imageBaseURL + "/alpine/alpine-3-21-bios-perf.qcow2" - ImageURLUbuntu = imageBaseURL + "/ubuntu/ubuntu-24.04-minimal-cloudimg-amd64.qcow2" - ImageURLUbuntuISO = imageBaseURL + "/ubuntu/ubuntu-24.04.2-live-server-amd64.iso" - ImageURLCirros = imageBaseURL + "/cirros/cirros-0.5.1.qcow2" - ImageURLDebian = imageBaseURL + "/debian/debian-12-with-tpm2-tools-amd64-20250814-2204.qcow2" - - ImageURLContainerImage = "cr.yandex/crpvs5j3nh1mi2tpithr/e2e/alpine/alpine-image:latest" - ImageURLLegacyContainerImage = "cr.yandex/crpvs5j3nh1mi2tpithr/e2e/alpine/alpine-3-20:latest" - - // Not bootable - ImageTestDataQCOW = imageBaseURL + "/test/test.qcow2" - ImageTestDataISO = imageBaseURL + "/test/test.iso" - Mi256 = 256 * 1024 * 1024 DefaultVMClass = "generic-for-e2e" diff --git a/test/e2e/internal/object/precreated_cvi.go b/test/e2e/internal/object/precreated_cvi.go index 3707807798..0820a9b218 100644 --- a/test/e2e/internal/object/precreated_cvi.go +++ b/test/e2e/internal/object/precreated_cvi.go @@ -21,7 +21,10 @@ import ( "github.com/deckhouse/virtualization/api/core/v1alpha2" ) +const imageBaseURL = "https://89d64382-20df-4581-8cc7-80df331f67fa.selstorage.ru" + const ( + // Precreated CVI names PrecreatedCVIAlpineUEFI = "v12n-e2e-alpine-uefi" PrecreatedCVIAlpineBIOS = "v12n-e2e-alpine-bios" PrecreatedCVIAlpineUEFIPerf = "v12n-e2e-alpine-uefi-perf" @@ -34,6 +37,23 @@ const ( PrecreatedCVIDebian = "v12n-e2e-debian" PrecreatedCVITestDataQCOW = "v12n-e2e-testdata-qcow" PrecreatedCVITestDataISO = "v12n-e2e-testdata-iso" + + // Image URLs + ImageURLAlpineUEFI = imageBaseURL + "/alpine/alpine-3-23-3-uefi-base.qcow2" + ImageURLAlpineBIOS = imageBaseURL + "/alpine/alpine-3-23-3-bios-base.qcow2" + ImageURLAlpineUEFIPerf = imageBaseURL + "/alpine/alpine-3-21-uefi-perf.qcow2" + ImageURLAlpineBIOSPerf = imageBaseURL + "/alpine/alpine-3-21-bios-perf.qcow2" + ImageURLUbuntu = imageBaseURL + "/ubuntu/ubuntu-24.04-minimal-cloudimg-amd64.qcow2" + ImageURLUbuntuISO = imageBaseURL + "/ubuntu/ubuntu-24.04.2-live-server-amd64.iso" + ImageURLCirros = imageBaseURL + "/cirros/cirros-0.5.1.qcow2" + ImageURLDebian = imageBaseURL + "/debian/debian-12-with-tpm2-tools-amd64-20250814-2204.qcow2" + + ImageURLContainerImage = "cr.yandex/crpvs5j3nh1mi2tpithr/e2e/alpine/alpine-image:latest" + ImageURLLegacyContainerImage = "cr.yandex/crpvs5j3nh1mi2tpithr/e2e/alpine/alpine-3-20:latest" + + // Test data (not bootable) + ImageTestDataQCOW = imageBaseURL + "/test/test.qcow2" + ImageTestDataISO = imageBaseURL + "/test/test.iso" ) // PrecreatedClusterVirtualImages returns the suite-wide CVIs shared by e2e tests. diff --git a/test/e2e/internal/precheck/labels.go b/test/e2e/internal/precheck/labels.go index 47f0d5bd01..b18e131722 100644 --- a/test/e2e/internal/precheck/labels.go +++ b/test/e2e/internal/precheck/labels.go @@ -48,6 +48,10 @@ const ( // PrecheckPostCleanup - test requires postcleanup to be configured. PrecheckPostCleanup = "postcleanup-precheck" + // PrecheckPrecreatedCVI - test requires precreated ClusterVirtualImages to be available. + // This is a common precheck that runs for all tests automatically. 
+ PrecheckPrecreatedCVI = "precreatedcvi-precheck" + // NoPrecheck - test doesn't require any prechecks. // Use this label for tests that don't depend on cluster configuration. NoPrecheck = "no-precheck" @@ -65,6 +69,7 @@ func KnownPrecheckLabels() []string { PrecheckVirtualization, PrecheckUSB, PrecheckPostCleanup, + PrecheckPrecreatedCVI, NoPrecheck, } } diff --git a/test/e2e/internal/precheck/precreatedcvi.go b/test/e2e/internal/precheck/precreatedcvi.go new file mode 100644 index 0000000000..7adfb94bbe --- /dev/null +++ b/test/e2e/internal/precheck/precreatedcvi.go @@ -0,0 +1,151 @@ +/* +Copyright 2026 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package precheck + +import ( + "context" + "fmt" + "os" + + . "github.com/onsi/ginkgo/v2" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/test/e2e/internal/config" + "github.com/deckhouse/virtualization/test/e2e/internal/framework" + "github.com/deckhouse/virtualization/test/e2e/internal/object" + "github.com/deckhouse/virtualization/test/e2e/internal/util" +) + +const ( + precreatedCVIPrecheckEnvName = "PRECREATED_CVI_PRECHECK" +) + +// precreatedCVIPrecheck implements Precheck interface for precreated ClusterVirtualImages. +// This is a common precheck that creates or verifies precreated CVIs for the e2e suite. 
+type precreatedCVIPrecheck struct{} + +func (p *precreatedCVIPrecheck) Label() string { + return PrecheckPrecreatedCVI +} + +func (p *precreatedCVIPrecheck) Run(ctx context.Context, f *framework.Framework) error { + if !isCheckEnabled(precreatedCVIPrecheckEnvName) { + _, _ = GinkgoWriter.Write([]byte("Precreated CVI precheck is disabled.\n")) + return nil + } + + if err := p.validateCleanupEnv(); err != nil { + return err + } + + cvis := object.PrecreatedClusterVirtualImages() + By(fmt.Sprintf("Ensuring %d precreated CVIs are available", len(cvis))) + + if err := p.ensureCVIs(ctx, f, cvis); err != nil { + return err + } + + // Wait for all CVIs to become ready + By(fmt.Sprintf("Waiting for all %d precreated CVIs to be ready", len(cvis))) + p.waitForCVIsReady(ctx, cvis) + + By(fmt.Sprintf("All %d precreated CVIs are ready", len(cvis))) + return nil +} + +func (p *precreatedCVIPrecheck) validateCleanupEnv() error { + env := os.Getenv(config.PrecreatedCVICleanupEnv) + switch env { + case "", "yes", "no": + // valid values + default: + return fmt.Errorf("invalid value for %s env: %q (allowed: \"\", \"yes\", \"no\")", config.PrecreatedCVICleanupEnv, env) + } + return nil +} + +func (p *precreatedCVIPrecheck) ensureCVIs(ctx context.Context, f *framework.Framework, cvis []*v1alpha2.ClusterVirtualImage) error { + k8sClient := f.GenericClient() + + for _, cvi := range cvis { + existing := &v1alpha2.ClusterVirtualImage{} + err := k8sClient.Get(ctx, client.ObjectKey{Name: cvi.GetName()}, existing) + + if err == nil { + // CVI already exists; readiness is awaited below + if existing.Status.Phase != v1alpha2.ImageReady { + _, _ = fmt.Fprintf(GinkgoWriter, + "CVI %q exists but not ready (phase: %s), waiting...\n", + cvi.GetName(), existing.Status.Phase) + } + continue + } + + if !k8serrors.IsNotFound(err) { + return fmt.Errorf("failed to get CVI %q: %w", cvi.GetName(), err) + } + + // CVI not found, create it + _, _ = fmt.Fprintf(GinkgoWriter, "Creating CVI %q\n", cvi.GetName()) + + err = k8sClient.Create(ctx, cvi) + if err != nil && !k8serrors.IsAlreadyExists(err) { + return fmt.Errorf("failed to create CVI %q: %w", cvi.GetName(), err) + } + } + return nil +} + +func (p *precreatedCVIPrecheck) waitForCVIsReady(ctx context.Context, cvis []*v1alpha2.ClusterVirtualImage) { + GinkgoHelper() + + // Convert []*ClusterVirtualImage to []client.Object for util.UntilObjectPhase + objs := make([]client.Object, 0, len(cvis)) + for _, cvi := range cvis { + objs = append(objs, cvi) + } + + // Use util's polling with framework.LongTimeout + util.UntilObjectPhase(ctx, string(v1alpha2.ImageReady), framework.LongTimeout, objs...) +} + +// Register precreatedCVI precheck as common (runs for all tests). +func init() { + RegisterPrecheck(&precreatedCVIPrecheck{}, true) +} + +// CleanupPrecreatedCVIs deletes precreated CVIs when cleanup is enabled via isPrecreatedCVICleanupNeeded or PRECREATED_CVI_CLEANUP=yes. 
+func CleanupPrecreatedCVIs(ctx context.Context, f *framework.Framework) { + GinkgoHelper() + + if !framework.GetConfig().IsPrecreatedCVICleanupNeeded { + return + } + + cvis := object.PrecreatedClusterVirtualImages() + By(fmt.Sprintf("Cleaning up %d precreated CVIs", len(cvis))) + + k8sClient := f.GenericClient() + for _, cvi := range cvis { + err := k8sClient.Delete(ctx, cvi) + if err != nil && !k8serrors.IsNotFound(err) { + _, _ = fmt.Fprintf(GinkgoWriter, "Failed to delete CVI %q: %v\n", cvi.GetName(), err) + } + } +} diff --git a/test/e2e/internal/precreatedcvi/manager.go b/test/e2e/internal/precreatedcvi/manager.go deleted file mode 100644 index cb307f0fb4..0000000000 --- a/test/e2e/internal/precreatedcvi/manager.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2026 Flant JSC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package precreatedcvi provides suite-level lifecycle (bootstrap and cleanup) for -// precreated ClusterVirtualImages used by e2e tests. -package precreatedcvi - -import ( - "context" - "fmt" - - k8serrors "k8s.io/apimachinery/pkg/api/errors" - crclient "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/deckhouse/virtualization/api/core/v1alpha2" - "github.com/deckhouse/virtualization/test/e2e/internal/framework" - "github.com/deckhouse/virtualization/test/e2e/internal/object" -) - -const labelKey = "v12n-e2e-precreated" - -// Manager runs bootstrap and cleanup of precreated CVIs for the e2e suite. -// The list of CVIs is loaded once during Bootstrap and reused in Cleanup. -type Manager struct { - cvis []*v1alpha2.ClusterVirtualImage -} - -// NewManager returns a new precreated CVI manager. -func NewManager() *Manager { - return &Manager{} -} - -// Bootstrap creates or reuses precreated CVIs in the cluster. -// Call once from SynchronizedBeforeSuite (process 1). -func (m *Manager) Bootstrap(ctx context.Context) error { - m.cvis = object.PrecreatedClusterVirtualImages() - - for _, cvi := range m.cvis { - if err := m.createOrReuse(ctx, cvi); err != nil { - return fmt.Errorf("create or reuse CVI %q: %w", cvi.Name, err) - } - } - - return nil -} - -// Cleanup deletes precreated CVIs. -// Call from SynchronizedAfterSuite (process 1). Uses the same CVI list as Bootstrap; -// if Bootstrap was not run, the list is loaded from object so that cleanup can still run. -func (m *Manager) Cleanup(ctx context.Context) error { - if len(m.cvis) == 0 { - m.cvis = object.PrecreatedClusterVirtualImages() - } - - f := framework.NewFramework("") - return f.Delete(ctx, m.CVIsAsObjects()...) -} - -// CVIsAsObjects returns all managed CVIs as controller-runtime Objects. 
-func (m *Manager) CVIsAsObjects() []crclient.Object { - objs := make([]crclient.Object, 0, len(m.cvis)) - for _, cvi := range m.cvis { - objs = append(objs, cvi) - } - return objs -} - -func (m *Manager) createOrReuse(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) error { - applyLabel(cvi) - - err := framework.GetClients().GenericClient().Create(ctx, cvi) - if err != nil && !k8serrors.IsAlreadyExists(err) { - return err - } - return nil -} - -func applyLabel(cvi *v1alpha2.ClusterVirtualImage) { - labels := cvi.GetLabels() - if labels == nil { - labels = make(map[string]string) - } - labels[labelKey] = "true" - cvi.SetLabels(labels) -} diff --git a/test/e2e/internal/util/block_device.go b/test/e2e/internal/util/block_device.go index 560f376d75..e57a4786c5 100644 --- a/test/e2e/internal/util/block_device.go +++ b/test/e2e/internal/util/block_device.go @@ -35,10 +35,10 @@ import ( "github.com/deckhouse/virtualization/test/e2e/internal/framework" ) -func GetBlockDevicePath(f *framework.Framework, vm *v1alpha2.VirtualMachine, bdKind v1alpha2.BlockDeviceKind, bdName string) string { +func GetBlockDevicePath(ctx context.Context, f *framework.Framework, vm *v1alpha2.VirtualMachine, bdKind v1alpha2.BlockDeviceKind, bdName string) string { GinkgoHelper() - serial, ok := GetBlockDeviceSerialNumber(vm, bdKind, bdName) + serial, ok := GetBlockDeviceSerialNumber(ctx, vm, bdKind, bdName) Expect(ok).To(BeTrue(), "failed to get block device serial number") devicePath, err := GetBlockDeviceBySerial(f, vm, serial) @@ -46,10 +46,10 @@ func GetBlockDevicePath(f *framework.Framework, vm *v1alpha2.VirtualMachine, bdK return devicePath } -func CreateBlockDeviceFilesystem(f *framework.Framework, vm *v1alpha2.VirtualMachine, bdKind v1alpha2.BlockDeviceKind, bdName, fsType string) { +func CreateBlockDeviceFilesystem(ctx context.Context, f *framework.Framework, vm *v1alpha2.VirtualMachine, bdKind v1alpha2.BlockDeviceKind, bdName, fsType string) { GinkgoHelper() - serial, ok := GetBlockDeviceSerialNumber(vm, bdKind, bdName) + serial, ok := GetBlockDeviceSerialNumber(ctx, vm, bdKind, bdName) Expect(ok).To(BeTrue(), "failed to get block device serial number") devicePath, err := GetBlockDeviceBySerial(f, vm, serial) @@ -59,10 +59,10 @@ func CreateBlockDeviceFilesystem(f *framework.Framework, vm *v1alpha2.VirtualMac Expect(err).NotTo(HaveOccurred()) } -func MountBlockDevice(f *framework.Framework, vm *v1alpha2.VirtualMachine, bdKind v1alpha2.BlockDeviceKind, bdName, mountPoint string) { +func MountBlockDevice(ctx context.Context, f *framework.Framework, vm *v1alpha2.VirtualMachine, bdKind v1alpha2.BlockDeviceKind, bdName, mountPoint string) { GinkgoHelper() - serial, ok := GetBlockDeviceSerialNumber(vm, bdKind, bdName) + serial, ok := GetBlockDeviceSerialNumber(ctx, vm, bdKind, bdName) Expect(ok).To(BeTrue(), "failed to get block device serial number") devicePath, err := GetBlockDeviceBySerial(f, vm, serial) @@ -79,10 +79,10 @@ func UnmountBlockDevice(f *framework.Framework, vm *v1alpha2.VirtualMachine, mou Expect(err).NotTo(HaveOccurred()) } -func RegisterFstabEntry(f *framework.Framework, vm *v1alpha2.VirtualMachine, bdKind v1alpha2.BlockDeviceKind, bdName string) { +func RegisterFstabEntry(ctx context.Context, f *framework.Framework, vm *v1alpha2.VirtualMachine, bdKind v1alpha2.BlockDeviceKind, bdName string) { GinkgoHelper() - serial, ok := GetBlockDeviceSerialNumber(vm, bdKind, bdName) + serial, ok := GetBlockDeviceSerialNumber(ctx, vm, bdKind, bdName) Expect(ok).To(BeTrue(), "failed to get block device 
serial number") cmd := fmt.Sprintf(`UUID=$(lsblk -o SERIAL,UUID | grep %s | awk "{print \$2}"); echo "UUID=$UUID /mnt ext4 defaults 0 0" | sudo tee -a /etc/fstab`, serial) @@ -90,10 +90,10 @@ func RegisterFstabEntry(f *framework.Framework, vm *v1alpha2.VirtualMachine, bdK Expect(err).NotTo(HaveOccurred()) } -func GetBlockDeviceHash(f *framework.Framework, vm *v1alpha2.VirtualMachine, bdKind v1alpha2.BlockDeviceKind, bdName string) string { +func GetBlockDeviceHash(ctx context.Context, f *framework.Framework, vm *v1alpha2.VirtualMachine, bdKind v1alpha2.BlockDeviceKind, bdName string) string { GinkgoHelper() - serial, ok := GetBlockDeviceSerialNumber(vm, bdKind, bdName) + serial, ok := GetBlockDeviceSerialNumber(ctx, vm, bdKind, bdName) Expect(ok).To(BeTrue(), "failed to get block device serial number") devicePath, err := GetBlockDeviceBySerial(f, vm, serial) @@ -128,12 +128,12 @@ func GetBlockDeviceBySerial(f *framework.Framework, vm *v1alpha2.VirtualMachine, return "", errors.New("no block device found") } -func GetBlockDeviceSerialNumber(vm *v1alpha2.VirtualMachine, bdKind v1alpha2.BlockDeviceKind, bdName string) (string, bool) { +func GetBlockDeviceSerialNumber(ctx context.Context, vm *v1alpha2.VirtualMachine, bdKind v1alpha2.BlockDeviceKind, bdName string) (string, bool) { unstructuredVMI, err := framework.GetClients().DynamicClient().Resource(schema.GroupVersionResource{ Group: "internal.virtualization.deckhouse.io", Version: "v1", Resource: "internalvirtualizationvirtualmachineinstances", - }).Namespace(vm.Namespace).Get(context.Background(), vm.Name, metav1.GetOptions{}) + }).Namespace(vm.Namespace).Get(ctx, vm.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) var kvvmi virtv1.VirtualMachineInstance diff --git a/test/e2e/internal/util/until.go b/test/e2e/internal/util/until.go index 422d62a761..8044e8f724 100644 --- a/test/e2e/internal/util/until.go +++ b/test/e2e/internal/util/until.go @@ -35,9 +35,9 @@ import ( // It accepts a runtime.Object (which serves as a template with name and namespace), // expected phase string, and timeout duration. // The GVK is automatically extracted from the object via the client's scheme. -func UntilObjectPhase(expectedPhase string, timeout time.Duration, objs ...client.Object) { +func UntilObjectPhase(ctx context.Context, expectedPhase string, timeout time.Duration, objs ...client.Object) { GinkgoHelper() - untilObjectField("status.phase", expectedPhase, timeout, objs...) + untilObjectField(ctx, "status.phase", expectedPhase, timeout, objs...) } // UntilConditionReason waits for the specified conditionType in status.conditions to have the given reason value for all provided objects. @@ -151,9 +151,9 @@ func UntilConditionState( // It accepts a runtime.Object (which serves as a template with name and namespace), // expected state string, and timeout duration. // The GVK is automatically extracted from the object via the client's scheme. -func UntilObjectState(expectedState string, timeout time.Duration, objs ...client.Object) { +func UntilObjectState(ctx context.Context, expectedState string, timeout time.Duration, objs ...client.Object) { GinkgoHelper() - untilObjectField("status.state", expectedState, timeout, objs...) + untilObjectField(ctx, "status.state", expectedState, timeout, objs...) } // extractField extracts a string value from an unstructured object at the provided fieldPath (dot-separated, e.g. "status.phase" or "metadata.name"). 
@@ -175,7 +175,7 @@ func extractField(obj client.Object, fieldPath string) string { // fieldPath (dot-separated path to the field, e.g. "status.phase" or "metadata.name"), // expected value string, field name for error messages, and timeout duration. // The GVK is automatically extracted from the object via the client's scheme. -func untilObjectField(fieldPath, expectedValue string, timeout time.Duration, objs ...client.Object) { +func untilObjectField(ctx context.Context, fieldPath, expectedValue string, timeout time.Duration, objs ...client.Object) { Eventually(func(g Gomega) { for _, obj := range objs { key := client.ObjectKeyFromObject(obj) @@ -188,7 +188,7 @@ func untilObjectField(fieldPath, expectedValue string, timeout time.Duration, ob // Create a new unstructured object for each Get call u := getTemplateUnstructured(obj).DeepCopy() - err := framework.GetClients().GenericClient().Get(context.Background(), key, u) + err := framework.GetClients().GenericClient().Get(ctx, key, u) if err != nil { g.Expect(err).NotTo(HaveOccurred(), "failed to get object %s%s%s", namespace, divider, name) } diff --git a/test/e2e/internal/util/vm.go b/test/e2e/internal/util/vm.go index c1a11dba64..62bd1e2b34 100644 --- a/test/e2e/internal/util/vm.go +++ b/test/e2e/internal/util/vm.go @@ -157,11 +157,11 @@ func getInternalVirtualMachineInstance(ctx context.Context, vm *v1alpha2.Virtual return obj.VirtualMachineInstance, nil } -func UntilVMAgentReady(key client.ObjectKey, timeout time.Duration) { +func UntilVMAgentReady(ctx context.Context, key client.ObjectKey, timeout time.Duration) { GinkgoHelper() Eventually(func() error { - vm, err := framework.GetClients().VirtClient().VirtualMachines(key.Namespace).Get(context.Background(), key.Name, metav1.GetOptions{}) + vm, err := framework.GetClients().VirtClient().VirtualMachines(key.Namespace).Get(ctx, key.Name, metav1.GetOptions{}) if err != nil { return err } @@ -262,7 +262,7 @@ func MigrateVirtualMachine(f *framework.Framework, vm *v1alpha2.VirtualMachine, Expect(err).NotTo(HaveOccurred()) } -func StartVirtualMachine(f *framework.Framework, vm *v1alpha2.VirtualMachine, options ...vmopbuilder.Option) { +func StartVirtualMachine(ctx context.Context, f *framework.Framework, vm *v1alpha2.VirtualMachine, options ...vmopbuilder.Option) { GinkgoHelper() opts := []vmopbuilder.Option{ @@ -274,7 +274,7 @@ func StartVirtualMachine(f *framework.Framework, vm *v1alpha2.VirtualMachine, op opts = append(opts, options...) vmop := vmopbuilder.New(opts...) 
- err := f.CreateWithDeferredDeletion(context.Background(), vmop) + err := f.CreateWithDeferredDeletion(ctx, vmop) Expect(err).NotTo(HaveOccurred()) } diff --git a/test/e2e/legacy/legacy.go b/test/e2e/legacy/legacy.go index 93114dc451..c71e7f4e34 100644 --- a/test/e2e/legacy/legacy.go +++ b/test/e2e/legacy/legacy.go @@ -66,10 +66,6 @@ func Init() error { } func configure() (err error) { - if err = config.CheckPrecreatedCVICleanupOption(); err != nil { - return err - } - conf = framework.GetConfig() defer framework.SetConfig(conf) diff --git a/test/e2e/snapshot/vmsop.go b/test/e2e/snapshot/vmsop.go index 90152dfcbf..b13c6a1b0d 100644 --- a/test/e2e/snapshot/vmsop.go +++ b/test/e2e/snapshot/vmsop.go @@ -48,10 +48,12 @@ var _ = Describe("VMSOPCreateVirtualMachine", Ordered, Label(precheck.PrecheckSn vmsop *v1alpha2.VirtualMachineSnapshotOperation vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment - f *framework.Framework + ctx context.Context + f *framework.Framework ) BeforeAll(func() { + ctx = context.Background() f = framework.NewFramework("vmsop") cfg := framework.GetConfig() if cfg.StorageClass.TemplateStorageClass != nil && cfg.StorageClass.TemplateStorageClass.Provisioner == framework.NFS { @@ -83,10 +85,10 @@ var _ = Describe("VMSOPCreateVirtualMachine", Ordered, Label(precheck.PrecheckSn vmbuilder.WithCPU(1, ptr.To("100%")), ) - err := f.CreateWithDeferredDeletion(context.Background(), vd, vm) + err := f.CreateWithDeferredDeletion(ctx, vd, vm) Expect(err).NotTo(HaveOccurred()) - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(vm), framework.LongTimeout) }) By("create vmbda", func() { @@ -102,10 +104,10 @@ var _ = Describe("VMSOPCreateVirtualMachine", Ordered, Label(precheck.PrecheckSn vmbdabuilder.WithVirtualMachineName(vm.Name), vmbdabuilder.WithBlockDeviceRef(v1alpha2.VMBDAObjectRefKindVirtualDisk, vdBlank.Name), ) - err := f.CreateWithDeferredDeletion(context.Background(), vmbda, vdBlank) + err := f.CreateWithDeferredDeletion(ctx, vmbda, vdBlank) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.LongTimeout, vmbda) + util.UntilObjectPhase(ctx, string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.LongTimeout, vmbda) }) By("create vmsnapshot", func() { @@ -117,10 +119,10 @@ var _ = Describe("VMSOPCreateVirtualMachine", Ordered, Label(precheck.PrecheckSn vmsbuilder.WithRequiredConsistency(false), ) - err := f.CreateWithDeferredDeletion(context.Background(), vmsnapshot) + err := f.CreateWithDeferredDeletion(ctx, vmsnapshot) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.VirtualMachineSnapshotPhaseReady), framework.LongTimeout, vmsnapshot) + util.UntilObjectPhase(ctx, string(v1alpha2.VirtualMachineSnapshotPhaseReady), framework.LongTimeout, vmsnapshot) }) }) @@ -143,10 +145,10 @@ var _ = Describe("VMSOPCreateVirtualMachine", Ordered, Label(precheck.PrecheckSn }), ) - err := f.CreateWithDeferredDeletion(context.Background(), vmsop) + err := f.CreateWithDeferredDeletion(ctx, vmsop) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.VMSOPPhaseCompleted), framework.LongTimeout, vmsop) + util.UntilObjectPhase(ctx, string(v1alpha2.VMSOPPhaseCompleted), framework.LongTimeout, vmsop) }) By("Check that resources don't exist for DryRun mode", func() { @@ -154,16 +156,16 @@ var _ = Describe("VMSOPCreateVirtualMachine", Ordered, Label(precheck.PrecheckSn return } - err := 
f.VirtClient().VirtualMachines(f.Namespace().Name).Delete(context.Background(), clonedName(vm.Name), metav1.DeleteOptions{}) + err := f.VirtClient().VirtualMachines(f.Namespace().Name).Delete(ctx, clonedName(vm.Name), metav1.DeleteOptions{}) Expect(err).To(HaveOccurred()) - err = f.VirtClient().VirtualMachineBlockDeviceAttachments(f.Namespace().Name).Delete(context.Background(), clonedName(vmbda.Name), metav1.DeleteOptions{}) + err = f.VirtClient().VirtualMachineBlockDeviceAttachments(f.Namespace().Name).Delete(ctx, clonedName(vmbda.Name), metav1.DeleteOptions{}) Expect(err).To(HaveOccurred()) - err = f.VirtClient().VirtualDisks(f.Namespace().Name).Delete(context.Background(), clonedName(vd.Name), metav1.DeleteOptions{}) + err = f.VirtClient().VirtualDisks(f.Namespace().Name).Delete(ctx, clonedName(vd.Name), metav1.DeleteOptions{}) Expect(err).To(HaveOccurred()) - err = f.VirtClient().VirtualDisks(f.Namespace().Name).Delete(context.Background(), clonedName(vdBlank.Name), metav1.DeleteOptions{}) + err = f.VirtClient().VirtualDisks(f.Namespace().Name).Delete(ctx, clonedName(vdBlank.Name), metav1.DeleteOptions{}) Expect(err).To(HaveOccurred()) }) @@ -172,14 +174,14 @@ var _ = Describe("VMSOPCreateVirtualMachine", Ordered, Label(precheck.PrecheckSn return } - createdVM, err := f.VirtClient().VirtualMachines(f.Namespace().Name).Get(context.Background(), clonedName(vm.Name), metav1.GetOptions{}) + createdVM, err := f.VirtClient().VirtualMachines(f.Namespace().Name).Get(ctx, clonedName(vm.Name), metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - createdVMBDA, err := f.VirtClient().VirtualMachineBlockDeviceAttachments(f.Namespace().Name).Get(context.Background(), clonedName(vmbda.Name), metav1.GetOptions{}) + createdVMBDA, err := f.VirtClient().VirtualMachineBlockDeviceAttachments(f.Namespace().Name).Get(ctx, clonedName(vmbda.Name), metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(createdVM), framework.LongTimeout) - util.UntilObjectPhase(string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.LongTimeout, createdVMBDA) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(createdVM), framework.LongTimeout) + util.UntilObjectPhase(ctx, string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.LongTimeout, createdVMBDA) }) By("Delete created Resources", func() { @@ -187,16 +189,16 @@ var _ = Describe("VMSOPCreateVirtualMachine", Ordered, Label(precheck.PrecheckSn return } - err := f.VirtClient().VirtualMachines(f.Namespace().Name).Delete(context.Background(), clonedName(vm.Name), metav1.DeleteOptions{}) + err := f.VirtClient().VirtualMachines(f.Namespace().Name).Delete(ctx, clonedName(vm.Name), metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred()) - err = f.VirtClient().VirtualMachineBlockDeviceAttachments(f.Namespace().Name).Delete(context.Background(), clonedName(vmbda.Name), metav1.DeleteOptions{}) + err = f.VirtClient().VirtualMachineBlockDeviceAttachments(f.Namespace().Name).Delete(ctx, clonedName(vmbda.Name), metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred()) - err = f.VirtClient().VirtualDisks(f.Namespace().Name).Delete(context.Background(), clonedName(vd.Name), metav1.DeleteOptions{}) + err = f.VirtClient().VirtualDisks(f.Namespace().Name).Delete(ctx, clonedName(vd.Name), metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred()) - err = f.VirtClient().VirtualDisks(f.Namespace().Name).Delete(context.Background(), clonedName(vdBlank.Name), metav1.DeleteOptions{}) + err = 
f.VirtClient().VirtualDisks(f.Namespace().Name).Delete(ctx, clonedName(vdBlank.Name), metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred()) }) }, diff --git a/test/e2e/suite_cvi.go b/test/e2e/suite_cvi.go deleted file mode 100644 index bab061f2ee..0000000000 --- a/test/e2e/suite_cvi.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2026 Flant JSC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package e2e - -import ( - "context" - "fmt" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - "github.com/deckhouse/virtualization/api/core/v1alpha2" - "github.com/deckhouse/virtualization/test/e2e/internal/config" - "github.com/deckhouse/virtualization/test/e2e/internal/framework" - "github.com/deckhouse/virtualization/test/e2e/internal/precreatedcvi" - "github.com/deckhouse/virtualization/test/e2e/internal/util" -) - -var cviManager = precreatedcvi.NewManager() - -func bootstrapPrecreatedCVIs() { - GinkgoHelper() - - By("Creating or reusing precreated CVIs") - err := cviManager.Bootstrap(context.Background()) - Expect(err).NotTo(HaveOccurred()) - - cvis := cviManager.CVIsAsObjects() - By(fmt.Sprintf("Waiting for all %d precreated CVIs to be ready", len(cvis))) - - util.UntilObjectPhase(string(v1alpha2.ImageReady), framework.LongTimeout, cvis...) - By(fmt.Sprintf("All %d precreated CVIs are ready", len(cvis))) -} - -func cleanupPrecreatedCVIs() { - GinkgoHelper() - - if !framework.GetConfig().IsCleanupNeeded || !config.IsPrecreatedCVICleanupNeeded() { - return - } - - By("Cleaning up precreated CVIs") - err := cviManager.Cleanup(context.Background()) - Expect(err).NotTo(HaveOccurred(), "Failed to delete precreated CVIs") -} diff --git a/test/e2e/vm/additional_network_interfaces.go b/test/e2e/vm/additional_network_interfaces.go index 8095113046..734b549e66 100644 --- a/test/e2e/vm/additional_network_interfaces.go +++ b/test/e2e/vm/additional_network_interfaces.go @@ -65,10 +65,12 @@ var _ = Describe("VirtualMachineAdditionalNetworkInterfaces", Label(precheck.NoP vmFoo *v1alpha2.VirtualMachine vmBar *v1alpha2.VirtualMachine - f *framework.Framework + ctx context.Context + f *framework.Framework ) BeforeEach(func() { + ctx = context.Background() f = framework.NewFramework("vm-additional-network") DeferCleanup(f.After) @@ -98,24 +100,24 @@ var _ = Describe("VirtualMachineAdditionalNetworkInterfaces", Label(precheck.NoP vmFoo = buildVMWithNetworks("vm-foo", ns, vdFooRoot.Name, tc.vmFooAdditionalIP, true) vmBar = buildVMWithNetworks("vm-bar", ns, vdBarRoot.Name, tc.vmBarAdditionalIP, tc.vmBarHasMainNetwork) - err := f.CreateWithDeferredDeletion(context.Background(), vdFooRoot, vdBarRoot, vmFoo, vmBar) + err := f.CreateWithDeferredDeletion(ctx, vdFooRoot, vdBarRoot, vmFoo, vmBar) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, vmFoo, vmBar) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, vmFoo, vmBar) util.UntilSSHReady(f, vmFoo, framework.LongTimeout) if tc.vmBarHasMainNetwork { util.UntilSSHReady(f, 
vmBar, framework.LongTimeout) } By(fmt.Sprintf("Wait until vms %s and %s in phase running", vmFoo.GetName(), vmBar.GetName()), func() { - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, vmFoo, vmBar) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, vmFoo, vmBar) }) }) // If test fail due this timeout, rollback in test waiting for agent to be ready. By("Wait for additional network interfaces to be ready", func() { util.UntilConditionStatus( - context.Background(), + ctx, vmcondition.TypeNetworkReady.String(), "True", framework.LongTimeout, @@ -139,11 +141,11 @@ var _ = Describe("VirtualMachineAdditionalNetworkInterfaces", Label(precheck.NoP }) By("Check Cilium agents after migration", func() { - err := network.CheckCiliumAgents(context.Background(), f.Kubectl(), vmFoo.Name, f.Namespace().Name) + err := network.CheckCiliumAgents(ctx, f.Kubectl(), vmFoo.Name, f.Namespace().Name) Expect(err).NotTo(HaveOccurred(), "Cilium agents check for VM %s", vmFoo.Name) if tc.vmBarHasMainNetwork { - err = network.CheckCiliumAgents(context.Background(), f.Kubectl(), vmBar.Name, f.Namespace().Name) + err = network.CheckCiliumAgents(ctx, f.Kubectl(), vmBar.Name, f.Namespace().Name) Expect(err).NotTo(HaveOccurred(), "Cilium agents check for VM %s", vmBar.Name) } }) @@ -158,7 +160,7 @@ var _ = Describe("VirtualMachineAdditionalNetworkInterfaces", Label(precheck.NoP By("Wait for additional network interfaces to be ready after migration", func() { util.UntilConditionStatus( - context.Background(), + ctx, vmcondition.TypeNetworkReady.String(), "True", framework.LongTimeout, @@ -199,13 +201,13 @@ var _ = Describe("VirtualMachineAdditionalNetworkInterfaces", Label(precheck.NoP Name: util.ClusterNetworkName(secondAdditionalInterfaceVLANID), }) - err := f.CreateWithDeferredDeletion(context.Background(), vdRoot, vm) + err := f.CreateWithDeferredDeletion(ctx, vdRoot, vm) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, vm) - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, vm) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(vm), framework.LongTimeout) util.UntilConditionStatus( - context.Background(), + ctx, vmcondition.TypeNetworkReady.String(), "True", framework.LongTimeout, @@ -219,15 +221,15 @@ var _ = Describe("VirtualMachineAdditionalNetworkInterfaces", Label(precheck.NoP }) By("Remove middle ClusterNetwork from VM spec", func() { - err := f.Clients.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(vm), vm) + err := f.Clients.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(vm), vm) Expect(err).NotTo(HaveOccurred()) vm.Spec.Networks = []v1alpha2.NetworksSpec{vm.Spec.Networks[0], vm.Spec.Networks[2]} - err = f.Clients.GenericClient().Update(context.Background(), vm) + err = f.Clients.GenericClient().Update(ctx, vm) Expect(err).NotTo(HaveOccurred()) }) By("Reboot VM via VMOP", func() { - err := f.Clients.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(vm), vm) + err := f.Clients.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(vm), vm) Expect(err).NotTo(HaveOccurred()) runningCondition, _ := conditions.GetCondition(vmcondition.TypeRunning, vm.Status.Conditions) @@ -236,10 +238,10 @@ var _ = Describe("VirtualMachineAdditionalNetworkInterfaces", Label(precheck.NoP util.RebootVirtualMachineByVMOP(f, vm) 
util.UntilVirtualMachineRebooted(crclient.ObjectKeyFromObject(vm), previousRunningTime, framework.LongTimeout) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, vm) - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, vm) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(vm), framework.LongTimeout) util.UntilConditionStatus( - context.Background(), + ctx, vmcondition.TypeNetworkReady.String(), "True", framework.LongTimeout, diff --git a/test/e2e/vm/affinity_toleration.go b/test/e2e/vm/affinity_toleration.go index 265b15943f..fa78e64972 100644 --- a/test/e2e/vm/affinity_toleration.go +++ b/test/e2e/vm/affinity_toleration.go @@ -101,7 +101,7 @@ var _ = Describe("VirtualMachineAffinityAndToleration", Ordered, Label(precheck. ) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, vmA) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, vmA) }) By("Creating vm-b, vm-c and vm-d", func() { @@ -135,7 +135,7 @@ var _ = Describe("VirtualMachineAffinityAndToleration", Ordered, Label(precheck. err := f.CreateWithDeferredDeletion(ctx, objs...) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, vmB, vmC, vmD) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, vmB, vmC, vmD) }) var nodeA string @@ -243,7 +243,7 @@ var _ = Describe("VirtualMachineAffinityAndToleration", Ordered, Label(precheck. ) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, vmNodeSelector) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, vmNodeSelector) util.UntilConditionStatus( ctx, vmcondition.TypeMigratable.String(), @@ -320,7 +320,7 @@ var _ = Describe("VirtualMachineAffinityAndToleration", Ordered, Label(precheck. 
) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, vmNodeAffinity) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, vmNodeAffinity) util.UntilConditionStatus( ctx, vmcondition.TypeMigratable.String(), diff --git a/test/e2e/vm/configuration.go b/test/e2e/vm/configuration.go index 5a28e88ec0..cb5385c4f9 100644 --- a/test/e2e/vm/configuration.go +++ b/test/e2e/vm/configuration.go @@ -50,6 +50,7 @@ const ( var _ = Describe("VirtualMachineConfiguration", Label(precheck.NoPrecheck), func() { DescribeTable("the configuration should be applied", func(restartApprovalMode v1alpha2.RestartApprovalMode) { + ctx := context.Background() f := framework.NewFramework(fmt.Sprintf("vm-configuration-%s", strings.ToLower(string(restartApprovalMode)))) t := NewConfigurationTest(f) @@ -58,14 +59,14 @@ var _ = Describe("VirtualMachineConfiguration", Label(precheck.NoPrecheck), func By("Environment preparation") t.GenerateResources(restartApprovalMode) - err := f.CreateWithDeferredDeletion(context.Background(), t.VM, t.VDRoot, t.VDBlank) + err := f.CreateWithDeferredDeletion(ctx, t.VM, t.VDRoot, t.VDBlank) Expect(err).NotTo(HaveOccurred()) By("Waiting for VM agent to be ready") - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(t.VM), framework.LongTimeout) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(t.VM), framework.LongTimeout) By("Checking initial configuration") - err = f.Clients.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(t.VM), t.VM) + err = f.Clients.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(t.VM), t.VM) Expect(err).NotTo(HaveOccurred()) Expect(t.VM.Status.Resources.CPU.Cores).To(Equal(initialCPUCores)) Expect(t.VM.Status.Resources.Memory.Size).To(Equal(resource.MustParse(initialMemorySize))) @@ -73,7 +74,7 @@ var _ = Describe("VirtualMachineConfiguration", Label(precheck.NoPrecheck), func Expect(util.IsVDAttached(t.VM, t.VDBlank)).To(BeFalse()) By("Applying changes") - err = f.Clients.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(t.VM), t.VM) + err = f.Clients.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(t.VM), t.VM) Expect(err).NotTo(HaveOccurred()) runningCondition, _ := conditions.GetCondition(vmcondition.TypeRunning, t.VM.Status.Conditions) previousRunningTime := runningCondition.LastTransitionTime.Time @@ -85,7 +86,7 @@ var _ = Describe("VirtualMachineConfiguration", Label(precheck.NoPrecheck), func Kind: v1alpha2.DiskDevice, Name: t.VDBlank.Name, }) - err = f.Clients.GenericClient().Update(context.Background(), t.VM) + err = f.Clients.GenericClient().Update(ctx, t.VM) Expect(err).NotTo(HaveOccurred()) if util.IsRestartRequired(t.VM, 3*time.Second) { @@ -94,10 +95,10 @@ var _ = Describe("VirtualMachineConfiguration", Label(precheck.NoPrecheck), func By("Waiting for VM to be rebooted") util.UntilVirtualMachineRebooted(crclient.ObjectKeyFromObject(t.VM), previousRunningTime, framework.LongTimeout) - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(t.VM), framework.MiddleTimeout) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(t.VM), framework.MiddleTimeout) By("Checking changed configuration") - err = f.Clients.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(t.VM), t.VM) + err = f.Clients.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(t.VM), t.VM) Expect(err).NotTo(HaveOccurred()) Expect(t.VM.Status.Resources.CPU.Cores).To(Equal(changedCPUCores)) 
Expect(t.VM.Status.Resources.Memory.Size).To(Equal(resource.MustParse(changedMemorySize))) diff --git a/test/e2e/vm/connectivity.go b/test/e2e/vm/connectivity.go index 49f4e65251..f139fab57c 100644 --- a/test/e2e/vm/connectivity.go +++ b/test/e2e/vm/connectivity.go @@ -43,11 +43,13 @@ import ( var _ = Describe("VirtualMachineConnectivity", Label(precheck.NoPrecheck), func() { var ( - f *framework.Framework - t *VMConnectivityTest + f *framework.Framework + t *VMConnectivityTest + ctx context.Context ) BeforeEach(func() { + ctx = context.Background() f = framework.NewFramework("vm-connectivity") DeferCleanup(f.After) f.Before() @@ -57,22 +59,22 @@ var _ = Describe("VirtualMachineConnectivity", Label(precheck.NoPrecheck), func( It("checks VM network connectivity", func() { By("Environment preparation", func() { t.GenerateEnvironmentResources() - err := f.CreateWithDeferredDeletion(context.Background(), t.VDa, t.VDb, t.VMa, t.VMb, t.ServiceA, t.ServiceB, t.CurlPod) + err := f.CreateWithDeferredDeletion(ctx, t.VDa, t.VDb, t.VMa, t.VMb, t.ServiceA, t.ServiceB, t.CurlPod) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, t.VMa, t.VMb) - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(t.VMa), framework.MiddleTimeout) - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(t.VMb), framework.MiddleTimeout) - util.UntilObjectPhase(string(corev1.PodRunning), framework.ShortTimeout, t.CurlPod) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, t.VMa, t.VMb) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(t.VMa), framework.MiddleTimeout) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(t.VMb), framework.MiddleTimeout) + util.UntilObjectPhase(ctx, string(corev1.PodRunning), framework.ShortTimeout, t.CurlPod) t.CheckCloudInitCompleted(framework.LongTimeout) }) // There is a known issue with the Cilium agent check. 
By("Check Cilium agents are properly configured for the VMs", func() { - err := network.CheckCiliumAgents(context.Background(), f.Kubectl(), t.VMa.Name, f.Namespace().Name) + err := network.CheckCiliumAgents(ctx, f.Kubectl(), t.VMa.Name, f.Namespace().Name) Expect(err).NotTo(HaveOccurred(), "Cilium agents check should succeed for VM %s", t.VMa.Name) - err = network.CheckCiliumAgents(context.Background(), f.Kubectl(), t.VMb.Name, f.Namespace().Name) + err = network.CheckCiliumAgents(ctx, f.Kubectl(), t.VMb.Name, f.Namespace().Name) Expect(err).NotTo(HaveOccurred(), "Cilium agents check should succeed for VM %s", t.VMb.Name) }) @@ -106,7 +108,7 @@ var _ = Describe("VirtualMachineConnectivity", Label(precheck.NoPrecheck), func( By("Replace selector in service A with selector from service B", func() { t.ServiceA.Spec.Selector["service"] = t.SelectorB - err := f.Clients.GenericClient().Update(context.Background(), t.ServiceA) + err := f.Clients.GenericClient().Update(ctx, t.ServiceA) Expect(err).NotTo(HaveOccurred()) }) @@ -118,7 +120,7 @@ var _ = Describe("VirtualMachineConnectivity", Label(precheck.NoPrecheck), func( By("Change selector in service A back to selector from service A", func() { t.ServiceA.Spec.Selector["service"] = t.SelectorA - err := f.Clients.GenericClient().Update(context.Background(), t.ServiceA) + err := f.Clients.GenericClient().Update(ctx, t.ServiceA) Expect(err).NotTo(HaveOccurred()) }) diff --git a/test/e2e/vm/disk_attachment.go b/test/e2e/vm/disk_attachment.go index d29e5ddf87..8293c0fc73 100644 --- a/test/e2e/vm/disk_attachment.go +++ b/test/e2e/vm/disk_attachment.go @@ -98,8 +98,8 @@ var _ = Describe("DiskAttachment", Label(precheck.NoPrecheck), func() { expectedDiskPhase := util.GetExpectedDiskPhaseByVolumeBindingMode() By("Wait for resources to be ready", func() { - util.UntilObjectPhase(expectedDiskPhase, framework.LongTimeout, vdBlank) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, vm) + util.UntilObjectPhase(ctx, expectedDiskPhase, framework.LongTimeout, vdBlank) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, vm) util.UntilSSHReady(f, vm, framework.MiddleTimeout) }) }) @@ -114,7 +114,7 @@ var _ = Describe("DiskAttachment", Label(precheck.NoPrecheck), func() { err := f.CreateWithDeferredDeletion(ctx, vmbda) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.LongTimeout, vmbda) + util.UntilObjectPhase(ctx, string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.LongTimeout, vmbda) }) By("Verify disk count increased by 1", func() { diff --git a/test/e2e/vm/hotplug_pod.go b/test/e2e/vm/hotplug_pod.go index 98049e5a93..e870ea920b 100644 --- a/test/e2e/vm/hotplug_pod.go +++ b/test/e2e/vm/hotplug_pod.go @@ -38,17 +38,19 @@ import ( var _ = Describe("HotplugPod", Label(precheck.NoPrecheck), func() { var ( - f *framework.Framework - vi *v1alpha2.VirtualImage + f *framework.Framework + vi *v1alpha2.VirtualImage + ctx context.Context ) BeforeEach(func() { + ctx = context.Background() f = framework.NewFramework("hotplug-pod") f.Before() DeferCleanup(f.After) newVI := object.NewGeneratedHTTPVIAlpineBIOSPerf("hotplug-pod-", f.Namespace().Name) - newVI, err := f.VirtClient().VirtualImages(f.Namespace().Name).Create(context.Background(), newVI, metav1.CreateOptions{}) + newVI, err := f.VirtClient().VirtualImages(f.Namespace().Name).Create(ctx, newVI, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(newVI) vi 
= newVI @@ -62,27 +64,27 @@ var _ = Describe("HotplugPod", Label(precheck.NoPrecheck), func() { By("Create VM", func() { root := object.NewVDFromVI("root", f.Namespace().Name, vi) blank = object.NewBlankVD("blank", f.Namespace().Name, nil, ptr.To(resource.MustParse("100Mi"))) - Expect(f.CreateWithDeferredDeletion(context.Background(), root, blank)).To(Succeed()) + Expect(f.CreateWithDeferredDeletion(ctx, root, blank)).To(Succeed()) var err error vm = object.NewMinimalVM("hotplug-pod-", f.Namespace().Name, vmbuilder.WithDisks(root), vmbuilder.WithCPU(1, ptr.To("100%"))) - vm, err = f.VirtClient().VirtualMachines(f.Namespace().Name).Create(context.Background(), vm, metav1.CreateOptions{}) + vm, err = f.VirtClient().VirtualMachines(f.Namespace().Name).Create(ctx, vm, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vm) }) By("Wait until VM agent is ready", func() { - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(vm), framework.LongTimeout) }) By("Attaching disk", func() { vmbda := object.NewVMBDAFromDisk(vm.Name, vm.Name, blank) - Expect(f.CreateWithDeferredDeletion(context.Background(), vmbda)).To(Succeed()) - util.UntilObjectPhase(string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.MiddleTimeout, vmbda) + Expect(f.CreateWithDeferredDeletion(ctx, vmbda)).To(Succeed()) + util.UntilObjectPhase(ctx, string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.MiddleTimeout, vmbda) }) By("Evict hp pod", func() { - pods, err := f.KubeClient().CoreV1().Pods(f.Namespace().Name).List(context.Background(), metav1.ListOptions{ + pods, err := f.KubeClient().CoreV1().Pods(f.Namespace().Name).List(ctx, metav1.ListOptions{ LabelSelector: labels.SelectorFromSet(map[string]string{ "kubevirt.internal.virtualization.deckhouse.io": "d8v-hotplug-disk", }).String(), @@ -92,7 +94,7 @@ var _ = Describe("HotplugPod", Label(precheck.NoPrecheck), func() { pod := pods.Items[0] - err = f.KubeClient().CoreV1().Pods(pod.GetNamespace()).EvictV1(context.Background(), &policyv1.Eviction{ + err = f.KubeClient().CoreV1().Pods(pod.GetNamespace()).EvictV1(ctx, &policyv1.Eviction{ ObjectMeta: metav1.ObjectMeta{ Name: pod.GetName(), Namespace: pod.GetNamespace(), diff --git a/test/e2e/vm/ipam.go b/test/e2e/vm/ipam.go index 3f7d838fa3..96c3de72bc 100644 --- a/test/e2e/vm/ipam.go +++ b/test/e2e/vm/ipam.go @@ -149,7 +149,7 @@ var _ = Describe("IPAM", Label(precheck.NoPrecheck), func() { }) By("Wait virtual machine to be running", func() { - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(vm), framework.LongTimeout) }) By("Verify vmip attached to vm", func() { diff --git a/test/e2e/vm/label_annotation.go b/test/e2e/vm/label_annotation.go index c75f57bcbb..0910a18740 100644 --- a/test/e2e/vm/label_annotation.go +++ b/test/e2e/vm/label_annotation.go @@ -62,7 +62,7 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", Label(precheck.NoPrecheck), err := f.CreateWithDeferredDeletion(ctx, vm) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, vm) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, vm) By(fmt.Sprintf("Adding label %q=%q to VM", metadataSpecialKey, metadataSpecialValue)) updateVirtualMachineMetadata(ctx, f, vm, func(current *v1alpha2.VirtualMachine) { diff --git a/test/e2e/vm/live_migration_tcp_session.go 
b/test/e2e/vm/live_migration_tcp_session.go index 5472bc5188..3cf00623fe 100644 --- a/test/e2e/vm/live_migration_tcp_session.go +++ b/test/e2e/vm/live_migration_tcp_session.go @@ -54,9 +54,11 @@ var _ = Describe("VirtualMachineLiveMigrationTCPSession", Label(precheck.NoPrech f *framework.Framework storageClass *storagev1.StorageClass + ctx context.Context ) BeforeEach(func() { + ctx = context.Background() f = framework.NewFramework("vm-live-migration-tcp-session") storageClass = framework.GetConfig().StorageClass.TemplateStorageClass @@ -88,10 +90,10 @@ var _ = Describe("VirtualMachineLiveMigrationTCPSession", Label(precheck.NoPrech iperfServer = newVirtualMachine(iperfServerName, f.Namespace().Name, iperfServerDisk, object.PerfCloudInit) iperfClient = newVirtualMachine(iperfClientName, f.Namespace().Name, iperfClientDisk, object.AlpineCloudInit) - err := f.CreateWithDeferredDeletion(context.Background(), iperfServerDisk, iperfClientDisk, iperfServer, iperfClient) + err := f.CreateWithDeferredDeletion(ctx, iperfServerDisk, iperfClientDisk, iperfServer, iperfClient) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, iperfServer, iperfClient) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, iperfServer, iperfClient) }) By("Wait for the iPerf server to start", func() { @@ -101,7 +103,7 @@ var _ = Describe("VirtualMachineLiveMigrationTCPSession", Label(precheck.NoPrech By("Run the iPerf client", func() { Expect(isAlpineSSHDStarted(f, iperfClient.Name, iperfClient.Namespace)).To(BeTrue(), "the SSHD service status should be `started`") - iperfServer, err := f.Clients.VirtClient().VirtualMachines(f.Namespace().Name).Get(context.Background(), iperfServer.Name, metav1.GetOptions{}) + iperfServer, err := f.Clients.VirtClient().VirtualMachines(f.Namespace().Name).Get(ctx, iperfServer.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) cmd := fmt.Sprintf("nohup iperf3 -c %s -t 0 --json > ~/%s 2>&1 < /dev/null &", iperfServer.Status.IPAddress, reportName) @@ -121,7 +123,7 @@ var _ = Describe("VirtualMachineLiveMigrationTCPSession", Label(precheck.NoPrech }) By("Check the iPerf client report", func() { - iperfServer, err := f.Clients.VirtClient().VirtualMachines(f.Namespace().Name).Get(context.Background(), iperfServerName, metav1.GetOptions{}) + iperfServer, err := f.Clients.VirtClient().VirtualMachines(f.Namespace().Name).Get(ctx, iperfServerName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) stopIPerfClient(iperfClient.Name, f.Namespace().Name, f) report = getIPerfClientReport(iperfClient.Name, f.Namespace().Name, reportName, f) diff --git a/test/e2e/vm/migration.go b/test/e2e/vm/migration.go index 322f9d1386..7d6b59e993 100644 --- a/test/e2e/vm/migration.go +++ b/test/e2e/vm/migration.go @@ -64,12 +64,15 @@ var _ = Describe("VirtualMachineMigration", Label(precheck.NoPrecheck), func() { vmopMigrateBIOS *v1alpha2.VirtualMachineOperation vmopMigrateUEFI *v1alpha2.VirtualMachineOperation - f *framework.Framework + f *framework.Framework + ctx context.Context + biosDiskCountOriginal string uefiDiskCountOriginal string ) BeforeEach(func() { + ctx = context.Background() f = framework.NewFramework("vm-migration") DeferCleanup(f.After) @@ -199,11 +202,12 @@ var _ = Describe("VirtualMachineMigration", Label(precheck.NoPrecheck), func() { vdRootBIOS, vdBlankBIOS, vmBIOS, vdRootUEFI, vdBlankUEFI, vmUEFI, vdHotplugBIOS, vdHotplugUEFI, viHotplugBIOS, viHotplugUEFI, }, toObjects(vmbdas)...) 
- err := f.CreateWithDeferredDeletion(context.Background(), allObjects...) + err := f.CreateWithDeferredDeletion(ctx, allObjects...) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, vmBIOS, vmUEFI) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, vmBIOS, vmUEFI) util.UntilObjectPhase( + ctx, string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.LongTimeout, toObjects(vmbdas)..., ) @@ -230,12 +234,12 @@ var _ = Describe("VirtualMachineMigration", Label(precheck.NoPrecheck), func() { vmopbuilder.WithType(v1alpha2.VMOPTypeEvict), vmopbuilder.WithVirtualMachine(vmUEFI.Name), ) - err := f.CreateWithDeferredDeletion(context.Background(), vmopMigrateBIOS, vmopMigrateUEFI) + err := f.CreateWithDeferredDeletion(ctx, vmopMigrateBIOS, vmopMigrateUEFI) Expect(err).NotTo(HaveOccurred()) }) By("Wait for migration to complete", func() { - ctxVMBDA, cancelVMBDA := context.WithCancel(context.Background()) + ctxVMBDA, cancelVMBDA := context.WithCancel(ctx) defer cancelVMBDA() vmbdaWatchErrCh := make(chan error, 1) @@ -250,18 +254,18 @@ var _ = Describe("VirtualMachineMigration", Label(precheck.NoPrecheck), func() { }() Eventually(func(g Gomega) { - err := f.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(vmBIOS), vmBIOS) + err := f.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(vmBIOS), vmBIOS) Expect(err).NotTo(HaveOccurred()) // Intentionally fail the test on a single error, so g.Expect is not needed - err = f.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(vmUEFI), vmUEFI) + err = f.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(vmUEFI), vmUEFI) Expect(err).NotTo(HaveOccurred()) // Intentionally fail the test on a single error, so g.Expect is not needed // TODO: remove temporary migration skip logic when both known issues are fixed: // kubevirt "client socket is closed" and Volume(s)UpdateError. util.SkipIfKnownMigrationFailure(vmBIOS) util.SkipIfKnownMigrationFailure(vmUEFI) - err = f.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(vmopMigrateBIOS), vmopMigrateBIOS) + err = f.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(vmopMigrateBIOS), vmopMigrateBIOS) Expect(err).NotTo(HaveOccurred()) // Intentionally fail the test on a single error, so g.Expect is not needed - err = f.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(vmopMigrateUEFI), vmopMigrateUEFI) + err = f.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(vmopMigrateUEFI), vmopMigrateUEFI) Expect(err).NotTo(HaveOccurred()) // Intentionally fail the test on a single error, so g.Expect is not needed g.Expect(vmopMigrateBIOS.Status.Phase).To(Equal(v1alpha2.VMOPPhaseCompleted)) @@ -294,9 +298,9 @@ var _ = Describe("VirtualMachineMigration", Label(precheck.NoPrecheck), func() { // There is a known issue with the Cilium agent check. 
By("Check Cilium agents are properly configured for the VM", func() { - err := network.CheckCiliumAgents(context.Background(), f.Kubectl(), vmBIOS.Name, f.Namespace().Name) + err := network.CheckCiliumAgents(ctx, f.Kubectl(), vmBIOS.Name, f.Namespace().Name) Expect(err).NotTo(HaveOccurred(), "Cilium agents check should succeed for VM %s", vmBIOS.Name) - err = network.CheckCiliumAgents(context.Background(), f.Kubectl(), vmUEFI.Name, f.Namespace().Name) + err = network.CheckCiliumAgents(ctx, f.Kubectl(), vmUEFI.Name, f.Namespace().Name) Expect(err).NotTo(HaveOccurred(), "Cilium agents check should succeed for VM %s", vmUEFI.Name) }) diff --git a/test/e2e/vm/power_state.go b/test/e2e/vm/power_state.go index b008ad5edd..8704f5cb17 100644 --- a/test/e2e/vm/power_state.go +++ b/test/e2e/vm/power_state.go @@ -52,6 +52,7 @@ var _ = Describe("PowerState", Label(precheck.NoPrecheck), func() { case v1alpha2.ManualPolicy: namespaceSuffix = "manual" } + ctx := context.Background() f := framework.NewFramework(fmt.Sprintf("power-state-%s", namespaceSuffix)) DeferCleanup(f.After) f.Before() @@ -61,17 +62,17 @@ var _ = Describe("PowerState", Label(precheck.NoPrecheck), func() { By("Environment preparation", func() { t.GenerateResources(runPolicy) err := f.CreateWithDeferredDeletion( - context.Background(), t.VI, t.VDRoot, t.VDBlank, t.VM, t.VMBDA, + ctx, t.VI, t.VDRoot, t.VDBlank, t.VM, t.VMBDA, ) Expect(err).NotTo(HaveOccurred()) if t.VM.Spec.RunPolicy == v1alpha2.ManualPolicy { - util.UntilObjectPhase(string(v1alpha2.MachineStopped), framework.LongTimeout, t.VM) - util.StartVirtualMachine(f, t.VM) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineStopped), framework.LongTimeout, t.VM) + util.StartVirtualMachine(ctx, f, t.VM) } - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, t.VM) - util.UntilObjectPhase(string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.MiddleTimeout, t.VMBDA) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, t.VM) + util.UntilObjectPhase(ctx, string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.MiddleTimeout, t.VMBDA) util.UntilSSHReady(f, t.VM, framework.MiddleTimeout) }) @@ -82,31 +83,31 @@ var _ = Describe("PowerState", Label(precheck.NoPrecheck), func() { vmopbuilder.WithType(v1alpha2.VMOPTypeStop), vmopbuilder.WithVirtualMachine(t.VM.Name), ) - err := f.CreateWithDeferredDeletion(context.Background(), vmopStop) + err := f.CreateWithDeferredDeletion(ctx, vmopStop) Expect(err).NotTo(HaveOccurred()) switch t.VM.Spec.RunPolicy { case v1alpha2.AlwaysOnPolicy: - util.UntilObjectPhase(string(v1alpha2.VMOPPhaseFailed), framework.ShortTimeout, vmopStop) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.ShortTimeout, t.VM) - util.UntilObjectPhase(string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.ShortTimeout, t.VMBDA) + util.UntilObjectPhase(ctx, string(v1alpha2.VMOPPhaseFailed), framework.ShortTimeout, vmopStop) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.ShortTimeout, t.VM) + util.UntilObjectPhase(ctx, string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.ShortTimeout, t.VMBDA) case v1alpha2.AlwaysOnUnlessStoppedManually, v1alpha2.ManualPolicy: - util.UntilObjectPhase(string(v1alpha2.VMOPPhaseCompleted), framework.LongTimeout, vmopStop) - util.UntilObjectPhase(string(v1alpha2.MachineStopped), framework.ShortTimeout, t.VM) + util.UntilObjectPhase(ctx, string(v1alpha2.VMOPPhaseCompleted), framework.LongTimeout, vmopStop) + 
util.UntilObjectPhase(ctx, string(v1alpha2.MachineStopped), framework.ShortTimeout, t.VM) } }) By("Start VM by VMOP", func() { if t.VM.Spec.RunPolicy != v1alpha2.AlwaysOnPolicy { - util.StartVirtualMachine(f, t.VM) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.MiddleTimeout, t.VM) - util.UntilObjectPhase(string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.ShortTimeout, t.VMBDA) + util.StartVirtualMachine(ctx, f, t.VM) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.MiddleTimeout, t.VM) + util.UntilObjectPhase(ctx, string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.ShortTimeout, t.VMBDA) util.UntilSSHReady(f, t.VM, framework.MiddleTimeout) } }) By("Shutdown VM by SSH", func() { - err := f.Clients.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(t.VM), t.VM) + err := f.Clients.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(t.VM), t.VM) Expect(err).NotTo(HaveOccurred()) runningCondition, _ := conditions.GetCondition(vmcondition.TypeRunning, t.VM.Status.Conditions) runningLastTransitionTime := runningCondition.LastTransitionTime.Time @@ -116,25 +117,25 @@ var _ = Describe("PowerState", Label(precheck.NoPrecheck), func() { switch t.VM.Spec.RunPolicy { case v1alpha2.AlwaysOnPolicy: util.UntilVirtualMachineRebooted(crclient.ObjectKeyFromObject(t.VM), runningLastTransitionTime, framework.LongTimeout) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.ShortTimeout, t.VM) - util.UntilObjectPhase(string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.ShortTimeout, t.VMBDA) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.ShortTimeout, t.VM) + util.UntilObjectPhase(ctx, string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.ShortTimeout, t.VMBDA) util.UntilSSHReady(f, t.VM, framework.MiddleTimeout) case v1alpha2.AlwaysOnUnlessStoppedManually, v1alpha2.ManualPolicy: - util.UntilObjectPhase(string(v1alpha2.MachineStopped), framework.LongTimeout, t.VM) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineStopped), framework.LongTimeout, t.VM) } }) By("Start VM by VMOP", func() { if t.VM.Spec.RunPolicy != v1alpha2.AlwaysOnPolicy { - util.StartVirtualMachine(f, t.VM) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.MiddleTimeout, t.VM) - util.UntilObjectPhase(string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.ShortTimeout, t.VMBDA) + util.StartVirtualMachine(ctx, f, t.VM) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.MiddleTimeout, t.VM) + util.UntilObjectPhase(ctx, string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.ShortTimeout, t.VMBDA) util.UntilSSHReady(f, t.VM, framework.MiddleTimeout) } }) By("Reboot VM by VMOP", func() { - err := f.Clients.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(t.VM), t.VM) + err := f.Clients.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(t.VM), t.VM) Expect(err).NotTo(HaveOccurred()) runningCondition, _ := conditions.GetCondition(vmcondition.TypeRunning, t.VM.Status.Conditions) @@ -146,18 +147,18 @@ var _ = Describe("PowerState", Label(precheck.NoPrecheck), func() { vmopbuilder.WithType(v1alpha2.VMOPTypeRestart), vmopbuilder.WithVirtualMachine(t.VM.Name), ) - err = f.CreateWithDeferredDeletion(context.Background(), vmopRestart) + err = f.CreateWithDeferredDeletion(ctx, vmopRestart) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.VMOPPhaseCompleted), framework.LongTimeout, vmopRestart) + 
util.UntilObjectPhase(ctx, string(v1alpha2.VMOPPhaseCompleted), framework.LongTimeout, vmopRestart) util.UntilVirtualMachineRebooted(crclient.ObjectKeyFromObject(t.VM), runningLastTransitionTime, framework.MiddleTimeout) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.ShortTimeout, t.VM) - util.UntilObjectPhase(string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.ShortTimeout, t.VMBDA) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.ShortTimeout, t.VM) + util.UntilObjectPhase(ctx, string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.ShortTimeout, t.VMBDA) util.UntilSSHReady(f, t.VM, framework.MiddleTimeout) }) By("Reboot VM by SSH", func() { - err := f.Clients.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(t.VM), t.VM) + err := f.Clients.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(t.VM), t.VM) Expect(err).NotTo(HaveOccurred()) runningCondition, _ := conditions.GetCondition(vmcondition.TypeRunning, t.VM.Status.Conditions) @@ -166,13 +167,13 @@ var _ = Describe("PowerState", Label(precheck.NoPrecheck), func() { util.RebootVirtualMachineBySSH(f, t.VM) util.UntilVirtualMachineRebooted(crclient.ObjectKeyFromObject(t.VM), runningLastTransitionTime, framework.LongTimeout) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.ShortTimeout, t.VM) - util.UntilObjectPhase(string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.ShortTimeout, t.VMBDA) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.ShortTimeout, t.VM) + util.UntilObjectPhase(ctx, string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.ShortTimeout, t.VMBDA) util.UntilSSHReady(f, t.VM, framework.MiddleTimeout) }) By("Check VM can reach external network", func() { - err := network.CheckCiliumAgents(context.Background(), f.Kubectl(), t.VM.Name, f.Namespace().Name) + err := network.CheckCiliumAgents(ctx, f.Kubectl(), t.VM.Name, f.Namespace().Name) Expect(err).NotTo(HaveOccurred(), "Cilium agents check should succeed for VM %s", t.VM.Name) network.CheckExternalConnectivity(f, t.VM.Name, network.ExternalConnectivityHosts) }) diff --git a/test/e2e/vm/sizing_policy.go b/test/e2e/vm/sizing_policy.go index 6b3c24dd92..777ea5ec1b 100644 --- a/test/e2e/vm/sizing_policy.go +++ b/test/e2e/vm/sizing_policy.go @@ -65,12 +65,12 @@ var _ = Describe("SizingPolicy", Label(precheck.NoPrecheck), func() { err := f.CreateWithDeferredDeletion(ctx, t.VMClass) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.ClassPhaseReady), framework.ShortTimeout, t.VMClass) + util.UntilObjectPhase(ctx, string(v1alpha2.ClassPhaseReady), framework.ShortTimeout, t.VMClass) err = f.CreateWithDeferredDeletion(ctx, t.VD, t.VM) Expect(err).NotTo(HaveOccurred()) By("Waiting for VM agent to be ready") - util.UntilVMAgentReady(client.ObjectKeyFromObject(t.VM), framework.LongTimeout) + util.UntilVMAgentReady(ctx, client.ObjectKeyFromObject(t.VM), framework.LongTimeout) By("Validating VM by VMClass") t.ValidateVirtualMachineByClass(t.VMClass, t.VM) @@ -98,7 +98,7 @@ var _ = Describe("SizingPolicy", Label(precheck.NoPrecheck), func() { Expect(err).NotTo(HaveOccurred()) By("Waiting for VM to be ready") - util.UntilVMAgentReady(client.ObjectKeyFromObject(t.VM), framework.LongTimeout) + util.UntilVMAgentReady(ctx, client.ObjectKeyFromObject(t.VM), framework.LongTimeout) By("Validating VM by VMClass") t.ValidateVirtualMachineByClass(t.VMClass, t.VM) @@ -133,7 +133,7 @@ var _ = Describe("SizingPolicy", 
Label(precheck.NoPrecheck), func() { Expect(err).NotTo(HaveOccurred()) By("Waiting for VM to be ready") - util.UntilVMAgentReady(client.ObjectKeyFromObject(t.VM), framework.LongTimeout) + util.UntilVMAgentReady(ctx, client.ObjectKeyFromObject(t.VM), framework.LongTimeout) By("Validating VM by VMClass") t.ValidateVirtualMachineByClass(t.VMClass, t.VM) diff --git a/test/e2e/vm/target_migration.go b/test/e2e/vm/target_migration.go index 0ca4ef4511..a6b18bdf96 100644 --- a/test/e2e/vm/target_migration.go +++ b/test/e2e/vm/target_migration.go @@ -50,10 +50,12 @@ var _ = Describe("TargetMigration", Label(precheck.NoPrecheck), func() { initialNodeName string targetNodeSelector map[string]string - f *framework.Framework + f *framework.Framework + ctx context.Context ) BeforeEach(func() { + ctx = context.Background() f = framework.NewFramework("vm-target-migration") DeferCleanup(f.After) f.Before() @@ -74,42 +76,42 @@ var _ = Describe("TargetMigration", Label(precheck.NoPrecheck), func() { vm.WithDisks(virtualDisk), ) - err := f.CreateWithDeferredDeletion(context.Background(), virtualDisk, virtualMachine) + err := f.CreateWithDeferredDeletion(ctx, virtualDisk, virtualMachine) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, virtualMachine) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, virtualMachine) }) By("Migrate the `VirtualMachine`", func() { - virtualMachine, err := f.Clients.VirtClient().VirtualMachines(f.Namespace().Name).Get(context.Background(), virtualMachine.Name, metav1.GetOptions{}) + virtualMachine, err := f.Clients.VirtClient().VirtualMachines(f.Namespace().Name).Get(ctx, virtualMachine.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) initialNodeName = virtualMachine.Status.Node - targetNodeSelector, err = defineTargetNodeSelector(f, initialNodeName) + targetNodeSelector, err = defineTargetNodeSelector(ctx, f, initialNodeName) Expect(err).NotTo(HaveOccurred()) targetMigrationVMOP = newTargetMigrationVMOP(virtualMachine, targetNodeSelector) - err = f.CreateWithDeferredDeletion(context.Background(), targetMigrationVMOP) + err = f.CreateWithDeferredDeletion(ctx, targetMigrationVMOP) Expect(err).NotTo(HaveOccurred()) util.UntilVMMigrationSucceeded(client.ObjectKeyFromObject(virtualMachine), framework.MaxTimeout) - util.UntilObjectPhase(string(v1alpha2.VMOPPhaseCompleted), framework.ShortTimeout, targetMigrationVMOP) + util.UntilObjectPhase(ctx, string(v1alpha2.VMOPPhaseCompleted), framework.ShortTimeout, targetMigrationVMOP) }) By("Check the result", func() { - targetMigrationVMOP, err := f.Clients.VirtClient().VirtualMachineOperations(f.Namespace().Name).Get(context.Background(), targetMigrationVMOP.Name, metav1.GetOptions{}) + targetMigrationVMOP, err := f.Clients.VirtClient().VirtualMachineOperations(f.Namespace().Name).Get(ctx, targetMigrationVMOP.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(targetMigrationVMOP.Spec.Migrate).NotTo(BeNil()) Expect(targetMigrationVMOP.Spec.Migrate.NodeSelector).To(HaveKey(hostnameLabelKey)) - intvirtvmim, err := getVirtualMachineInstanceMigration(f, fmt.Sprintf("vmop-%s", targetMigrationVMOP.Name)) + intvirtvmim, err := getVirtualMachineInstanceMigration(ctx, f, fmt.Sprintf("vmop-%s", targetMigrationVMOP.Name)) Expect(err).NotTo(HaveOccurred()) Expect(intvirtvmim).NotTo(BeNil()) Expect(intvirtvmim.Spec.AddedNodeSelector).To(HaveKey(hostnameLabelKey)) 
Expect(intvirtvmim.Status.Phase).To(Equal(virtv1.MigrationSucceeded)) - virtualMachine, err := f.Clients.VirtClient().VirtualMachines(f.Namespace().Name).Get(context.Background(), virtualMachine.Name, metav1.GetOptions{}) + virtualMachine, err := f.Clients.VirtClient().VirtualMachines(f.Namespace().Name).Get(ctx, virtualMachine.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(initialNodeName).NotTo(Equal(virtualMachine.Status.Node)) @@ -128,10 +130,10 @@ func newTargetMigrationVMOP(virtualMachine *v1alpha2.VirtualMachine, nodeSelecto ) } -func defineTargetNodeSelector(f *framework.Framework, currentNodeName string) (map[string]string, error) { +func defineTargetNodeSelector(ctx context.Context, f *framework.Framework, currentNodeName string) (map[string]string, error) { nodes := &corev1.NodeList{} err := f.Clients.GenericClient().List( - context.Background(), + ctx, nodes, client.MatchingLabels( map[string]string{ @@ -163,9 +165,9 @@ func defineTargetNodeSelector(f *framework.Framework, currentNodeName string) (m return nil, errors.New("could not define a target node for the virtual machine") } -func getVirtualMachineInstanceMigration(f *framework.Framework, name string) (*virtv1.VirtualMachineInstanceMigration, error) { +func getVirtualMachineInstanceMigration(ctx context.Context, f *framework.Framework, name string) (*virtv1.VirtualMachineInstanceMigration, error) { obj := &rewrite.VirtualMachineInstanceMigration{} - err := f.RewriteClient().Get(context.Background(), name, obj, rewrite.InNamespace(f.Namespace().Name)) + err := f.RewriteClient().Get(ctx, name, obj, rewrite.InNamespace(f.Namespace().Name)) if err != nil { if k8serrors.IsNotFound(err) { return nil, nil diff --git a/test/e2e/vm/tpm.go b/test/e2e/vm/tpm.go index 0b224f3c9b..96f20f2929 100644 --- a/test/e2e/vm/tpm.go +++ b/test/e2e/vm/tpm.go @@ -36,9 +36,12 @@ import ( ) var _ = Describe("VMCheckTPM", label.TPM(), Label(precheck.NoPrecheck), func() { - var f *framework.Framework - + var ( + f *framework.Framework + ctx context.Context + ) BeforeEach(func() { + ctx = context.Background() f = framework.NewFramework("vm-tpm-check") DeferCleanup(f.After) @@ -83,9 +86,9 @@ runcmd: vm.WithOsType(osType), vm.WithProvisioningUserData(cloudInit), ) - err := f.CreateWithDeferredDeletion(context.Background(), vdRoot, vmTPM) + err := f.CreateWithDeferredDeletion(ctx, vdRoot, vmTPM) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, vmTPM) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, vmTPM) util.UntilSSHReady(f, vmTPM, framework.LongTimeout) By(fmt.Sprintf("Checks that the VM has the TPM module version %s.", expectedTPMVersion)) diff --git a/test/e2e/vm/usb.go b/test/e2e/vm/usb.go index a10e1b2eed..3550bd8e7d 100644 --- a/test/e2e/vm/usb.go +++ b/test/e2e/vm/usb.go @@ -40,11 +40,13 @@ import ( var _ = Describe("VirtualMachineUSB", Label(precheck.PrecheckUSB), func() { var ( - f *framework.Framework - t *VMUSBTest + f *framework.Framework + t *VMUSBTest + ctx context.Context ) BeforeEach(func() { + ctx = context.Background() f = framework.NewFramework("vm-usb") DeferCleanup(func() { t.unassignNodeUSB() @@ -52,18 +54,18 @@ var _ = Describe("VirtualMachineUSB", Label(precheck.PrecheckUSB), func() { }) f.Before() - t = NewVMUSBTest(f) + t = NewVMUSBTest(ctx, f) }) It("should write data to USB device and preserve after reconnection", func() { By("Environment preparation", func() { // TODO: Move all preflight checks to the 
`SynchronizedBeforeSuite` to ensure they are executed in a synchronized context. - if !t.checkDummyHCDConfigured() { + if !t.checkDummyHCDConfigured(ctx) { Skip("dummy_hcd is not configured. Run generate_dummy_hcd_ngc.sh first.") } - t.GenerateEnvironmentResources() - err := f.CreateWithDeferredDeletion(context.Background(), t.VD) + t.GenerateEnvironmentResources(ctx) + err := f.CreateWithDeferredDeletion(ctx, t.VD) Expect(err).NotTo(HaveOccurred()) t.assignNodeUSB() @@ -74,10 +76,10 @@ var _ = Describe("VirtualMachineUSB", Label(precheck.PrecheckUSB), func() { }) By("Creating VM with USB device", func() { - err := f.CreateWithDeferredDeletion(context.Background(), t.VM) + err := f.CreateWithDeferredDeletion(ctx, t.VM) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, t.VM) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, t.VM) util.UntilSSHReady(f, t.VM, framework.MiddleTimeout) util.UntilGuestCommandsReady(f, t.VM, []string{"sudo", "tee", "udevadm"}, framework.LongTimeout) }) @@ -102,7 +104,7 @@ var _ = Describe("VirtualMachineUSB", Label(precheck.PrecheckUSB), func() { util.MigrateVirtualMachine(f, t.VM) util.UntilVMMigrationSucceeded(crclient.ObjectKeyFromObject(t.VM), framework.LongTimeout) - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.ShortTimeout, t.VM) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.ShortTimeout, t.VM) util.UntilSSHReady(f, t.VM, framework.ShortTimeout) }) @@ -136,17 +138,16 @@ type VMUSBTest struct { testContent string } -func NewVMUSBTest(f *framework.Framework) *VMUSBTest { +func NewVMUSBTest(ctx context.Context, f *framework.Framework) *VMUSBTest { return &VMUSBTest{ Framework: f, - ctx: context.Background(), + ctx: ctx, testFile: "/mnt/usb/testfile.txt", testContent: "Hello USB " + time.Now().Format(time.RFC3339), } } -func (t *VMUSBTest) checkDummyHCDConfigured() bool { - ctx := context.Background() +func (t *VMUSBTest) checkDummyHCDConfigured(ctx context.Context) bool { virtClient := t.Framework.VirtClient() nodeUSBList, err := virtClient.NodeUSBDevices().List(ctx, metav1.ListOptions{}) @@ -167,8 +168,7 @@ func (t *VMUSBTest) checkDummyHCDConfigured() bool { return false } -func (t *VMUSBTest) GenerateEnvironmentResources() { - ctx := context.Background() +func (t *VMUSBTest) GenerateEnvironmentResources(ctx context.Context) { virtClient := t.Framework.VirtClient() nodeUSBList, err := virtClient.NodeUSBDevices().List(ctx, metav1.ListOptions{}) diff --git a/test/e2e/vm/version.go b/test/e2e/vm/version.go index b7787551f7..e2b803556c 100644 --- a/test/e2e/vm/version.go +++ b/test/e2e/vm/version.go @@ -35,9 +35,12 @@ import ( ) var _ = Describe("VirtualMachineVersions", Label(precheck.NoPrecheck), func() { - var f *framework.Framework - + var ( + f *framework.Framework + ctx context.Context + ) BeforeEach(func() { + ctx = context.Background() f = framework.NewFramework("vm-versions") DeferCleanup(f.After) f.Before() @@ -60,15 +63,15 @@ var _ = Describe("VirtualMachineVersions", Label(precheck.NoPrecheck), func() { ) By("Creating resources") - err := f.CreateWithDeferredDeletion(context.Background(), vdRoot, vm) + err := f.CreateWithDeferredDeletion(ctx, vdRoot, vm) Expect(err).NotTo(HaveOccurred()) By("Waiting for VirtualMachine to be Running") - util.UntilObjectPhase(string(v1alpha2.MachineRunning), framework.LongTimeout, vm) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineRunning), framework.LongTimeout, vm) 
By("Checking VM status has qemu and libvirt versions") Eventually(func(g Gomega) { - err := f.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(vm), vm) + err := f.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(vm), vm) g.Expect(err).NotTo(HaveOccurred()) g.Expect(vm.Status.Versions).NotTo(BeNil()) g.Expect(vm.Status.Versions.Qemu).NotTo(BeEmpty()) diff --git a/test/e2e/vm/volume_migration_local_disks.go b/test/e2e/vm/volume_migration_local_disks.go index 7c84510d6c..66f5f8af90 100644 --- a/test/e2e/vm/volume_migration_local_disks.go +++ b/test/e2e/vm/volume_migration_local_disks.go @@ -57,11 +57,13 @@ func decoratorsForVolumeMigrations() []interface{} { var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Label(precheck.NoPrecheck), func() { var ( f *framework.Framework + ctx context.Context storageClass *storagev1.StorageClass vi *v1alpha2.VirtualImage ) BeforeEach(func() { + ctx = context.Background() f = framework.NewFramework("volume-migration-local-disks") storageClass = framework.GetConfig().StorageClass.TemplateStorageClass if storageClass == nil { @@ -73,7 +75,7 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab DeferCleanup(f.After) newVI := object.NewGeneratedVIFromCVI("volume-migration-local-disks-", f.Namespace().Name, object.PrecreatedCVIAlpineBIOSPerf) - newVI, err := f.VirtClient().VirtualImages(f.Namespace().Name).Create(context.Background(), newVI, metav1.CreateOptions{}) + newVI, err := f.VirtClient().VirtualImages(f.Namespace().Name).Create(ctx, newVI, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(newVI) vi = newVI @@ -107,18 +109,18 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab vm, vds := build() - vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) + vm, err := f.VirtClient().VirtualMachines(ns).Create(ctx, vm, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vm) for _, vd := range vds { - _, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) + _, err := f.VirtClient().VirtualDisks(ns).Create(ctx, vd, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vd) } By("Wait until VM agent is ready") - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(vm), framework.LongTimeout) const vmopName = "local-disks-migration" @@ -126,7 +128,7 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab util.MigrateVirtualMachine(f, vm, vmopbuilder.WithName(vmopName)) Eventually(func() error { - vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{}) + vm, err = f.VirtClient().VirtualMachines(ns).Get(ctx, vm.GetName(), metav1.GetOptions{}) if err != nil { return err } @@ -134,7 +136,7 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab // kubevirt "client socket is closed" and Volume(s)UpdateError. 
util.SkipIfKnownMigrationFailure(vm) - vmop, err := f.VirtClient().VirtualMachineOperations(ns).Get(context.Background(), vmopName, metav1.GetOptions{}) + vmop, err := f.VirtClient().VirtualMachineOperations(ns).Get(ctx, vmopName, metav1.GetOptions{}) if err != nil { return err } @@ -145,7 +147,7 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab return nil }).WithTimeout(framework.MaxTimeout).WithPolling(time.Second).Should(Succeed()) - vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{}) + vm, err = f.VirtClient().VirtualMachines(ns).Get(ctx, vm.GetName(), metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(vm.Status.MigrationState).ShouldNot(BeNil()) Expect(vm.Status.MigrationState.EndTimestamp).ShouldNot(BeNil()) @@ -164,18 +166,18 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab vm, vds := build() - vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) + vm, err := f.VirtClient().VirtualMachines(ns).Create(ctx, vm, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vm) for _, vd := range vds { - _, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) + _, err := f.VirtClient().VirtualDisks(ns).Create(ctx, vd, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vd) } By("Wait until VM agent is ready") - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(vm), framework.LongTimeout) ExecStressNGInVirtualMachine(f, vm) @@ -198,18 +200,18 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab vm, vds := localMigrationRootAndAdditionalBuild() - vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) + vm, err := f.VirtClient().VirtualMachines(ns).Create(ctx, vm, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vm) for _, vd := range vds { - _, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) + _, err := f.VirtClient().VirtualDisks(ns).Create(ctx, vd, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vd) } By("Wait until VM agent is ready") - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(vm), framework.LongTimeout) for i := range 2 { vmopName := "local-disks-migration-" + strconv.Itoa(i) @@ -218,7 +220,7 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab util.MigrateVirtualMachine(f, vm, vmopbuilder.WithName(vmopName)) Eventually(func() error { - vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{}) + vm, err = f.VirtClient().VirtualMachines(ns).Get(ctx, vm.GetName(), metav1.GetOptions{}) if err != nil { return err } @@ -226,7 +228,7 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab // kubevirt "client socket is closed" and Volume(s)UpdateError. 
util.SkipIfKnownMigrationFailure(vm) - vmop, err := f.VirtClient().VirtualMachineOperations(ns).Get(context.Background(), vmopName, metav1.GetOptions{}) + vmop, err := f.VirtClient().VirtualMachineOperations(ns).Get(ctx, vmopName, metav1.GetOptions{}) if err != nil { return err } @@ -237,7 +239,7 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab return nil }).WithTimeout(framework.MaxTimeout).WithPolling(time.Second).Should(Succeed()) - vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{}) + vm, err = f.VirtClient().VirtualMachines(ns).Get(ctx, vm.GetName(), metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(vm.Status.MigrationState).ShouldNot(BeNil()) Expect(vm.Status.MigrationState.EndTimestamp).ShouldNot(BeNil()) @@ -252,18 +254,18 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab vm, vds := localMigrationRootAndAdditionalBuild() - vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) + vm, err := f.VirtClient().VirtualMachines(ns).Create(ctx, vm, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vm) for _, vd := range vds { - _, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) + _, err := f.VirtClient().VirtualDisks(ns).Create(ctx, vd, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vd) } By("Wait until VM agent is ready") - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(vm), framework.LongTimeout) ExecStressNGInVirtualMachine(f, vm) @@ -284,20 +286,20 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab util.MigrateVirtualMachine(f, vm, vmopbuilder.WithName(vmopName2)) Eventually(func(g Gomega) { - vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{}) + vm, err = f.VirtClient().VirtualMachines(ns).Get(ctx, vm.GetName(), metav1.GetOptions{}) g.Expect(err).NotTo(HaveOccurred()) // TODO: remove temporary migration skip logic when both known issues are fixed: // kubevirt "client socket is closed" and Volume(s)UpdateError. 
util.SkipIfKnownMigrationFailure(vm) - vmop, err := f.VirtClient().VirtualMachineOperations(ns).Get(context.Background(), vmopName2, metav1.GetOptions{}) + vmop, err := f.VirtClient().VirtualMachineOperations(ns).Get(ctx, vmopName2, metav1.GetOptions{}) g.Expect(err).NotTo(HaveOccurred()) completed, _ := conditions.GetCondition(vmopcondition.TypeCompleted, vmop.Status.Conditions) g.Expect(completed.Status).To(Equal(metav1.ConditionTrue), "Reason: %s, Message: %s", completed.Reason, completed.Message) }).WithTimeout(framework.MaxTimeout).WithPolling(time.Second).Should(Succeed()) - vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{}) + vm, err = f.VirtClient().VirtualMachines(ns).Get(ctx, vm.GetName(), metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(vm.Status.MigrationState).ShouldNot(BeNil()) Expect(vm.Status.MigrationState.EndTimestamp).ShouldNot(BeNil()) @@ -311,18 +313,18 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab vm, vds := localMigrationRootAndAdditionalBuild() - vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) + vm, err := f.VirtClient().VirtualMachines(ns).Create(ctx, vm, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vm) for _, vd := range vds { - _, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) + _, err := f.VirtClient().VirtualDisks(ns).Create(ctx, vd, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vd) } By("Wait until VM agent is ready") - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(vm), framework.LongTimeout) ExecStressNGInVirtualMachine(f, vm) @@ -332,7 +334,7 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab util.MigrateVirtualMachine(f, vm, vmopbuilder.WithName(vmopName)) Eventually(func() error { - vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{}) + vm, err = f.VirtClient().VirtualMachines(ns).Get(ctx, vm.GetName(), metav1.GetOptions{}) if err != nil { return err } @@ -350,7 +352,7 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab untilVirtualDisksMigrationsFailed(f) }, Entry("when virtual machine deleting", func(vm *v1alpha2.VirtualMachine) error { - return f.VirtClient().VirtualMachines(vm.GetNamespace()).Delete(context.Background(), vm.GetName(), metav1.DeleteOptions{}) + return f.VirtClient().VirtualMachines(vm.GetNamespace()).Delete(ctx, vm.GetName(), metav1.DeleteOptions{}) }), // Disabled because vm stopped after migration, that's why test fails. 
// Entry("when virtual machine stopped from OS", func(vm *v1alpha2.VirtualMachine) error { @@ -369,7 +371,7 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab GinkgoHelper() patchBytes := []byte(fmt.Sprintf(`{"metadata":{"labels": {"%s": "true"}}}`, unknownLabelKey)) - _, err := f.KubeClient().CoreV1().Nodes().Patch(context.Background(), node.GetName(), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + _, err := f.KubeClient().CoreV1().Nodes().Patch(ctx, node.GetName(), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) Expect(err).NotTo(HaveOccurred()) } @@ -384,13 +386,13 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab patchBytes, err := patch.NewJSONPatch(patch.WithReplace("/metadata/labels", newLabels)).Bytes() Expect(err).NotTo(HaveOccurred()) - _, err = f.KubeClient().CoreV1().Nodes().Patch(context.Background(), node.GetName(), types.JSONPatchType, patchBytes, metav1.PatchOptions{}) + _, err = f.KubeClient().CoreV1().Nodes().Patch(ctx, node.GetName(), types.JSONPatchType, patchBytes, metav1.PatchOptions{}) Expect(err).NotTo(HaveOccurred()) } } BeforeEach(func() { - nodes, err := f.KubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) + nodes, err := f.KubeClient().CoreV1().Nodes().List(ctx, metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) for _, node := range nodes.Items { @@ -410,26 +412,26 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab vm, vds := localMigrationRootAndAdditionalBuild() vm.Spec.NodeSelector = map[string]string{unknownLabelKey: "true"} - vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) + vm, err := f.VirtClient().VirtualMachines(ns).Create(ctx, vm, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vm) for _, vd := range vds { - _, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) + _, err := f.VirtClient().VirtualDisks(ns).Create(ctx, vd, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vd) } By("Wait until VM agent is ready") - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(vm), framework.LongTimeout) - vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{}) + vm, err = f.VirtClient().VirtualMachines(ns).Get(ctx, vm.GetName(), metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) vmNodeName := vm.Status.Node Expect(vmNodeName).NotTo(BeEmpty()) - nodes, err := f.KubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) + nodes, err := f.KubeClient().CoreV1().Nodes().List(ctx, metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) for _, node := range nodes.Items { @@ -444,7 +446,7 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab util.MigrateVirtualMachine(f, vm, vmopbuilder.WithName(vmopName)) Eventually(func() error { - pods, err := f.KubeClient().CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{}) + pods, err := f.KubeClient().CoreV1().Pods(ns).List(ctx, metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) if len(pods.Items) != 2 { @@ -477,7 +479,7 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab return fmt.Errorf("pending pod is not unschedulable") 
}).WithTimeout(framework.LongTimeout).WithPolling(time.Second).Should(Succeed()) - err = f.VirtClient().VirtualMachineOperations(ns).Delete(context.Background(), vmopName, metav1.DeleteOptions{}) + err = f.VirtClient().VirtualMachineOperations(ns).Delete(ctx, vmopName, metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred()) untilVirtualDisksMigrationsFailed(f) @@ -490,13 +492,13 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab vm, vds := localMigrationRootAndAdditionalBuild() By("Creating VM") - vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) + vm, err := f.VirtClient().VirtualMachines(ns).Create(ctx, vm, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vm) By("Creating VDs") for _, vd := range vds { - _, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) + _, err := f.VirtClient().VirtualDisks(ns).Create(ctx, vd, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vd) } @@ -504,19 +506,19 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab By("Creating RWO VD for VMBDA") const vdVmbdaName = "vd-vmbda-rwo" vdVmbda := object.NewBlankVD(vdVmbdaName, ns, &storageClass.Name, ptr.To(resource.MustParse("100Mi"))) - _, err = f.VirtClient().VirtualDisks(ns).Create(context.Background(), vdVmbda, metav1.CreateOptions{}) + _, err = f.VirtClient().VirtualDisks(ns).Create(ctx, vdVmbda, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vdVmbda) By("Creating VMBDA") const vmbdaName = "vd-vmbda-rwo" vmbda := object.NewVMBDAFromDisk(vmbdaName, vm.Name, vdVmbda) - _, err = f.VirtClient().VirtualMachineBlockDeviceAttachments(ns).Create(context.Background(), vmbda, metav1.CreateOptions{}) + _, err = f.VirtClient().VirtualMachineBlockDeviceAttachments(ns).Create(ctx, vmbda, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vmbda) By("Wait until VM agent is ready") - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(vm), framework.LongTimeout) const vmopName = "local-disks-migration-with-rwo-vmbda" @@ -524,7 +526,7 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab util.MigrateVirtualMachine(f, vm, vmopbuilder.WithName(vmopName)) Eventually(func() error { - vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{}) + vm, err = f.VirtClient().VirtualMachines(ns).Get(ctx, vm.GetName(), metav1.GetOptions{}) if err != nil { return err } @@ -532,7 +534,7 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab // kubevirt "client socket is closed" and Volume(s)UpdateError. 
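Aside on the node-labeling helpers earlier in these hunks: they build the strategic-merge-patch body with fmt.Sprintf and later revert it with a JSON patch over /metadata/labels. A small sketch of the labeling half with the payload marshalled rather than hand-formatted; the key and value here are placeholders, not the suite's unknownLabelKey:

package example

import (
	"context"
	"encoding/json"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// labelNode adds (or overwrites) one label on a node via a strategic merge
// patch. Marshalling the payload keeps quoting and escaping out of the test.
func labelNode(ctx context.Context, c kubernetes.Interface, nodeName, key, value string) error {
	payload := map[string]any{
		"metadata": map[string]any{
			"labels": map[string]string{key: value},
		},
	}
	patchBytes, err := json.Marshal(payload)
	if err != nil {
		return err
	}
	_, err = c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
	return err
}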
util.SkipIfKnownMigrationFailure(vm) - vmop, err := f.VirtClient().VirtualMachineOperations(ns).Get(context.Background(), vmopName, metav1.GetOptions{}) + vmop, err := f.VirtClient().VirtualMachineOperations(ns).Get(ctx, vmopName, metav1.GetOptions{}) if err != nil { return err } @@ -543,7 +545,7 @@ var _ = Describe("RWOVirtualDiskMigration", decoratorsForVolumeMigrations(), Lab return nil }).WithTimeout(framework.MaxTimeout).WithPolling(time.Second).Should(Succeed()) - vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{}) + vm, err = f.VirtClient().VirtualMachines(ns).Get(ctx, vm.GetName(), metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(vm.Status.MigrationState).ShouldNot(BeNil()) Expect(vm.Status.MigrationState.EndTimestamp).ShouldNot(BeNil()) diff --git a/test/e2e/vm/volume_migration_storage_class_changed.go b/test/e2e/vm/volume_migration_storage_class_changed.go index 497004fb1c..5da03bd576 100644 --- a/test/e2e/vm/volume_migration_storage_class_changed.go +++ b/test/e2e/vm/volume_migration_storage_class_changed.go @@ -47,18 +47,20 @@ import ( var _ = Describe("StorageClassMigration", decoratorsForVolumeMigrations(), Label(precheck.NoPrecheck), func() { var ( f *framework.Framework + ctx context.Context storageClass *storagev1.StorageClass vi *v1alpha2.VirtualImage targetStorageClassName string ) BeforeEach(func() { + ctx = context.Background() f = framework.NewFramework("volume-migration-storage-class-changed") storageClass = framework.GetConfig().StorageClass.TemplateStorageClass if storageClass == nil { Skip("TemplateStorageClass is not set.") } - targetStorageClass, err := getTargetStorageClass(f, storageClass) + targetStorageClass, err := getTargetStorageClass(ctx, f, storageClass) Expect(err).NotTo(HaveOccurred()) if targetStorageClass == "" { @@ -71,7 +73,7 @@ var _ = Describe("StorageClassMigration", decoratorsForVolumeMigrations(), Label DeferCleanup(f.After) newVI := object.NewGeneratedVIFromCVI("volume-migration-storage-class-changed-", f.Namespace().Name, object.PrecreatedCVIAlpineBIOS) - newVI, err = f.VirtClient().VirtualImages(f.Namespace().Name).Create(context.Background(), newVI, metav1.CreateOptions{}) + newVI, err = f.VirtClient().VirtualImages(f.Namespace().Name).Create(ctx, newVI, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(newVI) vi = newVI @@ -112,13 +114,13 @@ var _ = Describe("StorageClassMigration", decoratorsForVolumeMigrations(), Label vm, vds := build() - vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) + vm, err := f.VirtClient().VirtualMachines(ns).Create(ctx, vm, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vm) var vdsForMigration []*v1alpha2.VirtualDisk for _, vd := range vds { - vd, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) + vd, err := f.VirtClient().VirtualDisks(ns).Create(ctx, vd, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vd) @@ -128,10 +130,10 @@ var _ = Describe("StorageClassMigration", decoratorsForVolumeMigrations(), Label } Expect(vdsForMigration).Should(HaveLen(len(disksForMigration))) - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(vm), framework.LongTimeout) By("Patch VD with new storage class") - err = patchStorageClassName(context.Background(), f, targetStorageClassName, vdsForMigration...) 
+ err = patchStorageClassName(ctx, f, targetStorageClassName, vdsForMigration...) Expect(err).NotTo(HaveOccurred()) By("Wait until VM migration succeeded") @@ -140,10 +142,10 @@ var _ = Describe("StorageClassMigration", decoratorsForVolumeMigrations(), Label untilVirtualDisksMigrationsSucceeded(f) for _, vdForMigration := range vdsForMigration { - migratedVD, err := f.VirtClient().VirtualDisks(ns).Get(context.Background(), vdForMigration.GetName(), metav1.GetOptions{}) + migratedVD, err := f.VirtClient().VirtualDisks(ns).Get(ctx, vdForMigration.GetName(), metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - pvc, err := f.KubeClient().CoreV1().PersistentVolumeClaims(ns).Get(context.Background(), migratedVD.Status.Target.PersistentVolumeClaim, metav1.GetOptions{}) + pvc, err := f.KubeClient().CoreV1().PersistentVolumeClaims(ns).Get(ctx, migratedVD.Status.Target.PersistentVolumeClaim, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(pvc.Spec.StorageClassName).NotTo(BeNil()) Expect(*pvc.Spec.StorageClassName).To(Equal(targetStorageClassName)) @@ -162,13 +164,13 @@ var _ = Describe("StorageClassMigration", decoratorsForVolumeMigrations(), Label vm, vds := build() - vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) + vm, err := f.VirtClient().VirtualMachines(ns).Create(ctx, vm, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vm) var vdsForMigration []*v1alpha2.VirtualDisk for _, vd := range vds { - vd, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) + vd, err := f.VirtClient().VirtualDisks(ns).Create(ctx, vd, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(vd) @@ -178,14 +180,14 @@ var _ = Describe("StorageClassMigration", decoratorsForVolumeMigrations(), Label } Expect(vdsForMigration).Should(HaveLen(len(disksForMigration))) - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(vm), framework.LongTimeout) By("Patch VD with new storage class") - err = patchStorageClassName(context.Background(), f, targetStorageClassName, vdsForMigration...) + err = patchStorageClassName(ctx, f, targetStorageClassName, vdsForMigration...) Expect(err).NotTo(HaveOccurred()) Eventually(func() error { - vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{}) + vm, err = f.VirtClient().VirtualMachines(ns).Get(ctx, vm.GetName(), metav1.GetOptions{}) if err != nil { return err } @@ -197,7 +199,7 @@ var _ = Describe("StorageClassMigration", decoratorsForVolumeMigrations(), Label } // revert migration - err = patchStorageClassName(context.Background(), f, storageClass.Name, vdsForMigration...) + err = patchStorageClassName(ctx, f, storageClass.Name, vdsForMigration...) Expect(err).NotTo(HaveOccurred()) return nil @@ -222,12 +224,12 @@ var _ = Describe("StorageClassMigration", decoratorsForVolumeMigrations(), Label } f.DeferDelete(objs...) - err := f.CreateWithDeferredDeletion(context.Background(), objs...) + err := f.CreateWithDeferredDeletion(ctx, objs...) 
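On the PVC assertions above (checking that the migrated disk's claim landed on the target storage class): StorageClassName in a PVC spec is a *string, which is why the tests guard with NotTo(BeNil()) before comparing. The same check as a plain standalone helper, with namespace and claim name as placeholders:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// pvcUsesStorageClass reports whether the named claim is bound to the
// expected storage class. A nil StorageClassName means the claim fell back
// to the cluster default, which counts as "no" here.
func pvcUsesStorageClass(ctx context.Context, c kubernetes.Interface, namespace, claimName, want string) (bool, error) {
	pvc, err := c.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, claimName, metav1.GetOptions{})
	if err != nil {
		return false, fmt.Errorf("get pvc %s/%s: %w", namespace, claimName, err)
	}
	if pvc.Spec.StorageClassName == nil {
		return false, nil
	}
	return *pvc.Spec.StorageClassName == want, nil
}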
Expect(err).NotTo(HaveOccurred()) - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(vm), framework.LongTimeout) - vdForMigration, err := f.VirtClient().VirtualDisks(ns).Get(context.Background(), vdRootName, metav1.GetOptions{}) + vdForMigration, err := f.VirtClient().VirtualDisks(ns).Get(ctx, vdRootName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) toStorageClasses := []string{targetStorageClassName, storageClass.Name} @@ -235,7 +237,7 @@ var _ = Describe("StorageClassMigration", decoratorsForVolumeMigrations(), Label for _, sc := range toStorageClasses { By(fmt.Sprintf("Patch VD %s with new storage class %s", vdForMigration.Name, sc)) - err = patchStorageClassName(context.Background(), f, sc, vdForMigration) + err = patchStorageClassName(ctx, f, sc, vdForMigration) Expect(err).NotTo(HaveOccurred()) Eventually(func() error { @@ -243,7 +245,7 @@ var _ = Describe("StorageClassMigration", decoratorsForVolumeMigrations(), Label // controller may revert volume migration (VM not running, VM not migrating, etc.). util.SkipIfVDMigrationReverted(ns) - vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{}) + vm, err = f.VirtClient().VirtualMachines(ns).Get(ctx, vm.GetName(), metav1.GetOptions{}) if err != nil { return err } @@ -252,7 +254,7 @@ var _ = Describe("StorageClassMigration", decoratorsForVolumeMigrations(), Label util.SkipIfKnownMigrationFailure(vm) var lastVMOP *v1alpha2.VirtualMachineOperation - vmops, err := f.VirtClient().VirtualMachineOperations(ns).List(context.Background(), metav1.ListOptions{}) + vmops, err := f.VirtClient().VirtualMachineOperations(ns).List(ctx, metav1.ListOptions{}) if err != nil { return err } @@ -286,10 +288,10 @@ var _ = Describe("StorageClassMigration", decoratorsForVolumeMigrations(), Label untilVirtualDisksMigrationsSucceeded(f) - migratedVD, err := f.VirtClient().VirtualDisks(ns).Get(context.Background(), vdForMigration.GetName(), metav1.GetOptions{}) + migratedVD, err := f.VirtClient().VirtualDisks(ns).Get(ctx, vdForMigration.GetName(), metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - pvc, err := f.KubeClient().CoreV1().PersistentVolumeClaims(ns).Get(context.Background(), migratedVD.Status.Target.PersistentVolumeClaim, metav1.GetOptions{}) + pvc, err := f.KubeClient().CoreV1().PersistentVolumeClaims(ns).Get(ctx, migratedVD.Status.Target.PersistentVolumeClaim, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(pvc.Spec.StorageClassName).NotTo(BeNil()) Expect(*pvc.Spec.StorageClassName).To(Equal(sc)) @@ -298,17 +300,17 @@ var _ = Describe("StorageClassMigration", decoratorsForVolumeMigrations(), Label }) }) -func getTargetStorageClass(f *framework.Framework, storageClass *storagev1.StorageClass) (string, error) { +func getTargetStorageClass(ctx context.Context, f *framework.Framework, storageClass *storagev1.StorageClass) (string, error) { // GetVolumeAndAccessModes needs no nil object. 
notEmptyVD := &v1alpha2.VirtualDisk{} modeGetter := volumemode.NewVolumeAndAccessModesGetter(f.GenericClient(), getStorageProfile(f)) - volumeMode, _, err := modeGetter.GetVolumeAndAccessModes(context.Background(), notEmptyVD, storageClass) + volumeMode, _, err := modeGetter.GetVolumeAndAccessModes(ctx, notEmptyVD, storageClass) if err != nil { return "", err } - scList, err := f.KubeClient().StorageV1().StorageClasses().List(context.Background(), metav1.ListOptions{}) + scList, err := f.KubeClient().StorageV1().StorageClasses().List(ctx, metav1.ListOptions{}) if err != nil { return "", err } @@ -324,7 +326,7 @@ func getTargetStorageClass(f *framework.Framework, storageClass *storagev1.Stora continue } - nextVolumeMode, _, err := modeGetter.GetVolumeAndAccessModes(context.Background(), notEmptyVD, &sc) + nextVolumeMode, _, err := modeGetter.GetVolumeAndAccessModes(ctx, notEmptyVD, &sc) if err != nil { GinkgoWriter.Printf("Skipping storage class %s: cannot get volume mode: %s\n", sc.Name, err) continue diff --git a/test/e2e/vmop/restore.go b/test/e2e/vmop/restore.go index 7b96b5031b..3b41e8123b 100644 --- a/test/e2e/vmop/restore.go +++ b/test/e2e/vmop/restore.go @@ -73,6 +73,7 @@ const ( var _ = Describe("VirtualMachineOperationRestore", label.Slow(), Label(precheck.PrecheckSnapshot, precheck.PrecheckSDN), func() { DescribeTable("restores a virtual machine from a snapshot", func(restoreMode v1alpha2.SnapshotOperationMode, restartApprovalMode v1alpha2.RestartApprovalMode, runPolicy v1alpha2.RunPolicy, removeRecoverableResources bool) { + ctx := context.Background() f := framework.NewFramework(fmt.Sprintf("vmop-restore-%s", strings.ToLower(string(restoreMode)))) DeferCleanup(f.After) f.Before() @@ -84,45 +85,45 @@ var _ = Describe("VirtualMachineOperationRestore", label.Slow(), Label(precheck. Expect(util.IsClusterNetworkExists(f, additionalInterfaceVLANID)).To(BeTrue(), fmt.Sprintf("The cluster network does not exist. 
Please apply the cluster network first using the command: %s", util.ClusterNetworkCreateCommand(additionalInterfaceVLANID))) t := newRestoreTest(f) - if !t.IsStorageClassAvailableForTest(t.VM) { + if !t.IsStorageClassAvailableForTest(ctx, t.VM) { Skip("Temporary skip on sds-replicated-volume until snapshot functionality is fixed") } By("Environment preparation", func() { t.GenerateResources(restoreMode, restartApprovalMode, runPolicy) err := f.CreateWithDeferredDeletion( - context.Background(), t.VI, t.VDRoot, t.VDBlank, t.VM, t.VMBDA, t.VDBlankWithNoFstabEntry, t.VMBDAWithNoFstabEntry, + ctx, t.VI, t.VDRoot, t.VDBlank, t.VM, t.VMBDA, t.VDBlankWithNoFstabEntry, t.VMBDAWithNoFstabEntry, ) Expect(err).NotTo(HaveOccurred()) if t.VM.Spec.RunPolicy == v1alpha2.ManualPolicy { - util.UntilObjectPhase(string(v1alpha2.MachineStopped), framework.LongTimeout, t.VM) - util.StartVirtualMachine(f, t.VM) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineStopped), framework.LongTimeout, t.VM) + util.StartVirtualMachine(ctx, f, t.VM) } - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(t.VM), framework.LongTimeout) - util.UntilObjectPhase(string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.MiddleTimeout, t.VMBDA, t.VMBDAWithNoFstabEntry) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(t.VM), framework.LongTimeout) + util.UntilObjectPhase(ctx, string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.MiddleTimeout, t.VMBDA, t.VMBDAWithNoFstabEntry) - util.CreateBlockDeviceFilesystem(f, t.VM, v1alpha2.DiskDevice, t.VDBlank.Name, "ext4") - util.MountBlockDevice(f, t.VM, v1alpha2.DiskDevice, t.VDBlank.Name, mountPoint) - util.RegisterFstabEntry(f, t.VM, v1alpha2.DiskDevice, t.VDBlank.Name) + util.CreateBlockDeviceFilesystem(ctx, f, t.VM, v1alpha2.DiskDevice, t.VDBlank.Name, "ext4") + util.MountBlockDevice(ctx, f, t.VM, v1alpha2.DiskDevice, t.VDBlank.Name, mountPoint) + util.RegisterFstabEntry(ctx, f, t.VM, v1alpha2.DiskDevice, t.VDBlank.Name) util.WriteFile(f, t.VM, fileDataPath, originalValueOnDisk) - util.CreateBlockDeviceFilesystem(f, t.VM, v1alpha2.DiskDevice, t.VDBlankWithNoFstabEntry.Name, "ext4") - util.MountBlockDevice(f, t.VM, v1alpha2.DiskDevice, t.VDBlankWithNoFstabEntry.Name, mountPoint) + util.CreateBlockDeviceFilesystem(ctx, f, t.VM, v1alpha2.DiskDevice, t.VDBlankWithNoFstabEntry.Name, "ext4") + util.MountBlockDevice(ctx, f, t.VM, v1alpha2.DiskDevice, t.VDBlankWithNoFstabEntry.Name, mountPoint) util.WriteFile(f, t.VM, fileDataPath, originalValueOnDisk) // Unmount the disk to ensure nothing affects the hash. 
util.UnmountBlockDevice(f, t.VM, mountPoint) - t.BlockDeviceHash = util.GetBlockDeviceHash(f, t.VM, v1alpha2.DiskDevice, t.VDBlankWithNoFstabEntry.Name) + t.BlockDeviceHash = util.GetBlockDeviceHash(ctx, f, t.VM, v1alpha2.DiskDevice, t.VDBlankWithNoFstabEntry.Name) t.CheckAdditionalNetworkInterface(t.VM, additionalNetworkIP) - err = f.CreateWithDeferredDeletion(context.Background(), t.VMSnapshot) + err = f.CreateWithDeferredDeletion(ctx, t.VMSnapshot) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.VirtualMachineSnapshotPhaseReady), framework.MiddleTimeout, t.VMSnapshot) + util.UntilObjectPhase(ctx, string(v1alpha2.VirtualMachineSnapshotPhaseReady), framework.MiddleTimeout, t.VMSnapshot) }) By("Changing VM", func() { util.WriteFile(f, t.VM, fileDataPath, changedValueOnDisk) - err := f.Clients.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(t.VM), t.VM) + err := f.Clients.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(t.VM), t.VM) Expect(err).NotTo(HaveOccurred()) runningCondition, _ := conditions.GetCondition(vmcondition.TypeRunning, t.VM.Status.Conditions) @@ -132,7 +133,7 @@ var _ = Describe("VirtualMachineOperationRestore", label.Slow(), Label(precheck. t.VM.Labels[vmLabelName] = vmLabelChangedValue t.VM.Spec.CPU.Cores = changedCPUCores t.VM.Spec.Memory.Size = resource.MustParse(changedMemorySize) - err = f.Clients.GenericClient().Update(context.Background(), t.VM) + err = f.Clients.GenericClient().Update(ctx, t.VM) Expect(err).NotTo(HaveOccurred()) if util.IsRestartRequired(t.VM, 3*time.Second) { @@ -140,12 +141,12 @@ var _ = Describe("VirtualMachineOperationRestore", label.Slow(), Label(precheck. } util.UntilVirtualMachineRebooted(crclient.ObjectKeyFromObject(t.VM), runningLastTransitionTime, framework.LongTimeout) - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(t.VM), framework.ShortTimeout) - util.UntilObjectPhase(string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.MiddleTimeout, t.VMBDA) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(t.VM), framework.ShortTimeout) + util.UntilObjectPhase(ctx, string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.MiddleTimeout, t.VMBDA) }) By("Check that VM is in changed state", func() { Expect(util.ReadFile(f, t.VM, fileDataPath)).To(Equal(changedValueOnDisk)) - err := f.Clients.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(t.VM), t.VM) + err := f.Clients.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(t.VM), t.VM) Expect(err).NotTo(HaveOccurred()) Expect(t.VM.Annotations[vmAnnotationName]).To(Equal(vmAnnotationChangedValue)) Expect(t.VM.Labels[vmLabelName]).To(Equal(vmLabelChangedValue)) @@ -154,37 +155,37 @@ var _ = Describe("VirtualMachineOperationRestore", label.Slow(), Label(precheck. 
}) By("Resource preparation", func() { if removeRecoverableResources { - t.RemoveRecoverableResources() + t.RemoveRecoverableResources(ctx) } }) By("Restore VM from snapshot", func() { - t.RestoreVM(t.VM, t.VMOPRestore) + t.RestoreVM(ctx, t.VM, t.VMOPRestore) }) By("Check VM after restore", func() { - t.CheckVMAfterRestore(t.VM, t.VDRoot, t.VDBlank, t.VDBlankWithNoFstabEntry, t.VMBDA, t.VMBDAWithNoFstabEntry, t.VMOPRestore) + t.CheckVMAfterRestore(ctx, t.VM, t.VDRoot, t.VDBlank, t.VDBlankWithNoFstabEntry, t.VMBDA, t.VMBDAWithNoFstabEntry, t.VMOPRestore) }) By("After restoration, verify that labels and annotations are preserved on the resources", func() { - err := f.Clients.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(t.VDRoot), t.VDRoot) + err := f.Clients.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(t.VDRoot), t.VDRoot) Expect(err).NotTo(HaveOccurred()) Expect(t.VDRoot.Annotations[resourceAnnotationName]).To(Equal(resourceAnnotationValue)) Expect(t.VDRoot.Labels[resourceLabelName]).To(Equal(resourceLabelValue)) - err = f.Clients.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(t.VDBlank), t.VDBlank) + err = f.Clients.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(t.VDBlank), t.VDBlank) Expect(err).NotTo(HaveOccurred()) Expect(t.VDBlank.Annotations[resourceAnnotationName]).To(Equal(resourceAnnotationValue)) Expect(t.VDBlank.Labels[resourceLabelName]).To(Equal(resourceLabelValue)) - err = f.Clients.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(t.VMBDA), t.VMBDA) + err = f.Clients.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(t.VMBDA), t.VMBDA) Expect(err).NotTo(HaveOccurred()) Expect(t.VMBDA.Annotations[resourceAnnotationName]).To(Equal(resourceAnnotationValue)) Expect(t.VMBDA.Labels[resourceLabelName]).To(Equal(resourceLabelValue)) - err = f.Clients.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(t.VDBlankWithNoFstabEntry), t.VDBlankWithNoFstabEntry) + err = f.Clients.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(t.VDBlankWithNoFstabEntry), t.VDBlankWithNoFstabEntry) Expect(err).NotTo(HaveOccurred()) Expect(t.VDBlankWithNoFstabEntry.Annotations[resourceAnnotationName]).To(Equal(resourceAnnotationValue)) Expect(t.VDBlankWithNoFstabEntry.Labels[resourceLabelName]).To(Equal(resourceLabelValue)) - err = f.Clients.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(t.VMBDAWithNoFstabEntry), t.VMBDAWithNoFstabEntry) + err = f.Clients.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(t.VMBDAWithNoFstabEntry), t.VMBDAWithNoFstabEntry) Expect(err).NotTo(HaveOccurred()) Expect(t.VMBDAWithNoFstabEntry.Annotations[resourceAnnotationName]).To(Equal(resourceAnnotationValue)) Expect(t.VMBDAWithNoFstabEntry.Labels[resourceLabelName]).To(Equal(resourceLabelValue)) @@ -388,47 +389,47 @@ runcmd: ) } -func (t *restoreModeTest) RemoveRecoverableResources() { +func (t *restoreModeTest) RemoveRecoverableResources(ctx context.Context) { GinkgoHelper() util.StopVirtualMachineFromOS(t.Framework, t.VM) - util.UntilObjectPhase(string(v1alpha2.MachineStopped), framework.ShortTimeout, t.VM) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineStopped), framework.ShortTimeout, t.VM) - err := t.Framework.Delete(context.Background(), t.VDRoot, t.VDBlank, t.VMBDA, t.VDBlankWithNoFstabEntry, t.VMBDAWithNoFstabEntry) + err := t.Framework.Delete(ctx, t.VDRoot, t.VDBlank, t.VMBDA, t.VDBlankWithNoFstabEntry, t.VMBDAWithNoFstabEntry) Expect(err).NotTo(HaveOccurred()) // Wait for 
resources to be deleted before proceeding. Eventually(func(g Gomega) { var vdRootLocal v1alpha2.VirtualDisk - err = t.Framework.Clients.GenericClient().Get(context.Background(), types.NamespacedName{ + err = t.Framework.Clients.GenericClient().Get(ctx, types.NamespacedName{ Namespace: t.VDRoot.Namespace, Name: t.VDRoot.Name, }, &vdRootLocal) g.Expect(k8serrors.IsNotFound(err)).Should(BeTrue()) var vdBlankLocal v1alpha2.VirtualDisk - err = t.Framework.Clients.GenericClient().Get(context.Background(), types.NamespacedName{ + err = t.Framework.Clients.GenericClient().Get(ctx, types.NamespacedName{ Namespace: t.VDBlank.Namespace, Name: t.VDBlank.Name, }, &vdBlankLocal) g.Expect(k8serrors.IsNotFound(err)).Should(BeTrue()) var vmbdaLocal v1alpha2.VirtualMachineBlockDeviceAttachment - err = t.Framework.Clients.GenericClient().Get(context.Background(), types.NamespacedName{ + err = t.Framework.Clients.GenericClient().Get(ctx, types.NamespacedName{ Namespace: t.VMBDA.Namespace, Name: t.VMBDA.Name, }, &vmbdaLocal) g.Expect(k8serrors.IsNotFound(err)).Should(BeTrue()) var vdBlankWithNoFstabEntryLocal v1alpha2.VirtualDisk - err = t.Framework.Clients.GenericClient().Get(context.Background(), types.NamespacedName{ + err = t.Framework.Clients.GenericClient().Get(ctx, types.NamespacedName{ Namespace: t.VDBlankWithNoFstabEntry.Namespace, Name: t.VDBlankWithNoFstabEntry.Name, }, &vdBlankWithNoFstabEntryLocal) g.Expect(k8serrors.IsNotFound(err)).Should(BeTrue()) var vmbdaWithNoFstabEntryLocal v1alpha2.VirtualMachineBlockDeviceAttachment - err = t.Framework.Clients.GenericClient().Get(context.Background(), types.NamespacedName{ + err = t.Framework.Clients.GenericClient().Get(ctx, types.NamespacedName{ Namespace: t.VMBDAWithNoFstabEntry.Namespace, Name: t.VMBDAWithNoFstabEntry.Name, }, &vmbdaWithNoFstabEntryLocal) @@ -437,6 +438,7 @@ func (t *restoreModeTest) RemoveRecoverableResources() { } func (t *restoreModeTest) CheckVMAfterRestore( + ctx context.Context, vm *v1alpha2.VirtualMachine, vdRoot, vdBlank, vdBlankWithNoFstabEntry *v1alpha2.VirtualDisk, vmbda, vmbdaWithNoFstabEntry *v1alpha2.VirtualMachineBlockDeviceAttachment, @@ -444,7 +446,7 @@ func (t *restoreModeTest) CheckVMAfterRestore( ) { GinkgoHelper() - err := t.Framework.Clients.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(vm), vm) + err := t.Framework.Clients.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(vm), vm) Expect(err).NotTo(HaveOccurred()) // In DryRun mode, the VM should remain unchanged and VMOPRestore should contain @@ -452,7 +454,7 @@ func (t *restoreModeTest) CheckVMAfterRestore( // the VM should be restored to the snapshot state. 
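RemoveRecoverableResources above repeats the same Get-then-IsNotFound assertion for every deleted disk and attachment. A generic sketch of that wait using the controller-runtime client (a standalone illustration, not the suite's helper):

package example

import (
	"context"
	"time"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	crclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// waitDeleted polls until Get reports NotFound for the object or ctx is done.
// A nil error means the object still exists; any other non-NotFound error is
// treated the same way and simply retried on the next tick.
func waitDeleted(ctx context.Context, c crclient.Client, key crclient.ObjectKey, obj crclient.Object, interval time.Duration) error {
	for {
		if err := c.Get(ctx, key, obj); k8serrors.IsNotFound(err) {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(interval):
		}
	}
}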
switch vmopRestore.Spec.Restore.Mode { case v1alpha2.SnapshotOperationModeDryRun: - err := t.Framework.Clients.GenericClient().Get(context.Background(), crclient.ObjectKeyFromObject(vmopRestore), vmopRestore) + err := t.Framework.Clients.GenericClient().Get(ctx, crclient.ObjectKeyFromObject(vmopRestore), vmopRestore) Expect(err).NotTo(HaveOccurred()) t.CheckResourceReadyForRestore(vmopRestore, v1alpha2.VirtualMachineKind, vm.Name) @@ -462,7 +464,7 @@ func (t *restoreModeTest) CheckVMAfterRestore( t.CheckResourceReadyForRestore(vmopRestore, v1alpha2.VirtualDiskKind, vdBlankWithNoFstabEntry.Name) t.CheckResourceReadyForRestore(vmopRestore, v1alpha2.VirtualMachineBlockDeviceAttachmentKind, vmbdaWithNoFstabEntry.Name) - Expect(util.GetBlockDeviceHash(t.Framework, vm, v1alpha2.DiskDevice, vdBlankWithNoFstabEntry.Name)).To(Equal(t.BlockDeviceHash)) + Expect(util.GetBlockDeviceHash(ctx, t.Framework, vm, v1alpha2.DiskDevice, vdBlankWithNoFstabEntry.Name)).To(Equal(t.BlockDeviceHash)) Expect(util.ReadFile(t.Framework, vm, fileDataPath)).To(Equal(changedValueOnDisk)) Expect(vm.Annotations[vmAnnotationName]).To(Equal(vmAnnotationChangedValue)) Expect(vm.Labels[vmLabelName]).To(Equal(vmLabelChangedValue)) @@ -500,12 +502,12 @@ func (t *restoreModeTest) getResourceInfoFromVMOP(vmopRestore *v1alpha2.VirtualM return nil } -func (t *restoreModeTest) RestoreVM(vm *v1alpha2.VirtualMachine, vmopRestore *v1alpha2.VirtualMachineOperation) { +func (t *restoreModeTest) RestoreVM(ctx context.Context, vm *v1alpha2.VirtualMachine, vmopRestore *v1alpha2.VirtualMachineOperation) { GinkgoHelper() - err := t.Framework.CreateWithDeferredDeletion(context.Background(), vmopRestore) + err := t.Framework.CreateWithDeferredDeletion(ctx, vmopRestore) Expect(err).NotTo(HaveOccurred()) - util.UntilObjectPhase(string(v1alpha2.VMOPPhaseCompleted), framework.LongTimeout, t.VMOPRestore) + util.UntilObjectPhase(ctx, string(v1alpha2.VMOPPhaseCompleted), framework.LongTimeout, t.VMOPRestore) if vmopRestore.Spec.Restore.Mode == v1alpha2.SnapshotOperationModeDryRun { return @@ -515,12 +517,12 @@ func (t *restoreModeTest) RestoreVM(vm *v1alpha2.VirtualMachine, vmopRestore *v1 // if runPolicy == ManualPolicy, the VM should be started // cannot use isRestartRequired here, because we might skip the stopped phase if t.VM.Spec.RunPolicy == v1alpha2.ManualPolicy { - util.UntilObjectPhase(string(v1alpha2.MachineStopped), framework.ShortTimeout, t.VM) - util.StartVirtualMachine(t.Framework, t.VM) + util.UntilObjectPhase(ctx, string(v1alpha2.MachineStopped), framework.ShortTimeout, t.VM) + util.StartVirtualMachine(ctx, t.Framework, t.VM) } - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(t.VM), framework.LongTimeout) - util.UntilObjectPhase(string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.MiddleTimeout, t.VMBDA) + util.UntilVMAgentReady(ctx, crclient.ObjectKeyFromObject(t.VM), framework.LongTimeout) + util.UntilObjectPhase(ctx, string(v1alpha2.BlockDeviceAttachmentPhaseAttached), framework.MiddleTimeout, t.VMBDA) } func (t *restoreModeTest) CheckAdditionalNetworkInterface(vm *v1alpha2.VirtualMachine, ip string) { @@ -531,11 +533,11 @@ func (t *restoreModeTest) CheckAdditionalNetworkInterface(vm *v1alpha2.VirtualMa Expect(cmdOut).To(ContainSubstring(fmt.Sprintf("inet %s", ip))) } -func (t *restoreModeTest) IsStorageClassAvailableForTest(vm *v1alpha2.VirtualMachine) bool { +func (t *restoreModeTest) IsStorageClassAvailableForTest(ctx context.Context, vm *v1alpha2.VirtualMachine) bool { GinkgoHelper() var scList 
storagev1.StorageClassList - err := framework.GetClients().GenericClient().List(context.Background(), &scList) + err := framework.GetClients().GenericClient().List(ctx, &scList) Expect(err).NotTo(HaveOccurred()) sc := config.FindDefaultStorageClass(&scList)
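config.FindDefaultStorageClass above is the suite's own lookup; for orientation, one common way to pick the default class is to scan the list for the standard is-default-class annotation (a sketch, not necessarily what the suite's helper does):

package example

import storagev1 "k8s.io/api/storage/v1"

const defaultClassAnnotation = "storageclass.kubernetes.io/is-default-class"

// findDefaultStorageClass returns the first class annotated as the cluster
// default, or nil when none is marked.
func findDefaultStorageClass(list *storagev1.StorageClassList) *storagev1.StorageClass {
	for i := range list.Items {
		if list.Items[i].Annotations[defaultClassAnnotation] == "true" {
			return &list.Items[i]
		}
	}
	return nil
}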