diff --git a/test/extended/node/image_volume.go b/test/extended/node/image_volume.go
index ba267811312a..dea55d4e1a2f 100644
--- a/test/extended/node/image_volume.go
+++ b/test/extended/node/image_volume.go
@@ -2,6 +2,8 @@ package node
 
 import (
 	"context"
+	"path"
+	"strings"
 	"time"
 
 	g "github.com/onsi/ginkgo/v2"
@@ -15,97 +17,141 @@ import (
 	admissionapi "k8s.io/pod-security-admission/api"
 
 	exutil "github.com/openshift/origin/test/extended/util"
+	"github.com/openshift/origin/test/extended/util/image"
 )
 
-var _ = g.Describe("[sig-node] [FeatureGate:ImageVolume] ImageVolume", func() {
-	defer g.GinkgoRecover()
-
-	f := framework.NewDefaultFramework("image-volume")
-	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
-
-	var (
-		oc      = exutil.NewCLI("image-volume")
-		podName = "image-volume-test"
-		image   = "image-registry.openshift-image-registry.svc:5000/openshift/cli:latest"
-	)
-
-	g.BeforeEach(func() {
-		// Microshift doesn't inherit OCP feature gates, and ImageVolume won't work either
-		isMicroshift, err := exutil.IsMicroShiftCluster(oc.AdminKubeClient())
-		o.Expect(err).NotTo(o.HaveOccurred())
-		if isMicroshift {
-			g.Skip("Not supported on Microshift")
-		}
-	})
+// imageVolumeTestConfig parameterizes the shared image/artifact volume test suite.
+type imageVolumeTestConfig struct {
+	// describeLabel is appended to "[sig-node] [FeatureGate:ImageVolume] "
+	describeLabel string
+	// frameworkName is used for framework.NewDefaultFramework and exutil.NewCLI
+	frameworkName string
+	// getVolumeRef returns the reference for the image/artifact volume.
+	// Called at the start of each test that needs a valid, pullable reference.
+	getVolumeRef func(ctx context.Context, oc *exutil.CLI, namespace string) string
+	// pathsToVerify are paths relative to the image root that must exist after mounting.
+	// The directory of the first path is used as the subPath in the subPath test.
+	pathsToVerify []string
+}
 
-	g.It("should succeed with pod and pull policy of Always", func(ctx context.Context) {
-		pod := buildPodWithImageVolume(f.Namespace.Name, "", podName, image)
-		createPodAndWaitForRunning(ctx, oc, pod)
-		verifyVolumeMounted(f, pod, "ls", "/mnt/image/bin/oc")
-	})
+// Register the artifact and image volume test suites.
+var _ = describeImageVolumeTests(imageVolumeTestConfig{
+	describeLabel: "ArtifactVolume",
+	frameworkName: "artifact-volume",
+	getVolumeRef: func(_ context.Context, _ *exutil.CLI, _ string) string {
+		return image.LocationFor("quay.io/crio/artifact:subpath")
+	},
+	pathsToVerify: []string{"subpath/2", "subpath/3"},
+})
 
-	g.It("should handle multiple image volumes", func(ctx context.Context) {
-		pod := buildPodWithMultipleImageVolumes(f.Namespace.Name, "", podName, image, image)
-		createPodAndWaitForRunning(ctx, oc, pod)
-		verifyVolumeMounted(f, pod, "ls", "/mnt/image/bin/oc")
-		verifyVolumeMounted(f, pod, "ls", "/mnt/image2/bin/oc")
-	})
+var _ = describeImageVolumeTests(imageVolumeTestConfig{
+	describeLabel: "ImageVolume",
+	frameworkName: "image-volume",
+	getVolumeRef: func(_ context.Context, _ *exutil.CLI, _ string) string {
+		return "image-registry.openshift-image-registry.svc:5000/openshift/cli:latest"
+	},
+	pathsToVerify: []string{"bin/oc"},
+})
 
-	g.It("should fail when image does not exist", func(ctx context.Context) {
-		pod := buildPodWithImageVolume(f.Namespace.Name, "", podName, "nonexistent:latest")
+// describeImageVolumeTests generates a full test suite for image or artifact volumes.
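+// It returns g.Describe's bool result so that each invocation can be assigned to a
+// package-level `var _ =`, which is how Ginkgo specs get registered at init time.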
+func describeImageVolumeTests(config imageVolumeTestConfig) bool {
+	return g.Describe("[sig-node] [FeatureGate:ImageVolume] "+config.describeLabel, func() {
+		defer g.GinkgoRecover()
 
-		g.By("Creating a pod with non-existent image volume")
-		_, err := oc.AdminKubeClient().CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
-		o.Expect(err).NotTo(o.HaveOccurred())
+		f := framework.NewDefaultFramework(config.frameworkName)
+		f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 
-		g.By("Waiting for pod to be ErrImagePull or ImagePullBackOff")
-		// wait for 5 mins so that the pod in metal-ovn-two-node-arbiter-ipv6-techpreview can become ImagePullBackOff.
-		err = e2epod.WaitForPodCondition(ctx, oc.AdminKubeClient(), pod.Namespace, pod.Name, "ImagePullBackOff", 5*time.Minute, func(pod *v1.Pod) (bool, error) {
-			return len(pod.Status.ContainerStatuses) > 0 &&
-				pod.Status.ContainerStatuses[0].State.Waiting != nil &&
-				(pod.Status.ContainerStatuses[0].State.Waiting.Reason == "ImagePullBackOff" || pod.Status.ContainerStatuses[0].State.Waiting.Reason == "ErrImagePull"),
-				nil
-		})
-		o.Expect(err).NotTo(o.HaveOccurred())
-	})
+		var (
+			oc      = exutil.NewCLI(config.frameworkName)
+			podName = config.frameworkName + "-test"
+		)
 
-	g.It("should succeed if image volume is not existing but unused", func(ctx context.Context) {
-		pod := buildPodWithImageVolume(f.Namespace.Name, "", podName, "nonexistent:latest")
-		pod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{}
-		createPodAndWaitForRunning(ctx, oc, pod)
-		// The container has no image volume mount, so just checking running is enough
-	})
-
-	g.It("should succeed with multiple pods and same image on the same node", func(ctx context.Context) {
-		pod1 := buildPodWithImageVolume(f.Namespace.Name, "", podName, image)
-		pod1 = createPodAndWaitForRunning(ctx, oc, pod1)
-
-		pod2 := buildPodWithImageVolume(f.Namespace.Name, pod1.Spec.NodeName, podName+"-2", image)
-		pod2 = createPodAndWaitForRunning(ctx, oc, pod2)
+		g.BeforeEach(func() {
+			// MicroShift doesn't inherit OCP feature gates, so ImageVolume won't work there either.
+			isMicroshift, err := exutil.IsMicroShiftCluster(oc.AdminKubeClient())
+			o.Expect(err).NotTo(o.HaveOccurred())
+			if isMicroshift {
+				g.Skip("Not supported on Microshift")
+			}
+		})
 
-		verifyVolumeMounted(f, pod1, "ls", "/mnt/image/bin/oc")
-		verifyVolumeMounted(f, pod2, "ls", "/mnt/image/bin/oc")
-	})
+		g.It("should succeed with pod and pull policy of Always", func(ctx context.Context) {
+			ref := config.getVolumeRef(ctx, oc, f.Namespace.Name)
+			pod := buildPodWithImageVolume(f.Namespace.Name, "", podName, ref)
+			createPodAndWaitForRunning(ctx, oc, pod)
+			verifyPathsExist(f, pod, "/mnt/image", config.pathsToVerify)
+		})
 
-	g.Context("when subPath is used", func() {
-		g.It("should handle image volume with subPath", func(ctx context.Context) {
-			pod := buildPodWithImageVolumeSubPath(f.Namespace.Name, "", podName, image, "bin")
+		g.It("should handle multiple image volumes", func(ctx context.Context) {
+			ref := config.getVolumeRef(ctx, oc, f.Namespace.Name)
+			pod := buildPodWithMultipleImageVolumes(f.Namespace.Name, "", podName, ref, ref)
 			createPodAndWaitForRunning(ctx, oc, pod)
-			verifyVolumeMounted(f, pod, "ls", "/mnt/image/oc")
+			verifyPathsExist(f, pod, "/mnt/image", config.pathsToVerify)
+			verifyPathsExist(f, pod, "/mnt/image2", config.pathsToVerify)
 		})
 
-		g.It("should fail to mount image volume with invalid subPath", func(ctx context.Context) {
-			pod := buildPodWithImageVolumeSubPath(f.Namespace.Name, "", podName, image, "noexist")
-			g.By("Creating a pod with image volume and subPath")
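+		// An image volume reference is pulled like any container image, so a bad
+		// reference should surface on the container status as ErrImagePull and then
+		// ImagePullBackOff rather than at pod creation.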
"noexist") - g.By("Creating a pod with image volume and subPath") + g.It("should fail when image does not exist", func(ctx context.Context) { + pod := buildPodWithImageVolume(f.Namespace.Name, "", podName, "nonexistent:latest") + + g.By("Creating a pod with non-existent image volume") _, err := oc.AdminKubeClient().CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) - g.By("Waiting for a pod to fail") - err = e2epod.WaitForPodContainerToFail(ctx, oc.AdminKubeClient(), pod.Namespace, pod.Name, 0, kuberuntime.ErrCreateContainer.Error(), 5*time.Minute) + g.By("Waiting for pod to be ErrImagePull or ImagePullBackOff") + // wait for 5 mins so that the pod in metal-ovn-two-node-arbiter-ipv6-techpreview can become ImagePullBackOff. + err = e2epod.WaitForPodCondition(ctx, oc.AdminKubeClient(), pod.Namespace, pod.Name, "ImagePullBackOff", 5*time.Minute, func(pod *v1.Pod) (bool, error) { + return len(pod.Status.ContainerStatuses) > 0 && + pod.Status.ContainerStatuses[0].State.Waiting != nil && + (pod.Status.ContainerStatuses[0].State.Waiting.Reason == "ImagePullBackOff" || pod.Status.ContainerStatuses[0].State.Waiting.Reason == "ErrImagePull"), + nil + }) o.Expect(err).NotTo(o.HaveOccurred()) }) + + g.It("should succeed if image volume is not existing but unused", func(ctx context.Context) { + pod := buildPodWithImageVolume(f.Namespace.Name, "", podName, "nonexistent:latest") + pod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{} + createPodAndWaitForRunning(ctx, oc, pod) + // The container has no image volume mount, so just checking running is enough + }) + + g.It("should succeed with multiple pods and same image on the same node", func(ctx context.Context) { + ref := config.getVolumeRef(ctx, oc, f.Namespace.Name) + + pod1 := buildPodWithImageVolume(f.Namespace.Name, "", podName, ref) + pod1 = createPodAndWaitForRunning(ctx, oc, pod1) + + pod2 := buildPodWithImageVolume(f.Namespace.Name, pod1.Spec.NodeName, podName+"-2", ref) + pod2 = createPodAndWaitForRunning(ctx, oc, pod2) + + verifyPathsExist(f, pod1, "/mnt/image", config.pathsToVerify) + verifyPathsExist(f, pod2, "/mnt/image", config.pathsToVerify) + }) + + g.Context("when subPath is used", func() { + g.It("should handle image volume with subPath", func(ctx context.Context) { + ref := config.getVolumeRef(ctx, oc, f.Namespace.Name) + // Use the top-level directory of the first path as the subPath + subPath := strings.Split(config.pathsToVerify[0], "/")[0] + pod := buildPodWithImageVolumeSubPath(f.Namespace.Name, "", podName, ref, subPath) + createPodAndWaitForRunning(ctx, oc, pod) + verifyPathsExist(f, pod, "/mnt/image", trimSubPath(config.pathsToVerify, subPath)) + }) + + g.It("should fail to mount image volume with invalid subPath", func(ctx context.Context) { + ref := config.getVolumeRef(ctx, oc, f.Namespace.Name) + pod := buildPodWithImageVolumeSubPath(f.Namespace.Name, "", podName, ref, "noexist") + g.By("Creating a pod with image volume and subPath") + _, err := oc.AdminKubeClient().CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Waiting for a pod to fail") + err = e2epod.WaitForPodContainerToFail(ctx, oc.AdminKubeClient(), pod.Namespace, pod.Name, 0, kuberuntime.ErrCreateContainer.Error(), 5*time.Minute) + o.Expect(err).NotTo(o.HaveOccurred()) + }) + }) }) -}) +} func createPodAndWaitForRunning(ctx context.Context, oc *exutil.CLI, pod *v1.Pod) *v1.Pod { g.By("Creating a pod") @@ -121,12 +167,28 @@ func 
+func verifyPathsExist(f *framework.Framework, pod *v1.Pod, mountPoint string, paths []string) {
+	args := []string{"ls"}
+	for _, p := range paths {
+		args = append(args, path.Join(mountPoint, p))
+	}
+	g.By("Verifying paths exist in pod")
+	stdout := e2epod.ExecCommandInContainer(f, pod.Name, pod.Spec.Containers[0].Name, args...)
 	o.Expect(stdout).NotTo(o.BeEmpty())
 }
 
+// trimSubPath returns paths relative to subPath.
+// Panics via gomega if any path does not start with subPath as its first component.
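+// For example, trimSubPath([]string{"subpath/2", "subpath/3"}, "subpath")
+// returns []string{"2", "3"}.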
+func trimSubPath(paths []string, subPath string) []string {
+	result := make([]string, len(paths))
+	for i, p := range paths {
+		parts := strings.Split(p, "/")
+		o.ExpectWithOffset(1, parts[0]).To(o.Equal(subPath), "path %q does not start with subPath %q", p, subPath)
+		result[i] = path.Join(parts[1:]...)
+	}
+	return result
+}
+
 func buildPodWithImageVolume(namespace, nodeName, podName, image string) *v1.Pod {
 	pod := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
diff --git a/test/extended/util/image/README.md b/test/extended/util/image/README.md
index f9d9184e758e..ac6884ed3953 100644
--- a/test/extended/util/image/README.md
+++ b/test/extended/util/image/README.md
@@ -53,6 +53,7 @@ Exceptions to the manifest list requirement can be configured in the `verify-man
 **Permanent exceptions:**
 
 * `e2e-22-registry-k8s-io-e2e-test-images-node-perf-tf-wide-deep` - TensorFlow image with limited architecture support
 * `registry-k8s-io-cloud-provider-gcp-gcp-compute-persistent-disk-csi-driver` - Upstream GCE PD CSI driver images are not available on ppc64le or s390x, because GCE does not support these architectures.
+* `e2e-quay-io-crio-artifact-subpath-8cuvQpZ0AoyNahYr` - This is an artifact, not an image. It's not runnable and thus it doesn't have to be multiarch.
 
 Additional permanent exceptions may be added to this list as needed with proper justification.
diff --git a/test/extended/util/image/image.go b/test/extended/util/image/image.go
index 4f91678d21d8..0342913278bb 100644
--- a/test/extended/util/image/image.go
+++ b/test/extended/util/image/image.go
@@ -70,6 +70,9 @@ var (
 
 		// used by zstd:chunked image tests
 		"quay.io/crio/zstd-chunked:2": -1,
+
+		// used by artifact volume tests
+		"quay.io/crio/artifact:subpath": -1,
 	}
 )
 
diff --git a/test/extended/util/image/zz_generated.txt b/test/extended/util/image/zz_generated.txt
index 6abb22b491f3..ad1715f60877 100644
--- a/test/extended/util/image/zz_generated.txt
+++ b/test/extended/util/image/zz_generated.txt
@@ -1,5 +1,6 @@
 # This file is generated by hack/update-generated.sh
 docker.io/library/registry:2.8.0-beta.1 quay.io/openshift/community-e2e-images:e2e-docker-io-library-registry-2-8-0-beta-1-8x_YFKSuz9Xw6lZD
+quay.io/crio/artifact:subpath quay.io/openshift/community-e2e-images:e2e-quay-io-crio-artifact-subpath-8cuvQpZ0AoyNahYr
 quay.io/crio/zstd-chunked:2 quay.io/openshift/community-e2e-images:e2e-quay-io-crio-zstd-chunked-2-8Uf42eAKe3YN2nwu
 quay.io/keycloak/keycloak:25.0 quay.io/openshift/community-e2e-images:e2e-quay-io-keycloak-keycloak-25-0-rEIw9B2Zcc3L1M6k
 quay.io/kubevirt/fedora-with-test-tooling-container-disk:20241024_891122a6fc quay.io/openshift/community-e2e-images:e2e-quay-io-kubevirt-fedora-with-test-tooling-container-disk-20241024_891122a6fc-IycYTh-87XrXse4E