BugId: 1999571 - fix clone into larger capacity nfs volume (#1939)

* BugId: 1999571 - fix clone into larger capacity nfs volume

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

* fix lint issues

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>
This commit is contained in:
Michael Henriksen 2021-09-15 14:22:35 -04:00 committed by GitHub
parent f5351b8bd4
commit 2889d68766
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 97 additions and 11 deletions

View File

@@ -64,6 +64,10 @@ spec:
chmod 777 /data/nfs/disk9;
mkdir /data/nfs/disk10;
chmod 777 /data/nfs/disk10;
mkdir /data/nfs/extraDisk1;
chmod 777 /data/nfs/extraDisk1;
mkdir /data/nfs/extraDisk2;
chmod 777 /data/nfs/extraDisk2;
/usr/bin/nfsd.sh
volumes:
- name: nfsdata

View File

@@ -1506,7 +1506,7 @@ func (r *DatavolumeReconciler) advancedClonePossible(dataVolume *cdiv1.DataVolum
return false, err
}
return r.validateCloneSizeCompatible(sourcePvc, targetStorageSpec)
return r.validateAdvancedCloneSizeCompatible(sourcePvc, targetStorageSpec)
}
func (r *DatavolumeReconciler) validateSameStorageClass(
@@ -1548,7 +1548,7 @@ func (r *DatavolumeReconciler) validateSameVolumeMode(
return true, nil
}
func (r *DatavolumeReconciler) validateCloneSizeCompatible(
func (r *DatavolumeReconciler) validateAdvancedCloneSizeCompatible(
sourcePvc *corev1.PersistentVolumeClaim,
targetStorageSpec *corev1.PersistentVolumeClaimSpec) (bool, error) {
@@ -1560,9 +1560,13 @@ func (r *DatavolumeReconciler) validateCloneSizeCompatible(
srcCapacity, hasSrcCapacity := sourcePvc.Status.Capacity[corev1.ResourceStorage]
targetRequest, hasTargetRequest := targetStorageSpec.Resources.Requests[corev1.ResourceStorage]
allowExpansion := srcStorageClass.AllowVolumeExpansion != nil && *srcStorageClass.AllowVolumeExpansion
if !hasSrcCapacity || !hasTargetRequest || (srcCapacity.Cmp(targetRequest) < 0 && !allowExpansion) {
if !hasSrcCapacity || !hasTargetRequest {
// return error so we retry the reconcile
return false, errors.New("source/target sizes not compatible")
return false, errors.New("source/target size info missing")
}
if srcCapacity.Cmp(targetRequest) < 0 && !allowExpansion {
return false, nil
}
return true, nil

View File

@@ -646,6 +646,69 @@ var _ = Describe("all clone tests", func() {
})
})
var _ = Describe("With nfs and larger target capacity", func() {
f := framework.NewFramework(namespacePrefix)
var (
bigPV *v1.PersistentVolume
bigDV *cdiv1.DataVolume
)
AfterEach(func() {
if bigDV != nil {
err := utils.DeleteDataVolume(f.CdiClient, f.Namespace.Name, bigDV.Name)
Expect(err).ToNot(HaveOccurred())
}
if bigPV != nil {
err := utils.WaitTimeoutForPVDeleted(f.K8sClient, bigPV, 30*time.Second)
Expect(err).ToNot(HaveOccurred())
}
})
It("should successfully clone", func() {
if !utils.IsNfs() {
Skip("NFS specific test")
}
By("Creating a source from a real image")
sourceDv := utils.NewDataVolumeWithHTTPImport("source-dv", "200Mi", fmt.Sprintf(utils.TinyCoreIsoURL, f.CdiInstallNs))
sourceDv, err := utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, sourceDv)
Expect(err).ToNot(HaveOccurred())
f.ForceBindPvcIfDvIsWaitForFirstConsumer(sourceDv)
By("Waiting for import to be completed")
utils.WaitForDataVolumePhaseWithTimeout(f.CdiClient, f.Namespace.Name, cdiv1.Succeeded, sourceDv.Name, 3*90*time.Second)
pvDef := framework.NfsPvDef(1, framework.ExtraNfsDiskPrefix, utils.NfsService.Spec.ClusterIP, framework.BiggerNfsPvSize)
pv, err := utils.CreatePVFromDefinition(f.K8sClient, pvDef)
Expect(err).ToNot(HaveOccurred())
bigPV = pv
targetDv := utils.NewDataVolumeForImageCloning("target-dv", framework.BiggerNfsPvSize, f.Namespace.Name, sourceDv.Name, sourceDv.Spec.PVC.StorageClassName, sourceDv.Spec.PVC.VolumeMode)
targetDv, err = utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, targetDv)
Expect(err).ToNot(HaveOccurred())
bigDV = targetDv
f.ForceBindPvcIfDvIsWaitForFirstConsumer(targetDv)
By("Waiting for clone to be completed")
err = utils.WaitForDataVolumePhaseWithTimeout(f.CdiClient, f.Namespace.Name, cdiv1.Succeeded, targetDv.Name, 3*90*time.Second)
Expect(err).ToNot(HaveOccurred())
By("Verify target is bigger")
srcPVC, err := f.K8sClient.CoreV1().PersistentVolumeClaims(sourceDv.Namespace).Get(context.TODO(), sourceDv.Name, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
targetPVC, err := f.K8sClient.CoreV1().PersistentVolumeClaims(targetDv.Namespace).Get(context.TODO(), targetDv.Name, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
srcCapacity := srcPVC.Status.Capacity.Storage()
Expect(srcCapacity).ToNot(BeNil())
targetCapacity := targetPVC.Status.Capacity.Storage()
Expect(targetCapacity).ToNot(BeNil())
Expect(srcCapacity.Cmp(*targetCapacity)).To(Equal(-1))
})
})
var _ = Describe("Validate Data Volume clone to smaller size", func() {
f := framework.NewFramework(namespacePrefix)
tinyCoreIsoURL := func() string { return fmt.Sprintf(utils.TinyCoreIsoURL, f.CdiInstallNs) }

View File

@@ -1,7 +1,9 @@
package framework
import (
"fmt"
"strconv"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
@@ -14,14 +16,25 @@ import (
)
const (
// DefaultNfsPvSize is the default nfs pv capacity
DefaultNfsPvSize = "10Gi"
// BiggerNfsPvSize is the bigger nfs pv capacity
BiggerNfsPvSize = "20Gi"
// ExtraNfsDiskPrefix is the prefix for extra nfs disks
ExtraNfsDiskPrefix = "/extraDisk"
timeout = time.Second * 90
pollingInterval = time.Second
pvCount = 10
defaultPrefix = "/disk"
)
func createNFSPVs(client *kubernetes.Clientset, cdiNs string) error {
ip := utils.NfsService.Spec.ClusterIP
for i := 1; i <= pvCount; i++ {
if _, err := utils.CreatePVFromDefinition(client, nfsPVDef(strconv.Itoa(i), utils.NfsService.Spec.ClusterIP)); err != nil {
if _, err := utils.CreatePVFromDefinition(client, NfsPvDef(i, defaultPrefix, ip, DefaultNfsPvSize)); err != nil {
// reset rangeCount
return err
}
@@ -31,7 +44,7 @@ func createNFSPVs(client *kubernetes.Clientset, cdiNs string) error {
func deleteNFSPVs(client *kubernetes.Clientset, cdiNs string) error {
for i := 1; i <= pvCount; i++ {
pv := nfsPVDef(strconv.Itoa(i), utils.NfsService.Spec.ClusterIP)
pv := NfsPvDef(i, defaultPrefix, utils.NfsService.Spec.ClusterIP, DefaultNfsPvSize)
if err := utils.DeletePV(client, pv); err != nil {
if !errors.IsNotFound(err) {
return err
@@ -39,7 +52,7 @@ func deleteNFSPVs(client *kubernetes.Clientset, cdiNs string) error {
}
}
for i := 1; i <= pvCount; i++ {
pv := nfsPVDef(strconv.Itoa(i), utils.NfsService.Spec.ClusterIP)
pv := NfsPvDef(i, defaultPrefix, utils.NfsService.Spec.ClusterIP, DefaultNfsPvSize)
if err := utils.WaitTimeoutForPVDeleted(client, pv, timeout); err != nil {
return err
}
@@ -47,10 +60,12 @@ func deleteNFSPVs(client *kubernetes.Clientset, cdiNs string) error {
return nil
}
func nfsPVDef(index, serviceIP string) *corev1.PersistentVolume {
// NfsPvDef creates pv defs for nfs
func NfsPvDef(index int, prefix, serviceIP, size string) *corev1.PersistentVolume {
is := strconv.Itoa(index)
return &corev1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "nfs-pv" + index,
Name: fmt.Sprintf("nfs-pv%s%s", strings.Replace(strings.ToLower(prefix), "/", "-", -1), is),
},
Spec: corev1.PersistentVolumeSpec{
StorageClassName: "nfs",
@@ -59,12 +74,12 @@ func nfsPVDef(index, serviceIP string) *corev1.PersistentVolume {
corev1.ReadWriteMany,
},
Capacity: corev1.ResourceList{
corev1.ResourceName(corev1.ResourceStorage): resource.MustParse("30Gi"),
corev1.ResourceName(corev1.ResourceStorage): resource.MustParse(size),
},
PersistentVolumeSource: corev1.PersistentVolumeSource{
NFS: &corev1.NFSVolumeSource{
Server: serviceIP,
Path: "/disk" + index,
Path: prefix + is,
},
},
PersistentVolumeReclaimPolicy: corev1.PersistentVolumeReclaimDelete,