Address possible nils in dv controller, log CSIDrivers in tests (#2253)

* [WIP] Debug ceph csidriver not being there

We'd expect the CSIDriver object to be present; otherwise, the ceph install might be struggling

Signed-off-by: Alex Kalenyuk <akalenyu@redhat.com>

* Avoid possible StorageClassName nils

Signed-off-by: Alex Kalenyuk <akalenyu@redhat.com>

akalenyu 2022-10-20 01:50:04 +03:00 committed by GitHub
parent 5d9cc8d9ff
commit 4f0fa1fec2
3 changed files with 65 additions and 12 deletions


@@ -704,7 +704,10 @@ func (r *DatavolumeReconciler) reconcileClone(log logr.Logger,
 	if pvc == nil {
 		if selectedCloneStrategy == SmartClone {
-			snapshotClassName, _ := r.getSnapshotClassForSmartClone(datavolume, pvcSpec)
+			snapshotClassName, err := r.getSnapshotClassForSmartClone(datavolume, pvcSpec)
+			if err != nil {
+				return reconcile.Result{}, err
+			}
 			return r.reconcileSmartClonePvc(log, datavolume, pvcSpec, transferName, snapshotClassName)
 		}
 		if selectedCloneStrategy == CsiClone {
@@ -714,12 +717,20 @@ func (r *DatavolumeReconciler) reconcileClone(log logr.Logger,
 			}
 			if !csiDriverAvailable {
 				// err csi clone not possible
+				storageClass, err := GetStorageClassByName(r.client, pvcSpec.StorageClassName)
+				if err != nil {
+					return reconcile.Result{}, err
+				}
+				noCsiDriverMsg := "CSI Clone configured, failed to look for CSIDriver - target storage class could not be found"
+				if storageClass != nil {
+					noCsiDriverMsg = fmt.Sprintf("CSI Clone configured, but no CSIDriver available for %s", storageClass.Name)
+				}
 				return reconcile.Result{},
 					r.updateDataVolumeStatusPhaseWithEvent(cdiv1.CloneScheduled, datavolume, pvc, selectedCloneStrategy,
 						DataVolumeEvent{
 							eventType: corev1.EventTypeWarning,
 							reason:    ErrUnableToClone,
-							message:   fmt.Sprintf("CSI Clone configured, but no CSIDriver available for %s", *pvcSpec.StorageClassName),
+							message:   noCsiDriverMsg,
 						})
 			}
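For context, the csiDriverAvailable flag checked above presumably comes from looking up whether a CSIDriver object exists for the provisioner of the target storage class; if the provisioner never registered one, CSI clone cannot work. A minimal sketch of such a lookup, assuming a controller-runtime client; the helper name csiDriverExists is illustrative, not CDI's actual function:

import (
	"context"

	storagev1 "k8s.io/api/storage/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// csiDriverExists reports whether a CSIDriver object is registered for
// the provisioner of the named storage class. Illustrative sketch only;
// CDI's real helper may differ.
func csiDriverExists(ctx context.Context, c client.Client, scName string) (bool, error) {
	sc := &storagev1.StorageClass{}
	if err := c.Get(ctx, types.NamespacedName{Name: scName}, sc); err != nil {
		return false, err
	}
	// CSIDriver objects are cluster-scoped and named after the driver,
	// which is what StorageClass.Provisioner holds for CSI classes.
	driver := &storagev1.CSIDriver{}
	if err := c.Get(ctx, types.NamespacedName{Name: sc.Provisioner}, driver); err != nil {
		if k8serrors.IsNotFound(err) {
			return false, nil // no driver registered: CSI clone not possible
		}
		return false, err
	}
	return true, nil
}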
@@ -1504,7 +1515,7 @@ func (r *DatavolumeReconciler) cleanupTransfer(log logr.Logger, dv *cdiv1.DataVolume
 	}
 	RemoveFinalizer(dv, crossNamespaceFinalizer)
-	if err := r.updateDataVolume(dv); dv != nil {
+	if err := r.updateDataVolume(dv); err != nil {
 		return err
 	}
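The one-word fix above is easy to miss in review: the old guard tested dv != nil (always true right after RemoveFinalizer was called on it) instead of err != nil, so the error returned by updateDataVolume was never actually inspected.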
@@ -1886,6 +1897,9 @@ func (r *DatavolumeReconciler) validateAdvancedCloneSizeCompatible(
 	sourcePvc *corev1.PersistentVolumeClaim,
 	targetStorageSpec *corev1.PersistentVolumeClaimSpec) (bool, error) {
 	srcStorageClass := &storagev1.StorageClass{}
+	if sourcePvc.Spec.StorageClassName == nil {
+		return false, fmt.Errorf("Source PVC Storage Class name wasn't populated yet by PVC controller")
+	}
 	if err := r.client.Get(context.TODO(), types.NamespacedName{Name: *sourcePvc.Spec.StorageClassName}, srcStorageClass); IgnoreNotFound(err) != nil {
 		return false, err
 	}
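Both nil guards added in this commit exist because Spec.StorageClassName on a PVC is a *string that stays nil until the PVC controller populates it (for instance, while a default storage class is still being assigned), so an unconditional dereference like the old *pvcSpec.StorageClassName can panic. A minimal sketch of the defensive pattern; storageClassNameOf is a hypothetical helper, not CDI code:

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// storageClassNameOf safely resolves a PVC's storage class name,
// returning an error instead of panicking while the field is unset.
func storageClassNameOf(pvc *corev1.PersistentVolumeClaim) (string, error) {
	if pvc.Spec.StorageClassName == nil || *pvc.Spec.StorageClassName == "" {
		return "", fmt.Errorf("storage class name not populated yet on PVC %s/%s", pvc.Namespace, pvc.Name)
	}
	return *pvc.Spec.StorageClassName, nil
}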


@@ -1403,7 +1403,7 @@ var _ = Describe("All DataVolume Tests", func() {
 			Expect(snapclass).To(BeEmpty())
 		})
-		It("Should not return snapshot class, if storage class does not exist", func() {
+		It("Should not return snapshot class, if storage class exists but snapshot class does not exist", func() {
 			dv := newCloneDataVolume("test-dv")
 			scName := "testsc"
 			sc := createStorageClass(scName, map[string]string{
@@ -1555,6 +1555,30 @@ var _ = Describe("All DataVolume Tests", func() {
 			Entry("Should be Succeeded, if source pvc is ClaimBound", corev1.ClaimBound, cdiv1.Succeeded),
 		)
+
+		It("Should not panic if CSI Driver not available and no storage class on PVC spec", func() {
+			strategy := cdiv1.CDICloneStrategy(cdiv1.CloneStrategyCsiClone)
+			dv := newCloneDataVolume("test-dv")
+			scName := "testsc"
+			srcPvc := createPvcInStorageClass("test", metav1.NamespaceDefault, &scName, nil, nil, corev1.ClaimBound)
+			sc := createStorageClassWithProvisioner(scName, map[string]string{
+				AnnDefaultStorageClass: "true",
+			}, map[string]string{}, "csi-plugin")
+			accessMode := []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}
+			storageProfile := createStorageProfileWithCloneStrategy(scName,
+				[]cdiv1.ClaimPropertySet{{AccessModes: accessMode, VolumeMode: &blockMode}},
+				&strategy)
+			reconciler := createDatavolumeReconciler(dv, srcPvc, storageProfile, sc, createVolumeSnapshotContentCrd(), createVolumeSnapshotClassCrd(), createVolumeSnapshotCrd())
+
+			By("Reconcile")
+			result, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: dv.Name, Namespace: dv.Namespace}})
+			Expect(err).ToNot(HaveOccurred())
+			Expect(result).ToNot(BeNil())
+		})
 	})
 
 var _ = Describe("Clone without source", func() {


@@ -772,6 +772,7 @@ func (r *KubernetesReporter) Dump(kubeCli *kubernetes.Clientset, cdiClient *cdiClientset.Clientset, since time.Duration) {
 		return
 	}
+	r.logCSIDrivers(kubeCli)
 	r.logDVs(cdiClient)
 	r.logEvents(kubeCli, since)
 	r.logNodes(kubeCli)
@@ -792,7 +793,6 @@ func (r *KubernetesReporter) Cleanup() {
 }
 
-
 func (r *KubernetesReporter) logPods(kubeCli *kubernetes.Clientset) {
 	f, err := os.OpenFile(filepath.Join(r.artifactsDir, fmt.Sprintf("%d_pods.log", r.FailureCount)),
 		os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
 	if err != nil {
@@ -815,7 +815,6 @@ func (r *KubernetesReporter) logPods(kubeCli *kubernetes.Clientset) {
 }
 
-
 func (r *KubernetesReporter) logServices(kubeCli *kubernetes.Clientset) {
 	f, err := os.OpenFile(filepath.Join(r.artifactsDir, fmt.Sprintf("%d_services.log", r.FailureCount)),
 		os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
 	if err != nil {
@@ -838,7 +837,6 @@ func (r *KubernetesReporter) logServices(kubeCli *kubernetes.Clientset) {
 }
 
-
 func (r *KubernetesReporter) logEndpoints(kubeCli *kubernetes.Clientset) {
 	f, err := os.OpenFile(filepath.Join(r.artifactsDir, fmt.Sprintf("%d_endpoints.log", r.FailureCount)),
 		os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
 	if err != nil {
@@ -861,7 +859,6 @@ func (r *KubernetesReporter) logEndpoints(kubeCli *kubernetes.Clientset) {
 }
 
-
 func (r *KubernetesReporter) logNodes(kubeCli *kubernetes.Clientset) {
 	f, err := os.OpenFile(filepath.Join(r.artifactsDir, fmt.Sprintf("%d_nodes.log", r.FailureCount)),
 		os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
 	if err != nil {
@@ -884,7 +881,6 @@ func (r *KubernetesReporter) logNodes(kubeCli *kubernetes.Clientset) {
 }
 
-
 func (r *KubernetesReporter) logPVs(kubeCli *kubernetes.Clientset) {
 	f, err := os.OpenFile(filepath.Join(r.artifactsDir, fmt.Sprintf("%d_pvs.log", r.FailureCount)),
 		os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
 	if err != nil {
@@ -907,7 +903,6 @@ func (r *KubernetesReporter) logPVs(kubeCli *kubernetes.Clientset) {
 }
 
-
 func (r *KubernetesReporter) logPVCs(kubeCli *kubernetes.Clientset) {
 	f, err := os.OpenFile(filepath.Join(r.artifactsDir, fmt.Sprintf("%d_pvcs.log", r.FailureCount)),
 		os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
 	if err != nil {
@@ -951,8 +946,29 @@ func (r *KubernetesReporter) logDVs(cdiClientset *cdiClientset.Clientset) {
 	fmt.Fprintln(f, string(j))
 }
 
-func (r *KubernetesReporter) logLogs(kubeCli *kubernetes.Clientset, since time.Duration) {
+func (r *KubernetesReporter) logCSIDrivers(kubeCli *kubernetes.Clientset) {
+	f, err := os.OpenFile(filepath.Join(r.artifactsDir, fmt.Sprintf("%d_csidrivers.log", r.FailureCount)),
+		os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "failed to open the file: %v\n", err)
+		return
+	}
+	defer f.Close()
+
+	csiDrivers, err := kubeCli.StorageV1().CSIDrivers().List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "failed to fetch csidrivers: %v\n", err)
+		return
+	}
+
+	j, err := json.MarshalIndent(csiDrivers, "", " ")
+	if err != nil {
+		return
+	}
+	fmt.Fprintln(f, string(j))
+}
+
+func (r *KubernetesReporter) logLogs(kubeCli *kubernetes.Clientset, since time.Duration) {
 	logsdir := filepath.Join(r.artifactsDir, "pods")
 	if err := os.MkdirAll(logsdir, 0777); err != nil {
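To check by hand what the reporter now dumps (for example, when chasing the "ceph csidriver not being there" suspicion from the commit message), the same typed List call can be run standalone. A minimal client-go sketch; the kubeconfig path and the expected ceph driver names are assumptions, not part of this change:

package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset from the default kubeconfig location.
	cfg, err := clientcmd.BuildConfigFromFlags("", filepath.Join(os.Getenv("HOME"), ".kube", "config"))
	if err != nil {
		panic(err)
	}
	cli := kubernetes.NewForConfigOrDie(cfg)

	// Same call logCSIDrivers uses: list the cluster-scoped CSIDriver objects.
	drivers, err := cli.StorageV1().CSIDrivers().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, d := range drivers.Items {
		fmt.Println(d.Name)
	}
	// On a healthy ceph CSI install you would expect entries such as
	// "rbd.csi.ceph.com" / "cephfs.csi.ceph.com" (exact names depend on
	// the deployment).
}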
@@ -999,7 +1015,6 @@ func (r *KubernetesReporter) logLogs(kubeCli *kubernetes.Clientset, since time.Duration) {
 }
 
-
 func (r *KubernetesReporter) logEvents(kubeCli *kubernetes.Clientset, since time.Duration) {
 	f, err := os.OpenFile(filepath.Join(r.artifactsDir, fmt.Sprintf("%d_events.log", r.FailureCount)),
 		os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
 	if err != nil {