diff --git a/cmd/cdi-controller/controller.go b/cmd/cdi-controller/controller.go index b11c9516e..14b903fcc 100644 --- a/cmd/cdi-controller/controller.go +++ b/cmd/cdi-controller/controller.go @@ -7,19 +7,26 @@ import ( "os" "os/signal" + crdv1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" route1client "github.com/openshift/client-go/route/clientset/versioned" routeinformers "github.com/openshift/client-go/route/informers/externalversions" "github.com/pkg/errors" + v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + extclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + crdinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog" clientset "kubevirt.io/containerized-data-importer/pkg/client/clientset/versioned" informers "kubevirt.io/containerized-data-importer/pkg/client/informers/externalversions" "kubevirt.io/containerized-data-importer/pkg/common" "kubevirt.io/containerized-data-importer/pkg/controller" + csiclientset "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned" + csiinformers "kubevirt.io/containerized-data-importer/pkg/snapshot-client/informers/externalversions" ) const ( @@ -105,7 +112,20 @@ func start(cfg *rest.Config, stopCh <-chan struct{}) { klog.Fatalf("Error building example clientset: %s", err.Error()) } + csiClient, err := csiclientset.NewForConfig(cfg) + if err != nil { + klog.Fatalf("Error building csi clientset: %s", err.Error()) + } + + extClient, err := extclientset.NewForConfig(cfg) + if err != nil { + klog.Fatalf("Error building extClient: %s", err.Error()) + } + cdiInformerFactory := informers.NewSharedInformerFactory(cdiClient, common.DefaultResyncPeriod) + csiInformerFactory := csiinformers.NewFilteredSharedInformerFactory(csiClient, common.DefaultResyncPeriod, "", func(options *v1.ListOptions) { + options.LabelSelector = common.CDILabelSelector + }) pvcInformerFactory := k8sinformers.NewSharedInformerFactory(client, common.DefaultResyncPeriod) podInformerFactory := k8sinformers.NewFilteredSharedInformerFactory(client, common.DefaultResyncPeriod, "", func(options *v1.ListOptions) { options.LabelSelector = common.CDILabelSelector @@ -115,6 +135,7 @@ func start(cfg *rest.Config, stopCh <-chan struct{}) { }) ingressInformerFactory := k8sinformers.NewSharedInformerFactory(client, common.DefaultResyncPeriod) routeInformerFactory := routeinformers.NewSharedInformerFactory(openshiftClient, common.DefaultResyncPeriod) + crdInformerFactory := crdinformers.NewSharedInformerFactory(extClient, common.DefaultResyncPeriod) pvcInformer := pvcInformerFactory.Core().V1().PersistentVolumeClaims() podInformer := podInformerFactory.Core().V1().Pods() @@ -123,10 +144,14 @@ func start(cfg *rest.Config, stopCh <-chan struct{}) { routeInformer := routeInformerFactory.Route().V1().Routes() dataVolumeInformer := cdiInformerFactory.Cdi().V1alpha1().DataVolumes() configInformer := cdiInformerFactory.Cdi().V1alpha1().CDIConfigs() + snapshotInformer := csiInformerFactory.Snapshot().V1alpha1().VolumeSnapshots() + crdInformer := crdInformerFactory.Apiextensions().V1beta1().CustomResourceDefinitions().Informer() dataVolumeController := controller.NewDataVolumeController( client, cdiClient, + csiClient, + extClient, pvcInformer, 
dataVolumeInformer) @@ -146,6 +171,13 @@ func start(cfg *rest.Config, stopCh <-chan struct{}) { pullPolicy, verbose) + smartCloneController := controller.NewSmartCloneController(client, + cdiClient, + csiClient, + pvcInformer, + snapshotInformer, + dataVolumeInformer) + uploadController := controller.NewUploadController( client, cdiClient, @@ -184,10 +216,13 @@ func start(cfg *rest.Config, stopCh <-chan struct{}) { go podInformerFactory.Start(stopCh) go serviceInformerFactory.Start(stopCh) go ingressInformerFactory.Start(stopCh) + go crdInformerFactory.Start(stopCh) if isOpenshift := controller.IsOpenshift(client); isOpenshift { go routeInformerFactory.Start(stopCh) } + addCrdInformerEventHandlers(crdInformer, extClient, csiInformerFactory, smartCloneController, stopCh) + klog.V(1).Infoln("started informers") go func() { @@ -225,6 +260,8 @@ func start(cfg *rest.Config, stopCh <-chan struct{}) { } }() + startSmartController(extClient, csiInformerFactory, smartCloneController, stopCh) + if err = createReadyFile(); err != nil { klog.Fatalf("Error creating ready file: %+v", err) } @@ -280,3 +317,40 @@ func handleSignals() <-chan struct{} { }() return stopCh } + +func addCrdInformerEventHandlers(crdInformer cache.SharedIndexInformer, extClient extclientset.Interface, + csiInformerFactory csiinformers.SharedInformerFactory, smartCloneController *controller.SmartCloneController, + stopCh <-chan struct{}) { + crdInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + crd := obj.(*v1beta1.CustomResourceDefinition) + crdName := crd.Name + + vsClass := crdv1alpha1.VolumeSnapshotClassResourcePlural + "." + crdv1alpha1.GroupName + vsContent := crdv1alpha1.VolumeSnapshotContentResourcePlural + "." + crdv1alpha1.GroupName + vs := crdv1alpha1.VolumeSnapshotResourcePlural + "." + crdv1alpha1.GroupName + + switch crdName { + case vsClass: + fallthrough + case vsContent: + fallthrough + case vs: + startSmartController(extClient, csiInformerFactory, smartCloneController, stopCh) + } + }, + }) +} + +func startSmartController(extclient extclientset.Interface, csiInformerFactory csiinformers.SharedInformerFactory, + smartCloneController *controller.SmartCloneController, stopCh <-chan struct{}) { + if controller.IsCsiCrdsDeployed(extclient) { + go csiInformerFactory.Start(stopCh) + go func() { + err := smartCloneController.Run(1, stopCh) + if err != nil { + klog.Fatalf("Error running smart clone controller: %+v", err) + } + }() + } +} diff --git a/doc/datavolumes.md b/doc/datavolumes.md index e9267e98e..0bce2b3ff 100644 --- a/doc/datavolumes.md +++ b/doc/datavolumes.md @@ -12,6 +12,7 @@ The following statuses are possible. * PVCBound: The PVC associated with the operation has been bound. * Import/Clone/UploadScheduled: The operation (import/clone/upload) has been scheduled. * Import/Clone/UploadInProgress: The operation (import/clone/upload) is in progress. +* SnapshotForSmartClone/SmartClonePVCInProgress: The Smart-Cloning operation is in progress. * Succeeded: The operation has succeeded. * Failed: The operation has failed. * Unknown: Unknown status. diff --git a/doc/smart-clone.md b/doc/smart-clone.md new file mode 100644 index 000000000..4a7c3690a --- /dev/null +++ b/doc/smart-clone.md @@ -0,0 +1,35 @@ +# Data Volume cloning with Smart-Cloning + +## Introduction +Data Volumes (DV) can be created also by specifying a PVC as an input source. It will trigger a clone of the original PVC. See more details [here](datavolumes.md#pvc-source). 
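For illustration, a DataVolume that clones an existing PVC carries the source claim under its spec source. A minimal Go sketch of such an object, mirroring the shape used by the controller tests in this change (the names "target-dv", "source-pvc" and "default" are illustrative only):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	cdiv1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1"
)

// cloneDataVolume builds a DataVolume whose source is an existing PVC.
// Creating such an object triggers a clone of that PVC, smart or
// host-assisted depending on the checks described in the flow below.
func cloneDataVolume() *cdiv1.DataVolume {
	return &cdiv1.DataVolume{
		TypeMeta:   metav1.TypeMeta{APIVersion: cdiv1.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{Name: "target-dv", Namespace: "default"},
		Spec: cdiv1.DataVolumeSpec{
			Source: cdiv1.DataVolumeSource{
				PVC: &cdiv1.DataVolumeSourcePVC{
					Name:      "source-pvc",
					Namespace: "default",
				},
			},
			// Spec of the PVC that will receive the cloned data; storage class
			// and requested size would normally be filled in here.
			PVC: &corev1.PersistentVolumeClaimSpec{},
		},
	}
}

func main() {
	dv := cloneDataVolume()
	fmt.Printf("clone source: %s/%s\n", dv.Spec.Source.PVC.Namespace, dv.Spec.Source.PVC.Name)
}
```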
+ +By default, CDI clones a PVC using host-assisted cloning, which streams the data from the source PVC and writes it to the new PVC. + +To improve the performance of the cloning process, a Smart-Cloning flow based on snapshots is introduced. + +## Smart-Cloning +CDI uses the ability to create a PVC from a snapshot to clone PVCs more efficiently when a CSI plugin with snapshot capabilities is available. + +The YAML structure and annotations of the DV are unchanged. + +### Create PVC from snapshot +Kubernetes v1.12 introduced the ability to create a PVC from a volume snapshot. See more details [here](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#volume-snapshot-and-restore-volume-from-snapshot-support). + +Note: To enable support for restoring a volume from a volume snapshot data source, enable the `VolumeSnapshotDataSource` feature gate on the apiserver and controller-manager. + + +### Flow description +The Smart-Cloning flow proceeds as follows: + +- A DataVolume is created with a PVC source +- Check if Smart-Cloning is possible: + * The source and target PVCs must be in the same namespace + * The source and target PVCs must use the same Storage Class + * There must be a Snapshot Class associated with the Storage Class +- If Smart-Cloning is possible: + * Create a snapshot of the source PVC + * Create a PVC from the created snapshot + * Delete the snapshot +- If Smart-Cloning is not possible: + * Trigger a host-assisted clone + diff --git a/glide.lock b/glide.lock index 64c01d2e6..786d1ec2e 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 017a56fcd0a1df5f134329ef8570a2728b823748621c728c13d287f70e91285e -updated: 2019-05-15T08:28:52.7344063Z +hash: 1aa17cf68605325907751b9dee5bb31dc90ef5135ed97547c05a0b248bf21a51 +updated: 2019-05-19T09:08:43.794386139Z imports: - name: github.com/appscode/jsonpatch version: 7c0e3b262f30165a8ec3d0b4c6059fd92703bfb2 @@ -67,7 +67,7 @@ imports: - compiler - extensions - name: github.com/gorilla/mux - version: c5c6c98bc25355028a63748a498942a6398ccd22 + version: ed099d42384823742bba0bf9a72b53b55c9e2e38 - name: github.com/gregjones/httpcache version: 787624de3eb7bd915c329cba748687a3b22666a6 subpackages: @@ -89,6 +89,11 @@ imports: version: ab8a2e0c74be9d3be70b3184d9acc634935ded82 - name: github.com/kelseyhightower/envconfig version: f611eb38b3875cc3bd991ca91c51d06446afa14c +- name: github.com/kubernetes-csi/external-snapshotter + version: e49856eb417cbafa51e5a3fb3bd0ac9e31ab1873 + subpackages: + - pkg/apis + - pkg/apis/volumesnapshot/v1alpha1 - name: github.com/mailru/easyjson version: 2f5df55504ebc322e4d52d34df6a1f5b503bf26d subpackages: @@ -136,7 +141,7 @@ imports: - reporters/stenographer/support/go-isatty - types - name: github.com/onsi/gomega - version: f0e010e04c08c48a875f83d17df37b04eb3a985b + version: 6a48b4839f850fe696893f09085e3fc485d74a4a subpackages: - format - gbytes @@ -350,8 +355,15 @@ imports: - pkg/apis/apiextensions - pkg/apis/apiextensions/v1beta1 - pkg/client/clientset/clientset + - pkg/client/clientset/clientset/fake - pkg/client/clientset/clientset/scheme - pkg/client/clientset/clientset/typed/apiextensions/v1beta1 + - pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake + - pkg/client/informers/externalversions + - pkg/client/informers/externalversions/apiextensions + - pkg/client/informers/externalversions/apiextensions/v1beta1 + - pkg/client/informers/externalversions/internalinterfaces + - pkg/client/listers/apiextensions/v1beta1 - name:
k8s.io/apimachinery version: 86fb29eff6288413d76bd8506874fddd9fccdff0 subpackages: diff --git a/glide.yaml b/glide.yaml index 3027055bc..20bb06141 100644 --- a/glide.yaml +++ b/glide.yaml @@ -40,3 +40,5 @@ import: version: rebase-1.13.4 - package: github.com/openshift/client-go version: rebase-1.13.4 +- package: github.com/kubernetes-csi/external-snapshotter/pkg/apis + version: e49856eb417cbafa51e5a3fb3bd0ac9e31ab1873 diff --git a/hack/build/run-functional-tests.sh b/hack/build/run-functional-tests.sh index 65fb7d20c..40a190da6 100755 --- a/hack/build/run-functional-tests.sh +++ b/hack/build/run-functional-tests.sh @@ -39,8 +39,9 @@ arg_kubeconfig="${KUBECONFIG:+-kubeconfig=$KUBECONFIG}" arg_kubectl="${KUBECTL:+-kubectl-path=$KUBECTL}" arg_oc="${KUBECTL:+-oc-path=$KUBECTL}" arg_gocli="${GOCLI:+-gocli-path=$GOCLI}" +arg_sc_snap="-snapshot-sc=csi-rbd" -test_args="${test_args} -ginkgo.v ${arg_master} ${arg_namespace} ${arg_kubeconfig} ${arg_kubectl} ${arg_oc} ${arg_gocli}" +test_args="${test_args} -ginkgo.v ${arg_master} ${arg_namespace} ${arg_kubeconfig} ${arg_kubectl} ${arg_oc} ${arg_gocli} ${arg_sc_snap}" echo 'Wait until all CDI Pods are ready' retry_counter=0 diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index 5c7ffa6a0..05cc0d05d 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -26,6 +26,7 @@ CODEGEN_PKG=${CODEGEN_PKG:-$( find "${SCRIPT_ROOT}/pkg/" -name "*generated*.go" -exec rm {} -f \; rm -rf "${SCRIPT_ROOT}/pkg/client" +rm -rf "${SCRIPT_ROOT}/pkg/snapshot-client" ${SCRIPT_ROOT}/hack/build/build-go.sh generate @@ -38,6 +39,11 @@ ${CODEGEN_PKG}/generate-groups.sh "client,informer,lister" \ "core:v1alpha1 upload:v1alpha1" \ --go-header-file ${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt +${CODEGEN_PKG}/generate-groups.sh "client,informer,lister" \ + kubevirt.io/containerized-data-importer/pkg/snapshot-client github.com/kubernetes-csi/external-snapshotter/pkg/apis \ + volumesnapshot:v1alpha1 \ + --go-header-file ${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt + (cd ${SCRIPT_ROOT}/tools/openapi-spec-generator/ && go build -o ../../bin/openapi-spec-generator) ${SCRIPT_ROOT}/bin/openapi-spec-generator > ${SCRIPT_ROOT}/api/openapi-spec/swagger.json diff --git a/pkg/apis/core/v1alpha1/types.go b/pkg/apis/core/v1alpha1/types.go index 0b5e5ca00..b07f60fac 100644 --- a/pkg/apis/core/v1alpha1/types.go +++ b/pkg/apis/core/v1alpha1/types.go @@ -152,6 +152,12 @@ const ( // CloneInProgress represents a data volume with a current phase of CloneInProgress CloneInProgress DataVolumePhase = "CloneInProgress" + // SnapshotForSmartCloneInProgress represents a data volume with a current phase of SnapshotForSmartCloneInProgress + SnapshotForSmartCloneInProgress DataVolumePhase = "SnapshotForSmartCloneInProgress" + + // SmartClonePVCInProgress represents a data volume with a current phase of SmartClonePVCInProgress + SmartClonePVCInProgress DataVolumePhase = "SmartClonePVCInProgress" + // UploadScheduled represents a data volume with a current phase of UploadScheduled UploadScheduled DataVolumePhase = "UploadScheduled" diff --git a/pkg/common/common.go b/pkg/common/common.go index caa58937e..0574b367a 100644 --- a/pkg/common/common.go +++ b/pkg/common/common.go @@ -78,6 +78,9 @@ const ( // ClonerSocketPath (controller pkg only) ClonerSocketPath = "/tmp/clone/socket" + // SmartClonerCDILabel is the label applied to resources created by the smart-clone controller + SmartClonerCDILabel = "cdi-smart-clone" + // UploadServerCDILabel is the label applied to upload server resources 
UploadServerCDILabel = "cdi-upload-server" // UploadServerPodname is name of the upload server pod container diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index bbf5839c2..d52ad12d4 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -185,6 +185,7 @@ func (c *Controller) run(threadiness int, stopCh <-chan struct{}, controller int if !cache.WaitForCacheSync(stopCh, c.podInformer.HasSynced) { return errors.New("Timeout waiting for pod cache sync") } + klog.V(3).Infoln("Controller cache has synced") for i := 0; i < threadiness; i++ { //Go is not pure object oriented language. The command repetition below is a result of that. diff --git a/pkg/controller/datavolume-controller.go b/pkg/controller/datavolume-controller.go index 16c6a1374..284c7b1d0 100644 --- a/pkg/controller/datavolume-controller.go +++ b/pkg/controller/datavolume-controller.go @@ -26,8 +26,10 @@ import ( "strconv" "time" + csisnapshotv1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" + extclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -51,6 +53,7 @@ import ( listers "kubevirt.io/containerized-data-importer/pkg/client/listers/core/v1alpha1" "kubevirt.io/containerized-data-importer/pkg/common" expectations "kubevirt.io/containerized-data-importer/pkg/expectations" + csiclientset "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned" ) const controllerAgentName = "datavolume-controller" @@ -78,6 +81,12 @@ const ( CloneScheduled = "CloneScheduled" // CloneInProgress provides a const to indicate clone is in progress CloneInProgress = "CloneInProgress" + // SnapshotForSmartCloneInProgress provides a const to indicate snapshot creation for smart-clone is in progress + SnapshotForSmartCloneInProgress = "SnapshotForSmartCloneInProgress" + // SnapshotForSmartCloneCreated provides a const to indicate snapshot creation for smart-clone has been completed + SnapshotForSmartCloneCreated = "SnapshotForSmartCloneCreated" + // SmartClonePVCInProgress provides a const to indicate PVC creation for smart-clone is in progress + SmartClonePVCInProgress = "SmartClonePVCInProgress" // CloneFailed provides a const to indicate clone has failed CloneFailed = "CloneFailed" // CloneSucceeded provides a const to indicate clone has succeeded @@ -114,6 +123,10 @@ const ( MessageCloneFailed = "Cloning from %s/%s into %s/%s failed" // MessageCloneSucceeded provides a const to form clone has succeeded message MessageCloneSucceeded = "Successfully cloned from %s/%s into %s/%s" + // MessageSmartCloneInProgress provides a const to form snapshot for smart-clone is in progress message + MessageSmartCloneInProgress = "Creating snapshot for smart-clone is in progress (for pvc %s/%s)" + // MessageSmartClonePVCInProgress provides a const to form PVC for smart-clone is in progress message + MessageSmartClonePVCInProgress = "Creating PVC for smart-clone is in progress (for pvc %s/%s)" // MessageUploadScheduled provides a const to form upload is scheduled message MessageUploadScheduled = "Upload into %s scheduled" // MessageUploadReady provides a const to form upload is ready message @@ -132,6 +145,8 @@ type DataVolumeController struct { kubeclientset kubernetes.Interface // clientset is a clientset for our own API group cdiClientSet
clientset.Interface + csiClientSet csiclientset.Interface + extClientSet extclientset.Interface pvcLister corelisters.PersistentVolumeClaimLister pvcsSynced cache.InformerSynced @@ -157,6 +172,8 @@ type DataVolumeEvent struct { func NewDataVolumeController( kubeclientset kubernetes.Interface, cdiClientSet clientset.Interface, + csiClientSet csiclientset.Interface, + extClientSet extclientset.Interface, pvcInformer coreinformers.PersistentVolumeClaimInformer, dataVolumeInformer informers.DataVolumeInformer) *DataVolumeController { @@ -173,6 +190,8 @@ func NewDataVolumeController( controller := &DataVolumeController{ kubeclientset: kubeclientset, cdiClientSet: cdiClientSet, + csiClientSet: csiClientSet, + extClientSet: extClientSet, pvcLister: pvcInformer.Lister(), pvcsSynced: pvcInformer.Informer().HasSynced, dataVolumesLister: dataVolumeInformer.Lister(), @@ -307,7 +326,6 @@ func (c *DataVolumeController) processNextWorkItem() bool { // converge the two. It then updates the Status block of the DataVolume resource // with the current status of the resource. func (c *DataVolumeController) syncHandler(key string) error { - exists := true // Convert the namespace/name string into a distinct namespace and name @@ -352,20 +370,37 @@ func (c *DataVolumeController) syncHandler(key string) error { return errors.Errorf(msg) } + // expectations prevent us from creating multiple pods. An expectation forces + // us to observe a pod's creation in the cache. needsSync := c.pvcExpectations.SatisfiedExpectations(key) + if !exists && needsSync { - newPvc, err := newPersistentVolumeClaim(dataVolume) - if err != nil { - return err - } - c.pvcExpectations.ExpectCreations(key, 1) - pvc, err = c.kubeclientset.CoreV1().PersistentVolumeClaims(dataVolume.Namespace).Create(newPvc) - if err != nil { - c.pvcExpectations.CreationObserved(key) - return err - } - if canUpdateProgress(newPvc.Annotations) { - go c.scheduleProgressUpdate(dataVolume.Name, dataVolume.Namespace, pvc.GetUID()) + snapshotClassName := c.getSnapshotClassForSmartClone(dataVolume) + if snapshotClassName != "" { + klog.V(3).Infof("Smart-Clone via Snapshot is available with Volume Snapshot Class: %s", snapshotClassName) + newSnapshot := newSnapshot(dataVolume, snapshotClassName) + _, err := c.csiClientSet.SnapshotV1alpha1().VolumeSnapshots(newSnapshot.Namespace).Create(newSnapshot) + if err != nil { + return err + } + err = c.updateSmartCloneStatusPhase(cdiv1.SnapshotForSmartCloneInProgress, dataVolume) + if err != nil { + return err + } + } else { + newPvc, err := newPersistentVolumeClaim(dataVolume) + if err != nil { + return err + } + c.pvcExpectations.ExpectCreations(key, 1) + pvc, err = c.kubeclientset.CoreV1().PersistentVolumeClaims(dataVolume.Namespace).Create(newPvc) + if err != nil { + c.pvcExpectations.CreationObserved(key) + return err + } + if canUpdateProgress(newPvc.Annotations) { + go c.scheduleProgressUpdate(dataVolume.Name, dataVolume.Namespace, pvc.GetUID()) + } } } @@ -406,6 +441,134 @@ func (c *DataVolumeController) scheduleProgressUpdate(dataVolumeName, dataVolume } } +func (c *DataVolumeController) getSnapshotClassForSmartClone(dataVolume *cdiv1.DataVolume) string { + // Check if clone is requested + if dataVolume.Spec.Source.PVC == nil { + return "" + } + + // Check if relevant CRDs are available + if !IsCsiCrdsDeployed(c.extClientSet) { + klog.V(3).Infof("Missing CSI snapshotter CRDs") + return "" + } + + // Find source PVC + sourcePvcNs := dataVolume.Spec.Source.PVC.Namespace + if sourcePvcNs == "" { + sourcePvcNs = 
dataVolume.Namespace + } + + pvc, err := c.pvcLister.PersistentVolumeClaims(sourcePvcNs).Get(dataVolume.Spec.Source.PVC.Name) + if err != nil { + if k8serrors.IsNotFound(err) { + klog.V(3).Infof("Source PVC is missing: %s/%s", dataVolume.Spec.Source.PVC.Namespace, dataVolume.Spec.Source.PVC.Name) + } + runtime.HandleError(err) + return "" + } + + targetPvcStorageClassName := dataVolume.Spec.PVC.StorageClassName + + // Handle unspecified storage class name, fallback to default storage class + if targetPvcStorageClassName == nil { + storageclasses, err := c.kubeclientset.StorageV1().StorageClasses().List(metav1.ListOptions{}) + if err != nil { + runtime.HandleError(err) + return "" + } + for _, storageClass := range storageclasses.Items { + if storageClass.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" { + targetPvcStorageClassName = &storageClass.Name + break + } + } + } + + if targetPvcStorageClassName == nil { + klog.V(3).Infof("Target PVC's Storage Class not found") + return "" + } + + sourcePvcStorageClassName := pvc.Spec.StorageClassName + + // Compare source and target storage classess + if *sourcePvcStorageClassName != *targetPvcStorageClassName { + klog.V(3).Infof("Source PVC and target PVC belong to different storage classes: %s - %s", + *sourcePvcStorageClassName, *targetPvcStorageClassName) + return "" + } + + // Compare source and target namespaces + if pvc.Namespace != dataVolume.Namespace { + klog.V(3).Infof("Source PVC and target PVC belong to different namespaces: %s - %s", + pvc.Namespace, dataVolume.Namespace) + return "" + } + + // Fetch the source storage class + storageclass, err := c.kubeclientset.StorageV1().StorageClasses().Get(*sourcePvcStorageClassName, metav1.GetOptions{}) + if err != nil { + runtime.HandleError(err) + return "" + } + + // List the snapshot classes + scs, err := c.csiClientSet.SnapshotV1alpha1().VolumeSnapshotClasses().List(metav1.ListOptions{}) + if err != nil { + klog.V(3).Infof("Cannot list snapshot classes") + return "" + } + for _, snapshotClass := range scs.Items { + // Validate association between snapshot class and storage class + if snapshotClass.Snapshotter == storageclass.Provisioner { + klog.V(3).Infof("smart-clone is applicable for datavolume '%s' with snapshot class '%s'", + dataVolume.Name, snapshotClass.Name) + return snapshotClass.Name + } + } + + return "" +} + +func newSnapshot(dataVolume *cdiv1.DataVolume, snapshotClassName string) *csisnapshotv1.VolumeSnapshot { + annotations := make(map[string]string) + annotations[AnnSmartCloneRequest] = "true" + className := snapshotClassName + labels := map[string]string{ + common.CDILabelKey: common.CDILabelValue, + common.CDIComponentLabel: common.SmartClonerCDILabel, + } + snapshot := &csisnapshotv1.VolumeSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: dataVolume.Name, + Namespace: dataVolume.Namespace, + Labels: labels, + Annotations: annotations, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(dataVolume, schema.GroupVersionKind{ + Group: cdiv1.SchemeGroupVersion.Group, + Version: cdiv1.SchemeGroupVersion.Version, + Kind: "DataVolume", + }), + }, + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: csisnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + Status: csisnapshotv1.VolumeSnapshotStatus{}, + Spec: csisnapshotv1.VolumeSnapshotSpec{ + Source: &corev1.TypedLocalObjectReference{ + Name: dataVolume.Spec.Source.PVC.Name, + Kind: "PersistentVolumeClaim", + }, + VolumeSnapshotClassName: &className, + }, + } + return 
snapshot +} + func (c *DataVolumeController) updateImportStatusPhase(pvc *corev1.PersistentVolumeClaim, dataVolumeCopy *cdiv1.DataVolume, event *DataVolumeEvent) { phase, ok := pvc.Annotations[AnnPodPhase] if ok { @@ -437,6 +600,21 @@ func (c *DataVolumeController) updateImportStatusPhase(pvc *corev1.PersistentVol } } +func (c *DataVolumeController) updateSmartCloneStatusPhase(phase cdiv1.DataVolumePhase, dataVolume *cdiv1.DataVolume) error { + var dataVolumeCopy = dataVolume.DeepCopy() + var event DataVolumeEvent + + switch phase { + case cdiv1.SnapshotForSmartCloneInProgress: + dataVolumeCopy.Status.Phase = cdiv1.SnapshotForSmartCloneInProgress + event.eventType = corev1.EventTypeNormal + event.reason = SnapshotForSmartCloneInProgress + event.message = fmt.Sprintf(MessageSmartCloneInProgress, dataVolumeCopy.Spec.Source.PVC.Namespace, dataVolumeCopy.Spec.Source.PVC.Name) + } + + return c.emitEvent(dataVolume, dataVolumeCopy, &event) +} + func (c *DataVolumeController) updateCloneStatusPhase(pvc *corev1.PersistentVolumeClaim, dataVolumeCopy *cdiv1.DataVolume, event *DataVolumeEvent) { phase, ok := pvc.Annotations[AnnPodPhase] if ok { @@ -502,12 +680,11 @@ func (c *DataVolumeController) updateUploadStatusPhase(pvc *corev1.PersistentVol func (c *DataVolumeController) updateDataVolumeStatus(dataVolume *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { dataVolumeCopy := dataVolume.DeepCopy() - var err error var event DataVolumeEvent curPhase := dataVolumeCopy.Status.Phase if pvc == nil { - if curPhase != cdiv1.PhaseUnset && curPhase != cdiv1.Pending { + if curPhase != cdiv1.PhaseUnset && curPhase != cdiv1.Pending && curPhase != cdiv1.SnapshotForSmartCloneInProgress { // if pvc doesn't exist and we're not still initializing, then // something has gone wrong. Perhaps the PVC was deleted out from @@ -519,6 +696,7 @@ func (c *DataVolumeController) updateDataVolumeStatus(dataVolume *cdiv1.DataVolu } } else { + switch pvc.Status.Phase { case corev1.ClaimPending: dataVolumeCopy.Status.Phase = cdiv1.Pending @@ -558,15 +736,20 @@ func (c *DataVolumeController) updateDataVolumeStatus(dataVolume *cdiv1.DataVolu } } + return c.emitEvent(dataVolume, dataVolumeCopy, &event) +} + +func (c *DataVolumeController) emitEvent(dataVolume *cdiv1.DataVolume, dataVolumeCopy *cdiv1.DataVolume, event *DataVolumeEvent) error { // Only update the object if something actually changed in the status. if !reflect.DeepEqual(dataVolume.Status, dataVolumeCopy.Status) { - _, err = c.cdiClientSet.CdiV1alpha1().DataVolumes(dataVolume.Namespace).Update(dataVolumeCopy) + _, err := c.cdiClientSet.CdiV1alpha1().DataVolumes(dataVolume.Namespace).Update(dataVolumeCopy) // Emit the event only when the status change happens, not every time if event.eventType != "" { c.recorder.Event(dataVolume, event.eventType, event.reason, event.message) } + return err } - return err + return nil } // canUpdateProgress determines what kind annotations will be able generate progress update information. 
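The restore half of the smart clone relies on Kubernetes populating a PVC from a VolumeSnapshot through the PVC DataSource field; the newPvcFromSnapshot helper added later in this diff builds such a claim from the DataVolume spec. A minimal sketch of a claim restored from a snapshot, with illustrative names and size:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// pvcFromSnapshot builds a PVC whose contents are restored from an existing
// VolumeSnapshot via the DataSource field; the VolumeSnapshotDataSource
// feature gate must be enabled for the restore to take place.
func pvcFromSnapshot(name, namespace, snapshotName string) *corev1.PersistentVolumeClaim {
	snapshotAPIGroup := "snapshot.storage.k8s.io"
	return &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			DataSource: &corev1.TypedLocalObjectReference{
				APIGroup: &snapshotAPIGroup,
				Kind:     "VolumeSnapshot",
				Name:     snapshotName,
			},
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					// Illustrative size; a real clone requests the source PVC's size.
					corev1.ResourceStorage: resource.MustParse("1Gi"),
				},
			},
		},
	}
}

func main() {
	pvc := pvcFromSnapshot("restored-pvc", "default", "source-snapshot")
	fmt.Printf("restoring %s/%s from snapshot %s\n", pvc.Namespace, pvc.Name, pvc.Spec.DataSource.Name)
}
```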
diff --git a/pkg/controller/datavolume-controller_test.go b/pkg/controller/datavolume-controller_test.go index c063abf82..59b3b60f3 100644 --- a/pkg/controller/datavolume-controller_test.go +++ b/pkg/controller/datavolume-controller_test.go @@ -32,9 +32,11 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" + extfake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" cdiv1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1" "kubevirt.io/containerized-data-importer/pkg/client/clientset/versioned/fake" informers "kubevirt.io/containerized-data-importer/pkg/client/informers/externalversions" + csifake "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned/fake" ) var ( @@ -46,7 +48,9 @@ type fixture struct { t *testing.T client *fake.Clientset + csiclient *csifake.Clientset kubeclient *k8sfake.Clientset + extclient *extfake.Clientset // Objects to put in the store. dataVolumeLister []*cdiv1.DataVolume @@ -59,6 +63,8 @@ type fixture struct { // Objects from here preloaded into NewSimpleFake. kubeobjects []runtime.Object objects []runtime.Object + csiobjects []runtime.Object + extobjects []runtime.Object } func newFixture(t *testing.T) *fixture { @@ -66,6 +72,14 @@ func newFixture(t *testing.T) *fixture { f.t = t f.objects = []runtime.Object{} f.kubeobjects = []runtime.Object{} + f.csiobjects = []runtime.Object{} + f.extobjects = []runtime.Object{} + return f +} + +func newFixtureCsiCrds(t *testing.T) *fixture { + f := newFixture(t) + f.extobjects = append(f.extobjects, createVolumeSnapshotContentCrd(), createVolumeSnapshotClassCrd(), createVolumeSnapshotCrd()) return f } @@ -88,6 +102,10 @@ func newImportDataVolume(name string) *cdiv1.DataVolume { } func newCloneDataVolume(name string) *cdiv1.DataVolume { + return newCloneDataVolumeWithPVCNS(name, "default") +} + +func newCloneDataVolumeWithPVCNS(name string, pvcNamespace string) *cdiv1.DataVolume { return &cdiv1.DataVolume{ TypeMeta: metav1.TypeMeta{APIVersion: cdiv1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -98,7 +116,7 @@ func newCloneDataVolume(name string) *cdiv1.DataVolume { Source: cdiv1.DataVolumeSource{ PVC: &cdiv1.DataVolumeSourcePVC{ Name: "test", - Namespace: "default", + Namespace: pvcNamespace, }, }, PVC: &corev1.PersistentVolumeClaimSpec{}, @@ -140,7 +158,9 @@ func newBlankImageDataVolume(name string) *cdiv1.DataVolume { func (f *fixture) newController() (*DataVolumeController, informers.SharedInformerFactory, kubeinformers.SharedInformerFactory) { f.client = fake.NewSimpleClientset(f.objects...) + f.csiclient = csifake.NewSimpleClientset(f.csiobjects...) f.kubeclient = k8sfake.NewSimpleClientset(f.kubeobjects...) + f.extclient = extfake.NewSimpleClientset(f.extobjects...) 
i := informers.NewSharedInformerFactory(f.client, noResyncPeriodFunc()) k8sI := kubeinformers.NewSharedInformerFactory(f.kubeclient, noResyncPeriodFunc()) @@ -155,6 +175,8 @@ func (f *fixture) newController() (*DataVolumeController, informers.SharedInform c := NewDataVolumeController(f.kubeclient, f.client, + f.csiclient, + f.extclient, k8sI.Core().V1().PersistentVolumeClaims(), i.Cdi().V1alpha1().DataVolumes()) @@ -782,4 +804,130 @@ func TestBlankImageClaimLost(t *testing.T) { result.Status.Phase = cdiv1.Failed f.expectUpdateDataVolumeStatusAction(result) f.run(getKey(dataVolume, t)) + +} + +// Smart-clone test +func TestSmartCloneNoPVCSource(t *testing.T) { + f := newFixtureCsiCrds(t) + dataVolume := newImportDataVolume("test") + c, _, _ := f.newController() + snapClass := c.getSnapshotClassForSmartClone(dataVolume) + if snapClass != "" { + t.Errorf("Should not be smart-clone applicable, no source PVC") + } +} + +func TestSmartCloneNoCsiCrds(t *testing.T) { + f := newFixture(t) + scName := "test" + sc := createStorageClass(scName, map[string]string{ + AnnDefaultStorageClass: "true", + }) + f.kubeobjects = append(f.kubeobjects, sc) + dataVolume := newCloneDataVolume("test") + pvc := createPvcInStorageClass("test", "default", &scName, nil, nil) + f.pvcLister = append(f.pvcLister, pvc) + c, _, _ := f.newController() + snapClass := c.getSnapshotClassForSmartClone(dataVolume) + if snapClass != "" { + t.Errorf("Should not be smart-clone applicable, no CSI CRDs") + } +} + +func TestSmartClonePVCSourceDifferentSC(t *testing.T) { + f := newFixtureCsiCrds(t) + sc := createStorageClass("test2", map[string]string{ + AnnDefaultStorageClass: "true", + }) + f.kubeobjects = append(f.kubeobjects, sc) + dataVolume := newCloneDataVolume("test") + sourceScName := "test" + pvc := createPvcInStorageClass("test", "default", &sourceScName, nil, nil) + f.pvcLister = append(f.pvcLister, pvc) + c, _, _ := f.newController() + + snapClass := c.getSnapshotClassForSmartClone(dataVolume) + if snapClass != "" { + t.Errorf("Should not be smart-clone applicable, different Storage classes source/target PVC") + } +} + +func TestNoSmartClonePVCSourceDifferentNS(t *testing.T) { + f := newFixtureCsiCrds(t) + sc := createStorageClass("test", map[string]string{ + AnnDefaultStorageClass: "true", + }) + f.kubeobjects = append(f.kubeobjects, sc) + dataVolume := newCloneDataVolumeWithPVCNS("test", "namespace2") + sourceScName := "test" + pvc := createPvcInStorageClass("test", "namespace2", &sourceScName, nil, nil) + f.pvcLister = append(f.pvcLister, pvc) + c, _, _ := f.newController() + + snapClass := c.getSnapshotClassForSmartClone(dataVolume) + if snapClass != "" { + t.Errorf("Should not be smart-clone applicable, different NameSpaces source/target PVC") + } +} + +func TestNoSmartCloneNoSnapshotClass(t *testing.T) { + f := newFixtureCsiCrds(t) + scName := "test" + sc := createStorageClass(scName, map[string]string{ + AnnDefaultStorageClass: "true", + }) + f.kubeobjects = append(f.kubeobjects, sc) + dataVolume := newCloneDataVolume("test") + pvc := createPvcInStorageClass("test", "default", &scName, nil, nil) + f.pvcLister = append(f.pvcLister, pvc) + c, _, _ := f.newController() + + snapClass := c.getSnapshotClassForSmartClone(dataVolume) + if snapClass != "" { + t.Errorf("Should not be smart-clone applicable, different NameSpaces soure/target PVC") + } +} + +func TestSmartCloneNotMatchingStorageClass(t *testing.T) { + f := newFixtureCsiCrds(t) + scName := "test" + sc := createStorageClass(scName, map[string]string{ + 
AnnDefaultStorageClass: "true", + }) + f.kubeobjects = append(f.kubeobjects, sc) + snapClass := createSnapshotClass("snap-class", nil, "csi-snap") + f.csiobjects = append(f.csiobjects, snapClass) + dataVolume := newCloneDataVolume("test") + pvc := createPvcInStorageClass("test", "default", &scName, nil, nil) + f.pvcLister = append(f.pvcLister, pvc) + + c, _, _ := f.newController() + + resultSnapClass := c.getSnapshotClassForSmartClone(dataVolume) + if resultSnapClass != "" { + t.Errorf("Should not be smart-clone applicable, No matching Snapshot Class") + } +} +func TestSmartCloneMatchingStorageClass(t *testing.T) { + f := newFixtureCsiCrds(t) + scName := "test" + sc := createStorageClassWithProvisioner(scName, map[string]string{ + AnnDefaultStorageClass: "true", + }, "csi-plugin") + f.kubeobjects = append(f.kubeobjects, sc) + expectedSnapshotClass := "snap-class" + snapClass := createSnapshotClass(expectedSnapshotClass, nil, "csi-plugin") + f.csiobjects = append(f.csiobjects, snapClass) + dataVolume := newCloneDataVolume("test") + pvc := createPvcInStorageClass("test", "default", &scName, nil, nil) + f.pvcLister = append(f.pvcLister, pvc) + + c, _, _ := f.newController() + + snapClassName := c.getSnapshotClassForSmartClone(dataVolume) + + if snapClassName != expectedSnapshotClass { + t.Errorf("Should be expected SnapshotClass") + } } diff --git a/pkg/controller/smart-clone-controller.go b/pkg/controller/smart-clone-controller.go new file mode 100644 index 000000000..b1b625379 --- /dev/null +++ b/pkg/controller/smart-clone-controller.go @@ -0,0 +1,388 @@ +package controller + +import ( + "fmt" + "time" + + "reflect" + + csisnapshotv1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + coreinformers "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + cdiv1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1" + clientset "kubevirt.io/containerized-data-importer/pkg/client/clientset/versioned" + cdischeme "kubevirt.io/containerized-data-importer/pkg/client/clientset/versioned/scheme" + informers "kubevirt.io/containerized-data-importer/pkg/client/informers/externalversions/core/v1alpha1" + listers "kubevirt.io/containerized-data-importer/pkg/client/listers/core/v1alpha1" + "kubevirt.io/containerized-data-importer/pkg/common" + csiclientset "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned" + snapshotsinformers "kubevirt.io/containerized-data-importer/pkg/snapshot-client/informers/externalversions/volumesnapshot/v1alpha1" + snapshotslisters "kubevirt.io/containerized-data-importer/pkg/snapshot-client/listers/volumesnapshot/v1alpha1" +) + +const ( + //AnnSmartCloneRequest sets our expected annotation for a CloneRequest + AnnSmartCloneRequest = "k8s.io/SmartCloneRequest" +) + +// SmartCloneController represents the CDI SmartClone Controller +type SmartCloneController struct { + clientset kubernetes.Interface + cdiClientSet clientset.Interface + csiClientSet csiclientset.Interface + + snapshotInformer 
cache.SharedIndexInformer + snapshotsLister snapshotslisters.VolumeSnapshotLister + pvcLister corelisters.PersistentVolumeClaimLister + dataVolumeLister listers.DataVolumeLister + dataVolumesSynced cache.InformerSynced + snapshotsSynced cache.InformerSynced + + queue workqueue.RateLimitingInterface + recorder record.EventRecorder +} + +// NewSmartCloneController sets up a Smart Clone Controller, and returns a pointer to +// to the newly created Controller +func NewSmartCloneController(client kubernetes.Interface, + cdiClientSet clientset.Interface, + csiClientSet csiclientset.Interface, + pvcInformer coreinformers.PersistentVolumeClaimInformer, + snapshotInformer snapshotsinformers.VolumeSnapshotInformer, + dataVolumeInformer informers.DataVolumeInformer) *SmartCloneController { + + // Create event broadcaster + // Add smart-clone-controller types to the default Kubernetes Scheme so Events can be + // logged for smart-clone-controller types. + cdischeme.AddToScheme(scheme.Scheme) + klog.V(3).Info("Creating event broadcaster") + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.V(2).Infof) + eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: client.CoreV1().Events("")}) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}) + + c := &SmartCloneController{ + clientset: client, + cdiClientSet: cdiClientSet, + csiClientSet: csiClientSet, + pvcLister: pvcInformer.Lister(), + snapshotsLister: snapshotInformer.Lister(), + snapshotInformer: snapshotInformer.Informer(), + dataVolumeLister: dataVolumeInformer.Lister(), + dataVolumesSynced: dataVolumeInformer.Informer().HasSynced, + snapshotsSynced: snapshotInformer.Informer().HasSynced, + recorder: recorder, + queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), + } + + // Set up an event handler for when VolumeSnapshot resources change + snapshotInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.enqueueItem, + UpdateFunc: func(old, new interface{}) { + c.enqueueItem(new) + }, + DeleteFunc: c.enqueueItem, + }) + + // Set up an event handler for when PVC resources change + // handleObject function ensures we filter PVCs not created by this controller + pvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.enqueueItem, + UpdateFunc: func(old, new interface{}) { + c.enqueueItem(new) + }, + DeleteFunc: c.enqueueItem, + }) + + return c +} + +//ProcessNextItem ... 
+func (c *SmartCloneController) ProcessNextItem() bool { + key, shutdown := c.queue.Get() + if shutdown { + return false + } + defer c.queue.Done(key) + + ns, name, err := cache.SplitMetaNamespaceKey(key.(string)) + if err != nil { + runtime.HandleError(errors.Errorf("invalid resource key: %s", key)) + return false + } + + pvc, err := c.pvcLister.PersistentVolumeClaims(ns).Get(name) + if err != nil { + if k8serrors.IsNotFound(err) { + pvc = nil + } else { + // Error getting PVC - return + return false + } + } + snapshot, err := c.snapshotsLister.VolumeSnapshots(ns).Get(name) + if err != nil { + if k8serrors.IsNotFound(err) { + snapshot = nil + } else { + // Error getting Snapshot - return + return false + } + } + + if pvc != nil { + if pvc.Status.Phase != corev1.ClaimBound { + // PVC isn't bound yet - return + return false + } + if pvc.ObjectMeta.Annotations[AnnSmartCloneRequest] == "true" { + snapshotName := pvc.Spec.DataSource.Name + snapshotToDelete, err := c.snapshotsLister.VolumeSnapshots(ns).Get(snapshotName) + if err != nil { + // Error getting Snapshot - return + return true + } + if snapshotToDelete != nil { + klog.V(3).Infof("ProcessNextItem snapshotName: %s", snapshotName) + err = c.csiClientSet.SnapshotV1alpha1().VolumeSnapshots(ns).Delete(snapshotName, &metav1.DeleteOptions{}) + if err != nil { + klog.Errorf("error deleting snapshot for smart-clone %q: %v", key, err) + return true + } + klog.V(3).Infof("Snapshot deleted: %s", snapshotName) + + dataVolume, err := c.dataVolumeLister.DataVolumes(snapshot.Namespace).Get(snapshot.Name) + if err != nil { + return true + } + + // Update DV phase and emit PVC in progress event + c.updateSmartCloneStatusPhase(cdiv1.Succeeded, dataVolume, pvc) + } + } + } else if snapshot != nil { + err := c.syncSnapshot(key.(string)) + if err != nil { + klog.Errorf("error processing snapshot %q: %v", key, err) + return true + } + } + return c.forgetKey(key, fmt.Sprintf("ProcessNextItem: processing pvc/snapshot %q completed", key)) +} + +// forget the passed-in key for this event and optionally log a message. 
+func (c *SmartCloneController) forgetKey(key interface{}, msg string) bool { + if len(msg) > 0 { + klog.V(3).Info(msg) + } + c.queue.Forget(key) + return true +} + +func (c *SmartCloneController) syncSnapshot(key string) error { + snapshot, exists, err := c.snapshotFromKey(key) + if err != nil { + return err + } else if !exists { + return nil + } + + _, ok := snapshot.Annotations[AnnSmartCloneRequest] + if !ok { + //ignoring snapshot, not created by DataVolume Controller + return nil + } + + snapshotReadyToUse := snapshot.Status.ReadyToUse + klog.V(3).Infof("Snapshot \"%s/%s\" - ReadyToUse: %t", snapshot.Namespace, snapshot.Name, snapshotReadyToUse) + if !snapshotReadyToUse { + return nil + } + + return c.processNextSnapshotItem(snapshot) +} + +// The snapshot is ReadyToUse, then we can create the PVC and update DV status +func (c *SmartCloneController) processNextSnapshotItem(snapshot *csisnapshotv1.VolumeSnapshot) error { + dataVolume, err := c.dataVolumeLister.DataVolumes(snapshot.Namespace).Get(snapshot.Name) + if err != nil { + return err + } + + // Update DV phase and emit PVC in progress event + c.updateSmartCloneStatusPhase(SmartClonePVCInProgress, dataVolume, nil) + + newPvc := newPvcFromSnapshot(snapshot, dataVolume) + if newPvc == nil { + klog.Errorf("error creating new pvc from snapshot object") + return nil + } + + _, err = c.clientset.CoreV1().PersistentVolumeClaims(snapshot.Namespace).Create(newPvc) + if err != nil { + return err + } + + return nil +} + +func (c *SmartCloneController) objFromKey(informer cache.SharedIndexInformer, key interface{}) (interface{}, bool, error) { + keyString, ok := key.(string) + if !ok { + return nil, false, errors.New("keys is not of type string") + } + obj, ok, err := informer.GetIndexer().GetByKey(keyString) + if err != nil { + return nil, false, errors.Wrap(err, "error getting interface obj from store") + } + if !ok { + return nil, false, nil + } + return obj, true, nil +} + +// return a VolumeSnapshot pointer based on the passed-in work queue key. 
+func (c *SmartCloneController) snapshotFromKey(key interface{}) (*csisnapshotv1.VolumeSnapshot, bool, error) { + obj, exists, err := c.objFromKey(c.snapshotInformer, key) + if err != nil { + return nil, false, errors.Wrap(err, "could not get pvc object from key") + } else if !exists { + return nil, false, nil + } + + snapshot, ok := obj.(*csisnapshotv1.VolumeSnapshot) + if !ok { + return nil, false, errors.New("Object not of type *v1.PersistentVolumeClaim") + } + return snapshot, true, nil +} + +//Run is being called from cdi-controller (cmd) +func (c *SmartCloneController) Run(threadiness int, stopCh <-chan struct{}) error { + defer runtime.HandleCrash() + defer c.queue.ShutDown() + + // Start the informer factories to begin populating the informer caches + klog.V(2).Info("Starting SmartCloneController controller") + + // Wait for the caches to be synced before starting workers + klog.V(2).Info("Waiting for informer caches to sync") + if !cache.WaitForCacheSync(stopCh, c.snapshotsSynced, c.dataVolumesSynced) { + return errors.New("Timeout waiting for caches sync") + } + klog.V(2).Info("Starting worker") + for i := 0; i < threadiness; i++ { + go wait.Until(c.runSnapshotWorkers, time.Second, stopCh) + } + + klog.V(2).Info("Started workers") + <-stopCh + klog.V(2).Info("Shutting down workers") + + return nil +} + +func (c *SmartCloneController) runSnapshotWorkers() { + for c.ProcessNextItem() { + // empty + } +} + +// enqueueItem takes a VolumeSnapshot or PVC resource and converts it into a namespace/name +// string which is then put onto the work queue. +func (c *SmartCloneController) enqueueItem(obj interface{}) { + var key string + var err error + if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { + runtime.HandleError(err) + return + } + c.queue.AddRateLimited(key) +} + +func (c *SmartCloneController) updateSmartCloneStatusPhase(phase cdiv1.DataVolumePhase, dataVolume *cdiv1.DataVolume, newPVC *corev1.PersistentVolumeClaim) error { + var dataVolumeCopy = dataVolume.DeepCopy() + var event DataVolumeEvent + + switch phase { + case cdiv1.SnapshotForSmartCloneInProgress: + dataVolumeCopy.Status.Phase = cdiv1.SnapshotForSmartCloneInProgress + event.eventType = corev1.EventTypeNormal + event.reason = SnapshotForSmartCloneInProgress + event.message = fmt.Sprintf(MessageSmartCloneInProgress, dataVolumeCopy.Spec.Source.PVC.Namespace, dataVolumeCopy.Spec.Source.PVC.Name) + case cdiv1.SmartClonePVCInProgress: + dataVolumeCopy.Status.Phase = cdiv1.SmartClonePVCInProgress + event.eventType = corev1.EventTypeNormal + event.reason = SmartClonePVCInProgress + event.message = fmt.Sprintf(MessageSmartClonePVCInProgress, dataVolumeCopy.Spec.Source.PVC.Namespace, dataVolumeCopy.Spec.Source.PVC.Name) + case cdiv1.Succeeded: + dataVolumeCopy.Status.Phase = cdiv1.Succeeded + event.eventType = corev1.EventTypeNormal + event.reason = CloneSucceeded + event.message = fmt.Sprintf(MessageCloneSucceeded, dataVolumeCopy.Spec.Source.PVC.Namespace, dataVolumeCopy.Spec.Source.PVC.Name, newPVC.Namespace, newPVC.Name) + } + + return c.emitEvent(dataVolume, dataVolumeCopy, &event) +} + +func (c *SmartCloneController) emitEvent(dataVolume *cdiv1.DataVolume, dataVolumeCopy *cdiv1.DataVolume, event *DataVolumeEvent) error { + // Only update the object if something actually changed in the status. 
+ if !reflect.DeepEqual(dataVolume.Status, dataVolumeCopy.Status) { + _, err := c.cdiClientSet.CdiV1alpha1().DataVolumes(dataVolume.Namespace).Update(dataVolumeCopy) + // Emit the event only when the status change happens, not every time + if event.eventType != "" { + c.recorder.Event(dataVolume, event.eventType, event.reason, event.message) + } + return err + } + return nil +} + +func newPvcFromSnapshot(snapshot *csisnapshotv1.VolumeSnapshot, dataVolume *cdiv1.DataVolume) *corev1.PersistentVolumeClaim { + labels := map[string]string{ + "cdi-controller": snapshot.Name, + common.CDILabelKey: common.CDILabelValue, + common.CDIComponentLabel: common.SmartClonerCDILabel, + } + ownerRef := metav1.GetControllerOf(snapshot) + if ownerRef == nil { + return nil + } + annotations := make(map[string]string) + annotations[AnnSmartCloneRequest] = "true" + return &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: snapshot.Name, + Namespace: snapshot.Namespace, + Labels: labels, + Annotations: annotations, + OwnerReferences: []metav1.OwnerReference{*ownerRef}, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + DataSource: &corev1.TypedLocalObjectReference{ + Name: snapshot.Name, + Kind: "VolumeSnapshot", + APIGroup: &csisnapshotv1.SchemeGroupVersion.Group, + }, + VolumeMode: dataVolume.Spec.PVC.VolumeMode, + AccessModes: dataVolume.Spec.PVC.AccessModes, + StorageClassName: dataVolume.Spec.PVC.StorageClassName, + Resources: corev1.ResourceRequirements{ + Requests: dataVolume.Spec.PVC.Resources.Requests, + }, + }, + } +} diff --git a/pkg/controller/util.go b/pkg/controller/util.go index a2dc9d6b4..e208a6ce2 100644 --- a/pkg/controller/util.go +++ b/pkg/controller/util.go @@ -9,10 +9,12 @@ import ( "strings" "time" + crdv1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" routev1 "github.com/openshift/api/route/v1" "github.com/pkg/errors" v1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + extclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -1421,3 +1423,25 @@ func isInsecureTLS(client kubernetes.Interface, pvc *v1.PersistentVolumeClaim) ( return false, nil } + +// IsCsiCrdsDeployed checks whether the CSI snapshotter CRD are deployed +func IsCsiCrdsDeployed(c extclientset.Interface) bool { + vsClass := crdv1alpha1.VolumeSnapshotClassResourcePlural + "." + crdv1alpha1.GroupName + vsContent := crdv1alpha1.VolumeSnapshotContentResourcePlural + "." + crdv1alpha1.GroupName + vs := crdv1alpha1.VolumeSnapshotResourcePlural + "." 
+ crdv1alpha1.GroupName + + return isCrdDeployed(c, vsClass) && + isCrdDeployed(c, vsContent) && + isCrdDeployed(c, vs) +} + +func isCrdDeployed(c extclientset.Interface, name string) bool { + obj, err := c.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{}) + if err != nil { + if k8serrors.IsNotFound(err) { + return false + } + return false + } + return obj != nil +} diff --git a/pkg/controller/util_test.go b/pkg/controller/util_test.go index eefbb25fe..3c6c311fa 100644 --- a/pkg/controller/util_test.go +++ b/pkg/controller/util_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + snapshotv1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" @@ -21,6 +22,8 @@ import ( "k8s.io/client-go/util/workqueue" bootstrapapi "k8s.io/cluster-bootstrap/token/api" + crdv1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" cdiv1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1" cdifake "kubevirt.io/containerized-data-importer/pkg/client/clientset/versioned/fake" "kubevirt.io/containerized-data-importer/pkg/common" @@ -2016,3 +2019,89 @@ func createStorageClass(name string, annotations map[string]string) *storagev1.S }, } } + +func createStorageClassWithProvisioner(name string, annotations map[string]string, provisioner string) *storagev1.StorageClass { + return &storagev1.StorageClass{ + Provisioner: provisioner, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Annotations: annotations, + }, + } +} +func createSnapshotClass(name string, annotations map[string]string, snapshotter string) *snapshotv1.VolumeSnapshotClass { + return &snapshotv1.VolumeSnapshotClass{ + TypeMeta: metav1.TypeMeta{ + Kind: "VolumeSnapshotClass", + APIVersion: snapshotv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Annotations: annotations, + }, + Snapshotter: snapshotter, + } +} + +func createVolumeSnapshotContentCrd() *apiextensionsv1beta1.CustomResourceDefinition { + return &apiextensionsv1beta1.CustomResourceDefinition{ + TypeMeta: metav1.TypeMeta{ + Kind: "CustomResourceDefinition", + APIVersion: apiextensionsv1beta1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: crdv1.VolumeSnapshotContentResourcePlural + "." + crdv1.GroupName, + }, + Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ + Group: crdv1.GroupName, + Version: crdv1.SchemeGroupVersion.Version, + Scope: apiextensionsv1beta1.ClusterScoped, + Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ + Plural: crdv1.VolumeSnapshotContentResourcePlural, + Kind: reflect.TypeOf(crdv1.VolumeSnapshotContent{}).Name(), + }, + }, + } +} + +func createVolumeSnapshotClassCrd() *apiextensionsv1beta1.CustomResourceDefinition { + return &apiextensionsv1beta1.CustomResourceDefinition{ + TypeMeta: metav1.TypeMeta{ + Kind: "CustomResourceDefinition", + APIVersion: apiextensionsv1beta1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: crdv1.VolumeSnapshotClassResourcePlural + "." 
+ crdv1.GroupName, + }, + Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ + Group: crdv1.GroupName, + Version: crdv1.SchemeGroupVersion.Version, + Scope: apiextensionsv1beta1.ClusterScoped, + Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ + Plural: crdv1.VolumeSnapshotClassResourcePlural, + Kind: reflect.TypeOf(crdv1.VolumeSnapshotClass{}).Name(), + }, + }, + } +} + +func createVolumeSnapshotCrd() *apiextensionsv1beta1.CustomResourceDefinition { + return &apiextensionsv1beta1.CustomResourceDefinition{ + TypeMeta: metav1.TypeMeta{ + Kind: "CustomResourceDefinition", + APIVersion: apiextensionsv1beta1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: crdv1.VolumeSnapshotResourcePlural + "." + crdv1.GroupName, + }, + Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ + Group: crdv1.GroupName, + Version: crdv1.SchemeGroupVersion.Version, + Scope: apiextensionsv1beta1.NamespaceScoped, + Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ + Plural: crdv1.VolumeSnapshotResourcePlural, + Kind: reflect.TypeOf(crdv1.VolumeSnapshot{}).Name(), + }, + }, + } +} diff --git a/pkg/operator/resources/cluster/controller.go b/pkg/operator/resources/cluster/controller.go index 2aa4eaaae..4978172ce 100644 --- a/pkg/operator/resources/cluster/controller.go +++ b/pkg/operator/resources/cluster/controller.go @@ -59,6 +59,7 @@ func GetControllerPermissions() []rbacv1.PolicyRule { }, Resources: []string{ "persistentvolumeclaims", + "volumesnapshots", }, Verbs: []string{ "get", @@ -77,6 +78,7 @@ func GetControllerPermissions() []rbacv1.PolicyRule { Resources: []string{ "persistentvolumeclaims/finalizers", "pods/finalizers", + "volumesnapshots/finalizers", }, Verbs: []string{ "update", @@ -188,6 +190,28 @@ func GetControllerPermissions() []rbacv1.PolicyRule { "*", }, }, + { + APIGroups: []string{ + "snapshot.storage.k8s.io", + }, + Resources: []string{ + "*", + }, + Verbs: []string{ + "*", + }, + }, + { + APIGroups: []string{ + "apiextensions.k8s.io", + }, + Resources: []string{ + "customresourcedefinitions", + }, + Verbs: []string{ + "*", + }, + }, } } diff --git a/pkg/operator/resources/operator/operator.go b/pkg/operator/resources/operator/operator.go index 8c2dac436..96dd022fd 100644 --- a/pkg/operator/resources/operator/operator.go +++ b/pkg/operator/resources/operator/operator.go @@ -297,6 +297,7 @@ func getOperatorClusterRules() *[]rbacv1.PolicyRule { Resources: []string{ "pods", "persistentvolumeclaims", + "volumesnapshots", }, Verbs: []string{ "get", @@ -315,6 +316,7 @@ func getOperatorClusterRules() *[]rbacv1.PolicyRule { Resources: []string{ "persistentvolumeclaims/finalizers", "pods/finalizers", + "volumesnapshots/finalizers", }, Verbs: []string{ "update", @@ -389,6 +391,28 @@ func getOperatorClusterRules() *[]rbacv1.PolicyRule { "update", }, }, + { + APIGroups: []string{ + "snapshot.storage.k8s.io", + }, + Resources: []string{ + "*", + }, + Verbs: []string{ + "*", + }, + }, + { + APIGroups: []string{ + "apiextensions.k8s.io", + }, + Resources: []string{ + "customresourcedefinitions", + }, + Verbs: []string{ + "*", + }, + }, } return &rules diff --git a/pkg/snapshot-client/clientset/versioned/clientset.go b/pkg/snapshot-client/clientset/versioned/clientset.go new file mode 100644 index 000000000..a333388fc --- /dev/null +++ b/pkg/snapshot-client/clientset/versioned/clientset.go @@ -0,0 +1,98 @@ +/* +Copyright 2018 The CDI Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" + snapshotv1alpha1 "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + SnapshotV1alpha1() snapshotv1alpha1.SnapshotV1alpha1Interface + // Deprecated: please explicitly pick a version if possible. + Snapshot() snapshotv1alpha1.SnapshotV1alpha1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + snapshotV1alpha1 *snapshotv1alpha1.SnapshotV1alpha1Client +} + +// SnapshotV1alpha1 retrieves the SnapshotV1alpha1Client +func (c *Clientset) SnapshotV1alpha1() snapshotv1alpha1.SnapshotV1alpha1Interface { + return c.snapshotV1alpha1 +} + +// Deprecated: Snapshot retrieves the default version of SnapshotClient. +// Please explicitly pick a version. +func (c *Clientset) Snapshot() snapshotv1alpha1.SnapshotV1alpha1Interface { + return c.snapshotV1alpha1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.snapshotV1alpha1, err = snapshotv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.snapshotV1alpha1 = snapshotv1alpha1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.snapshotV1alpha1 = snapshotv1alpha1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/pkg/snapshot-client/clientset/versioned/doc.go b/pkg/snapshot-client/clientset/versioned/doc.go new file mode 100644 index 000000000..53b927fa7 --- /dev/null +++ b/pkg/snapshot-client/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The CDI Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/pkg/snapshot-client/clientset/versioned/fake/clientset_generated.go b/pkg/snapshot-client/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 000000000..0cbc5fc06 --- /dev/null +++ b/pkg/snapshot-client/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,82 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" + clientset "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned" + snapshotv1alpha1 "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1" + fakesnapshotv1alpha1 "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
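For orientation, here is a minimal sketch of how a unit test might use this fake clientset: seed it through NewSimpleClientset and read the object back through the typed interface. The test name, namespace, and snapshot name are invented for illustration and are not part of this change.

```go
package controller_test

import (
	"testing"

	snapshotv1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	snapfake "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned/fake"
)

func TestFakeSnapshotClientset(t *testing.T) {
	// Seed the fake with a pre-existing VolumeSnapshot (hypothetical name/namespace).
	seed := &snapshotv1.VolumeSnapshot{
		ObjectMeta: metav1.ObjectMeta{Name: "source-snapshot", Namespace: "default"},
	}
	client := snapfake.NewSimpleClientset(seed)

	// Reads are served by the in-memory object tracker; no API server is involved.
	got, err := client.SnapshotV1alpha1().VolumeSnapshots("default").Get("source-snapshot", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name != "source-snapshot" {
		t.Errorf("got snapshot %q, want %q", got.Name, "source-snapshot")
	}
}
```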
+type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +var _ clientset.Interface = &Clientset{} + +// SnapshotV1alpha1 retrieves the SnapshotV1alpha1Client +func (c *Clientset) SnapshotV1alpha1() snapshotv1alpha1.SnapshotV1alpha1Interface { + return &fakesnapshotv1alpha1.FakeSnapshotV1alpha1{Fake: &c.Fake} +} + +// Snapshot retrieves the SnapshotV1alpha1Client +func (c *Clientset) Snapshot() snapshotv1alpha1.SnapshotV1alpha1Interface { + return &fakesnapshotv1alpha1.FakeSnapshotV1alpha1{Fake: &c.Fake} +} diff --git a/pkg/snapshot-client/clientset/versioned/fake/doc.go b/pkg/snapshot-client/clientset/versioned/fake/doc.go new file mode 100644 index 000000000..dd9c6d622 --- /dev/null +++ b/pkg/snapshot-client/clientset/versioned/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/pkg/snapshot-client/clientset/versioned/fake/register.go b/pkg/snapshot-client/clientset/versioned/fake/register.go new file mode 100644 index 000000000..e3163dfbe --- /dev/null +++ b/pkg/snapshot-client/clientset/versioned/fake/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + snapshotv1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) +var parameterCodec = runtime.NewParameterCodec(scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + snapshotv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/pkg/snapshot-client/clientset/versioned/scheme/doc.go b/pkg/snapshot-client/clientset/versioned/scheme/doc.go new file mode 100644 index 000000000..eec0cb9d2 --- /dev/null +++ b/pkg/snapshot-client/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/pkg/snapshot-client/clientset/versioned/scheme/register.go b/pkg/snapshot-client/clientset/versioned/scheme/register.go new file mode 100644 index 000000000..44b22d298 --- /dev/null +++ b/pkg/snapshot-client/clientset/versioned/scheme/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + snapshotv1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + snapshotv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/doc.go b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/doc.go new file mode 100644 index 000000000..e1cc6da82 --- /dev/null +++ b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/doc.go b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/doc.go new file mode 100644 index 000000000..0e14be489 --- /dev/null +++ b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshot.go b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshot.go new file mode 100644 index 000000000..efc720e59 --- /dev/null +++ b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshot.go @@ -0,0 +1,140 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeVolumeSnapshots implements VolumeSnapshotInterface +type FakeVolumeSnapshots struct { + Fake *FakeSnapshotV1alpha1 + ns string +} + +var volumesnapshotsResource = schema.GroupVersionResource{Group: "snapshot.storage.k8s.io", Version: "v1alpha1", Resource: "volumesnapshots"} + +var volumesnapshotsKind = schema.GroupVersionKind{Group: "snapshot.storage.k8s.io", Version: "v1alpha1", Kind: "VolumeSnapshot"} + +// Get takes name of the volumeSnapshot, and returns the corresponding volumeSnapshot object, and an error if there is any. +func (c *FakeVolumeSnapshots) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeSnapshot, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(volumesnapshotsResource, c.ns, name), &v1alpha1.VolumeSnapshot{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshot), err +} + +// List takes label and field selectors, and returns the list of VolumeSnapshots that match those selectors. +func (c *FakeVolumeSnapshots) List(opts v1.ListOptions) (result *v1alpha1.VolumeSnapshotList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(volumesnapshotsResource, volumesnapshotsKind, c.ns, opts), &v1alpha1.VolumeSnapshotList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.VolumeSnapshotList{ListMeta: obj.(*v1alpha1.VolumeSnapshotList).ListMeta} + for _, item := range obj.(*v1alpha1.VolumeSnapshotList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested volumeSnapshots. +func (c *FakeVolumeSnapshots) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(volumesnapshotsResource, c.ns, opts)) + +} + +// Create takes the representation of a volumeSnapshot and creates it. Returns the server's representation of the volumeSnapshot, and an error, if there is any. +func (c *FakeVolumeSnapshots) Create(volumeSnapshot *v1alpha1.VolumeSnapshot) (result *v1alpha1.VolumeSnapshot, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(volumesnapshotsResource, c.ns, volumeSnapshot), &v1alpha1.VolumeSnapshot{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshot), err +} + +// Update takes the representation of a volumeSnapshot and updates it. Returns the server's representation of the volumeSnapshot, and an error, if there is any. 
+func (c *FakeVolumeSnapshots) Update(volumeSnapshot *v1alpha1.VolumeSnapshot) (result *v1alpha1.VolumeSnapshot, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(volumesnapshotsResource, c.ns, volumeSnapshot), &v1alpha1.VolumeSnapshot{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshot), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeVolumeSnapshots) UpdateStatus(volumeSnapshot *v1alpha1.VolumeSnapshot) (*v1alpha1.VolumeSnapshot, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(volumesnapshotsResource, "status", c.ns, volumeSnapshot), &v1alpha1.VolumeSnapshot{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshot), err +} + +// Delete takes name of the volumeSnapshot and deletes it. Returns an error if one occurs. +func (c *FakeVolumeSnapshots) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(volumesnapshotsResource, c.ns, name), &v1alpha1.VolumeSnapshot{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVolumeSnapshots) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(volumesnapshotsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.VolumeSnapshotList{}) + return err +} + +// Patch applies the patch and returns the patched volumeSnapshot. +func (c *FakeVolumeSnapshots) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeSnapshot, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(volumesnapshotsResource, c.ns, name, pt, data, subresources...), &v1alpha1.VolumeSnapshot{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshot), err +} diff --git a/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshot_client.go b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshot_client.go new file mode 100644 index 000000000..ebb4f9369 --- /dev/null +++ b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshot_client.go @@ -0,0 +1,48 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1alpha1 "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1" +) + +type FakeSnapshotV1alpha1 struct { + *testing.Fake +} + +func (c *FakeSnapshotV1alpha1) VolumeSnapshots(namespace string) v1alpha1.VolumeSnapshotInterface { + return &FakeVolumeSnapshots{c, namespace} +} + +func (c *FakeSnapshotV1alpha1) VolumeSnapshotClasses() v1alpha1.VolumeSnapshotClassInterface { + return &FakeVolumeSnapshotClasses{c} +} + +func (c *FakeSnapshotV1alpha1) VolumeSnapshotContents() v1alpha1.VolumeSnapshotContentInterface { + return &FakeVolumeSnapshotContents{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeSnapshotV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshotclass.go b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshotclass.go new file mode 100644 index 000000000..c591cc046 --- /dev/null +++ b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshotclass.go @@ -0,0 +1,120 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeVolumeSnapshotClasses implements VolumeSnapshotClassInterface +type FakeVolumeSnapshotClasses struct { + Fake *FakeSnapshotV1alpha1 +} + +var volumesnapshotclassesResource = schema.GroupVersionResource{Group: "snapshot.storage.k8s.io", Version: "v1alpha1", Resource: "volumesnapshotclasses"} + +var volumesnapshotclassesKind = schema.GroupVersionKind{Group: "snapshot.storage.k8s.io", Version: "v1alpha1", Kind: "VolumeSnapshotClass"} + +// Get takes name of the volumeSnapshotClass, and returns the corresponding volumeSnapshotClass object, and an error if there is any. +func (c *FakeVolumeSnapshotClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeSnapshotClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(volumesnapshotclassesResource, name), &v1alpha1.VolumeSnapshotClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshotClass), err +} + +// List takes label and field selectors, and returns the list of VolumeSnapshotClasses that match those selectors. +func (c *FakeVolumeSnapshotClasses) List(opts v1.ListOptions) (result *v1alpha1.VolumeSnapshotClassList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(volumesnapshotclassesResource, volumesnapshotclassesKind, opts), &v1alpha1.VolumeSnapshotClassList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.VolumeSnapshotClassList{ListMeta: obj.(*v1alpha1.VolumeSnapshotClassList).ListMeta} + for _, item := range obj.(*v1alpha1.VolumeSnapshotClassList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested volumeSnapshotClasses. +func (c *FakeVolumeSnapshotClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(volumesnapshotclassesResource, opts)) +} + +// Create takes the representation of a volumeSnapshotClass and creates it. Returns the server's representation of the volumeSnapshotClass, and an error, if there is any. +func (c *FakeVolumeSnapshotClasses) Create(volumeSnapshotClass *v1alpha1.VolumeSnapshotClass) (result *v1alpha1.VolumeSnapshotClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(volumesnapshotclassesResource, volumeSnapshotClass), &v1alpha1.VolumeSnapshotClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshotClass), err +} + +// Update takes the representation of a volumeSnapshotClass and updates it. Returns the server's representation of the volumeSnapshotClass, and an error, if there is any. +func (c *FakeVolumeSnapshotClasses) Update(volumeSnapshotClass *v1alpha1.VolumeSnapshotClass) (result *v1alpha1.VolumeSnapshotClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(volumesnapshotclassesResource, volumeSnapshotClass), &v1alpha1.VolumeSnapshotClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshotClass), err +} + +// Delete takes name of the volumeSnapshotClass and deletes it. Returns an error if one occurs. +func (c *FakeVolumeSnapshotClasses) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(volumesnapshotclassesResource, name), &v1alpha1.VolumeSnapshotClass{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVolumeSnapshotClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(volumesnapshotclassesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.VolumeSnapshotClassList{}) + return err +} + +// Patch applies the patch and returns the patched volumeSnapshotClass. +func (c *FakeVolumeSnapshotClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeSnapshotClass, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(volumesnapshotclassesResource, name, pt, data, subresources...), &v1alpha1.VolumeSnapshotClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshotClass), err +} diff --git a/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshotcontent.go b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshotcontent.go new file mode 100644 index 000000000..9bbe9394e --- /dev/null +++ b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshotcontent.go @@ -0,0 +1,120 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeVolumeSnapshotContents implements VolumeSnapshotContentInterface +type FakeVolumeSnapshotContents struct { + Fake *FakeSnapshotV1alpha1 +} + +var volumesnapshotcontentsResource = schema.GroupVersionResource{Group: "snapshot.storage.k8s.io", Version: "v1alpha1", Resource: "volumesnapshotcontents"} + +var volumesnapshotcontentsKind = schema.GroupVersionKind{Group: "snapshot.storage.k8s.io", Version: "v1alpha1", Kind: "VolumeSnapshotContent"} + +// Get takes name of the volumeSnapshotContent, and returns the corresponding volumeSnapshotContent object, and an error if there is any. +func (c *FakeVolumeSnapshotContents) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeSnapshotContent, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(volumesnapshotcontentsResource, name), &v1alpha1.VolumeSnapshotContent{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshotContent), err +} + +// List takes label and field selectors, and returns the list of VolumeSnapshotContents that match those selectors. +func (c *FakeVolumeSnapshotContents) List(opts v1.ListOptions) (result *v1alpha1.VolumeSnapshotContentList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(volumesnapshotcontentsResource, volumesnapshotcontentsKind, opts), &v1alpha1.VolumeSnapshotContentList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.VolumeSnapshotContentList{ListMeta: obj.(*v1alpha1.VolumeSnapshotContentList).ListMeta} + for _, item := range obj.(*v1alpha1.VolumeSnapshotContentList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested volumeSnapshotContents. 
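Because the fake records every invocation on its embedded testing.Fake, tests can also assert which calls a controller made. A small, hypothetical sketch (the verb and resource names here are illustrative):

```go
package controller_test

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	snapfake "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned/fake"
)

func TestRecordedSnapshotActions(t *testing.T) {
	client := snapfake.NewSimpleClientset()

	// Stand-in for code under test: list the cluster-scoped VolumeSnapshotContents.
	if _, err := client.SnapshotV1alpha1().VolumeSnapshotContents().List(metav1.ListOptions{}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Every call against the fake is recorded and can be matched by verb and resource.
	found := false
	for _, action := range client.Actions() {
		if action.Matches("list", "volumesnapshotcontents") {
			found = true
		}
	}
	if !found {
		t.Error("expected a list action against volumesnapshotcontents")
	}
}
```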
+func (c *FakeVolumeSnapshotContents) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(volumesnapshotcontentsResource, opts)) +} + +// Create takes the representation of a volumeSnapshotContent and creates it. Returns the server's representation of the volumeSnapshotContent, and an error, if there is any. +func (c *FakeVolumeSnapshotContents) Create(volumeSnapshotContent *v1alpha1.VolumeSnapshotContent) (result *v1alpha1.VolumeSnapshotContent, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(volumesnapshotcontentsResource, volumeSnapshotContent), &v1alpha1.VolumeSnapshotContent{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshotContent), err +} + +// Update takes the representation of a volumeSnapshotContent and updates it. Returns the server's representation of the volumeSnapshotContent, and an error, if there is any. +func (c *FakeVolumeSnapshotContents) Update(volumeSnapshotContent *v1alpha1.VolumeSnapshotContent) (result *v1alpha1.VolumeSnapshotContent, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(volumesnapshotcontentsResource, volumeSnapshotContent), &v1alpha1.VolumeSnapshotContent{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshotContent), err +} + +// Delete takes name of the volumeSnapshotContent and deletes it. Returns an error if one occurs. +func (c *FakeVolumeSnapshotContents) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(volumesnapshotcontentsResource, name), &v1alpha1.VolumeSnapshotContent{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVolumeSnapshotContents) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(volumesnapshotcontentsResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.VolumeSnapshotContentList{}) + return err +} + +// Patch applies the patch and returns the patched volumeSnapshotContent. +func (c *FakeVolumeSnapshotContents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeSnapshotContent, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(volumesnapshotcontentsResource, name, pt, data, subresources...), &v1alpha1.VolumeSnapshotContent{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshotContent), err +} diff --git a/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/generated_expansion.go b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..7270ff6f1 --- /dev/null +++ b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/generated_expansion.go @@ -0,0 +1,25 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. 
DO NOT EDIT. + +package v1alpha1 + +type VolumeSnapshotExpansion interface{} + +type VolumeSnapshotClassExpansion interface{} + +type VolumeSnapshotContentExpansion interface{} diff --git a/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshot.go b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshot.go new file mode 100644 index 000000000..2f4d897ca --- /dev/null +++ b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshot.go @@ -0,0 +1,191 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned/scheme" +) + +// VolumeSnapshotsGetter has a method to return a VolumeSnapshotInterface. +// A group's client should implement this interface. +type VolumeSnapshotsGetter interface { + VolumeSnapshots(namespace string) VolumeSnapshotInterface +} + +// VolumeSnapshotInterface has methods to work with VolumeSnapshot resources. +type VolumeSnapshotInterface interface { + Create(*v1alpha1.VolumeSnapshot) (*v1alpha1.VolumeSnapshot, error) + Update(*v1alpha1.VolumeSnapshot) (*v1alpha1.VolumeSnapshot, error) + UpdateStatus(*v1alpha1.VolumeSnapshot) (*v1alpha1.VolumeSnapshot, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.VolumeSnapshot, error) + List(opts v1.ListOptions) (*v1alpha1.VolumeSnapshotList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeSnapshot, err error) + VolumeSnapshotExpansion +} + +// volumeSnapshots implements VolumeSnapshotInterface +type volumeSnapshots struct { + client rest.Interface + ns string +} + +// newVolumeSnapshots returns a VolumeSnapshots +func newVolumeSnapshots(c *SnapshotV1alpha1Client, namespace string) *volumeSnapshots { + return &volumeSnapshots{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the volumeSnapshot, and returns the corresponding volumeSnapshot object, and an error if there is any. +func (c *volumeSnapshots) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeSnapshot, err error) { + result = &v1alpha1.VolumeSnapshot{} + err = c.client.Get(). + Namespace(c.ns). + Resource("volumesnapshots"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeSnapshots that match those selectors. 
+func (c *volumeSnapshots) List(opts v1.ListOptions) (result *v1alpha1.VolumeSnapshotList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.VolumeSnapshotList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("volumesnapshots"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeSnapshots. +func (c *volumeSnapshots) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("volumesnapshots"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a volumeSnapshot and creates it. Returns the server's representation of the volumeSnapshot, and an error, if there is any. +func (c *volumeSnapshots) Create(volumeSnapshot *v1alpha1.VolumeSnapshot) (result *v1alpha1.VolumeSnapshot, err error) { + result = &v1alpha1.VolumeSnapshot{} + err = c.client.Post(). + Namespace(c.ns). + Resource("volumesnapshots"). + Body(volumeSnapshot). + Do(). + Into(result) + return +} + +// Update takes the representation of a volumeSnapshot and updates it. Returns the server's representation of the volumeSnapshot, and an error, if there is any. +func (c *volumeSnapshots) Update(volumeSnapshot *v1alpha1.VolumeSnapshot) (result *v1alpha1.VolumeSnapshot, err error) { + result = &v1alpha1.VolumeSnapshot{} + err = c.client.Put(). + Namespace(c.ns). + Resource("volumesnapshots"). + Name(volumeSnapshot.Name). + Body(volumeSnapshot). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *volumeSnapshots) UpdateStatus(volumeSnapshot *v1alpha1.VolumeSnapshot) (result *v1alpha1.VolumeSnapshot, err error) { + result = &v1alpha1.VolumeSnapshot{} + err = c.client.Put(). + Namespace(c.ns). + Resource("volumesnapshots"). + Name(volumeSnapshot.Name). + SubResource("status"). + Body(volumeSnapshot). + Do(). + Into(result) + return +} + +// Delete takes name of the volumeSnapshot and deletes it. Returns an error if one occurs. +func (c *volumeSnapshots) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("volumesnapshots"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeSnapshots) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("volumesnapshots"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched volumeSnapshot. +func (c *volumeSnapshots) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeSnapshot, err error) { + result = &v1alpha1.VolumeSnapshot{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("volumesnapshots"). + SubResource(subresources...). 
+ Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshot_client.go b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshot_client.go new file mode 100644 index 000000000..3a10eb69c --- /dev/null +++ b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshot_client.go @@ -0,0 +1,100 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned/scheme" +) + +type SnapshotV1alpha1Interface interface { + RESTClient() rest.Interface + VolumeSnapshotsGetter + VolumeSnapshotClassesGetter + VolumeSnapshotContentsGetter +} + +// SnapshotV1alpha1Client is used to interact with features provided by the snapshot.storage.k8s.io group. +type SnapshotV1alpha1Client struct { + restClient rest.Interface +} + +func (c *SnapshotV1alpha1Client) VolumeSnapshots(namespace string) VolumeSnapshotInterface { + return newVolumeSnapshots(c, namespace) +} + +func (c *SnapshotV1alpha1Client) VolumeSnapshotClasses() VolumeSnapshotClassInterface { + return newVolumeSnapshotClasses(c) +} + +func (c *SnapshotV1alpha1Client) VolumeSnapshotContents() VolumeSnapshotContentInterface { + return newVolumeSnapshotContents(c) +} + +// NewForConfig creates a new SnapshotV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*SnapshotV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &SnapshotV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new SnapshotV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SnapshotV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SnapshotV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *SnapshotV1alpha1Client { + return &SnapshotV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
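Putting the typed client together: a hedged sketch of how a caller could build the clientset from a rest.Config and create a VolumeSnapshot that points at a source PVC, which is the shape of object Smart-Cloning relies on. The namespace, PVC name, and snapshot-class name below are placeholders, not values defined by this patch; the spec fields follow the external-snapshotter v1alpha1 API.

```go
package main

import (
	snapshotv1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
	"k8s.io/klog"

	snapclient "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned"
)

func snapshotSourcePVC(cfg *rest.Config) {
	client, err := snapclient.NewForConfig(cfg)
	if err != nil {
		klog.Fatalf("error building snapshot clientset: %v", err)
	}

	className := "example-snapshot-class" // hypothetical VolumeSnapshotClass
	snapshot := &snapshotv1.VolumeSnapshot{
		ObjectMeta: metav1.ObjectMeta{Name: "source-pvc-snapshot", Namespace: "default"},
		Spec: snapshotv1.VolumeSnapshotSpec{
			// Reference the PVC to snapshot; APIGroup is left nil for the core group.
			Source: &corev1.TypedLocalObjectReference{
				Kind: "PersistentVolumeClaim",
				Name: "source-pvc", // hypothetical source PVC in the same namespace
			},
			VolumeSnapshotClassName: &className,
		},
	}

	if _, err := client.SnapshotV1alpha1().VolumeSnapshots("default").Create(snapshot); err != nil {
		klog.Fatalf("error creating VolumeSnapshot: %v", err)
	}
}
```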
+func (c *SnapshotV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshotclass.go b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshotclass.go new file mode 100644 index 000000000..b40b3405b --- /dev/null +++ b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshotclass.go @@ -0,0 +1,164 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned/scheme" +) + +// VolumeSnapshotClassesGetter has a method to return a VolumeSnapshotClassInterface. +// A group's client should implement this interface. +type VolumeSnapshotClassesGetter interface { + VolumeSnapshotClasses() VolumeSnapshotClassInterface +} + +// VolumeSnapshotClassInterface has methods to work with VolumeSnapshotClass resources. +type VolumeSnapshotClassInterface interface { + Create(*v1alpha1.VolumeSnapshotClass) (*v1alpha1.VolumeSnapshotClass, error) + Update(*v1alpha1.VolumeSnapshotClass) (*v1alpha1.VolumeSnapshotClass, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.VolumeSnapshotClass, error) + List(opts v1.ListOptions) (*v1alpha1.VolumeSnapshotClassList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeSnapshotClass, err error) + VolumeSnapshotClassExpansion +} + +// volumeSnapshotClasses implements VolumeSnapshotClassInterface +type volumeSnapshotClasses struct { + client rest.Interface +} + +// newVolumeSnapshotClasses returns a VolumeSnapshotClasses +func newVolumeSnapshotClasses(c *SnapshotV1alpha1Client) *volumeSnapshotClasses { + return &volumeSnapshotClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the volumeSnapshotClass, and returns the corresponding volumeSnapshotClass object, and an error if there is any. +func (c *volumeSnapshotClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeSnapshotClass, err error) { + result = &v1alpha1.VolumeSnapshotClass{} + err = c.client.Get(). + Resource("volumesnapshotclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeSnapshotClasses that match those selectors. 
+func (c *volumeSnapshotClasses) List(opts v1.ListOptions) (result *v1alpha1.VolumeSnapshotClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.VolumeSnapshotClassList{} + err = c.client.Get(). + Resource("volumesnapshotclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeSnapshotClasses. +func (c *volumeSnapshotClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("volumesnapshotclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a volumeSnapshotClass and creates it. Returns the server's representation of the volumeSnapshotClass, and an error, if there is any. +func (c *volumeSnapshotClasses) Create(volumeSnapshotClass *v1alpha1.VolumeSnapshotClass) (result *v1alpha1.VolumeSnapshotClass, err error) { + result = &v1alpha1.VolumeSnapshotClass{} + err = c.client.Post(). + Resource("volumesnapshotclasses"). + Body(volumeSnapshotClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a volumeSnapshotClass and updates it. Returns the server's representation of the volumeSnapshotClass, and an error, if there is any. +func (c *volumeSnapshotClasses) Update(volumeSnapshotClass *v1alpha1.VolumeSnapshotClass) (result *v1alpha1.VolumeSnapshotClass, err error) { + result = &v1alpha1.VolumeSnapshotClass{} + err = c.client.Put(). + Resource("volumesnapshotclasses"). + Name(volumeSnapshotClass.Name). + Body(volumeSnapshotClass). + Do(). + Into(result) + return +} + +// Delete takes name of the volumeSnapshotClass and deletes it. Returns an error if one occurs. +func (c *volumeSnapshotClasses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("volumesnapshotclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeSnapshotClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("volumesnapshotclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched volumeSnapshotClass. +func (c *volumeSnapshotClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeSnapshotClass, err error) { + result = &v1alpha1.VolumeSnapshotClass{} + err = c.client.Patch(pt). + Resource("volumesnapshotclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshotcontent.go b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshotcontent.go new file mode 100644 index 000000000..9c68dea67 --- /dev/null +++ b/pkg/snapshot-client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshotcontent.go @@ -0,0 +1,164 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned/scheme" +) + +// VolumeSnapshotContentsGetter has a method to return a VolumeSnapshotContentInterface. +// A group's client should implement this interface. +type VolumeSnapshotContentsGetter interface { + VolumeSnapshotContents() VolumeSnapshotContentInterface +} + +// VolumeSnapshotContentInterface has methods to work with VolumeSnapshotContent resources. +type VolumeSnapshotContentInterface interface { + Create(*v1alpha1.VolumeSnapshotContent) (*v1alpha1.VolumeSnapshotContent, error) + Update(*v1alpha1.VolumeSnapshotContent) (*v1alpha1.VolumeSnapshotContent, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.VolumeSnapshotContent, error) + List(opts v1.ListOptions) (*v1alpha1.VolumeSnapshotContentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeSnapshotContent, err error) + VolumeSnapshotContentExpansion +} + +// volumeSnapshotContents implements VolumeSnapshotContentInterface +type volumeSnapshotContents struct { + client rest.Interface +} + +// newVolumeSnapshotContents returns a VolumeSnapshotContents +func newVolumeSnapshotContents(c *SnapshotV1alpha1Client) *volumeSnapshotContents { + return &volumeSnapshotContents{ + client: c.RESTClient(), + } +} + +// Get takes name of the volumeSnapshotContent, and returns the corresponding volumeSnapshotContent object, and an error if there is any. +func (c *volumeSnapshotContents) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeSnapshotContent, err error) { + result = &v1alpha1.VolumeSnapshotContent{} + err = c.client.Get(). + Resource("volumesnapshotcontents"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeSnapshotContents that match those selectors. 
+func (c *volumeSnapshotContents) List(opts v1.ListOptions) (result *v1alpha1.VolumeSnapshotContentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.VolumeSnapshotContentList{} + err = c.client.Get(). + Resource("volumesnapshotcontents"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeSnapshotContents. +func (c *volumeSnapshotContents) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("volumesnapshotcontents"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a volumeSnapshotContent and creates it. Returns the server's representation of the volumeSnapshotContent, and an error, if there is any. +func (c *volumeSnapshotContents) Create(volumeSnapshotContent *v1alpha1.VolumeSnapshotContent) (result *v1alpha1.VolumeSnapshotContent, err error) { + result = &v1alpha1.VolumeSnapshotContent{} + err = c.client.Post(). + Resource("volumesnapshotcontents"). + Body(volumeSnapshotContent). + Do(). + Into(result) + return +} + +// Update takes the representation of a volumeSnapshotContent and updates it. Returns the server's representation of the volumeSnapshotContent, and an error, if there is any. +func (c *volumeSnapshotContents) Update(volumeSnapshotContent *v1alpha1.VolumeSnapshotContent) (result *v1alpha1.VolumeSnapshotContent, err error) { + result = &v1alpha1.VolumeSnapshotContent{} + err = c.client.Put(). + Resource("volumesnapshotcontents"). + Name(volumeSnapshotContent.Name). + Body(volumeSnapshotContent). + Do(). + Into(result) + return +} + +// Delete takes name of the volumeSnapshotContent and deletes it. Returns an error if one occurs. +func (c *volumeSnapshotContents) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("volumesnapshotcontents"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeSnapshotContents) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("volumesnapshotcontents"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched volumeSnapshotContent. +func (c *volumeSnapshotContents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeSnapshotContent, err error) { + result = &v1alpha1.VolumeSnapshotContent{} + err = c.client.Patch(pt). + Resource("volumesnapshotcontents"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/snapshot-client/informers/externalversions/factory.go b/pkg/snapshot-client/informers/externalversions/factory.go new file mode 100644 index 000000000..26c39d333 --- /dev/null +++ b/pkg/snapshot-client/informers/externalversions/factory.go @@ -0,0 +1,180 @@ +/* +Copyright 2018 The CDI Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" + versioned "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned" + internalinterfaces "kubevirt.io/containerized-data-importer/pkg/snapshot-client/informers/externalversions/internalinterfaces" + volumesnapshot "kubevirt.io/containerized-data-importer/pkg/snapshot-client/informers/externalversions/volumesnapshot" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. 
+// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Snapshot() volumesnapshot.Interface +} + +func (f *sharedInformerFactory) Snapshot() volumesnapshot.Interface { + return volumesnapshot.New(f, f.namespace, f.tweakListOptions) +} diff --git a/pkg/snapshot-client/informers/externalversions/generic.go b/pkg/snapshot-client/informers/externalversions/generic.go new file mode 100644 index 000000000..bc37df555 --- /dev/null +++ b/pkg/snapshot-client/informers/externalversions/generic.go @@ -0,0 +1,66 @@ +/* +Copyright 2018 The CDI Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=snapshot.storage.k8s.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("volumesnapshots"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Snapshot().V1alpha1().VolumeSnapshots().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("volumesnapshotclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Snapshot().V1alpha1().VolumeSnapshotClasses().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("volumesnapshotcontents"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Snapshot().V1alpha1().VolumeSnapshotContents().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/pkg/snapshot-client/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/snapshot-client/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 000000000..630ed4738 --- /dev/null +++ b/pkg/snapshot-client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,40 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package internalinterfaces + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" + versioned "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/pkg/snapshot-client/informers/externalversions/volumesnapshot/interface.go b/pkg/snapshot-client/informers/externalversions/volumesnapshot/interface.go new file mode 100644 index 000000000..9dc1dece7 --- /dev/null +++ b/pkg/snapshot-client/informers/externalversions/volumesnapshot/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package snapshot + +import ( + internalinterfaces "kubevirt.io/containerized-data-importer/pkg/snapshot-client/informers/externalversions/internalinterfaces" + v1alpha1 "kubevirt.io/containerized-data-importer/pkg/snapshot-client/informers/externalversions/volumesnapshot/v1alpha1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/pkg/snapshot-client/informers/externalversions/volumesnapshot/v1alpha1/interface.go b/pkg/snapshot-client/informers/externalversions/volumesnapshot/v1alpha1/interface.go new file mode 100644 index 000000000..24ed5419d --- /dev/null +++ b/pkg/snapshot-client/informers/externalversions/volumesnapshot/v1alpha1/interface.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "kubevirt.io/containerized-data-importer/pkg/snapshot-client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // VolumeSnapshots returns a VolumeSnapshotInformer. + VolumeSnapshots() VolumeSnapshotInformer + // VolumeSnapshotClasses returns a VolumeSnapshotClassInformer. + VolumeSnapshotClasses() VolumeSnapshotClassInformer + // VolumeSnapshotContents returns a VolumeSnapshotContentInformer. + VolumeSnapshotContents() VolumeSnapshotContentInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// VolumeSnapshots returns a VolumeSnapshotInformer. +func (v *version) VolumeSnapshots() VolumeSnapshotInformer { + return &volumeSnapshotInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// VolumeSnapshotClasses returns a VolumeSnapshotClassInformer. +func (v *version) VolumeSnapshotClasses() VolumeSnapshotClassInformer { + return &volumeSnapshotClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// VolumeSnapshotContents returns a VolumeSnapshotContentInformer. +func (v *version) VolumeSnapshotContents() VolumeSnapshotContentInformer { + return &volumeSnapshotContentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/pkg/snapshot-client/informers/externalversions/volumesnapshot/v1alpha1/volumesnapshot.go b/pkg/snapshot-client/informers/externalversions/volumesnapshot/v1alpha1/volumesnapshot.go new file mode 100644 index 000000000..a70cbdcf7 --- /dev/null +++ b/pkg/snapshot-client/informers/externalversions/volumesnapshot/v1alpha1/volumesnapshot.go @@ -0,0 +1,89 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + time "time" + + volumesnapshotv1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned" + internalinterfaces "kubevirt.io/containerized-data-importer/pkg/snapshot-client/informers/externalversions/internalinterfaces" + v1alpha1 "kubevirt.io/containerized-data-importer/pkg/snapshot-client/listers/volumesnapshot/v1alpha1" +) + +// VolumeSnapshotInformer provides access to a shared informer and lister for +// VolumeSnapshots. +type VolumeSnapshotInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.VolumeSnapshotLister +} + +type volumeSnapshotInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewVolumeSnapshotInformer constructs a new informer for VolumeSnapshot type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVolumeSnapshotInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVolumeSnapshotInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredVolumeSnapshotInformer constructs a new informer for VolumeSnapshot type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredVolumeSnapshotInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SnapshotV1alpha1().VolumeSnapshots(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SnapshotV1alpha1().VolumeSnapshots(namespace).Watch(options) + }, + }, + &volumesnapshotv1alpha1.VolumeSnapshot{}, + resyncPeriod, + indexers, + ) +} + +func (f *volumeSnapshotInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVolumeSnapshotInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *volumeSnapshotInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&volumesnapshotv1alpha1.VolumeSnapshot{}, f.defaultInformer) +} + +func (f *volumeSnapshotInformer) Lister() v1alpha1.VolumeSnapshotLister { + return v1alpha1.NewVolumeSnapshotLister(f.Informer().GetIndexer()) +} diff --git a/pkg/snapshot-client/informers/externalversions/volumesnapshot/v1alpha1/volumesnapshotclass.go b/pkg/snapshot-client/informers/externalversions/volumesnapshot/v1alpha1/volumesnapshotclass.go new file mode 100644 index 000000000..e9f71196e --- /dev/null +++ b/pkg/snapshot-client/informers/externalversions/volumesnapshot/v1alpha1/volumesnapshotclass.go @@ -0,0 +1,88 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + volumesnapshotv1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned" + internalinterfaces "kubevirt.io/containerized-data-importer/pkg/snapshot-client/informers/externalversions/internalinterfaces" + v1alpha1 "kubevirt.io/containerized-data-importer/pkg/snapshot-client/listers/volumesnapshot/v1alpha1" +) + +// VolumeSnapshotClassInformer provides access to a shared informer and lister for +// VolumeSnapshotClasses. 
+type VolumeSnapshotClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.VolumeSnapshotClassLister +} + +type volumeSnapshotClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewVolumeSnapshotClassInformer constructs a new informer for VolumeSnapshotClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVolumeSnapshotClassInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVolumeSnapshotClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredVolumeSnapshotClassInformer constructs a new informer for VolumeSnapshotClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredVolumeSnapshotClassInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SnapshotV1alpha1().VolumeSnapshotClasses().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SnapshotV1alpha1().VolumeSnapshotClasses().Watch(options) + }, + }, + &volumesnapshotv1alpha1.VolumeSnapshotClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *volumeSnapshotClassInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVolumeSnapshotClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *volumeSnapshotClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&volumesnapshotv1alpha1.VolumeSnapshotClass{}, f.defaultInformer) +} + +func (f *volumeSnapshotClassInformer) Lister() v1alpha1.VolumeSnapshotClassLister { + return v1alpha1.NewVolumeSnapshotClassLister(f.Informer().GetIndexer()) +} diff --git a/pkg/snapshot-client/informers/externalversions/volumesnapshot/v1alpha1/volumesnapshotcontent.go b/pkg/snapshot-client/informers/externalversions/volumesnapshot/v1alpha1/volumesnapshotcontent.go new file mode 100644 index 000000000..a65988d30 --- /dev/null +++ b/pkg/snapshot-client/informers/externalversions/volumesnapshot/v1alpha1/volumesnapshotcontent.go @@ -0,0 +1,88 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + time "time" + + volumesnapshotv1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "kubevirt.io/containerized-data-importer/pkg/snapshot-client/clientset/versioned" + internalinterfaces "kubevirt.io/containerized-data-importer/pkg/snapshot-client/informers/externalversions/internalinterfaces" + v1alpha1 "kubevirt.io/containerized-data-importer/pkg/snapshot-client/listers/volumesnapshot/v1alpha1" +) + +// VolumeSnapshotContentInformer provides access to a shared informer and lister for +// VolumeSnapshotContents. +type VolumeSnapshotContentInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.VolumeSnapshotContentLister +} + +type volumeSnapshotContentInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewVolumeSnapshotContentInformer constructs a new informer for VolumeSnapshotContent type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVolumeSnapshotContentInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVolumeSnapshotContentInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredVolumeSnapshotContentInformer constructs a new informer for VolumeSnapshotContent type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredVolumeSnapshotContentInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SnapshotV1alpha1().VolumeSnapshotContents().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SnapshotV1alpha1().VolumeSnapshotContents().Watch(options) + }, + }, + &volumesnapshotv1alpha1.VolumeSnapshotContent{}, + resyncPeriod, + indexers, + ) +} + +func (f *volumeSnapshotContentInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVolumeSnapshotContentInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *volumeSnapshotContentInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&volumesnapshotv1alpha1.VolumeSnapshotContent{}, f.defaultInformer) +} + +func (f *volumeSnapshotContentInformer) Lister() v1alpha1.VolumeSnapshotContentLister { + return v1alpha1.NewVolumeSnapshotContentLister(f.Informer().GetIndexer()) +} diff --git a/pkg/snapshot-client/listers/volumesnapshot/v1alpha1/expansion_generated.go b/pkg/snapshot-client/listers/volumesnapshot/v1alpha1/expansion_generated.go new file mode 100644 index 000000000..0fbdc8cae --- /dev/null +++ b/pkg/snapshot-client/listers/volumesnapshot/v1alpha1/expansion_generated.go @@ -0,0 +1,35 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// VolumeSnapshotListerExpansion allows custom methods to be added to +// VolumeSnapshotLister. +type VolumeSnapshotListerExpansion interface{} + +// VolumeSnapshotNamespaceListerExpansion allows custom methods to be added to +// VolumeSnapshotNamespaceLister. +type VolumeSnapshotNamespaceListerExpansion interface{} + +// VolumeSnapshotClassListerExpansion allows custom methods to be added to +// VolumeSnapshotClassLister. +type VolumeSnapshotClassListerExpansion interface{} + +// VolumeSnapshotContentListerExpansion allows custom methods to be added to +// VolumeSnapshotContentLister. +type VolumeSnapshotContentListerExpansion interface{} diff --git a/pkg/snapshot-client/listers/volumesnapshot/v1alpha1/volumesnapshot.go b/pkg/snapshot-client/listers/volumesnapshot/v1alpha1/volumesnapshot.go new file mode 100644 index 000000000..182fe322f --- /dev/null +++ b/pkg/snapshot-client/listers/volumesnapshot/v1alpha1/volumesnapshot.go @@ -0,0 +1,94 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// VolumeSnapshotLister helps list VolumeSnapshots. +type VolumeSnapshotLister interface { + // List lists all VolumeSnapshots in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.VolumeSnapshot, err error) + // VolumeSnapshots returns an object that can list and get VolumeSnapshots. + VolumeSnapshots(namespace string) VolumeSnapshotNamespaceLister + VolumeSnapshotListerExpansion +} + +// volumeSnapshotLister implements the VolumeSnapshotLister interface. +type volumeSnapshotLister struct { + indexer cache.Indexer +} + +// NewVolumeSnapshotLister returns a new VolumeSnapshotLister. +func NewVolumeSnapshotLister(indexer cache.Indexer) VolumeSnapshotLister { + return &volumeSnapshotLister{indexer: indexer} +} + +// List lists all VolumeSnapshots in the indexer. +func (s *volumeSnapshotLister) List(selector labels.Selector) (ret []*v1alpha1.VolumeSnapshot, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.VolumeSnapshot)) + }) + return ret, err +} + +// VolumeSnapshots returns an object that can list and get VolumeSnapshots. +func (s *volumeSnapshotLister) VolumeSnapshots(namespace string) VolumeSnapshotNamespaceLister { + return volumeSnapshotNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// VolumeSnapshotNamespaceLister helps list and get VolumeSnapshots. +type VolumeSnapshotNamespaceLister interface { + // List lists all VolumeSnapshots in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.VolumeSnapshot, err error) + // Get retrieves the VolumeSnapshot from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.VolumeSnapshot, error) + VolumeSnapshotNamespaceListerExpansion +} + +// volumeSnapshotNamespaceLister implements the VolumeSnapshotNamespaceLister +// interface. +type volumeSnapshotNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all VolumeSnapshots in the indexer for a given namespace. +func (s volumeSnapshotNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.VolumeSnapshot, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.VolumeSnapshot)) + }) + return ret, err +} + +// Get retrieves the VolumeSnapshot from the indexer for a given namespace and name. 
+func (s volumeSnapshotNamespaceLister) Get(name string) (*v1alpha1.VolumeSnapshot, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("volumesnapshot"), name) + } + return obj.(*v1alpha1.VolumeSnapshot), nil +} diff --git a/pkg/snapshot-client/listers/volumesnapshot/v1alpha1/volumesnapshotclass.go b/pkg/snapshot-client/listers/volumesnapshot/v1alpha1/volumesnapshotclass.go new file mode 100644 index 000000000..2112cdb2e --- /dev/null +++ b/pkg/snapshot-client/listers/volumesnapshot/v1alpha1/volumesnapshotclass.go @@ -0,0 +1,65 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// VolumeSnapshotClassLister helps list VolumeSnapshotClasses. +type VolumeSnapshotClassLister interface { + // List lists all VolumeSnapshotClasses in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.VolumeSnapshotClass, err error) + // Get retrieves the VolumeSnapshotClass from the index for a given name. + Get(name string) (*v1alpha1.VolumeSnapshotClass, error) + VolumeSnapshotClassListerExpansion +} + +// volumeSnapshotClassLister implements the VolumeSnapshotClassLister interface. +type volumeSnapshotClassLister struct { + indexer cache.Indexer +} + +// NewVolumeSnapshotClassLister returns a new VolumeSnapshotClassLister. +func NewVolumeSnapshotClassLister(indexer cache.Indexer) VolumeSnapshotClassLister { + return &volumeSnapshotClassLister{indexer: indexer} +} + +// List lists all VolumeSnapshotClasses in the indexer. +func (s *volumeSnapshotClassLister) List(selector labels.Selector) (ret []*v1alpha1.VolumeSnapshotClass, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.VolumeSnapshotClass)) + }) + return ret, err +} + +// Get retrieves the VolumeSnapshotClass from the index for a given name. +func (s *volumeSnapshotClassLister) Get(name string) (*v1alpha1.VolumeSnapshotClass, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("volumesnapshotclass"), name) + } + return obj.(*v1alpha1.VolumeSnapshotClass), nil +} diff --git a/pkg/snapshot-client/listers/volumesnapshot/v1alpha1/volumesnapshotcontent.go b/pkg/snapshot-client/listers/volumesnapshot/v1alpha1/volumesnapshotcontent.go new file mode 100644 index 000000000..ff7aac4a6 --- /dev/null +++ b/pkg/snapshot-client/listers/volumesnapshot/v1alpha1/volumesnapshotcontent.go @@ -0,0 +1,65 @@ +/* +Copyright 2018 The CDI Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// VolumeSnapshotContentLister helps list VolumeSnapshotContents. +type VolumeSnapshotContentLister interface { + // List lists all VolumeSnapshotContents in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.VolumeSnapshotContent, err error) + // Get retrieves the VolumeSnapshotContent from the index for a given name. + Get(name string) (*v1alpha1.VolumeSnapshotContent, error) + VolumeSnapshotContentListerExpansion +} + +// volumeSnapshotContentLister implements the VolumeSnapshotContentLister interface. +type volumeSnapshotContentLister struct { + indexer cache.Indexer +} + +// NewVolumeSnapshotContentLister returns a new VolumeSnapshotContentLister. +func NewVolumeSnapshotContentLister(indexer cache.Indexer) VolumeSnapshotContentLister { + return &volumeSnapshotContentLister{indexer: indexer} +} + +// List lists all VolumeSnapshotContents in the indexer. +func (s *volumeSnapshotContentLister) List(selector labels.Selector) (ret []*v1alpha1.VolumeSnapshotContent, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.VolumeSnapshotContent)) + }) + return ret, err +} + +// Get retrieves the VolumeSnapshotContent from the index for a given name. 
+func (s *volumeSnapshotContentLister) Get(name string) (*v1alpha1.VolumeSnapshotContent, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("volumesnapshotcontent"), name) + } + return obj.(*v1alpha1.VolumeSnapshotContent), nil +} diff --git a/tests/framework/framework.go b/tests/framework/framework.go index 7dd162b44..f358f630c 100644 --- a/tests/framework/framework.go +++ b/tests/framework/framework.go @@ -40,12 +40,13 @@ const ( // run-time flags var ( - kubectlPath *string - ocPath *string - cdiInstallNs *string - kubeConfig *string - master *string - goCLIPath *string + kubectlPath *string + ocPath *string + cdiInstallNs *string + kubeConfig *string + master *string + goCLIPath *string + snapshotSCName *string ) // Config provides some basic test config options @@ -91,6 +92,8 @@ type Framework struct { Master string // GoCliPath is a test run-time flag to store the location of gocli GoCLIPath string + // SnapshotSCName is the Storage Class name that supports Snapshots + SnapshotSCName string } // TODO: look into k8s' SynchronizedBeforeSuite() and SynchronizedAfterSuite() code and their general @@ -108,6 +111,7 @@ func init() { kubeConfig = flag.String("kubeconfig", "/var/run/kubernetes/admin.kubeconfig", "The absolute path to the kubeconfig file") master = flag.String("master", "", "master url:port") goCLIPath = flag.String("gocli-path", "cli.sh", "The path to cli script") + snapshotSCName = flag.String("snapshot-sc", "", "The Storage Class supporting snapshots") } // NewFrameworkOrDie calls NewFramework and handles errors by calling Fail. Config is optional, but @@ -158,6 +162,7 @@ func NewFramework(prefix string, config Config) (*Framework, error) { f.KubeConfig = *kubeConfig f.Master = *master f.GoCLIPath = *goCLIPath + f.SnapshotSCName = *snapshotSCName restConfig, err := f.LoadConfig() if err != nil { diff --git a/tests/smartclone_test.go b/tests/smartclone_test.go new file mode 100644 index 000000000..e56a305c2 --- /dev/null +++ b/tests/smartclone_test.go @@ -0,0 +1,147 @@ +package tests + +import ( + "fmt" + "strings" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "kubevirt.io/containerized-data-importer/pkg/controller" + "kubevirt.io/containerized-data-importer/tests/framework" + "kubevirt.io/containerized-data-importer/tests/utils" + + cdiv1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1" +) + +var _ = Describe("[vendor:cnv-qe@redhat.com][level:component]SmartClone tests", func() { + + var sourcePvc *v1.PersistentVolumeClaim + + fillData := "123456789012345678901234567890123456789012345678901234567890" + testFile := utils.DefaultPvcMountPath + "/source.txt" + fillCommand := "echo \"" + fillData + "\" >> " + testFile + + f := framework.NewFrameworkOrDie("dv-func-test") + + AfterEach(func() { + if sourcePvc != nil { + By("[AfterEach] Clean up source PVC") + err := f.DeletePVC(sourcePvc) + Expect(err).ToNot(HaveOccurred()) + sourcePvc = nil + } + }) + + Describe("Verify DataVolume Smart Cloning - Positive flow", func() { + It("succeed creating smart-clone dv", func() { + if !IsSnapshotStorageClassAvailable(f) { + Skip("Storage Class for clone via snapshot is not available") + } + dataVolume := createDataVolume("dv-smart-clone-test-1", sourcePvc, fillCommand, f.SnapshotSCName, f) + // Wait for snapshot creation to start + waitForDvPhase(cdiv1.SnapshotForSmartCloneInProgress, dataVolume, f) + verifyEvent(controller.SnapshotForSmartCloneInProgress, dataVolume.Namespace, f) + // Wait for PVC creation to start + waitForDvPhase(cdiv1.SmartClonePVCInProgress, dataVolume, f) + verifyEvent(controller.SmartClonePVCInProgress, dataVolume.Namespace, f) + // Verify PVC's content + verifyPVC(dataVolume, f) + // Wait for operation Succeeded + waitForDvPhase(cdiv1.Succeeded, dataVolume, f) + verifyEvent(controller.CloneSucceeded, dataVolume.Namespace, f) + + // Cleanup + err := utils.DeleteDataVolume(f.CdiClient, f.Namespace.Name, dataVolume.Name) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + Describe("Verify DataVolume Smart Cloning - Negative flow", func() { + It("verify inapplicable smart-clone dv", func() { + if controller.IsOpenshift(f.K8sClient) { + Skip("Test not available in openshift") + } + dataVolume := createDataVolume("dv-smart-clone-test-negative", sourcePvc, fillCommand, "", f) + + // Wait for operation Succeeded + waitForDvPhase(cdiv1.Succeeded, dataVolume, f) + verifyEvent(controller.CloneSucceeded, dataVolume.Namespace, f) + + events, _ := RunKubectlCommand(f, "get", "events", "-n", dataVolume.Namespace) + Expect(strings.Contains(events, controller.SnapshotForSmartCloneInProgress)).To(BeFalse()) + + // Cleanup + err := utils.DeleteDataVolume(f.CdiClient, f.Namespace.Name, dataVolume.Name) + Expect(err).ToNot(HaveOccurred()) + }) + }) +}) + +func verifyPVC(dataVolume *cdiv1.DataVolume, f *framework.Framework) { + By("verifying pvc was created") + targetPvc, err := f.K8sClient.CoreV1().PersistentVolumeClaims(dataVolume.Namespace).Get(dataVolume.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + + By(fmt.Sprint("Verifying target PVC content")) + Expect(f.VerifyTargetPVCContent(f.Namespace, targetPvc, fillData, testBaseDir, testFile)).To(BeTrue()) +} + +func waitForDvPhase(phase cdiv1.DataVolumePhase, dataVolume *cdiv1.DataVolume, f *framework.Framework) { + By(fmt.Sprintf("waiting for datavolume to match phase %s", string(phase))) + err := utils.WaitForDataVolumePhase(f.CdiClient, f.Namespace.Name, phase, dataVolume.Name) + if err != nil { + PrintControllerLog(f) + dv, dverr := 
f.CdiClient.CdiV1alpha1().DataVolumes(f.Namespace.Name).Get(dataVolume.Name, metav1.GetOptions{})
+		if dverr != nil {
+			Fail(fmt.Sprintf("could not get datavolume %s: %v", dataVolume.Name, dverr))
+		}
+		Fail(fmt.Sprintf("datavolume %s is in phase %s, expected phase %s", dv.Name, dv.Status.Phase, string(phase)))
+	}
+}
+
+func IsSnapshotStorageClassAvailable(f *framework.Framework) bool {
+	sc, err := f.K8sClient.StorageV1().StorageClasses().Get(f.SnapshotSCName, metav1.GetOptions{})
+	if err != nil {
+		return false
+	}
+	return sc.Name == f.SnapshotSCName
+}
+
+func createDataVolume(dataVolumeName string, sourcePvc *v1.PersistentVolumeClaim, command string, scName string, f *framework.Framework) *cdiv1.DataVolume {
+	By(fmt.Sprintf("Storage Class name: %s", scName))
+	sourcePVCName := fmt.Sprintf("%s-src-pvc", dataVolumeName)
+	sourcePodFillerName := fmt.Sprintf("%s-filler-pod", dataVolumeName)
+	pvcDef := utils.NewPVCDefinition(sourcePVCName, "1G", nil, nil)
+	if scName != "" {
+		pvcDef.Spec.StorageClassName = &scName
+	}
+	sourcePvc = f.CreateAndPopulateSourcePVC(pvcDef, sourcePodFillerName, command)
+
+	By(fmt.Sprintf("creating a new target PVC (datavolume) to clone %s", sourcePvc.Name))
+	dataVolume := utils.NewCloningDataVolume(dataVolumeName, "1Gi", sourcePvc)
+	if scName != "" {
+		dataVolume.Spec.PVC.StorageClassName = &scName
+	}
+
+	By(fmt.Sprintf("creating new datavolume %s", dataVolume.Name))
+	dataVolume, err := utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dataVolume)
+	Expect(err).ToNot(HaveOccurred())
+
+	return dataVolume
+}
+
+func verifyEvent(eventReason string, dataVolumeNamespace string, f *framework.Framework) {
+	By(fmt.Sprintf("Verifying event occurred: %s", eventReason))
+	Eventually(func() bool {
+		events, err := RunKubectlCommand(f, "get", "events", "-n", dataVolumeNamespace)
+		if err == nil {
+			fmt.Fprintf(GinkgoWriter, "%s", events)
+			return strings.Contains(events, eventReason)
+		}
+		fmt.Fprintf(GinkgoWriter, "ERROR: %s\n", err.Error())
+		return false
+	}, timeout, pollingInterval).Should(BeTrue())
+}
diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go
index f25288675..ac1abcd47 100644
--- a/vendor/github.com/gorilla/mux/regexp.go
+++ b/vendor/github.com/gorilla/mux/regexp.go
@@ -113,6 +113,13 @@ func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*ro
 	if typ != regexpTypePrefix {
 		pattern.WriteByte('$')
 	}
+
+	var wildcardHostPort bool
+	if typ == regexpTypeHost {
+		if !strings.Contains(pattern.String(), ":") {
+			wildcardHostPort = true
+		}
+	}
 	reverse.WriteString(raw)
 	if endSlash {
 		reverse.WriteByte('/')
@@ -131,13 +138,14 @@ func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*ro
 
 	// Done!
 	return &routeRegexp{
-		template:   template,
-		regexpType: typ,
-		options:    options,
-		regexp:     reg,
-		reverse:    reverse.String(),
-		varsN:      varsN,
-		varsR:      varsR,
+		template:         template,
+		regexpType:       typ,
+		options:          options,
+		regexp:           reg,
+		reverse:          reverse.String(),
+		varsN:            varsN,
+		varsR:            varsR,
+		wildcardHostPort: wildcardHostPort,
 	}, nil
 }
 
@@ -158,11 +166,22 @@ type routeRegexp struct {
 	varsN []string
 	// Variable regexps (validators).
 	varsR []*regexp.Regexp
+	// Wildcard host-port (no strict port match in hostname)
+	wildcardHostPort bool
 }
 
 // Match matches the regexp against the URL host or path.
func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { - if r.regexpType != regexpTypeHost { + if r.regexpType == regexpTypeHost { + host := getHost(req) + if r.wildcardHostPort { + // Don't be strict on the port match + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + } + return r.regexp.MatchString(host) + } else { if r.regexpType == regexpTypeQuery { return r.matchQueryString(req) } @@ -172,8 +191,6 @@ func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { } return r.regexp.MatchString(path) } - - return r.regexp.MatchString(getHost(req)) } // url builds a URL part using the given values. diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/kubernetes-csi/external-snapshotter/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..d70526403 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,40 @@ + + +**What type of PR is this?** +> Uncomment only one ` /kind <>` line, hit enter to put that in a new line, and remove leading whitespaces from that line: +> +> /kind api-change +> /kind bug +> /kind cleanup +> /kind design +> /kind documentation +> /kind failing-test +> /kind feature +> /kind flake + +**What this PR does / why we need it**: + +**Which issue(s) this PR fixes**: + +Fixes # + +**Special notes for your reviewer**: + +**Does this PR introduce a user-facing change?**: + +```release-note + +``` diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/.gitignore b/vendor/github.com/kubernetes-csi/external-snapshotter/.gitignore new file mode 100644 index 000000000..8a40d3706 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/.gitignore @@ -0,0 +1,2 @@ +# Compiled binaries and deployment files +/bin/ diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/.prow.sh b/vendor/github.com/kubernetes-csi/external-snapshotter/.prow.sh new file mode 100755 index 000000000..40dc1c3d4 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/.prow.sh @@ -0,0 +1,5 @@ +#! /bin/bash + +. release-tools/prow.sh + +main diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/.travis.yml b/vendor/github.com/kubernetes-csi/external-snapshotter/.travis.yml new file mode 120000 index 000000000..a554dfc76 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/.travis.yml @@ -0,0 +1 @@ +release-tools/travis.yml \ No newline at end of file diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/CHANGELOG-1.1.md b/vendor/github.com/kubernetes-csi/external-snapshotter/CHANGELOG-1.1.md new file mode 100644 index 000000000..bca673450 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/CHANGELOG-1.1.md @@ -0,0 +1,23 @@ +# Changelog since v1.0.1 + +## Deprecations +* Command line flag `--connection-timeout` is deprecated and has no effect. 
+* Command line flag `--snapshotter` is deprecated and has no effect ([#103](https://github.com/kubernetes-csi/external-snapshotter/pull/103)) + +## Notable Features +* Add Lease based Leader Election Support ([#107](https://github.com/kubernetes-csi/external-snapshotter/pull/107)) +* The external snapshotter now tries to connect to the CSI driver indefinitely ([#92](https://github.com/kubernetes-csi/external-snapshotter/pull/92)) +* A new --timeout parameter has been added for CSI operations ([#93](https://github.com/kubernetes-csi/external-snapshotter/pull/93)) +* Prow testing ([#111](https://github.com/kubernetes-csi/external-snapshotter/pull/111)) + +## Other Notable Changes +* Add PR template ([#113](https://github.com/kubernetes-csi/external-snapshotter/pull/113)) +* Un-prune code-generator scripts ([#110](https://github.com/kubernetes-csi/external-snapshotter/pull/110)) +* Refactor external snapshotter to use csi-lib-utils/rpc ([#97](https://github.com/kubernetes-csi/external-snapshotter/pull/97)) +* Fix for pre-bound snapshot empty source error ([#98](https://github.com/kubernetes-csi/external-snapshotter/pull/98)) +* Update vendor to k8s 1.14.0 ([#105](https://github.com/kubernetes-csi/external-snapshotter/pull/105)) +* Migrate to k8s.io/klog from glog. ([#88](https://github.com/kubernetes-csi/external-snapshotter/pull/88)) +* Use distroless as base image ([#101](https://github.com/kubernetes-csi/external-snapshotter/pull/101)) +* Remove constraints and update all vendor pkgs ([#100](https://github.com/kubernetes-csi/external-snapshotter/pull/100)) +* Add dep prune options and remove unused packages ([#99](https://github.com/kubernetes-csi/external-snapshotter/pull/99)) +* Release tools ([#86](https://github.com/kubernetes-csi/external-snapshotter/pull/86)) diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/CONTRIBUTING.md b/vendor/github.com/kubernetes-csi/external-snapshotter/CONTRIBUTING.md new file mode 100644 index 000000000..15fe07e44 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/CONTRIBUTING.md @@ -0,0 +1,22 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! 
+ +## Contact Information + +- [Slack channel](https://kubernetes.slack.com/messages/sig-storage) +- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-storage) diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/Dockerfile b/vendor/github.com/kubernetes-csi/external-snapshotter/Dockerfile new file mode 100644 index 000000000..bf355958f --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/Dockerfile @@ -0,0 +1,6 @@ +FROM gcr.io/distroless/static:latest +LABEL maintainers="Kubernetes Authors" +LABEL description="CSI External Snapshotter" + +COPY ./bin/csi-snapshotter csi-snapshotter +ENTRYPOINT ["/csi-snapshotter"] diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/Gopkg.lock b/vendor/github.com/kubernetes-csi/external-snapshotter/Gopkg.lock new file mode 100644 index 000000000..2c17f2478 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/Gopkg.lock @@ -0,0 +1,813 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + digest = "1:7f21fa1f8ab9a529dba26a7e9cf20de217c307fa1d96cb599d3afd9e5c83e9d6" + name = "github.com/container-storage-interface/spec" + packages = ["lib/go/csi"] + pruneopts = "NUT" + revision = "f750e6765f5f6b4ac0e13e95214d58901290fb4b" + version = "v1.1.0" + +[[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "NUT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:32598368f409bbee79deb9d43569fcd92b9fb27f39155f5e166b3371217f051f" + name = "github.com/evanphx/json-patch" + packages = ["."] + pruneopts = "NUT" + revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5" + version = "v4.1.0" + +[[projects]] + digest = "1:a1b2a5e38f79688ee8250942d5fa960525fceb1024c855c7bc76fa77b0f3cca2" + name = "github.com/gogo/protobuf" + packages = [ + "proto", + "sortkeys", + ] + pruneopts = "NUT" + revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c" + version = "v1.2.1" + +[[projects]] + branch = "master" + digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8" + name = "github.com/golang/groupcache" + packages = ["lru"] + pruneopts = "NUT" + revision = "5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b" + +[[projects]] + digest = "1:b60efdeb75d3c0ceed88783ac2495256aba3491a537d0f31401202579fd62a94" + name = "github.com/golang/mock" + packages = ["gomock"] + pruneopts = "NUT" + revision = "51421b967af1f557f93a59e0057aaf15ca02e29c" + version = "v1.2.0" + +[[projects]] + digest = "1:f0ba580759848d2bb230cb37bc288690f92812d0850112e07ecbd1b727f3d973" + name = "github.com/golang/protobuf" + packages = [ + "descriptor", + "proto", + "protoc-gen-go/descriptor", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp", + "ptypes/wrappers", + ] + pruneopts = "NUT" + revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30" + version = "v1.3.1" + +[[projects]] + branch = "master" + digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc" + name = "github.com/google/gofuzz" + packages = ["."] + pruneopts = "NUT" + revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" + +[[projects]] + digest = "1:06a7dadb7b760767341ffb6c8d377238d68a1226f2b21b5d497d2e3f6ecf6b4e" + name = "github.com/googleapis/gnostic" + packages = [ + "OpenAPIv2", + "compiler", + "extensions", + ] + pruneopts = "NUT" + revision = "7c663266750e7d82587642f65e60bc4083f1f84e" + 
version = "v0.2.0" + +[[projects]] + digest = "1:52094d0f8bdf831d1a2401e9b6fee5795fdc0b2a2d1f8bb1980834c289e79129" + name = "github.com/hashicorp/golang-lru" + packages = [ + ".", + "simplelru", + ] + pruneopts = "NUT" + revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c" + version = "v0.5.1" + +[[projects]] + digest = "1:aaa38889f11896ee3644d77e17dc7764cc47f5f3d3b488268df2af2b52541c5f" + name = "github.com/imdario/mergo" + packages = ["."] + pruneopts = "NUT" + revision = "7c29201646fa3de8506f701213473dd407f19646" + version = "v0.3.7" + +[[projects]] + digest = "1:4e903242fe176238aaa469f59d7035f5abf2aa9acfefb8964ddd203651b574e9" + name = "github.com/json-iterator/go" + packages = ["."] + pruneopts = "NUT" + revision = "0ff49de124c6f76f8494e194af75bde0f1a49a29" + version = "v1.1.6" + +[[projects]] + digest = "1:cfbb7ea18e982b7f97205743fcbdd7828f42ce1ac667192b713ada9fc62a958d" + name = "github.com/kubernetes-csi/csi-lib-utils" + packages = [ + "connection", + "leaderelection", + "protosanitizer", + "rpc", + ] + pruneopts = "NUT" + revision = "b8b7a89535d80e12f2c0f4c53cfb981add8aaca2" + version = "v0.6.1" + +[[projects]] + digest = "1:189ada87769e6bd22de67cf8d5ac2f348070e7bcc4cdc5be3aaa5b19758e8fc2" + name = "github.com/kubernetes-csi/csi-test" + packages = [ + "driver", + "utils", + ] + pruneopts = "NUT" + revision = "5421d9f3c37be3b95b241b44a094a3db11bee789" + version = "v2.0.0" + +[[projects]] + digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f" + name = "github.com/modern-go/concurrent" + packages = ["."] + pruneopts = "NUT" + revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" + version = "1.0.3" + +[[projects]] + digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6" + name = "github.com/modern-go/reflect2" + packages = ["."] + pruneopts = "NUT" + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[[projects]] + digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779" + name = "github.com/spf13/pflag" + packages = ["."] + pruneopts = "NUT" + revision = "298182f68c66c05229eb03ac171abe6e309ee79a" + version = "v1.0.3" + +[[projects]] + branch = "master" + digest = "1:bbe51412d9915d64ffaa96b51d409e070665efc5194fcf145c4a27d4133107a4" + name = "golang.org/x/crypto" + packages = ["ssh/terminal"] + pruneopts = "NUT" + revision = "a5d413f7728c81fb97d96a2b722368945f651e78" + +[[projects]] + branch = "master" + digest = "1:8750c495cba39e0d2402c424760734a695d4ab91579374f0624f411d52cda1a6" + name = "golang.org/x/net" + packages = [ + "context", + "context/ctxhttp", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "NUT" + revision = "b630fd6fe46bcfc98f989005d8b8ec1400e60a6e" + +[[projects]] + branch = "master" + digest = "1:4d25d388c9ad5169b31c1a6f16cc7bcdb856489958e99f2123d8d8cdf000d7eb" + name = "golang.org/x/oauth2" + packages = [ + ".", + "internal", + ] + pruneopts = "NUT" + revision = "9f3314589c9a9136388751d9adae6b0ed400978a" + +[[projects]] + branch = "master" + digest = "1:992976af3618fd8fb06e5e4c4fa177800a1e0880cbbbc91df3f952520808806c" + name = "golang.org/x/sys" + packages = [ + "unix", + "windows", + ] + pruneopts = "NUT" + revision = "81d4e9dc473e5e8c933f2aaeba2a3d81efb9aed2" + +[[projects]] + digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + 
"internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "NUT" + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + branch = "master" + digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90" + name = "golang.org/x/time" + packages = ["rate"] + pruneopts = "NUT" + revision = "9d24e82272b4f38b78bc8cff74fa936d31ccd8ef" + +[[projects]] + branch = "master" + digest = "1:59b401940f79c311efa01c10e2fa8e78522a978538127f369fb39690a03c3eb4" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "go/gcexportdata", + "go/internal/gcimporter", + "go/internal/packagesdriver", + "go/packages", + "go/types/typeutil", + "imports", + "internal/fastwalk", + "internal/gopathwalk", + "internal/module", + "internal/semver", + ] + pruneopts = "NUT" + revision = "8a44e74612bcf51f0d4407df3d3a8377cb99c2d8" + +[[projects]] + digest = "1:372cd8eba449f9b6db06677d0e73fa193ec5b19aaee148f355503ab6127045ca" + name = "google.golang.org/appengine" + packages = [ + "internal", + "internal/base", + "internal/datastore", + "internal/log", + "internal/remote_api", + "internal/urlfetch", + "urlfetch", + ] + pruneopts = "NUT" + revision = "54a98f90d1c46b7731eb8fb305d2a321c30ef610" + version = "v1.5.0" + +[[projects]] + branch = "master" + digest = "1:c3076e7defee87de1236f1814beb588f40a75544c60121e6eb38b3b3721783e2" + name = "google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + pruneopts = "NUT" + revision = "f467c93bbac2133ff463e1f93d18d8f9f3f04451" + +[[projects]] + digest = "1:119dce30813e7fe92f031c730bd2fb3ff1bac6fc6bb9d1450507e8bf0ad77620" + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/internal", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/binarylog", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "reflection", + "reflection/grpc_reflection_v1alpha", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap", + ] + pruneopts = "NUT" + revision = "3507fb8e1a5ad030303c106fef3a47c9fdad16ad" + version = "v1.19.1" + +[[projects]] + digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" + name = "gopkg.in/inf.v0" + packages = ["."] + pruneopts = "NUT" + revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" + version = "v0.9.1" + +[[projects]] + digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "NUT" + revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" + version = "v2.2.2" + +[[projects]] + digest = "1:a937ed4322409fa22924f02124fd0727c19662f73cf15406646d19bdce972df2" + name = "k8s.io/api" + packages = [ + "admissionregistration/v1beta1", + "apps/v1", + "apps/v1beta1", + "apps/v1beta2", + "auditregistration/v1alpha1", + "authentication/v1", + "authentication/v1beta1", + "authorization/v1", + "authorization/v1beta1", + "autoscaling/v1", + "autoscaling/v2beta1", + "autoscaling/v2beta2", + "batch/v1", + "batch/v1beta1", + "batch/v2alpha1", + "certificates/v1beta1", + "coordination/v1", + "coordination/v1beta1", + "core/v1", + "events/v1beta1", + 
"extensions/v1beta1", + "networking/v1", + "networking/v1beta1", + "node/v1alpha1", + "node/v1beta1", + "policy/v1beta1", + "rbac/v1", + "rbac/v1alpha1", + "rbac/v1beta1", + "scheduling/v1", + "scheduling/v1alpha1", + "scheduling/v1beta1", + "settings/v1alpha1", + "storage/v1", + "storage/v1alpha1", + "storage/v1beta1", + ] + pruneopts = "NUT" + revision = "40a48860b5abbba9aa891b02b32da429b08d96a0" + version = "kubernetes-1.14.0" + +[[projects]] + digest = "1:edf8a3c5c2f8f46ae492c7272bb451f18a423639e86b10cd0ec72b56fe50f60a" + name = "k8s.io/apiextensions-apiserver" + packages = [ + "pkg/apis/apiextensions", + "pkg/apis/apiextensions/v1beta1", + "pkg/client/clientset/clientset", + "pkg/client/clientset/clientset/scheme", + "pkg/client/clientset/clientset/typed/apiextensions/v1beta1", + ] + pruneopts = "NUT" + revision = "53c4693659ed354d76121458fb819202dd1635fa" + version = "kubernetes-1.14.0" + +[[projects]] + digest = "1:d8205124c900012b3395c3ef374c1e7a385a66e2587b5267cc6106552314241b" + name = "k8s.io/apimachinery" + packages = [ + "pkg/api/errors", + "pkg/api/meta", + "pkg/api/resource", + "pkg/apis/meta/internalversion", + "pkg/apis/meta/v1", + "pkg/apis/meta/v1/unstructured", + "pkg/apis/meta/v1beta1", + "pkg/conversion", + "pkg/conversion/queryparams", + "pkg/fields", + "pkg/labels", + "pkg/runtime", + "pkg/runtime/schema", + "pkg/runtime/serializer", + "pkg/runtime/serializer/json", + "pkg/runtime/serializer/protobuf", + "pkg/runtime/serializer/recognizer", + "pkg/runtime/serializer/streaming", + "pkg/runtime/serializer/versioning", + "pkg/selection", + "pkg/types", + "pkg/util/cache", + "pkg/util/clock", + "pkg/util/diff", + "pkg/util/errors", + "pkg/util/framer", + "pkg/util/intstr", + "pkg/util/json", + "pkg/util/mergepatch", + "pkg/util/naming", + "pkg/util/net", + "pkg/util/rand", + "pkg/util/runtime", + "pkg/util/sets", + "pkg/util/strategicpatch", + "pkg/util/validation", + "pkg/util/validation/field", + "pkg/util/wait", + "pkg/util/yaml", + "pkg/version", + "pkg/watch", + "third_party/forked/golang/json", + "third_party/forked/golang/reflect", + ] + pruneopts = "NUT" + revision = "d7deff9243b165ee192f5551710ea4285dcfd615" + version = "kubernetes-1.14.0" + +[[projects]] + digest = "1:85f25c196bc700354adc889a397f6550105865ad2588704094276f78f6e9d9ba" + name = "k8s.io/client-go" + packages = [ + "discovery", + "discovery/fake", + "informers", + "informers/admissionregistration", + "informers/admissionregistration/v1beta1", + "informers/apps", + "informers/apps/v1", + "informers/apps/v1beta1", + "informers/apps/v1beta2", + "informers/auditregistration", + "informers/auditregistration/v1alpha1", + "informers/autoscaling", + "informers/autoscaling/v1", + "informers/autoscaling/v2beta1", + "informers/autoscaling/v2beta2", + "informers/batch", + "informers/batch/v1", + "informers/batch/v1beta1", + "informers/batch/v2alpha1", + "informers/certificates", + "informers/certificates/v1beta1", + "informers/coordination", + "informers/coordination/v1", + "informers/coordination/v1beta1", + "informers/core", + "informers/core/v1", + "informers/events", + "informers/events/v1beta1", + "informers/extensions", + "informers/extensions/v1beta1", + "informers/internalinterfaces", + "informers/networking", + "informers/networking/v1", + "informers/networking/v1beta1", + "informers/node", + "informers/node/v1alpha1", + "informers/node/v1beta1", + "informers/policy", + "informers/policy/v1beta1", + "informers/rbac", + "informers/rbac/v1", + "informers/rbac/v1alpha1", + "informers/rbac/v1beta1", + 
"informers/scheduling", + "informers/scheduling/v1", + "informers/scheduling/v1alpha1", + "informers/scheduling/v1beta1", + "informers/settings", + "informers/settings/v1alpha1", + "informers/storage", + "informers/storage/v1", + "informers/storage/v1alpha1", + "informers/storage/v1beta1", + "kubernetes", + "kubernetes/fake", + "kubernetes/scheme", + "kubernetes/typed/admissionregistration/v1beta1", + "kubernetes/typed/admissionregistration/v1beta1/fake", + "kubernetes/typed/apps/v1", + "kubernetes/typed/apps/v1/fake", + "kubernetes/typed/apps/v1beta1", + "kubernetes/typed/apps/v1beta1/fake", + "kubernetes/typed/apps/v1beta2", + "kubernetes/typed/apps/v1beta2/fake", + "kubernetes/typed/auditregistration/v1alpha1", + "kubernetes/typed/auditregistration/v1alpha1/fake", + "kubernetes/typed/authentication/v1", + "kubernetes/typed/authentication/v1/fake", + "kubernetes/typed/authentication/v1beta1", + "kubernetes/typed/authentication/v1beta1/fake", + "kubernetes/typed/authorization/v1", + "kubernetes/typed/authorization/v1/fake", + "kubernetes/typed/authorization/v1beta1", + "kubernetes/typed/authorization/v1beta1/fake", + "kubernetes/typed/autoscaling/v1", + "kubernetes/typed/autoscaling/v1/fake", + "kubernetes/typed/autoscaling/v2beta1", + "kubernetes/typed/autoscaling/v2beta1/fake", + "kubernetes/typed/autoscaling/v2beta2", + "kubernetes/typed/autoscaling/v2beta2/fake", + "kubernetes/typed/batch/v1", + "kubernetes/typed/batch/v1/fake", + "kubernetes/typed/batch/v1beta1", + "kubernetes/typed/batch/v1beta1/fake", + "kubernetes/typed/batch/v2alpha1", + "kubernetes/typed/batch/v2alpha1/fake", + "kubernetes/typed/certificates/v1beta1", + "kubernetes/typed/certificates/v1beta1/fake", + "kubernetes/typed/coordination/v1", + "kubernetes/typed/coordination/v1/fake", + "kubernetes/typed/coordination/v1beta1", + "kubernetes/typed/coordination/v1beta1/fake", + "kubernetes/typed/core/v1", + "kubernetes/typed/core/v1/fake", + "kubernetes/typed/events/v1beta1", + "kubernetes/typed/events/v1beta1/fake", + "kubernetes/typed/extensions/v1beta1", + "kubernetes/typed/extensions/v1beta1/fake", + "kubernetes/typed/networking/v1", + "kubernetes/typed/networking/v1/fake", + "kubernetes/typed/networking/v1beta1", + "kubernetes/typed/networking/v1beta1/fake", + "kubernetes/typed/node/v1alpha1", + "kubernetes/typed/node/v1alpha1/fake", + "kubernetes/typed/node/v1beta1", + "kubernetes/typed/node/v1beta1/fake", + "kubernetes/typed/policy/v1beta1", + "kubernetes/typed/policy/v1beta1/fake", + "kubernetes/typed/rbac/v1", + "kubernetes/typed/rbac/v1/fake", + "kubernetes/typed/rbac/v1alpha1", + "kubernetes/typed/rbac/v1alpha1/fake", + "kubernetes/typed/rbac/v1beta1", + "kubernetes/typed/rbac/v1beta1/fake", + "kubernetes/typed/scheduling/v1", + "kubernetes/typed/scheduling/v1/fake", + "kubernetes/typed/scheduling/v1alpha1", + "kubernetes/typed/scheduling/v1alpha1/fake", + "kubernetes/typed/scheduling/v1beta1", + "kubernetes/typed/scheduling/v1beta1/fake", + "kubernetes/typed/settings/v1alpha1", + "kubernetes/typed/settings/v1alpha1/fake", + "kubernetes/typed/storage/v1", + "kubernetes/typed/storage/v1/fake", + "kubernetes/typed/storage/v1alpha1", + "kubernetes/typed/storage/v1alpha1/fake", + "kubernetes/typed/storage/v1beta1", + "kubernetes/typed/storage/v1beta1/fake", + "listers/admissionregistration/v1beta1", + "listers/apps/v1", + "listers/apps/v1beta1", + "listers/apps/v1beta2", + "listers/auditregistration/v1alpha1", + "listers/autoscaling/v1", + "listers/autoscaling/v2beta1", + "listers/autoscaling/v2beta2", + 
"listers/batch/v1", + "listers/batch/v1beta1", + "listers/batch/v2alpha1", + "listers/certificates/v1beta1", + "listers/coordination/v1", + "listers/coordination/v1beta1", + "listers/core/v1", + "listers/events/v1beta1", + "listers/extensions/v1beta1", + "listers/networking/v1", + "listers/networking/v1beta1", + "listers/node/v1alpha1", + "listers/node/v1beta1", + "listers/policy/v1beta1", + "listers/rbac/v1", + "listers/rbac/v1alpha1", + "listers/rbac/v1beta1", + "listers/scheduling/v1", + "listers/scheduling/v1alpha1", + "listers/scheduling/v1beta1", + "listers/settings/v1alpha1", + "listers/storage/v1", + "listers/storage/v1alpha1", + "listers/storage/v1beta1", + "pkg/apis/clientauthentication", + "pkg/apis/clientauthentication/v1alpha1", + "pkg/apis/clientauthentication/v1beta1", + "pkg/version", + "plugin/pkg/client/auth/exec", + "rest", + "rest/watch", + "testing", + "tools/auth", + "tools/cache", + "tools/clientcmd", + "tools/clientcmd/api", + "tools/clientcmd/api/latest", + "tools/clientcmd/api/v1", + "tools/leaderelection", + "tools/leaderelection/resourcelock", + "tools/metrics", + "tools/pager", + "tools/record", + "tools/record/util", + "tools/reference", + "transport", + "util/cert", + "util/connrotation", + "util/flowcontrol", + "util/homedir", + "util/keyutil", + "util/retry", + "util/workqueue", + ] + pruneopts = "NUT" + revision = "6ee68ca5fd8355d024d02f9db0b3b667e8357a0f" + version = "kubernetes-1.14.0" + +[[projects]] + digest = "1:2d821667dbd520a7ef31bdc923543f197ba30021b4317fd8871618ada52f23b0" + name = "k8s.io/code-generator" + packages = [ + "cmd/client-gen", + "cmd/client-gen/args", + "cmd/client-gen/generators", + "cmd/client-gen/generators/fake", + "cmd/client-gen/generators/scheme", + "cmd/client-gen/generators/util", + "cmd/client-gen/path", + "cmd/client-gen/types", + "cmd/deepcopy-gen", + "cmd/deepcopy-gen/args", + "cmd/defaulter-gen", + "cmd/defaulter-gen/args", + "cmd/informer-gen", + "cmd/informer-gen/args", + "cmd/informer-gen/generators", + "cmd/lister-gen", + "cmd/lister-gen/args", + "cmd/lister-gen/generators", + "pkg/namer", + "pkg/util", + ] + pruneopts = "T" + revision = "50b561225d70b3eb79a1faafd3dfe7b1a62cbe73" + version = "kubernetes-1.14.0" + +[[projects]] + branch = "master" + digest = "1:39912eb5f8eaf46486faae0839586c27c93423e552f76875defa048f52c15c15" + name = "k8s.io/gengo" + packages = [ + "args", + "examples/deepcopy-gen/generators", + "examples/defaulter-gen/generators", + "examples/set-gen/sets", + "generator", + "namer", + "parser", + "types", + ] + pruneopts = "NUT" + revision = "e17681d19d3ac4837a019ece36c2a0ec31ffe985" + +[[projects]] + digest = "1:c263611800c3a97991dbcf9d3bc4de390f6224aaa8ca0a7226a9d734f65a416a" + name = "k8s.io/klog" + packages = ["."] + pruneopts = "NUT" + revision = "71442cd4037d612096940ceb0f3fec3f7fff66e0" + version = "v0.2.0" + +[[projects]] + branch = "master" + digest = "1:42674e29bf0cf4662d49bd9528e24b9ecc4895b32d0be281f9cf04d3a7671846" + name = "k8s.io/kube-openapi" + packages = ["pkg/util/proto"] + pruneopts = "NUT" + revision = "94e1e7b7574c44c4c0f2007de6fe617e259191f3" + +[[projects]] + digest = "1:15eb7bbf576cc9517ba70b469f9097948ac32f6766bbf6f8b982b81a468b1b74" + name = "k8s.io/kubernetes" + packages = [ + "pkg/util/goroutinemap", + "pkg/util/goroutinemap/exponentialbackoff", + "pkg/util/slice", + ] + pruneopts = "NUT" + revision = "641856db18352033a0d96dbc99153fa3b27298e5" + version = "v1.14.0" + +[[projects]] + branch = "master" + digest = 
"1:14e8a3b53e6d8cb5f44783056b71bb2ca1ac7e333939cc97f3e50b579c920845" + name = "k8s.io/utils" + packages = [ + "buffer", + "integer", + "trace", + ] + pruneopts = "NUT" + revision = "21c4ce38f2a793ec01e925ddc31216500183b773" + +[[projects]] + digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c" + name = "sigs.k8s.io/yaml" + packages = ["."] + pruneopts = "NUT" + revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480" + version = "v1.1.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/container-storage-interface/spec/lib/go/csi", + "github.com/golang/mock/gomock", + "github.com/golang/protobuf/ptypes", + "github.com/golang/protobuf/ptypes/timestamp", + "github.com/kubernetes-csi/csi-lib-utils/connection", + "github.com/kubernetes-csi/csi-lib-utils/leaderelection", + "github.com/kubernetes-csi/csi-lib-utils/rpc", + "github.com/kubernetes-csi/csi-test/driver", + "google.golang.org/grpc", + "google.golang.org/grpc/codes", + "google.golang.org/grpc/status", + "k8s.io/api/core/v1", + "k8s.io/api/storage/v1", + "k8s.io/api/storage/v1beta1", + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", + "k8s.io/apimachinery/pkg/api/errors", + "k8s.io/apimachinery/pkg/api/meta", + "k8s.io/apimachinery/pkg/api/resource", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/labels", + "k8s.io/apimachinery/pkg/runtime", + "k8s.io/apimachinery/pkg/runtime/schema", + "k8s.io/apimachinery/pkg/runtime/serializer", + "k8s.io/apimachinery/pkg/types", + "k8s.io/apimachinery/pkg/util/diff", + "k8s.io/apimachinery/pkg/util/runtime", + "k8s.io/apimachinery/pkg/util/sets", + "k8s.io/apimachinery/pkg/util/validation", + "k8s.io/apimachinery/pkg/util/wait", + "k8s.io/apimachinery/pkg/watch", + "k8s.io/client-go/discovery", + "k8s.io/client-go/discovery/fake", + "k8s.io/client-go/informers", + "k8s.io/client-go/informers/core/v1", + "k8s.io/client-go/kubernetes", + "k8s.io/client-go/kubernetes/fake", + "k8s.io/client-go/kubernetes/scheme", + "k8s.io/client-go/kubernetes/typed/core/v1", + "k8s.io/client-go/listers/core/v1", + "k8s.io/client-go/rest", + "k8s.io/client-go/testing", + "k8s.io/client-go/tools/cache", + "k8s.io/client-go/tools/clientcmd", + "k8s.io/client-go/tools/record", + "k8s.io/client-go/tools/reference", + "k8s.io/client-go/util/flowcontrol", + "k8s.io/client-go/util/workqueue", + "k8s.io/code-generator/cmd/client-gen", + "k8s.io/code-generator/cmd/deepcopy-gen", + "k8s.io/code-generator/cmd/defaulter-gen", + "k8s.io/code-generator/cmd/informer-gen", + "k8s.io/code-generator/cmd/lister-gen", + "k8s.io/klog", + "k8s.io/kubernetes/pkg/util/goroutinemap", + "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff", + "k8s.io/kubernetes/pkg/util/slice", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/Gopkg.toml b/vendor/github.com/kubernetes-csi/external-snapshotter/Gopkg.toml new file mode 100644 index 000000000..550d59298 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/Gopkg.toml @@ -0,0 +1,51 @@ +# List of dependecies for CSI snapshotter + +required = [ + "k8s.io/code-generator/cmd/client-gen", + "k8s.io/code-generator/cmd/informer-gen", + "k8s.io/code-generator/cmd/lister-gen", + "k8s.io/code-generator/cmd/deepcopy-gen", + "k8s.io/code-generator/cmd/defaulter-gen", +] + + +[[constraint]] + name = "github.com/container-storage-interface/spec" 
+ version = "=1.1.0" + +# The dependency on external-provisioner should be removed with #60. +[[constraint]] + name = "k8s.io/api" + version = "kubernetes-1.14.0" + +[[constraint]] + name = "k8s.io/apimachinery" + version = "kubernetes-1.14.0" + +[[constraint]] + name = "k8s.io/client-go" + version = "kubernetes-1.14.0" + +[[constraint]] + name = "k8s.io/code-generator" + version = "kubernetes-1.14.0" + +[[constraint]] + name = "k8s.io/apiextensions-apiserver" + version = "kubernetes-1.14.0" + +[[constraint]] + name = "github.com/kubernetes-csi/csi-lib-utils" + version = ">=v0.6.1" + +[prune] + non-go = true + go-tests = true + unused-packages = true + [[prune.project]] + # Scripts under code-generator are required to generate code + # when snapshot APIs are changed. Therefore they should not + # be pruned. + name = "k8s.io/code-generator" + non-go = false + unused-packages = false diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/LICENSE b/vendor/github.com/kubernetes-csi/external-snapshotter/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/Makefile b/vendor/github.com/kubernetes-csi/external-snapshotter/Makefile new file mode 100644 index 000000000..4a81c5107 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/Makefile @@ -0,0 +1,19 @@ +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +.PHONY: all csi-snapshotter clean test + +CMDS=csi-snapshotter +all: build +include release-tools/build.make diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/OWNERS b/vendor/github.com/kubernetes-csi/external-snapshotter/OWNERS new file mode 100644 index 000000000..87ac2643f --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/OWNERS @@ -0,0 +1,6 @@ +approvers: +- saad-ali +- lpabon +- jingxu97 +- xing-yang +- wackxu diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/README.md b/vendor/github.com/kubernetes-csi/external-snapshotter/README.md new file mode 100644 index 000000000..5abfeb10b --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/README.md @@ -0,0 +1,85 @@ +# CSI Snapshotter + +The CSI external-snapshotter is part of Kubernetes implementation of [Container Storage Interface (CSI)](https://github.com/container-storage-interface/spec). + +The volume snapshot feature supports CSI v1.0 and it has been an Alpha feature in Kubernetes since v1.12. + +## Overview + +CSI Snapshotter is an external controller that watches Kubernetes Snapshot CRD objects and triggers CreateSnapshot/DeleteSnapshot against a CSI endpoint. Full design can be found at Kubernetes proposal at [here](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/csi-snapshot.md) + +## Design + +External snapshotter follows [controller](https://github.com/kubernetes/community/blob/master/contributors/devel/controllers.md) pattern and uses informers to watch for `VolumeSnapshot` and `VolumeSnapshotContent` create/update/delete events. It filters out these objects with `Snapshotter==` specified in the associated VolumeSnapshotClass object and then processes these events in workqueues with exponential backoff. + +### Snapshotter + +Snapshotter talks to CSI over socket (/run/csi/socket by default, configurable by -csi-address). The snapshotter then: + +* Discovers the supported snapshotter name by `GetDriverName` call. + +* Uses ControllerGetCapabilities for find out if CSI driver supports `ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT` and `ControllerServiceCapability_RPC_LIST_SNAPSHOTS` calls. Otherwise, the controller will not start. + +* Processes new/updated/deleted `VolumeSnapshots`: The snapshotter only processes `VolumeSnapshot` that has `snapshotter` specified in its `VolumeSnapshotClass` matches its driver name. The process workflow is as follows + * If the snapshot status is `Ready`, the controller checks whether the snapshot and its content still binds correctly. If there is any problem with the binding (e.g., snapshot points to a non-exist snapshot content), update the snapshot status and emit event. + * If the snapshot status is not ready, there are two cases. + * `SnapshotContentName` is not empty: the controller verifies whether the snapshot content exists and also binds to the snapshot. If verification passes, the controller binds the snapshot and its content objects and marks it is ready. Otherwise, it updates the error status of the snapshot. + * `SnapshotContentName` is set empty: the controller will first check whether there is already a content object which binds the snapshot correctly with snapshot uid (`VolumeSnapshotRef.UID`) specified. If so, the controller binds these two objects. Otherwise, the controller issues a create snapshot operation. Please note that if the error status shows that snapshot creation already failed before, it will not try to create snapshot again. 
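As a rough illustration of the binding rules described in this section, the controller essentially compares the snapshot's UID and its `SnapshotContentName` against the `VolumeSnapshotRef` recorded on the content object. The sketch below is written against the vendored v1alpha1 API types only; `isBound` is a hypothetical helper for illustration, not a function from the real controller:

```go
package main

import (
	"fmt"

	crdv1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1"
)

// isBound (hypothetical) mirrors the binding check described above: the content
// must reference the snapshot by UID, and the snapshot must point back at the
// content by name before the pair is considered correctly bound.
func isBound(snapshot *crdv1.VolumeSnapshot, content *crdv1.VolumeSnapshotContent) bool {
	if content.Spec.VolumeSnapshotRef == nil {
		// Not bound to any snapshot object; the controller skips such content.
		return false
	}
	if content.Spec.VolumeSnapshotRef.UID != "" && content.Spec.VolumeSnapshotRef.UID != snapshot.UID {
		// UID mismatch: the content belongs to a different (or deleted) snapshot.
		return false
	}
	// An empty SnapshotContentName means the snapshot has not been bound yet.
	return snapshot.Spec.SnapshotContentName == content.Name
}

func main() {
	fmt.Println(isBound(&crdv1.VolumeSnapshot{}, &crdv1.VolumeSnapshotContent{}))
}
```

The real controller additionally updates the snapshot's error status and emits events when this check fails, as the bullets above and below describe.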
+ +* Processes new/updated/deleted `VolumeSnapshotContents`: The snapshotter only processes `VolumeSnapshotContent` in which the CSI driver specified in the spec matches the controller's driver name. + * If the `VolumeSnapshotRef` is set to nil, skip this content since it is not bound to any snapshot object. + * Otherwise, the controller verifies whether the content object is correctly bound to a snapshot object. In case the `VolumeSnapshotRef.UID` is set but it does not match its snapshot object or snapshot no long exists, the content object and its associated snapshot will be deleted. + +## Usage + +### Running on command line + +For debugging, it is possible to run snapshotter on command line. For example, + +```bash +csi-snapshotter -kubeconfig ~/.kube/config -v 5 -csi-address /run/csi/socket +``` + +### Running in a statefulset + +It is necessary to create a new service account and give it enough privileges to run the snapshotter. We provide .yaml files that deploy for use together with the hostpath example driver. A real production deployment must customize them: + +```bash +for i in $(find deploy/kubernetes -name '*.yaml'); do kubectl create -f $i; done +``` + +### Running with Leader Election + +If you want to run external-snapshotter with higher availability, you can enable resource based leader election. To enable this, set the following flags: +```bash +--leader-election=true +``` + +## Testing + +Running Unit Tests: + +```bash +go test -timeout 30s github.com/kubernetes-csi/external-snapshotter/pkg/controller +``` + +## Dependency Management + +```bash +dep ensure +``` + +To modify dependencies or versions change `./Gopkg.toml` + +## Community, discussion, contribution, and support + +Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). + +You can reach the maintainers of this project at: + +* [Slack channel](https://kubernetes.slack.com/messages/sig-storage) +* [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-storage) + +### Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/SECURITY_CONTACTS b/vendor/github.com/kubernetes-csi/external-snapshotter/SECURITY_CONTACTS new file mode 100644 index 000000000..585f480fe --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/SECURITY_CONTACTS @@ -0,0 +1,13 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Team to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +saad-ali diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/cmd/csi-snapshotter/create_crd.go b/vendor/github.com/kubernetes-csi/external-snapshotter/cmd/csi-snapshotter/create_crd.go new file mode 100644 index 000000000..656e1bd2d --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/cmd/csi-snapshotter/create_crd.go @@ -0,0 +1,93 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "reflect" + + crdv1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" +) + +// CreateCRD creates CustomResourceDefinition +func CreateCRD(clientset apiextensionsclient.Interface) error { + crd := &apiextensionsv1beta1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: crdv1.VolumeSnapshotClassResourcePlural + "." + crdv1.GroupName, + }, + Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ + Group: crdv1.GroupName, + Version: crdv1.SchemeGroupVersion.Version, + Scope: apiextensionsv1beta1.ClusterScoped, + Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ + Plural: crdv1.VolumeSnapshotClassResourcePlural, + Kind: reflect.TypeOf(crdv1.VolumeSnapshotClass{}).Name(), + }, + }, + } + res, err := clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd) + + if err != nil && !apierrors.IsAlreadyExists(err) { + klog.Fatalf("failed to create VolumeSnapshotResource: %#v, err: %#v", + res, err) + } + + crd = &apiextensionsv1beta1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: crdv1.VolumeSnapshotContentResourcePlural + "." + crdv1.GroupName, + }, + Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ + Group: crdv1.GroupName, + Version: crdv1.SchemeGroupVersion.Version, + Scope: apiextensionsv1beta1.ClusterScoped, + Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ + Plural: crdv1.VolumeSnapshotContentResourcePlural, + Kind: reflect.TypeOf(crdv1.VolumeSnapshotContent{}).Name(), + }, + }, + } + res, err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd) + + if err != nil && !apierrors.IsAlreadyExists(err) { + klog.Fatalf("failed to create VolumeSnapshotContentResource: %#v, err: %#v", + res, err) + } + + crd = &apiextensionsv1beta1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: crdv1.VolumeSnapshotResourcePlural + "." 
+ crdv1.GroupName, + }, + Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ + Group: crdv1.GroupName, + Version: crdv1.SchemeGroupVersion.Version, + Scope: apiextensionsv1beta1.NamespaceScoped, + Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ + Plural: crdv1.VolumeSnapshotResourcePlural, + Kind: reflect.TypeOf(crdv1.VolumeSnapshot{}).Name(), + }, + }, + } + res, err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd) + + if err != nil && !apierrors.IsAlreadyExists(err) { + klog.Fatalf("failed to create VolumeSnapshotResource: %#v, err: %#v", + res, err) + } + + return nil +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/cmd/csi-snapshotter/main.go b/vendor/github.com/kubernetes-csi/external-snapshotter/cmd/csi-snapshotter/main.go new file mode 100644 index 000000000..4b93a4a22 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/cmd/csi-snapshotter/main.go @@ -0,0 +1,241 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "flag" + "fmt" + "os" + "os/signal" + "time" + + "google.golang.org/grpc" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/klog" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/kubernetes-csi/csi-lib-utils/connection" + "github.com/kubernetes-csi/csi-lib-utils/leaderelection" + csirpc "github.com/kubernetes-csi/csi-lib-utils/rpc" + "github.com/kubernetes-csi/external-snapshotter/pkg/controller" + "github.com/kubernetes-csi/external-snapshotter/pkg/snapshotter" + + clientset "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned" + snapshotscheme "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/scheme" + informers "github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions" + apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + coreinformers "k8s.io/client-go/informers" +) + +const ( + // Number of worker threads + threads = 10 + + // Default timeout of short CSI calls like GetPluginInfo + defaultCSITimeout = time.Minute +) + +// Command line flags +var ( + snapshotterName = flag.String("snapshotter", "", "This option is deprecated.") + kubeconfig = flag.String("kubeconfig", "", "Absolute path to the kubeconfig file. 
Required only when running out of cluster.") + connectionTimeout = flag.Duration("connection-timeout", 0, "The --connection-timeout flag is deprecated") + csiAddress = flag.String("csi-address", "/run/csi/socket", "Address of the CSI driver socket.") + createSnapshotContentRetryCount = flag.Int("create-snapshotcontent-retrycount", 5, "Number of retries when we create a snapshot content object for a snapshot.") + createSnapshotContentInterval = flag.Duration("create-snapshotcontent-interval", 10*time.Second, "Interval between retries when we create a snapshot content object for a snapshot.") + resyncPeriod = flag.Duration("resync-period", 60*time.Second, "Resync interval of the controller.") + snapshotNamePrefix = flag.String("snapshot-name-prefix", "snapshot", "Prefix to apply to the name of a created snapshot") + snapshotNameUUIDLength = flag.Int("snapshot-name-uuid-length", -1, "Length in characters for the generated uuid of a created snapshot. Defaults behavior is to NOT truncate.") + showVersion = flag.Bool("version", false, "Show version.") + csiTimeout = flag.Duration("timeout", defaultCSITimeout, "The timeout for any RPCs to the CSI driver. Default is 1 minute.") + + leaderElection = flag.Bool("leader-election", false, "Enables leader election.") + leaderElectionNamespace = flag.String("leader-election-namespace", "", "The namespace where the leader election resource exists. Defaults to the pod namespace if not set.") +) + +var ( + version = "unknown" + leaderElectionLockName = "external-snapshotter-leader-election" +) + +func main() { + klog.InitFlags(nil) + flag.Set("logtostderr", "true") + flag.Parse() + + if *showVersion { + fmt.Println(os.Args[0], version) + os.Exit(0) + } + klog.Infof("Version: %s", version) + + if *connectionTimeout != 0 { + klog.Warning("--connection-timeout is deprecated and will have no effect") + } + + if *snapshotterName != "" { + klog.Warning("--snapshotter is deprecated and will have no effect") + } + + // Create the client config. Use kubeconfig if given, otherwise assume in-cluster. + config, err := buildConfig(*kubeconfig) + if err != nil { + klog.Error(err.Error()) + os.Exit(1) + } + + kubeClient, err := kubernetes.NewForConfig(config) + if err != nil { + klog.Error(err.Error()) + os.Exit(1) + } + + snapClient, err := clientset.NewForConfig(config) + if err != nil { + klog.Errorf("Error building snapshot clientset: %s", err.Error()) + os.Exit(1) + } + + factory := informers.NewSharedInformerFactory(snapClient, *resyncPeriod) + coreFactory := coreinformers.NewSharedInformerFactory(kubeClient, *resyncPeriod) + + // Create CRD resource + aeclientset, err := apiextensionsclient.NewForConfig(config) + if err != nil { + klog.Error(err.Error()) + os.Exit(1) + } + + // initialize CRD resource if it does not exist + err = CreateCRD(aeclientset) + if err != nil { + klog.Error(err.Error()) + os.Exit(1) + } + + // Add Snapshot types to the defualt Kubernetes so events can be logged for them + snapshotscheme.AddToScheme(scheme.Scheme) + + // Connect to CSI. 
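+	// connection.Connect (from csi-lib-utils) dials the CSI socket at -csi-address;
+	// as noted in the v1.1 changelog above, it keeps retrying until the driver is
+	// reachable instead of giving up after the now-deprecated --connection-timeout.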
+ csiConn, err := connection.Connect(*csiAddress) + if err != nil { + klog.Errorf("error connecting to CSI driver: %v", err) + os.Exit(1) + } + + // Pass a context with a timeout + ctx, cancel := context.WithTimeout(context.Background(), *csiTimeout) + defer cancel() + + // Find driver name + *snapshotterName, err = csirpc.GetDriverName(ctx, csiConn) + if err != nil { + klog.Errorf("error getting CSI driver name: %v", err) + os.Exit(1) + } + + klog.V(2).Infof("CSI driver name: %q", *snapshotterName) + + // Check it's ready + if err = csirpc.ProbeForever(csiConn, *csiTimeout); err != nil { + klog.Errorf("error waiting for CSI driver to be ready: %v", err) + os.Exit(1) + } + + // Find out if the driver supports create/delete snapshot. + supportsCreateSnapshot, err := supportsControllerCreateSnapshot(ctx, csiConn) + if err != nil { + klog.Errorf("error determining if driver supports create/delete snapshot operations: %v", err) + os.Exit(1) + } + if !supportsCreateSnapshot { + klog.Errorf("CSI driver %s does not support ControllerCreateSnapshot", *snapshotterName) + os.Exit(1) + } + + if len(*snapshotNamePrefix) == 0 { + klog.Error("Snapshot name prefix cannot be of length 0") + os.Exit(1) + } + + klog.V(2).Infof("Start NewCSISnapshotController with snapshotter [%s] kubeconfig [%s] connectionTimeout [%+v] csiAddress [%s] createSnapshotContentRetryCount [%d] createSnapshotContentInterval [%+v] resyncPeriod [%+v] snapshotNamePrefix [%s] snapshotNameUUIDLength [%d]", *snapshotterName, *kubeconfig, *connectionTimeout, *csiAddress, createSnapshotContentRetryCount, *createSnapshotContentInterval, *resyncPeriod, *snapshotNamePrefix, snapshotNameUUIDLength) + + snapShotter := snapshotter.NewSnapshotter(csiConn) + ctrl := controller.NewCSISnapshotController( + snapClient, + kubeClient, + *snapshotterName, + factory.Snapshot().V1alpha1().VolumeSnapshots(), + factory.Snapshot().V1alpha1().VolumeSnapshotContents(), + factory.Snapshot().V1alpha1().VolumeSnapshotClasses(), + coreFactory.Core().V1().PersistentVolumeClaims(), + *createSnapshotContentRetryCount, + *createSnapshotContentInterval, + snapShotter, + *csiTimeout, + *resyncPeriod, + *snapshotNamePrefix, + *snapshotNameUUIDLength, + ) + + run := func(context.Context) { + // run... 
+ stopCh := make(chan struct{}) + factory.Start(stopCh) + coreFactory.Start(stopCh) + go ctrl.Run(threads, stopCh) + + // ...until SIGINT + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + <-c + close(stopCh) + } + + if !*leaderElection { + run(context.TODO()) + } else { + le := leaderelection.NewLeaderElection(kubeClient, leaderElectionLockName, run) + if *leaderElectionNamespace != "" { + le.WithNamespace(*leaderElectionNamespace) + } + if err := le.Run(); err != nil { + klog.Fatalf("failed to initialize leader election: %v", err) + } + } +} + +func buildConfig(kubeconfig string) (*rest.Config, error) { + if kubeconfig != "" { + return clientcmd.BuildConfigFromFlags("", kubeconfig) + } + return rest.InClusterConfig() +} + +func supportsControllerCreateSnapshot(ctx context.Context, conn *grpc.ClientConn) (bool, error) { + capabilities, err := csirpc.GetControllerCapabilities(ctx, conn) + if err != nil { + return false, err + } + + return capabilities[csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT], nil +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/cmd/csi-snapshotter/main_test.go b/vendor/github.com/kubernetes-csi/external-snapshotter/cmd/csi-snapshotter/main_test.go new file mode 100644 index 000000000..f13aba72b --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/cmd/csi-snapshotter/main_test.go @@ -0,0 +1,161 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "context" + "fmt" + "testing" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/mock/gomock" + "github.com/kubernetes-csi/csi-lib-utils/connection" + "github.com/kubernetes-csi/csi-test/driver" + + "google.golang.org/grpc" +) + +func Test_supportsControllerCreateSnapshot(t *testing.T) { + tests := []struct { + name string + output *csi.ControllerGetCapabilitiesResponse + injectError bool + expectError bool + expectResult bool + }{ + { + name: "success", + output: &csi.ControllerGetCapabilitiesResponse{ + Capabilities: []*csi.ControllerServiceCapability{ + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, + }, + }, + }, + }, + }, + expectError: false, + expectResult: true, + }, + { + name: "gRPC error", + output: nil, + injectError: true, + expectError: true, + expectResult: false, + }, + { + name: "no create snapshot", + output: &csi.ControllerGetCapabilitiesResponse{ + Capabilities: []*csi.ControllerServiceCapability{ + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, + }, + }, + }, + }, + }, + expectError: false, + expectResult: false, + }, + { + name: "empty capability", + output: &csi.ControllerGetCapabilitiesResponse{ + Capabilities: []*csi.ControllerServiceCapability{ + { + Type: nil, + }, + }, + }, + expectError: false, + expectResult: false, + }, + { + name: "no capabilities", + output: &csi.ControllerGetCapabilitiesResponse{ + Capabilities: []*csi.ControllerServiceCapability{}, + }, + expectError: false, + expectResult: false, + }, + } + + mockController, driver, _, controllerServer, csiConn, err := createMockServer(t) + if err != nil { + t.Fatal(err) + } + defer mockController.Finish() + defer driver.Stop() + defer csiConn.Close() + + for _, test := range tests { + + in := &csi.ControllerGetCapabilitiesRequest{} + + out := test.output + var injectedErr error + if test.injectError { + injectedErr = fmt.Errorf("mock error") + } + + // Setup expectation + controllerServer.EXPECT().ControllerGetCapabilities(gomock.Any(), in).Return(out, injectedErr).Times(1) + + ok, err := supportsControllerCreateSnapshot(context.Background(), csiConn) + if test.expectError && err == nil { + t.Errorf("test %q: Expected error, got none", test.name) + } + if !test.expectError && err != nil { + t.Errorf("test %q: got error: %v", test.name, err) + } + if err == nil && test.expectResult != ok { + t.Errorf("test fail expected result %t but got %t\n", test.expectResult, ok) + } + } +} + +func createMockServer(t *testing.T) (*gomock.Controller, *driver.MockCSIDriver, *driver.MockIdentityServer, *driver.MockControllerServer, *grpc.ClientConn, error) { + // Start the mock server + mockController := gomock.NewController(t) + identityServer := driver.NewMockIdentityServer(mockController) + controllerServer := driver.NewMockControllerServer(mockController) + drv := driver.NewMockCSIDriver(&driver.MockCSIDriverServers{ + Identity: identityServer, + Controller: controllerServer, + }) + drv.Start() + + // Create a client connection to it + addr := drv.Address() + csiConn, err := connection.Connect(addr) + if err != nil { + return nil, nil, nil, nil, nil, err + } 
+ + return mockController, drv, identityServer, controllerServer, csiConn, nil +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/code-of-conduct.md b/vendor/github.com/kubernetes-csi/external-snapshotter/code-of-conduct.md new file mode 100644 index 000000000..0d15c00cf --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/deploy/kubernetes/README.md b/vendor/github.com/kubernetes-csi/external-snapshotter/deploy/kubernetes/README.md new file mode 100644 index 000000000..8f2c44860 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/deploy/kubernetes/README.md @@ -0,0 +1,2 @@ +rbac-external-provisioner.yaml was copied from https://github.com/kubernetes-csi/external-provisioner/blob/master/deploy/kubernetes/rbac.yaml +and must be refreshed when updating the external-provisioner image in setup-csi-snapshotter.yaml diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/deploy/kubernetes/rbac-external-provisioner.yaml b/vendor/github.com/kubernetes-csi/external-snapshotter/deploy/kubernetes/rbac-external-provisioner.yaml new file mode 100644 index 000000000..e4c84f612 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/deploy/kubernetes/rbac-external-provisioner.yaml @@ -0,0 +1,93 @@ +# This YAML file contains all RBAC objects that are necessary to run external +# CSI provisioner. +# +# In production, each CSI driver deployment has to be customized: +# - to avoid conflicts, use non-default namespace and different names +# for non-namespaced entities like the ClusterRole +# - decide whether the deployment replicates the external CSI +# provisioner, in which case leadership election must be enabled; +# this influences the RBAC setup, see below + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-provisioner + # replace with non-default namespace name + namespace: default + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: external-provisioner-runner +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-provisioner-role +subjects: + - kind: ServiceAccount + name: csi-provisioner + # replace with non-default namespace name + namespace: default +roleRef: + kind: ClusterRole + name: external-provisioner-runner + apiGroup: rbac.authorization.k8s.io + +--- +# Provisioner must be able to work with endpoints and leases in current namespace +# if (and only if) leadership election is enabled +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + # replace with non-default 
namespace name + namespace: default + name: external-provisioner-cfg +rules: +- apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-provisioner-role-cfg + # replace with non-default namespace name + namespace: default +subjects: + - kind: ServiceAccount + name: csi-provisioner + # replace with non-default namespace name + namespace: default +roleRef: + kind: Role + name: external-provisioner-cfg + apiGroup: rbac.authorization.k8s.io diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/deploy/kubernetes/rbac.yaml b/vendor/github.com/kubernetes-csi/external-snapshotter/deploy/kubernetes/rbac.yaml new file mode 100644 index 000000000..1bd9fa758 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/deploy/kubernetes/rbac.yaml @@ -0,0 +1,92 @@ +# Together with the RBAC file for external-provisioner, this YAML file +# contains all RBAC objects that are necessary to run external CSI +# snapshotter. +# +# In production, each CSI driver deployment has to be customized: +# - to avoid conflicts, use non-default namespace and different names +# for non-namespaced entities like the ClusterRole +# - optionally rename the non-namespaced ClusterRole if there +# are conflicts with other deployments + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-snapshotter + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + # rename if there are conflicts + name: external-snapshotter-runner +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshotter-role +subjects: + - kind: ServiceAccount + name: csi-snapshotter + # replace with non-default namespace name + namespace: default +roleRef: + kind: ClusterRole + # change the name also here if the ClusterRole gets renamed + name: external-snapshotter-runner + apiGroup: rbac.authorization.k8s.io + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: default # TODO: replace with the namespace you want for your sidecar + name: external-snapshotter-leaderelection +rules: +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: 
external-snapshotter-leaderelection + namespace: default # TODO: replace with the namespace you want for your sidecar +subjects: + - kind: ServiceAccount + name: csi-snapshotter + namespace: default # TODO: replace with the namespace you want for your sidecar +roleRef: + kind: Role + name: external-snapshotter-leaderelection + apiGroup: rbac.authorization.k8s.io + diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/deploy/kubernetes/setup-csi-snapshotter.yaml b/vendor/github.com/kubernetes-csi/external-snapshotter/deploy/kubernetes/setup-csi-snapshotter.yaml new file mode 100644 index 000000000..d3b9e4ade --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/deploy/kubernetes/setup-csi-snapshotter.yaml @@ -0,0 +1,122 @@ +# This YAML file shows how to deploy the CSI snapshotter together +# with the hostpath CSI driver. It depends on the RBAC rules +# from rbac.yaml and rbac-external-provisioner.yaml. +# +# Because external-snapshotter and external-provisioner get +# deployed in the same pod, we have to merge the permissions +# for the provisioner into the service account. This is not +# necessary when deploying separately. + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshotter-provisioner-role +subjects: + - kind: ServiceAccount + name: csi-snapshotter # from rbac.yaml + # replace with non-default namespace name + namespace: default +roleRef: + kind: ClusterRole + name: external-provisioner-runner # from rbac-external-provisioner.yaml + apiGroup: rbac.authorization.k8s.io + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshotter-provisioner-role-cfg + # replace with non-default namespace name + namespace: default +subjects: + - kind: ServiceAccount + name: csi-snapshotter # from rbac.yaml + # replace with non-default namespace name + namespace: default +roleRef: + kind: Role + name: external-provisioner-cfg # from rbac-external-provisioner.yaml + apiGroup: rbac.authorization.k8s.io + +--- +kind: Service +apiVersion: v1 +metadata: + name: csi-snapshotter + labels: + app: csi-snapshotter +spec: + selector: + app: csi-snapshotter + ports: + - name: dummy + port: 12345 + +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: csi-snapshotter +spec: + serviceName: "csi-snapshotter" + replicas: 1 + selector: + matchLabels: + app: csi-snapshotter + template: + metadata: + labels: + app: csi-snapshotter + spec: + serviceAccount: csi-snapshotter + containers: + - name: csi-provisioner + image: quay.io/k8scsi/csi-provisioner:v1.0.1 + args: + - "--provisioner=csi-hostpath" + - "--csi-address=$(ADDRESS)" + - "--connection-timeout=15s" + env: + - name: ADDRESS + value: /csi/csi.sock + imagePullPolicy: Always + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: csi-snapshotter + image: quay.io/k8scsi/csi-snapshotter:v1.0.1 + args: + - "--csi-address=$(ADDRESS)" + - "--connection-timeout=15s" + - "--leader-election=false" + env: + - name: ADDRESS + value: /csi/csi.sock + imagePullPolicy: Always + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: hostpath + image: quay.io/k8scsi/hostpathplugin:v1.0.1 + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + imagePullPolicy: Always + securityContext: + privileged: true + volumeMounts: + - name: socket-dir + 
mountPath: /csi + volumes: + - name: socket-dir + emptyDir: diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/examples/kubernetes/pvc.yaml b/vendor/github.com/kubernetes-csi/external-snapshotter/examples/kubernetes/pvc.yaml new file mode 100644 index 000000000..cb3c4560d --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/examples/kubernetes/pvc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: hpvc +spec: + storageClassName: csi-hostpath-sc + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/examples/kubernetes/restore.yaml b/vendor/github.com/kubernetes-csi/external-snapshotter/examples/kubernetes/restore.yaml new file mode 100644 index 000000000..942d0cf8a --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/examples/kubernetes/restore.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: hpvc-restore +spec: + storageClassName: csi-hostpath-sc + dataSource: + name: new-snapshot-demo + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/examples/kubernetes/snapshot.yaml b/vendor/github.com/kubernetes-csi/external-snapshotter/examples/kubernetes/snapshot.yaml new file mode 100644 index 000000000..b7a913f9c --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/examples/kubernetes/snapshot.yaml @@ -0,0 +1,9 @@ +apiVersion: snapshot.storage.k8s.io/v1alpha1 +kind: VolumeSnapshot +metadata: + name: new-snapshot-demo +spec: + snapshotClassName: csi-hostpath-snapclass + source: + name: hpvc + kind: PersistentVolumeClaim diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/examples/kubernetes/snapshotclass.yaml b/vendor/github.com/kubernetes-csi/external-snapshotter/examples/kubernetes/snapshotclass.yaml new file mode 100644 index 000000000..dfa34df56 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/examples/kubernetes/snapshotclass.yaml @@ -0,0 +1,5 @@ +apiVersion: snapshot.storage.k8s.io/v1alpha1 +kind: VolumeSnapshotClass +metadata: + name: csi-hostpath-snapclass +snapshotter: csi-hostpath diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/examples/kubernetes/storageclass.yaml b/vendor/github.com/kubernetes-csi/external-snapshotter/examples/kubernetes/storageclass.yaml new file mode 100644 index 000000000..c92797167 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/examples/kubernetes/storageclass.yaml @@ -0,0 +1,7 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: csi-hostpath-sc +provisioner: csi-hostpath +reclaimPolicy: Delete +volumeBindingMode: Immediate diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/hack/boilerplate.go.txt b/vendor/github.com/kubernetes-csi/external-snapshotter/hack/boilerplate.go.txt new file mode 100644 index 000000000..4b76f1fdd --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright YEAR The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/hack/update-generated-code.sh b/vendor/github.com/kubernetes-csi/external-snapshotter/hack/update-generated-code.sh new file mode 100755 index 000000000..f48150d77 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/hack/update-generated-code.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +SCRIPT_ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd) +#cd $ROOT +#SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/.. +CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} + +# generate the code with: +# --output-base because this script should also be able to run inside the vendor dir of +# k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir +# instead of the $GOPATH directly. For normal projects this can be dropped. +${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \ + github.com/kubernetes-csi/external-snapshotter/pkg/client github.com/kubernetes-csi/external-snapshotter/pkg/apis \ + volumesnapshot:v1alpha1 \ + --go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt + +# To use your own boilerplate text use: +# --go-header-file ${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1/doc.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1/doc.go new file mode 100644 index 000000000..90642a58b --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +groupName=snapshot.storage.k8s.io + +package v1alpha1 diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1/register.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1/register.go new file mode 100644 index 000000000..e7c38b6ba --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2018 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package. +const GroupName = "snapshot.storage.k8s.io" + +var ( + // SchemeBuilder is the new scheme builder + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme adds to scheme + AddToScheme = SchemeBuilder.AddToScheme + // SchemeGroupVersion is the group version used to register these objects. + SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} +) + +// Resource takes an unqualified resource and returns a Group-qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + SchemeBuilder.Register(addKnownTypes) +} + +// addKnownTypes adds the set of types defined in this package to the supplied scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &VolumeSnapshotClass{}, + &VolumeSnapshotClassList{}, + &VolumeSnapshot{}, + &VolumeSnapshotList{}, + &VolumeSnapshotContent{}, + &VolumeSnapshotContentList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1/types.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1/types.go new file mode 100644 index 000000000..65c1aafaa --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1/types.go @@ -0,0 +1,256 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + core_v1 "k8s.io/api/core/v1" + storage "k8s.io/api/storage/v1beta1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // VolumeSnapshotContentResourcePlural is "volumesnapshotcontents" + VolumeSnapshotContentResourcePlural = "volumesnapshotcontents" + // VolumeSnapshotResourcePlural is "volumesnapshots" + VolumeSnapshotResourcePlural = "volumesnapshots" + // VolumeSnapshotClassResourcePlural is "volumesnapshotclasses" + VolumeSnapshotClassResourcePlural = "volumesnapshotclasses" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeSnapshot is a user's request for taking a snapshot. Upon successful creation of the actual +// snapshot by the volume provider it is bound to the corresponding VolumeSnapshotContent. +// Only the VolumeSnapshot object is accessible to the user in the namespace. +type VolumeSnapshot struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired characteristics of a snapshot requested by a user. + Spec VolumeSnapshotSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status represents the latest observed state of the snapshot + // +optional + Status VolumeSnapshotStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeSnapshotList is a list of VolumeSnapshot objects +type VolumeSnapshotList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of VolumeSnapshots + Items []VolumeSnapshot `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// VolumeSnapshotSpec describes the common attributes of a volume snapshot +type VolumeSnapshotSpec struct { + // Source has the information about where the snapshot is created from. + // In Alpha version, only PersistentVolumeClaim is supported as the source. + // If not specified, user can create VolumeSnapshotContent and bind it with VolumeSnapshot manually. + // +optional + Source *core_v1.TypedLocalObjectReference `json:"source" protobuf:"bytes,1,opt,name=source"` + + // SnapshotContentName binds the VolumeSnapshot object with the VolumeSnapshotContent + // +optional + SnapshotContentName string `json:"snapshotContentName" protobuf:"bytes,2,opt,name=snapshotContentName"` + + // Name of the VolumeSnapshotClass used by the VolumeSnapshot. If not specified, a default snapshot class will + // be used if it is available. + // +optional + VolumeSnapshotClassName *string `json:"snapshotClassName" protobuf:"bytes,3,opt,name=snapshotClassName"` +} + +// VolumeSnapshotStatus is the status of the VolumeSnapshot +type VolumeSnapshotStatus struct { + // CreationTime is the time the snapshot was successfully created. If it is set, + // it means the snapshot was created; Otherwise the snapshot was not created. 
+ // +optional + CreationTime *metav1.Time `json:"creationTime" protobuf:"bytes,1,opt,name=creationTime"` + + // When restoring volume from the snapshot, the volume size should be equal to or + // larger than the RestoreSize if it is specified. If RestoreSize is set to nil, it means + // that the storage plugin does not have this information available. + // +optional + RestoreSize *resource.Quantity `json:"restoreSize" protobuf:"bytes,2,opt,name=restoreSize"` + + // ReadyToUse is set to true only if the snapshot is ready to use (e.g., finish uploading if + // there is an uploading phase) and also VolumeSnapshot and its VolumeSnapshotContent + // bind correctly with each other. If any of the above condition is not true, ReadyToUse is + // set to false + // +optional + ReadyToUse bool `json:"readyToUse" protobuf:"varint,3,opt,name=readyToUse"` + + // The last error encountered during create snapshot operation, if any. + // This field must only be set by the entity completing the create snapshot + // operation, i.e. the external-snapshotter. + // +optional + Error *storage.VolumeError `json:"error,omitempty" protobuf:"bytes,4,opt,name=error,casttype=VolumeError"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeSnapshotClass describes the parameters used by storage system when +// provisioning VolumeSnapshots from PVCs. +// The name of a VolumeSnapshotClass object is significant, and is how users can request a particular class. +type VolumeSnapshotClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Snapshotter is the driver expected to handle this VolumeSnapshotClass. + Snapshotter string `json:"snapshotter" protobuf:"bytes,2,opt,name=snapshotter"` + + // Parameters holds parameters for the snapshotter. + // These values are opaque to the system and are passed directly + // to the snapshotter. + // +optional + Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` + + // Optional: what happens to a snapshot content when released from its snapshot. + // The default policy is Delete if not specified. + // +optional + DeletionPolicy *DeletionPolicy `json:"deletionPolicy,omitempty" protobuf:"bytes,4,opt,name=deletionPolicy"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeSnapshotClassList is a collection of snapshot classes. +type VolumeSnapshotClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of VolumeSnapshotClasses + Items []VolumeSnapshotClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeSnapshotContent represents the actual "on-disk" snapshot object +type VolumeSnapshotContent struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec represents the desired state of the snapshot content + Spec VolumeSnapshotContentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeSnapshotContentList is a list of VolumeSnapshotContent objects +type VolumeSnapshotContentList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of VolumeSnapshotContents + Items []VolumeSnapshotContent `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// VolumeSnapshotContentSpec is the spec of the volume snapshot content +type VolumeSnapshotContentSpec struct { + // Source represents the location and type of the volume snapshot + VolumeSnapshotSource `json:",inline" protobuf:"bytes,1,opt,name=volumeSnapshotSource"` + + // VolumeSnapshotRef is part of bi-directional binding between VolumeSnapshot + // and VolumeSnapshotContent. It becomes non-nil when bound. + // +optional + VolumeSnapshotRef *core_v1.ObjectReference `json:"volumeSnapshotRef" protobuf:"bytes,2,opt,name=volumeSnapshotRef"` + + // PersistentVolumeRef represents the PersistentVolume that the snapshot has been + // taken from. It becomes non-nil when VolumeSnapshot and VolumeSnapshotContent are bound. + // +optional + PersistentVolumeRef *core_v1.ObjectReference `json:"persistentVolumeRef" protobuf:"bytes,3,opt,name=persistentVolumeRef"` + + // Name of the VolumeSnapshotClass used by the VolumeSnapshot. If not specified, a default snapshot class will + // be used if it is available. + // +optional + VolumeSnapshotClassName *string `json:"snapshotClassName" protobuf:"bytes,4,opt,name=snapshotClassName"` + + // Optional: what happens to a snapshot content when released from its snapshot. It will be set to Delete by default + // if not specified + // +optional + DeletionPolicy *DeletionPolicy `json:"deletionPolicy" protobuf:"bytes,5,opt,name=deletionPolicy"` +} + +// VolumeSnapshotSource represents the actual location and type of the snapshot. Only one of its members may be specified. +type VolumeSnapshotSource struct { + // CSI (Container Storage Interface) represents storage that handled by an external CSI Volume Driver (Alpha feature). + // +optional + CSI *CSIVolumeSnapshotSource `json:"csiVolumeSnapshotSource,omitempty"` +} + +// CSIVolumeSnapshotSource represents the source from CSI volume snapshot +type CSIVolumeSnapshotSource struct { + // Driver is the name of the driver to use for this snapshot. + // This MUST be the same name returned by the CSI GetPluginName() call for + // that driver. + // Required. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` + + // SnapshotHandle is the unique snapshot id returned by the CSI volume + // plugin’s CreateSnapshot to refer to the snapshot on all subsequent calls. + // Required. + SnapshotHandle string `json:"snapshotHandle" protobuf:"bytes,2,opt,name=snapshotHandle"` + + // Timestamp when the point-in-time snapshot is taken on the storage + // system. This timestamp will be generated by the CSI volume driver after + // the snapshot is cut. The format of this field should be a Unix nanoseconds + // time encoded as an int64. 
On Unix, the command `date +%s%N` returns + // the current time in nanoseconds since 1970-01-01 00:00:00 UTC. + // This field is required in the CSI spec but optional here to support static binding. + // +optional + CreationTime *int64 `json:"creationTime,omitempty" protobuf:"varint,3,opt,name=creationTime"` + + // When restoring volume from the snapshot, the volume size should be equal to or + // larger than the RestoreSize if it is specified. If RestoreSize is set to nil, it means + // that the storage plugin does not have this information available. + // +optional + RestoreSize *int64 `json:"restoreSize,omitempty" protobuf:"bytes,4,opt,name=restoreSize"` +} + +// DeletionPolicy describes a policy for end-of-life maintenance of volume snapshot contents +type DeletionPolicy string + +const ( + // VolumeSnapshotContentDelete means the snapshot content will be deleted from Kubernetes on release from its volume snapshot. + VolumeSnapshotContentDelete DeletionPolicy = "Delete" + + // VolumeSnapshotContentRetain means the snapshot will be left in its current state on release from its volume snapshot. + // The default policy is Retain if not specified. + VolumeSnapshotContentRetain DeletionPolicy = "Retain" +) diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..38db004a8 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,359 @@ +// +build !ignore_autogenerated + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + v1beta1 "k8s.io/api/storage/v1beta1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIVolumeSnapshotSource) DeepCopyInto(out *CSIVolumeSnapshotSource) { + *out = *in + if in.CreationTime != nil { + in, out := &in.CreationTime, &out.CreationTime + *out = new(int64) + **out = **in + } + if in.RestoreSize != nil { + in, out := &in.RestoreSize, &out.RestoreSize + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIVolumeSnapshotSource. +func (in *CSIVolumeSnapshotSource) DeepCopy() *CSIVolumeSnapshotSource { + if in == nil { + return nil + } + out := new(CSIVolumeSnapshotSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeSnapshot) DeepCopyInto(out *VolumeSnapshot) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshot. +func (in *VolumeSnapshot) DeepCopy() *VolumeSnapshot { + if in == nil { + return nil + } + out := new(VolumeSnapshot) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeSnapshot) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotClass) DeepCopyInto(out *VolumeSnapshotClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.DeletionPolicy != nil { + in, out := &in.DeletionPolicy, &out.DeletionPolicy + *out = new(DeletionPolicy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotClass. +func (in *VolumeSnapshotClass) DeepCopy() *VolumeSnapshotClass { + if in == nil { + return nil + } + out := new(VolumeSnapshotClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeSnapshotClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotClassList) DeepCopyInto(out *VolumeSnapshotClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeSnapshotClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotClassList. +func (in *VolumeSnapshotClassList) DeepCopy() *VolumeSnapshotClassList { + if in == nil { + return nil + } + out := new(VolumeSnapshotClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeSnapshotClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotContent) DeepCopyInto(out *VolumeSnapshotContent) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotContent. 
+func (in *VolumeSnapshotContent) DeepCopy() *VolumeSnapshotContent { + if in == nil { + return nil + } + out := new(VolumeSnapshotContent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeSnapshotContent) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotContentList) DeepCopyInto(out *VolumeSnapshotContentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeSnapshotContent, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotContentList. +func (in *VolumeSnapshotContentList) DeepCopy() *VolumeSnapshotContentList { + if in == nil { + return nil + } + out := new(VolumeSnapshotContentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeSnapshotContentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotContentSpec) DeepCopyInto(out *VolumeSnapshotContentSpec) { + *out = *in + in.VolumeSnapshotSource.DeepCopyInto(&out.VolumeSnapshotSource) + if in.VolumeSnapshotRef != nil { + in, out := &in.VolumeSnapshotRef, &out.VolumeSnapshotRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.PersistentVolumeRef != nil { + in, out := &in.PersistentVolumeRef, &out.PersistentVolumeRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.VolumeSnapshotClassName != nil { + in, out := &in.VolumeSnapshotClassName, &out.VolumeSnapshotClassName + *out = new(string) + **out = **in + } + if in.DeletionPolicy != nil { + in, out := &in.DeletionPolicy, &out.DeletionPolicy + *out = new(DeletionPolicy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotContentSpec. +func (in *VolumeSnapshotContentSpec) DeepCopy() *VolumeSnapshotContentSpec { + if in == nil { + return nil + } + out := new(VolumeSnapshotContentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotList) DeepCopyInto(out *VolumeSnapshotList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeSnapshot, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotList. +func (in *VolumeSnapshotList) DeepCopy() *VolumeSnapshotList { + if in == nil { + return nil + } + out := new(VolumeSnapshotList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *VolumeSnapshotList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotSource) DeepCopyInto(out *VolumeSnapshotSource) { + *out = *in + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + *out = new(CSIVolumeSnapshotSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotSource. +func (in *VolumeSnapshotSource) DeepCopy() *VolumeSnapshotSource { + if in == nil { + return nil + } + out := new(VolumeSnapshotSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotSpec) DeepCopyInto(out *VolumeSnapshotSpec) { + *out = *in + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(v1.TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } + if in.VolumeSnapshotClassName != nil { + in, out := &in.VolumeSnapshotClassName, &out.VolumeSnapshotClassName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotSpec. +func (in *VolumeSnapshotSpec) DeepCopy() *VolumeSnapshotSpec { + if in == nil { + return nil + } + out := new(VolumeSnapshotSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotStatus) DeepCopyInto(out *VolumeSnapshotStatus) { + *out = *in + if in.CreationTime != nil { + in, out := &in.CreationTime, &out.CreationTime + *out = (*in).DeepCopy() + } + if in.RestoreSize != nil { + in, out := &in.RestoreSize, &out.RestoreSize + x := (*in).DeepCopy() + *out = &x + } + if in.Error != nil { + in, out := &in.Error, &out.Error + *out = new(v1beta1.VolumeError) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotStatus. +func (in *VolumeSnapshotStatus) DeepCopy() *VolumeSnapshotStatus { + if in == nil { + return nil + } + out := new(VolumeSnapshotStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/clientset.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/clientset.go new file mode 100644 index 000000000..5bcff9985 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/clientset.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package versioned + +import ( + snapshotv1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + SnapshotV1alpha1() snapshotv1alpha1.SnapshotV1alpha1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + snapshotV1alpha1 *snapshotv1alpha1.SnapshotV1alpha1Client +} + +// SnapshotV1alpha1 retrieves the SnapshotV1alpha1Client +func (c *Clientset) SnapshotV1alpha1() snapshotv1alpha1.SnapshotV1alpha1Interface { + return c.snapshotV1alpha1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.snapshotV1alpha1, err = snapshotv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.snapshotV1alpha1 = snapshotv1alpha1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.snapshotV1alpha1 = snapshotv1alpha1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/doc.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/doc.go new file mode 100644 index 000000000..dc992b90b --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. 
+package versioned diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 000000000..0c5b82937 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,77 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + clientset "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned" + snapshotv1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1" + fakesnapshotv1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
+type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +var _ clientset.Interface = &Clientset{} + +// SnapshotV1alpha1 retrieves the SnapshotV1alpha1Client +func (c *Clientset) SnapshotV1alpha1() snapshotv1alpha1.SnapshotV1alpha1Interface { + return &fakesnapshotv1alpha1.FakeSnapshotV1alpha1{Fake: &c.Fake} +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/fake/doc.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/fake/doc.go new file mode 100644 index 000000000..acfa6173b --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/fake/register.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/fake/register.go new file mode 100644 index 000000000..903889fd9 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/fake/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + snapshotv1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) +var parameterCodec = runtime.NewParameterCodec(scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + snapshotv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/scheme/doc.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/scheme/doc.go new file mode 100644 index 000000000..7f61dc1f9 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/scheme/register.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/scheme/register.go new file mode 100644 index 000000000..46efa8fe5 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/scheme/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + snapshotv1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + snapshotv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/doc.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/doc.go new file mode 100644 index 000000000..9752e759c --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/doc.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/doc.go new file mode 100644 index 000000000..ab4fd43ad --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshot.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshot.go new file mode 100644 index 000000000..05016a1a2 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshot.go @@ -0,0 +1,140 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeVolumeSnapshots implements VolumeSnapshotInterface +type FakeVolumeSnapshots struct { + Fake *FakeSnapshotV1alpha1 + ns string +} + +var volumesnapshotsResource = schema.GroupVersionResource{Group: "snapshot.storage.k8s.io", Version: "v1alpha1", Resource: "volumesnapshots"} + +var volumesnapshotsKind = schema.GroupVersionKind{Group: "snapshot.storage.k8s.io", Version: "v1alpha1", Kind: "VolumeSnapshot"} + +// Get takes name of the volumeSnapshot, and returns the corresponding volumeSnapshot object, and an error if there is any. +func (c *FakeVolumeSnapshots) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeSnapshot, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(volumesnapshotsResource, c.ns, name), &v1alpha1.VolumeSnapshot{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshot), err +} + +// List takes label and field selectors, and returns the list of VolumeSnapshots that match those selectors. +func (c *FakeVolumeSnapshots) List(opts v1.ListOptions) (result *v1alpha1.VolumeSnapshotList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(volumesnapshotsResource, volumesnapshotsKind, c.ns, opts), &v1alpha1.VolumeSnapshotList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.VolumeSnapshotList{ListMeta: obj.(*v1alpha1.VolumeSnapshotList).ListMeta} + for _, item := range obj.(*v1alpha1.VolumeSnapshotList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested volumeSnapshots. +func (c *FakeVolumeSnapshots) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(volumesnapshotsResource, c.ns, opts)) + +} + +// Create takes the representation of a volumeSnapshot and creates it. 
Returns the server's representation of the volumeSnapshot, and an error, if there is any. +func (c *FakeVolumeSnapshots) Create(volumeSnapshot *v1alpha1.VolumeSnapshot) (result *v1alpha1.VolumeSnapshot, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(volumesnapshotsResource, c.ns, volumeSnapshot), &v1alpha1.VolumeSnapshot{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshot), err +} + +// Update takes the representation of a volumeSnapshot and updates it. Returns the server's representation of the volumeSnapshot, and an error, if there is any. +func (c *FakeVolumeSnapshots) Update(volumeSnapshot *v1alpha1.VolumeSnapshot) (result *v1alpha1.VolumeSnapshot, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(volumesnapshotsResource, c.ns, volumeSnapshot), &v1alpha1.VolumeSnapshot{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshot), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeVolumeSnapshots) UpdateStatus(volumeSnapshot *v1alpha1.VolumeSnapshot) (*v1alpha1.VolumeSnapshot, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(volumesnapshotsResource, "status", c.ns, volumeSnapshot), &v1alpha1.VolumeSnapshot{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshot), err +} + +// Delete takes name of the volumeSnapshot and deletes it. Returns an error if one occurs. +func (c *FakeVolumeSnapshots) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(volumesnapshotsResource, c.ns, name), &v1alpha1.VolumeSnapshot{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVolumeSnapshots) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(volumesnapshotsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.VolumeSnapshotList{}) + return err +} + +// Patch applies the patch and returns the patched volumeSnapshot. +func (c *FakeVolumeSnapshots) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeSnapshot, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(volumesnapshotsResource, c.ns, name, pt, data, subresources...), &v1alpha1.VolumeSnapshot{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshot), err +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshot_client.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshot_client.go new file mode 100644 index 000000000..6c8b11a57 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshot_client.go @@ -0,0 +1,48 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeSnapshotV1alpha1 struct { + *testing.Fake +} + +func (c *FakeSnapshotV1alpha1) VolumeSnapshots(namespace string) v1alpha1.VolumeSnapshotInterface { + return &FakeVolumeSnapshots{c, namespace} +} + +func (c *FakeSnapshotV1alpha1) VolumeSnapshotClasses() v1alpha1.VolumeSnapshotClassInterface { + return &FakeVolumeSnapshotClasses{c} +} + +func (c *FakeSnapshotV1alpha1) VolumeSnapshotContents() v1alpha1.VolumeSnapshotContentInterface { + return &FakeVolumeSnapshotContents{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeSnapshotV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshotclass.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshotclass.go new file mode 100644 index 000000000..5c756b20b --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshotclass.go @@ -0,0 +1,120 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeVolumeSnapshotClasses implements VolumeSnapshotClassInterface +type FakeVolumeSnapshotClasses struct { + Fake *FakeSnapshotV1alpha1 +} + +var volumesnapshotclassesResource = schema.GroupVersionResource{Group: "snapshot.storage.k8s.io", Version: "v1alpha1", Resource: "volumesnapshotclasses"} + +var volumesnapshotclassesKind = schema.GroupVersionKind{Group: "snapshot.storage.k8s.io", Version: "v1alpha1", Kind: "VolumeSnapshotClass"} + +// Get takes name of the volumeSnapshotClass, and returns the corresponding volumeSnapshotClass object, and an error if there is any. 
+func (c *FakeVolumeSnapshotClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeSnapshotClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(volumesnapshotclassesResource, name), &v1alpha1.VolumeSnapshotClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshotClass), err +} + +// List takes label and field selectors, and returns the list of VolumeSnapshotClasses that match those selectors. +func (c *FakeVolumeSnapshotClasses) List(opts v1.ListOptions) (result *v1alpha1.VolumeSnapshotClassList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(volumesnapshotclassesResource, volumesnapshotclassesKind, opts), &v1alpha1.VolumeSnapshotClassList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.VolumeSnapshotClassList{ListMeta: obj.(*v1alpha1.VolumeSnapshotClassList).ListMeta} + for _, item := range obj.(*v1alpha1.VolumeSnapshotClassList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested volumeSnapshotClasses. +func (c *FakeVolumeSnapshotClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(volumesnapshotclassesResource, opts)) +} + +// Create takes the representation of a volumeSnapshotClass and creates it. Returns the server's representation of the volumeSnapshotClass, and an error, if there is any. +func (c *FakeVolumeSnapshotClasses) Create(volumeSnapshotClass *v1alpha1.VolumeSnapshotClass) (result *v1alpha1.VolumeSnapshotClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(volumesnapshotclassesResource, volumeSnapshotClass), &v1alpha1.VolumeSnapshotClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshotClass), err +} + +// Update takes the representation of a volumeSnapshotClass and updates it. Returns the server's representation of the volumeSnapshotClass, and an error, if there is any. +func (c *FakeVolumeSnapshotClasses) Update(volumeSnapshotClass *v1alpha1.VolumeSnapshotClass) (result *v1alpha1.VolumeSnapshotClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(volumesnapshotclassesResource, volumeSnapshotClass), &v1alpha1.VolumeSnapshotClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshotClass), err +} + +// Delete takes name of the volumeSnapshotClass and deletes it. Returns an error if one occurs. +func (c *FakeVolumeSnapshotClasses) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(volumesnapshotclassesResource, name), &v1alpha1.VolumeSnapshotClass{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVolumeSnapshotClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(volumesnapshotclassesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.VolumeSnapshotClassList{}) + return err +} + +// Patch applies the patch and returns the patched volumeSnapshotClass. +func (c *FakeVolumeSnapshotClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeSnapshotClass, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(volumesnapshotclassesResource, name, pt, data, subresources...), &v1alpha1.VolumeSnapshotClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshotClass), err +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshotcontent.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshotcontent.go new file mode 100644 index 000000000..f646d1d9d --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/fake/fake_volumesnapshotcontent.go @@ -0,0 +1,120 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeVolumeSnapshotContents implements VolumeSnapshotContentInterface +type FakeVolumeSnapshotContents struct { + Fake *FakeSnapshotV1alpha1 +} + +var volumesnapshotcontentsResource = schema.GroupVersionResource{Group: "snapshot.storage.k8s.io", Version: "v1alpha1", Resource: "volumesnapshotcontents"} + +var volumesnapshotcontentsKind = schema.GroupVersionKind{Group: "snapshot.storage.k8s.io", Version: "v1alpha1", Kind: "VolumeSnapshotContent"} + +// Get takes name of the volumeSnapshotContent, and returns the corresponding volumeSnapshotContent object, and an error if there is any. +func (c *FakeVolumeSnapshotContents) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeSnapshotContent, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(volumesnapshotcontentsResource, name), &v1alpha1.VolumeSnapshotContent{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshotContent), err +} + +// List takes label and field selectors, and returns the list of VolumeSnapshotContents that match those selectors. +func (c *FakeVolumeSnapshotContents) List(opts v1.ListOptions) (result *v1alpha1.VolumeSnapshotContentList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(volumesnapshotcontentsResource, volumesnapshotcontentsKind, opts), &v1alpha1.VolumeSnapshotContentList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.VolumeSnapshotContentList{ListMeta: obj.(*v1alpha1.VolumeSnapshotContentList).ListMeta} + for _, item := range obj.(*v1alpha1.VolumeSnapshotContentList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested volumeSnapshotContents. +func (c *FakeVolumeSnapshotContents) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(volumesnapshotcontentsResource, opts)) +} + +// Create takes the representation of a volumeSnapshotContent and creates it. Returns the server's representation of the volumeSnapshotContent, and an error, if there is any. +func (c *FakeVolumeSnapshotContents) Create(volumeSnapshotContent *v1alpha1.VolumeSnapshotContent) (result *v1alpha1.VolumeSnapshotContent, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(volumesnapshotcontentsResource, volumeSnapshotContent), &v1alpha1.VolumeSnapshotContent{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshotContent), err +} + +// Update takes the representation of a volumeSnapshotContent and updates it. Returns the server's representation of the volumeSnapshotContent, and an error, if there is any. +func (c *FakeVolumeSnapshotContents) Update(volumeSnapshotContent *v1alpha1.VolumeSnapshotContent) (result *v1alpha1.VolumeSnapshotContent, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(volumesnapshotcontentsResource, volumeSnapshotContent), &v1alpha1.VolumeSnapshotContent{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshotContent), err +} + +// Delete takes name of the volumeSnapshotContent and deletes it. Returns an error if one occurs. +func (c *FakeVolumeSnapshotContents) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(volumesnapshotcontentsResource, name), &v1alpha1.VolumeSnapshotContent{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVolumeSnapshotContents) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(volumesnapshotcontentsResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.VolumeSnapshotContentList{}) + return err +} + +// Patch applies the patch and returns the patched volumeSnapshotContent. +func (c *FakeVolumeSnapshotContents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeSnapshotContent, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(volumesnapshotcontentsResource, name, pt, data, subresources...), &v1alpha1.VolumeSnapshotContent{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeSnapshotContent), err +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/generated_expansion.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..9b641f106 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/generated_expansion.go @@ -0,0 +1,25 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type VolumeSnapshotExpansion interface{} + +type VolumeSnapshotClassExpansion interface{} + +type VolumeSnapshotContentExpansion interface{} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshot.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshot.go new file mode 100644 index 000000000..a2f80805a --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshot.go @@ -0,0 +1,191 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + scheme "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// VolumeSnapshotsGetter has a method to return a VolumeSnapshotInterface. +// A group's client should implement this interface. +type VolumeSnapshotsGetter interface { + VolumeSnapshots(namespace string) VolumeSnapshotInterface +} + +// VolumeSnapshotInterface has methods to work with VolumeSnapshot resources. 
+type VolumeSnapshotInterface interface { + Create(*v1alpha1.VolumeSnapshot) (*v1alpha1.VolumeSnapshot, error) + Update(*v1alpha1.VolumeSnapshot) (*v1alpha1.VolumeSnapshot, error) + UpdateStatus(*v1alpha1.VolumeSnapshot) (*v1alpha1.VolumeSnapshot, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.VolumeSnapshot, error) + List(opts v1.ListOptions) (*v1alpha1.VolumeSnapshotList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeSnapshot, err error) + VolumeSnapshotExpansion +} + +// volumeSnapshots implements VolumeSnapshotInterface +type volumeSnapshots struct { + client rest.Interface + ns string +} + +// newVolumeSnapshots returns a VolumeSnapshots +func newVolumeSnapshots(c *SnapshotV1alpha1Client, namespace string) *volumeSnapshots { + return &volumeSnapshots{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the volumeSnapshot, and returns the corresponding volumeSnapshot object, and an error if there is any. +func (c *volumeSnapshots) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeSnapshot, err error) { + result = &v1alpha1.VolumeSnapshot{} + err = c.client.Get(). + Namespace(c.ns). + Resource("volumesnapshots"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeSnapshots that match those selectors. +func (c *volumeSnapshots) List(opts v1.ListOptions) (result *v1alpha1.VolumeSnapshotList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.VolumeSnapshotList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("volumesnapshots"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeSnapshots. +func (c *volumeSnapshots) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("volumesnapshots"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a volumeSnapshot and creates it. Returns the server's representation of the volumeSnapshot, and an error, if there is any. +func (c *volumeSnapshots) Create(volumeSnapshot *v1alpha1.VolumeSnapshot) (result *v1alpha1.VolumeSnapshot, err error) { + result = &v1alpha1.VolumeSnapshot{} + err = c.client.Post(). + Namespace(c.ns). + Resource("volumesnapshots"). + Body(volumeSnapshot). + Do(). + Into(result) + return +} + +// Update takes the representation of a volumeSnapshot and updates it. Returns the server's representation of the volumeSnapshot, and an error, if there is any. +func (c *volumeSnapshots) Update(volumeSnapshot *v1alpha1.VolumeSnapshot) (result *v1alpha1.VolumeSnapshot, err error) { + result = &v1alpha1.VolumeSnapshot{} + err = c.client.Put(). + Namespace(c.ns). + Resource("volumesnapshots"). + Name(volumeSnapshot.Name). + Body(volumeSnapshot). + Do(). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *volumeSnapshots) UpdateStatus(volumeSnapshot *v1alpha1.VolumeSnapshot) (result *v1alpha1.VolumeSnapshot, err error) { + result = &v1alpha1.VolumeSnapshot{} + err = c.client.Put(). + Namespace(c.ns). + Resource("volumesnapshots"). + Name(volumeSnapshot.Name). + SubResource("status"). + Body(volumeSnapshot). + Do(). + Into(result) + return +} + +// Delete takes name of the volumeSnapshot and deletes it. Returns an error if one occurs. +func (c *volumeSnapshots) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("volumesnapshots"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeSnapshots) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("volumesnapshots"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched volumeSnapshot. +func (c *volumeSnapshots) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeSnapshot, err error) { + result = &v1alpha1.VolumeSnapshot{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("volumesnapshots"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshot_client.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshot_client.go new file mode 100644 index 000000000..ba06f6576 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshot_client.go @@ -0,0 +1,100 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/scheme" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" +) + +type SnapshotV1alpha1Interface interface { + RESTClient() rest.Interface + VolumeSnapshotsGetter + VolumeSnapshotClassesGetter + VolumeSnapshotContentsGetter +} + +// SnapshotV1alpha1Client is used to interact with features provided by the snapshot.storage.k8s.io group. 
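As a hedged usage sketch (assuming in-cluster configuration; the namespace and error handling are illustrative, not prescribed by this diff), the typed client defined here is built from a rest.Config and then used through its per-resource getters:

package main

import (
	"fmt"

	snapshotclient "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumes the process runs inside a cluster; out-of-cluster callers would
	// build the rest.Config from a kubeconfig instead.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}

	// NewForConfig fills in the group/version and codec defaults before
	// building the underlying REST client.
	client, err := snapshotclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// List the VolumeSnapshots in an example namespace.
	snaps, err := client.VolumeSnapshots("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, s := range snaps.Items {
		fmt.Println(s.Namespace + "/" + s.Name)
	}
}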
+type SnapshotV1alpha1Client struct { + restClient rest.Interface +} + +func (c *SnapshotV1alpha1Client) VolumeSnapshots(namespace string) VolumeSnapshotInterface { + return newVolumeSnapshots(c, namespace) +} + +func (c *SnapshotV1alpha1Client) VolumeSnapshotClasses() VolumeSnapshotClassInterface { + return newVolumeSnapshotClasses(c) +} + +func (c *SnapshotV1alpha1Client) VolumeSnapshotContents() VolumeSnapshotContentInterface { + return newVolumeSnapshotContents(c) +} + +// NewForConfig creates a new SnapshotV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*SnapshotV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &SnapshotV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new SnapshotV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SnapshotV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SnapshotV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *SnapshotV1alpha1Client { + return &SnapshotV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *SnapshotV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshotclass.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshotclass.go new file mode 100644 index 000000000..8b788df4a --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshotclass.go @@ -0,0 +1,164 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + scheme "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// VolumeSnapshotClassesGetter has a method to return a VolumeSnapshotClassInterface. 
+// A group's client should implement this interface. +type VolumeSnapshotClassesGetter interface { + VolumeSnapshotClasses() VolumeSnapshotClassInterface +} + +// VolumeSnapshotClassInterface has methods to work with VolumeSnapshotClass resources. +type VolumeSnapshotClassInterface interface { + Create(*v1alpha1.VolumeSnapshotClass) (*v1alpha1.VolumeSnapshotClass, error) + Update(*v1alpha1.VolumeSnapshotClass) (*v1alpha1.VolumeSnapshotClass, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.VolumeSnapshotClass, error) + List(opts v1.ListOptions) (*v1alpha1.VolumeSnapshotClassList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeSnapshotClass, err error) + VolumeSnapshotClassExpansion +} + +// volumeSnapshotClasses implements VolumeSnapshotClassInterface +type volumeSnapshotClasses struct { + client rest.Interface +} + +// newVolumeSnapshotClasses returns a VolumeSnapshotClasses +func newVolumeSnapshotClasses(c *SnapshotV1alpha1Client) *volumeSnapshotClasses { + return &volumeSnapshotClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the volumeSnapshotClass, and returns the corresponding volumeSnapshotClass object, and an error if there is any. +func (c *volumeSnapshotClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeSnapshotClass, err error) { + result = &v1alpha1.VolumeSnapshotClass{} + err = c.client.Get(). + Resource("volumesnapshotclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeSnapshotClasses that match those selectors. +func (c *volumeSnapshotClasses) List(opts v1.ListOptions) (result *v1alpha1.VolumeSnapshotClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.VolumeSnapshotClassList{} + err = c.client.Get(). + Resource("volumesnapshotclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeSnapshotClasses. +func (c *volumeSnapshotClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("volumesnapshotclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a volumeSnapshotClass and creates it. Returns the server's representation of the volumeSnapshotClass, and an error, if there is any. +func (c *volumeSnapshotClasses) Create(volumeSnapshotClass *v1alpha1.VolumeSnapshotClass) (result *v1alpha1.VolumeSnapshotClass, err error) { + result = &v1alpha1.VolumeSnapshotClass{} + err = c.client.Post(). + Resource("volumesnapshotclasses"). + Body(volumeSnapshotClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a volumeSnapshotClass and updates it. Returns the server's representation of the volumeSnapshotClass, and an error, if there is any. 
+func (c *volumeSnapshotClasses) Update(volumeSnapshotClass *v1alpha1.VolumeSnapshotClass) (result *v1alpha1.VolumeSnapshotClass, err error) { + result = &v1alpha1.VolumeSnapshotClass{} + err = c.client.Put(). + Resource("volumesnapshotclasses"). + Name(volumeSnapshotClass.Name). + Body(volumeSnapshotClass). + Do(). + Into(result) + return +} + +// Delete takes name of the volumeSnapshotClass and deletes it. Returns an error if one occurs. +func (c *volumeSnapshotClasses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("volumesnapshotclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeSnapshotClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("volumesnapshotclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched volumeSnapshotClass. +func (c *volumeSnapshotClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeSnapshotClass, err error) { + result = &v1alpha1.VolumeSnapshotClass{} + err = c.client.Patch(pt). + Resource("volumesnapshotclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshotcontent.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshotcontent.go new file mode 100644 index 000000000..e393ccdfe --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1/volumesnapshotcontent.go @@ -0,0 +1,164 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + scheme "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// VolumeSnapshotContentsGetter has a method to return a VolumeSnapshotContentInterface. +// A group's client should implement this interface. +type VolumeSnapshotContentsGetter interface { + VolumeSnapshotContents() VolumeSnapshotContentInterface +} + +// VolumeSnapshotContentInterface has methods to work with VolumeSnapshotContent resources. 
+type VolumeSnapshotContentInterface interface { + Create(*v1alpha1.VolumeSnapshotContent) (*v1alpha1.VolumeSnapshotContent, error) + Update(*v1alpha1.VolumeSnapshotContent) (*v1alpha1.VolumeSnapshotContent, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.VolumeSnapshotContent, error) + List(opts v1.ListOptions) (*v1alpha1.VolumeSnapshotContentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeSnapshotContent, err error) + VolumeSnapshotContentExpansion +} + +// volumeSnapshotContents implements VolumeSnapshotContentInterface +type volumeSnapshotContents struct { + client rest.Interface +} + +// newVolumeSnapshotContents returns a VolumeSnapshotContents +func newVolumeSnapshotContents(c *SnapshotV1alpha1Client) *volumeSnapshotContents { + return &volumeSnapshotContents{ + client: c.RESTClient(), + } +} + +// Get takes name of the volumeSnapshotContent, and returns the corresponding volumeSnapshotContent object, and an error if there is any. +func (c *volumeSnapshotContents) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeSnapshotContent, err error) { + result = &v1alpha1.VolumeSnapshotContent{} + err = c.client.Get(). + Resource("volumesnapshotcontents"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeSnapshotContents that match those selectors. +func (c *volumeSnapshotContents) List(opts v1.ListOptions) (result *v1alpha1.VolumeSnapshotContentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.VolumeSnapshotContentList{} + err = c.client.Get(). + Resource("volumesnapshotcontents"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeSnapshotContents. +func (c *volumeSnapshotContents) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("volumesnapshotcontents"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a volumeSnapshotContent and creates it. Returns the server's representation of the volumeSnapshotContent, and an error, if there is any. +func (c *volumeSnapshotContents) Create(volumeSnapshotContent *v1alpha1.VolumeSnapshotContent) (result *v1alpha1.VolumeSnapshotContent, err error) { + result = &v1alpha1.VolumeSnapshotContent{} + err = c.client.Post(). + Resource("volumesnapshotcontents"). + Body(volumeSnapshotContent). + Do(). + Into(result) + return +} + +// Update takes the representation of a volumeSnapshotContent and updates it. Returns the server's representation of the volumeSnapshotContent, and an error, if there is any. +func (c *volumeSnapshotContents) Update(volumeSnapshotContent *v1alpha1.VolumeSnapshotContent) (result *v1alpha1.VolumeSnapshotContent, err error) { + result = &v1alpha1.VolumeSnapshotContent{} + err = c.client.Put(). + Resource("volumesnapshotcontents"). 
+ Name(volumeSnapshotContent.Name). + Body(volumeSnapshotContent). + Do(). + Into(result) + return +} + +// Delete takes name of the volumeSnapshotContent and deletes it. Returns an error if one occurs. +func (c *volumeSnapshotContents) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("volumesnapshotcontents"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeSnapshotContents) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("volumesnapshotcontents"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched volumeSnapshotContent. +func (c *volumeSnapshotContents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeSnapshotContent, err error) { + result = &v1alpha1.VolumeSnapshotContent{} + err = c.client.Patch(pt). + Resource("volumesnapshotcontents"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/factory.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/factory.go new file mode 100644 index 000000000..f01d7993b --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/factory.go @@ -0,0 +1,180 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned" + internalinterfaces "github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/internalinterfaces" + volumesnapshot "github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/volumesnapshot" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. 
+ // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. 
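A short sketch of the intended Start/WaitForCacheSync lifecycle (illustrative only; construction of the clientset and the stop channel is assumed to happen elsewhere):

package example

import (
	"fmt"
	"time"

	versioned "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned"
	snapshotinformers "github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions"
)

// runSnapshotInformers is a hypothetical helper showing the factory lifecycle.
func runSnapshotInformers(client versioned.Interface, stopCh <-chan struct{}) {
	factory := snapshotinformers.NewSharedInformerFactory(client, 10*time.Minute)

	// Requesting an informer registers it with the factory; Start only runs
	// informers that have been requested.
	_ = factory.Snapshot().V1alpha1().VolumeSnapshots().Informer()

	factory.Start(stopCh)

	// Block until every started informer has completed its initial sync.
	for informerType, synced := range factory.WaitForCacheSync(stopCh) {
		if !synced {
			fmt.Printf("cache for %v failed to sync\n", informerType)
		}
	}
}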
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Snapshot() volumesnapshot.Interface +} + +func (f *sharedInformerFactory) Snapshot() volumesnapshot.Interface { + return volumesnapshot.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/generic.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/generic.go new file mode 100644 index 000000000..193942d0e --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/generic.go @@ -0,0 +1,66 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. 
+func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=snapshot.storage.k8s.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("volumesnapshots"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Snapshot().V1alpha1().VolumeSnapshots().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("volumesnapshotclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Snapshot().V1alpha1().VolumeSnapshotClasses().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("volumesnapshotcontents"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Snapshot().V1alpha1().VolumeSnapshotContents().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 000000000..daf0bfe89 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. 
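+// Informer constructors invoke it (when non-nil) on the options of every List
+// and Watch request, which is how label or field selectors get injected.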
+type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/volumesnapshot/interface.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/volumesnapshot/interface.go new file mode 100644 index 000000000..ba406ea94 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/volumesnapshot/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package snapshot + +import ( + internalinterfaces "github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/volumesnapshot/v1alpha1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/volumesnapshot/v1alpha1/interface.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/volumesnapshot/v1alpha1/interface.go new file mode 100644 index 000000000..ded10e911 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/volumesnapshot/v1alpha1/interface.go @@ -0,0 +1,59 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + internalinterfaces "github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // VolumeSnapshots returns a VolumeSnapshotInformer. + VolumeSnapshots() VolumeSnapshotInformer + // VolumeSnapshotClasses returns a VolumeSnapshotClassInformer. + VolumeSnapshotClasses() VolumeSnapshotClassInformer + // VolumeSnapshotContents returns a VolumeSnapshotContentInformer. + VolumeSnapshotContents() VolumeSnapshotContentInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// VolumeSnapshots returns a VolumeSnapshotInformer. +func (v *version) VolumeSnapshots() VolumeSnapshotInformer { + return &volumeSnapshotInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// VolumeSnapshotClasses returns a VolumeSnapshotClassInformer. +func (v *version) VolumeSnapshotClasses() VolumeSnapshotClassInformer { + return &volumeSnapshotClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// VolumeSnapshotContents returns a VolumeSnapshotContentInformer. +func (v *version) VolumeSnapshotContents() VolumeSnapshotContentInformer { + return &volumeSnapshotContentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/volumesnapshot/v1alpha1/volumesnapshot.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/volumesnapshot/v1alpha1/volumesnapshot.go new file mode 100644 index 000000000..babb79b87 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/volumesnapshot/v1alpha1/volumesnapshot.go @@ -0,0 +1,89 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + time "time" + + volumesnapshotv1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + versioned "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned" + internalinterfaces "github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/client/listers/volumesnapshot/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// VolumeSnapshotInformer provides access to a shared informer and lister for +// VolumeSnapshots. +type VolumeSnapshotInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.VolumeSnapshotLister +} + +type volumeSnapshotInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewVolumeSnapshotInformer constructs a new informer for VolumeSnapshot type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVolumeSnapshotInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVolumeSnapshotInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredVolumeSnapshotInformer constructs a new informer for VolumeSnapshot type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
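+// The informer is scoped to the given namespace; passing v1.NamespaceAll (the
+// empty string) lists and watches VolumeSnapshots across all namespaces.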
+func NewFilteredVolumeSnapshotInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SnapshotV1alpha1().VolumeSnapshots(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SnapshotV1alpha1().VolumeSnapshots(namespace).Watch(options) + }, + }, + &volumesnapshotv1alpha1.VolumeSnapshot{}, + resyncPeriod, + indexers, + ) +} + +func (f *volumeSnapshotInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVolumeSnapshotInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *volumeSnapshotInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&volumesnapshotv1alpha1.VolumeSnapshot{}, f.defaultInformer) +} + +func (f *volumeSnapshotInformer) Lister() v1alpha1.VolumeSnapshotLister { + return v1alpha1.NewVolumeSnapshotLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/volumesnapshot/v1alpha1/volumesnapshotclass.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/volumesnapshot/v1alpha1/volumesnapshotclass.go new file mode 100644 index 000000000..0026e5572 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/volumesnapshot/v1alpha1/volumesnapshotclass.go @@ -0,0 +1,88 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + volumesnapshotv1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + versioned "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned" + internalinterfaces "github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/client/listers/volumesnapshot/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// VolumeSnapshotClassInformer provides access to a shared informer and lister for +// VolumeSnapshotClasses. 
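+// VolumeSnapshotClass is a cluster-scoped resource, so unlike the
+// VolumeSnapshot informer this one carries no namespace.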
+type VolumeSnapshotClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.VolumeSnapshotClassLister +} + +type volumeSnapshotClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewVolumeSnapshotClassInformer constructs a new informer for VolumeSnapshotClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVolumeSnapshotClassInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVolumeSnapshotClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredVolumeSnapshotClassInformer constructs a new informer for VolumeSnapshotClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredVolumeSnapshotClassInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SnapshotV1alpha1().VolumeSnapshotClasses().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SnapshotV1alpha1().VolumeSnapshotClasses().Watch(options) + }, + }, + &volumesnapshotv1alpha1.VolumeSnapshotClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *volumeSnapshotClassInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVolumeSnapshotClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *volumeSnapshotClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&volumesnapshotv1alpha1.VolumeSnapshotClass{}, f.defaultInformer) +} + +func (f *volumeSnapshotClassInformer) Lister() v1alpha1.VolumeSnapshotClassLister { + return v1alpha1.NewVolumeSnapshotClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/volumesnapshot/v1alpha1/volumesnapshotcontent.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/volumesnapshot/v1alpha1/volumesnapshotcontent.go new file mode 100644 index 000000000..2b34344d5 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/volumesnapshot/v1alpha1/volumesnapshotcontent.go @@ -0,0 +1,88 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + volumesnapshotv1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + versioned "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned" + internalinterfaces "github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/client/listers/volumesnapshot/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// VolumeSnapshotContentInformer provides access to a shared informer and lister for +// VolumeSnapshotContents. +type VolumeSnapshotContentInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.VolumeSnapshotContentLister +} + +type volumeSnapshotContentInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewVolumeSnapshotContentInformer constructs a new informer for VolumeSnapshotContent type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVolumeSnapshotContentInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVolumeSnapshotContentInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredVolumeSnapshotContentInformer constructs a new informer for VolumeSnapshotContent type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredVolumeSnapshotContentInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SnapshotV1alpha1().VolumeSnapshotContents().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SnapshotV1alpha1().VolumeSnapshotContents().Watch(options) + }, + }, + &volumesnapshotv1alpha1.VolumeSnapshotContent{}, + resyncPeriod, + indexers, + ) +} + +func (f *volumeSnapshotContentInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVolumeSnapshotContentInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *volumeSnapshotContentInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&volumesnapshotv1alpha1.VolumeSnapshotContent{}, f.defaultInformer) +} + +func (f *volumeSnapshotContentInformer) Lister() v1alpha1.VolumeSnapshotContentLister { + return v1alpha1.NewVolumeSnapshotContentLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/listers/volumesnapshot/v1alpha1/expansion_generated.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/listers/volumesnapshot/v1alpha1/expansion_generated.go new file mode 100644 index 000000000..f92624ed7 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/listers/volumesnapshot/v1alpha1/expansion_generated.go @@ -0,0 +1,35 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// VolumeSnapshotListerExpansion allows custom methods to be added to +// VolumeSnapshotLister. +type VolumeSnapshotListerExpansion interface{} + +// VolumeSnapshotNamespaceListerExpansion allows custom methods to be added to +// VolumeSnapshotNamespaceLister. +type VolumeSnapshotNamespaceListerExpansion interface{} + +// VolumeSnapshotClassListerExpansion allows custom methods to be added to +// VolumeSnapshotClassLister. +type VolumeSnapshotClassListerExpansion interface{} + +// VolumeSnapshotContentListerExpansion allows custom methods to be added to +// VolumeSnapshotContentLister. 
+type VolumeSnapshotContentListerExpansion interface{} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/listers/volumesnapshot/v1alpha1/volumesnapshot.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/listers/volumesnapshot/v1alpha1/volumesnapshot.go new file mode 100644 index 000000000..ffa8f16d8 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/listers/volumesnapshot/v1alpha1/volumesnapshot.go @@ -0,0 +1,94 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// VolumeSnapshotLister helps list VolumeSnapshots. +type VolumeSnapshotLister interface { + // List lists all VolumeSnapshots in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.VolumeSnapshot, err error) + // VolumeSnapshots returns an object that can list and get VolumeSnapshots. + VolumeSnapshots(namespace string) VolumeSnapshotNamespaceLister + VolumeSnapshotListerExpansion +} + +// volumeSnapshotLister implements the VolumeSnapshotLister interface. +type volumeSnapshotLister struct { + indexer cache.Indexer +} + +// NewVolumeSnapshotLister returns a new VolumeSnapshotLister. +func NewVolumeSnapshotLister(indexer cache.Indexer) VolumeSnapshotLister { + return &volumeSnapshotLister{indexer: indexer} +} + +// List lists all VolumeSnapshots in the indexer. +func (s *volumeSnapshotLister) List(selector labels.Selector) (ret []*v1alpha1.VolumeSnapshot, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.VolumeSnapshot)) + }) + return ret, err +} + +// VolumeSnapshots returns an object that can list and get VolumeSnapshots. +func (s *volumeSnapshotLister) VolumeSnapshots(namespace string) VolumeSnapshotNamespaceLister { + return volumeSnapshotNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// VolumeSnapshotNamespaceLister helps list and get VolumeSnapshots. +type VolumeSnapshotNamespaceLister interface { + // List lists all VolumeSnapshots in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.VolumeSnapshot, err error) + // Get retrieves the VolumeSnapshot from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.VolumeSnapshot, error) + VolumeSnapshotNamespaceListerExpansion +} + +// volumeSnapshotNamespaceLister implements the VolumeSnapshotNamespaceLister +// interface. +type volumeSnapshotNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all VolumeSnapshots in the indexer for a given namespace. 
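+// The selector is evaluated against the lister's local cache only; no request
+// is sent to the API server.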
+func (s volumeSnapshotNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.VolumeSnapshot, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.VolumeSnapshot)) + }) + return ret, err +} + +// Get retrieves the VolumeSnapshot from the indexer for a given namespace and name. +func (s volumeSnapshotNamespaceLister) Get(name string) (*v1alpha1.VolumeSnapshot, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("volumesnapshot"), name) + } + return obj.(*v1alpha1.VolumeSnapshot), nil +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/listers/volumesnapshot/v1alpha1/volumesnapshotclass.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/listers/volumesnapshot/v1alpha1/volumesnapshotclass.go new file mode 100644 index 000000000..505d397bf --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/listers/volumesnapshot/v1alpha1/volumesnapshotclass.go @@ -0,0 +1,65 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// VolumeSnapshotClassLister helps list VolumeSnapshotClasses. +type VolumeSnapshotClassLister interface { + // List lists all VolumeSnapshotClasses in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.VolumeSnapshotClass, err error) + // Get retrieves the VolumeSnapshotClass from the index for a given name. + Get(name string) (*v1alpha1.VolumeSnapshotClass, error) + VolumeSnapshotClassListerExpansion +} + +// volumeSnapshotClassLister implements the VolumeSnapshotClassLister interface. +type volumeSnapshotClassLister struct { + indexer cache.Indexer +} + +// NewVolumeSnapshotClassLister returns a new VolumeSnapshotClassLister. +func NewVolumeSnapshotClassLister(indexer cache.Indexer) VolumeSnapshotClassLister { + return &volumeSnapshotClassLister{indexer: indexer} +} + +// List lists all VolumeSnapshotClasses in the indexer. +func (s *volumeSnapshotClassLister) List(selector labels.Selector) (ret []*v1alpha1.VolumeSnapshotClass, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.VolumeSnapshotClass)) + }) + return ret, err +} + +// Get retrieves the VolumeSnapshotClass from the index for a given name. 
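+// A NotFound API error is returned when the object is absent from the cache,
+// so callers can distinguish a missing object via errors.IsNotFound.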
+func (s *volumeSnapshotClassLister) Get(name string) (*v1alpha1.VolumeSnapshotClass, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("volumesnapshotclass"), name) + } + return obj.(*v1alpha1.VolumeSnapshotClass), nil +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/listers/volumesnapshot/v1alpha1/volumesnapshotcontent.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/listers/volumesnapshot/v1alpha1/volumesnapshotcontent.go new file mode 100644 index 000000000..97d4e90aa --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/client/listers/volumesnapshot/v1alpha1/volumesnapshotcontent.go @@ -0,0 +1,65 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// VolumeSnapshotContentLister helps list VolumeSnapshotContents. +type VolumeSnapshotContentLister interface { + // List lists all VolumeSnapshotContents in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.VolumeSnapshotContent, err error) + // Get retrieves the VolumeSnapshotContent from the index for a given name. + Get(name string) (*v1alpha1.VolumeSnapshotContent, error) + VolumeSnapshotContentListerExpansion +} + +// volumeSnapshotContentLister implements the VolumeSnapshotContentLister interface. +type volumeSnapshotContentLister struct { + indexer cache.Indexer +} + +// NewVolumeSnapshotContentLister returns a new VolumeSnapshotContentLister. +func NewVolumeSnapshotContentLister(indexer cache.Indexer) VolumeSnapshotContentLister { + return &volumeSnapshotContentLister{indexer: indexer} +} + +// List lists all VolumeSnapshotContents in the indexer. +func (s *volumeSnapshotContentLister) List(selector labels.Selector) (ret []*v1alpha1.VolumeSnapshotContent, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.VolumeSnapshotContent)) + }) + return ret, err +} + +// Get retrieves the VolumeSnapshotContent from the index for a given name. 
+func (s *volumeSnapshotContentLister) Get(name string) (*v1alpha1.VolumeSnapshotContent, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("volumesnapshotcontent"), name) + } + return obj.(*v1alpha1.VolumeSnapshotContent), nil +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/csi_handler.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/csi_handler.go new file mode 100644 index 000000000..2abee0978 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/csi_handler.go @@ -0,0 +1,118 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "strings" + "time" + + crdv1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + "github.com/kubernetes-csi/external-snapshotter/pkg/snapshotter" + + "k8s.io/api/core/v1" +) + +// Handler is responsible for handling VolumeSnapshot events from informer. +type Handler interface { + CreateSnapshot(snapshot *crdv1.VolumeSnapshot, volume *v1.PersistentVolume, parameters map[string]string, snapshotterCredentials map[string]string) (string, string, int64, int64, bool, error) + DeleteSnapshot(content *crdv1.VolumeSnapshotContent, snapshotterCredentials map[string]string) error + GetSnapshotStatus(content *crdv1.VolumeSnapshotContent) (bool, int64, int64, error) +} + +// csiHandler is a handler that calls CSI to create/delete volume snapshot. 
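+// Each method derives a context bounded by the configured timeout and
+// delegates the actual CSI call to the wrapped snapshotter.Snapshotter.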
+type csiHandler struct { + snapshotter snapshotter.Snapshotter + timeout time.Duration + snapshotNamePrefix string + snapshotNameUUIDLength int +} + +// NewCSIHandler returns a handler which includes the csi connection and Snapshot name details +func NewCSIHandler( + snapshotter snapshotter.Snapshotter, + timeout time.Duration, + snapshotNamePrefix string, + snapshotNameUUIDLength int, +) Handler { + return &csiHandler{ + snapshotter: snapshotter, + timeout: timeout, + snapshotNamePrefix: snapshotNamePrefix, + snapshotNameUUIDLength: snapshotNameUUIDLength, + } +} + +func (handler *csiHandler) CreateSnapshot(snapshot *crdv1.VolumeSnapshot, volume *v1.PersistentVolume, parameters map[string]string, snapshotterCredentials map[string]string) (string, string, int64, int64, bool, error) { + + ctx, cancel := context.WithTimeout(context.Background(), handler.timeout) + defer cancel() + + snapshotName, err := makeSnapshotName(handler.snapshotNamePrefix, string(snapshot.UID), handler.snapshotNameUUIDLength) + if err != nil { + return "", "", 0, 0, false, err + } + newParameters, err := removePrefixedParameters(parameters) + if err != nil { + return "", "", 0, 0, false, fmt.Errorf("failed to remove CSI Parameters of prefixed keys: %v", err) + } + return handler.snapshotter.CreateSnapshot(ctx, snapshotName, volume, newParameters, snapshotterCredentials) +} + +func (handler *csiHandler) DeleteSnapshot(content *crdv1.VolumeSnapshotContent, snapshotterCredentials map[string]string) error { + if content.Spec.CSI == nil { + return fmt.Errorf("CSISnapshot not defined in spec") + } + ctx, cancel := context.WithTimeout(context.Background(), handler.timeout) + defer cancel() + + err := handler.snapshotter.DeleteSnapshot(ctx, content.Spec.CSI.SnapshotHandle, snapshotterCredentials) + if err != nil { + return fmt.Errorf("failed to delete snapshot content %s: %q", content.Name, err) + } + + return nil +} + +func (handler *csiHandler) GetSnapshotStatus(content *crdv1.VolumeSnapshotContent) (bool, int64, int64, error) { + if content.Spec.CSI == nil { + return false, 0, 0, fmt.Errorf("CSISnapshot not defined in spec") + } + ctx, cancel := context.WithTimeout(context.Background(), handler.timeout) + defer cancel() + + csiSnapshotStatus, timestamp, size, err := handler.snapshotter.GetSnapshotStatus(ctx, content.Spec.CSI.SnapshotHandle) + if err != nil { + return false, 0, 0, fmt.Errorf("failed to list snapshot content %s: %q", content.Name, err) + } + + return csiSnapshotStatus, timestamp, size, nil +} + +func makeSnapshotName(prefix, snapshotUID string, snapshotNameUUIDLength int) (string, error) { + // create persistent name based on a volumeNamePrefix and volumeNameUUIDLength + // of PVC's UID + if len(snapshotUID) == 0 { + return "", fmt.Errorf("Corrupted snapshot object, it is missing UID") + } + if snapshotNameUUIDLength == -1 { + // Default behavior is to not truncate or remove dashes + return fmt.Sprintf("%s-%s", prefix, snapshotUID), nil + } + return fmt.Sprintf("%s-%s", prefix, strings.Replace(snapshotUID, "-", "", -1)[0:snapshotNameUUIDLength]), nil +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/framework_test.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/framework_test.go new file mode 100644 index 000000000..01e3541f3 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/framework_test.go @@ -0,0 +1,1413 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "errors" + "fmt" + "reflect" + sysruntime "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + crdv1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + clientset "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned" + "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/fake" + snapshotscheme "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned/scheme" + informers "github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions" + storagelisters "github.com/kubernetes-csi/external-snapshotter/pkg/client/listers/volumesnapshot/v1alpha1" + "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + storagev1beta1 "k8s.io/api/storage/v1beta1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + coreinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + kubefake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/kubernetes/scheme" + corelisters "k8s.io/client-go/listers/core/v1" + core "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/klog" + "k8s.io/kubernetes/pkg/util/slice" +) + +// This is a unit test framework for snapshot controller. +// It fills the controller with test snapshots/contents and can simulate these +// scenarios: +// 1) Call syncSnapshot/syncContent once. +// 2) Call syncSnapshot/syncContent several times (both simulating "snapshot/content +// modified" events and periodic sync), until the controller settles down and +// does not modify anything. +// 3) Simulate almost real API server/etcd and call add/update/delete +// content/snapshot. +// In all these scenarios, when the test finishes, the framework can compare +// resulting snapshots/contents with list of expected snapshots/contents and report +// differences. + +// controllerTest contains a single controller test input. +// Each test has initial set of contents and snapshots that are filled into the +// controller before the test starts. The test then contains a reference to +// function to call as the actual test. Available functions are: +// - testSyncSnapshot - calls syncSnapshot on the first snapshot in initialSnapshots. +// - testSyncSnapshotError - calls syncSnapshot on the first snapshot in initialSnapshots +// and expects an error to be returned. +// - testSyncContent - calls syncContent on the first content in initialContents. +// - any custom function for specialized tests. +// The test then contains list of contents/snapshots that are expected at the end +// of the test and list of generated events. 
+type controllerTest struct { + // Name of the test, for logging + name string + // Initial content of controller content cache. + initialContents []*crdv1.VolumeSnapshotContent + // Expected content of controller content cache at the end of the test. + expectedContents []*crdv1.VolumeSnapshotContent + // Initial content of controller snapshot cache. + initialSnapshots []*crdv1.VolumeSnapshot + // Expected content of controller snapshot cache at the end of the test. + expectedSnapshots []*crdv1.VolumeSnapshot + // Initial content of controller volume cache. + initialVolumes []*v1.PersistentVolume + // Initial content of controller claim cache. + initialClaims []*v1.PersistentVolumeClaim + // Initial content of controller StorageClass cache. + initialStorageClasses []*storagev1.StorageClass + // Initial content of controller Secret cache. + initialSecrets []*v1.Secret + // Expected events - any event with prefix will pass, we don't check full + // event message. + expectedEvents []string + // Errors to produce on matching action + errors []reactorError + // List of expected CSI Create snapshot calls + expectedCreateCalls []createCall + // List of expected CSI Delete snapshot calls + expectedDeleteCalls []deleteCall + // List of expected CSI list snapshot calls + expectedListCalls []listCall + // Function to call as the test. + test testCall + expectSuccess bool +} + +type testCall func(ctrl *csiSnapshotController, reactor *snapshotReactor, test controllerTest) error + +const testNamespace = "default" +const mockDriverName = "csi-mock-plugin" + +var errVersionConflict = errors.New("VersionError") +var nocontents []*crdv1.VolumeSnapshotContent +var nosnapshots []*crdv1.VolumeSnapshot +var noevents = []string{} +var noerrors = []reactorError{} + +// snapshotReactor is a core.Reactor that simulates etcd and API server. It +// stores: +// - Latest version of snapshots contents saved by the controller. +// - Queue of all saves (to simulate "content/snapshot updated" events). This queue +// contains all intermediate state of an object - e.g. a snapshot.VolumeName +// is updated first and snapshot.Phase second. This queue will then contain both +// updates as separate entries. +// - Number of changes since the last call to snapshotReactor.syncAll(). +// - Optionally, content and snapshot fake watchers which should be the same ones +// used by the controller. Any time an event function like deleteContentEvent +// is called to simulate an event, the reactor's stores are updated and the +// controller is sent the event via the fake watcher. +// - Optionally, list of error that should be returned by reactor, simulating +// etcd / API server failures. These errors are evaluated in order and every +// error is returned only once. I.e. when the reactor finds matching +// reactorError, it return appropriate error and removes the reactorError from +// the list. 
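+// All reactor maps are keyed by object name and guarded by lock; tests
+// normally inspect them through checkContents/checkSnapshots rather than
+// reading the maps directly.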
+type snapshotReactor struct { + secrets map[string]*v1.Secret + storageClasses map[string]*storagev1.StorageClass + volumes map[string]*v1.PersistentVolume + claims map[string]*v1.PersistentVolumeClaim + contents map[string]*crdv1.VolumeSnapshotContent + snapshots map[string]*crdv1.VolumeSnapshot + changedObjects []interface{} + changedSinceLastSync int + ctrl *csiSnapshotController + fakeContentWatch *watch.FakeWatcher + fakeSnapshotWatch *watch.FakeWatcher + lock sync.Mutex + errors []reactorError +} + +// reactorError is an error that is returned by test reactor (=simulated +// etcd+/API server) when an action performed by the reactor matches given verb +// ("get", "update", "create", "delete" or "*"") on given resource +// ("volumesnapshotcontents", "volumesnapshots" or "*"). +type reactorError struct { + verb string + resource string + error error +} + +func withSnapshotFinalizer(snapshot *crdv1.VolumeSnapshot) *crdv1.VolumeSnapshot { + snapshot.ObjectMeta.Finalizers = append(snapshot.ObjectMeta.Finalizers, VolumeSnapshotFinalizer) + return snapshot +} + +func withContentFinalizer(content *crdv1.VolumeSnapshotContent) *crdv1.VolumeSnapshotContent { + content.ObjectMeta.Finalizers = append(content.ObjectMeta.Finalizers, VolumeSnapshotContentFinalizer) + return content +} + +func withPVCFinalizer(pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim { + pvc.ObjectMeta.Finalizers = append(pvc.ObjectMeta.Finalizers, PVCFinalizer) + return pvc +} + +// React is a callback called by fake kubeClient from the controller. +// In other words, every snapshot/content change performed by the controller ends +// here. +// This callback checks versions of the updated objects and refuse those that +// are too old (simulating real etcd). +// All updated objects are stored locally to keep track of object versions and +// to evaluate test results. +// All updated objects are also inserted into changedObjects queue and +// optionally sent back to the controller via its watchers. +func (r *snapshotReactor) React(action core.Action) (handled bool, ret runtime.Object, err error) { + r.lock.Lock() + defer r.lock.Unlock() + + klog.V(4).Infof("reactor got operation %q on %q", action.GetVerb(), action.GetResource()) + + // Inject error when requested + err = r.injectReactError(action) + if err != nil { + return true, nil, err + } + + // Test did not request to inject an error, continue simulating API server. + switch { + case action.Matches("create", "volumesnapshotcontents"): + obj := action.(core.UpdateAction).GetObject() + content := obj.(*crdv1.VolumeSnapshotContent) + + // check the content does not exist + _, found := r.contents[content.Name] + if found { + return true, nil, fmt.Errorf("cannot create content %s: content already exists", content.Name) + } + + // Store the updated object to appropriate places. 
+ r.contents[content.Name] = content + r.changedObjects = append(r.changedObjects, content) + r.changedSinceLastSync++ + klog.V(5).Infof("created content %s", content.Name) + return true, content, nil + + case action.Matches("update", "volumesnapshotcontents"): + obj := action.(core.UpdateAction).GetObject() + content := obj.(*crdv1.VolumeSnapshotContent) + + // Check and bump object version + storedVolume, found := r.contents[content.Name] + if found { + storedVer, _ := strconv.Atoi(storedVolume.ResourceVersion) + requestedVer, _ := strconv.Atoi(content.ResourceVersion) + if storedVer != requestedVer { + return true, obj, errVersionConflict + } + // Don't modify the existing object + content = content.DeepCopy() + content.ResourceVersion = strconv.Itoa(storedVer + 1) + } else { + return true, nil, fmt.Errorf("cannot update content %s: content not found", content.Name) + } + + // Store the updated object to appropriate places. + r.contents[content.Name] = content + r.changedObjects = append(r.changedObjects, content) + r.changedSinceLastSync++ + klog.V(4).Infof("saved updated content %s", content.Name) + return true, content, nil + + case action.Matches("update", "volumesnapshots"): + obj := action.(core.UpdateAction).GetObject() + snapshot := obj.(*crdv1.VolumeSnapshot) + + // Check and bump object version + storedSnapshot, found := r.snapshots[snapshot.Name] + if found { + storedVer, _ := strconv.Atoi(storedSnapshot.ResourceVersion) + requestedVer, _ := strconv.Atoi(snapshot.ResourceVersion) + if storedVer != requestedVer { + return true, obj, errVersionConflict + } + // Don't modify the existing object + snapshot = snapshot.DeepCopy() + snapshot.ResourceVersion = strconv.Itoa(storedVer + 1) + } else { + return true, nil, fmt.Errorf("cannot update snapshot %s: snapshot not found", snapshot.Name) + } + + // Store the updated object to appropriate places. 
+ r.snapshots[snapshot.Name] = snapshot + r.changedObjects = append(r.changedObjects, snapshot) + r.changedSinceLastSync++ + klog.V(4).Infof("saved updated snapshot %s", snapshot.Name) + return true, snapshot, nil + + case action.Matches("get", "volumesnapshotcontents"): + name := action.(core.GetAction).GetName() + content, found := r.contents[name] + if found { + klog.V(4).Infof("GetVolume: found %s", content.Name) + return true, content, nil + } + klog.V(4).Infof("GetVolume: content %s not found", name) + return true, nil, fmt.Errorf("cannot find content %s", name) + + case action.Matches("get", "volumesnapshots"): + name := action.(core.GetAction).GetName() + snapshot, found := r.snapshots[name] + if found { + klog.V(4).Infof("GetSnapshot: found %s", snapshot.Name) + return true, snapshot, nil + } + klog.V(4).Infof("GetSnapshot: content %s not found", name) + return true, nil, fmt.Errorf("cannot find snapshot %s", name) + + case action.Matches("delete", "volumesnapshotcontents"): + name := action.(core.DeleteAction).GetName() + klog.V(4).Infof("deleted content %s", name) + _, found := r.contents[name] + if found { + delete(r.contents, name) + r.changedSinceLastSync++ + return true, nil, nil + } + return true, nil, fmt.Errorf("cannot delete content %s: not found", name) + + case action.Matches("delete", "volumesnapshots"): + name := action.(core.DeleteAction).GetName() + klog.V(4).Infof("deleted snapshot %s", name) + _, found := r.contents[name] + if found { + delete(r.snapshots, name) + r.changedSinceLastSync++ + return true, nil, nil + } + return true, nil, fmt.Errorf("cannot delete snapshot %s: not found", name) + + case action.Matches("get", "persistentvolumes"): + name := action.(core.GetAction).GetName() + volume, found := r.volumes[name] + if found { + klog.V(4).Infof("GetVolume: found %s", volume.Name) + return true, volume, nil + } + klog.V(4).Infof("GetVolume: volume %s not found", name) + return true, nil, fmt.Errorf("cannot find volume %s", name) + + case action.Matches("get", "persistentvolumeclaims"): + name := action.(core.GetAction).GetName() + claim, found := r.claims[name] + if found { + klog.V(4).Infof("GetClaim: found %s", claim.Name) + return true, claim, nil + } + klog.V(4).Infof("GetClaim: claim %s not found", name) + return true, nil, fmt.Errorf("cannot find claim %s", name) + + case action.Matches("update", "persistentvolumeclaims"): + obj := action.(core.UpdateAction).GetObject() + claim := obj.(*v1.PersistentVolumeClaim) + + // Check and bump object version + storedClaim, found := r.claims[claim.Name] + if found { + storedVer, _ := strconv.Atoi(storedClaim.ResourceVersion) + requestedVer, _ := strconv.Atoi(claim.ResourceVersion) + if storedVer != requestedVer { + return true, obj, errVersionConflict + } + // Don't modify the existing object + claim = claim.DeepCopy() + claim.ResourceVersion = strconv.Itoa(storedVer + 1) + } else { + return true, nil, fmt.Errorf("cannot update claim %s: claim not found", claim.Name) + } + + // Store the updated object to appropriate places. 
+ r.claims[claim.Name] = claim + r.changedObjects = append(r.changedObjects, claim) + r.changedSinceLastSync++ + klog.V(4).Infof("saved updated claim %s", claim.Name) + return true, claim, nil + + case action.Matches("get", "storageclasses"): + name := action.(core.GetAction).GetName() + storageClass, found := r.storageClasses[name] + if found { + klog.V(4).Infof("GetStorageClass: found %s", storageClass.Name) + return true, storageClass, nil + } + klog.V(4).Infof("GetStorageClass: storageClass %s not found", name) + return true, nil, fmt.Errorf("cannot find storageClass %s", name) + + case action.Matches("get", "secrets"): + name := action.(core.GetAction).GetName() + secret, found := r.secrets[name] + if found { + klog.V(4).Infof("GetSecret: found %s", secret.Name) + return true, secret, nil + } + klog.V(4).Infof("GetSecret: secret %s not found", name) + return true, nil, fmt.Errorf("cannot find secret %s", name) + + } + + return false, nil, nil +} + +// injectReactError returns an error when the test requested given action to +// fail. nil is returned otherwise. +func (r *snapshotReactor) injectReactError(action core.Action) error { + if len(r.errors) == 0 { + // No more errors to inject, everything should succeed. + return nil + } + + for i, expected := range r.errors { + klog.V(4).Infof("trying to match %q %q with %q %q", expected.verb, expected.resource, action.GetVerb(), action.GetResource()) + if action.Matches(expected.verb, expected.resource) { + // That's the action we're waiting for, remove it from injectedErrors + r.errors = append(r.errors[:i], r.errors[i+1:]...) + klog.V(4).Infof("reactor found matching error at index %d: %q %q, returning %v", i, expected.verb, expected.resource, expected.error) + return expected.error + } + } + return nil +} + +// checkContents compares all expectedContents with set of contents at the end of +// the test and reports differences. +func (r *snapshotReactor) checkContents(expectedContents []*crdv1.VolumeSnapshotContent) error { + r.lock.Lock() + defer r.lock.Unlock() + + expectedMap := make(map[string]*crdv1.VolumeSnapshotContent) + gotMap := make(map[string]*crdv1.VolumeSnapshotContent) + // Clear any ResourceVersion from both sets + for _, v := range expectedContents { + // Don't modify the existing object + v := v.DeepCopy() + v.ResourceVersion = "" + if v.Spec.VolumeSnapshotRef != nil { + v.Spec.VolumeSnapshotRef.ResourceVersion = "" + } + if v.Spec.PersistentVolumeRef != nil { + v.Spec.PersistentVolumeRef.ResourceVersion = "" + } + if v.Spec.CSI != nil { + v.Spec.CSI.CreationTime = nil + } + expectedMap[v.Name] = v + } + for _, v := range r.contents { + // We must clone the content because of golang race check - it was + // written by the controller without any locks on it. + v := v.DeepCopy() + v.ResourceVersion = "" + if v.Spec.VolumeSnapshotRef != nil { + v.Spec.VolumeSnapshotRef.ResourceVersion = "" + } + if v.Spec.PersistentVolumeRef != nil { + v.Spec.PersistentVolumeRef.ResourceVersion = "" + } + if v.Spec.CSI != nil { + v.Spec.CSI.CreationTime = nil + } + gotMap[v.Name] = v + } + if !reflect.DeepEqual(expectedMap, gotMap) { + // Print ugly but useful diff of expected and received objects for + // easier debugging. + return fmt.Errorf("content check failed [A-expected, B-got]: %s", diff.ObjectDiff(expectedMap, gotMap)) + } + return nil +} + +// checkSnapshots compares all expectedSnapshots with set of snapshots at the end of the +// test and reports differences. 
+func (r *snapshotReactor) checkSnapshots(expectedSnapshots []*crdv1.VolumeSnapshot) error { + r.lock.Lock() + defer r.lock.Unlock() + + expectedMap := make(map[string]*crdv1.VolumeSnapshot) + gotMap := make(map[string]*crdv1.VolumeSnapshot) + for _, c := range expectedSnapshots { + // Don't modify the existing object + c = c.DeepCopy() + c.ResourceVersion = "" + if c.Status.Error != nil { + c.Status.Error.Time = metav1.Time{} + } + expectedMap[c.Name] = c + } + for _, c := range r.snapshots { + // We must clone the snapshot because of golang race check - it was + // written by the controller without any locks on it. + c = c.DeepCopy() + c.ResourceVersion = "" + if c.Status.Error != nil { + c.Status.Error.Time = metav1.Time{} + } + gotMap[c.Name] = c + } + if !reflect.DeepEqual(expectedMap, gotMap) { + // Print ugly but useful diff of expected and received objects for + // easier debugging. + return fmt.Errorf("snapshot check failed [A-expected, B-got result]: %s", diff.ObjectDiff(expectedMap, gotMap)) + } + return nil +} + +// checkEvents compares all expectedEvents with events generated during the test +// and reports differences. +func checkEvents(t *testing.T, expectedEvents []string, ctrl *csiSnapshotController) error { + var err error + + // Read recorded events - wait up to 1 minute to get all the expected ones + // (just in case some goroutines are slower with writing) + timer := time.NewTimer(time.Minute) + defer timer.Stop() + + fakeRecorder := ctrl.eventRecorder.(*record.FakeRecorder) + gotEvents := []string{} + finished := false + for len(gotEvents) < len(expectedEvents) && !finished { + select { + case event, ok := <-fakeRecorder.Events: + if ok { + klog.V(5).Infof("event recorder got event %s", event) + gotEvents = append(gotEvents, event) + } else { + klog.V(5).Infof("event recorder finished") + finished = true + } + case _, _ = <-timer.C: + klog.V(5).Infof("event recorder timeout") + finished = true + } + } + + // Evaluate the events + for i, expected := range expectedEvents { + if len(gotEvents) <= i { + t.Errorf("Event %q not emitted", expected) + err = fmt.Errorf("Events do not match") + continue + } + received := gotEvents[i] + if !strings.HasPrefix(received, expected) { + t.Errorf("Unexpected event received, expected %q, got %q", expected, received) + err = fmt.Errorf("Events do not match") + } + } + for i := len(expectedEvents); i < len(gotEvents); i++ { + t.Errorf("Unexpected event received: %q", gotEvents[i]) + err = fmt.Errorf("Events do not match") + } + return err +} + +// popChange returns one recorded updated object, either *crdv1.VolumeSnapshotContent +// or *crdv1.VolumeSnapshot. Returns nil when there are no changes. +func (r *snapshotReactor) popChange() interface{} { + r.lock.Lock() + defer r.lock.Unlock() + + if len(r.changedObjects) == 0 { + return nil + } + + // For debugging purposes, print the queue + for _, obj := range r.changedObjects { + switch obj.(type) { + case *crdv1.VolumeSnapshotContent: + vol, _ := obj.(*crdv1.VolumeSnapshotContent) + klog.V(4).Infof("reactor queue: %s", vol.Name) + case *crdv1.VolumeSnapshot: + snapshot, _ := obj.(*crdv1.VolumeSnapshot) + klog.V(4).Infof("reactor queue: %s", snapshot.Name) + } + } + + // Pop the first item from the queue and return it + obj := r.changedObjects[0] + r.changedObjects = r.changedObjects[1:] + return obj +} + +// syncAll simulates the controller periodic sync of contents and snapshot. It +// simply adds all these objects to the internal queue of updates. 
This method +// should be used when the test manually calls syncSnapshot/syncContent. Test that +// use real controller loop (ctrl.Run()) will get periodic sync automatically. +func (r *snapshotReactor) syncAll() { + r.lock.Lock() + defer r.lock.Unlock() + + for _, c := range r.snapshots { + r.changedObjects = append(r.changedObjects, c) + } + for _, v := range r.contents { + r.changedObjects = append(r.changedObjects, v) + } + for _, pvc := range r.claims { + r.changedObjects = append(r.changedObjects, pvc) + } + r.changedSinceLastSync = 0 +} + +func (r *snapshotReactor) getChangeCount() int { + r.lock.Lock() + defer r.lock.Unlock() + return r.changedSinceLastSync +} + +// waitForIdle waits until all tests, controllers and other goroutines do their +// job and no new actions are registered for 10 milliseconds. +func (r *snapshotReactor) waitForIdle() { + r.ctrl.runningOperations.WaitForCompletion() + // Check every 10ms if the controller does something and stop if it's + // idle. + oldChanges := -1 + for { + time.Sleep(10 * time.Millisecond) + changes := r.getChangeCount() + if changes == oldChanges { + // No changes for last 10ms -> controller must be idle. + break + } + oldChanges = changes + } +} + +// waitTest waits until all tests, controllers and other goroutines do their +// job and list of current contents/snapshots is equal to list of expected +// contents/snapshots (with ~10 second timeout). +func (r *snapshotReactor) waitTest(test controllerTest) error { + // start with 10 ms, multiply by 2 each step, 10 steps = 10.23 seconds + backoff := wait.Backoff{ + Duration: 10 * time.Millisecond, + Jitter: 0, + Factor: 2, + Steps: 10, + } + err := wait.ExponentialBackoff(backoff, func() (done bool, err error) { + // Finish all operations that are in progress + r.ctrl.runningOperations.WaitForCompletion() + + // Return 'true' if the reactor reached the expected state + err1 := r.checkSnapshots(test.expectedSnapshots) + err2 := r.checkContents(test.expectedContents) + if err1 == nil && err2 == nil { + return true, nil + } + return false, nil + }) + return err +} + +// deleteContentEvent simulates that a content has been deleted in etcd and +// the controller receives 'content deleted' event. +func (r *snapshotReactor) deleteContentEvent(content *crdv1.VolumeSnapshotContent) { + r.lock.Lock() + defer r.lock.Unlock() + + // Remove the content from list of resulting contents. + delete(r.contents, content.Name) + + // Generate deletion event. Cloned content is needed to prevent races (and we + // would get a clone from etcd too). + if r.fakeContentWatch != nil { + r.fakeContentWatch.Delete(content.DeepCopy()) + } +} + +// deleteSnapshotEvent simulates that a snapshot has been deleted in etcd and the +// controller receives 'snapshot deleted' event. +func (r *snapshotReactor) deleteSnapshotEvent(snapshot *crdv1.VolumeSnapshot) { + r.lock.Lock() + defer r.lock.Unlock() + + // Remove the snapshot from list of resulting snapshots. + delete(r.snapshots, snapshot.Name) + + // Generate deletion event. Cloned content is needed to prevent races (and we + // would get a clone from etcd too). + if r.fakeSnapshotWatch != nil { + r.fakeSnapshotWatch.Delete(snapshot.DeepCopy()) + } +} + +// addContentEvent simulates that a content has been added in etcd and the +// controller receives 'content added' event. +func (r *snapshotReactor) addContentEvent(content *crdv1.VolumeSnapshotContent) { + r.lock.Lock() + defer r.lock.Unlock() + + r.contents[content.Name] = content + // Generate event. 
No cloning is needed, this snapshot is not stored in the + // controller cache yet. + if r.fakeContentWatch != nil { + r.fakeContentWatch.Add(content) + } +} + +// modifyContentEvent simulates that a content has been modified in etcd and the +// controller receives 'content modified' event. +func (r *snapshotReactor) modifyContentEvent(content *crdv1.VolumeSnapshotContent) { + r.lock.Lock() + defer r.lock.Unlock() + + r.contents[content.Name] = content + // Generate deletion event. Cloned content is needed to prevent races (and we + // would get a clone from etcd too). + if r.fakeContentWatch != nil { + r.fakeContentWatch.Modify(content.DeepCopy()) + } +} + +// addSnapshotEvent simulates that a snapshot has been deleted in etcd and the +// controller receives 'snapshot added' event. +func (r *snapshotReactor) addSnapshotEvent(snapshot *crdv1.VolumeSnapshot) { + r.lock.Lock() + defer r.lock.Unlock() + + r.snapshots[snapshot.Name] = snapshot + // Generate event. No cloning is needed, this snapshot is not stored in the + // controller cache yet. + if r.fakeSnapshotWatch != nil { + r.fakeSnapshotWatch.Add(snapshot) + } +} + +func newSnapshotReactor(kubeClient *kubefake.Clientset, client *fake.Clientset, ctrl *csiSnapshotController, fakeVolumeWatch, fakeClaimWatch *watch.FakeWatcher, errors []reactorError) *snapshotReactor { + reactor := &snapshotReactor{ + secrets: make(map[string]*v1.Secret), + storageClasses: make(map[string]*storagev1.StorageClass), + volumes: make(map[string]*v1.PersistentVolume), + claims: make(map[string]*v1.PersistentVolumeClaim), + contents: make(map[string]*crdv1.VolumeSnapshotContent), + snapshots: make(map[string]*crdv1.VolumeSnapshot), + ctrl: ctrl, + fakeContentWatch: fakeVolumeWatch, + fakeSnapshotWatch: fakeClaimWatch, + errors: errors, + } + + client.AddReactor("create", "volumesnapshotcontents", reactor.React) + client.AddReactor("update", "volumesnapshotcontents", reactor.React) + client.AddReactor("update", "volumesnapshots", reactor.React) + client.AddReactor("get", "volumesnapshotcontents", reactor.React) + client.AddReactor("get", "volumesnapshots", reactor.React) + client.AddReactor("delete", "volumesnapshotcontents", reactor.React) + client.AddReactor("delete", "volumesnapshots", reactor.React) + kubeClient.AddReactor("get", "persistentvolumeclaims", reactor.React) + kubeClient.AddReactor("update", "persistentvolumeclaims", reactor.React) + kubeClient.AddReactor("get", "persistentvolumes", reactor.React) + kubeClient.AddReactor("get", "storageclasses", reactor.React) + kubeClient.AddReactor("get", "secrets", reactor.React) + + return reactor +} + +func alwaysReady() bool { return true } + +func newTestController(kubeClient kubernetes.Interface, clientset clientset.Interface, + informerFactory informers.SharedInformerFactory, t *testing.T, test controllerTest) (*csiSnapshotController, error) { + if informerFactory == nil { + informerFactory = informers.NewSharedInformerFactory(clientset, NoResyncPeriodFunc()) + } + + coreFactory := coreinformers.NewSharedInformerFactory(kubeClient, NoResyncPeriodFunc()) + + // Construct controller + fakeSnapshot := &fakeSnapshotter{ + t: t, + listCalls: test.expectedListCalls, + createCalls: test.expectedCreateCalls, + deleteCalls: test.expectedDeleteCalls, + } + + ctrl := NewCSISnapshotController( + clientset, + kubeClient, + mockDriverName, + informerFactory.Snapshot().V1alpha1().VolumeSnapshots(), + informerFactory.Snapshot().V1alpha1().VolumeSnapshotContents(), + 
informerFactory.Snapshot().V1alpha1().VolumeSnapshotClasses(), + coreFactory.Core().V1().PersistentVolumeClaims(), + 3, + 5*time.Millisecond, + fakeSnapshot, + 5*time.Millisecond, + 60*time.Second, + "snapshot", + -1, + ) + + ctrl.eventRecorder = record.NewFakeRecorder(1000) + + ctrl.contentListerSynced = alwaysReady + ctrl.snapshotListerSynced = alwaysReady + ctrl.classListerSynced = alwaysReady + ctrl.pvcListerSynced = alwaysReady + + return ctrl, nil +} + +// newContent returns a new content with given attributes +func newContent(name, className, snapshotHandle, volumeUID, volumeName, boundToSnapshotUID, boundToSnapshotName string, deletionPolicy *crdv1.DeletionPolicy, size *int64, creationTime *int64, withFinalizer bool) *crdv1.VolumeSnapshotContent { + content := crdv1.VolumeSnapshotContent{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + ResourceVersion: "1", + }, + Spec: crdv1.VolumeSnapshotContentSpec{ + VolumeSnapshotSource: crdv1.VolumeSnapshotSource{ + CSI: &crdv1.CSIVolumeSnapshotSource{ + RestoreSize: size, + Driver: mockDriverName, + SnapshotHandle: snapshotHandle, + CreationTime: creationTime, + }, + }, + VolumeSnapshotClassName: &className, + DeletionPolicy: deletionPolicy, + }, + } + if volumeName != noVolume { + content.Spec.PersistentVolumeRef = &v1.ObjectReference{ + Kind: "PersistentVolume", + APIVersion: "v1", + UID: types.UID(volumeUID), + Name: volumeName, + } + } + if boundToSnapshotName != "" { + content.Spec.VolumeSnapshotRef = &v1.ObjectReference{ + Kind: "VolumeSnapshot", + APIVersion: "snapshot.storage.k8s.io/v1alpha1", + UID: types.UID(boundToSnapshotUID), + Namespace: testNamespace, + Name: boundToSnapshotName, + } + } + + if withFinalizer { + return withContentFinalizer(&content) + } + return &content +} + +func newContentArray(name, className, snapshotHandle, volumeUID, volumeName, boundToSnapshotUID, boundToSnapshotName string, deletionPolicy *crdv1.DeletionPolicy, size *int64, creationTime *int64, withFinalizer bool) []*crdv1.VolumeSnapshotContent { + return []*crdv1.VolumeSnapshotContent{ + newContent(name, className, snapshotHandle, volumeUID, volumeName, boundToSnapshotUID, boundToSnapshotName, deletionPolicy, size, creationTime, withFinalizer), + } +} + +func newContentWithUnmatchDriverArray(name, className, snapshotHandle, volumeUID, volumeName, boundToSnapshotUID, boundToSnapshotName string, deletionPolicy *crdv1.DeletionPolicy, size *int64, creationTime *int64) []*crdv1.VolumeSnapshotContent { + content := newContent(name, className, snapshotHandle, volumeUID, volumeName, boundToSnapshotUID, boundToSnapshotName, deletionPolicy, size, creationTime, false) + content.Spec.VolumeSnapshotSource.CSI.Driver = "fake" + return []*crdv1.VolumeSnapshotContent{ + content, + } +} + +func newSnapshot(name, className, boundToContent, snapshotUID, claimName string, ready bool, err *storagev1beta1.VolumeError, creationTime *metav1.Time, size *resource.Quantity) *crdv1.VolumeSnapshot { + snapshot := crdv1.VolumeSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: testNamespace, + UID: types.UID(snapshotUID), + ResourceVersion: "1", + SelfLink: "/apis/snapshot.storage.k8s.io/v1alpha1/namespaces/" + testNamespace + "/volumesnapshots/" + name, + }, + Spec: crdv1.VolumeSnapshotSpec{ + VolumeSnapshotClassName: &className, + SnapshotContentName: boundToContent, + }, + Status: crdv1.VolumeSnapshotStatus{ + CreationTime: creationTime, + ReadyToUse: ready, + Error: err, + RestoreSize: size, + }, + } + if claimName != noClaim { + snapshot.Spec.Source = 
&v1.TypedLocalObjectReference{ + Name: claimName, + Kind: "PersistentVolumeClaim", + } + } + + return withSnapshotFinalizer(&snapshot) +} + +func newSnapshotArray(name, className, boundToContent, snapshotUID, claimName string, ready bool, err *storagev1beta1.VolumeError, creationTime *metav1.Time, size *resource.Quantity) []*crdv1.VolumeSnapshot { + return []*crdv1.VolumeSnapshot{ + newSnapshot(name, className, boundToContent, snapshotUID, claimName, ready, err, creationTime, size), + } +} + +// newClaim returns a new claim with given attributes +func newClaim(name, claimUID, capacity, boundToVolume string, phase v1.PersistentVolumeClaimPhase, class *string, bFinalizer bool) *v1.PersistentVolumeClaim { + claim := v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: testNamespace, + UID: types.UID(claimUID), + ResourceVersion: "1", + SelfLink: "/api/v1/namespaces/" + testNamespace + "/persistentvolumeclaims/" + name, + }, + Spec: v1.PersistentVolumeClaimSpec{ + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany}, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): resource.MustParse(capacity), + }, + }, + VolumeName: boundToVolume, + StorageClassName: class, + }, + Status: v1.PersistentVolumeClaimStatus{ + Phase: phase, + }, + } + + // Bound claims must have proper Status. + if phase == v1.ClaimBound { + claim.Status.AccessModes = claim.Spec.AccessModes + // For most of the tests it's enough to copy claim's requested capacity, + // individual tests can adjust it using withExpectedCapacity() + claim.Status.Capacity = claim.Spec.Resources.Requests + } + + if bFinalizer { + return withPVCFinalizer(&claim) + } + return &claim +} + +// newClaimArray returns array with a single claim that would be returned by +// newClaim() with the same parameters. +func newClaimArray(name, claimUID, capacity, boundToVolume string, phase v1.PersistentVolumeClaimPhase, class *string) []*v1.PersistentVolumeClaim { + return []*v1.PersistentVolumeClaim{ + newClaim(name, claimUID, capacity, boundToVolume, phase, class, false), + } +} + +// newClaimArrayFinalizer returns array with a single claim that would be returned by +// newClaim() with the same parameters plus finalizer. 
+func newClaimArrayFinalizer(name, claimUID, capacity, boundToVolume string, phase v1.PersistentVolumeClaimPhase, class *string) []*v1.PersistentVolumeClaim { + return []*v1.PersistentVolumeClaim{ + newClaim(name, claimUID, capacity, boundToVolume, phase, class, true), + } +} + +// newVolume returns a new volume with given attributes +func newVolume(name, volumeUID, volumeHandle, capacity, boundToClaimUID, boundToClaimName string, phase v1.PersistentVolumePhase, reclaimPolicy v1.PersistentVolumeReclaimPolicy, class string, annotations ...string) *v1.PersistentVolume { + volume := v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + ResourceVersion: "1", + UID: types.UID(volumeUID), + SelfLink: "/api/v1/persistentvolumes/" + name, + }, + Spec: v1.PersistentVolumeSpec{ + Capacity: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): resource.MustParse(capacity), + }, + PersistentVolumeSource: v1.PersistentVolumeSource{ + CSI: &v1.CSIPersistentVolumeSource{ + Driver: mockDriverName, + VolumeHandle: volumeHandle, + }, + }, + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany}, + PersistentVolumeReclaimPolicy: reclaimPolicy, + StorageClassName: class, + }, + Status: v1.PersistentVolumeStatus{ + Phase: phase, + }, + } + + if boundToClaimName != "" { + volume.Spec.ClaimRef = &v1.ObjectReference{ + Kind: "PersistentVolumeClaim", + APIVersion: "v1", + UID: types.UID(boundToClaimUID), + Namespace: testNamespace, + Name: boundToClaimName, + } + } + + return &volume +} + +// newVolumeArray returns array with a single volume that would be returned by +// newVolume() with the same parameters. +func newVolumeArray(name, volumeUID, volumeHandle, capacity, boundToClaimUID, boundToClaimName string, phase v1.PersistentVolumePhase, reclaimPolicy v1.PersistentVolumeReclaimPolicy, class string) []*v1.PersistentVolume { + return []*v1.PersistentVolume{ + newVolume(name, volumeUID, volumeHandle, capacity, boundToClaimUID, boundToClaimName, phase, reclaimPolicy, class), + } +} + +func newVolumeError(message string) *storagev1beta1.VolumeError { + return &storagev1beta1.VolumeError{ + Time: metav1.Time{}, + Message: message, + } +} + +func testSyncSnapshot(ctrl *csiSnapshotController, reactor *snapshotReactor, test controllerTest) error { + return ctrl.syncSnapshot(test.initialSnapshots[0]) +} + +func testSyncSnapshotError(ctrl *csiSnapshotController, reactor *snapshotReactor, test controllerTest) error { + err := ctrl.syncSnapshot(test.initialSnapshots[0]) + + if err != nil { + return nil + } + return fmt.Errorf("syncSnapshot succeeded when failure was expected") +} + +func testSyncContent(ctrl *csiSnapshotController, reactor *snapshotReactor, test controllerTest) error { + return ctrl.syncContent(test.initialContents[0]) +} + +func testAddPVCFinalizer(ctrl *csiSnapshotController, reactor *snapshotReactor, test controllerTest) error { + return ctrl.ensureSnapshotSourceFinalizer(test.initialSnapshots[0]) +} + +func testRemovePVCFinalizer(ctrl *csiSnapshotController, reactor *snapshotReactor, test controllerTest) error { + return ctrl.checkandRemoveSnapshotSourceFinalizer(test.initialSnapshots[0]) +} + +var ( + classEmpty string + classGold = "gold" + classSilver = "silver" + classNonExisting = "non-existing" + defaultClass = "default-class" + emptySecretClass = "empty-secret-class" + invalidSecretClass = "invalid-secret-class" + validSecretClass = "valid-secret-class" + sameDriver = "sameDriver" + diffDriver = "diffDriver" + noClaim = "" + noBoundUID = "" + noVolume = 
"" +) + +// wrapTestWithInjectedOperation returns a testCall that: +// - starts the controller and lets it run original testCall until +// scheduleOperation() call. It blocks the controller there and calls the +// injected function to simulate that something is happening when the +// controller waits for the operation lock. Controller is then resumed and we +// check how it behaves. +func wrapTestWithInjectedOperation(toWrap testCall, injectBeforeOperation func(ctrl *csiSnapshotController, reactor *snapshotReactor)) testCall { + + return func(ctrl *csiSnapshotController, reactor *snapshotReactor, test controllerTest) error { + // Inject a hook before async operation starts + klog.V(4).Infof("reactor:injecting call") + injectBeforeOperation(ctrl, reactor) + + // Run the tested function (typically syncSnapshot/syncContent) in a + // separate goroutine. + var testError error + var testFinished int32 + + go func() { + testError = toWrap(ctrl, reactor, test) + // Let the "main" test function know that syncContent has finished. + atomic.StoreInt32(&testFinished, 1) + }() + + // Wait for the controller to finish the test function. + for atomic.LoadInt32(&testFinished) == 0 { + time.Sleep(time.Millisecond * 10) + } + + return testError + } +} + +func evaluateTestResults(ctrl *csiSnapshotController, reactor *snapshotReactor, test controllerTest, t *testing.T) { + // Evaluate results + if err := reactor.checkSnapshots(test.expectedSnapshots); err != nil { + t.Errorf("Test %q: %v", test.name, err) + + } + if err := reactor.checkContents(test.expectedContents); err != nil { + t.Errorf("Test %q: %v", test.name, err) + } + + if err := checkEvents(t, test.expectedEvents, ctrl); err != nil { + t.Errorf("Test %q: %v", test.name, err) + } +} + +// Test single call to syncSnapshot and syncContent methods. +// For all tests: +// 1. Fill in the controller with initial data +// 2. Call the tested function (syncSnapshot/syncContent) via +// controllerTest.testCall *once*. +// 3. Compare resulting contents and snapshots with expected contents and snapshots. 
+func runSyncTests(t *testing.T, tests []controllerTest, snapshotClasses []*crdv1.VolumeSnapshotClass) { + snapshotscheme.AddToScheme(scheme.Scheme) + for _, test := range tests { + klog.V(4).Infof("starting test %q", test.name) + + // Initialize the controller + kubeClient := &kubefake.Clientset{} + client := &fake.Clientset{} + + ctrl, err := newTestController(kubeClient, client, nil, t, test) + if err != nil { + t.Fatalf("Test %q construct persistent content failed: %v", test.name, err) + } + + reactor := newSnapshotReactor(kubeClient, client, ctrl, nil, nil, test.errors) + for _, snapshot := range test.initialSnapshots { + ctrl.snapshotStore.Add(snapshot) + reactor.snapshots[snapshot.Name] = snapshot + } + for _, content := range test.initialContents { + if ctrl.isDriverMatch(test.initialContents[0]) { + ctrl.contentStore.Add(content) + reactor.contents[content.Name] = content + } + } + + pvcIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + for _, claim := range test.initialClaims { + reactor.claims[claim.Name] = claim + pvcIndexer.Add(claim) + } + ctrl.pvcLister = corelisters.NewPersistentVolumeClaimLister(pvcIndexer) + + for _, volume := range test.initialVolumes { + reactor.volumes[volume.Name] = volume + } + for _, storageClass := range test.initialStorageClasses { + reactor.storageClasses[storageClass.Name] = storageClass + } + for _, secret := range test.initialSecrets { + reactor.secrets[secret.Name] = secret + } + + // Inject classes into controller via a custom lister. + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + for _, class := range snapshotClasses { + indexer.Add(class) + } + ctrl.classLister = storagelisters.NewVolumeSnapshotClassLister(indexer) + + // Run the tested functions + err = test.test(ctrl, reactor, test) + if err != nil { + t.Errorf("Test %q failed: %v", test.name, err) + } + + // Wait for the target state + err = reactor.waitTest(test) + if err != nil { + t.Errorf("Test %q failed: %v", test.name, err) + } + + evaluateTestResults(ctrl, reactor, test, t) + } +} + +// This tests ensureSnapshotSourceFinalizer and checkandRemoveSnapshotSourceFinalizer +func runPVCFinalizerTests(t *testing.T, tests []controllerTest, snapshotClasses []*crdv1.VolumeSnapshotClass) { + snapshotscheme.AddToScheme(scheme.Scheme) + for _, test := range tests { + klog.V(4).Infof("starting test %q", test.name) + + // Initialize the controller + kubeClient := &kubefake.Clientset{} + client := &fake.Clientset{} + + ctrl, err := newTestController(kubeClient, client, nil, t, test) + if err != nil { + t.Fatalf("Test %q construct persistent content failed: %v", test.name, err) + } + + reactor := newSnapshotReactor(kubeClient, client, ctrl, nil, nil, test.errors) + for _, snapshot := range test.initialSnapshots { + ctrl.snapshotStore.Add(snapshot) + reactor.snapshots[snapshot.Name] = snapshot + } + for _, content := range test.initialContents { + if ctrl.isDriverMatch(test.initialContents[0]) { + ctrl.contentStore.Add(content) + reactor.contents[content.Name] = content + } + } + + pvcIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + for _, claim := range test.initialClaims { + reactor.claims[claim.Name] = claim + pvcIndexer.Add(claim) + } + ctrl.pvcLister = corelisters.NewPersistentVolumeClaimLister(pvcIndexer) + + for _, volume := range test.initialVolumes { + reactor.volumes[volume.Name] = volume + } + for _, storageClass := range test.initialStorageClasses { + reactor.storageClasses[storageClass.Name] = 
storageClass + } + for _, secret := range test.initialSecrets { + reactor.secrets[secret.Name] = secret + } + + // Inject classes into controller via a custom lister. + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + for _, class := range snapshotClasses { + indexer.Add(class) + } + ctrl.classLister = storagelisters.NewVolumeSnapshotClassLister(indexer) + + // Run the tested functions + err = test.test(ctrl, reactor, test) + if err != nil { + t.Errorf("Test %q failed: %v", test.name, err) + } + + // Verify PVCFinalizer tests results + evaluatePVCFinalizerTests(ctrl, reactor, test, t) + } +} + +// Evaluate PVCFinalizer tests results +func evaluatePVCFinalizerTests(ctrl *csiSnapshotController, reactor *snapshotReactor, test controllerTest, t *testing.T) { + // Evaluate results + bHasPVCFinalizer := false + name := sysruntime.FuncForPC(reflect.ValueOf(test.test).Pointer()).Name() + index := strings.LastIndex(name, ".") + if index == -1 { + t.Errorf("Test %q: failed to test finalizer - invalid test call name [%s]", test.name, name) + return + } + names := []rune(name) + funcName := string(names[index+1 : len(name)]) + klog.V(4).Infof("test %q: PVCFinalizer test func name: [%s]", test.name, funcName) + + if funcName == "testAddPVCFinalizer" { + for _, pvc := range reactor.claims { + if test.initialClaims[0].Name == pvc.Name { + if !slice.ContainsString(test.initialClaims[0].ObjectMeta.Finalizers, PVCFinalizer, nil) && slice.ContainsString(pvc.ObjectMeta.Finalizers, PVCFinalizer, nil) { + klog.V(4).Infof("test %q succeeded. PVCFinalizer is added to PVC %s", test.name, pvc.Name) + bHasPVCFinalizer = true + } + break + } + } + if test.expectSuccess && !bHasPVCFinalizer { + t.Errorf("Test %q: failed to add finalizer to PVC %s", test.name, test.initialClaims[0].Name) + } + } + bHasPVCFinalizer = true + if funcName == "testRemovePVCFinalizer" { + for _, pvc := range reactor.claims { + if test.initialClaims[0].Name == pvc.Name { + if slice.ContainsString(test.initialClaims[0].ObjectMeta.Finalizers, PVCFinalizer, nil) && !slice.ContainsString(pvc.ObjectMeta.Finalizers, PVCFinalizer, nil) { + klog.V(4).Infof("test %q succeeded. 
PVCFinalizer is removed from PVC %s", test.name, pvc.Name) + bHasPVCFinalizer = false + } + break + } + } + if test.expectSuccess && bHasPVCFinalizer { + t.Errorf("Test %q: failed to remove finalizer from PVC %s", test.name, test.initialClaims[0].Name) + } + } +} + +func getSize(size int64) *resource.Quantity { + return resource.NewQuantity(size, resource.BinarySI) +} + +func emptySecret() *v1.Secret { + return &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "emptysecret", + Namespace: "default", + }, + } +} + +func secret() *v1.Secret { + return &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "foo": []byte("bar"), + }, + } +} + +type listCall struct { + snapshotID string + // information to return + readyToUse bool + createTime int64 + size int64 + err error +} + +type deleteCall struct { + snapshotID string + secrets map[string]string + err error +} + +type createCall struct { + // expected request parameter + snapshotName string + volume *v1.PersistentVolume + parameters map[string]string + secrets map[string]string + // information to return + driverName string + snapshotId string + timestamp int64 + size int64 + readyToUse bool + err error +} + +// Fake SnapShotter implementation that check that Attach/Detach is called +// with the right parameters and it returns proper error code and metadata. +type fakeSnapshotter struct { + createCalls []createCall + createCallCounter int + deleteCalls []deleteCall + deleteCallCounter int + listCalls []listCall + listCallCounter int + t *testing.T +} + +func (f *fakeSnapshotter) CreateSnapshot(ctx context.Context, snapshotName string, volume *v1.PersistentVolume, parameters map[string]string, snapshotterCredentials map[string]string) (string, string, int64, int64, bool, error) { + if f.createCallCounter >= len(f.createCalls) { + f.t.Errorf("Unexpected CSI Create Snapshot call: snapshotName=%s, volume=%v, index: %d, calls: %+v", snapshotName, volume.Name, f.createCallCounter, f.createCalls) + return "", "", 0, 0, false, fmt.Errorf("unexpected call") + } + call := f.createCalls[f.createCallCounter] + f.createCallCounter++ + + var err error + if call.snapshotName != snapshotName { + f.t.Errorf("Wrong CSI Create Snapshot call: snapshotName=%s, volume=%s, expected snapshotName: %s", snapshotName, volume.Name, call.snapshotName) + err = fmt.Errorf("unexpected create snapshot call") + } + + if !reflect.DeepEqual(call.volume, volume) { + f.t.Errorf("Wrong CSI Create Snapshot call: snapshotName=%s, volume=%s, diff %s", snapshotName, volume.Name, diff.ObjectDiff(call.volume, volume)) + err = fmt.Errorf("unexpected create snapshot call") + } + + if !reflect.DeepEqual(call.parameters, parameters) { + f.t.Errorf("Wrong CSI Create Snapshot call: snapshotName=%s, volume=%s, expected parameters %+v, got %+v", snapshotName, volume.Name, call.parameters, parameters) + err = fmt.Errorf("unexpected create snapshot call") + } + + if !reflect.DeepEqual(call.secrets, snapshotterCredentials) { + f.t.Errorf("Wrong CSI Create Snapshot call: snapshotName=%s, volume=%s, expected secrets %+v, got %+v", snapshotName, volume.Name, call.secrets, snapshotterCredentials) + err = fmt.Errorf("unexpected create snapshot call") + } + + if err != nil { + return "", "", 0, 0, false, fmt.Errorf("unexpected call") + } + + return call.driverName, call.snapshotId, call.timestamp, call.size, call.readyToUse, call.err +} + +func (f *fakeSnapshotter) DeleteSnapshot(ctx context.Context, snapshotID string, 
snapshotterCredentials map[string]string) error { + if f.deleteCallCounter >= len(f.deleteCalls) { + f.t.Errorf("Unexpected CSI Delete Snapshot call: snapshotID=%s, index: %d, calls: %+v", snapshotID, f.createCallCounter, f.createCalls) + return fmt.Errorf("unexpected call") + } + call := f.deleteCalls[f.deleteCallCounter] + f.deleteCallCounter++ + + var err error + if call.snapshotID != snapshotID { + f.t.Errorf("Wrong CSI Create Snapshot call: snapshotID=%s, expected snapshotID: %s", snapshotID, call.snapshotID) + err = fmt.Errorf("unexpected Delete snapshot call") + } + + if !reflect.DeepEqual(call.secrets, snapshotterCredentials) { + f.t.Errorf("Wrong CSI Delete Snapshot call: snapshotID=%s, expected secrets %+v, got %+v", snapshotID, call.secrets, snapshotterCredentials) + err = fmt.Errorf("unexpected Delete Snapshot call") + } + + if err != nil { + return fmt.Errorf("unexpected call") + } + + return call.err +} + +func (f *fakeSnapshotter) GetSnapshotStatus(ctx context.Context, snapshotID string) (bool, int64, int64, error) { + if f.listCallCounter >= len(f.listCalls) { + f.t.Errorf("Unexpected CSI list Snapshot call: snapshotID=%s, index: %d, calls: %+v", snapshotID, f.createCallCounter, f.createCalls) + return false, 0, 0, fmt.Errorf("unexpected call") + } + call := f.listCalls[f.listCallCounter] + f.listCallCounter++ + + var err error + if call.snapshotID != snapshotID { + f.t.Errorf("Wrong CSI List Snapshot call: snapshotID=%s, expected snapshotID: %s", snapshotID, call.snapshotID) + err = fmt.Errorf("unexpected List snapshot call") + } + + if err != nil { + return false, 0, 0, fmt.Errorf("unexpected call") + } + + return call.readyToUse, call.createTime, call.size, call.err +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_controller.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_controller.go new file mode 100644 index 000000000..1618f36ea --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_controller.go @@ -0,0 +1,1195 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "fmt" + "strings" + "time" + + crdv1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + storage "k8s.io/api/storage/v1beta1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/kubernetes/scheme" + ref "k8s.io/client-go/tools/reference" + "k8s.io/klog" + "k8s.io/kubernetes/pkg/util/goroutinemap" + "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff" + "k8s.io/kubernetes/pkg/util/slice" +) + +// ================================================================== +// PLEASE DO NOT ATTEMPT TO SIMPLIFY THIS CODE. +// KEEP THE SPACE SHUTTLE FLYING. 
+// ==================================================================
+
+// Design:
+//
+// The fundamental key to this design is the bi-directional "pointer" between
+// VolumeSnapshots and VolumeSnapshotContents, which is represented here
+// as snapshot.Spec.SnapshotContentName and content.Spec.VolumeSnapshotRef.
+// The bi-directionality is complicated to manage in a transactionless system, but
+// without it we can't ensure sane behavior in the face of different forms of
+// trouble. For example, a rogue HA controller instance could end up racing
+// and making multiple bindings that are indistinguishable, resulting in
+// potential data loss.
+//
+// This controller is designed to work in active-passive high availability
+// mode. It *could* also work in active-active HA mode, as all the object
+// transitions are designed to cope with it; however, performance could be
+// lower as the two active controllers would step on each other's toes
+// frequently.
+//
+// This controller supports both dynamic snapshot creation and pre-bound snapshots.
+// In pre-bound mode, objects are created with pre-defined pointers: a VolumeSnapshot
+// points to a specific VolumeSnapshotContent and the VolumeSnapshotContent also
+// points back to this VolumeSnapshot.
+//
+// Dynamic snapshot creation is a multi-step process: first, the controller triggers
+// snapshot creation through the CSI volume plugin, which should return a snapshot after
+// it is created successfully (however, the snapshot might not be ready to use yet if
+// there is an uploading phase). The VolumeSnapshot's creationTimestamp is updated
+// accordingly, and then a VolumeSnapshotContent object is created to represent
+// this snapshot. After that, the controller keeps checking the snapshot status
+// through CSI snapshot calls. When the snapshot is ready to use, the controller sets
+// the status "Bound" to true to indicate the snapshot is bound and ready to use.
+// If the creation fails for any reason, the Error status is set accordingly.
+// In the alpha version, the controller does not retry creating the snapshot after it fails.
+// In a future version, a retry policy will be added.
+
+const pvcKind = "PersistentVolumeClaim"
+const apiGroup = ""
+const snapshotKind = "VolumeSnapshot"
+const snapshotAPIGroup = crdv1.GroupName
+
+const controllerUpdateFailMsg = "snapshot controller failed to update"
+
+const IsDefaultSnapshotClassAnnotation = "snapshot.storage.kubernetes.io/is-default-class"
+
+// syncContent deals with one key off the queue. It returns false when it's time to quit.
+func (ctrl *csiSnapshotController) syncContent(content *crdv1.VolumeSnapshotContent) error {
+	klog.V(5).Infof("synchronizing VolumeSnapshotContent[%s]", content.Name)
+
+	if isContentDeletionCandidate(content) {
+		// Volume snapshot content should be deleted. Check if it's used
+		// and remove finalizer if it's not.
+		// Check if snapshot content is still bound to a snapshot.
+		isUsed := ctrl.isSnapshotContentBeingUsed(content)
+		if !isUsed {
+			klog.V(5).Infof("syncContent: Remove Finalizer for VolumeSnapshotContent[%s]", content.Name)
+			return ctrl.removeContentFinalizer(content)
+		}
+	}
+
+	if needToAddContentFinalizer(content) {
+		// Content is not being deleted -> it should have the finalizer.
+ klog.V(5).Infof("syncContent: Add Finalizer for VolumeSnapshotContent[%s]", content.Name) + return ctrl.addContentFinalizer(content) + } + + // VolumeSnapshotContent is not bound to any VolumeSnapshot, in this case we just return err + if content.Spec.VolumeSnapshotRef == nil { + // content is not bound + klog.V(4).Infof("synchronizing VolumeSnapshotContent[%s]: VolumeSnapshotContent is not bound to any VolumeSnapshot", content.Name) + ctrl.eventRecorder.Event(content, v1.EventTypeWarning, "SnapshotContentNotBound", "VolumeSnapshotContent is not bound to any VolumeSnapshot") + return fmt.Errorf("volumeSnapshotContent %s is not bound to any VolumeSnapshot", content.Name) + } + klog.V(4).Infof("synchronizing VolumeSnapshotContent[%s]: content is bound to snapshot %s", content.Name, snapshotRefKey(content.Spec.VolumeSnapshotRef)) + // The VolumeSnapshotContent is reserved for a VolumeSnapshot; + // that VolumeSnapshot has not yet been bound to this VolumeSnapshotContent; the VolumeSnapshot sync will handle it. + if content.Spec.VolumeSnapshotRef.UID == "" { + klog.V(4).Infof("synchronizing VolumeSnapshotContent[%s]: VolumeSnapshotContent is pre-bound to VolumeSnapshot %s", content.Name, snapshotRefKey(content.Spec.VolumeSnapshotRef)) + return nil + } + // Get the VolumeSnapshot by _name_ + var snapshot *crdv1.VolumeSnapshot + snapshotName := snapshotRefKey(content.Spec.VolumeSnapshotRef) + obj, found, err := ctrl.snapshotStore.GetByKey(snapshotName) + if err != nil { + return err + } + if !found { + klog.V(4).Infof("synchronizing VolumeSnapshotContent[%s]: snapshot %s not found", content.Name, snapshotRefKey(content.Spec.VolumeSnapshotRef)) + // Fall through with snapshot = nil + } else { + var ok bool + snapshot, ok = obj.(*crdv1.VolumeSnapshot) + if !ok { + return fmt.Errorf("cannot convert object from snapshot cache to snapshot %q!?: %#v", content.Name, obj) + } + klog.V(4).Infof("synchronizing VolumeSnapshotContent[%s]: snapshot %s found", content.Name, snapshotRefKey(content.Spec.VolumeSnapshotRef)) + } + if snapshot != nil && snapshot.UID != content.Spec.VolumeSnapshotRef.UID { + // The snapshot that the content was pointing to was deleted, and another + // with the same name created. + klog.V(4).Infof("synchronizing VolumeSnapshotContent[%s]: content %s has different UID, the old one must have been deleted", content.Name, snapshotRefKey(content.Spec.VolumeSnapshotRef)) + // Treat the content as bound to a missing snapshot. + snapshot = nil + } + if snapshot == nil { + if content.Spec.DeletionPolicy != nil { + switch *content.Spec.DeletionPolicy { + case crdv1.VolumeSnapshotContentRetain: + klog.V(4).Infof("VolumeSnapshotContent[%s]: policy is Retain, nothing to do", content.Name) + + case crdv1.VolumeSnapshotContentDelete: + klog.V(4).Infof("VolumeSnapshotContent[%s]: policy is Delete", content.Name) + ctrl.deleteSnapshotContent(content) + default: + // Unknown VolumeSnapshotDeletionolicy + ctrl.eventRecorder.Event(content, v1.EventTypeWarning, "SnapshotUnknownDeletionPolicy", "Volume Snapshot Content has unrecognized deletion policy") + } + return nil + } + // By default, we use Retain policy if it is not set by users + klog.V(4).Infof("VolumeSnapshotContent[%s]: by default the policy is Retain", content.Name) + + } + return nil +} + +// syncSnapshot is the main controller method to decide what to do with a snapshot. +// It's invoked by appropriate cache.Controller callbacks when a snapshot is +// created, updated or periodically synced. 
We do not differentiate between +// these events. +// For easier readability, it is split into syncUnreadySnapshot and syncReadySnapshot +func (ctrl *csiSnapshotController) syncSnapshot(snapshot *crdv1.VolumeSnapshot) error { + klog.V(5).Infof("synchonizing VolumeSnapshot[%s]: %s", snapshotKey(snapshot), getSnapshotStatusForLogging(snapshot)) + + if isSnapshotDeletionCandidate(snapshot) { + // Volume snapshot should be deleted. Check if it's used + // and remove finalizer if it's not. + // Check if a volume is being created from snapshot. + isUsed := ctrl.isVolumeBeingCreatedFromSnapshot(snapshot) + if !isUsed { + klog.V(5).Infof("syncSnapshot: Remove Finalizer for VolumeSnapshot[%s]", snapshotKey(snapshot)) + return ctrl.removeSnapshotFinalizer(snapshot) + } + } + + if needToAddSnapshotFinalizer(snapshot) { + // Snapshot is not being deleted -> it should have the finalizer. + klog.V(5).Infof("syncSnapshot: Add Finalizer for VolumeSnapshot[%s]", snapshotKey(snapshot)) + return ctrl.addSnapshotFinalizer(snapshot) + } + + klog.V(5).Infof("syncSnapshot[%s]: check if we should remove finalizer on snapshot source and remove it if we can", snapshotKey(snapshot)) + // Check if we should remove finalizer on snapshot source and remove it if we can. + errFinalizer := ctrl.checkandRemoveSnapshotSourceFinalizer(snapshot) + if errFinalizer != nil { + klog.Errorf("error check and remove snapshot source finalizer for snapshot [%s]: %v", snapshot.Name, errFinalizer) + // Log an event and keep the original error from syncUnready/ReadySnapshot + ctrl.eventRecorder.Event(snapshot, v1.EventTypeWarning, "ErrorSnapshotSourceFinalizer", "Error check and remove PVC Finalizer for VolumeSnapshot") + } + + if !snapshot.Status.ReadyToUse { + return ctrl.syncUnreadySnapshot(snapshot) + } + return ctrl.syncReadySnapshot(snapshot) +} + +// syncReadySnapshot checks the snapshot which has been bound to snapshot content successfully before. +// If there is any problem with the binding (e.g., snapshot points to a non-exist snapshot content), update the snapshot status and emit event. +func (ctrl *csiSnapshotController) syncReadySnapshot(snapshot *crdv1.VolumeSnapshot) error { + if snapshot.Spec.SnapshotContentName == "" { + if err := ctrl.updateSnapshotErrorStatusWithEvent(snapshot, v1.EventTypeWarning, "SnapshotLost", "Bound snapshot has lost reference to VolumeSnapshotContent"); err != nil { + return err + } + return nil + } + obj, found, err := ctrl.contentStore.GetByKey(snapshot.Spec.SnapshotContentName) + if err != nil { + return err + } + if !found { + if err = ctrl.updateSnapshotErrorStatusWithEvent(snapshot, v1.EventTypeWarning, "SnapshotContentMissing", "VolumeSnapshotContent is missing"); err != nil { + return err + } + return nil + } else { + content, ok := obj.(*crdv1.VolumeSnapshotContent) + if !ok { + return fmt.Errorf("Cannot convert object from snapshot content store to VolumeSnapshotContent %q!?: %#v", snapshot.Spec.SnapshotContentName, obj) + } + + klog.V(5).Infof("syncReadySnapshot[%s]: VolumeSnapshotContent %q found", snapshotKey(snapshot), content.Name) + if !IsSnapshotBound(snapshot, content) { + // snapshot is bound but content is not bound to snapshot correctly + if err = ctrl.updateSnapshotErrorStatusWithEvent(snapshot, v1.EventTypeWarning, "SnapshotMisbound", "VolumeSnapshotContent is not bound to the VolumeSnapshot correctly"); err != nil { + return err + } + return nil + } + // Snapshot is correctly bound. 
+ return nil + } +} + +// syncUnreadySnapshot is the main controller method to decide what to do with a snapshot which is not set to ready. +func (ctrl *csiSnapshotController) syncUnreadySnapshot(snapshot *crdv1.VolumeSnapshot) error { + uniqueSnapshotName := snapshotKey(snapshot) + klog.V(5).Infof("syncUnreadySnapshot %s", uniqueSnapshotName) + + if snapshot.Spec.SnapshotContentName != "" { + contentObj, found, err := ctrl.contentStore.GetByKey(snapshot.Spec.SnapshotContentName) + if err != nil { + return err + } + if !found { + // snapshot is bound to a non-existing content. + ctrl.updateSnapshotErrorStatusWithEvent(snapshot, v1.EventTypeWarning, "SnapshotContentMissing", "VolumeSnapshotContent is missing") + klog.V(4).Infof("synchronizing unready snapshot[%s]: snapshotcontent %q requested and not found, will try again next time", uniqueSnapshotName, snapshot.Spec.SnapshotContentName) + return fmt.Errorf("snapshot %s is bound to a non-existing content %s", uniqueSnapshotName, snapshot.Spec.SnapshotContentName) + } + content, ok := contentObj.(*crdv1.VolumeSnapshotContent) + if !ok { + return fmt.Errorf("expected volume snapshot content, got %+v", contentObj) + } + contentBound, err := ctrl.checkandBindSnapshotContent(snapshot, content) + if err != nil { + // snapshot is bound but content is not bound to snapshot correctly + ctrl.updateSnapshotErrorStatusWithEvent(snapshot, v1.EventTypeWarning, "SnapshotBindFailed", fmt.Sprintf("Snapshot failed to bind VolumeSnapshotContent, %v", err)) + return fmt.Errorf("snapshot %s is bound, but VolumeSnapshotContent %s is not bound to the VolumeSnapshot correctly, %v", uniqueSnapshotName, content.Name, err) + } + // snapshot is already bound correctly, check the status and update if it is ready. + klog.V(5).Infof("Check and update snapshot %s status", uniqueSnapshotName) + if err = ctrl.checkandUpdateBoundSnapshotStatus(snapshot, contentBound); err != nil { + return err + } + return nil + } else { // snapshot.Spec.SnapshotContentName == nil + if contentObj := ctrl.getMatchSnapshotContent(snapshot); contentObj != nil { + klog.V(5).Infof("Find VolumeSnapshotContent object %s for snapshot %s", contentObj.Name, uniqueSnapshotName) + newSnapshot, err := ctrl.bindandUpdateVolumeSnapshot(contentObj, snapshot) + if err != nil { + return err + } + klog.V(5).Infof("bindandUpdateVolumeSnapshot %v", newSnapshot) + return nil + } else if snapshot.Status.Error == nil || isControllerUpdateFailError(snapshot.Status.Error) { // Try to create snapshot if no error status is set + if err := ctrl.createSnapshot(snapshot); err != nil { + ctrl.updateSnapshotErrorStatusWithEvent(snapshot, v1.EventTypeWarning, "SnapshotCreationFailed", fmt.Sprintf("Failed to create snapshot with error %v", err)) + return err + } + return nil + } + return nil + } +} + +// getMatchSnapshotContent looks up VolumeSnapshotContent for a VolumeSnapshot named snapshotName +func (ctrl *csiSnapshotController) getMatchSnapshotContent(snapshot *crdv1.VolumeSnapshot) *crdv1.VolumeSnapshotContent { + var snapshotContentObj *crdv1.VolumeSnapshotContent + var found bool + + objs := ctrl.contentStore.List() + for _, obj := range objs { + content := obj.(*crdv1.VolumeSnapshotContent) + if content.Spec.VolumeSnapshotRef != nil && + content.Spec.VolumeSnapshotRef.Name == snapshot.Name && + content.Spec.VolumeSnapshotRef.Namespace == snapshot.Namespace && + content.Spec.VolumeSnapshotRef.UID == snapshot.UID && + content.Spec.VolumeSnapshotClassName != nil && snapshot.Spec.VolumeSnapshotClassName != nil && + 
*(content.Spec.VolumeSnapshotClassName) == *(snapshot.Spec.VolumeSnapshotClassName) { + found = true + snapshotContentObj = content + break + } + } + + if !found { + klog.V(4).Infof("No VolumeSnapshotContent for VolumeSnapshot %s found", snapshotKey(snapshot)) + return nil + } + + return snapshotContentObj +} + +// deleteSnapshotContent starts delete action. +func (ctrl *csiSnapshotController) deleteSnapshotContent(content *crdv1.VolumeSnapshotContent) { + operationName := fmt.Sprintf("delete-%s[%s]", content.Name, string(content.UID)) + klog.V(5).Infof("Snapshotter is about to delete volume snapshot content and the operation named %s", operationName) + ctrl.scheduleOperation(operationName, func() error { + return ctrl.deleteSnapshotContentOperation(content) + }) +} + +// scheduleOperation starts given asynchronous operation on given volume. It +// makes sure the operation is already not running. +func (ctrl *csiSnapshotController) scheduleOperation(operationName string, operation func() error) { + klog.V(5).Infof("scheduleOperation[%s]", operationName) + + err := ctrl.runningOperations.Run(operationName, operation) + if err != nil { + switch { + case goroutinemap.IsAlreadyExists(err): + klog.V(4).Infof("operation %q is already running, skipping", operationName) + case exponentialbackoff.IsExponentialBackoff(err): + klog.V(4).Infof("operation %q postponed due to exponential backoff", operationName) + default: + klog.Errorf("error scheduling operation %q: %v", operationName, err) + } + } +} + +func (ctrl *csiSnapshotController) storeSnapshotUpdate(snapshot interface{}) (bool, error) { + return storeObjectUpdate(ctrl.snapshotStore, snapshot, "snapshot") +} + +func (ctrl *csiSnapshotController) storeContentUpdate(content interface{}) (bool, error) { + return storeObjectUpdate(ctrl.contentStore, content, "content") +} + +// createSnapshot starts new asynchronous operation to create snapshot +func (ctrl *csiSnapshotController) createSnapshot(snapshot *crdv1.VolumeSnapshot) error { + klog.V(5).Infof("createSnapshot[%s]: started", snapshotKey(snapshot)) + opName := fmt.Sprintf("create-%s[%s]", snapshotKey(snapshot), string(snapshot.UID)) + ctrl.scheduleOperation(opName, func() error { + snapshotObj, err := ctrl.createSnapshotOperation(snapshot) + if err != nil { + ctrl.updateSnapshotErrorStatusWithEvent(snapshot, v1.EventTypeWarning, "SnapshotCreationFailed", fmt.Sprintf("Failed to create snapshot: %v", err)) + klog.Errorf("createSnapshot [%s]: error occurred in createSnapshotOperation: %v", opName, err) + return err + } + _, updateErr := ctrl.storeSnapshotUpdate(snapshotObj) + if updateErr != nil { + // We will get an "snapshot update" event soon, this is not a big error + klog.V(4).Infof("createSnapshot [%s]: cannot update internal cache: %v", snapshotKey(snapshotObj), updateErr) + } + return nil + }) + return nil +} + +func (ctrl *csiSnapshotController) checkandUpdateBoundSnapshotStatus(snapshot *crdv1.VolumeSnapshot, content *crdv1.VolumeSnapshotContent) error { + klog.V(5).Infof("checkandUpdateSnapshotStatus[%s] started", snapshotKey(snapshot)) + opName := fmt.Sprintf("check-%s[%s]", snapshotKey(snapshot), string(snapshot.UID)) + ctrl.scheduleOperation(opName, func() error { + snapshotObj, err := ctrl.checkandUpdateBoundSnapshotStatusOperation(snapshot, content) + if err != nil { + ctrl.updateSnapshotErrorStatusWithEvent(snapshot, v1.EventTypeWarning, "SnapshotCheckandUpdateFailed", fmt.Sprintf("Failed to check and update snapshot: %v", err)) + klog.Errorf("checkandUpdateSnapshotStatus [%s]: 
error occured %v", snapshotKey(snapshot), err) + return err + } + _, updateErr := ctrl.storeSnapshotUpdate(snapshotObj) + if updateErr != nil { + // We will get an "snapshot update" event soon, this is not a big error + klog.V(4).Infof("checkandUpdateSnapshotStatus [%s]: cannot update internal cache: %v", snapshotKey(snapshotObj), updateErr) + } + + return nil + }) + return nil +} + +// updateSnapshotStatusWithEvent saves new snapshot.Status to API server and emits +// given event on the snapshot. It saves the status and emits the event only when +// the status has actually changed from the version saved in API server. +// Parameters: +// snapshot - snapshot to update +// eventtype, reason, message - event to send, see EventRecorder.Event() +func (ctrl *csiSnapshotController) updateSnapshotErrorStatusWithEvent(snapshot *crdv1.VolumeSnapshot, eventtype, reason, message string) error { + klog.V(5).Infof("updateSnapshotStatusWithEvent[%s]", snapshotKey(snapshot)) + + if snapshot.Status.Error != nil && snapshot.Status.Error.Message == message { + klog.V(4).Infof("updateSnapshotStatusWithEvent[%s]: the same error %v is already set", snapshot.Name, snapshot.Status.Error) + return nil + } + snapshotClone := snapshot.DeepCopy() + statusError := &storage.VolumeError{ + Time: metav1.Time{ + Time: time.Now(), + }, + Message: message, + } + snapshotClone.Status.Error = statusError + + snapshotClone.Status.ReadyToUse = false + newSnapshot, err := ctrl.clientset.SnapshotV1alpha1().VolumeSnapshots(snapshotClone.Namespace).Update(snapshotClone) + if err != nil { + klog.V(4).Infof("updating VolumeSnapshot[%s] error status failed %v", snapshotKey(snapshot), err) + return err + } + + _, err = ctrl.storeSnapshotUpdate(newSnapshot) + if err != nil { + klog.V(4).Infof("updating VolumeSnapshot[%s] error status: cannot update internal cache %v", snapshotKey(snapshot), err) + return err + } + // Emit the event only when the status change happens + ctrl.eventRecorder.Event(newSnapshot, eventtype, reason, message) + + return nil +} + +// Stateless functions +func getSnapshotStatusForLogging(snapshot *crdv1.VolumeSnapshot) string { + return fmt.Sprintf("bound to: %q, Completed: %v", snapshot.Spec.SnapshotContentName, snapshot.Status.ReadyToUse) +} + +// IsSnapshotBound returns true/false if snapshot is bound +func IsSnapshotBound(snapshot *crdv1.VolumeSnapshot, content *crdv1.VolumeSnapshotContent) bool { + if content.Spec.VolumeSnapshotRef != nil && content.Spec.VolumeSnapshotRef.Name == snapshot.Name && + content.Spec.VolumeSnapshotRef.UID == snapshot.UID { + return true + } + return false +} + +// isSnapshotConentBeingUsed checks if snapshot content is bound to snapshot. +func (ctrl *csiSnapshotController) isSnapshotContentBeingUsed(content *crdv1.VolumeSnapshotContent) bool { + if content.Spec.VolumeSnapshotRef != nil { + snapshotObj, err := ctrl.clientset.SnapshotV1alpha1().VolumeSnapshots(content.Spec.VolumeSnapshotRef.Namespace).Get(content.Spec.VolumeSnapshotRef.Name, metav1.GetOptions{}) + if err != nil { + klog.Infof("isSnapshotContentBeingUsed: Cannot get snapshot %s from api server: [%v]. 
VolumeSnapshot object may be deleted already.", content.Spec.VolumeSnapshotRef.Name, err) + return false + } + + // Check if the snapshot content is bound to the snapshot + if IsSnapshotBound(snapshotObj, content) && snapshotObj.Spec.SnapshotContentName == content.Name { + klog.Infof("isSnapshotContentBeingUsed: VolumeSnapshot %s is bound to volumeSnapshotContent [%s]", snapshotObj.Name, content.Name) + return true + } + } + + klog.V(5).Infof("isSnapshotContentBeingUsed: Snapshot content %s is not being used", content.Name) + return false +} + +// isVolumeBeingCreatedFromSnapshot checks if an volume is being created from the snapshot. +func (ctrl *csiSnapshotController) isVolumeBeingCreatedFromSnapshot(snapshot *crdv1.VolumeSnapshot) bool { + pvcList, err := ctrl.pvcLister.PersistentVolumeClaims(snapshot.Namespace).List(labels.Everything()) + if err != nil { + klog.Errorf("Failed to retrieve PVCs from the lister to check if volume snapshot %s is being used by a volume: %q", snapshotKey(snapshot), err) + return false + } + for _, pvc := range pvcList { + if pvc.Spec.DataSource != nil && len(pvc.Spec.DataSource.Name) > 0 && pvc.Spec.DataSource.Name == snapshot.Name { + if pvc.Spec.DataSource.Kind == snapshotKind && *(pvc.Spec.DataSource.APIGroup) == snapshotAPIGroup { + if pvc.Status.Phase == v1.ClaimPending { + // A volume is being created from the snapshot + klog.Infof("isVolumeBeingCreatedFromSnapshot: volume %s is being created from snapshot %s", pvc.Name, pvc.Spec.DataSource.Name) + return true + } + } + } + } + klog.V(5).Infof("isVolumeBeingCreatedFromSnapshot: no volume is being created from snapshot %s", snapshotKey(snapshot)) + return false +} + +// The function checks whether the volumeSnapshotRef in snapshot content matches the given snapshot. 
If match, it binds the content with the snapshot +func (ctrl *csiSnapshotController) checkandBindSnapshotContent(snapshot *crdv1.VolumeSnapshot, content *crdv1.VolumeSnapshotContent) (*crdv1.VolumeSnapshotContent, error) { + if content.Spec.VolumeSnapshotRef == nil || content.Spec.VolumeSnapshotRef.Name != snapshot.Name { + return nil, fmt.Errorf("Could not bind snapshot %s and content %s, the VolumeSnapshotRef does not match", snapshot.Name, content.Name) + } else if content.Spec.VolumeSnapshotRef.UID != "" && content.Spec.VolumeSnapshotRef.UID != snapshot.UID { + return nil, fmt.Errorf("Could not bind snapshot %s and content %s, the VolumeSnapshotRef does not match", snapshot.Name, content.Name) + } else if content.Spec.VolumeSnapshotRef.UID != "" && content.Spec.VolumeSnapshotClassName != nil { + return content, nil + } + contentClone := content.DeepCopy() + contentClone.Spec.VolumeSnapshotRef.UID = snapshot.UID + className := *(snapshot.Spec.VolumeSnapshotClassName) + contentClone.Spec.VolumeSnapshotClassName = &className + newContent, err := ctrl.clientset.SnapshotV1alpha1().VolumeSnapshotContents().Update(contentClone) + if err != nil { + klog.V(4).Infof("updating VolumeSnapshotContent[%s] error status failed %v", newContent.Name, err) + return nil, err + } + _, err = ctrl.storeContentUpdate(newContent) + if err != nil { + klog.V(4).Infof("updating VolumeSnapshotContent[%s] error status: cannot update internal cache %v", newContent.Name, err) + return nil, err + } + return newContent, nil +} + +func (ctrl *csiSnapshotController) getCreateSnapshotInput(snapshot *crdv1.VolumeSnapshot) (*crdv1.VolumeSnapshotClass, *v1.PersistentVolume, string, map[string]string, error) { + className := snapshot.Spec.VolumeSnapshotClassName + klog.V(5).Infof("getCreateSnapshotInput [%s]: VolumeSnapshotClassName [%s]", snapshot.Name, *className) + var class *crdv1.VolumeSnapshotClass + var err error + if className != nil { + class, err = ctrl.GetSnapshotClass(*className) + if err != nil { + klog.Errorf("getCreateSnapshotInput failed to getClassFromVolumeSnapshot %s", err) + return nil, nil, "", nil, err + } + } else { + klog.Errorf("failed to getCreateSnapshotInput %s without a snapshot class", snapshot.Name) + return nil, nil, "", nil, fmt.Errorf("failed to take snapshot %s without a snapshot class", snapshot.Name) + } + + volume, err := ctrl.getVolumeFromVolumeSnapshot(snapshot) + if err != nil { + klog.Errorf("getCreateSnapshotInput failed to get PersistentVolume object [%s]: Error: [%#v]", snapshot.Name, err) + return nil, nil, "", nil, err + } + + // Create VolumeSnapshotContent name + contentName := GetSnapshotContentNameForSnapshot(snapshot) + + // Resolve snapshotting secret credentials. 
+ snapshotterSecretRef, err := getSecretReference(class.Parameters, contentName, snapshot) + if err != nil { + return nil, nil, "", nil, err + } + snapshotterCredentials, err := getCredentials(ctrl.client, snapshotterSecretRef) + if err != nil { + return nil, nil, "", nil, err + } + + return class, volume, contentName, snapshotterCredentials, nil +} + +func (ctrl *csiSnapshotController) checkandUpdateBoundSnapshotStatusOperation(snapshot *crdv1.VolumeSnapshot, content *crdv1.VolumeSnapshotContent) (*crdv1.VolumeSnapshot, error) { + var err error + var timestamp int64 + var size int64 + var readyToUse = false + var driverName string + var snapshotID string + + if snapshot.Spec.Source == nil { + klog.V(5).Infof("checkandUpdateBoundSnapshotStatusOperation: checking whether snapshot [%s] is pre-bound to content [%s]", snapshot.Name, content.Name) + readyToUse, timestamp, size, err = ctrl.handler.GetSnapshotStatus(content) + if err != nil { + klog.Errorf("checkandUpdateBoundSnapshotStatusOperation: failed to call get snapshot status to check whether snapshot is ready to use %q", err) + return nil, err + } + if content.Spec.CSI != nil { + driverName, snapshotID = content.Spec.CSI.Driver, content.Spec.CSI.SnapshotHandle + } + } else { + class, volume, _, snapshotterCredentials, err := ctrl.getCreateSnapshotInput(snapshot) + if err != nil { + return nil, fmt.Errorf("failed to get input parameters to create snapshot %s: %q", snapshot.Name, err) + } + driverName, snapshotID, timestamp, size, readyToUse, err = ctrl.handler.CreateSnapshot(snapshot, volume, class.Parameters, snapshotterCredentials) + if err != nil { + klog.Errorf("checkandUpdateBoundSnapshotStatusOperation: failed to call create snapshot to check whether the snapshot is ready to use %q", err) + return nil, err + } + } + klog.V(5).Infof("checkandUpdateBoundSnapshotStatusOperation: driver %s, snapshotId %s, timestamp %d, size %d, readyToUse %t", driverName, snapshotID, timestamp, size, readyToUse) + + if timestamp == 0 { + timestamp = time.Now().UnixNano() + } + newSnapshot, err := ctrl.updateSnapshotStatus(snapshot, readyToUse, timestamp, size, IsSnapshotBound(snapshot, content)) + if err != nil { + return nil, err + } + err = ctrl.updateSnapshotContentSize(content, size) + if err != nil { + return nil, err + } + return newSnapshot, nil +} + +// The function goes through the whole snapshot creation process. +// 1. Trigger the snapshot through csi storage provider. +// 2. Update VolumeSnapshot status with creationtimestamp information +// 3. Create the VolumeSnapshotContent object with the snapshot id information. +// 4. Bind the VolumeSnapshot and VolumeSnapshotContent object +func (ctrl *csiSnapshotController) createSnapshotOperation(snapshot *crdv1.VolumeSnapshot) (*crdv1.VolumeSnapshot, error) { + klog.Infof("createSnapshot: Creating snapshot %s through the plugin ...", snapshotKey(snapshot)) + + if snapshot.Status.Error != nil && !isControllerUpdateFailError(snapshot.Status.Error) { + klog.V(4).Infof("error is already set in snapshot, do not retry to create: %s", snapshot.Status.Error.Message) + return snapshot, nil + } + + // If PVC is not being deleted and finalizer is not added yet, a finalizer should be added. 
+ klog.V(5).Infof("createSnapshotOperation: Check if PVC is not being deleted and add Finalizer for source of snapshot [%s] if needed", snapshot.Name) + err := ctrl.ensureSnapshotSourceFinalizer(snapshot) + if err != nil { + klog.Errorf("createSnapshotOperation failed to add finalizer for source of snapshot %s", err) + return nil, err + } + + class, volume, contentName, snapshotterCredentials, err := ctrl.getCreateSnapshotInput(snapshot) + if err != nil { + return nil, fmt.Errorf("failed to get input parameters to create snapshot %s: %q", snapshot.Name, err) + } + + driverName, snapshotID, timestamp, size, readyToUse, err := ctrl.handler.CreateSnapshot(snapshot, volume, class.Parameters, snapshotterCredentials) + if err != nil { + return nil, fmt.Errorf("failed to take snapshot of the volume, %s: %q", volume.Name, err) + } + klog.V(5).Infof("Created snapshot: driver %s, snapshotId %s, timestamp %d, size %d, readyToUse %t", driverName, snapshotID, timestamp, size, readyToUse) + + var newSnapshot *crdv1.VolumeSnapshot + // Update snapshot status with timestamp + for i := 0; i < ctrl.createSnapshotContentRetryCount; i++ { + klog.V(5).Infof("createSnapshot [%s]: trying to update snapshot creation timestamp", snapshotKey(snapshot)) + newSnapshot, err = ctrl.updateSnapshotStatus(snapshot, readyToUse, timestamp, size, false) + if err == nil { + break + } + klog.V(4).Infof("failed to update snapshot %s creation timestamp: %v", snapshotKey(snapshot), err) + } + + if err != nil { + return nil, err + } + // Create VolumeSnapshotContent in the database + volumeRef, err := ref.GetReference(scheme.Scheme, volume) + if err != nil { + return nil, err + } + snapshotRef, err := ref.GetReference(scheme.Scheme, snapshot) + if err != nil { + return nil, err + } + + if class.DeletionPolicy == nil { + class.DeletionPolicy = new(crdv1.DeletionPolicy) + *class.DeletionPolicy = crdv1.VolumeSnapshotContentDelete + } + snapshotContent := &crdv1.VolumeSnapshotContent{ + ObjectMeta: metav1.ObjectMeta{ + Name: contentName, + }, + Spec: crdv1.VolumeSnapshotContentSpec{ + VolumeSnapshotRef: snapshotRef, + PersistentVolumeRef: volumeRef, + VolumeSnapshotSource: crdv1.VolumeSnapshotSource{ + CSI: &crdv1.CSIVolumeSnapshotSource{ + Driver: driverName, + SnapshotHandle: snapshotID, + CreationTime: ×tamp, + RestoreSize: &size, + }, + }, + VolumeSnapshotClassName: &(class.Name), + DeletionPolicy: class.DeletionPolicy, + }, + } + klog.V(3).Infof("volume snapshot content %v", snapshotContent) + // Try to create the VolumeSnapshotContent object several times + for i := 0; i < ctrl.createSnapshotContentRetryCount; i++ { + klog.V(5).Infof("createSnapshot [%s]: trying to save volume snapshot content %s", snapshotKey(snapshot), snapshotContent.Name) + if _, err = ctrl.clientset.SnapshotV1alpha1().VolumeSnapshotContents().Create(snapshotContent); err == nil || apierrs.IsAlreadyExists(err) { + // Save succeeded. + if err != nil { + klog.V(3).Infof("volume snapshot content %q for snapshot %q already exists, reusing", snapshotContent.Name, snapshotKey(snapshot)) + err = nil + } else { + klog.V(3).Infof("volume snapshot content %q for snapshot %q saved, %v", snapshotContent.Name, snapshotKey(snapshot), snapshotContent) + } + break + } + // Save failed, try again after a while. + klog.V(3).Infof("failed to save volume snapshot content %q for snapshot %q: %v", snapshotContent.Name, snapshotKey(snapshot), err) + time.Sleep(ctrl.createSnapshotContentInterval) + } + + if err != nil { + // Save failed. 
Now we have a snapshot asset outside of Kubernetes, + // but we don't have appropriate volumesnapshot content object for it. + // Emit some event here and controller should try to create the content in next sync period. + strerr := fmt.Sprintf("Error creating volume snapshot content object for snapshot %s: %v.", snapshotKey(snapshot), err) + klog.Error(strerr) + ctrl.eventRecorder.Event(newSnapshot, v1.EventTypeWarning, "CreateSnapshotContentFailed", strerr) + return nil, newControllerUpdateError(snapshotKey(snapshot), err.Error()) + } + + // save succeeded, bind and update status for snapshot. + result, err := ctrl.bindandUpdateVolumeSnapshot(snapshotContent, newSnapshot) + if err != nil { + return nil, err + } + return result, nil +} + +// Delete a snapshot +// 1. Find the SnapshotContent corresponding to Snapshot +// 1a: Not found => finish (it's been deleted already) +// 2. Ask the backend to remove the snapshot device +// 3. Delete the SnapshotContent object +// 4. Remove the Snapshot from store +// 5. Finish +func (ctrl *csiSnapshotController) deleteSnapshotContentOperation(content *crdv1.VolumeSnapshotContent) error { + klog.V(5).Infof("deleteSnapshotOperation [%s] started", content.Name) + + // get secrets if VolumeSnapshotClass specifies it + var snapshotterCredentials map[string]string + snapshotClassName := content.Spec.VolumeSnapshotClassName + if snapshotClassName != nil { + if snapshotClass, err := ctrl.classLister.Get(*snapshotClassName); err == nil { + // Resolve snapshotting secret credentials. + // No VolumeSnapshot is provided when resolving delete secret names, since the VolumeSnapshot may or may not exist at delete time. + snapshotterSecretRef, err := getSecretReference(snapshotClass.Parameters, content.Name, nil) + if err != nil { + return err + } + snapshotterCredentials, err = getCredentials(ctrl.client, snapshotterSecretRef) + if err != nil { + return err + } + } + } + + err := ctrl.handler.DeleteSnapshot(content, snapshotterCredentials) + if err != nil { + ctrl.eventRecorder.Event(content, v1.EventTypeWarning, "SnapshotDeleteError", "Failed to delete snapshot") + return fmt.Errorf("failed to delete snapshot %#v, err: %v", content.Name, err) + } + + err = ctrl.clientset.SnapshotV1alpha1().VolumeSnapshotContents().Delete(content.Name, &metav1.DeleteOptions{}) + if err != nil { + ctrl.eventRecorder.Event(content, v1.EventTypeWarning, "SnapshotContentObjectDeleteError", "Failed to delete snapshot content API object") + return fmt.Errorf("failed to delete VolumeSnapshotContent %s from API server: %q", content.Name, err) + } + + return nil +} + +func (ctrl *csiSnapshotController) bindandUpdateVolumeSnapshot(snapshotContent *crdv1.VolumeSnapshotContent, snapshot *crdv1.VolumeSnapshot) (*crdv1.VolumeSnapshot, error) { + klog.V(5).Infof("bindandUpdateVolumeSnapshot for snapshot [%s]: snapshotContent [%s]", snapshot.Name, snapshotContent.Name) + snapshotObj, err := ctrl.clientset.SnapshotV1alpha1().VolumeSnapshots(snapshot.Namespace).Get(snapshot.Name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("error get snapshot %s from api server: %v", snapshotKey(snapshot), err) + } + + // Copy the snapshot object before updating it + snapshotCopy := snapshotObj.DeepCopy() + + if snapshotObj.Spec.SnapshotContentName == snapshotContent.Name { + klog.Infof("bindVolumeSnapshotContentToVolumeSnapshot: VolumeSnapshot %s already bind to volumeSnapshotContent [%s]", snapshot.Name, snapshotContent.Name) + } else { + klog.Infof("bindVolumeSnapshotContentToVolumeSnapshot: 
before bind VolumeSnapshot %s to volumeSnapshotContent [%s]", snapshot.Name, snapshotContent.Name) + snapshotCopy.Spec.SnapshotContentName = snapshotContent.Name + updateSnapshot, err := ctrl.clientset.SnapshotV1alpha1().VolumeSnapshots(snapshot.Namespace).Update(snapshotCopy) + if err != nil { + klog.Infof("bindVolumeSnapshotContentToVolumeSnapshot: Error binding VolumeSnapshot %s to volumeSnapshotContent [%s]. Error [%#v]", snapshot.Name, snapshotContent.Name, err) + return nil, newControllerUpdateError(snapshotKey(snapshot), err.Error()) + } + snapshotCopy = updateSnapshot + _, err = ctrl.storeSnapshotUpdate(snapshotCopy) + if err != nil { + klog.Errorf("%v", err) + } + } + + klog.V(5).Infof("bindandUpdateVolumeSnapshot for snapshot completed [%#v]", snapshotCopy) + return snapshotCopy, nil +} + +// updateSnapshotContentSize update the restore size for snapshot content +func (ctrl *csiSnapshotController) updateSnapshotContentSize(content *crdv1.VolumeSnapshotContent, size int64) error { + if content.Spec.VolumeSnapshotSource.CSI == nil || size <= 0 { + return nil + } + if content.Spec.VolumeSnapshotSource.CSI.RestoreSize != nil && *content.Spec.VolumeSnapshotSource.CSI.RestoreSize == size { + return nil + } + contentClone := content.DeepCopy() + contentClone.Spec.VolumeSnapshotSource.CSI.RestoreSize = &size + _, err := ctrl.clientset.SnapshotV1alpha1().VolumeSnapshotContents().Update(contentClone) + if err != nil { + return newControllerUpdateError(content.Name, err.Error()) + } + + _, err = ctrl.storeContentUpdate(contentClone) + if err != nil { + klog.Errorf("failed to update content store %v", err) + } + return nil +} + +// UpdateSnapshotStatus converts snapshot status to crdv1.VolumeSnapshotCondition +func (ctrl *csiSnapshotController) updateSnapshotStatus(snapshot *crdv1.VolumeSnapshot, readyToUse bool, createdAt, size int64, bound bool) (*crdv1.VolumeSnapshot, error) { + klog.V(5).Infof("updating VolumeSnapshot[]%s, readyToUse %v, timestamp %v", snapshotKey(snapshot), readyToUse, createdAt) + status := snapshot.Status + change := false + timeAt := &metav1.Time{ + Time: time.Unix(0, createdAt), + } + + snapshotClone := snapshot.DeepCopy() + if readyToUse { + if bound { + status.ReadyToUse = true + // Remove the error if checking snapshot is already bound and ready + status.Error = nil + change = true + } + } + if status.CreationTime == nil { + status.CreationTime = timeAt + change = true + } + + if change { + if size > 0 { + status.RestoreSize = resource.NewQuantity(size, resource.BinarySI) + } + snapshotClone.Status = status + newSnapshotObj, err := ctrl.clientset.SnapshotV1alpha1().VolumeSnapshots(snapshotClone.Namespace).Update(snapshotClone) + if err != nil { + return nil, newControllerUpdateError(snapshotKey(snapshot), err.Error()) + } + return newSnapshotObj, nil + + } + return snapshot, nil +} + +// getVolumeFromVolumeSnapshot is a helper function to get PV from VolumeSnapshot. 
+func (ctrl *csiSnapshotController) getVolumeFromVolumeSnapshot(snapshot *crdv1.VolumeSnapshot) (*v1.PersistentVolume, error) { + pvc, err := ctrl.getClaimFromVolumeSnapshot(snapshot) + if err != nil { + return nil, err + } + + if pvc.Status.Phase != v1.ClaimBound { + return nil, fmt.Errorf("the PVC %s is not yet bound to a PV, will not attempt to take a snapshot", pvc.Name) + } + + pvName := pvc.Spec.VolumeName + pv, err := ctrl.client.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to retrieve PV %s from the API server: %q", pvName, err) + } + + klog.V(5).Infof("getVolumeFromVolumeSnapshot: snapshot [%s] PV name [%s]", snapshot.Name, pvName) + + return pv, nil +} + +func (ctrl *csiSnapshotController) getStorageClassFromVolumeSnapshot(snapshot *crdv1.VolumeSnapshot) (*storagev1.StorageClass, error) { + // Get storage class from PVC or PV + pvc, err := ctrl.getClaimFromVolumeSnapshot(snapshot) + if err != nil { + return nil, err + } + storageclassName := *pvc.Spec.StorageClassName + if len(storageclassName) == 0 { + volume, err := ctrl.getVolumeFromVolumeSnapshot(snapshot) + if err != nil { + return nil, err + } + storageclassName = volume.Spec.StorageClassName + } + if len(storageclassName) == 0 { + return nil, fmt.Errorf("cannot figure out the snapshot class automatically, please specify one in snapshot spec") + } + storageclass, err := ctrl.client.StorageV1().StorageClasses().Get(storageclassName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return storageclass, nil +} + +// GetSnapshotClass is a helper function to get snapshot class from the class name. +func (ctrl *csiSnapshotController) GetSnapshotClass(className string) (*crdv1.VolumeSnapshotClass, error) { + klog.V(5).Infof("getSnapshotClass: VolumeSnapshotClassName [%s]", className) + + class, err := ctrl.classLister.Get(className) + if err != nil { + klog.Errorf("failed to retrieve snapshot class %s from the informer: %q", className, err) + return nil, fmt.Errorf("failed to retrieve snapshot class %s from the informer: %q", className, err) + } + + return class, nil +} + +// SetDefaultSnapshotClass is a helper function to figure out the default snapshot class from +// PVC/PV StorageClass and update VolumeSnapshot with this snapshot class name. 
+func (ctrl *csiSnapshotController) SetDefaultSnapshotClass(snapshot *crdv1.VolumeSnapshot) (*crdv1.VolumeSnapshotClass, *crdv1.VolumeSnapshot, error) { + klog.V(5).Infof("SetDefaultSnapshotClass for snapshot [%s]", snapshot.Name) + + storageclass, err := ctrl.getStorageClassFromVolumeSnapshot(snapshot) + if err != nil { + return nil, nil, err + } + // Find default snapshot class if available + list, err := ctrl.classLister.List(labels.Everything()) + if err != nil { + return nil, nil, err + } + defaultClasses := []*crdv1.VolumeSnapshotClass{} + + for _, class := range list { + if IsDefaultAnnotation(class.ObjectMeta) && storageclass.Provisioner == class.Snapshotter && ctrl.snapshotterName == class.Snapshotter { + defaultClasses = append(defaultClasses, class) + klog.V(5).Infof("get defaultClass added: %s", class.Name) + } + } + if len(defaultClasses) == 0 { + return nil, nil, fmt.Errorf("cannot find default snapshot class") + } + if len(defaultClasses) > 1 { + klog.V(4).Infof("get DefaultClass %d defaults found", len(defaultClasses)) + return nil, nil, fmt.Errorf("%d default snapshot classes were found", len(defaultClasses)) + } + klog.V(5).Infof("setDefaultSnapshotClass [%s]: default VolumeSnapshotClassName [%s]", snapshot.Name, defaultClasses[0].Name) + snapshotClone := snapshot.DeepCopy() + snapshotClone.Spec.VolumeSnapshotClassName = &(defaultClasses[0].Name) + newSnapshot, err := ctrl.clientset.SnapshotV1alpha1().VolumeSnapshots(snapshotClone.Namespace).Update(snapshotClone) + if err != nil { + klog.V(4).Infof("updating VolumeSnapshot[%s] default class failed %v", snapshotKey(snapshot), err) + } + _, updateErr := ctrl.storeSnapshotUpdate(newSnapshot) + if updateErr != nil { + // We will get an "snapshot update" event soon, this is not a big error + klog.V(4).Infof("setDefaultSnapshotClass [%s]: cannot update internal cache: %v", snapshotKey(snapshot), updateErr) + } + + return defaultClasses[0], newSnapshot, nil +} + +// getClaimFromVolumeSnapshot is a helper function to get PVC from VolumeSnapshot. +func (ctrl *csiSnapshotController) getClaimFromVolumeSnapshot(snapshot *crdv1.VolumeSnapshot) (*v1.PersistentVolumeClaim, error) { + if snapshot.Spec.Source == nil { + return nil, fmt.Errorf("the snapshot source is not specified") + } + if snapshot.Spec.Source.Kind != pvcKind { + return nil, fmt.Errorf("the snapshot source is not the right type. Expected %s, Got %v", pvcKind, snapshot.Spec.Source.Kind) + } + pvcName := snapshot.Spec.Source.Name + if pvcName == "" { + return nil, fmt.Errorf("the PVC name is not specified in snapshot %s", snapshotKey(snapshot)) + } + if snapshot.Spec.Source.APIGroup != nil && *(snapshot.Spec.Source.APIGroup) != apiGroup { + return nil, fmt.Errorf("the snapshot source does not have the right APIGroup. 
Expected empty string, Got %s", *(snapshot.Spec.Source.APIGroup)) + } + + pvc, err := ctrl.pvcLister.PersistentVolumeClaims(snapshot.Namespace).Get(pvcName) + if err != nil { + return nil, fmt.Errorf("failed to retrieve PVC %s from the lister: %q", pvcName, err) + } + + return pvc, nil +} + +var _ error = controllerUpdateError{} + +type controllerUpdateError struct { + message string +} + +func newControllerUpdateError(name, message string) error { + return controllerUpdateError{ + message: fmt.Sprintf("%s %s on API server: %s", controllerUpdateFailMsg, name, message), + } +} + +func (e controllerUpdateError) Error() string { + return e.message +} + +func isControllerUpdateFailError(err *storage.VolumeError) bool { + if err != nil { + if strings.Contains(err.Message, controllerUpdateFailMsg) { + return true + } + } + return false +} + +// addContentFinalizer adds a Finalizer for VolumeSnapshotContent. +func (ctrl *csiSnapshotController) addContentFinalizer(content *crdv1.VolumeSnapshotContent) error { + contentClone := content.DeepCopy() + contentClone.ObjectMeta.Finalizers = append(contentClone.ObjectMeta.Finalizers, VolumeSnapshotContentFinalizer) + + _, err := ctrl.clientset.SnapshotV1alpha1().VolumeSnapshotContents().Update(contentClone) + if err != nil { + return newControllerUpdateError(content.Name, err.Error()) + } + + _, err = ctrl.storeContentUpdate(contentClone) + if err != nil { + klog.Errorf("failed to update content store %v", err) + } + + klog.V(5).Infof("Added protection finalizer to volume snapshot content %s", content.Name) + return nil +} + +// removeContentFinalizer removes a Finalizer for VolumeSnapshotContent. +func (ctrl *csiSnapshotController) removeContentFinalizer(content *crdv1.VolumeSnapshotContent) error { + contentClone := content.DeepCopy() + contentClone.ObjectMeta.Finalizers = slice.RemoveString(contentClone.ObjectMeta.Finalizers, VolumeSnapshotContentFinalizer, nil) + + _, err := ctrl.clientset.SnapshotV1alpha1().VolumeSnapshotContents().Update(contentClone) + if err != nil { + return newControllerUpdateError(content.Name, err.Error()) + } + + _, err = ctrl.storeContentUpdate(contentClone) + if err != nil { + klog.Errorf("failed to update content store %v", err) + } + + klog.V(5).Infof("Removed protection finalizer from volume snapshot content %s", content.Name) + return nil +} + +// addSnapshotFinalizer adds a Finalizer for VolumeSnapshot. +func (ctrl *csiSnapshotController) addSnapshotFinalizer(snapshot *crdv1.VolumeSnapshot) error { + snapshotClone := snapshot.DeepCopy() + snapshotClone.ObjectMeta.Finalizers = append(snapshotClone.ObjectMeta.Finalizers, VolumeSnapshotFinalizer) + _, err := ctrl.clientset.SnapshotV1alpha1().VolumeSnapshots(snapshotClone.Namespace).Update(snapshotClone) + if err != nil { + return newControllerUpdateError(snapshot.Name, err.Error()) + } + + _, err = ctrl.storeSnapshotUpdate(snapshotClone) + if err != nil { + klog.Errorf("failed to update snapshot store %v", err) + } + + klog.V(5).Infof("Added protection finalizer to volume snapshot %s", snapshotKey(snapshot)) + return nil +} + +// removeContentFinalizer removes a Finalizer for VolumeSnapshot. 
+func (ctrl *csiSnapshotController) removeSnapshotFinalizer(snapshot *crdv1.VolumeSnapshot) error { + snapshotClone := snapshot.DeepCopy() + snapshotClone.ObjectMeta.Finalizers = slice.RemoveString(snapshotClone.ObjectMeta.Finalizers, VolumeSnapshotFinalizer, nil) + + _, err := ctrl.clientset.SnapshotV1alpha1().VolumeSnapshots(snapshotClone.Namespace).Update(snapshotClone) + if err != nil { + return newControllerUpdateError(snapshot.Name, err.Error()) + } + + _, err = ctrl.storeSnapshotUpdate(snapshotClone) + if err != nil { + klog.Errorf("failed to update snapshot store %v", err) + } + + klog.V(5).Infof("Removed protection finalizer from volume snapshot %s", snapshotKey(snapshot)) + return nil +} + +// ensureSnapshotSourceFinalizer checks if a Finalizer needs to be added for the snapshot source; +// if true, adds a Finalizer for VolumeSnapshot Source PVC +func (ctrl *csiSnapshotController) ensureSnapshotSourceFinalizer(snapshot *crdv1.VolumeSnapshot) error { + // Get snapshot source which is a PVC + pvc, err := ctrl.getClaimFromVolumeSnapshot(snapshot) + if err != nil { + klog.Infof("cannot get claim from snapshot [%s]: [%v] Claim may be deleted already.", snapshot.Name, err) + return nil + } + + // If PVC is not being deleted and PVCFinalizer is not added yet, the PVCFinalizer should be added. + if pvc.ObjectMeta.DeletionTimestamp == nil && !slice.ContainsString(pvc.ObjectMeta.Finalizers, PVCFinalizer, nil) { + // Add the finalizer + pvcClone := pvc.DeepCopy() + pvcClone.ObjectMeta.Finalizers = append(pvcClone.ObjectMeta.Finalizers, PVCFinalizer) + _, err = ctrl.client.CoreV1().PersistentVolumeClaims(pvcClone.Namespace).Update(pvcClone) + if err != nil { + klog.Errorf("cannot add finalizer on claim [%s] for snapshot [%s]: [%v]", pvc.Name, snapshot.Name, err) + return newControllerUpdateError(pvcClone.Name, err.Error()) + } + klog.Infof("Added protection finalizer to persistent volume claim %s", pvc.Name) + } + + return nil +} + +// removeSnapshotSourceFinalizer removes a Finalizer for VolumeSnapshot Source PVC. +func (ctrl *csiSnapshotController) removeSnapshotSourceFinalizer(snapshot *crdv1.VolumeSnapshot) error { + // Get snapshot source which is a PVC + pvc, err := ctrl.getClaimFromVolumeSnapshot(snapshot) + if err != nil { + klog.Infof("cannot get claim from snapshot [%s]: [%v] Claim may be deleted already. No need to remove finalizer on the claim.", snapshot.Name, err) + return nil + } + + pvcClone := pvc.DeepCopy() + pvcClone.ObjectMeta.Finalizers = slice.RemoveString(pvcClone.ObjectMeta.Finalizers, PVCFinalizer, nil) + + _, err = ctrl.client.CoreV1().PersistentVolumeClaims(pvcClone.Namespace).Update(pvcClone) + if err != nil { + return newControllerUpdateError(pvcClone.Name, err.Error()) + } + + klog.V(5).Infof("Removed protection finalizer from persistent volume claim %s", pvc.Name) + return nil +} + +// isSnapshotSourceBeingUsed checks if a PVC is being used as a source to create a snapshot +func (ctrl *csiSnapshotController) isSnapshotSourceBeingUsed(snapshot *crdv1.VolumeSnapshot) bool { + klog.V(5).Infof("isSnapshotSourceBeingUsed[%s]: started", snapshotKey(snapshot)) + // Get snapshot source which is a PVC + pvc, err := ctrl.getClaimFromVolumeSnapshot(snapshot) + if err != nil { + klog.Infof("isSnapshotSourceBeingUsed: cannot to get claim from snapshot: %v", err) + return false + } + + // Going through snapshots in the cache (snapshotLister). 
If a snapshot's PVC source + // is the same as the input snapshot's PVC source and snapshot's ReadyToUse status + // is false, the snapshot is still being created from the PVC and the PVC is in-use. + snapshots, err := ctrl.snapshotLister.VolumeSnapshots(snapshot.Namespace).List(labels.Everything()) + if err != nil { + return false + } + for _, snap := range snapshots { + // Skip static bound snapshot without a PVC source + if snap.Spec.Source == nil { + klog.V(4).Infof("Skipping static bound snapshot %s when checking PVC %s/%s", snap.Name, pvc.Namespace, pvc.Name) + continue + } + if pvc.Name == snap.Spec.Source.Name && snap.Status.ReadyToUse == false { + klog.V(2).Infof("Keeping PVC %s/%s, it is used by snapshot %s/%s", pvc.Namespace, pvc.Name, snap.Namespace, snap.Name) + return true + } + } + + klog.V(5).Infof("isSnapshotSourceBeingUsed: no snapshot is being created from PVC %s/%s", pvc.Namespace, pvc.Name) + return false +} + +// checkandRemoveSnapshotSourceFinalizer checks if the snapshot source finalizer should be removed +// and removed it if needed. +func (ctrl *csiSnapshotController) checkandRemoveSnapshotSourceFinalizer(snapshot *crdv1.VolumeSnapshot) error { + // Get snapshot source which is a PVC + pvc, err := ctrl.getClaimFromVolumeSnapshot(snapshot) + if err != nil { + klog.Infof("cannot get claim from snapshot [%s]: [%v] Claim may be deleted already. No need to remove finalizer on the claim.", snapshot.Name, err) + return nil + } + + klog.V(5).Infof("checkandRemoveSnapshotSourceFinalizer for snapshot [%s]: snapshot status [%#v]", snapshot.Name, snapshot.Status) + + // Check if there is a Finalizer on PVC to be removed + if slice.ContainsString(pvc.ObjectMeta.Finalizers, PVCFinalizer, nil) { + // There is a Finalizer on PVC. Check if PVC is used + // and remove finalizer if it's not used. + isUsed := ctrl.isSnapshotSourceBeingUsed(snapshot) + if !isUsed { + klog.Infof("checkandRemoveSnapshotSourceFinalizer[%s]: Remove Finalizer for PVC %s as it is not used by snapshots in creation", snapshot.Name, pvc.Name) + err = ctrl.removeSnapshotSourceFinalizer(snapshot) + if err != nil { + klog.Errorf("checkandRemoveSnapshotSourceFinalizer [%s]: removeSnapshotSourceFinalizer failed to remove finalizer %v", snapshot.Name, err) + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_controller_base.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_controller_base.go new file mode 100644 index 000000000..5458b1bcf --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_controller_base.go @@ -0,0 +1,493 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "fmt" + "time" + + crdv1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + clientset "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned" + storageinformers "github.com/kubernetes-csi/external-snapshotter/pkg/client/informers/externalversions/volumesnapshot/v1alpha1" + storagelisters "github.com/kubernetes-csi/external-snapshotter/pkg/client/listers/volumesnapshot/v1alpha1" + "github.com/kubernetes-csi/external-snapshotter/pkg/snapshotter" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/wait" + coreinformers "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + "k8s.io/kubernetes/pkg/util/goroutinemap" +) + +type csiSnapshotController struct { + clientset clientset.Interface + client kubernetes.Interface + snapshotterName string + eventRecorder record.EventRecorder + snapshotQueue workqueue.RateLimitingInterface + contentQueue workqueue.RateLimitingInterface + + snapshotLister storagelisters.VolumeSnapshotLister + snapshotListerSynced cache.InformerSynced + contentLister storagelisters.VolumeSnapshotContentLister + contentListerSynced cache.InformerSynced + classLister storagelisters.VolumeSnapshotClassLister + classListerSynced cache.InformerSynced + pvcLister corelisters.PersistentVolumeClaimLister + pvcListerSynced cache.InformerSynced + + snapshotStore cache.Store + contentStore cache.Store + + handler Handler + // Map of scheduled/running operations. 
+ runningOperations goroutinemap.GoRoutineMap + + createSnapshotContentRetryCount int + createSnapshotContentInterval time.Duration + resyncPeriod time.Duration +} + +// NewCSISnapshotController returns a new *csiSnapshotController +func NewCSISnapshotController( + clientset clientset.Interface, + client kubernetes.Interface, + snapshotterName string, + volumeSnapshotInformer storageinformers.VolumeSnapshotInformer, + volumeSnapshotContentInformer storageinformers.VolumeSnapshotContentInformer, + volumeSnapshotClassInformer storageinformers.VolumeSnapshotClassInformer, + pvcInformer coreinformers.PersistentVolumeClaimInformer, + createSnapshotContentRetryCount int, + createSnapshotContentInterval time.Duration, + snapshotter snapshotter.Snapshotter, + timeout time.Duration, + resyncPeriod time.Duration, + snapshotNamePrefix string, + snapshotNameUUIDLength int, +) *csiSnapshotController { + broadcaster := record.NewBroadcaster() + broadcaster.StartLogging(klog.Infof) + broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: client.CoreV1().Events(v1.NamespaceAll)}) + var eventRecorder record.EventRecorder + eventRecorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: fmt.Sprintf("csi-snapshotter %s", snapshotterName)}) + + ctrl := &csiSnapshotController{ + clientset: clientset, + client: client, + snapshotterName: snapshotterName, + eventRecorder: eventRecorder, + handler: NewCSIHandler(snapshotter, timeout, snapshotNamePrefix, snapshotNameUUIDLength), + runningOperations: goroutinemap.NewGoRoutineMap(true), + createSnapshotContentRetryCount: createSnapshotContentRetryCount, + createSnapshotContentInterval: createSnapshotContentInterval, + resyncPeriod: resyncPeriod, + snapshotStore: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc), + contentStore: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc), + snapshotQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "csi-snapshotter-snapshot"), + contentQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "csi-snapshotter-content"), + } + + ctrl.pvcLister = pvcInformer.Lister() + ctrl.pvcListerSynced = pvcInformer.Informer().HasSynced + + volumeSnapshotInformer.Informer().AddEventHandlerWithResyncPeriod( + cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { ctrl.enqueueSnapshotWork(obj) }, + UpdateFunc: func(oldObj, newObj interface{}) { ctrl.enqueueSnapshotWork(newObj) }, + DeleteFunc: func(obj interface{}) { ctrl.enqueueSnapshotWork(obj) }, + }, + ctrl.resyncPeriod, + ) + ctrl.snapshotLister = volumeSnapshotInformer.Lister() + ctrl.snapshotListerSynced = volumeSnapshotInformer.Informer().HasSynced + + volumeSnapshotContentInformer.Informer().AddEventHandlerWithResyncPeriod( + cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { ctrl.enqueueContentWork(obj) }, + UpdateFunc: func(oldObj, newObj interface{}) { ctrl.enqueueContentWork(newObj) }, + DeleteFunc: func(obj interface{}) { ctrl.enqueueContentWork(obj) }, + }, + ctrl.resyncPeriod, + ) + ctrl.contentLister = volumeSnapshotContentInformer.Lister() + ctrl.contentListerSynced = volumeSnapshotContentInformer.Informer().HasSynced + + ctrl.classLister = volumeSnapshotClassInformer.Lister() + ctrl.classListerSynced = volumeSnapshotClassInformer.Informer().HasSynced + + return ctrl +} + +func (ctrl *csiSnapshotController) Run(workers int, stopCh <-chan struct{}) { + defer ctrl.snapshotQueue.ShutDown() + defer ctrl.contentQueue.ShutDown() + + 
klog.Infof("Starting CSI snapshotter") + defer klog.Infof("Shutting CSI snapshotter") + + if !cache.WaitForCacheSync(stopCh, ctrl.snapshotListerSynced, ctrl.contentListerSynced, ctrl.classListerSynced, ctrl.pvcListerSynced) { + klog.Errorf("Cannot sync caches") + return + } + + ctrl.initializeCaches(ctrl.snapshotLister, ctrl.contentLister) + + for i := 0; i < workers; i++ { + go wait.Until(ctrl.snapshotWorker, 0, stopCh) + go wait.Until(ctrl.contentWorker, 0, stopCh) + } + + <-stopCh +} + +// enqueueSnapshotWork adds snapshot to given work queue. +func (ctrl *csiSnapshotController) enqueueSnapshotWork(obj interface{}) { + // Beware of "xxx deleted" events + if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { + obj = unknown.Obj + } + if snapshot, ok := obj.(*crdv1.VolumeSnapshot); ok { + objName, err := cache.DeletionHandlingMetaNamespaceKeyFunc(snapshot) + if err != nil { + klog.Errorf("failed to get key from object: %v, %v", err, snapshot) + return + } + klog.V(5).Infof("enqueued %q for sync", objName) + ctrl.snapshotQueue.Add(objName) + } +} + +// enqueueContentWork adds snapshot content to given work queue. +func (ctrl *csiSnapshotController) enqueueContentWork(obj interface{}) { + // Beware of "xxx deleted" events + if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { + obj = unknown.Obj + } + if content, ok := obj.(*crdv1.VolumeSnapshotContent); ok { + objName, err := cache.DeletionHandlingMetaNamespaceKeyFunc(content) + if err != nil { + klog.Errorf("failed to get key from object: %v, %v", err, content) + return + } + klog.V(5).Infof("enqueued %q for sync", objName) + ctrl.contentQueue.Add(objName) + } +} + +// snapshotWorker processes items from snapshotQueue. It must run only once, +// syncSnapshot is not assured to be reentrant. 
+func (ctrl *csiSnapshotController) snapshotWorker() { + workFunc := func() bool { + keyObj, quit := ctrl.snapshotQueue.Get() + if quit { + return true + } + defer ctrl.snapshotQueue.Done(keyObj) + key := keyObj.(string) + klog.V(5).Infof("snapshotWorker[%s]", key) + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + klog.V(5).Infof("snapshotWorker: snapshot namespace [%s] name [%s]", namespace, name) + if err != nil { + klog.Errorf("error getting namespace & name of snapshot %q to get snapshot from informer: %v", key, err) + return false + } + snapshot, err := ctrl.snapshotLister.VolumeSnapshots(namespace).Get(name) + if err == nil { + // The volume snapshot still exists in informer cache, the event must have + // been add/update/sync + newSnapshot, err := ctrl.checkAndUpdateSnapshotClass(snapshot) + if err == nil { + klog.V(5).Infof("passed checkAndUpdateSnapshotClass for snapshot %q", key) + ctrl.updateSnapshot(newSnapshot) + } + return false + } + if err != nil && !errors.IsNotFound(err) { + klog.V(2).Infof("error getting snapshot %q from informer: %v", key, err) + return false + } + // The snapshot is not in informer cache, the event must have been "delete" + vsObj, found, err := ctrl.snapshotStore.GetByKey(key) + if err != nil { + klog.V(2).Infof("error getting snapshot %q from cache: %v", key, err) + return false + } + if !found { + // The controller has already processed the delete event and + // deleted the snapshot from its cache + klog.V(2).Infof("deletion of snapshot %q was already processed", key) + return false + } + snapshot, ok := vsObj.(*crdv1.VolumeSnapshot) + if !ok { + klog.Errorf("expected vs, got %+v", vsObj) + return false + } + newSnapshot, err := ctrl.checkAndUpdateSnapshotClass(snapshot) + if err == nil { + ctrl.deleteSnapshot(newSnapshot) + } + return false + } + + for { + if quit := workFunc(); quit { + klog.Infof("snapshot worker queue shutting down") + return + } + } +} + +// contentWorker processes items from contentQueue. It must run only once, +// syncContent is not assured to be reentrant. 
+func (ctrl *csiSnapshotController) contentWorker() { + workFunc := func() bool { + keyObj, quit := ctrl.contentQueue.Get() + if quit { + return true + } + defer ctrl.contentQueue.Done(keyObj) + key := keyObj.(string) + klog.V(5).Infof("contentWorker[%s]", key) + + _, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + klog.V(4).Infof("error getting name of snapshotContent %q to get snapshotContent from informer: %v", key, err) + return false + } + content, err := ctrl.contentLister.Get(name) + // The content still exists in informer cache, the event must have + // been add/update/sync + if err == nil { + if ctrl.isDriverMatch(content) { + ctrl.updateContent(content) + } + return false + } + if !errors.IsNotFound(err) { + klog.V(2).Infof("error getting content %q from informer: %v", key, err) + return false + } + + // The content is not in informer cache, the event must have been + // "delete" + contentObj, found, err := ctrl.contentStore.GetByKey(key) + if err != nil { + klog.V(2).Infof("error getting content %q from cache: %v", key, err) + return false + } + if !found { + // The controller has already processed the delete event and + // deleted the content from its cache + klog.V(2).Infof("deletion of content %q was already processed", key) + return false + } + content, ok := contentObj.(*crdv1.VolumeSnapshotContent) + if !ok { + klog.Errorf("expected content, got %+v", content) + return false + } + ctrl.deleteContent(content) + return false + } + + for { + if quit := workFunc(); quit { + klog.Infof("content worker queue shutting down") + return + } + } +} + +// verify whether the driver specified in VolumeSnapshotContent matches the controller's driver name +func (ctrl *csiSnapshotController) isDriverMatch(content *crdv1.VolumeSnapshotContent) bool { + if content.Spec.VolumeSnapshotSource.CSI == nil { + // Skip this snapshot content if it not a CSI snapshot + return false + } + if content.Spec.VolumeSnapshotSource.CSI.Driver != ctrl.snapshotterName { + // Skip this snapshot content if the driver does not match + return false + } + snapshotClassName := content.Spec.VolumeSnapshotClassName + if snapshotClassName != nil { + if snapshotClass, err := ctrl.classLister.Get(*snapshotClassName); err == nil { + if snapshotClass.Snapshotter != ctrl.snapshotterName { + return false + } + } + } + return true +} + +// checkAndUpdateSnapshotClass gets the VolumeSnapshotClass from VolumeSnapshot. If it is not set, +// gets it from default VolumeSnapshotClass and sets it. It also detects if snapshotter in the +// VolumeSnapshotClass is the same as the snapshotter in external controller. 
+func (ctrl *csiSnapshotController) checkAndUpdateSnapshotClass(snapshot *crdv1.VolumeSnapshot) (*crdv1.VolumeSnapshot, error) { + className := snapshot.Spec.VolumeSnapshotClassName + var class *crdv1.VolumeSnapshotClass + var err error + newSnapshot := snapshot + if className != nil { + klog.V(5).Infof("checkAndUpdateSnapshotClass [%s]: VolumeSnapshotClassName [%s]", snapshot.Name, *className) + class, err = ctrl.GetSnapshotClass(*className) + if err != nil { + klog.Errorf("checkAndUpdateSnapshotClass failed to getSnapshotClass %v", err) + ctrl.updateSnapshotErrorStatusWithEvent(snapshot, v1.EventTypeWarning, "GetSnapshotClassFailed", fmt.Sprintf("Failed to get snapshot class with error %v", err)) + return nil, err + } + } else { + klog.V(5).Infof("checkAndUpdateSnapshotClass [%s]: SetDefaultSnapshotClass", snapshot.Name) + class, newSnapshot, err = ctrl.SetDefaultSnapshotClass(snapshot) + if err != nil { + klog.Errorf("checkAndUpdateSnapshotClass failed to setDefaultClass %v", err) + ctrl.updateSnapshotErrorStatusWithEvent(snapshot, v1.EventTypeWarning, "SetDefaultSnapshotClassFailed", fmt.Sprintf("Failed to set default snapshot class with error %v", err)) + return nil, err + } + } + + klog.V(5).Infof("VolumeSnapshotClass Snapshotter [%s] Snapshot Controller snapshotterName [%s]", class.Snapshotter, ctrl.snapshotterName) + if class.Snapshotter != ctrl.snapshotterName { + klog.V(4).Infof("Skipping VolumeSnapshot %s for snapshotter [%s] in VolumeSnapshotClass because it does not match with the snapshotter for controller [%s]", snapshotKey(snapshot), class.Snapshotter, ctrl.snapshotterName) + return nil, fmt.Errorf("volumeSnapshotClass does not match with the snapshotter for controller") + } + return newSnapshot, nil +} + +// updateSnapshot runs in worker thread and handles "snapshot added", +// "snapshot updated" and "periodic sync" events. +func (ctrl *csiSnapshotController) updateSnapshot(snapshot *crdv1.VolumeSnapshot) { + // Store the new snapshot version in the cache and do not process it if this is + // an old version. + klog.V(5).Infof("updateSnapshot %q", snapshotKey(snapshot)) + newSnapshot, err := ctrl.storeSnapshotUpdate(snapshot) + if err != nil { + klog.Errorf("%v", err) + } + if !newSnapshot { + return + } + err = ctrl.syncSnapshot(snapshot) + if err != nil { + if errors.IsConflict(err) { + // Version conflict error happens quite often and the controller + // recovers from it easily. + klog.V(3).Infof("could not sync claim %q: %+v", snapshotKey(snapshot), err) + } else { + klog.Errorf("could not sync volume %q: %+v", snapshotKey(snapshot), err) + } + } +} + +// updateContent runs in worker thread and handles "content added", +// "content updated" and "periodic sync" events. +func (ctrl *csiSnapshotController) updateContent(content *crdv1.VolumeSnapshotContent) { + // Store the new content version in the cache and do not process it if this is + // an old version. + new, err := ctrl.storeContentUpdate(content) + if err != nil { + klog.Errorf("%v", err) + } + if !new { + return + } + err = ctrl.syncContent(content) + if err != nil { + if errors.IsConflict(err) { + // Version conflict error happens quite often and the controller + // recovers from it easily. + klog.V(3).Infof("could not sync content %q: %+v", content.Name, err) + } else { + klog.Errorf("could not sync content %q: %+v", content.Name, err) + } + } +} + +// deleteSnapshot runs in worker thread and handles "snapshot deleted" event. 
+func (ctrl *csiSnapshotController) deleteSnapshot(snapshot *crdv1.VolumeSnapshot) { + _ = ctrl.snapshotStore.Delete(snapshot) + klog.V(4).Infof("snapshot %q deleted", snapshotKey(snapshot)) + + snapshotContentName := snapshot.Spec.SnapshotContentName + if snapshotContentName == "" { + klog.V(5).Infof("deleteSnapshot[%q]: content not bound", snapshotKey(snapshot)) + return + } + // sync the content when its snapshot is deleted. Explicitly sync'ing the + // content here in response to snapshot deletion prevents the content from + // waiting until the next sync period for its Release. + klog.V(5).Infof("deleteSnapshot[%q]: scheduling sync of content %s", snapshotKey(snapshot), snapshotContentName) + ctrl.contentQueue.Add(snapshotContentName) +} + +// deleteContent runs in worker thread and handles "content deleted" event. +func (ctrl *csiSnapshotController) deleteContent(content *crdv1.VolumeSnapshotContent) { + _ = ctrl.contentStore.Delete(content) + klog.V(4).Infof("content %q deleted", content.Name) + + snapshotName := snapshotRefKey(content.Spec.VolumeSnapshotRef) + if snapshotName == "" { + klog.V(5).Infof("deleteContent[%q]: content not bound", content.Name) + return + } + // sync the snapshot when its content is deleted. Explicitly sync'ing the + // snapshot here in response to content deletion prevents the snapshot from + // waiting until the next sync period for its Release. + klog.V(5).Infof("deleteContent[%q]: scheduling sync of snapshot %s", content.Name, snapshotName) + ctrl.snapshotQueue.Add(snapshotName) +} + +// initializeCaches fills all controller caches with initial data from etcd in +// order to have the caches already filled when first addSnapshot/addContent to +// perform initial synchronization of the controller. +func (ctrl *csiSnapshotController) initializeCaches(snapshotLister storagelisters.VolumeSnapshotLister, contentLister storagelisters.VolumeSnapshotContentLister) { + snapshotList, err := snapshotLister.List(labels.Everything()) + if err != nil { + klog.Errorf("CSISnapshotController can't initialize caches: %v", err) + return + } + for _, snapshot := range snapshotList { + snapshotClone := snapshot.DeepCopy() + if _, err = ctrl.storeSnapshotUpdate(snapshotClone); err != nil { + klog.Errorf("error updating volume snapshot cache: %v", err) + } + } + + contentList, err := contentLister.List(labels.Everything()) + if err != nil { + klog.Errorf("CSISnapshotController can't initialize caches: %v", err) + return + } + for _, content := range contentList { + contentClone := content.DeepCopy() + if _, err = ctrl.storeContentUpdate(contentClone); err != nil { + klog.Errorf("error updating volume snapshot content cache: %v", err) + } + } + + klog.V(4).Infof("controller initialized") +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_controller_test.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_controller_test.go new file mode 100644 index 000000000..625f9d700 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_controller_test.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + crdv1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + "k8s.io/client-go/tools/cache" + "testing" +) + +func storeVersion(t *testing.T, prefix string, c cache.Store, version string, expectedReturn bool) { + content := newContent("contentName", classEmpty, "sid1-1", "vuid1-1", "volume1-1", "snapuid1-1", "snap1-1", nil, nil, nil, false) + content.ResourceVersion = version + ret, err := storeObjectUpdate(c, content, "content") + if err != nil { + t.Errorf("%s: expected storeObjectUpdate to succeed, got: %v", prefix, err) + } + if expectedReturn != ret { + t.Errorf("%s: expected storeObjectUpdate to return %v, got: %v", prefix, expectedReturn, ret) + } + + // find the stored version + + contentObj, found, err := c.GetByKey("contentName") + if err != nil { + t.Errorf("expected content 'contentName' in the cache, got error instead: %v", err) + } + if !found { + t.Errorf("expected content 'contentName' in the cache but it was not found") + } + content, ok := contentObj.(*crdv1.VolumeSnapshotContent) + if !ok { + t.Errorf("expected content in the cache, got different object instead: %#v", contentObj) + } + + if ret { + if content.ResourceVersion != version { + t.Errorf("expected content with version %s in the cache, got %s instead", version, content.ResourceVersion) + } + } else { + if content.ResourceVersion == version { + t.Errorf("expected content with version other than %s in the cache, got %s instead", version, content.ResourceVersion) + } + } +} + +// TestControllerCache tests func storeObjectUpdate() +func TestControllerCache(t *testing.T) { + // Cache under test + c := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc) + + // Store new PV + storeVersion(t, "Step1", c, "1", true) + // Store the same PV + storeVersion(t, "Step2", c, "1", true) + // Store newer PV + storeVersion(t, "Step3", c, "2", true) + // Store older PV - simulating old "PV updated" event or periodic sync with + // old data + storeVersion(t, "Step4", c, "1", false) + // Store newer PV - test integer parsing ("2" > "10" as string, + // while 2 < 10 as integers) + storeVersion(t, "Step5", c, "10", true) +} + +func TestControllerCacheParsingError(t *testing.T) { + c := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc) + // There must be something in the cache to compare with + storeVersion(t, "Step1", c, "1", true) + + content := newContent("contentName", classEmpty, "sid1-1", "vuid1-1", "volume1-1", "snapuid1-1", "snap1-1", nil, nil, nil, false) + content.ResourceVersion = "xxx" + _, err := storeObjectUpdate(c, content, "content") + if err == nil { + t.Errorf("Expected parsing error, got nil instead") + } +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_create_test.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_create_test.go new file mode 100644 index 000000000..bfb925a6d --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_create_test.go @@ -0,0 +1,369 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "errors" + "testing" + "time" + + crdv1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + "k8s.io/api/core/v1" + storage "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var timeNow = time.Now().UnixNano() + +var metaTimeNowUnix = &metav1.Time{ + Time: time.Unix(0, timeNow), +} + +var defaultSize int64 = 1000 +var deletePolicy = crdv1.VolumeSnapshotContentDelete +var retainPolicy = crdv1.VolumeSnapshotContentRetain +var sameDriverStorageClass = &storage.StorageClass{ + TypeMeta: metav1.TypeMeta{ + Kind: "StorageClass", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "sameDriver", + }, + Provisioner: mockDriverName, + Parameters: class1Parameters, +} + +var diffDriverStorageClass = &storage.StorageClass{ + TypeMeta: metav1.TypeMeta{ + Kind: "StorageClass", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "diffDriver", + }, + Provisioner: mockDriverName, + Parameters: class1Parameters, +} + +// Test single call to SyncSnapshot, expecting create snapshot to happen. +// 1. Fill in the controller with initial data +// 2. Call the SyncSnapshot *once*. +// 3. Compare resulting contents with expected contents. +func TestCreateSnapshotSync(t *testing.T) { + + tests := []controllerTest{ + { + name: "6-1 - successful create snapshot with snapshot class gold", + initialContents: nocontents, + expectedContents: newContentArray("snapcontent-snapuid6-1", classGold, "sid6-1", "pv-uid6-1", "volume6-1", "snapuid6-1", "snap6-1", &deletePolicy, &defaultSize, &timeNow, false), + initialSnapshots: newSnapshotArray("snap6-1", classGold, "", "snapuid6-1", "claim6-1", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap6-1", classGold, "snapcontent-snapuid6-1", "snapuid6-1", "claim6-1", false, nil, metaTimeNowUnix, getSize(defaultSize)), + initialClaims: newClaimArray("claim6-1", "pvc-uid6-1", "1Gi", "volume6-1", v1.ClaimBound, &classEmpty), + initialVolumes: newVolumeArray("volume6-1", "pv-uid6-1", "pv-handle6-1", "1Gi", "pvc-uid6-1", "claim6-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + expectedCreateCalls: []createCall{ + { + snapshotName: "snapshot-snapuid6-1", + volume: newVolume("volume6-1", "pv-uid6-1", "pv-handle6-1", "1Gi", "pvc-uid6-1", "claim6-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + parameters: map[string]string{"param1": "value1"}, + // information to return + driverName: mockDriverName, + size: defaultSize, + snapshotId: "sid6-1", + timestamp: timeNow, + readyToUse: true, + }, + }, + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "6-2 - successful create snapshot with snapshot class silver", + initialContents: nocontents, + expectedContents: newContentArray("snapcontent-snapuid6-2", classSilver, "sid6-2", "pv-uid6-2", "volume6-2", "snapuid6-2", "snap6-2", &deletePolicy, &defaultSize, &timeNow, false), + initialSnapshots: newSnapshotArray("snap6-2", classSilver, "", "snapuid6-2", "claim6-2", false, nil, 
nil, nil), + expectedSnapshots: newSnapshotArray("snap6-2", classSilver, "snapcontent-snapuid6-2", "snapuid6-2", "claim6-2", false, nil, metaTimeNowUnix, getSize(defaultSize)), + initialClaims: newClaimArray("claim6-2", "pvc-uid6-2", "1Gi", "volume6-2", v1.ClaimBound, &classEmpty), + initialVolumes: newVolumeArray("volume6-2", "pv-uid6-2", "pv-handle6-2", "1Gi", "pvc-uid6-2", "claim6-2", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + expectedCreateCalls: []createCall{ + { + snapshotName: "snapshot-snapuid6-2", + volume: newVolume("volume6-2", "pv-uid6-2", "pv-handle6-2", "1Gi", "pvc-uid6-2", "claim6-2", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + parameters: map[string]string{"param2": "value2"}, + // information to return + driverName: mockDriverName, + size: defaultSize, + snapshotId: "sid6-2", + timestamp: timeNow, + readyToUse: true, + }, + }, + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "6-3 - successful create snapshot with snapshot class valid-secret-class", + initialContents: nocontents, + expectedContents: newContentArray("snapcontent-snapuid6-3", validSecretClass, "sid6-3", "pv-uid6-3", "volume6-3", "snapuid6-3", "snap6-3", &deletePolicy, &defaultSize, &timeNow, false), + initialSnapshots: newSnapshotArray("snap6-3", validSecretClass, "", "snapuid6-3", "claim6-3", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap6-3", validSecretClass, "snapcontent-snapuid6-3", "snapuid6-3", "claim6-3", false, nil, metaTimeNowUnix, getSize(defaultSize)), + initialClaims: newClaimArray("claim6-3", "pvc-uid6-3", "1Gi", "volume6-3", v1.ClaimBound, &classEmpty), + initialVolumes: newVolumeArray("volume6-3", "pv-uid6-3", "pv-handle6-3", "1Gi", "pvc-uid6-3", "claim6-3", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + initialSecrets: []*v1.Secret{secret()}, + expectedCreateCalls: []createCall{ + { + snapshotName: "snapshot-snapuid6-3", + volume: newVolume("volume6-3", "pv-uid6-3", "pv-handle6-3", "1Gi", "pvc-uid6-3", "claim6-3", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + parameters: class5Parameters, + secrets: map[string]string{"foo": "bar"}, + // information to return + driverName: mockDriverName, + size: defaultSize, + snapshotId: "sid6-3", + timestamp: timeNow, + readyToUse: true, + }, + }, + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "6-4 - successful create snapshot with snapshot class empty-secret-class", + initialContents: nocontents, + expectedContents: newContentArray("snapcontent-snapuid6-4", emptySecretClass, "sid6-4", "pv-uid6-4", "volume6-4", "snapuid6-4", "snap6-4", &deletePolicy, &defaultSize, &timeNow, false), + initialSnapshots: newSnapshotArray("snap6-4", emptySecretClass, "", "snapuid6-4", "claim6-4", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap6-4", emptySecretClass, "snapcontent-snapuid6-4", "snapuid6-4", "claim6-4", false, nil, metaTimeNowUnix, getSize(defaultSize)), + initialClaims: newClaimArray("claim6-4", "pvc-uid6-4", "1Gi", "volume6-4", v1.ClaimBound, &classEmpty), + initialVolumes: newVolumeArray("volume6-4", "pv-uid6-4", "pv-handle6-4", "1Gi", "pvc-uid6-4", "claim6-4", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + initialSecrets: []*v1.Secret{emptySecret()}, + expectedCreateCalls: []createCall{ + { + snapshotName: "snapshot-snapuid6-4", + volume: newVolume("volume6-4", "pv-uid6-4", "pv-handle6-4", "1Gi", "pvc-uid6-4", "claim6-4", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + 
parameters: class4Parameters, + secrets: map[string]string{}, + // information to return + driverName: mockDriverName, + size: defaultSize, + snapshotId: "sid6-4", + timestamp: timeNow, + readyToUse: true, + }, + }, + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "6-5 - successful create snapshot with status uploading", + initialContents: nocontents, + expectedContents: newContentArray("snapcontent-snapuid6-5", classGold, "sid6-5", "pv-uid6-5", "volume6-5", "snapuid6-5", "snap6-5", &deletePolicy, &defaultSize, &timeNow, false), + initialSnapshots: newSnapshotArray("snap6-5", classGold, "", "snapuid6-5", "claim6-5", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap6-5", classGold, "snapcontent-snapuid6-5", "snapuid6-5", "claim6-5", false, nil, metaTimeNowUnix, getSize(defaultSize)), + initialClaims: newClaimArray("claim6-5", "pvc-uid6-5", "1Gi", "volume6-5", v1.ClaimBound, &classEmpty), + initialVolumes: newVolumeArray("volume6-5", "pv-uid6-5", "pv-handle6-5", "1Gi", "pvc-uid6-5", "claim6-5", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + expectedCreateCalls: []createCall{ + { + snapshotName: "snapshot-snapuid6-5", + volume: newVolume("volume6-5", "pv-uid6-5", "pv-handle6-5", "1Gi", "pvc-uid6-5", "claim6-5", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + parameters: map[string]string{"param1": "value1"}, + // information to return + driverName: mockDriverName, + size: defaultSize, + snapshotId: "sid6-5", + timestamp: timeNow, + readyToUse: true, + }, + }, + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "6-6 - successful create snapshot with status error uploading", + initialContents: nocontents, + expectedContents: newContentArray("snapcontent-snapuid6-6", classGold, "sid6-6", "pv-uid6-6", "volume6-6", "snapuid6-6", "snap6-6", &deletePolicy, &defaultSize, &timeNow, false), + initialSnapshots: newSnapshotArray("snap6-6", classGold, "", "snapuid6-6", "claim6-6", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap6-6", classGold, "snapcontent-snapuid6-6", "snapuid6-6", "claim6-6", false, nil, metaTimeNowUnix, getSize(defaultSize)), + initialClaims: newClaimArray("claim6-6", "pvc-uid6-6", "1Gi", "volume6-6", v1.ClaimBound, &classEmpty), + initialVolumes: newVolumeArray("volume6-6", "pv-uid6-6", "pv-handle6-6", "1Gi", "pvc-uid6-6", "claim6-6", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + expectedCreateCalls: []createCall{ + { + snapshotName: "snapshot-snapuid6-6", + volume: newVolume("volume6-6", "pv-uid6-6", "pv-handle6-6", "1Gi", "pvc-uid6-6", "claim6-6", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + parameters: map[string]string{"param1": "value1"}, + // information to return + driverName: mockDriverName, + size: defaultSize, + snapshotId: "sid6-6", + timestamp: timeNow, + readyToUse: true, + }, + }, + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "7-1 - fail create snapshot with snapshot class non-existing", + initialContents: nocontents, + expectedContents: nocontents, + initialSnapshots: newSnapshotArray("snap7-1", classNonExisting, "", "snapuid7-1", "claim7-1", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap7-1", classNonExisting, "", "snapuid7-1", "claim7-1", false, newVolumeError("Failed to create snapshot: failed to get input parameters to create snapshot snap7-1: \"failed to retrieve snapshot class non-existing from the informer: \\\"volumesnapshotclass.snapshot.storage.k8s.io \\\\\\\"non-existing\\\\\\\" not 
found\\\"\""), nil, nil), + initialClaims: newClaimArray("claim7-1", "pvc-uid7-1", "1Gi", "volume7-1", v1.ClaimBound, &classEmpty), + initialVolumes: newVolumeArray("volume7-1", "pv-uid7-1", "pv-handle7-1", "1Gi", "pvc-uid7-1", "claim7-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + expectedEvents: []string{"Warning SnapshotCreationFailed"}, + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "7-2 - fail create snapshot with snapshot class invalid-secret-class", + initialContents: nocontents, + expectedContents: nocontents, + initialSnapshots: newSnapshotArray("snap7-2", invalidSecretClass, "", "snapuid7-2", "claim7-2", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap7-2", invalidSecretClass, "", "snapuid7-2", "claim7-2", false, newVolumeError("Failed to create snapshot: failed to get input parameters to create snapshot snap7-2: \"failed to get name and namespace template from params: either name and namespace for Snapshotter secrets specified, Both must be specified\""), nil, nil), + initialClaims: newClaimArray("claim7-2", "pvc-uid7-2", "1Gi", "volume7-2", v1.ClaimBound, &classEmpty), + initialVolumes: newVolumeArray("volume7-2", "pv-uid7-2", "pv-handle7-2", "1Gi", "pvc-uid7-2", "claim7-2", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + expectedEvents: []string{"Warning SnapshotCreationFailed"}, + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "7-3 - fail create snapshot with none snapshot class ", + initialContents: nocontents, + expectedContents: nocontents, + initialSnapshots: newSnapshotArray("snap7-3", "", "", "snapuid7-3", "claim7-3", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap7-3", "", "", "snapuid7-3", "claim7-3", false, newVolumeError("Failed to create snapshot: failed to get input parameters to create snapshot snap7-3: \"failed to retrieve snapshot class from the informer: \\\"volumesnapshotclass.snapshot.storage.k8s.io \\\\\\\"\\\\\\\" not found\\\"\""), nil, nil), + initialClaims: newClaimArray("claim7-3", "pvc-uid7-3", "1Gi", "volume7-3", v1.ClaimBound, &classEmpty), + initialVolumes: newVolumeArray("volume7-3", "pv-uid7-3", "pv-handle7-3", "1Gi", "pvc-uid7-3", "claim7-3", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + initialStorageClasses: []*storage.StorageClass{diffDriverStorageClass}, + expectedEvents: []string{"Warning SnapshotCreationFailed"}, + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "7-4 - fail create snapshot with no-existing claim", + initialContents: nocontents, + expectedContents: nocontents, + initialSnapshots: newSnapshotArray("snap7-4", classGold, "", "snapuid7-4", "claim7-4", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap7-4", classGold, "", "snapuid7-4", "claim7-4", false, newVolumeError("Failed to create snapshot: failed to get input parameters to create snapshot snap7-4: \"failed to retrieve PVC claim7-4 from the lister: \\\"persistentvolumeclaim \\\\\\\"claim7-4\\\\\\\" not found\\\"\""), nil, nil), + initialVolumes: newVolumeArray("volume7-4", "pv-uid7-4", "pv-handle7-4", "1Gi", "pvc-uid7-4", "claim7-4", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + expectedEvents: []string{"Warning SnapshotCreationFailed"}, + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "7-5 - fail create snapshot with no-existing volume", + initialContents: nocontents, + expectedContents: nocontents, + initialSnapshots: newSnapshotArray("snap7-5", classGold, "", "snapuid7-5", "claim7-5", 
false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap7-5", classGold, "", "snapuid7-5", "claim7-5", false, newVolumeError("Failed to create snapshot: failed to get input parameters to create snapshot snap7-5: \"failed to retrieve PV volume7-5 from the API server: \\\"cannot find volume volume7-5\\\"\""), nil, nil), + initialClaims: newClaimArray("claim7-5", "pvc-uid7-5", "1Gi", "volume7-5", v1.ClaimBound, &classEmpty), + expectedEvents: []string{"Warning SnapshotCreationFailed"}, + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "7-6 - fail create snapshot with claim that is not yet bound", + initialContents: nocontents, + expectedContents: nocontents, + initialSnapshots: newSnapshotArray("snap7-6", classGold, "", "snapuid7-6", "claim7-6", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap7-6", classGold, "", "snapuid7-6", "claim7-6", false, newVolumeError("Failed to create snapshot: failed to get input parameters to create snapshot snap7-6: \"the PVC claim7-6 is not yet bound to a PV, will not attempt to take a snapshot\""), nil, nil), + initialClaims: newClaimArray("claim7-6", "pvc-uid7-6", "1Gi", "", v1.ClaimPending, &classEmpty), + expectedEvents: []string{"Warning SnapshotCreationFailed"}, + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "7-7 - fail create snapshot due to csi driver error", + initialContents: nocontents, + expectedContents: nocontents, + initialSnapshots: newSnapshotArray("snap7-7", classGold, "", "snapuid7-7", "claim7-7", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap7-7", classGold, "", "snapuid7-7", "claim7-7", false, newVolumeError("Failed to create snapshot: failed to take snapshot of the volume, volume7-7: \"mock create snapshot error\""), nil, nil), + initialClaims: newClaimArray("claim7-7", "pvc-uid7-7", "1Gi", "volume7-7", v1.ClaimBound, &classEmpty), + initialVolumes: newVolumeArray("volume7-7", "pv-uid7-7", "pv-handle7-7", "1Gi", "pvc-uid7-7", "claim7-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + expectedCreateCalls: []createCall{ + { + snapshotName: "snapshot-snapuid7-7", + volume: newVolume("volume7-7", "pv-uid7-7", "pv-handle7-7", "1Gi", "pvc-uid7-7", "claim7-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + parameters: map[string]string{"param1": "value1"}, + // information to return + err: errors.New("mock create snapshot error"), + }, + }, + errors: noerrors, + expectedEvents: []string{"Warning SnapshotCreationFailed"}, + test: testSyncSnapshot, + }, + { + name: "7-8 - fail create snapshot due to cannot update snapshot status", + initialContents: nocontents, + expectedContents: nocontents, + initialSnapshots: newSnapshotArray("snap7-8", classGold, "", "snapuid7-8", "claim7-8", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap7-8", classGold, "", "snapuid7-8", "claim7-8", false, newVolumeError("Failed to create snapshot: snapshot controller failed to update default/snap7-8 on API server: mock update error"), nil, nil), + initialClaims: newClaimArray("claim7-8", "pvc-uid7-8", "1Gi", "volume7-8", v1.ClaimBound, &classEmpty), + initialVolumes: newVolumeArray("volume7-8", "pv-uid7-8", "pv-handle7-8", "1Gi", "pvc-uid7-8", "claim7-8", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + expectedCreateCalls: []createCall{ + { + snapshotName: "snapshot-snapuid7-8", + volume: newVolume("volume7-8", "pv-uid7-8", "pv-handle7-8", "1Gi", "pvc-uid7-8", "claim7-8", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, 
classEmpty), + parameters: map[string]string{"param1": "value1"}, + // information to return + driverName: mockDriverName, + size: defaultSize, + snapshotId: "sid7-8", + timestamp: timeNow, + readyToUse: true, + }, + }, + errors: []reactorError{ + // Inject error to the forth client.VolumesnapshotV1alpha1().VolumeSnapshots().Update call. + // All other calls will succeed. + {"update", "volumesnapshots", errors.New("mock update error")}, + {"update", "volumesnapshots", errors.New("mock update error")}, + {"update", "volumesnapshots", errors.New("mock update error")}, + }, + expectedEvents: []string{"Warning SnapshotCreationFailed"}, + test: testSyncSnapshot, + }, + { + name: "7-9 - fail create snapshot due to cannot save snapshot content", + initialContents: nocontents, + expectedContents: nocontents, + initialSnapshots: newSnapshotArray("snap7-9", classGold, "", "snapuid7-9", "claim7-9", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap7-9", classGold, "", "snapuid7-9", "claim7-9", false, nil, metaTimeNowUnix, getSize(defaultSize)), + initialClaims: newClaimArray("claim7-9", "pvc-uid7-9", "1Gi", "volume7-9", v1.ClaimBound, &classEmpty), + initialVolumes: newVolumeArray("volume7-9", "pv-uid7-9", "pv-handle7-9", "1Gi", "pvc-uid7-9", "claim7-9", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + expectedCreateCalls: []createCall{ + { + snapshotName: "snapshot-snapuid7-9", + volume: newVolume("volume7-9", "pv-uid7-9", "pv-handle7-9", "1Gi", "pvc-uid7-9", "claim7-9", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + parameters: map[string]string{"param1": "value1"}, + // information to return + driverName: mockDriverName, + size: defaultSize, + snapshotId: "sid7-9", + timestamp: timeNow, + readyToUse: true, + }, + }, + errors: []reactorError{ + {"create", "volumesnapshotcontents", errors.New("mock create error")}, + {"create", "volumesnapshotcontents", errors.New("mock create error")}, + {"create", "volumesnapshotcontents", errors.New("mock create error")}, + }, + expectedEvents: []string{"Warning CreateSnapshotContentFailed"}, + test: testSyncSnapshot, + }, + } + runSyncTests(t, tests, snapshotClasses) +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_delete_test.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_delete_test.go new file mode 100644 index 000000000..6be2be119 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_delete_test.go @@ -0,0 +1,325 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "errors" + "testing" + + crdv1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var class1Parameters = map[string]string{ + "param1": "value1", +} + +var class2Parameters = map[string]string{ + "param2": "value2", +} + +var class3Parameters = map[string]string{ + "param3": "value3", + snapshotterSecretNameKey: "name", +} + +var class4Parameters = map[string]string{ + snapshotterSecretNameKey: "emptysecret", + snapshotterSecretNamespaceKey: "default", +} + +var class5Parameters = map[string]string{ + snapshotterSecretNameKey: "secret", + snapshotterSecretNamespaceKey: "default", +} + +var snapshotClasses = []*crdv1.VolumeSnapshotClass{ + { + TypeMeta: metav1.TypeMeta{ + Kind: "VolumeSnapshotClass", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: classGold, + }, + Snapshotter: mockDriverName, + Parameters: class1Parameters, + }, + { + TypeMeta: metav1.TypeMeta{ + Kind: "VolumeSnapshotClass", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: classSilver, + }, + Snapshotter: mockDriverName, + Parameters: class2Parameters, + }, + { + TypeMeta: metav1.TypeMeta{ + Kind: "VolumeSnapshotClass", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: emptySecretClass, + }, + Snapshotter: mockDriverName, + Parameters: class4Parameters, + }, + { + TypeMeta: metav1.TypeMeta{ + Kind: "VolumeSnapshotClass", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: invalidSecretClass, + }, + Snapshotter: mockDriverName, + Parameters: class3Parameters, + }, + { + TypeMeta: metav1.TypeMeta{ + Kind: "VolumeSnapshotClass", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: validSecretClass, + }, + Snapshotter: mockDriverName, + Parameters: class5Parameters, + }, + { + TypeMeta: metav1.TypeMeta{ + Kind: "VolumeSnapshotClass", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: defaultClass, + Annotations: map[string]string{IsDefaultSnapshotClassAnnotation: "true"}, + }, + Snapshotter: mockDriverName, + }, +} + +// Test single call to syncContent, expecting deleting to happen. +// 1. Fill in the controller with initial data +// 2. Call the syncContent *once*. +// 3. Compare resulting contents with expected contents. 
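// Editor's note (not part of the vendored upstream file): the table entries
// below are easier to read once the positional fixture helpers are decoded.
// As the calls in this package's tests suggest, newContentArray takes roughly
// (contentName, snapshotClassName, snapshotHandle, volumeUID, volumeName,
// boundSnapshotUID, boundSnapshotName, deletionPolicy, size, creationTime,
// trailing flag), and newSnapshotArray takes roughly (snapshotName, className,
// boundContentName, snapshotUID, claimName, readyToUse, statusError,
// creationTime, restoreSize). This is an inference from usage in these files,
// not the authoritative signatures; the framework test file defines them.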
+func TestDeleteSync(t *testing.T) { + tests := []controllerTest{ + { + name: "1-1 - content with empty snapshot class is deleted if it is bound to a non-exist snapshot and also has a snapshot uid specified", + initialContents: newContentArray("content1-1", classEmpty, "sid1-1", "vuid1-1", "volume1-1", "snapuid1-1", "snap1-1", &deletePolicy, nil, nil, true), + expectedContents: nocontents, + initialSnapshots: nosnapshots, + expectedSnapshots: nosnapshots, + expectedEvents: noevents, + errors: noerrors, + expectedDeleteCalls: []deleteCall{{"sid1-1", nil, nil}}, + test: testSyncContent, + }, + { + name: "2-1 - content with empty snapshot class will not be deleted if it is bound to a non-exist snapshot but it does not have a snapshot uid specified", + initialContents: newContentArray("content2-1", classEmpty, "sid2-1", "vuid2-1", "volume2-1", "", "snap2-1", &deletePolicy, nil, nil, true), + expectedContents: newContentArray("content2-1", classEmpty, "sid2-1", "vuid2-1", "volume2-1", "", "snap2-1", &deletePolicy, nil, nil, true), + initialSnapshots: nosnapshots, + expectedSnapshots: nosnapshots, + expectedEvents: noevents, + errors: noerrors, + expectedDeleteCalls: []deleteCall{{"sid2-1", nil, nil}}, + test: testSyncContent, + }, + { + name: "1-2 - successful delete with snapshot class that has empty secret parameter", + initialContents: newContentArray("content1-2", emptySecretClass, "sid1-2", "vuid1-2", "volume1-2", "snapuid1-2", "snap1-2", &deletePolicy, nil, nil, true), + expectedContents: nocontents, + initialSnapshots: nosnapshots, + expectedSnapshots: nosnapshots, + initialSecrets: []*v1.Secret{emptySecret()}, + expectedEvents: noevents, + errors: noerrors, + expectedDeleteCalls: []deleteCall{{"sid1-2", map[string]string{}, nil}}, + test: testSyncContent, + }, + { + name: "1-3 - successful delete with snapshot class that has valid secret parameter", + initialContents: newContentArray("content1-3", validSecretClass, "sid1-3", "vuid1-3", "volume1-3", "snapuid1-3", "snap1-3", &deletePolicy, nil, nil, true), + expectedContents: nocontents, + initialSnapshots: nosnapshots, + expectedSnapshots: nosnapshots, + expectedEvents: noevents, + errors: noerrors, + initialSecrets: []*v1.Secret{secret()}, + expectedDeleteCalls: []deleteCall{{"sid1-3", map[string]string{"foo": "bar"}, nil}}, + test: testSyncContent, + }, + { + name: "1-4 - fail delete with snapshot class that has invalid secret parameter", + initialContents: newContentArray("content1-4", invalidSecretClass, "sid1-4", "vuid1-4", "volume1-4", "snapuid1-4", "snap1-4", &deletePolicy, nil, nil, true), + expectedContents: newContentArray("content1-4", invalidSecretClass, "sid1-4", "vuid1-4", "volume1-4", "snapuid1-4", "snap1-4", &deletePolicy, nil, nil, true), + initialSnapshots: nosnapshots, + expectedSnapshots: nosnapshots, + expectedEvents: noevents, + errors: noerrors, + test: testSyncContent, + }, + { + name: "1-5 - csi driver delete snapshot returns error", + initialContents: newContentArray("content1-5", validSecretClass, "sid1-5", "vuid1-5", "volume1-5", "snapuid1-5", "snap1-5", &deletePolicy, nil, nil, true), + expectedContents: newContentArray("content1-5", validSecretClass, "sid1-5", "vuid1-5", "volume1-5", "snapuid1-5", "snap1-5", &deletePolicy, nil, nil, true), + initialSnapshots: nosnapshots, + expectedSnapshots: nosnapshots, + initialSecrets: []*v1.Secret{secret()}, + expectedDeleteCalls: []deleteCall{{"sid1-5", map[string]string{"foo": "bar"}, errors.New("mock csi driver delete error")}}, + expectedEvents: []string{"Warning 
SnapshotDeleteError"}, + errors: noerrors, + test: testSyncContent, + }, + { + name: "1-6 - api server delete content returns error", + initialContents: newContentArray("content1-6", validSecretClass, "sid1-6", "vuid1-6", "volume1-6", "snapuid1-6", "snap1-6", &deletePolicy, nil, nil, true), + expectedContents: newContentArray("content1-6", validSecretClass, "sid1-6", "vuid1-6", "volume1-6", "snapuid1-6", "snap1-6", &deletePolicy, nil, nil, true), + initialSnapshots: nosnapshots, + expectedSnapshots: nosnapshots, + initialSecrets: []*v1.Secret{secret()}, + expectedDeleteCalls: []deleteCall{{"sid1-6", map[string]string{"foo": "bar"}, nil}}, + expectedEvents: []string{"Warning SnapshotContentObjectDeleteError"}, + errors: []reactorError{ + // Inject error to the first client.VolumesnapshotV1alpha1().VolumeSnapshotContents().Delete call. + // All other calls will succeed. + {"delete", "volumesnapshotcontents", errors.New("mock delete error")}, + }, + test: testSyncContent, + }, + { + // delete success - snapshot that the content was pointing to was deleted, and another + // with the same name created. + name: "1-7 - prebound content is deleted while the snapshot exists", + initialContents: newContentArray("content1-7", validSecretClass, "sid1-7", "vuid1-7", "volume1-7", "snapuid1-7", "snap1-7", &deletePolicy, nil, nil, true), + expectedContents: nocontents, + initialSnapshots: newSnapshotArray("snap1-7", validSecretClass, "content1-7", "snapuid1-7-x", "claim1-7", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap1-7", validSecretClass, "content1-7", "snapuid1-7-x", "claim1-7", false, nil, nil, nil), + initialSecrets: []*v1.Secret{secret()}, + expectedDeleteCalls: []deleteCall{{"sid1-7", map[string]string{"foo": "bar"}, nil}}, + expectedEvents: noevents, + errors: noerrors, + test: testSyncContent, + }, + { + // delete success(?) 
- content is deleted before doDelete() starts + name: "1-8 - content is deleted before deleting", + initialContents: newContentArray("content1-8", validSecretClass, "sid1-8", "vuid1-8", "volume1-8", "snapuid1-8", "snap1-8", &deletePolicy, nil, nil, true), + expectedContents: nocontents, + initialSnapshots: nosnapshots, + expectedSnapshots: nosnapshots, + initialSecrets: []*v1.Secret{secret()}, + expectedDeleteCalls: []deleteCall{{"sid1-8", map[string]string{"foo": "bar"}, nil}}, + expectedEvents: noevents, + errors: noerrors, + test: wrapTestWithInjectedOperation(testSyncContent, func(ctrl *csiSnapshotController, reactor *snapshotReactor) { + // Delete the volume before delete operation starts + reactor.lock.Lock() + delete(reactor.contents, "content1-8") + reactor.lock.Unlock() + }), + }, + { + name: "1-9 - content will not be deleted if it is bound to a snapshot correctly, snapshot uid is specified", + initialContents: newContentArray("content1-9", validSecretClass, "sid1-9", "vuid1-9", "volume1-9", "snapuid1-9", "snap1-9", &deletePolicy, nil, nil, true), + expectedContents: newContentArray("content1-9", validSecretClass, "sid1-9", "vuid1-9", "volume1-9", "snapuid1-9", "snap1-9", &deletePolicy, nil, nil, true), + initialSnapshots: newSnapshotArray("snap1-9", validSecretClass, "content1-9", "snapuid1-9", "claim1-9", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap1-9", validSecretClass, "content1-9", "snapuid1-9", "claim1-9", false, nil, nil, nil), + expectedEvents: noevents, + initialSecrets: []*v1.Secret{secret()}, + errors: noerrors, + test: testSyncContent, + }, + { + name: "1-10 - should delete content which is bound to a snapshot incorrectly", + initialContents: newContentArray("content1-10", validSecretClass, "sid1-10", "vuid1-10", "volume1-10", "snapuid1-10-x", "snap1-10", &deletePolicy, nil, nil, true), + expectedContents: nocontents, + initialSnapshots: newSnapshotArray("snap1-10", validSecretClass, "content1-10", "snapuid1-10", "claim1-10", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap1-10", validSecretClass, "content1-10", "snapuid1-10", "claim1-10", false, nil, nil, nil), + expectedEvents: noevents, + initialSecrets: []*v1.Secret{secret()}, + errors: noerrors, + expectedDeleteCalls: []deleteCall{{"sid1-10", map[string]string{"foo": "bar"}, nil}}, + test: testSyncContent, + }, + { + name: "1-10 - will not delete content with retain policy set which is bound to a snapshot incorrectly", + initialContents: newContentArray("content1-10", validSecretClass, "sid1-10", "vuid1-10", "volume1-10", "snapuid1-10-x", "snap1-10", &retainPolicy, nil, nil, true), + expectedContents: newContentArray("content1-10", validSecretClass, "sid1-10", "vuid1-10", "volume1-10", "snapuid1-10-x", "snap1-10", &retainPolicy, nil, nil, true), + initialSnapshots: newSnapshotArray("snap1-10", validSecretClass, "content1-10", "snapuid1-10", "claim1-10", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap1-10", validSecretClass, "content1-10", "snapuid1-10", "claim1-10", false, nil, nil, nil), + expectedEvents: noevents, + initialSecrets: []*v1.Secret{secret()}, + errors: noerrors, + test: testSyncContent, + }, + { + name: "1-11 - content will not be deleted if it is bound to a snapshot correctly, snapsht uid is not specified", + initialContents: newContentArray("content1-11", validSecretClass, "sid1-11", "vuid1-11", "volume1-11", "", "snap1-11", &deletePolicy, nil, nil, true), + expectedContents: newContentArray("content1-11", validSecretClass, "sid1-11", 
"vuid1-11", "volume1-11", "", "snap1-11", &deletePolicy, nil, nil, true), + initialSnapshots: newSnapshotArray("snap1-11", validSecretClass, "content1-11", "snapuid1-11", "claim1-11", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap1-11", validSecretClass, "content1-11", "snapuid1-11", "claim1-11", false, nil, nil, nil), + expectedEvents: noevents, + initialSecrets: []*v1.Secret{secret()}, + errors: noerrors, + test: testSyncContent, + }, + { + name: "1-12 - content with retain policy will not be deleted if it is bound to a non-exist snapshot and also has a snapshot uid specified", + initialContents: newContentArray("content1-12", classEmpty, "sid1-12", "vuid1-12", "volume1-12", "snapuid1-12", "snap1-12", &retainPolicy, nil, nil, true), + expectedContents: newContentArray("content1-12", classEmpty, "sid1-12", "vuid1-12", "volume1-12", "snapuid1-12", "snap1-12", &retainPolicy, nil, nil, true), + initialSnapshots: nosnapshots, + expectedSnapshots: nosnapshots, + expectedEvents: noevents, + errors: noerrors, + test: testSyncContent, + }, + { + name: "1-13 - content with empty snapshot class is not deleted when Deletion policy is not set even if it is bound to a non-exist snapshot and also has a snapshot uid specified", + initialContents: newContentArray("content1-1", classEmpty, "sid1-1", "vuid1-1", "volume1-1", "snapuid1-1", "snap1-1", nil, nil, nil, true), + expectedContents: newContentArray("content1-1", classEmpty, "sid1-1", "vuid1-1", "volume1-1", "snapuid1-1", "snap1-1", nil, nil, nil, true), + initialSnapshots: nosnapshots, + expectedSnapshots: nosnapshots, + expectedEvents: noevents, + errors: noerrors, + test: testSyncContent, + }, + { + name: "1-14 - content will not be deleted if it is bound to a snapshot correctly, snapshot uid is specified", + initialContents: newContentArray("content1-14", validSecretClass, "sid1-14", "vuid1-14", "volume1-14", "snapuid1-14", "snap1-14", &retainPolicy, nil, nil, true), + expectedContents: newContentArray("content1-14", validSecretClass, "sid1-14", "vuid1-14", "volume1-14", "snapuid1-14", "snap1-14", &retainPolicy, nil, nil, true), + initialSnapshots: newSnapshotArray("snap1-14", validSecretClass, "content1-14", "snapuid1-14", "claim1-14", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap1-14", validSecretClass, "content1-14", "snapuid1-14", "claim1-14", false, nil, nil, nil), + expectedEvents: noevents, + initialSecrets: []*v1.Secret{secret()}, + errors: noerrors, + test: testSyncContent, + }, + { + name: "1-15 - content will not be deleted which is bound to a snapshot incorrectly if Deletion policy is not set", + initialContents: newContentArray("content1-10", validSecretClass, "sid1-15", "vuid1-15", "volume1-15", "snapuid1-15-x", "snap1-15", nil, nil, nil, true), + expectedContents: newContentArray("content1-10", validSecretClass, "sid1-15", "vuid1-15", "volume1-15", "snapuid1-15-x", "snap1-15", nil, nil, nil, true), + initialSnapshots: newSnapshotArray("snap1-10", validSecretClass, "content1-15", "snapuid1-15", "claim1-15", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap1-10", validSecretClass, "content1-15", "snapuid1-15", "claim1-15", false, nil, nil, nil), + expectedEvents: noevents, + initialSecrets: []*v1.Secret{secret()}, + errors: noerrors, + test: testSyncContent, + }, + } + runSyncTests(t, tests, snapshotClasses) +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_finalizer_test.go 
b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_finalizer_test.go new file mode 100644 index 000000000..bfce33a9b --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_finalizer_test.go @@ -0,0 +1,67 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "testing" + + "k8s.io/api/core/v1" +) + +// Test single call to ensureSnapshotSourceFinalizer and checkandRemoveSnapshotSourceFinalizer, +// expecting PVCFinalizer to be added or removed +func TestPVCFinalizer(t *testing.T) { + + tests := []controllerTest{ + { + name: "1-1 - successful add PVC finalizer", + initialSnapshots: newSnapshotArray("snap6-2", classSilver, "", "snapuid6-2", "claim6-2", false, nil, nil, nil), + initialClaims: newClaimArray("claim6-2", "pvc-uid6-2", "1Gi", "volume6-2", v1.ClaimBound, &classEmpty), + test: testAddPVCFinalizer, + expectSuccess: true, + }, + { + name: "1-2 - won't add PVC finalizer; already added", + initialSnapshots: newSnapshotArray("snap6-2", classSilver, "", "snapuid6-2", "claim6-2", false, nil, nil, nil), + initialClaims: newClaimArrayFinalizer("claim6-2", "pvc-uid6-2", "1Gi", "volume6-2", v1.ClaimBound, &classEmpty), + test: testAddPVCFinalizer, + expectSuccess: false, + }, + { + name: "1-3 - successful remove PVC finalizer", + initialSnapshots: newSnapshotArray("snap6-2", classSilver, "", "snapuid6-2", "claim6-2", false, nil, nil, nil), + initialClaims: newClaimArrayFinalizer("claim6-2", "pvc-uid6-2", "1Gi", "volume6-2", v1.ClaimBound, &classEmpty), + test: testRemovePVCFinalizer, + expectSuccess: true, + }, + { + name: "1-4 - won't remove PVC finalizer; already removed", + initialSnapshots: newSnapshotArray("snap6-2", classSilver, "", "snapuid6-2", "claim6-2", false, nil, nil, nil), + initialClaims: newClaimArray("claim6-2", "pvc-uid6-2", "1Gi", "volume6-2", v1.ClaimBound, &classEmpty), + test: testRemovePVCFinalizer, + expectSuccess: false, + }, + { + name: "1-5 - won't remove PVC finalizer; PVC in-use", + initialSnapshots: newSnapshotArray("snap6-2", classSilver, "", "snapuid6-2", "claim6-2", false, nil, nil, nil), + initialClaims: newClaimArray("claim6-2", "pvc-uid6-2", "1Gi", "volume6-2", v1.ClaimBound, &classEmpty), + test: testRemovePVCFinalizer, + expectSuccess: false, + }, + } + runPVCFinalizerTests(t, tests, snapshotClasses) +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_ready_test.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_ready_test.go new file mode 100644 index 000000000..b495af781 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/snapshot_ready_test.go @@ -0,0 +1,274 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "errors" + "testing" + "time" + + "k8s.io/api/core/v1" + storagev1beta1 "k8s.io/api/storage/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var metaTimeNow = &metav1.Time{ + Time: time.Now(), +} + +var volumeErr = &storagev1beta1.VolumeError{ + Time: *metaTimeNow, + Message: "Failed to upload the snapshot", +} + +// Test single call to syncSnapshot and syncContent methods. +// 1. Fill in the controller with initial data +// 2. Call the tested function (syncSnapshot/syncContent) via +// controllerTest.testCall *once*. +// 3. Compare resulting contents and snapshots with expected contents and snapshots. +func TestSync(t *testing.T) { + tests := []controllerTest{ + { + // snapshot is bound to a non-existing content + name: "2-1 - snapshot is bound to a non-existing content", + initialContents: nocontents, + expectedContents: nocontents, + initialSnapshots: newSnapshotArray("snap2-1", validSecretClass, "content2-1", "snapuid2-1", "claim2-1", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap2-1", validSecretClass, "content2-1", "snapuid2-1", "claim2-1", false, newVolumeError("VolumeSnapshotContent is missing"), nil, nil), + expectedEvents: []string{"Warning SnapshotContentMissing"}, + errors: noerrors, + test: testSyncSnapshotError, + }, + { + name: "2-2 - could not bind snapshot and content, the VolumeSnapshotRef does not match", + initialContents: newContentArray("content2-2", validSecretClass, "sid2-2", "vuid2-2", "volume2-2", "snapuid2-2-x", "snap2-2", &deletePolicy, nil, nil, false), + expectedContents: newContentArray("content2-2", validSecretClass, "sid2-2", "vuid2-2", "volume2-2", "snapuid2-2-x", "snap2-2", &deletePolicy, nil, nil, false), + initialSnapshots: newSnapshotArray("snap2-2", validSecretClass, "content2-2", "snapuid2-2", "claim2-2", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap2-2", validSecretClass, "content2-2", "snapuid2-2", "claim2-2", false, newVolumeError("Snapshot failed to bind VolumeSnapshotContent, Could not bind snapshot snap2-2 and content content2-2, the VolumeSnapshotRef does not match"), nil, nil), + expectedEvents: []string{"Warning SnapshotBindFailed"}, + errors: noerrors, + test: testSyncSnapshotError, + }, + { + name: "2-3 - success bind snapshot and content but not ready, no status changed", + initialContents: newContentArray("content2-3", validSecretClass, "sid2-3", "vuid2-3", "volume2-3", "", "snap2-3", &deletePolicy, nil, nil, false), + expectedContents: newContentArray("content2-3", validSecretClass, "sid2-3", "vuid2-3", "volume2-3", "snapuid2-3", "snap2-3", &deletePolicy, nil, nil, false), + initialSnapshots: newSnapshotArray("snap2-3", validSecretClass, "content2-3", "snapuid2-3", "claim2-3", false, nil, metaTimeNow, nil), + expectedSnapshots: newSnapshotArray("snap2-3", validSecretClass, "content2-3", "snapuid2-3", "claim2-3", false, nil, metaTimeNow, nil), + initialClaims: newClaimArray("claim2-3", "pvc-uid2-3", "1Gi", "volume2-3", v1.ClaimBound, &classEmpty), + initialVolumes: newVolumeArray("volume2-3", "pv-uid2-3", "pv-handle2-3", "1Gi", "pvc-uid2-3", 
"claim2-3", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + initialSecrets: []*v1.Secret{secret()}, + expectedCreateCalls: []createCall{ + { + snapshotName: "snapshot-snapuid2-3", + volume: newVolume("volume2-3", "pv-uid2-3", "pv-handle2-3", "1Gi", "pvc-uid2-3", "claim2-3", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + parameters: class5Parameters, + secrets: map[string]string{"foo": "bar"}, + // information to return + driverName: mockDriverName, + snapshotId: "sid2-3", + timestamp: timeNow, + readyToUse: false, + }, + }, + errors: noerrors, + test: testSyncSnapshot, + }, + { + // nothing changed + name: "2-4 - noop", + initialContents: newContentArray("content2-4", validSecretClass, "sid2-4", "vuid2-4", "volume2-4", "snapuid2-4", "snap2-4", &deletePolicy, nil, nil, false), + expectedContents: newContentArray("content2-4", validSecretClass, "sid2-4", "vuid2-4", "volume2-4", "snapuid2-4", "snap2-4", &deletePolicy, nil, nil, false), + initialSnapshots: newSnapshotArray("snap2-4", validSecretClass, "content2-4", "snapuid2-4", "claim2-4", true, nil, metaTimeNow, nil), + expectedSnapshots: newSnapshotArray("snap2-4", validSecretClass, "content2-4", "snapuid2-4", "claim2-4", true, nil, metaTimeNow, nil), + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "2-5 - snapshot and content bound, status ready false -> true", + initialContents: newContentArray("content2-5", validSecretClass, "sid2-5", "vuid2-5", "volume2-5", "snapuid2-5", "snap2-5", &deletePolicy, nil, nil, false), + expectedContents: newContentArray("content2-5", validSecretClass, "sid2-5", "vuid2-5", "volume2-5", "snapuid2-5", "snap2-5", &deletePolicy, nil, nil, false), + initialSnapshots: newSnapshotArray("snap2-5", validSecretClass, "content2-5", "snapuid2-5", "claim2-5", false, nil, metaTimeNow, nil), + expectedSnapshots: newSnapshotArray("snap2-5", validSecretClass, "content2-5", "snapuid2-5", "claim2-5", true, nil, metaTimeNow, nil), + initialClaims: newClaimArray("claim2-5", "pvc-uid2-5", "1Gi", "volume2-5", v1.ClaimBound, &classEmpty), + initialVolumes: newVolumeArray("volume2-5", "pv-uid2-5", "pv-handle2-5", "1Gi", "pvc-uid2-5", "claim2-5", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + initialSecrets: []*v1.Secret{secret()}, + expectedCreateCalls: []createCall{ + { + snapshotName: "snapshot-snapuid2-5", + volume: newVolume("volume2-5", "pv-uid2-5", "pv-handle2-5", "1Gi", "pvc-uid2-5", "claim2-5", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + parameters: class5Parameters, + secrets: map[string]string{"foo": "bar"}, + // information to return + driverName: mockDriverName, + snapshotId: "sid2-5", + timestamp: timeNow, + readyToUse: true, + }, + }, + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "2-6 - snapshot bound to prebound content correctly, status ready false -> true, ref.UID '' -> 'snapuid2-6'", + initialContents: newContentArray("content2-6", validSecretClass, "sid2-6", noVolume, noVolume, noBoundUID, "snap2-6", &deletePolicy, nil, &timeNow, false), + expectedContents: newContentArray("content2-6", validSecretClass, "sid2-6", noVolume, noVolume, "snapuid2-6", "snap2-6", &deletePolicy, nil, &timeNow, false), + initialSnapshots: newSnapshotArray("snap2-6", validSecretClass, "content2-6", "snapuid2-6", noClaim, false, nil, metaTimeNow, nil), + expectedSnapshots: newSnapshotArray("snap2-6", validSecretClass, "content2-6", "snapuid2-6", noClaim, true, nil, metaTimeNow, nil), + expectedListCalls: []listCall{ + { + snapshotID: 
"sid2-6", + readyToUse: true, + }, + }, + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "2-7 - snapshot and content bound, csi driver get status error", + initialContents: newContentArray("content2-7", validSecretClass, "sid2-7", "vuid2-7", "volume2-7", "snapuid2-7", "snap2-7", &deletePolicy, nil, nil, false), + expectedContents: newContentArray("content2-7", validSecretClass, "sid2-7", "vuid2-7", "volume2-7", "snapuid2-7", "snap2-7", &deletePolicy, nil, nil, false), + initialSnapshots: newSnapshotArray("snap2-7", validSecretClass, "content2-7", "snapuid2-7", "claim2-7", false, nil, metaTimeNow, nil), + expectedSnapshots: newSnapshotArray("snap2-7", validSecretClass, "content2-7", "snapuid2-7", "claim2-7", false, newVolumeError("Failed to check and update snapshot: mock create snapshot error"), metaTimeNow, nil), + expectedEvents: []string{"Warning SnapshotCheckandUpdateFailed"}, + initialClaims: newClaimArray("claim2-7", "pvc-uid2-7", "1Gi", "volume2-7", v1.ClaimBound, &classEmpty), + initialVolumes: newVolumeArray("volume2-7", "pv-uid2-7", "pv-handle2-7", "1Gi", "pvc-uid2-7", "claim2-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + initialSecrets: []*v1.Secret{secret()}, + expectedCreateCalls: []createCall{ + { + snapshotName: "snapshot-snapuid2-7", + volume: newVolume("volume2-7", "pv-uid2-7", "pv-handle2-7", "1Gi", "pvc-uid2-7", "claim2-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + parameters: class5Parameters, + secrets: map[string]string{"foo": "bar"}, + // information to return + err: errors.New("mock create snapshot error"), + }, + }, + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "2-8 - snapshot and content bound, apiserver update status error", + initialContents: newContentArray("content2-8", validSecretClass, "sid2-8", "vuid2-8", "volume2-8", "snapuid2-8", "snap2-8", &deletePolicy, nil, nil, false), + expectedContents: newContentArray("content2-8", validSecretClass, "sid2-8", "vuid2-8", "volume2-8", "snapuid2-8", "snap2-8", &deletePolicy, nil, nil, false), + initialSnapshots: newSnapshotArray("snap2-8", validSecretClass, "content2-8", "snapuid2-8", "claim2-8", false, nil, metaTimeNow, nil), + expectedSnapshots: newSnapshotArray("snap2-8", validSecretClass, "content2-8", "snapuid2-8", "claim2-8", false, newVolumeError("Failed to check and update snapshot: snapshot controller failed to update default/snap2-8 on API server: mock update error"), metaTimeNow, nil), + expectedEvents: []string{"Warning SnapshotCheckandUpdateFailed"}, + initialClaims: newClaimArray("claim2-8", "pvc-uid2-8", "1Gi", "volume2-8", v1.ClaimBound, &classEmpty), + initialVolumes: newVolumeArray("volume2-8", "pv-uid2-8", "pv-handle2-8", "1Gi", "pvc-uid2-8", "claim2-8", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + initialSecrets: []*v1.Secret{secret()}, + expectedCreateCalls: []createCall{ + { + snapshotName: "snapshot-snapuid2-8", + volume: newVolume("volume2-8", "pv-uid2-8", "pv-handle2-8", "1Gi", "pvc-uid2-8", "claim2-8", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty), + parameters: class5Parameters, + secrets: map[string]string{"foo": "bar"}, + // information to return + driverName: mockDriverName, + size: defaultSize, + snapshotId: "sid2-8", + timestamp: timeNow, + readyToUse: true, + }, + }, + errors: []reactorError{ + // Inject error to the first client.VolumesnapshotV1alpha1().VolumeSnapshots().Update call. + // All other calls will succeed. 
+ {"update", "volumesnapshots", errors.New("mock update error")}, + }, + test: testSyncSnapshot, + }, + { + name: "2-9 - bind when snapshot and content matches", + initialContents: newContentArray("content2-9", validSecretClass, "sid2-9", "vuid2-9", "volume2-9", "snapuid2-9", "snap2-9", &deletePolicy, nil, nil, false), + expectedContents: newContentArray("content2-9", validSecretClass, "sid2-9", "vuid2-9", "volume2-9", "snapuid2-9", "snap2-9", &deletePolicy, nil, nil, false), + initialSnapshots: newSnapshotArray("snap2-9", validSecretClass, "", "snapuid2-9", "claim2-9", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap2-9", validSecretClass, "content2-9", "snapuid2-9", "claim2-9", false, nil, nil, nil), + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "2-10 - do not bind when snapshot and content not match", + initialContents: newContentArray("content2-10", validSecretClass, "sid2-10", "vuid2-10", "volume2-10", "snapuid2-10-x", "snap2-10", &deletePolicy, nil, nil, false), + expectedContents: newContentArray("content2-10", validSecretClass, "sid2-10", "vuid2-10", "volume2-10", "snapuid2-10-x", "snap2-10", &deletePolicy, nil, nil, false), + initialSnapshots: newSnapshotArray("snap2-10", validSecretClass, "", "snapuid2-10", "claim2-10", false, newVolumeError("mock driver error"), nil, nil), + expectedSnapshots: newSnapshotArray("snap2-10", validSecretClass, "", "snapuid2-10", "claim2-10", false, newVolumeError("mock driver error"), nil, nil), + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "3-1 - ready snapshot lost reference to VolumeSnapshotContent", + initialContents: nocontents, + expectedContents: nocontents, + initialSnapshots: newSnapshotArray("snap3-1", validSecretClass, "", "snapuid3-1", "claim3-1", true, nil, metaTimeNow, nil), + expectedSnapshots: newSnapshotArray("snap3-1", validSecretClass, "", "snapuid3-1", "claim3-1", false, newVolumeError("Bound snapshot has lost reference to VolumeSnapshotContent"), metaTimeNow, nil), + errors: noerrors, + expectedEvents: []string{"Warning SnapshotLost"}, + test: testSyncSnapshot, + }, + { + name: "3-2 - ready snapshot bound to none-exist content", + initialContents: nocontents, + expectedContents: nocontents, + initialSnapshots: newSnapshotArray("snap3-2", validSecretClass, "content3-2", "snapuid3-2", "claim3-2", true, nil, metaTimeNow, nil), + expectedSnapshots: newSnapshotArray("snap3-2", validSecretClass, "content3-2", "snapuid3-2", "claim3-2", false, newVolumeError("VolumeSnapshotContent is missing"), metaTimeNow, nil), + errors: noerrors, + expectedEvents: []string{"Warning SnapshotContentMissing"}, + test: testSyncSnapshot, + }, + { + name: "3-3 - ready snapshot(everything is well, do nothing)", + initialContents: newContentArray("content3-3", validSecretClass, "sid3-3", "vuid3-3", "volume3-3", "snapuid3-3", "snap3-3", &deletePolicy, nil, nil, false), + expectedContents: newContentArray("content3-3", validSecretClass, "sid3-3", "vuid3-3", "volume3-3", "snapuid3-3", "snap3-3", &deletePolicy, nil, nil, false), + initialSnapshots: newSnapshotArray("snap3-3", validSecretClass, "content3-3", "snapuid3-3", "claim3-3", true, nil, metaTimeNow, nil), + expectedSnapshots: newSnapshotArray("snap3-3", validSecretClass, "content3-3", "snapuid3-3", "claim3-3", true, nil, metaTimeNow, nil), + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "3-4 - ready snapshot misbound to VolumeSnapshotContent", + initialContents: newContentArray("content3-4", validSecretClass, "sid3-4", "vuid3-4", 
"volume3-4", "snapuid3-4-x", "snap3-4", &deletePolicy, nil, nil, false), + expectedContents: newContentArray("content3-4", validSecretClass, "sid3-4", "vuid3-4", "volume3-4", "snapuid3-4-x", "snap3-4", &deletePolicy, nil, nil, false), + initialSnapshots: newSnapshotArray("snap3-4", validSecretClass, "content3-4", "snapuid3-4", "claim3-4", true, nil, metaTimeNow, nil), + expectedSnapshots: newSnapshotArray("snap3-4", validSecretClass, "content3-4", "snapuid3-4", "claim3-4", false, newVolumeError("VolumeSnapshotContent is not bound to the VolumeSnapshot correctly"), metaTimeNow, nil), + errors: noerrors, + test: testSyncSnapshot, + }, + { + name: "3-5 - snapshot bound to content in which the driver does not match", + initialContents: newContentWithUnmatchDriverArray("content3-5", validSecretClass, "sid3-5", "vuid3-5", "volume3-5", "", "snap3-5", &deletePolicy, nil, nil), + expectedContents: nocontents, + initialSnapshots: newSnapshotArray("snap3-5", validSecretClass, "content3-5", "snapuid3-5", "claim3-5", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap3-5", validSecretClass, "content3-5", "snapuid3-5", "claim3-5", false, newVolumeError("VolumeSnapshotContent is missing"), nil, nil), + expectedEvents: []string{"Warning SnapshotContentMissing"}, + errors: noerrors, + test: testSyncSnapshotError, + }, + { + name: "3-6 - snapshot bound to content in which the snapshot uid does not match", + initialContents: newContentArray("content3-4", validSecretClass, "sid3-4", "vuid3-4", "volume3-4", "snapuid3-4-x", "snap3-6", &deletePolicy, nil, nil, false), + expectedContents: newContentArray("content3-4", validSecretClass, "sid3-4", "vuid3-4", "volume3-4", "snapuid3-4-x", "snap3-6", &deletePolicy, nil, nil, false), + initialSnapshots: newSnapshotArray("snap3-5", validSecretClass, "content3-5", "snapuid3-5", "claim3-5", false, nil, nil, nil), + expectedSnapshots: newSnapshotArray("snap3-5", validSecretClass, "content3-5", "snapuid3-5", "claim3-5", false, newVolumeError("VolumeSnapshotContent is missing"), nil, nil), + expectedEvents: []string{"Warning SnapshotContentMissing"}, + errors: noerrors, + test: testSyncSnapshotError, + }, + } + + runSyncTests(t, tests, snapshotClasses) +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/util.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/util.go new file mode 100644 index 000000000..b91f32591 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/util.go @@ -0,0 +1,380 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "fmt" + "strings" + + crdv1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/klog" + "k8s.io/kubernetes/pkg/util/slice" + "os" + "strconv" + "time" +) + +var ( + keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc +) + +type deprecatedSecretParamsMap struct { + name string + deprecatedSecretNameKey string + deprecatedSecretNamespaceKey string + secretNameKey string + secretNamespaceKey string +} + +const ( + // CSI Parameters prefixed with csiParameterPrefix are not passed through + // to the driver on CreateSnapshotRequest calls. Instead they are intended + // to be used by the CSI external-snapshotter and maybe used to populate + // fields in subsequent CSI calls or Kubernetes API objects. + csiParameterPrefix = "csi.storage.k8s.io/" + + prefixedSnapshotterSecretNameKey = csiParameterPrefix + "snapshotter-secret-name" + prefixedSnapshotterSecretNamespaceKey = csiParameterPrefix + "snapshotter-secret-namespace" + + // [Deprecated] CSI Parameters that are put into fields but + // NOT stripped from the parameters passed to CreateSnapshot + snapshotterSecretNameKey = "csiSnapshotterSecretName" + snapshotterSecretNamespaceKey = "csiSnapshotterSecretNamespace" + + // Name of finalizer on VolumeSnapshotContents that are bound by VolumeSnapshots + VolumeSnapshotContentFinalizer = "snapshot.storage.kubernetes.io/volumesnapshotcontent-protection" + VolumeSnapshotFinalizer = "snapshot.storage.kubernetes.io/volumesnapshot-protection" +) + +var snapshotterSecretParams = deprecatedSecretParamsMap{ + name: "Snapshotter", + deprecatedSecretNameKey: snapshotterSecretNameKey, + deprecatedSecretNamespaceKey: snapshotterSecretNamespaceKey, + secretNameKey: prefixedSnapshotterSecretNameKey, + secretNamespaceKey: prefixedSnapshotterSecretNamespaceKey, +} + +// Name of finalizer on PVCs that have been used as a source to create VolumeSnapshots +const PVCFinalizer = "snapshot.storage.kubernetes.io/pvc-protection" + +func snapshotKey(vs *crdv1.VolumeSnapshot) string { + return fmt.Sprintf("%s/%s", vs.Namespace, vs.Name) +} + +func snapshotRefKey(vsref *v1.ObjectReference) string { + return fmt.Sprintf("%s/%s", vsref.Namespace, vsref.Name) +} + +// storeObjectUpdate updates given cache with a new object version from Informer +// callback (i.e. with events from etcd) or with an object modified by the +// controller itself. Returns "true", if the cache was updated, false if the +// object is an old version and should be ignored. 
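// Editor's note - illustrative sketch, not part of the vendored upstream file.
// It shows how a sync loop is expected to consume storeObjectUpdate (defined
// just below): updates carrying an older ResourceVersion than the cached copy
// are dropped, anything newer (or equal) is stored and processed. The function
// and variable names in this sketch are placeholders.
func exampleStoreObjectUpdateUsage(store cache.Store, snapshot *crdv1.VolumeSnapshot) {
	updated, err := storeObjectUpdate(store, snapshot, "snapshot")
	if err != nil {
		klog.Errorf("could not cache snapshot update: %v", err)
		return
	}
	if !updated {
		// The informer delivered an older ResourceVersion than the one
		// already cached; ignore the event and wait for a newer one.
		return
	}
	// The cache now holds the up-to-date object; run the actual sync here.
	klog.V(4).Infof("syncing snapshot %s", snapshotKey(snapshot))
}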
+func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bool, error) { + objName, err := keyFunc(obj) + if err != nil { + return false, fmt.Errorf("Couldn't get key for object %+v: %v", obj, err) + } + oldObj, found, err := store.Get(obj) + if err != nil { + return false, fmt.Errorf("Error finding %s %q in controller cache: %v", className, objName, err) + } + + objAccessor, err := meta.Accessor(obj) + if err != nil { + return false, err + } + + if !found { + // This is a new object + klog.V(4).Infof("storeObjectUpdate: adding %s %q, version %s", className, objName, objAccessor.GetResourceVersion()) + if err = store.Add(obj); err != nil { + return false, fmt.Errorf("error adding %s %q to controller cache: %v", className, objName, err) + } + return true, nil + } + + oldObjAccessor, err := meta.Accessor(oldObj) + if err != nil { + return false, err + } + + objResourceVersion, err := strconv.ParseInt(objAccessor.GetResourceVersion(), 10, 64) + if err != nil { + return false, fmt.Errorf("error parsing ResourceVersion %q of %s %q: %s", objAccessor.GetResourceVersion(), className, objName, err) + } + oldObjResourceVersion, err := strconv.ParseInt(oldObjAccessor.GetResourceVersion(), 10, 64) + if err != nil { + return false, fmt.Errorf("error parsing old ResourceVersion %q of %s %q: %s", oldObjAccessor.GetResourceVersion(), className, objName, err) + } + + // Throw away only older version, let the same version pass - we do want to + // get periodic sync events. + if oldObjResourceVersion > objResourceVersion { + klog.V(4).Infof("storeObjectUpdate: ignoring %s %q version %s", className, objName, objAccessor.GetResourceVersion()) + return false, nil + } + + klog.V(4).Infof("storeObjectUpdate updating %s %q with version %s", className, objName, objAccessor.GetResourceVersion()) + if err = store.Update(obj); err != nil { + return false, fmt.Errorf("error updating %s %q in controller cache: %v", className, objName, err) + } + return true, nil +} + +// GetSnapshotContentNameForSnapshot returns SnapshotContent.Name for the create VolumeSnapshotContent. +// The name must be unique. +func GetSnapshotContentNameForSnapshot(snapshot *crdv1.VolumeSnapshot) string { + // If VolumeSnapshot object has SnapshotContentName, use it directly. + // This might be the case for static provisioning. + if len(snapshot.Spec.SnapshotContentName) > 0 { + return snapshot.Spec.SnapshotContentName + } + // Construct SnapshotContentName for dynamic provisioning. + return "snapcontent-" + string(snapshot.UID) +} + +// IsDefaultAnnotation returns a boolean if +// the annotation is set +func IsDefaultAnnotation(obj metav1.ObjectMeta) bool { + if obj.Annotations[IsDefaultSnapshotClassAnnotation] == "true" { + return true + } + + return false +} + +// verifyAndGetSecretNameAndNamespaceTemplate gets the values (templates) associated +// with the parameters specified in "secret" and verifies that they are specified correctly. 
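// Editor's note - illustrative sketch, not part of the vendored upstream file.
// It spells out which snapshot-class parameter combinations the verification
// below accepts: the secret name and namespace templates must either both be
// present or both be absent, whether the prefixed CSI keys or the deprecated
// "csiSnapshotter*" keys are used. The literal maps here are examples only.
func exampleSecretTemplateParams() {
	// Accepted: name and namespace both supplied via the prefixed keys.
	ok := map[string]string{
		prefixedSnapshotterSecretNameKey:      "${volumesnapshot.name}-secret",
		prefixedSnapshotterSecretNamespaceKey: "${volumesnapshot.namespace}",
	}
	// Rejected: only a name is supplied, so the "either name and namespace
	// ... Both must be specified" error is returned.
	bad := map[string]string{
		snapshotterSecretNameKey: "secret",
	}
	if _, _, err := verifyAndGetSecretNameAndNamespaceTemplate(snapshotterSecretParams, ok); err != nil {
		klog.Errorf("unexpected: %v", err)
	}
	if _, _, err := verifyAndGetSecretNameAndNamespaceTemplate(snapshotterSecretParams, bad); err != nil {
		klog.V(4).Infof("rejected as expected: %v", err)
	}
}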
+func verifyAndGetSecretNameAndNamespaceTemplate(secret deprecatedSecretParamsMap, snapshotClassParams map[string]string) (nameTemplate, namespaceTemplate string, err error) { + numName := 0 + numNamespace := 0 + if t, ok := snapshotClassParams[secret.deprecatedSecretNameKey]; ok { + nameTemplate = t + numName++ + klog.Warning(deprecationWarning(secret.deprecatedSecretNameKey, secret.secretNameKey, "")) + } + if t, ok := snapshotClassParams[secret.deprecatedSecretNamespaceKey]; ok { + namespaceTemplate = t + numNamespace++ + klog.Warning(deprecationWarning(secret.deprecatedSecretNamespaceKey, secret.secretNamespaceKey, "")) + } + if t, ok := snapshotClassParams[secret.secretNameKey]; ok { + nameTemplate = t + numName++ + } + if t, ok := snapshotClassParams[secret.secretNamespaceKey]; ok { + namespaceTemplate = t + numNamespace++ + } + + if numName > 1 || numNamespace > 1 { + // Double specified error + return "", "", fmt.Errorf("%s secrets specified in paramaters with both \"csi\" and \"%s\" keys", secret.name, csiParameterPrefix) + } else if numName != numNamespace { + // Not both 0 or both 1 + return "", "", fmt.Errorf("either name and namespace for %s secrets specified, Both must be specified", secret.name) + } else if numName == 1 { + // Case where we've found a name and a namespace template + if nameTemplate == "" || namespaceTemplate == "" { + return "", "", fmt.Errorf("%s secrets specified in parameters but value of either namespace or name is empty", secret.name) + } + return nameTemplate, namespaceTemplate, nil + } else if numName == 0 { + // No secrets specified + return "", "", nil + } else { + // THIS IS NOT A VALID CASE + return "", "", fmt.Errorf("unknown error with getting secret name and namespace templates") + } +} + +// getSecretReference returns a reference to the secret specified in the given nameTemplate +// and namespaceTemplate, or an error if the templates are not specified correctly. +// No lookup of the referenced secret is performed, and the secret may or may not exist. +// +// supported tokens for name resolution: +// - ${volumesnapshotcontent.name} +// - ${volumesnapshot.namespace} +// - ${volumesnapshot.name} +// - ${volumesnapshot.annotations['ANNOTATION_KEY']} (e.g. ${pvc.annotations['example.com/snapshot-create-secret-name']}) +// +// supported tokens for namespace resolution: +// - ${volumesnapshotcontent.name} +// - ${volumesnapshot.namespace} +// +// an error is returned in the following situations: +// - the nameTemplate or namespaceTemplate contains a token that cannot be resolved +// - the resolved name is not a valid secret name +// - the resolved namespace is not a valid namespace name +func getSecretReference(snapshotClassParams map[string]string, snapContentName string, snapshot *crdv1.VolumeSnapshot) (*v1.SecretReference, error) { + nameTemplate, namespaceTemplate, err := verifyAndGetSecretNameAndNamespaceTemplate(snapshotterSecretParams, snapshotClassParams) + if err != nil { + return nil, fmt.Errorf("failed to get name and namespace template from params: %v", err) + } + + if nameTemplate == "" && namespaceTemplate == "" { + return nil, nil + } + + ref := &v1.SecretReference{} + + // Secret namespace template can make use of the VolumeSnapshotContent name or the VolumeSnapshot namespace. + // Note that neither of those things are under the control of the VolumeSnapshot user. 
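	// Editor's note - worked example (not part of the vendored upstream file):
	// with class parameters
	//   csi.storage.k8s.io/snapshotter-secret-name:      "${volumesnapshot.name}-secret"
	//   csi.storage.k8s.io/snapshotter-secret-namespace: "${volumesnapshot.namespace}"
	// a VolumeSnapshot named "snap-1" in namespace "default" resolves to the
	// Secret reference default/snap-1-secret, while the deletion path (where
	// the snapshot may be nil) can still resolve templates that only use
	// ${volumesnapshotcontent.name}.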
+ namespaceParams := map[string]string{"volumesnapshotcontent.name": snapContentName} + // snapshot may be nil when resolving create/delete snapshot secret names because the + // snapshot may or may not exist at delete time + if snapshot != nil { + namespaceParams["volumesnapshot.namespace"] = snapshot.Namespace + } + + resolvedNamespace, err := resolveTemplate(namespaceTemplate, namespaceParams) + if err != nil { + return nil, fmt.Errorf("error resolving value %q: %v", namespaceTemplate, err) + } + klog.V(4).Infof("GetSecretReference namespaceTemplate %s, namespaceParams: %+v, resolved %s", namespaceTemplate, namespaceParams, resolvedNamespace) + + if len(validation.IsDNS1123Label(resolvedNamespace)) > 0 { + if namespaceTemplate != resolvedNamespace { + return nil, fmt.Errorf("%q resolved to %q which is not a valid namespace name", namespaceTemplate, resolvedNamespace) + } + return nil, fmt.Errorf("%q is not a valid namespace name", namespaceTemplate) + } + ref.Namespace = resolvedNamespace + + // Secret name template can make use of the VolumeSnapshotContent name, VolumeSnapshot name or namespace, + // or a VolumeSnapshot annotation. + // Note that VolumeSnapshot name and annotations are under the VolumeSnapshot user's control. + nameParams := map[string]string{"volumesnapshotcontent.name": snapContentName} + if snapshot != nil { + nameParams["volumesnapshot.name"] = snapshot.Name + nameParams["volumesnapshot.namespace"] = snapshot.Namespace + for k, v := range snapshot.Annotations { + nameParams["volumesnapshot.annotations['"+k+"']"] = v + } + } + resolvedName, err := resolveTemplate(nameTemplate, nameParams) + if err != nil { + return nil, fmt.Errorf("error resolving value %q: %v", nameTemplate, err) + } + if len(validation.IsDNS1123Subdomain(resolvedName)) > 0 { + if nameTemplate != resolvedName { + return nil, fmt.Errorf("%q resolved to %q which is not a valid secret name", nameTemplate, resolvedName) + } + return nil, fmt.Errorf("%q is not a valid secret name", nameTemplate) + } + ref.Name = resolvedName + + klog.V(4).Infof("GetSecretReference validated Secret: %+v", ref) + return ref, nil +} + +// resolveTemplate resolves the template by checking if the value is missing for a key +func resolveTemplate(template string, params map[string]string) (string, error) { + missingParams := sets.NewString() + resolved := os.Expand(template, func(k string) string { + v, ok := params[k] + if !ok { + missingParams.Insert(k) + } + return v + }) + if missingParams.Len() > 0 { + return "", fmt.Errorf("invalid tokens: %q", missingParams.List()) + } + return resolved, nil +} + +// getCredentials retrieves credentials stored in v1.SecretReference +func getCredentials(k8s kubernetes.Interface, ref *v1.SecretReference) (map[string]string, error) { + if ref == nil { + return nil, nil + } + + secret, err := k8s.CoreV1().Secrets(ref.Namespace).Get(ref.Name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("error getting secret %s in namespace %s: %v", ref.Name, ref.Namespace, err) + } + + credentials := map[string]string{} + for key, value := range secret.Data { + credentials[key] = string(value) + } + return credentials, nil +} + +// NoResyncPeriodFunc Returns 0 for resyncPeriod in case resyncing is not needed. +func NoResyncPeriodFunc() time.Duration { + return 0 +} + +// isContentDeletionCandidate checks if a volume snapshot content is a deletion candidate. 
+func isContentDeletionCandidate(content *crdv1.VolumeSnapshotContent) bool { + return content.ObjectMeta.DeletionTimestamp != nil && slice.ContainsString(content.ObjectMeta.Finalizers, VolumeSnapshotContentFinalizer, nil) +} + +// needToAddContentFinalizer checks if a Finalizer needs to be added for the volume snapshot content. +func needToAddContentFinalizer(content *crdv1.VolumeSnapshotContent) bool { + return content.ObjectMeta.DeletionTimestamp == nil && !slice.ContainsString(content.ObjectMeta.Finalizers, VolumeSnapshotContentFinalizer, nil) +} + +// isSnapshotDeletionCandidate checks if a volume snapshot is a deletion candidate. +func isSnapshotDeletionCandidate(snapshot *crdv1.VolumeSnapshot) bool { + return snapshot.ObjectMeta.DeletionTimestamp != nil && slice.ContainsString(snapshot.ObjectMeta.Finalizers, VolumeSnapshotFinalizer, nil) +} + +// needToAddSnapshotFinalizer checks if a Finalizer needs to be added for the volume snapshot. +func needToAddSnapshotFinalizer(snapshot *crdv1.VolumeSnapshot) bool { + return snapshot.ObjectMeta.DeletionTimestamp == nil && !slice.ContainsString(snapshot.ObjectMeta.Finalizers, VolumeSnapshotFinalizer, nil) +} + +func deprecationWarning(deprecatedParam, newParam, removalVersion string) string { + if removalVersion == "" { + removalVersion = "a future release" + } + newParamPhrase := "" + if len(newParam) != 0 { + newParamPhrase = fmt.Sprintf(", please use \"%s\" instead", newParam) + } + return fmt.Sprintf("\"%s\" is deprecated and will be removed in %s%s", deprecatedParam, removalVersion, newParamPhrase) +} + +func removePrefixedParameters(param map[string]string) (map[string]string, error) { + newParam := map[string]string{} + for k, v := range param { + if strings.HasPrefix(k, csiParameterPrefix) { + // Check if its well known + switch k { + case prefixedSnapshotterSecretNameKey: + case prefixedSnapshotterSecretNamespaceKey: + default: + return map[string]string{}, fmt.Errorf("found unknown parameter key \"%s\" with reserved namespace %s", k, csiParameterPrefix) + } + } else { + // Don't strip, add this key-value to new map + // Deprecated parameters prefixed with "csi" are not stripped to preserve backwards compatibility + newParam[k] = v + } + } + return newParam, nil +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/util_test.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/util_test.go new file mode 100644 index 000000000..4da945761 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/controller/util_test.go @@ -0,0 +1,195 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + crdv1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "reflect" + "testing" +) + +func TestGetSecretReference(t *testing.T) { + testcases := map[string]struct { + params map[string]string + snapContentName string + snapshot *crdv1.VolumeSnapshot + expectRef *v1.SecretReference + expectErr bool + }{ + "no params": { + params: nil, + expectRef: nil, + }, + "empty err": { + params: map[string]string{snapshotterSecretNameKey: "", snapshotterSecretNamespaceKey: ""}, + expectErr: true, + }, + "[deprecated] name, no namespace": { + params: map[string]string{snapshotterSecretNameKey: "foo"}, + expectErr: true, + }, + "namespace, no name": { + params: map[string]string{prefixedSnapshotterSecretNamespaceKey: "foo"}, + expectErr: true, + }, + "simple - valid": { + params: map[string]string{prefixedSnapshotterSecretNameKey: "name", prefixedSnapshotterSecretNamespaceKey: "ns"}, + snapshot: &crdv1.VolumeSnapshot{}, + expectRef: &v1.SecretReference{Name: "name", Namespace: "ns"}, + }, + "[deprecated] simple - valid, no pvc": { + params: map[string]string{snapshotterSecretNameKey: "name", snapshotterSecretNamespaceKey: "ns"}, + snapshot: nil, + expectRef: &v1.SecretReference{Name: "name", Namespace: "ns"}, + }, + "simple - invalid name": { + params: map[string]string{prefixedSnapshotterSecretNameKey: "bad name", prefixedSnapshotterSecretNamespaceKey: "ns"}, + snapshot: &crdv1.VolumeSnapshot{}, + expectRef: nil, + expectErr: true, + }, + "[deprecated] simple - invalid namespace": { + params: map[string]string{snapshotterSecretNameKey: "name", snapshotterSecretNamespaceKey: "bad ns"}, + snapshot: &crdv1.VolumeSnapshot{}, + expectRef: nil, + expectErr: true, + }, + "template - valid": { + params: map[string]string{ + prefixedSnapshotterSecretNameKey: "static-${volumesnapshotcontent.name}-${volumesnapshot.namespace}-${volumesnapshot.name}-${volumesnapshot.annotations['akey']}", + prefixedSnapshotterSecretNamespaceKey: "static-${volumesnapshotcontent.name}-${volumesnapshot.namespace}", + }, + snapContentName: "snapcontentname", + snapshot: &crdv1.VolumeSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: "snapshotname", + Namespace: "snapshotnamespace", + Annotations: map[string]string{"akey": "avalue"}, + }, + }, + expectRef: &v1.SecretReference{Name: "static-snapcontentname-snapshotnamespace-snapshotname-avalue", Namespace: "static-snapcontentname-snapshotnamespace"}, + }, + "template - invalid namespace tokens": { + params: map[string]string{ + snapshotterSecretNameKey: "myname", + snapshotterSecretNamespaceKey: "mynamespace${bar}", + }, + snapshot: &crdv1.VolumeSnapshot{}, + expectRef: nil, + expectErr: true, + }, + "template - invalid name tokens": { + params: map[string]string{ + snapshotterSecretNameKey: "myname${foo}", + snapshotterSecretNamespaceKey: "mynamespace", + }, + snapshot: &crdv1.VolumeSnapshot{}, + expectRef: nil, + expectErr: true, + }, + } + + for k, tc := range testcases { + t.Run(k, func(t *testing.T) { + ref, err := getSecretReference(tc.params, tc.snapContentName, tc.snapshot) + if err != nil { + if tc.expectErr { + return + } + t.Fatalf("Did not expect error but got: %v", err) + + } else { + if tc.expectErr { + t.Fatalf("Expected error but got none") + } + } + if !reflect.DeepEqual(ref, tc.expectRef) { + t.Errorf("Expected %v, got %v", tc.expectRef, ref) + } + }) + } +} + +func TestRemovePrefixedCSIParams(t *testing.T) { + testcases := 
[]struct { + name string + params map[string]string + expectedParams map[string]string + expectErr bool + }{ + { + name: "no prefix", + params: map[string]string{"csiFoo": "bar", "bim": "baz"}, + expectedParams: map[string]string{"csiFoo": "bar", "bim": "baz"}, + }, + { + name: "one prefixed", + params: map[string]string{prefixedSnapshotterSecretNameKey: "bar", "bim": "baz"}, + expectedParams: map[string]string{"bim": "baz"}, + }, + { + name: "all known prefixed", + params: map[string]string{ + prefixedSnapshotterSecretNameKey: "csiBar", + prefixedSnapshotterSecretNamespaceKey: "csiBar", + }, + expectedParams: map[string]string{}, + }, + { + name: "all known deprecated params not stripped", + params: map[string]string{ + snapshotterSecretNameKey: "csiBar", + snapshotterSecretNamespaceKey: "csiBar", + }, + expectedParams: map[string]string{ + snapshotterSecretNameKey: "csiBar", + snapshotterSecretNamespaceKey: "csiBar", + }, + }, + { + name: "unknown prefixed var", + params: map[string]string{csiParameterPrefix + "bim": "baz"}, + expectErr: true, + }, + { + name: "empty", + params: map[string]string{}, + expectedParams: map[string]string{}, + }, + } + for _, tc := range testcases { + t.Logf("test: %v", tc.name) + newParams, err := removePrefixedParameters(tc.params) + if err != nil { + if tc.expectErr { + continue + } else { + t.Fatalf("Encountered unexpected error: %v", err) + } + } else { + if tc.expectErr { + t.Fatalf("Did not get error when one was expected") + } + } + eq := reflect.DeepEqual(newParams, tc.expectedParams) + if !eq { + t.Fatalf("Stripped paramaters: %v not equal to expected paramaters: %v", newParams, tc.expectedParams) + } + } +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/snapshotter/snapshotter.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/snapshotter/snapshotter.go new file mode 100644 index 000000000..ee5031dfc --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/snapshotter/snapshotter.go @@ -0,0 +1,135 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package snapshotter + +import ( + "context" + "fmt" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/timestamp" + csirpc "github.com/kubernetes-csi/csi-lib-utils/rpc" + + "google.golang.org/grpc" + + "k8s.io/api/core/v1" + "k8s.io/klog" +) + +// Snapshotter implements CreateSnapshot/DeleteSnapshot operations against a remote CSI driver. 
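+//
+// A minimal usage sketch (hypothetical names; assumes a *grpc.ClientConn to
+// the driver's controller service has already been established, for example
+// with csi-lib-utils/connection.Connect):
+//
+//	s := NewSnapshotter(conn)
+//	driver, id, created, size, ready, err := s.CreateSnapshot(
+//		ctx, "snap-demo", pv, nil /* parameters */, nil /* secrets */)
+//
+// where pv is the *v1.PersistentVolume backing the source volume; it must
+// carry a CSI volume source or CreateSnapshot returns an error.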
+type Snapshotter interface { + // CreateSnapshot creates a snapshot for a volume + CreateSnapshot(ctx context.Context, snapshotName string, volume *v1.PersistentVolume, parameters map[string]string, snapshotterCredentials map[string]string) (driverName string, snapshotId string, timestamp int64, size int64, readyToUse bool, err error) + + // DeleteSnapshot deletes a snapshot from a volume + DeleteSnapshot(ctx context.Context, snapshotID string, snapshotterCredentials map[string]string) (err error) + + // GetSnapshotStatus returns if a snapshot is ready to use, creation time, and restore size. + GetSnapshotStatus(ctx context.Context, snapshotID string) (bool, int64, int64, error) +} + +type snapshot struct { + conn *grpc.ClientConn +} + +func NewSnapshotter(conn *grpc.ClientConn) Snapshotter { + return &snapshot{ + conn: conn, + } +} + +func (s *snapshot) CreateSnapshot(ctx context.Context, snapshotName string, volume *v1.PersistentVolume, parameters map[string]string, snapshotterCredentials map[string]string) (string, string, int64, int64, bool, error) { + klog.V(5).Infof("CSI CreateSnapshot: %s", snapshotName) + if volume.Spec.CSI == nil { + return "", "", 0, 0, false, fmt.Errorf("CSIPersistentVolumeSource not defined in spec") + } + + client := csi.NewControllerClient(s.conn) + + driverName, err := csirpc.GetDriverName(ctx, s.conn) + if err != nil { + return "", "", 0, 0, false, err + } + + req := csi.CreateSnapshotRequest{ + SourceVolumeId: volume.Spec.CSI.VolumeHandle, + Name: snapshotName, + Parameters: parameters, + Secrets: snapshotterCredentials, + } + + rsp, err := client.CreateSnapshot(ctx, &req) + if err != nil { + return "", "", 0, 0, false, err + } + + klog.V(5).Infof("CSI CreateSnapshot: %s driver name [%s] snapshot ID [%s] time stamp [%d] size [%d] readyToUse [%v]", snapshotName, driverName, rsp.Snapshot.SnapshotId, rsp.Snapshot.CreationTime, rsp.Snapshot.SizeBytes, rsp.Snapshot.ReadyToUse) + creationTime, err := timestampToUnixTime(rsp.Snapshot.CreationTime) + if err != nil { + return "", "", 0, 0, false, err + } + return driverName, rsp.Snapshot.SnapshotId, creationTime, rsp.Snapshot.SizeBytes, rsp.Snapshot.ReadyToUse, nil +} + +func (s *snapshot) DeleteSnapshot(ctx context.Context, snapshotID string, snapshotterCredentials map[string]string) (err error) { + client := csi.NewControllerClient(s.conn) + + req := csi.DeleteSnapshotRequest{ + SnapshotId: snapshotID, + Secrets: snapshotterCredentials, + } + + if _, err := client.DeleteSnapshot(ctx, &req); err != nil { + return err + } + + return nil +} + +func (s *snapshot) GetSnapshotStatus(ctx context.Context, snapshotID string) (bool, int64, int64, error) { + client := csi.NewControllerClient(s.conn) + + req := csi.ListSnapshotsRequest{ + SnapshotId: snapshotID, + } + + rsp, err := client.ListSnapshots(ctx, &req) + if err != nil { + return false, 0, 0, err + } + + if rsp.Entries == nil || len(rsp.Entries) == 0 { + return false, 0, 0, fmt.Errorf("can not find snapshot for snapshotID %s", snapshotID) + } + + creationTime, err := timestampToUnixTime(rsp.Entries[0].Snapshot.CreationTime) + if err != nil { + return false, 0, 0, err + } + return rsp.Entries[0].Snapshot.ReadyToUse, creationTime, rsp.Entries[0].Snapshot.SizeBytes, nil +} + +func timestampToUnixTime(t *timestamp.Timestamp) (int64, error) { + time, err := ptypes.Timestamp(t) + if err != nil { + return -1, err + } + // TODO: clean this up, we probably don't need this translation layer + // and can just use time.Time + return time.UnixNano(), nil +} diff --git 
a/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/snapshotter/snapshotter_test.go b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/snapshotter/snapshotter_test.go new file mode 100644 index 000000000..dbe973d38 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/pkg/snapshotter/snapshotter_test.go @@ -0,0 +1,504 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package snapshotter + +import ( + "context" + "fmt" + "reflect" + "testing" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/ptypes" + "github.com/kubernetes-csi/csi-lib-utils/connection" + "github.com/kubernetes-csi/csi-test/driver" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +const ( + driverName = "foo/bar" +) + +func createMockServer(t *testing.T) (*gomock.Controller, *driver.MockCSIDriver, *driver.MockIdentityServer, *driver.MockControllerServer, *grpc.ClientConn, error) { + // Start the mock server + mockController := gomock.NewController(t) + identityServer := driver.NewMockIdentityServer(mockController) + controllerServer := driver.NewMockControllerServer(mockController) + drv := driver.NewMockCSIDriver(&driver.MockCSIDriverServers{ + Identity: identityServer, + Controller: controllerServer, + }) + drv.Start() + + // Create a client connection to it + addr := drv.Address() + csiConn, err := connection.Connect(addr) + if err != nil { + return nil, nil, nil, nil, nil, err + } + + return mockController, drv, identityServer, controllerServer, csiConn, nil +} + +func TestCreateSnapshot(t *testing.T) { + defaultName := "snapshot-test" + defaultID := "testid" + createTimestamp := ptypes.TimestampNow() + createTime, err := ptypes.Timestamp(createTimestamp) + if err != nil { + t.Fatalf("Failed to convert timestamp to time: %v", err) + } + + createSecrets := map[string]string{"foo": "bar"} + defaultParameter := map[string]string{ + "param1": "value1", + "param2": "value2", + } + + csiVolume := FakeCSIVolume() + volumeWithoutCSI := FakeVolume() + + defaultRequest := &csi.CreateSnapshotRequest{ + Name: defaultName, + SourceVolumeId: csiVolume.Spec.CSI.VolumeHandle, + } + + attributesRequest := &csi.CreateSnapshotRequest{ + Name: defaultName, + Parameters: defaultParameter, + SourceVolumeId: csiVolume.Spec.CSI.VolumeHandle, + } + + secretsRequest := &csi.CreateSnapshotRequest{ + Name: defaultName, + SourceVolumeId: csiVolume.Spec.CSI.VolumeHandle, + Secrets: createSecrets, + } + + defaultResponse := &csi.CreateSnapshotResponse{ + Snapshot: &csi.Snapshot{ + SnapshotId: defaultID, + SizeBytes: 1000, + SourceVolumeId: csiVolume.Spec.CSI.VolumeHandle, + CreationTime: createTimestamp, + ReadyToUse: true, + }, + } + + pluginInfoOutput := &csi.GetPluginInfoResponse{ + Name: driverName, + VendorVersion: "0.3.0", + Manifest: 
map[string]string{ + "hello": "world", + }, + } + + type snapshotResult struct { + driverName string + snapshotId string + timestamp int64 + size int64 + readyToUse bool + } + + result := &snapshotResult{ + size: 1000, + driverName: driverName, + snapshotId: defaultID, + timestamp: createTime.UnixNano(), + readyToUse: true, + } + + tests := []struct { + name string + snapshotName string + volume *v1.PersistentVolume + parameters map[string]string + secrets map[string]string + input *csi.CreateSnapshotRequest + output *csi.CreateSnapshotResponse + injectError codes.Code + expectError bool + expectResult *snapshotResult + }{ + { + name: "success", + snapshotName: defaultName, + volume: csiVolume, + input: defaultRequest, + output: defaultResponse, + expectError: false, + expectResult: result, + }, + { + name: "attributes", + snapshotName: defaultName, + volume: csiVolume, + parameters: defaultParameter, + input: attributesRequest, + output: defaultResponse, + expectError: false, + expectResult: result, + }, + { + name: "secrets", + snapshotName: defaultName, + volume: csiVolume, + secrets: createSecrets, + input: secretsRequest, + output: defaultResponse, + expectError: false, + expectResult: result, + }, + { + name: "fail for volume without csi source", + snapshotName: defaultName, + volume: volumeWithoutCSI, + input: nil, + output: nil, + expectError: true, + }, + { + name: "gRPC transient error", + snapshotName: defaultName, + volume: csiVolume, + input: defaultRequest, + output: nil, + injectError: codes.DeadlineExceeded, + expectError: true, + }, + { + name: "gRPC final error", + snapshotName: defaultName, + volume: csiVolume, + input: defaultRequest, + output: nil, + injectError: codes.NotFound, + expectError: true, + }, + } + + mockController, driver, identityServer, controllerServer, csiConn, err := createMockServer(t) + if err != nil { + t.Fatal(err) + } + defer mockController.Finish() + defer driver.Stop() + defer csiConn.Close() + + for _, test := range tests { + in := test.input + out := test.output + var injectedErr error + if test.injectError != codes.OK { + injectedErr = status.Error(test.injectError, fmt.Sprintf("Injecting error %d", test.injectError)) + } + + // Setup expectation + if in != nil { + identityServer.EXPECT().GetPluginInfo(gomock.Any(), gomock.Any()).Return(pluginInfoOutput, nil).Times(1) + controllerServer.EXPECT().CreateSnapshot(gomock.Any(), in).Return(out, injectedErr).Times(1) + } + + s := NewSnapshotter(csiConn) + driverName, snapshotId, timestamp, size, readyToUse, err := s.CreateSnapshot(context.Background(), test.snapshotName, test.volume, test.parameters, test.secrets) + if test.expectError && err == nil { + t.Errorf("test %q: Expected error, got none", test.name) + } + if !test.expectError && err != nil { + t.Errorf("test %q: got error: %v", test.name, err) + } + if test.expectResult != nil { + if driverName != test.expectResult.driverName { + t.Errorf("test %q: expected driverName: %q, got: %q", test.name, test.expectResult.driverName, driverName) + } + + if snapshotId != test.expectResult.snapshotId { + t.Errorf("test %q: expected snapshotId: %v, got: %v", test.name, test.expectResult.snapshotId, snapshotId) + } + + if timestamp != test.expectResult.timestamp { + t.Errorf("test %q: expected create time: %v, got: %v", test.name, test.expectResult.timestamp, timestamp) + } + + if size != test.expectResult.size { + t.Errorf("test %q: expected size: %v, got: %v", test.name, test.expectResult.size, size) + } + + if !reflect.DeepEqual(readyToUse, 
test.expectResult.readyToUse) { + t.Errorf("test %q: expected readyToUse: %v, got: %v", test.name, test.expectResult.readyToUse, readyToUse) + } + } + } +} + +func TestDeleteSnapshot(t *testing.T) { + defaultID := "testid" + secrets := map[string]string{"foo": "bar"} + + defaultRequest := &csi.DeleteSnapshotRequest{ + SnapshotId: defaultID, + } + + secretsRequest := &csi.DeleteSnapshotRequest{ + SnapshotId: defaultID, + Secrets: secrets, + } + + tests := []struct { + name string + snapshotID string + secrets map[string]string + input *csi.DeleteSnapshotRequest + output *csi.DeleteSnapshotResponse + injectError codes.Code + expectError bool + }{ + { + name: "success", + snapshotID: defaultID, + input: defaultRequest, + output: &csi.DeleteSnapshotResponse{}, + expectError: false, + }, + { + name: "secrets", + snapshotID: defaultID, + secrets: secrets, + input: secretsRequest, + output: &csi.DeleteSnapshotResponse{}, + expectError: false, + }, + { + name: "gRPC transient error", + snapshotID: defaultID, + input: defaultRequest, + output: nil, + injectError: codes.DeadlineExceeded, + expectError: true, + }, + { + name: "gRPC final error", + snapshotID: defaultID, + input: defaultRequest, + output: nil, + injectError: codes.NotFound, + expectError: true, + }, + } + + mockController, driver, _, controllerServer, csiConn, err := createMockServer(t) + if err != nil { + t.Fatal(err) + } + defer mockController.Finish() + defer driver.Stop() + defer csiConn.Close() + + for _, test := range tests { + in := test.input + out := test.output + var injectedErr error + if test.injectError != codes.OK { + injectedErr = status.Error(test.injectError, fmt.Sprintf("Injecting error %d", test.injectError)) + } + + // Setup expectation + if in != nil { + controllerServer.EXPECT().DeleteSnapshot(gomock.Any(), in).Return(out, injectedErr).Times(1) + } + + s := NewSnapshotter(csiConn) + err := s.DeleteSnapshot(context.Background(), test.snapshotID, test.secrets) + if test.expectError && err == nil { + t.Errorf("test %q: Expected error, got none", test.name) + } + if !test.expectError && err != nil { + t.Errorf("test %q: got error: %v", test.name, err) + } + } +} + +func TestGetSnapshotStatus(t *testing.T) { + defaultID := "testid" + size := int64(1000) + createTimestamp := ptypes.TimestampNow() + createTime, err := ptypes.Timestamp(createTimestamp) + if err != nil { + t.Fatalf("Failed to convert timestamp to time: %v", err) + } + + defaultRequest := &csi.ListSnapshotsRequest{ + SnapshotId: defaultID, + } + + defaultResponse := &csi.ListSnapshotsResponse{ + Entries: []*csi.ListSnapshotsResponse_Entry{ + { + Snapshot: &csi.Snapshot{ + SnapshotId: defaultID, + SizeBytes: size, + SourceVolumeId: "volumeid", + CreationTime: createTimestamp, + ReadyToUse: true, + }, + }, + }, + } + + tests := []struct { + name string + snapshotID string + input *csi.ListSnapshotsRequest + output *csi.ListSnapshotsResponse + injectError codes.Code + expectError bool + expectReady bool + expectCreateAt int64 + expectSize int64 + }{ + { + name: "success", + snapshotID: defaultID, + input: defaultRequest, + output: defaultResponse, + expectError: false, + expectReady: true, + expectCreateAt: createTime.UnixNano(), + expectSize: size, + }, + { + name: "gRPC transient error", + snapshotID: defaultID, + input: defaultRequest, + output: nil, + injectError: codes.DeadlineExceeded, + expectError: true, + }, + { + name: "gRPC final error", + snapshotID: defaultID, + input: defaultRequest, + output: nil, + injectError: codes.NotFound, + expectError: 
true, + }, + } + + mockController, driver, _, controllerServer, csiConn, err := createMockServer(t) + if err != nil { + t.Fatal(err) + } + defer mockController.Finish() + defer driver.Stop() + defer csiConn.Close() + + for _, test := range tests { + in := test.input + out := test.output + var injectedErr error + if test.injectError != codes.OK { + injectedErr = status.Error(test.injectError, fmt.Sprintf("Injecting error %d", test.injectError)) + } + + // Setup expectation + if in != nil { + controllerServer.EXPECT().ListSnapshots(gomock.Any(), in).Return(out, injectedErr).Times(1) + } + + s := NewSnapshotter(csiConn) + ready, createTime, size, err := s.GetSnapshotStatus(context.Background(), test.snapshotID) + if test.expectError && err == nil { + t.Errorf("test %q: Expected error, got none", test.name) + } + if !test.expectError && err != nil { + t.Errorf("test %q: got error: %v", test.name, err) + } + if test.expectReady != ready { + t.Errorf("test %q: expected status: %v, got: %v", test.name, test.expectReady, ready) + } + if test.expectCreateAt != createTime { + t.Errorf("test %q: expected createTime: %v, got: %v", test.name, test.expectCreateAt, createTime) + } + if test.expectSize != size { + t.Errorf("test %q: expected size: %v, got: %v", test.name, test.expectSize, size) + } + } +} + +func FakeCSIVolume() *v1.PersistentVolume { + volume := v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-csi-volume", + }, + Spec: v1.PersistentVolumeSpec{ + ClaimRef: &v1.ObjectReference{ + Kind: "PersistentVolumeClaim", + APIVersion: "v1", + UID: types.UID("uid123"), + Namespace: "default", + Name: "test-claim", + }, + PersistentVolumeSource: v1.PersistentVolumeSource{ + CSI: &v1.CSIPersistentVolumeSource{ + Driver: driverName, + VolumeHandle: "foo", + }, + }, + StorageClassName: "default", + }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeBound, + }, + } + + return &volume +} + +func FakeVolume() *v1.PersistentVolume { + volume := v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-csi-volume", + }, + Spec: v1.PersistentVolumeSpec{ + ClaimRef: &v1.ObjectReference{ + Kind: "PersistentVolumeClaim", + APIVersion: "v1", + UID: types.UID("uid123"), + Namespace: "default", + Name: "test-claim", + }, + PersistentVolumeSource: v1.PersistentVolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}, + }, + StorageClassName: "default", + }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeBound, + }, + } + + return &volume +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/.prow.sh b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/.prow.sh new file mode 100755 index 000000000..b18c53581 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/.prow.sh @@ -0,0 +1,7 @@ +#! /bin/bash -e +# +# This is for testing csi-release-tools itself in Prow. All other +# repos use prow.sh for that, but as csi-release-tools isn't a normal +# repo with some Go code in it, it has a custom Prow test script. + +./verify-shellcheck.sh "$(pwd)" diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/CONTRIBUTING.md b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/CONTRIBUTING.md new file mode 100644 index 000000000..de4711513 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing Guidelines + +Welcome to Kubernetes. 
We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + + + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + + diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/LICENSE b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/OWNERS b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/OWNERS new file mode 100644 index 000000000..6d2f474e1 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/OWNERS @@ -0,0 +1,11 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md + +approvers: +- saad-ali +- msau42 +- pohly + +reviewers: +- saad-ali +- msau42 +- pohly diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/README.md b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/README.md new file mode 100644 index 000000000..bc061aeeb --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/README.md @@ -0,0 +1,108 @@ +# [csi-release-tools](https://github.com/kubernetes-csi/csi-release-tools) + +These build and test rules can be shared between different Go projects +without modifications. 
Customization for the different projects happens
+in the top-level Makefile.
+
+The rules include support for building and pushing Docker images, with
+the following features:
+ - one or more command and image per project
+ - push canary and/or tagged release images
+ - automatically derive the image tag(s) from repo tags
+ - the source code revision is stored in a "revision" image label
+ - never overwrites an existing release image
+
+Usage
+-----
+
+The expected repository layout is:
+ - `cmd/*/*.go` - source code for each command
+ - `cmd/*/Dockerfile` - docker file for each command or
+   Dockerfile in the root when only building a single command
+ - `Makefile` - includes `release-tools/build.make` and sets
+   configuration variables
+ - `.travis.yml` - a symlink to `release-tools/.travis.yml`
+
+To create a release, tag a certain revision with a name that
+starts with `v`, for example `v1.0.0`, then `make push`
+while that commit is checked out.
+
+It does not matter on which branch that revision exists, i.e. it is
+possible to create releases directly from master. A release branch can
+still be created for maintenance releases later if needed.
+
+Release branches are expected to be named `release-x.y` for releases
+`x.y.z`. Building from such a branch creates `x.y-canary`
+images. Building from master creates the main `canary` image.
+
+Sharing and updating
+--------------------
+
+[`git subtree`](https://github.com/git/git/blob/master/contrib/subtree/git-subtree.txt)
+is the recommended way of maintaining a copy of the rules inside the
+`release-tools` directory of a project. This way, it is possible to make
+changes also locally, test them and then push them back to the shared
+repository at a later time.
+
+Cheat sheet:
+
+- `git subtree add --prefix=release-tools https://github.com/kubernetes-csi/csi-release-tools.git master` - add release tools to a repo which does not have them yet (only once)
+- `git subtree pull --prefix=release-tools https://github.com/kubernetes-csi/csi-release-tools.git master` - update local copy to latest upstream (whenever upstream changes)
+- edit, `git commit`, `git subtree push --prefix=release-tools git@github.com:/csi-release-tools.git ` - push to a new branch before submitting a PR
+
+verify-shellcheck.sh
+--------------------
+
+The [verify-shellcheck.sh](./verify-shellcheck.sh) script in this repo
+is a stripped down copy of the [corresponding
+script](https://github.com/kubernetes/kubernetes/blob/release-1.14/hack/verify-shellcheck.sh)
+in the Kubernetes repository. It can be used to check for certain
+errors in shell scripts, like missing quotation marks. The default
+`test-shellcheck` target in [build.make](./build.make) only checks the
+scripts in this directory. Components can add more directories to
+`TEST_SHELLCHECK_DIRS` to check also other scripts.
+
+End-to-end testing
+------------------
+
+A repo that wants to opt into testing via Prow must set up a top-level
+`.prow.sh`. Typically that will source `prow.sh` and then transfer
+control to it:
+
+``` bash
+#! /bin/bash -e
+
+. release-tools/prow.sh
+main
+```
+
+All Kubernetes-CSI repos are expected to switch to Prow. For details
+on what is enabled in Prow, see
+https://github.com/kubernetes/test-infra/tree/master/config/jobs/kubernetes-csi
+
+Test results for periodic jobs are visible in
+https://testgrid.k8s.io/sig-storage-csi
+
+It is possible to reproduce the Prow testing locally on a suitable machine:
+- Linux host
+- Docker installed
+- code to be tested checked out in `$GOPATH/src/`
+- `cd $GOPATH/src/ && ./.prow.sh`
+
+Beware that the script intentionally doesn't clean up after itself and
+modifies the content of `$GOPATH`, in particular the `kubernetes` and
+`kind` repositories there. Better run it in an empty, disposable
+`$GOPATH`.
+
+When it terminates, the following command can be used to get access to
+the Kubernetes cluster that was brought up for testing (assuming that
+this step succeeded):
+
+    export KUBECONFIG="$(kind get kubeconfig-path --name="csi-prow")"
+
+It is possible to control the execution via environment variables. See
+`prow.sh` for details. Particularly useful is testing against different
+Kubernetes releases:
+
+    CSI_PROW_KUBERNETES_VERSION=1.13.3 ./.prow.sh
+    CSI_PROW_KUBERNETES_VERSION=latest ./.prow.sh
diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/RELEASE.md b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/RELEASE.md
new file mode 100644
index 000000000..a0fd815b0
--- /dev/null
+++ b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/RELEASE.md
@@ -0,0 +1,5 @@
+# Release Process
+
+No tagged releases are planned at this point. The intention is to keep
+the master branch in a state such that it can be used for all
+supported branches in downstream repos which use these files.
diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/SECURITY_CONTACTS b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/SECURITY_CONTACTS
new file mode 100644
index 000000000..2af1414e0
--- /dev/null
+++ b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/SECURITY_CONTACTS
@@ -0,0 +1,14 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Team to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+
+saad-ali
+msau42
diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/build.make b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/build.make
new file mode 100644
index 000000000..e3d4795da
--- /dev/null
+++ b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/build.make
@@ -0,0 +1,153 @@
+# Copyright 2019 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +.PHONY: build-% build container-% container push-% push clean test + +# A space-separated list of all commands in the repository, must be +# set in main Makefile of a repository. +# CMDS= + +# This is the default. It can be overridden in the main Makefile after +# including build.make. +REGISTRY_NAME=quay.io/k8scsi + +# Revision that gets built into each binary via the main.version +# string. Uses the `git describe` output based on the most recent +# version tag with a short revision suffix or, if nothing has been +# tagged yet, just the revision. +# +# Beware that tags may also be missing in shallow clones as done by +# some CI systems (like TravisCI, which pulls only 50 commits). +REV=$(shell git describe --long --tags --match='v*' --dirty 2>/dev/null || git rev-list -n1 HEAD) + +# A space-separated list of image tags under which the current build is to be pushed. +# Determined dynamically. +IMAGE_TAGS= + +# A "canary" image gets built if the current commit is the head of the remote "master" branch. +# That branch does not exist when building some other branch in TravisCI. +IMAGE_TAGS+=$(shell if [ "$$(git rev-list -n1 HEAD)" = "$$(git rev-list -n1 origin/master 2>/dev/null)" ]; then echo "canary"; fi) + +# A "X.Y.Z-canary" image gets built if the current commit is the head of a "origin/release-X.Y.Z" branch. +# The actual suffix does not matter, only the "release-" prefix is checked. +IMAGE_TAGS+=$(shell git branch -r --points-at=HEAD | grep 'origin/release-' | grep -v -e ' -> ' | sed -e 's;.*/release-\(.*\);\1-canary;') + +# A release image "vX.Y.Z" gets built if there is a tag of that format for the current commit. +# --abbrev=0 suppresses long format, only showing the closest tag. +IMAGE_TAGS+=$(shell tagged="$$(git describe --tags --match='v*' --abbrev=0)"; if [ "$$tagged" ] && [ "$$(git rev-list -n1 HEAD)" = "$$(git rev-list -n1 $$tagged)" ]; then echo $$tagged; fi) + +# Images are named after the command contained in them. +IMAGE_NAME=$(REGISTRY_NAME)/$* + +ifdef V +# Adding "-alsologtostderr" assumes that all test binaries contain glog. This is not guaranteed. +TESTARGS = -v -args -alsologtostderr -v 5 +else +TESTARGS = +endif + +# Specific packages can be excluded from each of the tests below by setting the *_FILTER_CMD variables +# to something like "| grep -v 'github.com/kubernetes-csi/project/pkg/foobar'". See usage below. + +build-%: + mkdir -p bin + CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$* ./cmd/$* + +container-%: build-% + docker build -t $*:latest -f $(shell if [ -e ./cmd/$*/Dockerfile ]; then echo ./cmd/$*/Dockerfile; else echo Dockerfile; fi) --label revision=$(REV) . 
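+
+# Illustrative only (not part of the upstream rules; "my-controller" is a
+# placeholder command name): a repository that consumes this file is expected
+# to set CMDS and include it from its own top-level Makefile, roughly:
+#
+#   CMDS=my-controller
+#   all: build
+#   include release-tools/build.make
+#
+# "make container" and "make push" then iterate over the commands in CMDS.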
+ +push-%: container-% + set -ex; \ + push_image () { \ + docker tag $*:latest $(IMAGE_NAME):$$tag; \ + docker push $(IMAGE_NAME):$$tag; \ + }; \ + for tag in $(IMAGE_TAGS); do \ + if [ "$$tag" = "canary" ] || echo "$$tag" | grep -q -e '-canary$$'; then \ + : "creating or overwriting canary image"; \ + push_image; \ + elif docker pull $(IMAGE_NAME):$$tag 2>&1 | tee /dev/stderr | grep -q "manifest for $(IMAGE_NAME):$$tag not found"; then \ + : "creating release image"; \ + push_image; \ + else \ + : "release image $(IMAGE_NAME):$$tag already exists, skipping push"; \ + fi; \ + done + +build: $(CMDS:%=build-%) +container: $(CMDS:%=container-%) +push: $(CMDS:%=push-%) + +clean: + -rm -rf bin + +test: + +.PHONY: test-go +test: test-go +test-go: + @ echo; echo "### $@:" + go test `go list ./... | grep -v -e 'vendor' -e '/test/e2e$$' $(TEST_GO_FILTER_CMD)` $(TESTARGS) + +.PHONY: test-vet +test: test-vet +test-vet: + @ echo; echo "### $@:" + go vet `go list ./... | grep -v vendor $(TEST_VET_FILTER_CMD)` + +.PHONY: test-fmt +test: test-fmt +test-fmt: + @ echo; echo "### $@:" + files=$$(find . -name '*.go' | grep -v './vendor' $(TEST_FMT_FILTER_CMD)); \ + if [ $$(gofmt -d $$files | wc -l) -ne 0 ]; then \ + echo "formatting errors:"; \ + gofmt -d $$files; \ + false; \ + fi + +# This test only runs when dep >= 0.5 is installed, which is the case for the CI setup. +.PHONY: test-vendor +test: test-vendor +test-vendor: + @ echo; echo "### $@:" + @ case "$$(dep version 2>/dev/null | grep 'version *:')" in \ + *v0.[56789]*) dep check && echo "vendor up-to-date" || false;; \ + *) echo "skipping check, dep >= 0.5 required";; \ + esac + +.PHONY: test-subtree +test: test-subtree +test-subtree: + @ echo; echo "### $@:" + ./release-tools/verify-subtree.sh release-tools + +# Components can extend the set of directories which must pass shellcheck. +# The default is to check only the release-tools directory itself. +TEST_SHELLCHECK_DIRS=release-tools +.PHONY: test-shellcheck +test: test-shellcheck +test-shellcheck: + @ echo; echo "### $@:" + @ ret=0; \ + if ! command -v docker; then \ + echo "skipped, no Docker"; \ + exit 0; \ + fi; \ + for dir in $(abspath $(TEST_SHELLCHECK_DIRS)); do \ + echo; \ + echo "$$dir:"; \ + ./release-tools/verify-shellcheck.sh "$$dir" || ret=1; \ + done; \ + exit $$ret diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/code-of-conduct.md b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/code-of-conduct.md new file mode 100644 index 000000000..0d15c00cf --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/filter-junit.go b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/filter-junit.go new file mode 100644 index 000000000..2f51be00e --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/filter-junit.go @@ -0,0 +1,133 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* + * This command filters a JUnit file such that only tests with a name + * matching a regular expression are passed through. By concatenating + * multiple input files it is possible to merge them into a single file. + */ +package main + +import ( + "encoding/xml" + "flag" + "io/ioutil" + "os" + "regexp" +) + +var ( + output = flag.String("o", "-", "junit file to write, - for stdout") + tests = flag.String("t", "", "regular expression matching the test names that are to be included in the output") +) + +/* + * TestSuite represents a JUnit file. Due to how encoding/xml works, we have + * represent all fields that we want to be passed through. It's therefore + * not a complete solution, but good enough for Ginkgo + Spyglass. + */ +type TestSuite struct { + XMLName string `xml:"testsuite"` + TestCases []TestCase `xml:"testcase"` +} + +type TestCase struct { + Name string `xml:"name,attr"` + Time string `xml:"time,attr"` + SystemOut string `xml:"system-out,omitempty"` + Failure string `xml:"failure,omitempty"` + Skipped SkipReason `xml:"skipped,omitempty"` +} + +// SkipReason deals with the special : +// if present, we must re-encode it, even if empty. +type SkipReason string + +func (s *SkipReason) UnmarshalText(text []byte) error { + *s = SkipReason(text) + if *s == "" { + *s = " " + } + return nil +} + +func (s SkipReason) MarshalText() ([]byte, error) { + if s == " " { + return []byte{}, nil + } + return []byte(s), nil +} + +func main() { + var junit TestSuite + var data []byte + + flag.Parse() + + re := regexp.MustCompile(*tests) + + // Read all input files. + for _, input := range flag.Args() { + if input == "-" { + if _, err := os.Stdin.Read(data); err != nil { + panic(err) + } + } else { + var err error + data, err = ioutil.ReadFile(input) + if err != nil { + panic(err) + } + } + if err := xml.Unmarshal(data, &junit); err != nil { + panic(err) + } + } + + // Keep only matching testcases. Testcases skipped in all test runs are only stored once. + filtered := map[string]TestCase{} + for _, testcase := range junit.TestCases { + if !re.MatchString(testcase.Name) { + continue + } + entry, ok := filtered[testcase.Name] + if !ok || // not present yet + entry.Skipped != "" && testcase.Skipped == "" { // replaced skipped test with real test run + filtered[testcase.Name] = testcase + } + } + junit.TestCases = nil + for _, testcase := range filtered { + junit.TestCases = append(junit.TestCases, testcase) + } + + // Re-encode. + data, err := xml.MarshalIndent(junit, "", " ") + if err != nil { + panic(err) + } + + // Write to output. + if *output == "-" { + if _, err := os.Stdout.Write(data); err != nil { + panic(err) + } + } else { + if err := ioutil.WriteFile(*output, data, 0644); err != nil { + panic(err) + } + } +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/prow.sh b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/prow.sh new file mode 100755 index 000000000..204cc294c --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/prow.sh @@ -0,0 +1,992 @@ +#! 
/bin/bash +# +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This script runs inside a Prow job. It can run unit tests ("make test") +# and E2E testing. This E2E testing covers different scenarios (see +# https://github.com/kubernetes/enhancements/pull/807): +# - running the stable hostpath example against a Kubernetes release +# - running the canary hostpath example against a Kubernetes release +# - building the component in the current repo and running the +# stable hostpath example with that one component replaced against +# a Kubernetes release +# +# The intended usage of this script is that individual repos import +# csi-release-tools, then link their top-level prow.sh to this or +# include it in that file. When including it, several of the variables +# can be overridden in the top-level prow.sh to customize the script +# for the repo. +# +# The expected environment is: +# - $GOPATH/src/ for the repository that is to be tested, +# with PR branch merged (when testing a PR) +# - running on linux-amd64 +# - bazel installed (when testing against Kubernetes master), must be recent +# enough for Kubernetes master +# - kind (https://github.com/kubernetes-sigs/kind) installed +# - optional: Go already installed + +RELEASE_TOOLS_ROOT="$(realpath "$(dirname "${BASH_SOURCE[0]}")")" +REPO_DIR="$(pwd)" + +# Sets the default value for a variable if not set already and logs the value. +# Any variable set this way is usually something that a repo's .prow.sh +# or the job can set. +configvar () { + # Ignore: Word is of the form "A"B"C" (B indicated). Did you mean "ABC" or "A\"B\"C"? + # shellcheck disable=SC2140 + eval : \$\{"$1":="\$2"\} + eval echo "\$3:" "$1=\${$1}" +} + +# Prints the value of a variable + version suffix, falling back to variable + "LATEST". +get_versioned_variable () { + local var="$1" + local version="$2" + local value + + eval value="\${${var}_${version}}" + if ! [ "$value" ]; then + eval value="\${${var}_LATEST}" + fi + echo "$value" +} + +# Go versions can be specified seperately for different tasks +# If the pre-installed Go is missing or a different +# version, the required version here will get installed +# from https://golang.org/dl/. 
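The header comment above says that individual repositories link or include this script from their own top-level .prow.sh and override settings there; because configvar only assigns a default when a variable is still unset, values exported by the wrapper take precedence. A minimal sketch of such a wrapper, assuming the usual release-tools/ layout and the configvar names defined in this file (the chosen values are examples only):

    #! /bin/bash -e
    # Hypothetical top-level .prow.sh of a consuming repository.
    # Exported values win over the configvar defaults in prow.sh.
    export CSI_PROW_KUBERNETES_VERSION=1.14.0
    export CSI_PROW_TESTS="unit parallel"
    . release-tools/prow.sh
    main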
+go_from_travis_yml () { + grep "^ *- go:" "${RELEASE_TOOLS_ROOT}/travis.yml" | sed -e 's/.*go: *//' +} +configvar CSI_PROW_GO_VERSION_BUILD "$(go_from_travis_yml)" "Go version for building the component" # depends on component's source code +configvar CSI_PROW_GO_VERSION_E2E "" "override Go version for building the Kubernetes E2E test suite" # normally doesn't need to be set, see install_e2e +configvar CSI_PROW_GO_VERSION_SANITY "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building the csi-sanity test suite" # depends on CSI_PROW_SANITY settings below +configvar CSI_PROW_GO_VERSION_KIND "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building 'kind'" # depends on CSI_PROW_KIND_VERSION below +configvar CSI_PROW_GO_VERSION_GINKGO "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building ginkgo" # depends on CSI_PROW_GINKGO_VERSION below + +# kind version to use. If the pre-installed version is different, +# the desired version is downloaded from https://github.com/kubernetes-sigs/kind/releases/download/ +# (if available), otherwise it is built from source. +configvar CSI_PROW_KIND_VERSION 0.2.1 "kind" + +# ginkgo test runner version to use. If the pre-installed version is +# different, the desired version is built from source. +configvar CSI_PROW_GINKGO_VERSION v1.7.0 "Ginkgo" + +# Ginkgo runs the E2E test in parallel. The default is based on the number +# of CPUs, but typically this can be set to something higher in the job. +configvar CSI_PROW_GINKO_PARALLEL "-p" "Ginko parallelism parameter(s)" + +# Enables building the code in the repository. On by default, can be +# disabled in jobs which only use pre-built components. +configvar CSI_PROW_BUILD_JOB true "building code in repo enabled" + +# Kubernetes version to test against. This must be a version number +# (like 1.13.3) for which there is a pre-built kind image (see +# https://hub.docker.com/r/kindest/node/tags), "latest" (builds +# Kubernetes from the master branch) or "release-x.yy" (builds +# Kubernetes from a release branch). +# +# This can also be a version that was not released yet at the time +# that the settings below were chose. The script will then +# use the same settings as for "latest" Kubernetes. This works +# as long as there are no breaking changes in Kubernetes, like +# deprecating or changing the implementation of an alpha feature. +configvar CSI_PROW_KUBERNETES_VERSION 1.13.3 "Kubernetes" + +# CSI_PROW_KUBERNETES_VERSION reduced to first two version numbers and +# with underscore (1_13 instead of 1.13.3) and in uppercase (LATEST +# instead of latest). +# +# This is used to derive the right defaults for the variables below +# when a Prow job just defines the Kubernetes version. +csi_prow_kubernetes_version_suffix="$(echo "${CSI_PROW_KUBERNETES_VERSION}" | tr . _ | tr '[:lower:]' '[:upper:]' | sed -e 's/^RELEASE-//' -e 's/\([0-9]*\)_\([0-9]*\).*/\1_\2/')" + +# Work directory. It has to allow running executables, therefore /tmp +# is avoided. Cleaning up after the script is intentionally left to +# the caller. +configvar CSI_PROW_WORK "$(mkdir -p "$GOPATH/pkg" && mktemp -d "$GOPATH/pkg/csiprow.XXXXXXXXXX")" "work directory" + +# The hostpath deployment script is searched for in several places. +# +# - The "deploy" directory in the current repository: this is useful +# for the situation that a component becomes incompatible with the +# shared deployment, because then it can (temporarily!) provide its +# own example until the shared one can be updated; it's also how +# csi-driver-host-path itself provides the example. 
+# +# - CSI_PROW_HOSTPATH_VERSION of the CSI_PROW_HOSTPATH_REPO is checked +# out: this allows other repos to reference a version of the example +# that is known to be compatible. +# +# - The csi-driver-host-path/deploy directory has multiple sub-directories, +# each with different deployments (stable set of images for Kubernetes 1.13, +# stable set of images for Kubernetes 1.14, canary for latest Kubernetes, etc.). +# This is necessary because there may be incompatible changes in the +# "API" of a component (for example, its command line options or RBAC rules) +# or in its support for different Kubernetes versions (CSIDriverInfo as +# CRD in Kubernetes 1.13 vs builtin API in Kubernetes 1.14). +# +# When testing an update for a component in a PR job, the +# CSI_PROW_DEPLOYMENT variable can be set in the +# .prow.sh of each component when there are breaking changes +# that require using a non-default deployment. The default +# is a deployment named "kubernetes-x.yy" (if available), +# otherwise "kubernetes-latest". +# "none" disables the deployment of the hostpath driver. +# +# When no deploy script is found (nothing in `deploy` directory, +# CSI_PROW_HOSTPATH_REPO=none), nothing gets deployed. +configvar CSI_PROW_HOSTPATH_VERSION fc52d13ba07922c80555a24616a5b16480350c3f "hostpath driver" # pre-1.1.0 +configvar CSI_PROW_HOSTPATH_REPO https://github.com/kubernetes-csi/csi-driver-host-path "hostpath repo" +configvar CSI_PROW_DEPLOYMENT "" "deployment" + +# If CSI_PROW_HOSTPATH_CANARY is set (typically to "canary", but also +# "1.0-canary"), then all image versions are replaced with that +# version tag. +configvar CSI_PROW_HOSTPATH_CANARY "" "hostpath image" + +# The E2E testing can come from an arbitrary repo. The expectation is that +# the repo supports "go test ./test/e2e -args --storage.testdriver" (https://github.com/kubernetes/kubernetes/pull/72836) +# after setting KUBECONFIG. As a special case, if the repository is Kubernetes, +# then `make WHAT=test/e2e/e2e.test` is called first to ensure that +# all generated files are present. +# +# CSI_PROW_E2E_REPO=none disables E2E testing. +configvar CSI_PROW_E2E_VERSION_1_13 v1.14.0 "E2E version for Kubernetes 1.13.x" # we can't use the one from 1.13.x because it didn't have --storage.testdriver +configvar CSI_PROW_E2E_VERSION_1_14 v1.14.0 "E2E version for Kubernetes 1.14.x" +# TODO: add new CSI_PROW_E2E_VERSION entry for future Kubernetes releases +configvar CSI_PROW_E2E_VERSION_LATEST master "E2E version for Kubernetes master" # testing against Kubernetes master is already tracking a moving target, so we might as well use a moving E2E version +configvar CSI_PROW_E2E_REPO_LATEST https://github.com/kubernetes/kubernetes "E2E repo for Kubernetes >= 1.13.x" # currently the same for all versions +configvar CSI_PROW_E2E_IMPORT_PATH_LATEST k8s.io/kubernetes "E2E package for Kubernetes >= 1.13.x" # currently the same for all versions +configvar CSI_PROW_E2E_VERSION "$(get_versioned_variable CSI_PROW_E2E_VERSION "${csi_prow_kubernetes_version_suffix}")" "E2E version" +configvar CSI_PROW_E2E_REPO "$(get_versioned_variable CSI_PROW_E2E_REPO "${csi_prow_kubernetes_version_suffix}")" "E2E repo" +configvar CSI_PROW_E2E_IMPORT_PATH "$(get_versioned_variable CSI_PROW_E2E_IMPORT_PATH "${csi_prow_kubernetes_version_suffix}")" "E2E package" + +# csi-sanity testing from the csi-test repo can be run against the installed +# CSI driver. 
For this to work, deploying the driver must expose the Unix domain +# csi.sock as a TCP service for use by the csi-sanity command, which runs outside +# of the cluster. The alternative would have been to (cross-)compile csi-sanity +# and install it inside the cluster, which is not necessarily easier. +configvar CSI_PROW_SANITY_REPO https://github.com/kubernetes-csi/csi-test "csi-test repo" +configvar CSI_PROW_SANITY_VERSION 5421d9f3c37be3b95b241b44a094a3db11bee789 "csi-test version" # latest master +configvar CSI_PROW_SANITY_IMPORT_PATH github.com/kubernetes-csi/csi-test "csi-test package" +configvar CSI_PROW_SANITY_SERVICE "hostpath-service" "Kubernetes TCP service name that exposes csi.sock" +configvar CSI_PROW_SANITY_POD "csi-hostpathplugin-0" "Kubernetes pod with CSI driver" +configvar CSI_PROW_SANITY_CONTAINER "hostpath" "Kubernetes container with CSI driver" + +# Each job can run one or more of the following tests, identified by +# a single word: +# - unit testing +# - parallel excluding alpha features +# - serial excluding alpha features +# - parallel, only alpha feature +# - serial, only alpha features +# - sanity +# +# Unknown or unsupported entries are ignored. +# +# Sanity testing with csi-sanity only covers the CSI driver itself and +# thus only makes sense in repos which provide their own CSI +# driver. Repos can enable sanity testing by setting +# CSI_PROW_TESTS_SANITY=sanity. +configvar CSI_PROW_TESTS "unit parallel serial parallel-alpha serial-alpha sanity" "tests to run" +tests_enabled () { + local t1 t2 + # We want word-splitting here, so ignore: Quote to prevent word splitting, or split robustly with mapfile or read -a. + # shellcheck disable=SC2206 + local tests=(${CSI_PROW_TESTS}) + for t1 in "$@"; do + for t2 in "${tests[@]}"; do + if [ "$t1" = "$t2" ]; then + return + fi + done + done + return 1 +} +sanity_enabled () { + [ "${CSI_PROW_TESTS_SANITY}" = "sanity" ] && tests_enabled "sanity" +} +tests_need_kind () { + tests_enabled "parallel" "serial" "serial-alpha" "parallel-alpha" || + sanity_enabled +} +tests_need_non_alpha_cluster () { + tests_enabled "parallel" "serial" || + sanity_enabled +} +tests_need_alpha_cluster () { + tests_enabled "parallel-alpha" "serial-alpha" +} + + +# Serial vs. parallel is always determined by these regular expressions. +# Individual regular expressions are seperated by spaces for readability +# and expected to not contain spaces. Use dots instead. The complete +# regex for Ginkgo will be created by joining the individual terms. +configvar CSI_PROW_E2E_SERIAL '\[Serial\] \[Disruptive\]' "tags for serial E2E tests" +regex_join () { + echo "$@" | sed -e 's/ */|/g' -e 's/^|*//' -e 's/|*$//' -e 's/^$/this-matches-nothing/g' +} + +# Which tests are alpha depends on the Kubernetes version. We could +# use the same E2E test for all Kubernetes version. This would have +# the advantage that new tests can be applied to older versions +# without having to backport tests. +# +# But the feature tag gets removed from E2E tests when the corresponding +# feature becomes beta, so we would have to track which tests were +# alpha in previous Kubernetes releases. This was considered too +# error prone. Therefore we use E2E tests that match the Kubernetes +# version that is getting tested. +# +# However, for 1.13.x testing we have to use the E2E tests from 1.14 +# because 1.13 didn't have --storage.testdriver yet, so for that (and only +# that version) we have to define alpha tests differently. 
+configvar CSI_PROW_E2E_ALPHA_1_13 '\[Feature: \[Testpattern:.Dynamic.PV..block.volmode.\] should.create.and.delete.block.persistent.volumes' "alpha tests for Kubernetes 1.13" # Raw block was an alpha feature in 1.13. +configvar CSI_PROW_E2E_ALPHA_LATEST '\[Feature:' "alpha tests for Kubernetes >= 1.14" # there's no need to update this, adding a new case for CSI_PROW_E2E for a new Kubernetes is enough +configvar CSI_PROW_E2E_ALPHA "$(get_versioned_variable CSI_PROW_E2E_ALPHA "${csi_prow_kubernetes_version_suffix}")" "alpha tests" + +# After the parallel E2E test without alpha features, a test cluster +# with alpha features is brought up and tests that were previously +# disabled are run. The alpha gates in each release have to be listed +# explicitly. If none are set (= variable empty), alpha testing +# is skipped. +# +# Testing against "latest" Kubernetes is problematic because some alpha +# feature which used to work might stop working or change their behavior +# such that the current tests no longer pass. If that happens, +# kubernetes-csi components must be updated, either by disabling +# the failing test for "latest" or by updating the test and not running +# it anymore for older releases. +configvar CSI_PROW_E2E_ALPHA_GATES_1_13 'VolumeSnapshotDataSource=true,BlockVolume=true,CSIBlockVolume=true' "alpha feature gates for Kubernetes 1.13" +configvar CSI_PROW_E2E_ALPHA_GATES_1_14 'VolumeSnapshotDataSource=true,ExpandCSIVolumes=true' "alpha feature gates for Kubernetes 1.14" +# TODO: add new CSI_PROW_ALPHA_GATES_xxx entry for future Kubernetes releases and +# add new gates to CSI_PROW_E2E_ALPHA_GATES_LATEST. +configvar CSI_PROW_E2E_ALPHA_GATES_LATEST 'VolumeSnapshotDataSource=true,ExpandCSIVolumes=true' "alpha feature gates for latest Kubernetes" +configvar CSI_PROW_E2E_ALPHA_GATES "$(get_versioned_variable CSI_PROW_E2E_ALPHA_GATES "${csi_prow_kubernetes_version_suffix}")" "alpha E2E feature gates" + +# Some tests are known to be unusable in a KinD cluster. For example, +# stopping kubelet with "ssh systemctl stop kubelet" simply +# doesn't work. Such tests should be written in a way that they verify +# whether they can run with the current cluster provider, but until +# they are, we filter them out by name. Like the other test selection +# variables, this is again a space separated list of regular expressions. +configvar CSI_PROW_E2E_SKIP 'while.kubelet.is.down.*Disruptive' "tests that need to be skipped" + +# This is the directory for additional result files. Usually set by Prow, but +# if not (for example, when invoking manually) it defaults to the work directory. +configvar ARTIFACTS "${CSI_PROW_WORK}/artifacts" "artifacts" +mkdir -p "${ARTIFACTS}" + +run () { + echo "$(date) $(go version | sed -e 's/.*version \(go[^ ]*\).*/\1/') $(if [ "$(pwd)" != "${REPO_DIR}" ]; then pwd; fi)\$" "$@" >&2 + "$@" +} + +info () { + echo >&2 INFO: "$@" +} + +warn () { + echo >&2 WARNING: "$@" +} + +die () { + echo >&2 ERROR: "$@" + exit 1 +} + +# For additional tools. +CSI_PROW_BIN="${CSI_PROW_WORK}/bin" +mkdir -p "${CSI_PROW_BIN}" +PATH="${CSI_PROW_BIN}:$PATH" + +# Ensure that PATH has the desired version of the Go tools, then run command given as argument. +# Empty parameter uses the already installed Go. In Prow, that version is kept up-to-date by +# bumping the container image regularly. +run_with_go () { + local version + version="$1" + shift + + if ! [ "$version" ] || go version 2>/dev/null | grep -q "go$version"; then + run "$@" + else + if ! 
[ -d "${CSI_PROW_WORK}/go-$version" ]; then + run curl --fail --location "https://dl.google.com/go/go$version.linux-amd64.tar.gz" | tar -C "${CSI_PROW_WORK}" -zxf - || die "installation of Go $version failed" + mv "${CSI_PROW_WORK}/go" "${CSI_PROW_WORK}/go-$version" + fi + PATH="${CSI_PROW_WORK}/go-$version/bin:$PATH" run "$@" + fi +} + +# Ensure that we have the desired version of kind. +install_kind () { + if kind --version 2>/dev/null | grep -q " ${CSI_PROW_KIND_VERSION}$"; then + return + fi + if run curl --fail --location -o "${CSI_PROW_WORK}/bin/kind" "https://github.com/kubernetes-sigs/kind/releases/download/${CSI_PROW_KIND_VERSION}/kind-linux-amd64"; then + chmod u+x "${CSI_PROW_WORK}/bin/kind" + else + git_checkout https://github.com/kubernetes-sigs/kind "$GOPATH/src/sigs.k8s.io/kind" "${CSI_PROW_KIND_VERSION}" --depth=1 && + run_with_go "${CSI_PROW_GO_VERSION_KIND}" go build -o "${CSI_PROW_WORK}/bin/kind" sigs.k8s.io/kind + fi +} + +# Ensure that we have the desired version of the ginkgo test runner. +install_ginkgo () { + # CSI_PROW_GINKGO_VERSION contains the tag with v prefix, the command line output does not. + if [ "v$(ginkgo version 2>/dev/null | sed -e 's/.* //')" = "${CSI_PROW_GINKGO_VERSION}" ]; then + return + fi + git_checkout https://github.com/onsi/ginkgo "$GOPATH/src/github.com/onsi/ginkgo" "${CSI_PROW_GINKGO_VERSION}" --depth=1 && + # We have to get dependencies and hence can't call just "go build". + run_with_go "${CSI_PROW_GO_VERSION_GINKGO}" go get github.com/onsi/ginkgo/ginkgo || die "building ginkgo failed" && + mv "$GOPATH/bin/ginkgo" "${CSI_PROW_BIN}" +} + +# This checks out a repo ("https://github.com/kubernetes/kubernetes") +# in a certain location ("$GOPATH/src/k8s.io/kubernetes") at +# a certain revision (a hex commit hash, v1.13.1, master). It's okay +# for that directory to exist already. +git_checkout () { + local repo path revision + repo="$1" + shift + path="$1" + shift + revision="$1" + shift + + mkdir -p "$path" + if ! [ -d "$path/.git" ]; then + run git init "$path" + fi + if (cd "$path" && run git fetch "$@" "$repo" "$revision"); then + (cd "$path" && run git checkout FETCH_HEAD) || die "checking out $repo $revision failed" + else + # Might have been because fetching by revision is not + # supported by GitHub (https://github.com/isaacs/github/issues/436). + # Fall back to fetching everything. + (cd "$path" && run git fetch "$repo" '+refs/heads/*:refs/remotes/csiprow/heads/*' '+refs/tags/*:refs/tags/*') || die "fetching $repo failed" + (cd "$path" && run git checkout "$revision") || die "checking out $repo $revision failed" + fi + # This is useful for local testing or when switching between different revisions in the same + # repo. + (cd "$path" && run git clean -fdx) || die "failed to clean $path" +} + +list_gates () ( + set -f; IFS=',' + # Ignore: Double quote to prevent globbing and word splitting. + # shellcheck disable=SC2086 + set -- $1 + while [ "$1" ]; do + # Ignore: See if you can use ${variable//search/replace} instead. + # shellcheck disable=SC2001 + echo "$1" | sed -e 's/ *\([^ =]*\) *= *\([^ ]*\) */ \1: \2/' + shift + done +) + +go_version_for_kubernetes () ( + local path="$1" + local version="$2" + local go_version + + # We use the minimal Go version specified for each K8S release (= minimum_go_version in hack/lib/golang.sh). + # More recent versions might also work, but we don't want to count on that. + go_version="$(grep minimum_go_version= "$path/hack/lib/golang.sh" | sed -e 's/.*=go//')" + if ! 
[ "$go_version" ]; then + die "Unable to determine Go version for Kubernetes $version from hack/lib/golang.sh." + fi + echo "$go_version" +) + +csi_prow_kind_have_kubernetes=false +# Brings up a Kubernetes cluster and sets KUBECONFIG. +# Accepts additional feature gates in the form gate1=true|false,gate2=... +start_cluster () { + local image gates + gates="$1" + + if kind get clusters | grep -q csi-prow; then + run kind delete cluster --name=csi-prow || die "kind delete failed" + fi + + # Build from source? + if [[ "${CSI_PROW_KUBERNETES_VERSION}" =~ ^release-|^latest$ ]]; then + if ! ${csi_prow_kind_have_kubernetes}; then + local version="${CSI_PROW_KUBERNETES_VERSION}" + if [ "$version" = "latest" ]; then + version=master + fi + git_checkout https://github.com/kubernetes/kubernetes "$GOPATH/src/k8s.io/kubernetes" "$version" --depth=1 || die "checking out Kubernetes $version failed" + + # "kind build" and/or the Kubernetes build rules need at least one tag, which we don't have + # when doing a shallow fetch. Therefore we fake one: + # release-1.12 -> v1.12.0-release..csiprow + # latest or -> v1.14.0-.csiprow + case "${CSI_PROW_KUBERNETES_VERSION}" in + release-*) + # Ignore: See if you can use ${variable//search/replace} instead. + # shellcheck disable=SC2001 + tag="$(echo "${CSI_PROW_KUBERNETES_VERSION}" | sed -e 's/release-\(.*\)/v\1.0-release./')";; + *) + # We have to make something up. v1.0.0 did not work for some reasons. + tag="v1.14.0-";; + esac + tag="$tag$(cd "$GOPATH/src/k8s.io/kubernetes" && git rev-list --abbrev-commit HEAD).csiprow" + (cd "$GOPATH/src/k8s.io/kubernetes" && run git tag -f "$tag") || die "git tag failed" + go_version="$(go_version_for_kubernetes "$GOPATH/src/k8s.io/kubernetes" "$version")" || die "cannot proceed without knowing Go version for Kubernetes" + run_with_go "$go_version" kind build node-image --type bazel --image csiprow/node:latest --kube-root "$GOPATH/src/k8s.io/kubernetes" || die "'kind build node-image' failed" + csi_prow_kind_have_kubernetes=true + fi + image="csiprow/node:latest" + else + image="kindest/node:v${CSI_PROW_KUBERNETES_VERSION}" + fi + cat >"${CSI_PROW_WORK}/kind-config.yaml" </dev/null; wait) + info "For container output see job artifacts." + die "deploying the hostpath driver with ${deploy_hostpath} failed" + fi +} + +# collect logs and cluster status (like the version of all components, Kubernetes version, test version) +collect_cluster_info () { + cat <>"${ARTIFACTS}/$pod/$container.log" & + echo "$!" + done + done +} + +# Makes the E2E test suite binary available as "${CSI_PROW_WORK}/e2e.test". +install_e2e () { + if [ -e "${CSI_PROW_WORK}/e2e.test" ]; then + return + fi + + git_checkout "${CSI_PROW_E2E_REPO}" "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" "${CSI_PROW_E2E_VERSION}" --depth=1 && + if [ "${CSI_PROW_E2E_IMPORT_PATH}" = "k8s.io/kubernetes" ]; then + go_version="${CSI_PROW_GO_VERSION_E2E:-$(go_version_for_kubernetes "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" "${CSI_PROW_E2E_VERSION}")}" && + run_with_go "$go_version" make WHAT=test/e2e/e2e.test "-C${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" && + ln -s "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}/_output/bin/e2e.test" "${CSI_PROW_WORK}" + else + run_with_go "${CSI_PROW_GO_VERSION_E2E}" go test -c -o "${CSI_PROW_WORK}/e2e.test" "${CSI_PROW_E2E_IMPORT_PATH}/test/e2e" + fi +} + +# Makes the csi-sanity test suite binary available as +# "${CSI_PROW_WORK}/csi-sanity". 
+install_sanity () ( + if [ -e "${CSI_PROW_WORK}/csi-sanity" ]; then + return + fi + + git_checkout "${CSI_PROW_SANITY_REPO}" "${GOPATH}/src/${CSI_PROW_SANITY_IMPORT_PATH}" "${CSI_PROW_SANITY_VERSION}" --depth=1 || die "checking out csi-sanity failed" + run_with_go "${CSI_PROW_GO_VERSION_SANITY}" go test -c -o "${CSI_PROW_WORK}/csi-sanity" "${CSI_PROW_SANITY_IMPORT_PATH}/cmd/csi-sanity" || die "building csi-sanity failed" +) + +# Whether the hostpath driver supports raw block devices depends on which version +# we are testing. It would be much nicer if we could determine that by querying the +# installed driver's capabilities instead of having to do a version check. +hostpath_supports_block () { + local result + result="$(docker exec csi-prow-control-plane docker image ls --format='{{.Repository}} {{.Tag}} {{.ID}}' | grep hostpath | while read -r repo tag id; do + if [ "$tag" == "v1.0.1" ]; then + # Old version because the revision label is missing: didn't have support yet. + echo "false" + return + fi + done)" + # If not set, then it must be a newer driver with support. + echo "${result:-true}" +} + +# Captures pod output while running some other command. +run_with_loggers () ( + loggers=$(start_loggers -f) + trap 'kill $loggers' EXIT + + run "$@" +) + +# Invokes the filter-junit.go tool. +run_filter_junit () { + run_with_go "${CSI_PROW_GO_VERSION_BUILD}" go run "${RELEASE_TOOLS_ROOT}/filter-junit.go" "$@" +} + +# Runs the E2E test suite in a sub-shell. +run_e2e () ( + name="$1" + shift + + install_e2e || die "building e2e.test failed" + install_ginkgo || die "installing ginkgo failed" + + # TODO (?): multi-node cluster (depends on https://github.com/kubernetes-csi/csi-driver-host-path/pull/14). + # When running on a multi-node cluster, we need to figure out where the + # hostpath driver was deployed and set ClientNodeName accordingly. + + # The content of this file depends on both what the E2E suite expects and + # what the installed hostpath driver supports. Generating it here seems + # prone to breakage, but it is uncertain where a better place might be. + cat >"${CSI_PROW_WORK}/hostpath-test-driver.yaml" </dev/null >/dev/null; then + run_filter_junit -t="External Storage" -o "${ARTIFACTS}/junit_${name}.xml" "${ARTIFACTS}"/junit_[0-9]*.xml && rm -f "${ARTIFACTS}"/junit_[0-9]*.xml + fi + } + trap move_junit EXIT + + cd "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" && + run_with_loggers ginkgo -v "$@" "${CSI_PROW_WORK}/e2e.test" -- -report-dir "${ARTIFACTS}" -storage.testdriver="${CSI_PROW_WORK}/hostpath-test-driver.yaml" +) + +# Run csi-sanity against installed CSI driver. +run_sanity () ( + install_sanity || die "installing csi-sanity failed" + + cat >"${CSI_PROW_WORK}/mkdir_in_pod.sh" <"${CSI_PROW_WORK}/rmdir_in_pod.sh" </\>/g' -e 's/\x1B...//g' +} + +# The "make test" output starts each test with "### :" +# and then ends when the next test starts or with "make: *** +# [] Error 1" when there was a failure. Here we read each +# line of that output, split it up into individual tests and generate +# a make-test.xml file in JUnit format. +make_test_to_junit () { + local ret out testname testoutput + ret=0 + # Plain make-test.xml was not delivered as text/xml by the web + # server and ignored by spyglass. It seems that the name has to + # match junit*.xml. 
+ out="${ARTIFACTS}/junit_make_test.xml" + testname= + echo "" >>"$out" + + while IFS= read -r line; do + echo "$line" # pass through + if echo "$line" | grep -q "^### [^ ]*:$"; then + if [ "$testname" ]; then + # previous test succesful + echo " " >>"$out" + echo " " >>"$out" + fi + # Ignore: See if you can use ${variable//search/replace} instead. + # shellcheck disable=SC2001 + # + # start new test + testname="$(echo "$line" | sed -e 's/^### \([^ ]*\):$/\1/')" + testoutput= + echo " " >>"$out" + echo " " >>"$out" + elif echo "$line" | grep -q '^make: .*Error [0-9]*$'; then + if [ "$testname" ]; then + # Ignore: Consider using { cmd1; cmd2; } >> file instead of individual redirects. + # shellcheck disable=SC2129 + # + # end test with failure + echo " " >>"$out" + # Include the same text as in also in , + # because then it is easier to view in spyglass (shown directly + # instead of having to click through to stdout). + echo " " >>"$out" + echo -n "$testoutput" | ascii_to_xml >>"$out" + echo " " >>"$out" + echo " " >>"$out" + fi + # remember failure for exit code + ret=1 + # not currently inside a test + testname= + else + if [ "$testname" ]; then + # Test output. + echo "$line" | ascii_to_xml >>"$out" + testoutput="$testoutput$line +" + fi + fi + done + # if still in a test, close it now + if [ "$testname" ]; then + echo " " >>"$out" + echo " " >>"$out" + fi + echo "" >>"$out" + + # this makes the error more visible in spyglass + if [ "$ret" -ne 0 ]; then + echo "ERROR: 'make test' failed" + return 1 + fi +} + +main () { + local images ret + ret=0 + + images= + if ${CSI_PROW_BUILD_JOB}; then + # A successful build is required for testing. + run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make all || die "'make all' failed" + # We don't want test failures to prevent E2E testing below, because the failure + # might have been minor or unavoidable, for example when experimenting with + # changes in "release-tools" in a PR (that fails the "is release-tools unmodified" + # test). + if tests_enabled "unit"; then + if ! run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make -k test 2>&1 | make_test_to_junit; then + warn "'make test' failed, proceeding anyway" + ret=1 + fi + fi + # Required for E2E testing. + run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make container || die "'make container' failed" + fi + + if tests_need_kind; then + install_kind || die "installing kind failed" + + if ${CSI_PROW_BUILD_JOB}; then + cmds="$(grep '^\s*CMDS\s*=' Makefile | sed -e 's/\s*CMDS\s*=//')" + # Get the image that was just built (if any) from the + # top-level Makefile CMDS variable and set the + # deploy-hostpath.sh env variables for it. We also need to + # side-load those images into the cluster. + for i in $cmds; do + e=$(echo "$i" | tr '[:lower:]' '[:upper:]' | tr - _) + images="$images ${e}_REGISTRY=none ${e}_TAG=csiprow" + + # We must avoid the tag "latest" because that implies + # always pulling the image + # (https://github.com/kubernetes-sigs/kind/issues/328). + docker tag "$i:latest" "$i:csiprow" || die "tagging the locally built container image for $i failed" + done + + if [ -e deploy/kubernetes/rbac.yaml ]; then + # This is one of those components which has its own RBAC rules (like external-provisioner). + # We are testing a locally built image and also want to test with the the current, + # potentially modified RBAC rules. 
+ if [ "$(echo "$cmds" | wc -w)" != 1 ]; then + die "ambiguous deploy/kubernetes/rbac.yaml: need exactly one command, got: $cmds" + fi + e=$(echo "$cmds" | tr '[:lower:]' '[:upper:]' | tr - _) + images="$images ${e}_RBAC=$(pwd)/deploy/kubernetes/rbac.yaml" + fi + fi + + if tests_need_non_alpha_cluster; then + start_cluster || die "starting the non-alpha cluster failed" + + # Installing the driver might be disabled. + if install_hostpath "$images"; then + collect_cluster_info + + if sanity_enabled; then + if ! run_sanity; then + ret=1 + fi + fi + + if tests_enabled "parallel"; then + # Ignore: Double quote to prevent globbing and word splitting. + # shellcheck disable=SC2086 + if ! run_e2e parallel ${CSI_PROW_GINKO_PARALLEL} \ + -focus="External.Storage" \ + -skip="$(regex_join "${CSI_PROW_E2E_SERIAL}" "${CSI_PROW_E2E_ALPHA}" "${CSI_PROW_E2E_SKIP}")"; then + warn "E2E parallel failed" + ret=1 + fi + fi + + if tests_enabled "serial"; then + if ! run_e2e serial \ + -focus="External.Storage.*($(regex_join "${CSI_PROW_E2E_SERIAL}"))" \ + -skip="$(regex_join "${CSI_PROW_E2E_ALPHA}" "${CSI_PROW_E2E_SKIP}")"; then + warn "E2E serial failed" + ret=1 + fi + fi + fi + fi + + if tests_need_alpha_cluster && [ "${CSI_PROW_E2E_ALPHA_GATES}" ]; then + # Need to (re)create the cluster. + start_cluster "${CSI_PROW_E2E_ALPHA_GATES}" || die "starting alpha cluster failed" + + # Installing the driver might be disabled. + if install_hostpath "$images"; then + collect_cluster_info + + if tests_enabled "parallel-alpha"; then + # Ignore: Double quote to prevent globbing and word splitting. + # shellcheck disable=SC2086 + if ! run_e2e parallel-alpha ${CSI_PROW_GINKO_PARALLEL} \ + -focus="External.Storage.*($(regex_join "${CSI_PROW_E2E_ALPHA}"))" \ + -skip="$(regex_join "${CSI_PROW_E2E_SERIAL}" "${CSI_PROW_E2E_SKIP}")"; then + warn "E2E parallel alpha failed" + ret=1 + fi + fi + + if tests_enabled "serial-alpha"; then + if ! run_e2e serial-alpha \ + -focus="External.Storage.*(($(regex_join "${CSI_PROW_E2E_SERIAL}")).*($(regex_join "${CSI_PROW_E2E_ALPHA}"))|($(regex_join "${CSI_PROW_E2E_ALPHA}")).*($(regex_join "${CSI_PROW_E2E_SERIAL}")))" \ + -skip="$(regex_join "${CSI_PROW_E2E_SKIP}")"; then + warn "E2E serial alpha failed" + ret=1 + fi + fi + fi + fi + fi + + # Merge all junit files into one. This gets rid of duplicated "skipped" tests. 
+ if ls "${ARTIFACTS}"/junit_*.xml 2>/dev/null >&2; then + run_filter_junit -o "${CSI_PROW_WORK}/junit_final.xml" "${ARTIFACTS}"/junit_*.xml && rm "${ARTIFACTS}"/junit_*.xml && mv "${CSI_PROW_WORK}/junit_final.xml" "${ARTIFACTS}" + fi + + return "$ret" +} diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/travis.yml b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/travis.yml new file mode 100644 index 000000000..1c05dfd97 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/travis.yml @@ -0,0 +1,19 @@ +language: go +sudo: required +services: + - docker +matrix: + include: + - go: 1.11.1 +before_script: +- mkdir -p bin +- wget https://github.com/golang/dep/releases/download/v0.5.1/dep-linux-amd64 -O bin/dep +- chmod u+x bin/dep +- export PATH=$PWD/bin:$PATH +script: +- make -k all test +after_success: + - if [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then + docker login -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" quay.io; + make push; + fi diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/util.sh b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/util.sh new file mode 100755 index 000000000..abeb1b2e1 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/util.sh @@ -0,0 +1,148 @@ +#!/usr/bin/env bash + +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +function kube::util::sourced_variable { + # Call this function to tell shellcheck that a variable is supposed to + # be used from other calling context. This helps quiet an "unused + # variable" warning from shellcheck and also document your code. + true +} + +kube::util::sortable_date() { + date "+%Y%m%d-%H%M%S" +} + +# arguments: target, item1, item2, item3, ... +# returns 0 if target is in the given items, 1 otherwise. +kube::util::array_contains() { + local search="$1" + local element + shift + for element; do + if [[ "${element}" == "${search}" ]]; then + return 0 + fi + done + return 1 +} + +# Example: kube::util::trap_add 'echo "in trap DEBUG"' DEBUG +# See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal +kube::util::trap_add() { + local trap_add_cmd + trap_add_cmd=$1 + shift + + for trap_add_name in "$@"; do + local existing_cmd + local new_cmd + + # Grab the currently defined trap commands for this trap + existing_cmd=$(trap -p "${trap_add_name}" | awk -F"'" '{print $2}') + + if [[ -z "${existing_cmd}" ]]; then + new_cmd="${trap_add_cmd}" + else + new_cmd="${trap_add_cmd};${existing_cmd}" + fi + + # Assign the test. Disable the shellcheck warning telling that trap + # commands should be single quoted to avoid evaluating them at this + # point instead evaluating them at run time. The logic of adding new + # commands to a single trap requires them to be evaluated right away. 
+ # shellcheck disable=SC2064 + trap "${new_cmd}" "${trap_add_name}" + done +} + +kube::util::download_file() { + local -r url=$1 + local -r destination_file=$2 + + rm "${destination_file}" 2&> /dev/null || true + + for i in $(seq 5) + do + if ! curl -fsSL --retry 3 --keepalive-time 2 "${url}" -o "${destination_file}"; then + echo "Downloading ${url} failed. $((5-i)) retries left." + sleep 1 + else + echo "Downloading ${url} succeed" + return 0 + fi + done + return 1 +} + +# Wait for background jobs to finish. Return with +# an error status if any of the jobs failed. +kube::util::wait-for-jobs() { + local fail=0 + local job + for job in $(jobs -p); do + wait "${job}" || fail=$((fail + 1)) + done + return ${fail} +} + +# kube::util::join +# Concatenates the list elements with the delimiter passed as first parameter +# +# Ex: kube::util::join , a b c +# -> a,b,c +function kube::util::join { + local IFS="$1" + shift + echo "$*" +} + +# kube::util::check-file-in-alphabetical-order +# Check that the file is in alphabetical order +# +function kube::util::check-file-in-alphabetical-order { + local failure_file="$1" + if ! diff -u "${failure_file}" <(LC_ALL=C sort "${failure_file}"); then + { + echo + echo "${failure_file} is not in alphabetical order. Please sort it:" + echo + echo " LC_ALL=C sort -o ${failure_file} ${failure_file}" + echo + } >&2 + false + fi +} + +# Some useful colors. +if [[ -z "${color_start-}" ]]; then + declare -r color_start="\033[" + declare -r color_red="${color_start}0;31m" + declare -r color_yellow="${color_start}0;33m" + declare -r color_green="${color_start}0;32m" + declare -r color_blue="${color_start}1;34m" + declare -r color_cyan="${color_start}1;36m" + declare -r color_norm="${color_start}0m" + + kube::util::sourced_variable "${color_start}" + kube::util::sourced_variable "${color_red}" + kube::util::sourced_variable "${color_yellow}" + kube::util::sourced_variable "${color_green}" + kube::util::sourced_variable "${color_blue}" + kube::util::sourced_variable "${color_cyan}" + kube::util::sourced_variable "${color_norm}" +fi + +# ex: ts=2 sw=2 et filetype=sh diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/verify-shellcheck.sh b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/verify-shellcheck.sh new file mode 100755 index 000000000..fd28021ac --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/verify-shellcheck.sh @@ -0,0 +1,146 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +# The csi-release-tools directory. +TOOLS="$(dirname "${BASH_SOURCE[0]}")" +. "${TOOLS}/util.sh" + +# Directory to check. Default is the parent of the tools themselves. +ROOT="${1:-${TOOLS}/..}" + +# required version for this script, if not installed on the host we will +# use the official docker image instead. 
keep this in sync with SHELLCHECK_IMAGE +SHELLCHECK_VERSION="0.6.0" +# upstream shellcheck latest stable image as of January 10th, 2019 +SHELLCHECK_IMAGE="koalaman/shellcheck-alpine:v0.6.0@sha256:7d4d712a2686da99d37580b4e2f45eb658b74e4b01caf67c1099adc294b96b52" + +# fixed name for the shellcheck docker container so we can reliably clean it up +SHELLCHECK_CONTAINER="k8s-shellcheck" + +# disabled lints +disabled=( + # this lint disallows non-constant source, which we use extensively without + # any known bugs + 1090 + # this lint prefers command -v to which, they are not the same + 2230 +) +# comma separate for passing to shellcheck +join_by() { + local IFS="$1"; + shift; + echo "$*"; +} +SHELLCHECK_DISABLED="$(join_by , "${disabled[@]}")" +readonly SHELLCHECK_DISABLED + +# creates the shellcheck container for later use +create_container () { + # TODO(bentheelder): this is a performance hack, we create the container with + # a sleep MAX_INT32 so that it is effectively paused. + # We then repeatedly exec to it to run each shellcheck, and later rm it when + # we're done. + # This is incredibly much faster than creating a container for each shellcheck + # call ... + docker run --name "${SHELLCHECK_CONTAINER}" -d --rm -v "${ROOT}:${ROOT}" -w "${ROOT}" --entrypoint="sleep" "${SHELLCHECK_IMAGE}" 2147483647 +} +# removes the shellcheck container +remove_container () { + docker rm -f "${SHELLCHECK_CONTAINER}" &> /dev/null || true +} + +# ensure we're linting the source tree +cd "${ROOT}" + +# find all shell scripts excluding ./_*, ./.git/*, ./vendor*, +# and anything git-ignored +all_shell_scripts=() +while IFS=$'\n' read -r script; + do git check-ignore -q "$script" || all_shell_scripts+=("$script"); +done < <(find . -name "*.sh" \ + -not \( \ + -path ./_\* -o \ + -path ./.git\* -o \ + -path ./vendor\* \ + \)) + +# detect if the host machine has the required shellcheck version installed +# if so, we will use that instead. +HAVE_SHELLCHECK=false +if which shellcheck &>/dev/null; then + detected_version="$(shellcheck --version | grep 'version: .*')" + if [[ "${detected_version}" = "version: ${SHELLCHECK_VERSION}" ]]; then + HAVE_SHELLCHECK=true + fi +fi + +# tell the user which we've selected and possibly set up the container +if ${HAVE_SHELLCHECK}; then + echo "Using host shellcheck ${SHELLCHECK_VERSION} binary." +else + echo "Using shellcheck ${SHELLCHECK_VERSION} docker image." + # remove any previous container, ensure we will attempt to cleanup on exit, + # and create the container + remove_container + kube::util::trap_add 'remove_container' EXIT + if ! output="$(create_container 2>&1)"; then + { + echo "Failed to create shellcheck container with output: " + echo "" + echo "${output}" + } >&2 + exit 1 + fi +fi + +# lint each script, tracking failures +errors=() +for f in "${all_shell_scripts[@]}"; do + set +o errexit + if ${HAVE_SHELLCHECK}; then + failedLint=$(shellcheck --exclude="${SHELLCHECK_DISABLED}" "${f}") + else + failedLint=$(docker exec -t ${SHELLCHECK_CONTAINER} \ + shellcheck --exclude="${SHELLCHECK_DISABLED}" "${f}") + fi + set -o errexit + if [[ -n "${failedLint}" ]]; then + errors+=( "${failedLint}" ) + fi +done + +# Check to be sure all the packages that should pass lint are. +if [ ${#errors[@]} -eq 0 ]; then + echo 'Congratulations! All shell files are passing lint.' +else + { + echo "Errors from shellcheck:" + for err in "${errors[@]}"; do + echo "$err" + done + echo + echo 'Please review the above warnings. 
You can test via "./hack/verify-shellcheck"' + echo 'If the above warnings do not make sense, you can exempt them from shellcheck' + echo 'checking by adding the "shellcheck disable" directive' + echo '(https://github.com/koalaman/shellcheck/wiki/Directive#disable).' + echo + } >&2 + false +fi diff --git a/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/verify-subtree.sh b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/verify-subtree.sh new file mode 100755 index 000000000..f04a9fa26 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/external-snapshotter/release-tools/verify-subtree.sh @@ -0,0 +1,41 @@ +#! /bin/sh -e +# +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script verifies that the content of a directory managed +# by "git subtree" has not been modified locally. It does that +# by looking for commits that modify the files with the +# subtree prefix (aka directory) while ignoring merge +# commits. Merge commits are where "git subtree" pulls the +# upstream files into the directory. +# +# Theoretically a developer can subvert this check by modifying files +# in a merge commit, but in practice that shouldn't happen. + +DIR="$1" +if [ ! "$DIR" ]; then + echo "usage: $0 " >&2 + exit 1 +fi + +REV=$(git log -n1 --remove-empty --format=format:%H --no-merges -- "$DIR") +if [ "$REV" ]; then + echo "Directory '$DIR' contains non-upstream changes:" + echo + git log --no-merges -- "$DIR" + exit 1 +else + echo "$DIR is a clean copy of upstream." +fi diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index c3a326dd4..9ec8893cb 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -269,6 +269,22 @@ func ContainElement(element interface{}) types.GomegaMatcher { } } +//BeElementOf succeeds if actual is contained in the passed in elements. +//BeElementOf() always uses Equal() to perform the match. +//When the passed in elements are comprised of a single element that is either an Array or Slice, BeElementOf() behaves +//as the reverse of ContainElement() that operates with Equal() to perform the match. +// Expect(2).Should(BeElementOf([]int{1, 2})) +// Expect(2).Should(BeElementOf([2]int{1, 2})) +//Otherwise, BeElementOf() provides a syntactic sugar for Or(Equal(_), Equal(_), ...): +// Expect(2).Should(BeElementOf(1, 2)) +// +//Actual must be typed. +func BeElementOf(elements ...interface{}) types.GomegaMatcher { + return &matchers.BeElementOfMatcher{ + Elements: elements, + } +} + //ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter. //By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. 
Here are some examples: // diff --git a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go new file mode 100644 index 000000000..0abb2d125 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go @@ -0,0 +1,55 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type BeElementOfMatcher struct { + Elements []interface{} +} + +func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err error) { + if reflect.TypeOf(actual) == nil { + return false, fmt.Errorf("BeElement matcher expects actual to be typed") + } + + length := len(matcher.Elements) + valueAt := func(i int) interface{} { + return matcher.Elements[i] + } + // Special handling of a single element of type Array or Slice + if length == 1 && isArrayOrSlice(valueAt(0)) { + element := valueAt(0) + value := reflect.ValueOf(element) + length = value.Len() + valueAt = func(i int) interface{} { + return value.Index(i).Interface() + } + } + + var lastError error + for i := 0; i < length; i++ { + matcher := &EqualMatcher{Expected: valueAt(i)} + success, err := matcher.Match(actual) + if err != nil { + lastError = err + continue + } + if success { + return true, nil + } + } + + return false, lastError +} + +func (matcher *BeElementOfMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be an element of", matcher.Elements) +} + +func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be an element of", matcher.Elements) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher_test.go new file mode 100644 index 000000000..141ff7595 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher_test.go @@ -0,0 +1,56 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeElementOf", func() { + Context("when passed a supported type", func() { + It("should do the right thing", func() { + Expect(2).Should(BeElementOf([2]int{1, 2})) + Expect(3).ShouldNot(BeElementOf([2]int{1, 2})) + + Expect(2).Should(BeElementOf([]int{1, 2})) + Expect(3).ShouldNot(BeElementOf([]int{1, 2})) + + Expect(2).Should(BeElementOf(1, 2)) + Expect(3).ShouldNot(BeElementOf(1, 2)) + + Expect("abc").Should(BeElementOf("abc")) + Expect("abc").ShouldNot(BeElementOf("def")) + + Expect("abc").ShouldNot(BeElementOf()) + Expect(7).ShouldNot(BeElementOf(nil)) + + arr := make([]myCustomType, 2) + arr[0] = myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}} + arr[1] = myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "c"}} + Expect(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).Should(BeElementOf(arr)) + Expect(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"b", "c"}}).ShouldNot(BeElementOf(arr)) + }) + }) + + Context("when passed a correctly typed nil", func() { + It("should operate succesfully on the passed in value", func() { + var nilSlice []int + Expect(1).ShouldNot(BeElementOf(nilSlice)) + + var nilMap map[int]string + Expect("foo").ShouldNot(BeElementOf(nilMap)) + }) + }) + + Context("when passed an unsupported type", func() { + It("should error", func() { + success, err := (&BeElementOfMatcher{Elements: []interface{}{0}}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + + success, err = (&BeElementOfMatcher{Elements: nil}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + }) + }) +})