mirror of
https://github.com/kubevirt/containerized-data-importer.git
synced 2025-06-03 06:30:22 +00:00
Update clone controller to use runtime library. (#1075)
Signed-off-by: Alexander Wels <awels@redhat.com> Co-authored-by: Michael Henriksen <mhenriks@redhat.com>
This commit is contained in:
parent
041b5a3ba2
commit
17db2920b1
@ -181,16 +181,10 @@ func start(cfg *rest.Config, stopCh <-chan struct{}) {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
cloneController := controller.NewCloneController(client,
|
||||
cdiClient,
|
||||
pvcInformer,
|
||||
podInformer,
|
||||
clonerImage,
|
||||
pullPolicy,
|
||||
verbose,
|
||||
uploadClientCertGenerator,
|
||||
uploadServerBundleFetcher,
|
||||
getAPIServerPublicKey())
|
||||
if _, err := controller.NewCloneController(mgr, client, log, clonerImage, pullPolicy, verbose, uploadClientCertGenerator, uploadServerBundleFetcher, getAPIServerPublicKey()); err != nil {
|
||||
klog.Errorf("Unable to setup clone controller: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
smartCloneController := controller.NewSmartCloneController(client,
|
||||
cdiClient,
|
||||
@ -241,13 +235,6 @@ func start(cfg *rest.Config, stopCh <-chan struct{}) {
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
err = cloneController.Run(1, stopCh)
|
||||
if err != nil {
|
||||
klog.Fatalf("Error running clone controller: %+v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
err = uploadController.Run(1, stopCh)
|
||||
if err != nil {
|
||||
|
@ -73,6 +73,8 @@ const (
|
||||
ClonerSourcePodName = "cdi-clone-source"
|
||||
// ClonerMountPath (controller pkg only)
|
||||
ClonerMountPath = "/var/run/cdi/clone/source"
|
||||
// ClonerSourcePodNameSuffix (controller pkg only)
|
||||
ClonerSourcePodNameSuffix = "-source-pod"
|
||||
|
||||
// SmartClonerCDILabel is the label applied to resources created by the smart-clone controller
|
||||
SmartClonerCDILabel = "cdi-smart-clone"
|
||||
|
@ -5,7 +5,6 @@ go_library(
|
||||
srcs = [
|
||||
"clone-controller.go",
|
||||
"config-controller.go",
|
||||
"controller.go",
|
||||
"datavolume-controller.go",
|
||||
"import-controller.go",
|
||||
"runtime-util.go",
|
||||
@ -78,7 +77,6 @@ go_test(
|
||||
"clone-controller_test.go",
|
||||
"config-controller_test.go",
|
||||
"controller_suite_test.go",
|
||||
"controller_test.go",
|
||||
"datavolume-controller_test.go",
|
||||
"import-controller_test.go",
|
||||
"upload-controller_test.go",
|
||||
@ -95,6 +93,7 @@ go_test(
|
||||
"//pkg/token:go_default_library",
|
||||
"//pkg/util/cert:go_default_library",
|
||||
"//pkg/util/cert/fetcher:go_default_library",
|
||||
"//pkg/util/cert/triple:go_default_library",
|
||||
"//tests/reporters:go_default_library",
|
||||
"//vendor/github.com/appscode/jsonpatch:go_default_library",
|
||||
"//vendor/github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1:go_default_library",
|
||||
|
@ -1,23 +1,30 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rsa"
|
||||
"fmt"
|
||||
clientset "kubevirt.io/containerized-data-importer/pkg/client/clientset/versioned"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/pkg/errors"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kubernetes "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/klog"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
|
||||
"kubevirt.io/containerized-data-importer/pkg/common"
|
||||
"kubevirt.io/containerized-data-importer/pkg/token"
|
||||
@ -47,6 +54,9 @@ const (
|
||||
// APIServerPublicKeyPath is the path to the apiserver public key
|
||||
APIServerPublicKeyPath = APIServerPublicKeyDir + "/id_rsa.pub"
|
||||
|
||||
// CloneSucceededPVC provides a const to indicate a clone to the PVC succeeded
|
||||
CloneSucceededPVC = "CloneSucceeded"
|
||||
|
||||
cloneSourcePodFinalizer = "cdi.kubevirt.io/cloneSource"
|
||||
|
||||
cloneTokenLeeway = 10 * time.Second
|
||||
@ -54,52 +64,193 @@ const (
|
||||
uploadClientCertDuration = 365 * 24 * time.Hour
|
||||
)
|
||||
|
||||
// CloneController represents the CDI Clone Controller
|
||||
type CloneController struct {
|
||||
Controller
|
||||
// CloneReconciler members
|
||||
type CloneReconciler struct {
|
||||
Client client.Client
|
||||
Scheme *runtime.Scheme
|
||||
K8sClient kubernetes.Interface
|
||||
recorder record.EventRecorder
|
||||
tokenValidator token.Validator
|
||||
cdiClient clientset.Interface
|
||||
clientCertGenerator generator.CertGenerator
|
||||
serverCAFetcher fetcher.CertBundleFetcher
|
||||
Log logr.Logger
|
||||
tokenValidator token.Validator
|
||||
Image string
|
||||
Verbose string
|
||||
PullPolicy string
|
||||
}
|
||||
|
||||
// NewCloneController sets up a Clone Controller, and returns a pointer to
|
||||
// to the newly created Controller
|
||||
func NewCloneController(client kubernetes.Interface,
|
||||
cdiClientSet clientset.Interface,
|
||||
pvcInformer coreinformers.PersistentVolumeClaimInformer,
|
||||
podInformer coreinformers.PodInformer,
|
||||
image string,
|
||||
pullPolicy string,
|
||||
// NewCloneController creates a new instance of the config controller.
|
||||
func NewCloneController(mgr manager.Manager,
|
||||
k8sClient kubernetes.Interface,
|
||||
log logr.Logger,
|
||||
image, pullPolicy,
|
||||
verbose string,
|
||||
clientCertGenerator generator.CertGenerator,
|
||||
serverCAFetcher fetcher.CertBundleFetcher,
|
||||
apiServerKey *rsa.PublicKey) *CloneController {
|
||||
|
||||
// Create event broadcaster
|
||||
klog.V(3).Info("Creating event broadcaster")
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(klog.V(2).Infof)
|
||||
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: client.CoreV1().Events("")})
|
||||
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: cloneControllerAgentName})
|
||||
|
||||
c := &CloneController{
|
||||
Controller: *NewController(client, pvcInformer, podInformer, image, pullPolicy, verbose),
|
||||
recorder: recorder,
|
||||
apiServerKey *rsa.PublicKey) (controller.Controller, error) {
|
||||
reconciler := &CloneReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: log.WithName("clone-controller"),
|
||||
tokenValidator: newCloneTokenValidator(apiServerKey),
|
||||
cdiClient: cdiClientSet,
|
||||
Image: image,
|
||||
Verbose: verbose,
|
||||
PullPolicy: pullPolicy,
|
||||
recorder: mgr.GetEventRecorderFor("clone-controller"),
|
||||
K8sClient: k8sClient,
|
||||
clientCertGenerator: clientCertGenerator,
|
||||
serverCAFetcher: serverCAFetcher,
|
||||
}
|
||||
return c
|
||||
cloneController, err := controller.New("clone-controller", mgr, controller.Options{
|
||||
Reconciler: reconciler,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := addCloneControllerWatches(mgr, cloneController); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cloneController, nil
|
||||
}
|
||||
|
||||
// addConfigControllerWatches sets up the watches used by the config controller.
|
||||
func addCloneControllerWatches(mgr manager.Manager, cloneController controller.Controller) error {
|
||||
// Setup watches
|
||||
if err := cloneController.Watch(&source.Kind{Type: &corev1.PersistentVolumeClaim{}}, &handler.EnqueueRequestForObject{}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cloneController.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForOwner{
|
||||
OwnerType: &corev1.PersistentVolumeClaim{},
|
||||
IsController: true,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newCloneTokenValidator(key *rsa.PublicKey) token.Validator {
|
||||
return token.NewValidator(common.CloneTokenIssuer, key, cloneTokenLeeway)
|
||||
}
|
||||
|
||||
func (cc *CloneController) findCloneSourcePodFromCache(pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
|
||||
func (r *CloneReconciler) shouldReconcile(pvc *corev1.PersistentVolumeClaim) bool {
|
||||
return checkPVC(pvc, AnnCloneRequest) && !metav1.HasAnnotation(pvc.ObjectMeta, AnnCloneOf)
|
||||
}
|
||||
|
||||
// Reconcile the reconcile loop for host assisted clone pvc.
|
||||
func (r *CloneReconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
|
||||
// Get the PVC.
|
||||
pvc := &corev1.PersistentVolumeClaim{}
|
||||
if err := r.Client.Get(context.TODO(), req.NamespacedName, pvc); err != nil {
|
||||
if k8serrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
log := r.Log.WithValues("PVC", req.NamespacedName)
|
||||
log.V(1).Info("reconciling Clone PVCs")
|
||||
if !r.shouldReconcile(pvc) {
|
||||
log.V(1).Info("Should not reconcile this PVC", "checkPVC(AnnCloneRequest)", checkPVC(pvc, AnnCloneRequest), "NOT has annotation(AnnCloneOf)", !metav1.HasAnnotation(pvc.ObjectMeta, AnnCloneOf), "has finalizer?", r.hasFinalizer(pvc, cloneSourcePodFinalizer))
|
||||
if r.hasFinalizer(pvc, cloneSourcePodFinalizer) {
|
||||
// Clone completed, remove source pod and finalizer.
|
||||
if err := r.cleanup(pvc, log); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
ready, err := r.waitTargetPodRunningOrSucceeded(pvc, log)
|
||||
if err != nil {
|
||||
return reconcile.Result{}, errors.Wrap(err, "error ensuring target upload pod running")
|
||||
}
|
||||
|
||||
if !ready {
|
||||
log.V(3).Info("Upload pod not ready yet for PVC")
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
sourcePod, err := r.findCloneSourcePod(pvc)
|
||||
if err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := r.reconcileSourcePod(sourcePod, pvc, log); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := r.updatePvcFromPod(sourcePod, pvc, log); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *CloneReconciler) reconcileSourcePod(sourcePod *corev1.Pod, pvc *corev1.PersistentVolumeClaim, log logr.Logger) error {
|
||||
if sourcePod == nil {
|
||||
if err := r.validateSourceAndTarget(pvc); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
clientName, ok := pvc.Annotations[AnnUploadClientName]
|
||||
if !ok {
|
||||
return errors.Errorf("PVC %s/%s missing required %s annotation", pvc.Namespace, pvc.Name, AnnUploadClientName)
|
||||
}
|
||||
|
||||
sourcePod, err := r.CreateCloneSourcePod(r.Image, r.PullPolicy, clientName, pvc, log)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.V(3).Info("Created source pod ", "sourcePod.Namespace", sourcePod.Namespace, "sourcePod.Name", sourcePod.Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *CloneReconciler) updatePvcFromPod(sourcePod *corev1.Pod, pvc *corev1.PersistentVolumeClaim, log logr.Logger) error {
|
||||
currentPvcCopy := pvc.DeepCopyObject()
|
||||
log.V(1).Info("Updating PVC from pod")
|
||||
|
||||
pvc = r.addFinalizer(pvc, cloneSourcePodFinalizer)
|
||||
|
||||
log.V(3).Info("Pod phase for PVC", "PVC phase", pvc.Annotations[AnnPodPhase])
|
||||
|
||||
if podSucceededFromPVC(pvc) && pvc.Annotations[AnnCloneOf] != "true" {
|
||||
log.V(1).Info("Adding CloneOf annotation to PVC")
|
||||
pvc.Annotations[AnnCloneOf] = "true"
|
||||
r.recorder.Event(pvc, corev1.EventTypeNormal, CloneSucceededPVC, "Clone Successful")
|
||||
}
|
||||
if !reflect.DeepEqual(currentPvcCopy, pvc) {
|
||||
return r.updatePVC(pvc)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *CloneReconciler) updatePVC(pvc *corev1.PersistentVolumeClaim) error {
|
||||
if err := r.Client.Update(context.TODO(), pvc); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *CloneReconciler) waitTargetPodRunningOrSucceeded(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (bool, error) {
|
||||
rs, ok := pvc.Annotations[AnnPodReady]
|
||||
if !ok {
|
||||
log.V(3).Info("clone target pod not ready")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
ready, err := strconv.ParseBool(rs)
|
||||
if err != nil {
|
||||
return false, errors.Wrapf(err, "error parsing %s annotation", AnnPodReady)
|
||||
}
|
||||
|
||||
if !ready {
|
||||
log.V(3).Info("clone target pod not ready")
|
||||
return podSucceededFromPVC(pvc), nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (r *CloneReconciler) findCloneSourcePod(pvc *corev1.PersistentVolumeClaim) (*corev1.Pod, error) {
|
||||
isCloneRequest, sourceNamespace, _ := ParseCloneRequestAnnotation(pvc)
|
||||
if !isCloneRequest {
|
||||
return nil, nil
|
||||
@ -114,228 +265,346 @@ func (cc *CloneController) findCloneSourcePodFromCache(pvc *v1.PersistentVolumeC
|
||||
return nil, errors.Wrap(err, "error creating label selector")
|
||||
}
|
||||
|
||||
podList, err := cc.podLister.Pods(sourceNamespace).List(selector)
|
||||
if err != nil {
|
||||
podList := &corev1.PodList{}
|
||||
if err := r.Client.List(context.TODO(), podList, &client.ListOptions{Namespace: sourceNamespace, LabelSelector: selector}); err != nil {
|
||||
return nil, errors.Wrap(err, "error listing pods")
|
||||
}
|
||||
|
||||
if len(podList) > 1 {
|
||||
if len(podList.Items) > 1 {
|
||||
return nil, errors.Errorf("multiple source pods found for clone PVC %s/%s", pvc.Namespace, pvc.Name)
|
||||
}
|
||||
|
||||
if len(podList) == 0 {
|
||||
if len(podList.Items) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return podList[0], nil
|
||||
return &podList.Items[0], nil
|
||||
}
|
||||
|
||||
// Create the cloning source and target pods based the pvc. The pvc is checked (again) to ensure that we are not already
|
||||
// processing this pvc, which would result in multiple pods for the same pvc.
|
||||
func (cc *CloneController) processPvcItem(pvc *v1.PersistentVolumeClaim) error {
|
||||
ready, err := cc.waitTargetPodRunningOrSucceeded(pvc)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error unsuring target upload pod running")
|
||||
}
|
||||
|
||||
if !ready {
|
||||
klog.V(3).Infof("Upload pod not ready yet for PVC %s/%s", pvc.Namespace, pvc.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
pvcKey, err := cache.MetaNamespaceKeyFunc(pvc)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error getting pvcKey")
|
||||
}
|
||||
|
||||
// source pod not seen yet
|
||||
if !cc.podExpectations.SatisfiedExpectations(pvcKey) {
|
||||
klog.V(3).Infof("Waiting on expectations for %s/%s", pvc.Namespace, pvc.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
sourcePod, err := cc.findCloneSourcePodFromCache(pvc)
|
||||
func (r *CloneReconciler) validateSourceAndTarget(targetPvc *corev1.PersistentVolumeClaim) error {
|
||||
sourcePvc, err := r.getCloneRequestSourcePVC(targetPvc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if sourcePod == nil {
|
||||
if err = cc.validateSourceAndTarget(pvc); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
clientName, ok := pvc.Annotations[AnnUploadClientName]
|
||||
if !ok {
|
||||
return errors.Errorf("PVC %s/%s missing required %s annotation", pvc.Namespace, pvc.Name, AnnUploadClientName)
|
||||
}
|
||||
|
||||
pvc, err = addFinalizer(cc.clientset, pvc, cloneSourcePodFinalizer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
clientCert, clientKey, err := cc.clientCertGenerator.MakeClientCert(clientName, nil, uploadClientCertDuration)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
serverCABundle, err := cc.serverCAFetcher.BundleBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cc.raisePodCreate(pvcKey)
|
||||
|
||||
args := CloneSourcePodArgs{
|
||||
Client: cc.clientset,
|
||||
CDIClient: cc.cdiClient,
|
||||
Image: cc.image,
|
||||
PullPolicy: cc.pullPolicy,
|
||||
ServerCACert: serverCABundle,
|
||||
ClientCert: clientCert,
|
||||
ClientKey: clientKey,
|
||||
PVC: pvc,
|
||||
}
|
||||
|
||||
sourcePod, err = CreateCloneSourcePod(args)
|
||||
if err != nil {
|
||||
cc.observePodCreate(pvcKey)
|
||||
return err
|
||||
}
|
||||
|
||||
klog.V(3).Infof("Created pod %s/%s", sourcePod.Namespace, sourcePod.Name)
|
||||
}
|
||||
|
||||
klog.V(3).Infof("Pod phase for PVC %s/%s is %s", pvc.Namespace, pvc.Name, pvc.Annotations[AnnPodPhase])
|
||||
|
||||
if podSucceededFromPVC(pvc) && pvc.Annotations[AnnCloneOf] != "true" {
|
||||
klog.V(1).Infof("Adding CloneOf annotation to PVC %s/%s", pvc.Namespace, pvc.Name)
|
||||
pvc.Annotations[AnnCloneOf] = "true"
|
||||
|
||||
_, err := cc.clientset.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(pvc)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error updating pvc")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cc *CloneController) waitTargetPodRunningOrSucceeded(pvc *v1.PersistentVolumeClaim) (bool, error) {
|
||||
rs, ok := pvc.Annotations[AnnPodReady]
|
||||
if !ok {
|
||||
klog.V(3).Infof("clone target pod for %s/%s not ready", pvc.Namespace, pvc.Name)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
ready, err := strconv.ParseBool(rs)
|
||||
if err != nil {
|
||||
return false, errors.Wrapf(err, "error parsing %s annotation", AnnPodReady)
|
||||
}
|
||||
|
||||
if !ready {
|
||||
klog.V(3).Infof("clone target pod for %s/%s not ready", pvc.Namespace, pvc.Name)
|
||||
return podSucceededFromPVC(pvc), nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (c *Controller) raisePodCreate(pvcKey string) {
|
||||
c.podExpectations.ExpectCreations(pvcKey, 1)
|
||||
}
|
||||
|
||||
// Select only pvcs with the 'CloneRequest' annotation and that are not being processed.
|
||||
// We forget the key unless `processPvcItem` returns an error in which case the key can be
|
||||
//ProcessNextPvcItem retried.
|
||||
|
||||
//ProcessNextPvcItem ...
|
||||
func (cc *CloneController) ProcessNextPvcItem() bool {
|
||||
key, shutdown := cc.queue.Get()
|
||||
if shutdown {
|
||||
return false
|
||||
}
|
||||
defer cc.queue.Done(key)
|
||||
|
||||
err := cc.syncPvc(key.(string))
|
||||
if err != nil {
|
||||
// Put the item back on the workqueue to handle any transient errors.
|
||||
cc.queue.AddRateLimited(key.(string))
|
||||
// processPvcItem errors may not have been logged so log here
|
||||
klog.Errorf("error processing pvc %q: %v", key, err)
|
||||
return true
|
||||
}
|
||||
return cc.forgetKey(key, fmt.Sprintf("ProcessNextPvcItem: processing pvc %q completed", key))
|
||||
}
|
||||
|
||||
func (cc *CloneController) syncPvc(key string) error {
|
||||
pvc, exists, err := cc.pvcFromKey(key)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error getting PVC")
|
||||
} else if !exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !checkPVC(pvc, AnnCloneRequest) || pvc.DeletionTimestamp != nil || metav1.HasAnnotation(pvc.ObjectMeta, AnnCloneOf) {
|
||||
cc.cleanup(key, pvc)
|
||||
return nil
|
||||
}
|
||||
|
||||
klog.V(3).Infof("ProcessNextPvcItem: next pvc to process: \"%s/%s\"\n", pvc.Namespace, pvc.Name)
|
||||
return cc.processPvcItem(pvc)
|
||||
}
|
||||
|
||||
func (cc *CloneController) cleanup(key string, pvc *v1.PersistentVolumeClaim) error {
|
||||
klog.V(3).Infof("Cleaning up for PVC %s/%s", pvc.Namespace, pvc.Name)
|
||||
|
||||
pod, err := cc.findCloneSourcePodFromCache(pvc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if pod != nil && pod.DeletionTimestamp == nil {
|
||||
if podSucceededFromPVC(pvc) && pod.Status.Phase == v1.PodRunning {
|
||||
klog.V(3).Infof("Clone succeeded, waiting for source pod %s/%s to stop running", pod.Namespace, pod.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
if err = cc.clientset.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{}); err != nil {
|
||||
if !k8serrors.IsNotFound(err) {
|
||||
return errors.Wrap(err, "error deleting clone source pod")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_, err = removeFinalizer(cc.clientset, pvc, cloneSourcePodFinalizer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cc.podExpectations.DeleteExpectations(key)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cc *CloneController) validateSourceAndTarget(targetPvc *v1.PersistentVolumeClaim) error {
|
||||
sourcePvc, err := getCloneRequestSourcePVC(targetPvc, cc.Controller.pvcLister)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = validateCloneToken(cc.tokenValidator, sourcePvc, targetPvc); err != nil {
|
||||
if err = validateCloneToken(r.tokenValidator, sourcePvc, targetPvc); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ValidateCanCloneSourceAndTargetSpec(&sourcePvc.Spec, &targetPvc.Spec)
|
||||
}
|
||||
|
||||
//Run is being called from cdi-controller (cmd)
|
||||
func (cc *CloneController) Run(threadiness int, stopCh <-chan struct{}) error {
|
||||
cc.Controller.run(threadiness, stopCh, cc.runPVCWorkers)
|
||||
func (r *CloneReconciler) addFinalizer(pvc *corev1.PersistentVolumeClaim, name string) *corev1.PersistentVolumeClaim {
|
||||
if r.hasFinalizer(pvc, name) {
|
||||
return pvc
|
||||
}
|
||||
|
||||
pvc.Finalizers = append(pvc.Finalizers, name)
|
||||
return pvc
|
||||
}
|
||||
|
||||
func (r *CloneReconciler) removeFinalizer(pvc *corev1.PersistentVolumeClaim, name string) *corev1.PersistentVolumeClaim {
|
||||
if !r.hasFinalizer(pvc, name) {
|
||||
return pvc
|
||||
}
|
||||
|
||||
var finalizers []string
|
||||
for _, f := range pvc.Finalizers {
|
||||
if f != name {
|
||||
finalizers = append(finalizers, f)
|
||||
}
|
||||
}
|
||||
|
||||
pvc.Finalizers = finalizers
|
||||
return pvc
|
||||
}
|
||||
|
||||
func (r *CloneReconciler) hasFinalizer(object metav1.Object, value string) bool {
|
||||
for _, f := range object.GetFinalizers() {
|
||||
if f == value {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// returns the CloneRequest string which contains the pvc name (and namespace) from which we want to clone the image.
|
||||
func (r *CloneReconciler) getCloneRequestSourcePVC(pvc *corev1.PersistentVolumeClaim) (*corev1.PersistentVolumeClaim, error) {
|
||||
exists, namespace, name := ParseCloneRequestAnnotation(pvc)
|
||||
if !exists {
|
||||
return nil, errors.New("error parsing clone request annotation")
|
||||
}
|
||||
pvc = &corev1.PersistentVolumeClaim{}
|
||||
if err := r.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, pvc); err != nil {
|
||||
return nil, errors.Wrap(err, "error getting clone source PVC")
|
||||
}
|
||||
return pvc, nil
|
||||
}
|
||||
|
||||
func (r *CloneReconciler) cleanup(pvc *corev1.PersistentVolumeClaim, log logr.Logger) error {
|
||||
log.V(3).Info("Cleaning up for PVC", "pvc.Namespace", pvc.Namespace, "pvc.Name", pvc.Name)
|
||||
|
||||
pod, err := r.findCloneSourcePod(pvc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if pod != nil && pod.DeletionTimestamp == nil {
|
||||
if podSucceededFromPVC(pvc) && pod.Status.Phase == corev1.PodRunning {
|
||||
log.V(3).Info("Clone succeeded, waiting for source pod to stop running", "pod.Namespace", pod.Namespace, "pod.Name", pod.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
if err = r.Client.Delete(context.TODO(), pod); err != nil {
|
||||
if !k8serrors.IsNotFound(err) {
|
||||
return errors.Wrap(err, "error deleting clone source pod")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return r.updatePVC(r.removeFinalizer(pvc, cloneSourcePodFinalizer))
|
||||
}
|
||||
|
||||
// CreateCloneSourcePod creates our cloning src pod which will be used for out of band cloning to read the contents of the src PVC
|
||||
func (r *CloneReconciler) CreateCloneSourcePod(image, pullPolicy, clientName string, pvc *corev1.PersistentVolumeClaim, log logr.Logger) (*corev1.Pod, error) {
|
||||
exists, sourcePvcNamespace, sourcePvcName := ParseCloneRequestAnnotation(pvc)
|
||||
if !exists {
|
||||
return nil, errors.Errorf("bad CloneRequest Annotation")
|
||||
}
|
||||
|
||||
ownerKey, err := cache.MetaNamespaceKeyFunc(pvc)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error getting cache key")
|
||||
}
|
||||
|
||||
clientCert, clientKey, err := r.clientCertGenerator.MakeClientCert(clientName, nil, uploadClientCertDuration)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
serverCABundle, err := r.serverCAFetcher.BundleBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
podResourceRequirements, err := GetDefaultPodResourceRequirements(r.Client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pod := MakeCloneSourcePodSpec(image, pullPolicy, sourcePvcName, sourcePvcNamespace, ownerKey, clientKey, clientCert, serverCABundle, pvc, podResourceRequirements)
|
||||
|
||||
if err := r.Client.Create(context.TODO(), pod); err != nil {
|
||||
return nil, errors.Wrap(err, "source pod API create errored")
|
||||
}
|
||||
|
||||
log.V(1).Info("cloning source pod (image) created\n", "pod.Namespace", pod.Namespace, "pod.Name", pod.Name, "image", image)
|
||||
|
||||
return pod, nil
|
||||
}
|
||||
|
||||
func getCloneSourcePodName(targetPvc *corev1.PersistentVolumeClaim) string {
|
||||
return string(targetPvc.GetUID()) + "-source-pod"
|
||||
}
|
||||
|
||||
// MakeCloneSourcePodSpec creates and returns the clone source pod spec based on the target pvc.
|
||||
func MakeCloneSourcePodSpec(image, pullPolicy, sourcePvcName, sourcePvcNamespace, ownerRefAnno string,
|
||||
clientKey, clientCert, serverCACert []byte, targetPvc *corev1.PersistentVolumeClaim, resourceRequirements *corev1.ResourceRequirements) *corev1.Pod {
|
||||
|
||||
var ownerID string
|
||||
podName := getCloneSourcePodName(targetPvc)
|
||||
url := GetUploadServerURL(targetPvc.Namespace, targetPvc.Name, common.UploadPathSync)
|
||||
pvcOwner := metav1.GetControllerOf(targetPvc)
|
||||
if pvcOwner != nil && pvcOwner.Kind == "DataVolume" {
|
||||
ownerID = string(pvcOwner.UID)
|
||||
}
|
||||
|
||||
pod := &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Namespace: sourcePvcNamespace,
|
||||
Annotations: map[string]string{
|
||||
AnnCreatedBy: "yes",
|
||||
AnnOwnerRef: ownerRefAnno,
|
||||
},
|
||||
Labels: map[string]string{
|
||||
common.CDILabelKey: common.CDILabelValue, //filtered by the podInformer
|
||||
common.CDIComponentLabel: common.ClonerSourcePodName,
|
||||
// this label is used when searching for a pvc's cloner source pod.
|
||||
CloneUniqueID: getCloneSourcePodName(targetPvc),
|
||||
common.PrometheusLabel: "",
|
||||
},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
SecurityContext: &corev1.PodSecurityContext{
|
||||
RunAsUser: &[]int64{0}[0],
|
||||
},
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: common.ClonerSourcePodName,
|
||||
Image: image,
|
||||
ImagePullPolicy: corev1.PullPolicy(pullPolicy),
|
||||
Env: []corev1.EnvVar{
|
||||
/*
|
||||
Easier to just stick key/certs in env vars directly no.
|
||||
Maybe revisit when we fix the "naming things" problem.
|
||||
*/
|
||||
{
|
||||
Name: "CLIENT_KEY",
|
||||
Value: string(clientKey),
|
||||
},
|
||||
{
|
||||
Name: "CLIENT_CERT",
|
||||
Value: string(clientCert),
|
||||
},
|
||||
{
|
||||
Name: "SERVER_CA_CERT",
|
||||
Value: string(serverCACert),
|
||||
},
|
||||
{
|
||||
Name: "UPLOAD_URL",
|
||||
Value: url,
|
||||
},
|
||||
{
|
||||
Name: common.OwnerUID,
|
||||
Value: ownerID,
|
||||
},
|
||||
},
|
||||
Ports: []corev1.ContainerPort{
|
||||
{
|
||||
Name: "metrics",
|
||||
ContainerPort: 8443,
|
||||
Protocol: corev1.ProtocolTCP,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: corev1.RestartPolicyOnFailure,
|
||||
Volumes: []corev1.Volume{
|
||||
{
|
||||
Name: DataVolName,
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: sourcePvcName,
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if resourceRequirements != nil {
|
||||
pod.Spec.Containers[0].Resources = *resourceRequirements
|
||||
}
|
||||
|
||||
var volumeMode corev1.PersistentVolumeMode
|
||||
var addVars []corev1.EnvVar
|
||||
|
||||
if targetPvc.Spec.VolumeMode != nil {
|
||||
volumeMode = *targetPvc.Spec.VolumeMode
|
||||
} else {
|
||||
volumeMode = corev1.PersistentVolumeFilesystem
|
||||
}
|
||||
|
||||
if volumeMode == corev1.PersistentVolumeBlock {
|
||||
pod.Spec.Containers[0].VolumeDevices = addVolumeDevices()
|
||||
addVars = []corev1.EnvVar{
|
||||
{
|
||||
Name: "VOLUME_MODE",
|
||||
Value: "block",
|
||||
},
|
||||
{
|
||||
Name: "MOUNT_POINT",
|
||||
Value: common.WriteBlockPath,
|
||||
},
|
||||
}
|
||||
} else {
|
||||
pod.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{
|
||||
{
|
||||
Name: DataVolName,
|
||||
MountPath: common.ClonerMountPath,
|
||||
},
|
||||
}
|
||||
addVars = []corev1.EnvVar{
|
||||
{
|
||||
Name: "VOLUME_MODE",
|
||||
Value: "filesystem",
|
||||
},
|
||||
{
|
||||
Name: "MOUNT_POINT",
|
||||
Value: common.ClonerMountPath,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, addVars...)
|
||||
|
||||
return pod
|
||||
}
|
||||
|
||||
func validateCloneToken(validator token.Validator, source, target *corev1.PersistentVolumeClaim) error {
|
||||
tok, ok := target.Annotations[AnnCloneToken]
|
||||
if !ok {
|
||||
return errors.New("clone token missing")
|
||||
}
|
||||
|
||||
tokenData, err := validator.Validate(tok)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error verifying token")
|
||||
}
|
||||
|
||||
if tokenData.Operation != token.OperationClone ||
|
||||
tokenData.Name != source.Name ||
|
||||
tokenData.Namespace != source.Namespace ||
|
||||
tokenData.Resource.Resource != "persistentvolumeclaims" ||
|
||||
tokenData.Params["targetNamespace"] != target.Namespace ||
|
||||
tokenData.Params["targetName"] != target.Name {
|
||||
return errors.New("invalid token")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cc *CloneController) runPVCWorkers() {
|
||||
for cc.ProcessNextPvcItem() {
|
||||
// ParseCloneRequestAnnotation parses the clone request annotation
|
||||
func ParseCloneRequestAnnotation(pvc *corev1.PersistentVolumeClaim) (exists bool, namespace, name string) {
|
||||
var ann string
|
||||
ann, exists = pvc.Annotations[AnnCloneRequest]
|
||||
if !exists {
|
||||
return
|
||||
}
|
||||
|
||||
sp := strings.Split(ann, "/")
|
||||
if len(sp) != 2 {
|
||||
exists = false
|
||||
return
|
||||
}
|
||||
|
||||
namespace, name = sp[0], sp[1]
|
||||
return
|
||||
}
|
||||
|
||||
// ValidateCanCloneSourceAndTargetSpec validates the specs passed in are compatible for cloning.
|
||||
func ValidateCanCloneSourceAndTargetSpec(sourceSpec, targetSpec *corev1.PersistentVolumeClaimSpec) error {
|
||||
sourceRequest := sourceSpec.Resources.Requests[corev1.ResourceStorage]
|
||||
targetRequest := targetSpec.Resources.Requests[corev1.ResourceStorage]
|
||||
// Verify that the target PVC size is equal or larger than the source.
|
||||
if sourceRequest.Value() > targetRequest.Value() {
|
||||
return errors.New("target resources requests storage size is smaller than the source")
|
||||
}
|
||||
// Verify that the source and target volume modes are the same.
|
||||
sourceVolumeMode := corev1.PersistentVolumeFilesystem
|
||||
if sourceSpec.VolumeMode != nil && *sourceSpec.VolumeMode == corev1.PersistentVolumeBlock {
|
||||
sourceVolumeMode = corev1.PersistentVolumeBlock
|
||||
}
|
||||
targetVolumeMode := corev1.PersistentVolumeFilesystem
|
||||
if targetSpec.VolumeMode != nil && *targetSpec.VolumeMode == corev1.PersistentVolumeBlock {
|
||||
targetVolumeMode = corev1.PersistentVolumeBlock
|
||||
}
|
||||
if sourceVolumeMode != targetVolumeMode {
|
||||
return fmt.Errorf("source volumeMode (%s) and target volumeMode (%s) do not match",
|
||||
sourceVolumeMode, targetVolumeMode)
|
||||
}
|
||||
// Can clone.
|
||||
return nil
|
||||
}
|
||||
|
@ -1,26 +1,35 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"kubevirt.io/containerized-data-importer/pkg/util/cert/fetcher"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
k8sfake "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/tools/record"
|
||||
|
||||
cdifake "kubevirt.io/containerized-data-importer/pkg/client/clientset/versioned/fake"
|
||||
cdiv1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1"
|
||||
"kubevirt.io/containerized-data-importer/pkg/common"
|
||||
"kubevirt.io/containerized-data-importer/pkg/util/cert/fetcher"
|
||||
"kubevirt.io/containerized-data-importer/pkg/token"
|
||||
"kubevirt.io/containerized-data-importer/pkg/util/cert/triple"
|
||||
)
|
||||
|
||||
// CloneFixture embeds the shared ControllerFixture to drive the legacy
// clone-controller tests in this file.
type CloneFixture struct {
	ControllerFixture
}
|
||||
|
||||
var (
|
||||
apiServerKey *rsa.PrivateKey
|
||||
apiServerKeyOnce sync.Once
|
||||
@ -37,6 +46,348 @@ func (cg *fakeCertGenerator) MakeServerCert(namespace, service string, duration
|
||||
return []byte("foo"), []byte("bar"), nil
|
||||
}
|
||||
|
||||
// FakeValidator is a test stand-in for token.Validator. Its Validate method
// (defined later in this file) succeeds only when the presented token string
// equals match, and then returns a payload built from Name, Namespace and
// Params with Operation fixed to token.OperationClone.
type FakeValidator struct {
	match     string
	Operation token.Operation
	Name      string
	Namespace string
	Resource  metav1.GroupVersionResource
	Params    map[string]string
}
|
||||
|
||||
var _ = Describe("Clone controller reconcile loop", func() {
|
||||
var (
|
||||
reconciler *CloneReconciler
|
||||
)
|
||||
AfterEach(func() {
|
||||
if reconciler != nil {
|
||||
close(reconciler.recorder.(*record.FakeRecorder).Events)
|
||||
reconciler = nil
|
||||
}
|
||||
})
|
||||
|
||||
It("Should return success if a PVC with no annotations is passed, due to it being ignored", func() {
|
||||
reconciler = createCloneReconciler(createPvc("testPvc1", "default", map[string]string{}, nil))
|
||||
_, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: "testPvc1", Namespace: "default"}})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Should return success if no PVC can be found, due to it not existing", func() {
|
||||
reconciler = createCloneReconciler()
|
||||
_, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: "testPvc1", Namespace: "default"}})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Should return success if no PVC can be found due to not existing in passed namespace", func() {
|
||||
reconciler = createCloneReconciler(createPvc("testPvc1", "default", map[string]string{AnnEndpoint: testEndPoint}, nil))
|
||||
_, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: "testPvc1", Namespace: "invalid"}})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Should return success if a PVC with clone request annotation and cloneof is passed, due to it being ignored", func() {
|
||||
reconciler = createCloneReconciler(createPvc("testPvc1", "default", map[string]string{AnnCloneRequest: "cloneme", AnnCloneOf: "something"}, nil))
|
||||
_, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: "testPvc1", Namespace: "default"}})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Should return success if target pod is not ready", func() {
|
||||
reconciler = createCloneReconciler(createPvc("testPvc1", "default", map[string]string{AnnCloneRequest: "cloneme"}, nil))
|
||||
_, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: "testPvc1", Namespace: "default"}})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Should return error if target pod annotation is invalid", func() {
|
||||
reconciler = createCloneReconciler(createPvc("testPvc1", "default", map[string]string{AnnCloneRequest: "cloneme", AnnPodReady: "invalid"}, nil))
|
||||
_, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: "testPvc1", Namespace: "default"}})
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring(fmt.Sprintf("error parsing %s annotation", AnnPodReady)))
|
||||
})
|
||||
|
||||
It("Should create new source pod if none exists, and target pod is marked ready", func() {
|
||||
testPvc := createPvc("testPvc1", "default", map[string]string{
|
||||
AnnCloneRequest: "default/source", AnnPodReady: "true", AnnCloneToken: "foobaz", AnnUploadClientName: "uploadclient"}, nil)
|
||||
reconciler = createCloneReconciler(testPvc, createPvc("source", "default", map[string]string{}, nil))
|
||||
By("Setting up the match token")
|
||||
reconciler.tokenValidator.(*FakeValidator).match = "foobaz"
|
||||
reconciler.tokenValidator.(*FakeValidator).Name = "source"
|
||||
reconciler.tokenValidator.(*FakeValidator).Namespace = "default"
|
||||
reconciler.tokenValidator.(*FakeValidator).Params["targetNamespace"] = "default"
|
||||
reconciler.tokenValidator.(*FakeValidator).Params["targetName"] = "testPvc1"
|
||||
By("Verifying no source pod exists")
|
||||
sourcePod, err := reconciler.findCloneSourcePod(testPvc)
|
||||
Expect(sourcePod).To(BeNil())
|
||||
_, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: "testPvc1", Namespace: "default"}})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
By("Verifying source pod exists")
|
||||
sourcePod, err = reconciler.findCloneSourcePod(testPvc)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(sourcePod.GetLabels()[CloneUniqueID]).To(Equal("default-testPvc1-source-pod"))
|
||||
By("Verifying the PVC now has a finalizer")
|
||||
err = reconciler.Client.Get(context.TODO(), types.NamespacedName{Name: "testPvc1", Namespace: "default"}, testPvc)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(reconciler.hasFinalizer(testPvc, cloneSourcePodFinalizer)).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Should error with missing upload client name annotation if none provided", func() {
|
||||
testPvc := createPvc("testPvc1", "default", map[string]string{
|
||||
AnnCloneRequest: "default/source", AnnPodReady: "true", AnnCloneToken: "foobaz"}, nil)
|
||||
reconciler = createCloneReconciler(testPvc, createPvc("source", "default", map[string]string{}, nil))
|
||||
By("Setting up the match token")
|
||||
reconciler.tokenValidator.(*FakeValidator).match = "foobaz"
|
||||
reconciler.tokenValidator.(*FakeValidator).Name = "source"
|
||||
reconciler.tokenValidator.(*FakeValidator).Namespace = "default"
|
||||
reconciler.tokenValidator.(*FakeValidator).Params["targetNamespace"] = "default"
|
||||
reconciler.tokenValidator.(*FakeValidator).Params["targetName"] = "testPvc1"
|
||||
By("Verifying no source pod exists")
|
||||
sourcePod, err := reconciler.findCloneSourcePod(testPvc)
|
||||
Expect(sourcePod).To(BeNil())
|
||||
_, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: "testPvc1", Namespace: "default"}})
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("missing required " + AnnUploadClientName + " annotation"))
|
||||
})
|
||||
|
||||
It("Should update the PVC from the pod status", func() {
|
||||
testPvc := createPvc("testPvc1", "default", map[string]string{
|
||||
AnnCloneRequest: "default/source", AnnPodReady: "true", AnnCloneToken: "foobaz", AnnUploadClientName: "uploadclient"}, nil)
|
||||
reconciler = createCloneReconciler(testPvc, createPvc("source", "default", map[string]string{}, nil))
|
||||
By("Setting up the match token")
|
||||
reconciler.tokenValidator.(*FakeValidator).match = "foobaz"
|
||||
reconciler.tokenValidator.(*FakeValidator).Name = "source"
|
||||
reconciler.tokenValidator.(*FakeValidator).Namespace = "default"
|
||||
reconciler.tokenValidator.(*FakeValidator).Params["targetNamespace"] = "default"
|
||||
reconciler.tokenValidator.(*FakeValidator).Params["targetName"] = "testPvc1"
|
||||
By("Verifying no source pod exists")
|
||||
sourcePod, err := reconciler.findCloneSourcePod(testPvc)
|
||||
Expect(sourcePod).To(BeNil())
|
||||
_, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: "testPvc1", Namespace: "default"}})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
By("Verifying source pod exists")
|
||||
sourcePod, err = reconciler.findCloneSourcePod(testPvc)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(sourcePod.GetLabels()[CloneUniqueID]).To(Equal("default-testPvc1-source-pod"))
|
||||
By("Verifying the PVC now has a finalizer")
|
||||
err = reconciler.Client.Get(context.TODO(), types.NamespacedName{Name: "testPvc1", Namespace: "default"}, testPvc)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(reconciler.hasFinalizer(testPvc, cloneSourcePodFinalizer)).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Should update the cloneof when complete", func() {
|
||||
testPvc := createPvc("testPvc1", "default", map[string]string{
|
||||
AnnCloneRequest: "default/source", AnnPodReady: "true", AnnCloneToken: "foobaz", AnnUploadClientName: "uploadclient"}, nil)
|
||||
reconciler = createCloneReconciler(testPvc, createPvc("source", "default", map[string]string{}, nil))
|
||||
By("Setting up the match token")
|
||||
reconciler.tokenValidator.(*FakeValidator).match = "foobaz"
|
||||
reconciler.tokenValidator.(*FakeValidator).Name = "source"
|
||||
reconciler.tokenValidator.(*FakeValidator).Namespace = "default"
|
||||
reconciler.tokenValidator.(*FakeValidator).Params["targetNamespace"] = "default"
|
||||
reconciler.tokenValidator.(*FakeValidator).Params["targetName"] = "testPvc1"
|
||||
By("Verifying no source pod exists")
|
||||
sourcePod, err := reconciler.findCloneSourcePod(testPvc)
|
||||
Expect(sourcePod).To(BeNil())
|
||||
_, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: "testPvc1", Namespace: "default"}})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
By("Verifying source pod exists")
|
||||
sourcePod, err = reconciler.findCloneSourcePod(testPvc)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(sourcePod.GetLabels()[CloneUniqueID]).To(Equal("default-testPvc1-source-pod"))
|
||||
By("Verifying the PVC now has a finalizer")
|
||||
err = reconciler.Client.Get(context.TODO(), types.NamespacedName{Name: "testPvc1", Namespace: "default"}, testPvc)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(reconciler.hasFinalizer(testPvc, cloneSourcePodFinalizer)).To(BeTrue())
|
||||
By("Updating the PVC to completed")
|
||||
testPvc = createPvc("testPvc1", "default", map[string]string{
|
||||
AnnCloneRequest: "default/source", AnnPodReady: "true", AnnCloneToken: "foobaz", AnnUploadClientName: "uploadclient", AnnPodPhase: string(corev1.PodSucceeded)}, nil)
|
||||
reconciler.Client.Update(context.TODO(), testPvc)
|
||||
_, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: "testPvc1", Namespace: "default"}})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = reconciler.Client.Get(context.TODO(), types.NamespacedName{Name: "testPvc1", Namespace: "default"}, testPvc)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(testPvc.GetAnnotations()[AnnCloneOf]).To(Equal("true"))
|
||||
sourcePod, err = reconciler.findCloneSourcePod(testPvc)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(sourcePod).ToNot(BeNil())
|
||||
_, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: "testPvc1", Namespace: "default"}})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
By("Checking error event recorded")
|
||||
event := <-reconciler.recorder.(*record.FakeRecorder).Events
|
||||
Expect(event).To(ContainSubstring("Clone Successful"))
|
||||
sourcePod, err = reconciler.findCloneSourcePod(testPvc)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(sourcePod).To(BeNil())
|
||||
})
|
||||
|
||||
It("Should update the cloneof when complete, block mode", func() {
|
||||
testPvc := createBlockPvc("testPvc1", "default", map[string]string{
|
||||
AnnCloneRequest: "default/source", AnnPodReady: "true", AnnCloneToken: "foobaz", AnnUploadClientName: "uploadclient"}, nil)
|
||||
reconciler = createCloneReconciler(testPvc, createBlockPvc("source", "default", map[string]string{}, nil))
|
||||
By("Setting up the match token")
|
||||
reconciler.tokenValidator.(*FakeValidator).match = "foobaz"
|
||||
reconciler.tokenValidator.(*FakeValidator).Name = "source"
|
||||
reconciler.tokenValidator.(*FakeValidator).Namespace = "default"
|
||||
reconciler.tokenValidator.(*FakeValidator).Params["targetNamespace"] = "default"
|
||||
reconciler.tokenValidator.(*FakeValidator).Params["targetName"] = "testPvc1"
|
||||
By("Verifying no source pod exists")
|
||||
sourcePod, err := reconciler.findCloneSourcePod(testPvc)
|
||||
Expect(sourcePod).To(BeNil())
|
||||
_, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: "testPvc1", Namespace: "default"}})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
By("Verifying source pod exists")
|
||||
sourcePod, err = reconciler.findCloneSourcePod(testPvc)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(sourcePod.GetLabels()[CloneUniqueID]).To(Equal("default-testPvc1-source-pod"))
|
||||
By("Verifying the PVC now has a finalizer")
|
||||
err = reconciler.Client.Get(context.TODO(), types.NamespacedName{Name: "testPvc1", Namespace: "default"}, testPvc)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(reconciler.hasFinalizer(testPvc, cloneSourcePodFinalizer)).To(BeTrue())
|
||||
By("Updating the PVC to completed")
|
||||
testPvc = createPvc("testPvc1", "default", map[string]string{
|
||||
AnnCloneRequest: "default/source", AnnPodReady: "true", AnnCloneToken: "foobaz", AnnUploadClientName: "uploadclient", AnnPodPhase: string(corev1.PodSucceeded)}, nil)
|
||||
reconciler.Client.Update(context.TODO(), testPvc)
|
||||
_, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: "testPvc1", Namespace: "default"}})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = reconciler.Client.Get(context.TODO(), types.NamespacedName{Name: "testPvc1", Namespace: "default"}, testPvc)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(testPvc.GetAnnotations()[AnnCloneOf]).To(Equal("true"))
|
||||
sourcePod, err = reconciler.findCloneSourcePod(testPvc)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(sourcePod).ToNot(BeNil())
|
||||
_, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: "testPvc1", Namespace: "default"}})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
By("Checking error event recorded")
|
||||
event := <-reconciler.recorder.(*record.FakeRecorder).Events
|
||||
Expect(event).To(ContainSubstring("Clone Successful"))
|
||||
sourcePod, err = reconciler.findCloneSourcePod(testPvc)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(sourcePod).To(BeNil())
|
||||
})
|
||||
|
||||
It("Should error when source and target volume modes do not match (fs->block)", func() {
|
||||
testPvc := createBlockPvc("testPvc1", "default", map[string]string{
|
||||
AnnCloneRequest: "default/source", AnnPodReady: "true", AnnCloneToken: "foobaz", AnnUploadClientName: "uploadclient"}, nil)
|
||||
reconciler = createCloneReconciler(testPvc, createPvc("source", "default", map[string]string{}, nil))
|
||||
By("Setting up the match token")
|
||||
reconciler.tokenValidator.(*FakeValidator).match = "foobaz"
|
||||
reconciler.tokenValidator.(*FakeValidator).Name = "source"
|
||||
reconciler.tokenValidator.(*FakeValidator).Namespace = "default"
|
||||
reconciler.tokenValidator.(*FakeValidator).Params["targetNamespace"] = "default"
|
||||
reconciler.tokenValidator.(*FakeValidator).Params["targetName"] = "testPvc1"
|
||||
By("Verifying no source pod exists")
|
||||
sourcePod, err := reconciler.findCloneSourcePod(testPvc)
|
||||
Expect(sourcePod).To(BeNil())
|
||||
_, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: "testPvc1", Namespace: "default"}})
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("source volumeMode (Filesystem) and target volumeMode (Block) do not match"))
|
||||
})
|
||||
|
||||
It("Should error when source and target volume modes do not match (fs->block)", func() {
|
||||
testPvc := createPvc("testPvc1", "default", map[string]string{
|
||||
AnnCloneRequest: "default/source", AnnPodReady: "true", AnnCloneToken: "foobaz", AnnUploadClientName: "uploadclient"}, nil)
|
||||
reconciler = createCloneReconciler(testPvc, createBlockPvc("source", "default", map[string]string{}, nil))
|
||||
By("Setting up the match token")
|
||||
reconciler.tokenValidator.(*FakeValidator).match = "foobaz"
|
||||
reconciler.tokenValidator.(*FakeValidator).Name = "source"
|
||||
reconciler.tokenValidator.(*FakeValidator).Namespace = "default"
|
||||
reconciler.tokenValidator.(*FakeValidator).Params["targetNamespace"] = "default"
|
||||
reconciler.tokenValidator.(*FakeValidator).Params["targetName"] = "testPvc1"
|
||||
By("Verifying no source pod exists")
|
||||
sourcePod, err := reconciler.findCloneSourcePod(testPvc)
|
||||
Expect(sourcePod).To(BeNil())
|
||||
_, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: "testPvc1", Namespace: "default"}})
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("source volumeMode (Block) and target volumeMode (Filesystem) do not match"))
|
||||
})
|
||||
})
|
||||
|
||||
var _ = Describe("ParseCloneRequestAnnotation", func() {
|
||||
It("should return false/empty/empty if no annotation exists", func() {
|
||||
pvc := createPvc("testPvc1", "default", map[string]string{}, nil)
|
||||
exists, ns, name := ParseCloneRequestAnnotation(pvc)
|
||||
Expect(exists).To(BeFalse())
|
||||
Expect(ns).To(BeEmpty())
|
||||
Expect(name).To(BeEmpty())
|
||||
})
|
||||
|
||||
It("should return false/empty/empty if annotation is invalid", func() {
|
||||
pvc := createPvc("testPvc1", "default", map[string]string{AnnCloneRequest: "default"}, nil)
|
||||
exists, ns, name := ParseCloneRequestAnnotation(pvc)
|
||||
Expect(exists).To(BeFalse())
|
||||
Expect(ns).To(BeEmpty())
|
||||
Expect(name).To(BeEmpty())
|
||||
pvc = createPvc("testPvc1", "default", map[string]string{AnnCloneRequest: "default/test/something"}, nil)
|
||||
exists, ns, name = ParseCloneRequestAnnotation(pvc)
|
||||
Expect(exists).To(BeFalse())
|
||||
Expect(ns).To(BeEmpty())
|
||||
Expect(name).To(BeEmpty())
|
||||
})
|
||||
|
||||
It("should return true/default/test if annotation is valid", func() {
|
||||
pvc := createPvc("testPvc1", "default", map[string]string{AnnCloneRequest: "default/test"}, nil)
|
||||
exists, ns, name := ParseCloneRequestAnnotation(pvc)
|
||||
Expect(exists).To(BeTrue())
|
||||
Expect(ns).To(Equal("default"))
|
||||
Expect(name).To(Equal("test"))
|
||||
})
|
||||
})
|
||||
|
||||
var _ = Describe("CloneSourcePodName", func() {
|
||||
It("Should be unique and deterministic", func() {
|
||||
pvc1d := createPvc("testPvc1", "default", map[string]string{AnnCloneRequest: "default/test"}, nil)
|
||||
pvc1d2 := createPvc("testPvc1", "default2", map[string]string{AnnCloneRequest: "default/test"}, nil)
|
||||
pvc2d1 := createPvc("testPvc2", "default", map[string]string{AnnCloneRequest: "default/test"}, nil)
|
||||
pvcSimilar := createPvc("testP", "vc1default", map[string]string{AnnCloneRequest: "default/test"}, nil)
|
||||
podName1d := getCloneSourcePodName(pvc1d)
|
||||
podName1dagain := getCloneSourcePodName(pvc1d)
|
||||
By("Verifying rerunning getloneSourcePodName on same PVC I get same name")
|
||||
Expect(podName1d).To(Equal(podName1dagain))
|
||||
By("Verifying different namespace but same name I get different pod name")
|
||||
podName1d2 := getCloneSourcePodName(pvc1d2)
|
||||
Expect(podName1d).NotTo(Equal(podName1d2))
|
||||
By("Verifying same namespace but different name I get different pod name")
|
||||
podName2d1 := getCloneSourcePodName(pvc2d1)
|
||||
Expect(podName1d).NotTo(Equal(podName2d1))
|
||||
By("Verifying concatenated ns/name of same characters I get different pod name")
|
||||
podNameSimilar := getCloneSourcePodName(pvcSimilar)
|
||||
Expect(podName1d).NotTo(Equal(podNameSimilar))
|
||||
})
|
||||
})
|
||||
|
||||
func createCloneReconciler(objects ...runtime.Object) *CloneReconciler {
|
||||
objs := []runtime.Object{}
|
||||
objs = append(objs, objects...)
|
||||
cdiConfig := MakeEmptyCDIConfigSpec(common.ConfigName)
|
||||
cdiConfig.Status = cdiv1.CDIConfigStatus{
|
||||
DefaultPodResourceRequirements: createDefaultPodResourceRequirements(int64(0), int64(0), int64(0), int64(0)),
|
||||
}
|
||||
objs = append(objs, cdiConfig)
|
||||
|
||||
// Register operator types with the runtime scheme.
|
||||
s := scheme.Scheme
|
||||
cdiv1.AddToScheme(s)
|
||||
rec := record.NewFakeRecorder(1)
|
||||
// Create a fake client to mock API calls.
|
||||
cl := fake.NewFakeClientWithScheme(s, objs...)
|
||||
k8sfakeclientset := k8sfake.NewSimpleClientset(createStorageClass(testStorageClass, nil))
|
||||
|
||||
// Create a ReconcileMemcached object with the scheme and fake client.
|
||||
return &CloneReconciler{
|
||||
Client: cl,
|
||||
Scheme: s,
|
||||
Log: log,
|
||||
recorder: rec,
|
||||
tokenValidator: &FakeValidator{
|
||||
Params: make(map[string]string, 0),
|
||||
},
|
||||
K8sClient: k8sfakeclientset,
|
||||
Image: testImage,
|
||||
clientCertGenerator: &fakeCertGenerator{},
|
||||
serverCAFetcher: &fetcher.MemCertBundleFetcher{Bundle: []byte("baz")},
|
||||
}
|
||||
}
|
||||
|
||||
func testCreateClientKeyAndCert(ca *triple.KeyPair, commonName string, organizations []string) ([]byte, []byte, error) {
|
||||
return []byte("foo"), []byte("bar"), nil
|
||||
}
|
||||
|
||||
func getAPIServerKey() *rsa.PrivateKey {
|
||||
apiServerKeyOnce.Do(func() {
|
||||
apiServerKey, _ = rsa.GenerateKey(rand.Reader, 2048)
|
||||
@ -44,229 +395,200 @@ func getAPIServerKey() *rsa.PrivateKey {
|
||||
return apiServerKey
|
||||
}
|
||||
|
||||
func newCloneFixture(t *testing.T) *CloneFixture {
|
||||
f := &CloneFixture{
|
||||
ControllerFixture: *newControllerFixture(t),
|
||||
func (v *FakeValidator) Validate(value string) (*token.Payload, error) {
|
||||
if value != v.match {
|
||||
return nil, fmt.Errorf("Token does not match expected")
|
||||
}
|
||||
return f
|
||||
resource := metav1.GroupVersionResource{
|
||||
Resource: "persistentvolumeclaims",
|
||||
}
|
||||
return &token.Payload{
|
||||
Name: v.Name,
|
||||
Namespace: v.Namespace,
|
||||
Operation: token.OperationClone,
|
||||
Resource: resource,
|
||||
Params: v.Params,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *CloneFixture) newCloneController() *CloneController {
|
||||
v := newCloneTokenValidator(&getAPIServerKey().PublicKey)
|
||||
// createClonePvc returns a clone-target PVC for the given source/target pair
// with a default requested size of "1G"; see createClonePvcWithSize.
func createClonePvc(sourceNamespace, sourceName, targetNamespace, targetName string, annotations, labels map[string]string) *corev1.PersistentVolumeClaim {
	return createClonePvcWithSize(sourceNamespace, sourceName, targetNamespace, targetName, annotations, labels, "1G")
}
|
||||
|
||||
storageClassName := "test"
|
||||
var cdiObjs []runtime.Object
|
||||
cdiObjs = append(cdiObjs, createCDIConfigWithStorageClass(common.ConfigName, storageClassName))
|
||||
func createClonePvcWithSize(sourceNamespace, sourceName, targetNamespace, targetName string, annotations, labels map[string]string, size string) *corev1.PersistentVolumeClaim {
|
||||
tokenData := &token.Payload{
|
||||
Operation: token.OperationClone,
|
||||
Name: sourceName,
|
||||
Namespace: sourceNamespace,
|
||||
Resource: metav1.GroupVersionResource{
|
||||
Group: "",
|
||||
Version: "v1",
|
||||
Resource: "persistentvolumeclaims",
|
||||
},
|
||||
Params: map[string]string{
|
||||
"targetNamespace": targetNamespace,
|
||||
"targetName": targetName,
|
||||
},
|
||||
}
|
||||
|
||||
return &CloneController{
|
||||
Controller: *f.newController("test/mycloneimage", "Always", "5"),
|
||||
recorder: &record.FakeRecorder{},
|
||||
tokenValidator: v,
|
||||
cdiClient: cdifake.NewSimpleClientset(cdiObjs...),
|
||||
clientCertGenerator: &fakeCertGenerator{},
|
||||
serverCAFetcher: &fetcher.MemCertBundleFetcher{Bundle: []byte("baz")},
|
||||
g := token.NewGenerator(common.CloneTokenIssuer, getAPIServerKey(), 5*time.Minute)
|
||||
|
||||
tokenString, err := g.Generate(tokenData)
|
||||
if err != nil {
|
||||
panic("error generating token")
|
||||
}
|
||||
|
||||
if annotations == nil {
|
||||
annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
annotations[AnnCloneRequest] = fmt.Sprintf("%s/%s", sourceNamespace, sourceName)
|
||||
annotations[AnnCloneToken] = tokenString
|
||||
annotations[AnnUploadClientName] = "FOOBAR"
|
||||
|
||||
return &corev1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: targetName,
|
||||
Namespace: targetNamespace,
|
||||
Annotations: annotations,
|
||||
Labels: labels,
|
||||
UID: "pvc-uid",
|
||||
},
|
||||
Spec: corev1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany, corev1.ReadWriteOnce},
|
||||
Resources: corev1.ResourceRequirements{
|
||||
Requests: corev1.ResourceList{
|
||||
corev1.ResourceName(corev1.ResourceStorage): resource.MustParse(size),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (f *CloneFixture) run(pvcName string) {
|
||||
f.runController(pvcName, true, false, false)
|
||||
func createCloneBlockPvc(sourceNamespace, sourceName, targetNamespace, targetName string, annotations, labels map[string]string) *corev1.PersistentVolumeClaim {
|
||||
pvc := createClonePvc(sourceNamespace, sourceName, targetNamespace, targetName, annotations, labels)
|
||||
VolumeMode := corev1.PersistentVolumeBlock
|
||||
pvc.Spec.VolumeMode = &VolumeMode
|
||||
return pvc
|
||||
}
|
||||
|
||||
func (f *CloneFixture) runWithExpectation(pvcName string) {
|
||||
f.runController(pvcName, true, false, true)
|
||||
}
|
||||
|
||||
func (f *CloneFixture) runExpectError(pvcName string) {
|
||||
f.runController(pvcName, true, true, false)
|
||||
}
|
||||
|
||||
func (f *CloneFixture) runController(pvcName string,
|
||||
startInformers bool,
|
||||
expectError bool,
|
||||
withCreateExpectation bool) {
|
||||
c := f.newCloneController()
|
||||
|
||||
if startInformers {
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
go c.pvcInformer.Run(stopCh)
|
||||
go c.podInformer.Run(stopCh)
|
||||
cache.WaitForCacheSync(stopCh, c.pvcInformer.HasSynced)
|
||||
cache.WaitForCacheSync(stopCh, c.pvcInformer.HasSynced)
|
||||
func createSourcePod(pvc *corev1.PersistentVolumeClaim, pvcUID string) *corev1.Pod {
|
||||
_, _, sourcePvcName := ParseCloneRequestAnnotation(pvc)
|
||||
podName := fmt.Sprintf("%s-%s-", common.ClonerSourcePodName, sourcePvcName)
|
||||
pod := &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: podName,
|
||||
Annotations: map[string]string{
|
||||
AnnCreatedBy: "yes",
|
||||
AnnOwnerRef: fmt.Sprintf("%s/%s", pvc.Namespace, pvc.Name),
|
||||
},
|
||||
Labels: map[string]string{
|
||||
common.CDILabelKey: common.CDILabelValue, //filtered by the podInformer
|
||||
common.CDIComponentLabel: common.ClonerSourcePodName,
|
||||
// this label is used when searching for a pvc's cloner source pod.
|
||||
CloneUniqueID: pvcUID + "-source-pod",
|
||||
common.PrometheusLabel: "",
|
||||
},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
SecurityContext: &corev1.PodSecurityContext{
|
||||
RunAsUser: &[]int64{0}[0],
|
||||
},
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: common.ClonerSourcePodName,
|
||||
Image: "test/mycloneimage",
|
||||
ImagePullPolicy: corev1.PullAlways,
|
||||
Env: []corev1.EnvVar{
|
||||
{
|
||||
Name: "CLIENT_KEY",
|
||||
Value: "bar",
|
||||
},
|
||||
{
|
||||
Name: "CLIENT_CERT",
|
||||
Value: "foo",
|
||||
},
|
||||
{
|
||||
Name: "SERVER_CA_CERT",
|
||||
Value: string("baz"),
|
||||
},
|
||||
{
|
||||
Name: "UPLOAD_URL",
|
||||
Value: GetUploadServerURL(pvc.Namespace, pvc.Name, common.UploadPathSync),
|
||||
},
|
||||
{
|
||||
Name: common.OwnerUID,
|
||||
Value: "",
|
||||
},
|
||||
},
|
||||
Ports: []corev1.ContainerPort{
|
||||
{
|
||||
Name: "metrics",
|
||||
ContainerPort: 8443,
|
||||
Protocol: corev1.ProtocolTCP,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: corev1.RestartPolicyOnFailure,
|
||||
Volumes: []corev1.Volume{
|
||||
{
|
||||
Name: DataVolName,
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: sourcePvcName,
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if withCreateExpectation {
|
||||
c.raisePodCreate(pvcName)
|
||||
}
|
||||
err := c.syncPvc(pvcName)
|
||||
if !expectError && err != nil {
|
||||
f.t.Errorf("error syncing pvc: %s: %v", pvcName, err)
|
||||
} else if expectError && err == nil {
|
||||
f.t.Error("expected error syncing pvc, got nil")
|
||||
var volumeMode corev1.PersistentVolumeMode
|
||||
var addVars []corev1.EnvVar
|
||||
|
||||
if pvc.Spec.VolumeMode != nil {
|
||||
volumeMode = *pvc.Spec.VolumeMode
|
||||
} else {
|
||||
volumeMode = corev1.PersistentVolumeFilesystem
|
||||
}
|
||||
|
||||
k8sActions := filterActions(f.kubeclient.Actions())
|
||||
for i, action := range k8sActions {
|
||||
if len(f.kubeactions) < i+1 {
|
||||
f.t.Errorf("%d unexpected actions: %+v", len(k8sActions)-len(f.kubeactions), k8sActions[i:])
|
||||
break
|
||||
if volumeMode == corev1.PersistentVolumeBlock {
|
||||
pod.Spec.Containers[0].VolumeDevices = addVolumeDevices()
|
||||
addVars = []corev1.EnvVar{
|
||||
{
|
||||
Name: "VOLUME_MODE",
|
||||
Value: "block",
|
||||
},
|
||||
{
|
||||
Name: "MOUNT_POINT",
|
||||
Value: common.WriteBlockPath,
|
||||
},
|
||||
}
|
||||
pod.Spec.SecurityContext = &corev1.PodSecurityContext{
|
||||
RunAsUser: &[]int64{0}[0],
|
||||
}
|
||||
} else {
|
||||
pod.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{
|
||||
{
|
||||
Name: DataVolName,
|
||||
MountPath: common.ClonerMountPath,
|
||||
},
|
||||
}
|
||||
addVars = []corev1.EnvVar{
|
||||
{
|
||||
Name: "VOLUME_MODE",
|
||||
Value: "filesystem",
|
||||
},
|
||||
{
|
||||
Name: "MOUNT_POINT",
|
||||
Value: common.ClonerMountPath,
|
||||
},
|
||||
}
|
||||
|
||||
expectedAction := f.kubeactions[i]
|
||||
checkAction(expectedAction, action, f.t)
|
||||
}
|
||||
|
||||
if len(f.kubeactions) > len(k8sActions) {
|
||||
f.t.Errorf("%d additional expected actions:%+v", len(f.kubeactions)-len(k8sActions), f.kubeactions[len(k8sActions):])
|
||||
}
|
||||
}
|
||||
|
||||
func TestWaitsTargetRunning(t *testing.T) {
|
||||
f := newCloneFixture(t)
|
||||
pvc := createClonePvc("source-ns", "golden-pvc", "target-ns", "target-pvc", nil, nil)
|
||||
pvc.Annotations[AnnPodReady] = "false"
|
||||
|
||||
f.pvcLister = append(f.pvcLister, pvc)
|
||||
f.kubeobjects = append(f.kubeobjects, pvc)
|
||||
f.run(getPvcKey(pvc, t))
|
||||
}
|
||||
|
||||
func TestWaitsTargetRunningNoAnnotation(t *testing.T) {
|
||||
f := newCloneFixture(t)
|
||||
pvc := createClonePvc("source-ns", "golden-pvc", "target-ns", "target-pvc", nil, nil)
|
||||
|
||||
f.pvcLister = append(f.pvcLister, pvc)
|
||||
f.kubeobjects = append(f.kubeobjects, pvc)
|
||||
f.run(getPvcKey(pvc, t))
|
||||
}
|
||||
|
||||
func TestCreatesSourcePod(t *testing.T) {
|
||||
f := newCloneFixture(t)
|
||||
sourcePvc := createPvc("golden-pvc", "source-ns", nil, nil)
|
||||
pvc := createClonePvc("source-ns", "golden-pvc", "target-ns", "target-pvc", nil, nil)
|
||||
pvc.Annotations[AnnPodReady] = "true"
|
||||
|
||||
f.pvcLister = append(f.pvcLister, sourcePvc, pvc)
|
||||
f.kubeobjects = append(f.kubeobjects, sourcePvc, pvc)
|
||||
|
||||
id := string(pvc.GetUID())
|
||||
expSourcePod := createSourcePod(pvc, id)
|
||||
pvcUpdate := pvc.DeepCopy()
|
||||
pvcUpdate.Finalizers = []string{cloneSourcePodFinalizer}
|
||||
f.expectUpdatePvcAction(pvcUpdate)
|
||||
f.expectCreatePodAction(expSourcePod)
|
||||
|
||||
f.run(getPvcKey(pvc, t))
|
||||
}
|
||||
|
||||
func TestAddsCloneOfAnnotation(t *testing.T) {
|
||||
f := newCloneFixture(t)
|
||||
pvc := createClonePvc("source-ns", "golden-pvc", "target-ns", "target-pvc", nil, nil)
|
||||
pvc.Annotations[AnnPodReady] = "true"
|
||||
pvc.Annotations[AnnPodPhase] = string(corev1.PodSucceeded)
|
||||
id := string(pvc.GetUID())
|
||||
pod := createSourcePod(pvc, id)
|
||||
pod.Namespace = "source-ns"
|
||||
|
||||
f.pvcLister = append(f.pvcLister, pvc)
|
||||
f.podLister = append(f.podLister, pod)
|
||||
f.kubeobjects = append(f.kubeobjects, pvc, pod)
|
||||
|
||||
updatedPVC := pvc.DeepCopy()
|
||||
updatedPVC.Annotations[AnnCloneOf] = "true"
|
||||
f.expectUpdatePvcAction(updatedPVC)
|
||||
f.run(getPvcKey(pvc, t))
|
||||
}
|
||||
|
||||
func TestDeletesSourcePodAndFinalizer(t *testing.T) {
|
||||
f := newCloneFixture(t)
|
||||
pvc := createClonePvc("source-ns", "golden-pvc", "target-ns", "target-pvc", nil, nil)
|
||||
pvc.Annotations[AnnCloneOf] = "true"
|
||||
pvc.Finalizers = []string{cloneSourcePodFinalizer}
|
||||
id := string(pvc.GetUID())
|
||||
pod := createSourcePod(pvc, id)
|
||||
pod.Name = pod.GenerateName + "random"
|
||||
pod.Namespace = "source-ns"
|
||||
|
||||
f.pvcLister = append(f.pvcLister, pvc)
|
||||
f.podLister = append(f.podLister, pod)
|
||||
f.kubeobjects = append(f.kubeobjects, pvc, pod)
|
||||
|
||||
pvcUpdate := pvc.DeepCopy()
|
||||
pvcUpdate.Finalizers = nil
|
||||
|
||||
f.expectDeletePodAction(pod)
|
||||
f.expectUpdatePvcAction(pvcUpdate)
|
||||
f.run(getPvcKey(pvc, t))
|
||||
}
|
||||
|
||||
func TestSourceDoesNotExist(t *testing.T) {
|
||||
f := newCloneFixture(t)
|
||||
pvc := createClonePvc("source-ns", "golden-pvc", "target-ns", "target-pvc", nil, nil)
|
||||
pvc.Annotations[AnnPodReady] = "true"
|
||||
|
||||
f.pvcLister = append(f.pvcLister, pvc)
|
||||
f.kubeobjects = append(f.kubeobjects, pvc)
|
||||
f.runExpectError(getPvcKey(pvc, t))
|
||||
}
|
||||
|
||||
func TestExpectationsNotMet(t *testing.T) {
|
||||
f := newCloneFixture(t)
|
||||
pvc := createClonePvc("source-ns", "golden-pvc", "target-ns", "target-pvc", nil, nil)
|
||||
pvc.Annotations[AnnPodReady] = "true"
|
||||
|
||||
f.pvcLister = append(f.pvcLister, pvc)
|
||||
f.kubeobjects = append(f.kubeobjects, pvc)
|
||||
f.runWithExpectation(getPvcKey(pvc, t))
|
||||
}
|
||||
|
||||
// Verifies that one cannot clone a fs pvc to a block pvc
|
||||
func TestCannotCloneFSToBlockPvc(t *testing.T) {
|
||||
f := newCloneFixture(t)
|
||||
sourcePvc := createPvc("golden-pvc", "source-ns", nil, nil)
|
||||
pvc := createCloneBlockPvc("source-ns", "golden-pvc", "target-ns", "target-pvc", nil, nil)
|
||||
pvc.Annotations[AnnPodReady] = "true"
|
||||
|
||||
f.pvcLister = append(f.pvcLister, sourcePvc, pvc)
|
||||
f.kubeobjects = append(f.kubeobjects, sourcePvc, pvc)
|
||||
f.runExpectError(getPvcKey(pvc, t))
|
||||
}
|
||||
|
||||
// Verifies that one cannot clone a fs pvc to a block pvc
|
||||
func TestCannotCloneBlockToFSPvc(t *testing.T) {
|
||||
f := newCloneFixture(t)
|
||||
sourcePvc := createBlockPvc("golden-pvc", "source-ns", nil, nil)
|
||||
pvc := createClonePvc("source-ns", "golden-pvc", "target-ns", "target-pvc", nil, nil)
|
||||
pvc.Annotations[AnnPodReady] = "true"
|
||||
|
||||
f.pvcLister = append(f.pvcLister, sourcePvc)
|
||||
f.pvcLister = append(f.pvcLister, pvc)
|
||||
f.kubeobjects = append(f.kubeobjects, sourcePvc)
|
||||
f.kubeobjects = append(f.kubeobjects, pvc)
|
||||
f.runExpectError(getPvcKey(pvc, t))
|
||||
}
|
||||
|
||||
// Verifies that one cannot clone a fs pvc to a block pvc
|
||||
func TestCannotCloneIfTargetIsSmaller(t *testing.T) {
|
||||
f := newCloneFixture(t)
|
||||
sourcePvc := createPvc("golden-pvc", "source-ns", nil, nil)
|
||||
pvc := createClonePvcWithSize("source-ns", "golden-pvc", "target-ns", "target-pvc", nil, nil, "500M")
|
||||
pvc.Annotations[AnnPodReady] = "true"
|
||||
|
||||
f.pvcLister = append(f.pvcLister, sourcePvc)
|
||||
f.pvcLister = append(f.pvcLister, pvc)
|
||||
f.kubeobjects = append(f.kubeobjects, sourcePvc)
|
||||
f.kubeobjects = append(f.kubeobjects, pvc)
|
||||
f.runExpectError(getPvcKey(pvc, t))
|
||||
}
|
||||
|
||||
// verifies no work is done on pvcs without our annotations
|
||||
func TestCloneIgnorePVC(t *testing.T) {
|
||||
f := newCloneFixture(t)
|
||||
pvc := createPvc("target-pvc", "target-ns", nil, nil)
|
||||
|
||||
f.pvcLister = append(f.pvcLister, pvc)
|
||||
f.kubeobjects = append(f.kubeobjects, pvc)
|
||||
|
||||
f.run(getPvcKey(pvc, t))
|
||||
pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, addVars...)
|
||||
|
||||
return pod
|
||||
}
|
||||
|
@ -1,256 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/klog"
|
||||
|
||||
"kubevirt.io/containerized-data-importer/pkg/expectations"
|
||||
)
|
||||
|
||||
const (
|
||||
// AnnAPIGroup is the APIGroup for CDI
|
||||
AnnAPIGroup = "cdi.kubevirt.io"
|
||||
// AnnCreatedBy is a pod annotation indicating if the pod was created by the PVC
|
||||
AnnCreatedBy = AnnAPIGroup + "/storage.createdByController"
|
||||
// AnnPodPhase is a PVC annotation indicating the related pod progress (phase)
|
||||
AnnPodPhase = AnnAPIGroup + "/storage.pod.phase"
|
||||
// AnnPodReady tells whether the pod is ready
|
||||
AnnPodReady = AnnAPIGroup + "/storage.pod.ready"
|
||||
// AnnOwnerRef is used when owner is in a different namespace
|
||||
AnnOwnerRef = AnnAPIGroup + "/storage.ownerRef"
|
||||
)
|
||||
|
||||
//Controller is a struct that contains common information and functionality used by all CDI controllers.
|
||||
type Controller struct {
|
||||
clientset kubernetes.Interface
|
||||
queue workqueue.RateLimitingInterface
|
||||
pvcInformer, podInformer cache.SharedIndexInformer
|
||||
pvcLister corelisters.PersistentVolumeClaimLister
|
||||
podLister corelisters.PodLister
|
||||
pvcsSynced cache.InformerSynced
|
||||
podsSynced cache.InformerSynced
|
||||
image string
|
||||
pullPolicy string // Options: IfNotPresent, Always, or Never
|
||||
verbose string // verbose levels: 1, 2, ...
|
||||
podExpectations *expectations.UIDTrackingControllerExpectations
|
||||
}
|
||||
|
||||
//NewController is called when we instantiate any CDI controller.
|
||||
func NewController(client kubernetes.Interface,
|
||||
pvcInformer coreinformers.PersistentVolumeClaimInformer,
|
||||
podInformer coreinformers.PodInformer,
|
||||
image string,
|
||||
pullPolicy string,
|
||||
verbose string) *Controller {
|
||||
c := &Controller{
|
||||
clientset: client,
|
||||
queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
|
||||
pvcInformer: pvcInformer.Informer(),
|
||||
podInformer: podInformer.Informer(),
|
||||
pvcLister: pvcInformer.Lister(),
|
||||
podLister: podInformer.Lister(),
|
||||
pvcsSynced: pvcInformer.Informer().HasSynced,
|
||||
podsSynced: podInformer.Informer().HasSynced,
|
||||
image: image,
|
||||
pullPolicy: pullPolicy,
|
||||
verbose: verbose,
|
||||
podExpectations: expectations.NewUIDTrackingControllerExpectations(expectations.NewControllerExpectations()),
|
||||
}
|
||||
|
||||
// Bind the pvc SharedIndexInformer to the pvc queue
|
||||
c.pvcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: c.enqueuePVC,
|
||||
UpdateFunc: func(old, new interface{}) {
|
||||
c.enqueuePVC(new)
|
||||
},
|
||||
DeleteFunc: c.enqueuePVC,
|
||||
})
|
||||
|
||||
// Bind the pod SharedIndexInformer to the pod queue
|
||||
c.podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: c.handlePodAdd,
|
||||
UpdateFunc: func(old, new interface{}) {
|
||||
newDepl := new.(*v1.Pod)
|
||||
oldDepl := old.(*v1.Pod)
|
||||
if newDepl.ResourceVersion == oldDepl.ResourceVersion {
|
||||
// Periodic resync will send update events for all known PVCs.
|
||||
// Two different versions of the same PVCs will always have different RVs.
|
||||
return
|
||||
}
|
||||
c.handlePodUpdate(new)
|
||||
},
|
||||
DeleteFunc: c.handlePodDelete,
|
||||
})
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Controller) handlePodAdd(obj interface{}) {
|
||||
c.handlePodObject(obj, "add")
|
||||
}
|
||||
func (c *Controller) handlePodUpdate(obj interface{}) {
|
||||
c.handlePodObject(obj, "update")
|
||||
}
|
||||
func (c *Controller) handlePodDelete(obj interface{}) {
|
||||
c.handlePodObject(obj, "delete")
|
||||
}
|
||||
|
||||
func (c *Controller) observePodCreate(pvcKey string) {
|
||||
c.podExpectations.CreationObserved(pvcKey)
|
||||
}
|
||||
|
||||
func (c *Controller) handlePodObject(obj interface{}, verb string) {
|
||||
var object metav1.Object
|
||||
var ok bool
|
||||
|
||||
if object, ok = obj.(metav1.Object); !ok {
|
||||
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
|
||||
if !ok {
|
||||
runtime.HandleError(errors.Errorf("error decoding object, invalid type"))
|
||||
return
|
||||
}
|
||||
object, ok = tombstone.Obj.(metav1.Object)
|
||||
if !ok {
|
||||
runtime.HandleError(errors.Errorf("error decoding object tombstone, invalid type"))
|
||||
return
|
||||
}
|
||||
klog.V(3).Infof("Recovered deleted object '%s' from tombstone", object.GetName())
|
||||
}
|
||||
|
||||
_, createdByUs := object.GetAnnotations()[AnnCreatedBy]
|
||||
if !createdByUs {
|
||||
klog.V(3).Infof("Ignoring pod %s/%s, as it's not created by us", object.GetNamespace(), object.GetName())
|
||||
return
|
||||
}
|
||||
|
||||
klog.V(3).Infof("Processing object: %s/%s", object.GetNamespace(), object.GetName())
|
||||
|
||||
var pvc *v1.PersistentVolumeClaim
|
||||
var err error
|
||||
|
||||
if ownerRefObj := metav1.GetControllerOf(object); ownerRefObj != nil {
|
||||
if ownerRefObj.Kind == "PersistentVolumeClaim" {
|
||||
pvc, err = c.pvcLister.PersistentVolumeClaims(object.GetNamespace()).Get(ownerRefObj.Name)
|
||||
if err != nil {
|
||||
klog.V(3).Infof("ignoring orphaned object '%s' of pvc '%s'", object.GetSelfLink(), ownerRefObj.Name)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if pvc == nil {
|
||||
ownerRefAnno, ok := object.GetAnnotations()[AnnOwnerRef]
|
||||
if ok {
|
||||
pvc, ok, err = c.pvcFromKey(ownerRefAnno)
|
||||
if err != nil {
|
||||
runtime.HandleError(errors.Wrapf(err, "error getting PVC %s", ownerRefAnno))
|
||||
return
|
||||
} else if !ok {
|
||||
runtime.HandleError(errors.Errorf("error getting PVC %s from ownerref", ownerRefAnno))
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if pvc == nil {
|
||||
klog.V(3).Infof("Object: %s/%s has unexpected owner and no ownerRef annotation", object.GetNamespace(), object.GetName())
|
||||
return
|
||||
}
|
||||
|
||||
klog.V(3).Infof("Will queue PVC %s/%s in response to %s %s/%s", pvc.Namespace, pvc.Name, verb, object.GetNamespace(), object.GetName())
|
||||
|
||||
if verb == "add" {
|
||||
pvcKey, err := cache.MetaNamespaceKeyFunc(pvc)
|
||||
if err != nil {
|
||||
runtime.HandleError(err)
|
||||
return
|
||||
}
|
||||
c.observePodCreate(pvcKey)
|
||||
}
|
||||
|
||||
c.enqueuePVC(pvc)
|
||||
}
|
||||
|
||||
func (c *Controller) enqueuePVC(obj interface{}) {
|
||||
var key string
|
||||
var err error
|
||||
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
|
||||
runtime.HandleError(err)
|
||||
return
|
||||
}
|
||||
c.queue.AddRateLimited(key)
|
||||
}
|
||||
|
||||
//Run is being called from cdi controllers
|
||||
func (c *Controller) run(threadiness int, stopCh <-chan struct{}, f func()) error { //*CloneContorler
|
||||
defer func() {
|
||||
c.queue.ShutDown()
|
||||
}()
|
||||
klog.V(3).Infoln("Starting cdi controller Run loop")
|
||||
if threadiness < 1 {
|
||||
return errors.Errorf("expected >0 threads, got %d", threadiness)
|
||||
}
|
||||
|
||||
if !cache.WaitForCacheSync(stopCh, c.pvcInformer.HasSynced) {
|
||||
return errors.New("Timeout waiting for pvc cache sync")
|
||||
}
|
||||
if !cache.WaitForCacheSync(stopCh, c.podInformer.HasSynced) {
|
||||
return errors.New("Timeout waiting for pod cache sync")
|
||||
}
|
||||
|
||||
klog.V(3).Infoln("Controller cache has synced")
|
||||
for i := 0; i < threadiness; i++ {
|
||||
go wait.Until(f, time.Second, stopCh)
|
||||
}
|
||||
<-stopCh
|
||||
return nil
|
||||
}
|
||||
|
||||
// forget the passed-in key for this event and optionally log a message.
|
||||
func (c *Controller) forgetKey(key interface{}, msg string) bool {
|
||||
if len(msg) > 0 {
|
||||
klog.V(3).Info(msg)
|
||||
}
|
||||
c.queue.Forget(key)
|
||||
return true
|
||||
}
|
||||
|
||||
// return a pvc pointer based on the passed-in work queue key.
|
||||
func (c *Controller) pvcFromKey(key string) (*v1.PersistentVolumeClaim, bool, error) {
|
||||
obj, exists, err := c.objFromKey(c.pvcInformer, key)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get pvc object from key")
|
||||
} else if !exists {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
pvc, ok := obj.(*v1.PersistentVolumeClaim)
|
||||
if !ok {
|
||||
return nil, false, errors.New("Object not of type *v1.PersistentVolumeClaim")
|
||||
}
|
||||
return pvc, true, nil
|
||||
}
|
||||
|
||||
func (c *Controller) objFromKey(informer cache.SharedIndexInformer, key string) (interface{}, bool, error) {
|
||||
obj, ok, err := informer.GetIndexer().GetByKey(key)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "error getting interface obj from store")
|
||||
}
|
||||
|
||||
if !ok {
|
||||
return nil, false, nil
|
||||
}
|
||||
return obj, true, nil
|
||||
}
|
@ -1,151 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/appscode/jsonpatch"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
kubeinformers "k8s.io/client-go/informers"
|
||||
k8sfake "k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
)
|
||||
|
||||
type ControllerFixture struct {
|
||||
t *testing.T
|
||||
|
||||
kubeclient *k8sfake.Clientset
|
||||
|
||||
// Objects to put in the store.
|
||||
pvcLister []*corev1.PersistentVolumeClaim
|
||||
podLister []*corev1.Pod
|
||||
|
||||
// Actions expected to happen on the client.
|
||||
kubeactions []core.Action
|
||||
|
||||
// Objects from here preloaded into NewSimpleFake.
|
||||
kubeobjects []runtime.Object
|
||||
}
|
||||
|
||||
func printJSONDiff(objA, objB interface{}) string {
|
||||
aBytes, _ := json.Marshal(objA)
|
||||
bBytes, _ := json.Marshal(objB)
|
||||
patches, _ := jsonpatch.CreatePatch(aBytes, bBytes)
|
||||
pBytes, _ := json.Marshal(patches)
|
||||
return string(pBytes)
|
||||
}
|
||||
|
||||
func newControllerFixture(t *testing.T) *ControllerFixture {
|
||||
f := &ControllerFixture{}
|
||||
f.t = t
|
||||
f.kubeobjects = []runtime.Object{}
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *ControllerFixture) newController(image, pullPolicy, verbose string) *Controller {
|
||||
|
||||
f.kubeclient = k8sfake.NewSimpleClientset(f.kubeobjects...)
|
||||
|
||||
podFactory := kubeinformers.NewSharedInformerFactory(f.kubeclient, noResyncPeriodFunc())
|
||||
pvcFactory := kubeinformers.NewSharedInformerFactory(f.kubeclient, noResyncPeriodFunc())
|
||||
|
||||
podInformer := podFactory.Core().V1().Pods()
|
||||
pvcInformer := pvcFactory.Core().V1().PersistentVolumeClaims()
|
||||
|
||||
c := NewController(f.kubeclient,
|
||||
pvcInformer,
|
||||
podInformer,
|
||||
image,
|
||||
pullPolicy,
|
||||
verbose)
|
||||
|
||||
for _, pod := range f.podLister {
|
||||
c.podInformer.GetIndexer().Add(pod)
|
||||
}
|
||||
|
||||
for _, pvc := range f.pvcLister {
|
||||
c.pvcInformer.GetIndexer().Add(pvc)
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// checkAction verifies that expected and actual actions are equal and both have
|
||||
// same attached resources
|
||||
func checkAction(expected, actual core.Action, t *testing.T) {
|
||||
if !(expected.Matches(actual.GetVerb(), actual.GetResource().Resource) && actual.GetSubresource() == expected.GetSubresource()) {
|
||||
t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expected, actual)
|
||||
return
|
||||
}
|
||||
|
||||
if reflect.TypeOf(actual) != reflect.TypeOf(expected) {
|
||||
t.Errorf("Action has wrong type. Expected: %t. Got: %t", expected, actual)
|
||||
return
|
||||
}
|
||||
|
||||
switch a := actual.(type) {
|
||||
case core.CreateAction:
|
||||
e, _ := expected.(core.CreateAction)
|
||||
expObject := e.GetObject()
|
||||
object := a.GetObject()
|
||||
|
||||
if !reflect.DeepEqual(expObject, object) {
|
||||
t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
|
||||
a.GetVerb(), a.GetResource().Resource, printJSONDiff(expObject, object))
|
||||
}
|
||||
case core.UpdateAction:
|
||||
e, _ := expected.(core.UpdateAction)
|
||||
expObject := e.GetObject()
|
||||
object := a.GetObject()
|
||||
|
||||
if !reflect.DeepEqual(expObject, object) {
|
||||
t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
|
||||
a.GetVerb(), a.GetResource().Resource, printJSONDiff(expObject, object))
|
||||
}
|
||||
case core.PatchAction:
|
||||
e, _ := expected.(core.PatchAction)
|
||||
expPatch := e.GetPatch()
|
||||
patch := a.GetPatch()
|
||||
|
||||
if !reflect.DeepEqual(expPatch, expPatch) {
|
||||
t.Errorf("Action %s %s has wrong patch\nDiff:\n %s",
|
||||
a.GetVerb(), a.GetResource().Resource, printJSONDiff(expPatch, patch))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// filterActions filters list and watch actions for testing resources.
|
||||
// Since list and watch don't change resource state we can filter it to lower
|
||||
// nose level in our tests.
|
||||
func filterActions(actions []core.Action) []core.Action {
|
||||
ret := []core.Action{}
|
||||
for _, action := range actions {
|
||||
if len(action.GetNamespace()) == 0 &&
|
||||
(action.Matches("list", "persistentvolumeclaims") ||
|
||||
action.Matches("watch", "persistentvolumeclaims") ||
|
||||
action.Matches("list", "pods") ||
|
||||
action.Matches("watch", "pods")) {
|
||||
continue
|
||||
}
|
||||
ret = append(ret, action)
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func (f *ControllerFixture) expectCreatePodAction(d *corev1.Pod) {
|
||||
f.kubeactions = append(f.kubeactions, core.NewCreateAction(schema.GroupVersionResource{Resource: "pods", Version: "v1"}, d.Namespace, d))
|
||||
}
|
||||
|
||||
func (f *ControllerFixture) expectUpdatePvcAction(d *corev1.PersistentVolumeClaim) {
|
||||
f.kubeactions = append(f.kubeactions, core.NewUpdateAction(schema.GroupVersionResource{Resource: "persistentvolumeclaims", Version: "v1"}, d.Namespace, d))
|
||||
}
|
||||
|
||||
func (f *ControllerFixture) expectDeletePodAction(p *corev1.Pod) {
|
||||
f.kubeactions = append(f.kubeactions,
|
||||
core.NewDeleteAction(schema.GroupVersionResource{Resource: "pods", Version: "v1"}, p.Namespace, p.Name))
|
||||
}
|
@ -137,7 +137,7 @@ func (r *ImportReconciler) Reconcile(req reconcile.Request) (reconcile.Result, e
|
||||
}
|
||||
|
||||
if !shouldReconcilePVC(pvc) {
|
||||
r.Log.V(1).Info("Should not reconcile this PVC", "pvc.annotation.phase.complete", isPVCComplete(pvc),
|
||||
log.V(1).Info("Should not reconcile this PVC", "pvc.annotation.phase.complete", isPVCComplete(pvc),
|
||||
"pvc.annotations.endpoint", checkPVC(pvc, AnnEndpoint), "pvc.annotations.source", checkPVC(pvc, AnnSource))
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
@ -151,15 +151,15 @@ func (r *ImportReconciler) Reconcile(req reconcile.Request) (reconcile.Result, e
|
||||
pvc.SetAnnotations(make(map[string]string, 0))
|
||||
}
|
||||
pvc.GetAnnotations()[AnnPodPhase] = string(corev1.PodSucceeded)
|
||||
if err := r.updatePVC(pvc); err != nil {
|
||||
if err := r.updatePVC(pvc, log); err != nil {
|
||||
return reconcile.Result{}, errors.WithMessage(err, fmt.Sprintf("could not update pvc %q annotation and/or label", pvc.Name))
|
||||
}
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
return r.reconcilePvc(pvc)
|
||||
return r.reconcilePvc(pvc, log)
|
||||
}
|
||||
|
||||
func (r *ImportReconciler) findImporterPod(pvc *corev1.PersistentVolumeClaim) (*corev1.Pod, error) {
|
||||
func (r *ImportReconciler) findImporterPod(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (*corev1.Pod, error) {
|
||||
podName := importPodNameFromPvc(pvc)
|
||||
pod := &corev1.Pod{}
|
||||
err := r.Client.Get(context.TODO(), types.NamespacedName{Name: podName, Namespace: pvc.GetNamespace()}, pod)
|
||||
@ -172,20 +172,20 @@ func (r *ImportReconciler) findImporterPod(pvc *corev1.PersistentVolumeClaim) (*
|
||||
return nil, errors.Errorf("Pod is not owned by PVC")
|
||||
}
|
||||
|
||||
r.Log.V(1).Info("Pod is owned by PVC", pod.Name, pvc.Name)
|
||||
log.V(1).Info("Pod is owned by PVC", pod.Name, pvc.Name)
|
||||
return pod, nil
|
||||
}
|
||||
|
||||
func (r *ImportReconciler) reconcilePvc(pvc *corev1.PersistentVolumeClaim) (reconcile.Result, error) {
|
||||
func (r *ImportReconciler) reconcilePvc(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (reconcile.Result, error) {
|
||||
// See if we have a pod associated with the PVC, we know the PVC has the needed annotations.
|
||||
pod, err := r.findImporterPod(pvc)
|
||||
pod, err := r.findImporterPod(pvc, log)
|
||||
if err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
if pod == nil {
|
||||
if isPVCComplete(pvc) {
|
||||
// Don't create the POD if the PVC is completed already
|
||||
r.Log.V(1).Info("PVC is already complete")
|
||||
log.V(1).Info("PVC is already complete")
|
||||
} else if pvc.DeletionTimestamp == nil {
|
||||
// Create importer pod, make sure the PVC owns it.
|
||||
if err := r.createImporterPod(pvc); err != nil {
|
||||
@ -194,7 +194,7 @@ func (r *ImportReconciler) reconcilePvc(pvc *corev1.PersistentVolumeClaim) (reco
|
||||
}
|
||||
} else {
|
||||
if pvc.DeletionTimestamp != nil {
|
||||
r.Log.V(1).Info("PVC being terminated, delete pods", "pvc.Name", pvc.Name, "pod.Name", pod.Name)
|
||||
log.V(1).Info("PVC being terminated, delete pods", "pod.Name", pod.Name)
|
||||
if err := r.Client.Delete(context.TODO(), pod); IgnoreNotFound(err) != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
@ -202,25 +202,25 @@ func (r *ImportReconciler) reconcilePvc(pvc *corev1.PersistentVolumeClaim) (reco
|
||||
}
|
||||
|
||||
// Pod exists, we need to update the PVC status.
|
||||
if err := r.updatePvcFromPod(pvc, pod); err != nil {
|
||||
if err := r.updatePvcFromPod(pvc, pod, log); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *ImportReconciler) updatePvcFromPod(pvc *corev1.PersistentVolumeClaim, pod *corev1.Pod) error {
|
||||
func (r *ImportReconciler) updatePvcFromPod(pvc *corev1.PersistentVolumeClaim, pod *corev1.Pod, log logr.Logger) error {
|
||||
// Keep a copy of the original for comparison later.
|
||||
currentPvcCopy := pvc.DeepCopyObject()
|
||||
|
||||
r.Log.V(1).Info("Updating PVC from pod")
|
||||
log.V(1).Info("Updating PVC from pod")
|
||||
anno := pvc.GetAnnotations()
|
||||
scratchExitCode := false
|
||||
if pod.Status.ContainerStatuses != nil && pod.Status.ContainerStatuses[0].LastTerminationState.Terminated != nil &&
|
||||
pod.Status.ContainerStatuses[0].LastTerminationState.Terminated.ExitCode > 0 {
|
||||
r.Log.Info("Pod termination code", "pod.Name", pod.Name, "ExitCode", pod.Status.ContainerStatuses[0].LastTerminationState.Terminated.ExitCode)
|
||||
log.Info("Pod termination code", "pod.Name", pod.Name, "ExitCode", pod.Status.ContainerStatuses[0].LastTerminationState.Terminated.ExitCode)
|
||||
if pod.Status.ContainerStatuses[0].LastTerminationState.Terminated.ExitCode == common.ScratchSpaceNeededExitCode {
|
||||
r.Log.V(1).Info("Pod requires scratch space, terminating pod, and restarting with scratch space", "pod.Name", pod.Name)
|
||||
log.V(1).Info("Pod requires scratch space, terminating pod, and restarting with scratch space", "pod.Name", pod.Name)
|
||||
scratchExitCode = true
|
||||
anno[AnnRequiresScratch] = "true"
|
||||
} else {
|
||||
@ -246,16 +246,16 @@ func (r *ImportReconciler) updatePvcFromPod(pvc *corev1.PersistentVolumeClaim, p
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(currentPvcCopy, pvc) {
|
||||
if err := r.updatePVC(pvc); err != nil {
|
||||
if err := r.updatePVC(pvc, log); err != nil {
|
||||
return err
|
||||
}
|
||||
r.Log.V(1).Info("Updated PVC", "pvc.anno.Phase", anno[AnnPodPhase])
|
||||
log.V(1).Info("Updated PVC", "pvc.anno.Phase", anno[AnnPodPhase])
|
||||
}
|
||||
|
||||
if isPVCComplete(pvc) || scratchExitCode {
|
||||
if !scratchExitCode {
|
||||
r.recorder.Event(pvc, corev1.EventTypeNormal, ImportSucceededPVC, "Import Successful")
|
||||
r.Log.V(1).Info("Completed successfully, deleting POD", "pod.Name", pod.Name)
|
||||
log.V(1).Info("Completed successfully, deleting POD", "pod.Name", pod.Name)
|
||||
}
|
||||
if err := r.Client.Delete(context.TODO(), pod); IgnoreNotFound(err) != nil {
|
||||
return err
|
||||
@ -264,8 +264,8 @@ func (r *ImportReconciler) updatePvcFromPod(pvc *corev1.PersistentVolumeClaim, p
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ImportReconciler) updatePVC(pvc *corev1.PersistentVolumeClaim) error {
|
||||
r.Log.V(1).Info("Phase is now", "pvc.anno.Phase", pvc.GetAnnotations()[AnnPodPhase])
|
||||
func (r *ImportReconciler) updatePVC(pvc *corev1.PersistentVolumeClaim, log logr.Logger) error {
|
||||
log.V(1).Info("Phase is now", "pvc.anno.Phase", pvc.GetAnnotations()[AnnPodPhase])
|
||||
if err := r.Client.Update(context.TODO(), pvc); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -354,7 +354,7 @@ func scratchNameFromPvc(pvc *corev1.PersistentVolumeClaim) string {
|
||||
// name, and pvc. A nil secret means the endpoint credentials are not passed to the
|
||||
// importer pod.
|
||||
func createImporterPod(log logr.Logger, client client.Client, cdiClient cdiclientset.Interface, image, verbose, pullPolicy string, podEnvVar *importPodEnvVar, pvc *corev1.PersistentVolumeClaim, scratchPvcName *string) (*v1.Pod, error) {
|
||||
podResourceRequirements, err := GetDefaultPodResourceRequirements(cdiClient)
|
||||
podResourceRequirements, err := GetDefaultPodResourceRequirements(client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -181,7 +181,7 @@ var _ = Describe("Update PVC from POD", func() {
|
||||
resPod := &corev1.Pod{}
|
||||
err := reconciler.Client.Get(context.TODO(), types.NamespacedName{Name: "importer-testPvc1", Namespace: "default"}, resPod)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = reconciler.updatePvcFromPod(pvc, pod)
|
||||
err = reconciler.updatePvcFromPod(pvc, pod, reconciler.Log)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
By("Checking import successful event recorded")
|
||||
event := <-reconciler.recorder.(*record.FakeRecorder).Events
|
||||
@ -208,7 +208,7 @@ var _ = Describe("Update PVC from POD", func() {
|
||||
resPod := &corev1.Pod{}
|
||||
err := reconciler.Client.Get(context.TODO(), types.NamespacedName{Name: "importer-testPvc1", Namespace: "default"}, resPod)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = reconciler.updatePvcFromPod(pvc, pod)
|
||||
err = reconciler.updatePvcFromPod(pvc, pod, reconciler.Log)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
By("Checking pvc phase has been updated")
|
||||
resPvc := &corev1.PersistentVolumeClaim{}
|
||||
@ -231,7 +231,7 @@ var _ = Describe("Update PVC from POD", func() {
|
||||
Phase: corev1.PodPending,
|
||||
}
|
||||
reconciler = createImportReconciler(pvc, pod)
|
||||
err := reconciler.updatePvcFromPod(pvc, pod)
|
||||
err := reconciler.updatePvcFromPod(pvc, pod, reconciler.Log)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
By("Checking scratch PVC has been created")
|
||||
// Once all controllers are converted, we will use the runtime lib client instead of client-go and retrieval needs to change here.
|
||||
@ -263,7 +263,7 @@ var _ = Describe("Update PVC from POD", func() {
|
||||
},
|
||||
}
|
||||
reconciler = createImportReconciler(pvc, pod)
|
||||
err := reconciler.updatePvcFromPod(pvc, pod)
|
||||
err := reconciler.updatePvcFromPod(pvc, pod, reconciler.Log)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
By("Checking pvc phase has been updated")
|
||||
resPvc := &corev1.PersistentVolumeClaim{}
|
||||
@ -293,7 +293,7 @@ var _ = Describe("Update PVC from POD", func() {
|
||||
},
|
||||
}
|
||||
reconciler = createImportReconciler(pvc, pod)
|
||||
err := reconciler.updatePvcFromPod(pvc, pod)
|
||||
err := reconciler.updatePvcFromPod(pvc, pod, reconciler.Log)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
By("Checking pvc phase has been updated")
|
||||
resPvc := &corev1.PersistentVolumeClaim{}
|
||||
@ -379,6 +379,7 @@ func createImportReconciler(objects ...runtime.Object) *ImportReconciler {
|
||||
cdiConfig.Status = cdiv1.CDIConfigStatus{
|
||||
ScratchSpaceStorageClass: testStorageClass,
|
||||
}
|
||||
objs = append(objs, cdiConfig)
|
||||
cdifakeclientset := cdifake.NewSimpleClientset(cdiConfig)
|
||||
k8sfakeclientset := k8sfake.NewSimpleClientset(createStorageClass(testStorageClass, nil))
|
||||
|
||||
|
@ -325,7 +325,7 @@ func (c *UploadController) syncHandler(key string) error {
|
||||
pvcCopy := pvc.DeepCopy()
|
||||
|
||||
if isCloneTarget {
|
||||
source, err := getCloneRequestSourcePVC(pvc, c.pvcLister)
|
||||
source, err := c.getCloneRequestSourcePVC(pvc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -511,3 +511,16 @@ func (c *UploadController) deleteService(namespace, name string) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// returns the CloneRequest string which contains the pvc name (and namespace) from which we want to clone the image.
|
||||
func (c *UploadController) getCloneRequestSourcePVC(pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
|
||||
exists, namespace, name := ParseCloneRequestAnnotation(pvc)
|
||||
if !exists {
|
||||
return nil, errors.New("error parsing clone request annotation")
|
||||
}
|
||||
pvc, err := c.pvcLister.PersistentVolumeClaims(namespace).Get(name)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error getting clone source PVC")
|
||||
}
|
||||
return pvc, nil
|
||||
}
|
||||
|
@ -17,10 +17,14 @@ limitations under the License.
|
||||
package controller
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/appscode/jsonpatch"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@ -60,6 +64,58 @@ type uploadFixture struct {
|
||||
cdiobjects []runtime.Object
|
||||
}
|
||||
|
||||
func printJSONDiff(objA, objB interface{}) string {
|
||||
aBytes, _ := json.Marshal(objA)
|
||||
bBytes, _ := json.Marshal(objB)
|
||||
patches, _ := jsonpatch.CreatePatch(aBytes, bBytes)
|
||||
pBytes, _ := json.Marshal(patches)
|
||||
return string(pBytes)
|
||||
}
|
||||
|
||||
// checkAction verifies that expected and actual actions are equal and both have
|
||||
// same attached resources
|
||||
func checkAction(expected, actual core.Action, t *testing.T) {
|
||||
if !(expected.Matches(actual.GetVerb(), actual.GetResource().Resource) && actual.GetSubresource() == expected.GetSubresource()) {
|
||||
t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expected, actual)
|
||||
return
|
||||
}
|
||||
|
||||
if reflect.TypeOf(actual) != reflect.TypeOf(expected) {
|
||||
t.Errorf("Action has wrong type. Expected: %t. Got: %t", expected, actual)
|
||||
return
|
||||
}
|
||||
|
||||
switch a := actual.(type) {
|
||||
case core.CreateAction:
|
||||
e, _ := expected.(core.CreateAction)
|
||||
expObject := e.GetObject()
|
||||
object := a.GetObject()
|
||||
|
||||
if !reflect.DeepEqual(expObject, object) {
|
||||
t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
|
||||
a.GetVerb(), a.GetResource().Resource, printJSONDiff(expObject, object))
|
||||
}
|
||||
case core.UpdateAction:
|
||||
e, _ := expected.(core.UpdateAction)
|
||||
expObject := e.GetObject()
|
||||
object := a.GetObject()
|
||||
|
||||
if !reflect.DeepEqual(expObject, object) {
|
||||
t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
|
||||
a.GetVerb(), a.GetResource().Resource, printJSONDiff(expObject, object))
|
||||
}
|
||||
case core.PatchAction:
|
||||
e, _ := expected.(core.PatchAction)
|
||||
expPatch := e.GetPatch()
|
||||
patch := a.GetPatch()
|
||||
|
||||
if !reflect.DeepEqual(expPatch, expPatch) {
|
||||
t.Errorf("Action %s %s has wrong patch\nDiff:\n %s",
|
||||
a.GetVerb(), a.GetResource().Resource, printJSONDiff(expPatch, patch))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func newUploadFixture(t *testing.T) *uploadFixture {
|
||||
f := &uploadFixture{}
|
||||
f.t = t
|
||||
|
@ -1,11 +1,11 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rsa"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
crdv1alpha1 "github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1"
|
||||
@ -14,17 +14,17 @@ import (
|
||||
extclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/klog"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
cdiv1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1"
|
||||
clientset "kubevirt.io/containerized-data-importer/pkg/client/clientset/versioned"
|
||||
"kubevirt.io/containerized-data-importer/pkg/common"
|
||||
"kubevirt.io/containerized-data-importer/pkg/token"
|
||||
"kubevirt.io/containerized-data-importer/pkg/util"
|
||||
"kubevirt.io/containerized-data-importer/pkg/util/cert"
|
||||
)
|
||||
@ -53,6 +53,17 @@ const (
|
||||
SourceNone = "none"
|
||||
// SourceRegistry is the source type of Registry
|
||||
SourceRegistry = "registry"
|
||||
|
||||
// AnnAPIGroup is the APIGroup for CDI
|
||||
AnnAPIGroup = "cdi.kubevirt.io"
|
||||
// AnnCreatedBy is a pod annotation indicating if the pod was created by the PVC
|
||||
AnnCreatedBy = AnnAPIGroup + "/storage.createdByController"
|
||||
// AnnPodPhase is a PVC annotation indicating the related pod progress (phase)
|
||||
AnnPodPhase = AnnAPIGroup + "/storage.pod.phase"
|
||||
// AnnPodReady tells whether the pod is ready
|
||||
AnnPodReady = AnnAPIGroup + "/storage.pod.ready"
|
||||
// AnnOwnerRef is used when owner is in a different namespace
|
||||
AnnOwnerRef = AnnAPIGroup + "/storage.ownerRef"
|
||||
)
|
||||
|
||||
type podDeleteRequest struct {
|
||||
@ -299,14 +310,14 @@ func GetScratchPvcStorageClass(client kubernetes.Interface, cdiclient clientset.
|
||||
}
|
||||
|
||||
// GetDefaultPodResourceRequirements gets default pod resource requirements from cdi config status
|
||||
func GetDefaultPodResourceRequirements(cdiclient clientset.Interface) (*v1.ResourceRequirements, error) {
|
||||
config, err := cdiclient.CdiV1alpha1().CDIConfigs().Get(common.ConfigName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
func GetDefaultPodResourceRequirements(client client.Client) (*v1.ResourceRequirements, error) {
|
||||
cdiconfig := &cdiv1.CDIConfig{}
|
||||
if err := client.Get(context.TODO(), types.NamespacedName{Name: common.ConfigName}, cdiconfig); err != nil {
|
||||
klog.Errorf("Unable to find CDI configuration, %v\n", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return config.Status.DefaultPodResourceRequirements, nil
|
||||
return cdiconfig.Status.DefaultPodResourceRequirements, nil
|
||||
}
|
||||
|
||||
// this is being called for pods using PV with block volume mode
|
||||
@ -332,86 +343,6 @@ func addToMap(m1, m2 map[string]string) map[string]string {
|
||||
return m1
|
||||
}
|
||||
|
||||
// returns the CloneRequest string which contains the pvc name (and namespace) from which we want to clone the image.
|
||||
func getCloneRequestSourcePVC(pvc *v1.PersistentVolumeClaim, pvcLister corelisters.PersistentVolumeClaimLister) (*v1.PersistentVolumeClaim, error) {
|
||||
exists, namespace, name := ParseCloneRequestAnnotation(pvc)
|
||||
if !exists {
|
||||
return nil, errors.New("error parsing clone request annotation")
|
||||
}
|
||||
pvc, err := pvcLister.PersistentVolumeClaims(namespace).Get(name)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error getting clone source PVC")
|
||||
}
|
||||
return pvc, nil
|
||||
}
|
||||
|
||||
// ParseCloneRequestAnnotation parses the clone request annotation
|
||||
func ParseCloneRequestAnnotation(pvc *v1.PersistentVolumeClaim) (exists bool, namespace, name string) {
|
||||
var ann string
|
||||
ann, exists = pvc.Annotations[AnnCloneRequest]
|
||||
if !exists {
|
||||
return
|
||||
}
|
||||
|
||||
sp := strings.Split(ann, "/")
|
||||
if len(sp) != 2 {
|
||||
klog.V(1).Infof("Bad CloneRequest Annotation %s", ann)
|
||||
exists = false
|
||||
return
|
||||
}
|
||||
|
||||
namespace, name = sp[0], sp[1]
|
||||
return
|
||||
}
|
||||
|
||||
// ValidateCanCloneSourceAndTargetSpec validates the specs passed in are compatible for cloning.
|
||||
func ValidateCanCloneSourceAndTargetSpec(sourceSpec, targetSpec *v1.PersistentVolumeClaimSpec) error {
|
||||
sourceRequest := sourceSpec.Resources.Requests[v1.ResourceStorage]
|
||||
targetRequest := targetSpec.Resources.Requests[v1.ResourceStorage]
|
||||
// Verify that the target PVC size is equal or larger than the source.
|
||||
if sourceRequest.Value() > targetRequest.Value() {
|
||||
return errors.New("target resources requests storage size is smaller than the source")
|
||||
}
|
||||
// Verify that the source and target volume modes are the same.
|
||||
sourceVolumeMode := v1.PersistentVolumeFilesystem
|
||||
if sourceSpec.VolumeMode != nil && *sourceSpec.VolumeMode == v1.PersistentVolumeBlock {
|
||||
sourceVolumeMode = v1.PersistentVolumeBlock
|
||||
}
|
||||
targetVolumeMode := v1.PersistentVolumeFilesystem
|
||||
if targetSpec.VolumeMode != nil && *targetSpec.VolumeMode == v1.PersistentVolumeBlock {
|
||||
targetVolumeMode = v1.PersistentVolumeBlock
|
||||
}
|
||||
if sourceVolumeMode != targetVolumeMode {
|
||||
return fmt.Errorf("source volumeMode (%s) and target volumeMode (%s) do not match",
|
||||
sourceVolumeMode, targetVolumeMode)
|
||||
}
|
||||
// Can clone.
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateCloneToken(validator token.Validator, source, target *v1.PersistentVolumeClaim) error {
|
||||
tok, ok := target.Annotations[AnnCloneToken]
|
||||
if !ok {
|
||||
return errors.New("clone token missing")
|
||||
}
|
||||
|
||||
tokenData, err := validator.Validate(tok)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error verifying token")
|
||||
}
|
||||
|
||||
if tokenData.Operation != token.OperationClone ||
|
||||
tokenData.Name != source.Name ||
|
||||
tokenData.Namespace != source.Namespace ||
|
||||
tokenData.Resource.Resource != "persistentvolumeclaims" ||
|
||||
tokenData.Params["targetNamespace"] != target.Namespace ||
|
||||
tokenData.Params["targetName"] != target.Name {
|
||||
return errors.New("invalid token")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DecodePublicKey turns a bunch of bytes into a public key
|
||||
func DecodePublicKey(keyBytes []byte) (*rsa.PublicKey, error) {
|
||||
keys, err := cert.ParsePublicKeysPEM(keyBytes)
|
||||
@ -431,181 +362,6 @@ func DecodePublicKey(keyBytes []byte) (*rsa.PublicKey, error) {
|
||||
return key, nil
|
||||
}
|
||||
|
||||
// CloneSourcePodArgs are the required args to create a clone source pod
|
||||
type CloneSourcePodArgs struct {
|
||||
Client kubernetes.Interface
|
||||
CDIClient clientset.Interface
|
||||
Image, PullPolicy string
|
||||
ServerCACert, ClientCert, ClientKey []byte
|
||||
PVC *v1.PersistentVolumeClaim
|
||||
}
|
||||
|
||||
// CreateCloneSourcePod creates our cloning src pod which will be used for out of band cloning to read the contents of the src PVC
|
||||
func CreateCloneSourcePod(args CloneSourcePodArgs) (*v1.Pod, error) {
|
||||
exists, sourcePvcNamespace, sourcePvcName := ParseCloneRequestAnnotation(args.PVC)
|
||||
if !exists {
|
||||
return nil, errors.Errorf("bad CloneRequest Annotation")
|
||||
}
|
||||
|
||||
ownerKey, err := cache.MetaNamespaceKeyFunc(args.PVC)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error getting cache key")
|
||||
}
|
||||
|
||||
podResourceRequirements, err := GetDefaultPodResourceRequirements(args.CDIClient)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pod := MakeCloneSourcePodSpec(args.Image, args.PullPolicy, sourcePvcName, ownerKey,
|
||||
args.ClientKey, args.ClientCert, args.ServerCACert, args.PVC, podResourceRequirements)
|
||||
|
||||
pod, err = args.Client.CoreV1().Pods(sourcePvcNamespace).Create(pod)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "source pod API create errored")
|
||||
}
|
||||
|
||||
klog.V(1).Infof("cloning source pod \"%s/%s\" (image: %q) created\n", pod.Namespace, pod.Name, args.Image)
|
||||
|
||||
return pod, nil
|
||||
}
|
||||
|
||||
// MakeCloneSourcePodSpec creates and returns the clone source pod spec based on the target pvc.
|
||||
func MakeCloneSourcePodSpec(image, pullPolicy, sourcePvcName, ownerRefAnno string,
|
||||
clientKey, clientCert, serverCACert []byte, pvc *v1.PersistentVolumeClaim, resourceRequirements *v1.ResourceRequirements) *v1.Pod {
|
||||
|
||||
var ownerID string
|
||||
podName := fmt.Sprintf("%s-%s-", common.ClonerSourcePodName, sourcePvcName)
|
||||
id := string(pvc.GetUID())
|
||||
url := GetUploadServerURL(pvc.Namespace, pvc.Name, common.UploadPathSync)
|
||||
pvcOwner := metav1.GetControllerOf(pvc)
|
||||
if pvcOwner != nil && pvcOwner.Kind == "DataVolume" {
|
||||
ownerID = string(pvcOwner.UID)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: podName,
|
||||
Annotations: map[string]string{
|
||||
AnnCreatedBy: "yes",
|
||||
AnnOwnerRef: ownerRefAnno,
|
||||
},
|
||||
Labels: map[string]string{
|
||||
common.CDILabelKey: common.CDILabelValue, //filtered by the podInformer
|
||||
common.CDIComponentLabel: common.ClonerSourcePodName,
|
||||
// this label is used when searching for a pvc's cloner source pod.
|
||||
CloneUniqueID: id + "-source-pod",
|
||||
common.PrometheusLabel: "",
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
SecurityContext: &v1.PodSecurityContext{
|
||||
RunAsUser: &[]int64{0}[0],
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: common.ClonerSourcePodName,
|
||||
Image: image,
|
||||
ImagePullPolicy: v1.PullPolicy(pullPolicy),
|
||||
Env: []v1.EnvVar{
|
||||
/*
|
||||
Easier to just stick key/certs in env vars directly no.
|
||||
Maybe revisit when we fix the "naming things" problem.
|
||||
*/
|
||||
{
|
||||
Name: "CLIENT_KEY",
|
||||
Value: string(clientKey),
|
||||
},
|
||||
{
|
||||
Name: "CLIENT_CERT",
|
||||
Value: string(clientCert),
|
||||
},
|
||||
{
|
||||
Name: "SERVER_CA_CERT",
|
||||
Value: string(serverCACert),
|
||||
},
|
||||
{
|
||||
Name: "UPLOAD_URL",
|
||||
Value: url,
|
||||
},
|
||||
{
|
||||
Name: common.OwnerUID,
|
||||
Value: ownerID,
|
||||
},
|
||||
},
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
Name: "metrics",
|
||||
ContainerPort: 8443,
|
||||
Protocol: v1.ProtocolTCP,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyOnFailure,
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: DataVolName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: sourcePvcName,
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if resourceRequirements != nil {
|
||||
pod.Spec.Containers[0].Resources = *resourceRequirements
|
||||
}
|
||||
|
||||
var volumeMode v1.PersistentVolumeMode
|
||||
var addVars []v1.EnvVar
|
||||
|
||||
if pvc.Spec.VolumeMode != nil {
|
||||
volumeMode = *pvc.Spec.VolumeMode
|
||||
} else {
|
||||
volumeMode = v1.PersistentVolumeFilesystem
|
||||
}
|
||||
|
||||
if volumeMode == v1.PersistentVolumeBlock {
|
||||
pod.Spec.Containers[0].VolumeDevices = addVolumeDevices()
|
||||
addVars = []v1.EnvVar{
|
||||
{
|
||||
Name: "VOLUME_MODE",
|
||||
Value: "block",
|
||||
},
|
||||
{
|
||||
Name: "MOUNT_POINT",
|
||||
Value: common.WriteBlockPath,
|
||||
},
|
||||
}
|
||||
} else {
|
||||
pod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
|
||||
{
|
||||
Name: DataVolName,
|
||||
MountPath: common.ClonerMountPath,
|
||||
},
|
||||
}
|
||||
addVars = []v1.EnvVar{
|
||||
{
|
||||
Name: "VOLUME_MODE",
|
||||
Value: "filesystem",
|
||||
},
|
||||
{
|
||||
Name: "MOUNT_POINT",
|
||||
Value: common.ClonerMountPath,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, addVars...)
|
||||
|
||||
return pod
|
||||
}
|
||||
|
||||
// UploadPodArgs are the parameters required to create an upload pod
|
||||
type UploadPodArgs struct {
|
||||
Client kubernetes.Interface
|
||||
@ -624,11 +380,14 @@ type UploadPodArgs struct {
|
||||
func CreateUploadPod(args UploadPodArgs) (*v1.Pod, error) {
|
||||
ns := args.PVC.Namespace
|
||||
|
||||
podResourceRequirements, err := GetDefaultPodResourceRequirements(args.CdiClient)
|
||||
// TODO, replace this will call to GetDefaultPodResourceRequirements when using runtime library
|
||||
config, err := args.CdiClient.CdiV1alpha1().CDIConfigs().Get(common.ConfigName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
klog.Errorf("Unable to find CDI configuration, %v\n", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
podResourceRequirements := config.Status.DefaultPodResourceRequirements
|
||||
pod := makeUploadPodSpec(args.Image, args.Verbose, args.PullPolicy, args.Name,
|
||||
args.PVC, args.ScratchPVCName, args.ClientName, args.ServerCert,
|
||||
args.ServerKey, args.ClientCA, podResourceRequirements)
|
||||
@ -1064,52 +823,6 @@ func isPodReady(pod *v1.Pod) bool {
|
||||
return numReady == len(pod.Status.ContainerStatuses)
|
||||
}
|
||||
|
||||
func addFinalizer(client kubernetes.Interface, pvc *v1.PersistentVolumeClaim, name string) (*v1.PersistentVolumeClaim, error) {
|
||||
if hasFinalizer(pvc, name) {
|
||||
return pvc, nil
|
||||
}
|
||||
|
||||
cpy := pvc.DeepCopy()
|
||||
cpy.Finalizers = append(cpy.Finalizers, name)
|
||||
pvc, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(cpy)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error updating PVC")
|
||||
}
|
||||
|
||||
return pvc, nil
|
||||
}
|
||||
|
||||
func removeFinalizer(client kubernetes.Interface, pvc *v1.PersistentVolumeClaim, name string) (*v1.PersistentVolumeClaim, error) {
|
||||
if !hasFinalizer(pvc, name) {
|
||||
return pvc, nil
|
||||
}
|
||||
|
||||
var finalizers []string
|
||||
for _, f := range pvc.Finalizers {
|
||||
if f != name {
|
||||
finalizers = append(finalizers, f)
|
||||
}
|
||||
}
|
||||
|
||||
cpy := pvc.DeepCopy()
|
||||
cpy.Finalizers = finalizers
|
||||
pvc, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(cpy)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error updating PVC")
|
||||
}
|
||||
|
||||
return pvc, nil
|
||||
}
|
||||
|
||||
func hasFinalizer(object metav1.Object, value string) bool {
|
||||
for _, f := range object.GetFinalizers() {
|
||||
if f == value {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func podPhaseFromPVC(pvc *v1.PersistentVolumeClaim) v1.PodPhase {
|
||||
phase := pvc.ObjectMeta.Annotations[AnnPodPhase]
|
||||
return v1.PodPhase(phase)
|
||||
|
@ -1,7 +1,6 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
@ -909,7 +908,7 @@ func createPvcInStorageClass(name, ns string, storageClassName *string, annotati
|
||||
Namespace: ns,
|
||||
Annotations: annotations,
|
||||
Labels: labels,
|
||||
UID: types.UID(ns + "/" + name),
|
||||
UID: types.UID(ns + "-" + name),
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany, v1.ReadWriteOnce},
|
||||
@ -966,67 +965,6 @@ func createPvcNoSize(name, ns string, annotations, labels map[string]string) *v1
|
||||
}
|
||||
}
|
||||
|
||||
func createClonePvc(sourceNamespace, sourceName, targetNamespace, targetName string, annotations, labels map[string]string) *v1.PersistentVolumeClaim {
|
||||
return createClonePvcWithSize(sourceNamespace, sourceName, targetNamespace, targetName, annotations, labels, "1G")
|
||||
}
|
||||
|
||||
func createClonePvcWithSize(sourceNamespace, sourceName, targetNamespace, targetName string, annotations, labels map[string]string, size string) *v1.PersistentVolumeClaim {
|
||||
tokenData := &token.Payload{
|
||||
Operation: token.OperationClone,
|
||||
Name: sourceName,
|
||||
Namespace: sourceNamespace,
|
||||
Resource: metav1.GroupVersionResource{
|
||||
Group: "",
|
||||
Version: "v1",
|
||||
Resource: "persistentvolumeclaims",
|
||||
},
|
||||
Params: map[string]string{
|
||||
"targetNamespace": targetNamespace,
|
||||
"targetName": targetName,
|
||||
},
|
||||
}
|
||||
|
||||
g := token.NewGenerator(common.CloneTokenIssuer, getAPIServerKey(), 5*time.Minute)
|
||||
|
||||
tokenString, err := g.Generate(tokenData)
|
||||
if err != nil {
|
||||
panic("error generating token")
|
||||
}
|
||||
|
||||
if annotations == nil {
|
||||
annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
annotations[AnnCloneRequest] = fmt.Sprintf("%s/%s", sourceNamespace, sourceName)
|
||||
annotations[AnnCloneToken] = tokenString
|
||||
annotations[AnnUploadClientName] = "FOOBAR"
|
||||
|
||||
return &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: targetName,
|
||||
Namespace: targetNamespace,
|
||||
Annotations: annotations,
|
||||
Labels: labels,
|
||||
UID: "pvc-uid",
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany, v1.ReadWriteOnce},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse(size),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createCloneBlockPvc(sourceNamespace, sourceName, targetNamespace, targetName string, annotations, labels map[string]string) *v1.PersistentVolumeClaim {
|
||||
pvc := createClonePvc(sourceNamespace, sourceName, targetNamespace, targetName, annotations, labels)
|
||||
VolumeMode := v1.PersistentVolumeBlock
|
||||
pvc.Spec.VolumeMode = &VolumeMode
|
||||
return pvc
|
||||
}
|
||||
|
||||
func createSecret(name, ns, accessKey, secretKey string, labels map[string]string) *v1.Secret {
|
||||
return &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -1043,127 +981,6 @@ func createSecret(name, ns, accessKey, secretKey string, labels map[string]strin
|
||||
}
|
||||
}
|
||||
|
||||
func createSourcePod(pvc *v1.PersistentVolumeClaim, pvcUID string) *v1.Pod {
|
||||
_, _, sourcePvcName := ParseCloneRequestAnnotation(pvc)
|
||||
podName := fmt.Sprintf("%s-%s-", common.ClonerSourcePodName, sourcePvcName)
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: podName,
|
||||
Annotations: map[string]string{
|
||||
AnnCreatedBy: "yes",
|
||||
AnnOwnerRef: fmt.Sprintf("%s/%s", pvc.Namespace, pvc.Name),
|
||||
},
|
||||
Labels: map[string]string{
|
||||
CDILabelKey: CDILabelValue, //filtered by the podInformer
|
||||
CDIComponentLabel: ClonerSourcePodName,
|
||||
// this label is used when searching for a pvc's cloner source pod.
|
||||
CloneUniqueID: pvcUID + "-source-pod",
|
||||
common.PrometheusLabel: "",
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
SecurityContext: &v1.PodSecurityContext{
|
||||
RunAsUser: &[]int64{0}[0],
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: common.ClonerSourcePodName,
|
||||
Image: "test/mycloneimage",
|
||||
ImagePullPolicy: v1.PullPolicy("Always"),
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "CLIENT_KEY",
|
||||
Value: "bar",
|
||||
},
|
||||
{
|
||||
Name: "CLIENT_CERT",
|
||||
Value: "foo",
|
||||
},
|
||||
{
|
||||
Name: "SERVER_CA_CERT",
|
||||
Value: string("baz"),
|
||||
},
|
||||
{
|
||||
Name: "UPLOAD_URL",
|
||||
Value: GetUploadServerURL(pvc.Namespace, pvc.Name, common.UploadPathSync),
|
||||
},
|
||||
{
|
||||
Name: common.OwnerUID,
|
||||
Value: "",
|
||||
},
|
||||
},
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
Name: "metrics",
|
||||
ContainerPort: 8443,
|
||||
Protocol: v1.ProtocolTCP,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyOnFailure,
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: DataVolName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: sourcePvcName,
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var volumeMode v1.PersistentVolumeMode
|
||||
var addVars []v1.EnvVar
|
||||
|
||||
if pvc.Spec.VolumeMode != nil {
|
||||
volumeMode = *pvc.Spec.VolumeMode
|
||||
} else {
|
||||
volumeMode = v1.PersistentVolumeFilesystem
|
||||
}
|
||||
|
||||
if volumeMode == v1.PersistentVolumeBlock {
|
||||
pod.Spec.Containers[0].VolumeDevices = addVolumeDevices()
|
||||
addVars = []v1.EnvVar{
|
||||
{
|
||||
Name: "VOLUME_MODE",
|
||||
Value: "block",
|
||||
},
|
||||
{
|
||||
Name: "MOUNT_POINT",
|
||||
Value: common.WriteBlockPath,
|
||||
},
|
||||
}
|
||||
pod.Spec.SecurityContext = &v1.PodSecurityContext{
|
||||
RunAsUser: &[]int64{0}[0],
|
||||
}
|
||||
} else {
|
||||
pod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
|
||||
{
|
||||
Name: DataVolName,
|
||||
MountPath: common.ClonerMountPath,
|
||||
},
|
||||
}
|
||||
addVars = []v1.EnvVar{
|
||||
{
|
||||
Name: "VOLUME_MODE",
|
||||
Value: "filesystem",
|
||||
},
|
||||
{
|
||||
Name: "MOUNT_POINT",
|
||||
Value: common.ClonerMountPath,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, addVars...)
|
||||
|
||||
return pod
|
||||
}
|
||||
|
||||
func getPvcKey(pvc *corev1.PersistentVolumeClaim, t *testing.T) string {
|
||||
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pvc)
|
||||
if err != nil {
|
||||
|
@ -537,7 +537,7 @@ func doFileBasedCloneTest(f *framework.Framework, srcPVCDef *v1.PersistentVolume
|
||||
|
||||
func completeClone(f *framework.Framework, targetNs *v1.Namespace, targetPvc *v1.PersistentVolumeClaim, filePath, expectedMD5 string) {
|
||||
By("Find cloner pods")
|
||||
sourcePod, err := f.FindPodByPrefix(common.ClonerSourcePodName)
|
||||
sourcePod, err := f.FindPodBySuffix(common.ClonerSourcePodNameSuffix)
|
||||
if err != nil {
|
||||
PrintControllerLog(f)
|
||||
}
|
||||
@ -595,10 +595,11 @@ func cloneOfAnnoExistenceTest(f *framework.Framework, targetNamespaceName string
|
||||
}, controllerSkipPVCCompleteTimeout, assertionPollInterval).Should(BeTrue())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
By("Checking logs explicitly skips PVC")
|
||||
Eventually(func() bool {
|
||||
log, err := RunKubectlCommand(f, "logs", f.ControllerPod.Name, "-n", f.CdiInstallNs)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return strings.Contains(log, fmt.Sprintf("Cleaning up for PVC %s/target-pvc", targetNamespaceName))
|
||||
return strings.Contains(log, fmt.Sprintf("{\"PVC\": \"%s/%s\", \"checkPVC(AnnCloneRequest)\": true, \"NOT has annotation(AnnCloneOf)\": false, \"has finalizer?\": false}", targetNamespaceName, "target-pvc"))
|
||||
}, controllerSkipPVCCompleteTimeout, assertionPollInterval).Should(BeTrue())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
@ -33,3 +33,8 @@ func (f *Framework) WaitTimeoutForPodStatus(podName string, status k8sv1.PodPhas
|
||||
func (f *Framework) FindPodByPrefix(prefix string) (*k8sv1.Pod, error) {
|
||||
return utils.FindPodByPrefix(f.K8sClient, f.Namespace.Name, prefix, common.CDILabelSelector)
|
||||
}
|
||||
|
||||
// FindPodBySuffix is a wrapper around utils.FindPodByPostFix
|
||||
func (f *Framework) FindPodBySuffix(prefix string) (*k8sv1.Pod, error) {
|
||||
return utils.FindPodBysuffix(f.K8sClient, f.Namespace.Name, prefix, common.CDILabelSelector)
|
||||
}
|
||||
|
@ -124,8 +124,17 @@ func addVolumeMounts(pvc *k8sv1.PersistentVolumeClaim) []v1.VolumeMount {
|
||||
return volumeMounts
|
||||
}
|
||||
|
||||
// FindPodBysuffix finds the first pod which has the passed in postfix. Returns error if multiple pods with the same prefix are found.
|
||||
func FindPodBysuffix(clientSet *kubernetes.Clientset, namespace, prefix, labelSelector string) (*k8sv1.Pod, error) {
|
||||
return findPodByCompFunc(clientSet, namespace, prefix, labelSelector, strings.HasSuffix)
|
||||
}
|
||||
|
||||
// FindPodByPrefix finds the first pod which has the passed in prefix. Returns error if multiple pods with the same prefix are found.
|
||||
func FindPodByPrefix(clientSet *kubernetes.Clientset, namespace, prefix, labelSelector string) (*k8sv1.Pod, error) {
|
||||
return findPodByCompFunc(clientSet, namespace, prefix, labelSelector, strings.HasPrefix)
|
||||
}
|
||||
|
||||
func findPodByCompFunc(clientSet *kubernetes.Clientset, namespace, prefix, labelSelector string, compFunc func(string, string) bool) (*k8sv1.Pod, error) {
|
||||
var result k8sv1.Pod
|
||||
var foundPod bool
|
||||
err := wait.PollImmediate(2*time.Second, podCreateTime, func() (bool, error) {
|
||||
@ -134,7 +143,7 @@ func FindPodByPrefix(clientSet *kubernetes.Clientset, namespace, prefix, labelSe
|
||||
})
|
||||
if err == nil {
|
||||
for _, pod := range podList.Items {
|
||||
if strings.HasPrefix(pod.Name, prefix) {
|
||||
if compFunc(pod.Name, prefix) {
|
||||
if !foundPod {
|
||||
foundPod = true
|
||||
result = pod
|
||||
|
Loading…
Reference in New Issue
Block a user