containerized-data-importer/pkg/controller/clone/host-clone.go
Michael Henriksen 4ce9272c2c
DataVolume Controller uses VolumeCloneSource Populator (#2750)
* remove CSI clone

bye bye

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

* no more smart clone

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

* PVC clone same namespace

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

* cross namespace pvc clone

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

* various fixes to get some functional tests to work

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

* delete smart clone controller again

somehow reappeared after rebase

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

* mostly pvc clone functional test fixes

make sure size detect pod only runs on kubevirt content type

clone populator was skipping last round of applying pvc's annotations

various func test fixes

review comments

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

* more various test fixes

host clone phase should (implicitly) wait for clone source pod to exit

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

* remove "smart" clone from snapshot

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

* DataVolume clone from snapshot uses populator

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

* improve clone populator/datavolume coordination on "running" condition

For host clone, not much changes, values still coming from annotations on host clone PVC

For smart/csi clone the DataVolume will be "running" if not in pending or error phase

Will have the same values for terminal "completed" state regardless of clone type

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

* unit tests for pvc/snapshot clone controllers

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

* remove skipped test added in https://github.com/kubevirt/containerized-data-importer/pull/2759

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

* attempt address AfterSuite and generate-verify failures

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

* handle snapshot clone with no target size specified

also add more validation to some snapshot clone tests

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

* remove Patch calls

Using the controller runtime Patch API with controller runtime cached client seems to be a pretty bad fit

At least given the way the CR API is designed where an old object is compared to new.

I like patch in theory though and will revisit

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

* Clone populator should plan and execute even if PVC is bound

It was possible to miss "preallocation applied" annotation otherwise

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

* add long term token to datavolume

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

* Rename ProgressReporter to StatusReporter

Should have been done back when annotations were added to "progress"

Also, if pvc is bound do not call phase Reconcile functions only Status

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>

---------

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>
2023-06-30 00:14:54 +02:00

153 lines
4.1 KiB
Go

package clone
import (
"context"
"fmt"
"net/http"
"time"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
cc "kubevirt.io/containerized-data-importer/pkg/controller/common"
)
// HostClonePhaseName is the name of the host clone phase
const HostClonePhaseName = "HostClone"

// HostClonePhase creates and monitors a dumb clone operation
type HostClonePhase struct {
	// Owner is the resource driving the clone; its UID, namespace, name,
	// and kind are stamped onto the created PVC as annotations.
	Owner client.Object
	// Namespace is where the target claim is created (also used as the
	// source namespace in the clone-request annotation).
	Namespace string
	// SourceName is the name of the source PVC referenced by the
	// clone-request annotation.
	SourceName string
	// DesiredClaim is the template for the PVC this phase creates.
	DesiredClaim *corev1.PersistentVolumeClaim
	// ImmediateBind, when true, adds the immediate-binding annotation.
	ImmediateBind bool
	// OwnershipLabel, when non-empty, is applied via AddOwnershipLabel.
	OwnershipLabel string
	// Preallocation requests preallocation on the clone and gates
	// completion on the preallocation-applied annotation.
	Preallocation bool
	// PriorityClassName, when non-empty, is recorded as an annotation
	// on the created claim.
	PriorityClassName string
	// Client is used for all API reads/writes.
	Client client.Client
	// Log is the phase logger.
	Log logr.Logger
	// Recorder emits events (e.g. quota-exceeded) on the owner.
	Recorder record.EventRecorder
}
// Compile-time checks that HostClonePhase satisfies the Phase and
// StatusReporter interfaces.
var _ Phase = &HostClonePhase{}
var _ StatusReporter = &HostClonePhase{}

// httpClient is the shared client used by Status to query clone progress.
var httpClient *http.Client

func init() {
	// Build the package-wide HTTP client once; cc.BuildHTTPClient is
	// passed the current (nil) client and returns an initialized one.
	httpClient = cc.BuildHTTPClient(httpClient)
}
// Name returns the name of the phase ("HostClone").
func (p *HostClonePhase) Name() string {
	return HostClonePhaseName
}
// Status returns the phase status, populated from the target claim's
// annotations and (when a clone source pod exists) its transfer progress.
func (p *HostClonePhase) Status(ctx context.Context) (*PhaseStatus, error) {
	status := &PhaseStatus{}
	claim := &corev1.PersistentVolumeClaim{}
	found, err := getResource(ctx, p.Client, p.Namespace, p.DesiredClaim.Name, claim)
	if err != nil {
		return nil, err
	}
	if !found {
		// Claim not created yet; nothing to report.
		return status, nil
	}

	status.Annotations = claim.Annotations

	sourcePod := claim.Annotations[cc.AnnCloneSourcePod]
	if sourcePod == "" {
		// No clone source pod recorded yet, so progress cannot be queried.
		return status, nil
	}

	progress, err := cc.ProgressFromClaim(ctx, &cc.ProgressFromClaimArgs{
		Client:       p.Client,
		HTTPClient:   httpClient,
		Claim:        claim,
		PodNamespace: p.Namespace,
		PodName:      sourcePod,
		OwnerUID:     string(p.Owner.GetUID()),
	})
	if err != nil {
		return nil, err
	}
	status.Progress = progress
	return status, nil
}
// Reconcile creates the desired pvc if necessary and waits for the host
// clone operation to complete, requeueing while it is still in progress.
func (p *HostClonePhase) Reconcile(ctx context.Context) (*reconcile.Result, error) {
	claim := &corev1.PersistentVolumeClaim{}
	found, err := getResource(ctx, p.Client, p.Namespace, p.DesiredClaim.Name, claim)
	if err != nil {
		return nil, err
	}
	if !found {
		if claim, err = p.createClaim(ctx); err != nil {
			return nil, err
		}
	}
	if p.hostCloneComplete(claim) {
		// Clone finished; no further reconciliation needed from this phase.
		return nil, nil
	}
	// Still cloning; requeue shortly so status stays up to date.
	return &reconcile.Result{RequeueAfter: 3 * time.Second}, nil
}
// createClaim builds the target PVC from the desired-claim template,
// decorates it with the host clone / populator annotations, and submits
// it to the API server. Returns the created claim or the create error.
func (p *HostClonePhase) createClaim(ctx context.Context) (*corev1.PersistentVolumeClaim, error) {
	target := p.DesiredClaim.DeepCopy()
	target.Namespace = p.Namespace

	// Annotations consumed by the host clone machinery.
	cc.AddAnnotation(target, cc.AnnPreallocationRequested, fmt.Sprintf("%t", p.Preallocation))
	cc.AddAnnotation(target, cc.AnnOwnerUID, string(p.Owner.GetUID()))
	cc.AddAnnotation(target, cc.AnnPodRestarts, "0")
	cc.AddAnnotation(target, cc.AnnCloneRequest, fmt.Sprintf("%s/%s", p.Namespace, p.SourceName))
	cc.AddAnnotation(target, cc.AnnPopulatorKind, cdiv1.VolumeCloneSourceRef)
	// Event routing: record the owner's kind and namespaced name.
	cc.AddAnnotation(target, cc.AnnEventSourceKind, p.Owner.GetObjectKind().GroupVersionKind().Kind)
	cc.AddAnnotation(target, cc.AnnEventSource, fmt.Sprintf("%s/%s", p.Owner.GetNamespace(), p.Owner.GetName()))

	if p.OwnershipLabel != "" {
		AddOwnershipLabel(p.OwnershipLabel, target, p.Owner)
	}
	if p.ImmediateBind {
		cc.AddAnnotation(target, cc.AnnImmediateBinding, "")
	}
	if p.PriorityClassName != "" {
		cc.AddAnnotation(target, cc.AnnPriorityClassName, p.PriorityClassName)
	}

	if err := p.Client.Create(ctx, target); err != nil {
		// Surface quota failures as events on the owner before bailing.
		checkQuotaExceeded(p.Recorder, p.Owner, err)
		return nil, err
	}
	return target, nil
}
// hostCloneComplete reports whether the host clone has fully finished.
//
// Both the upload controller and the clone controller set the pod-phase
// annotation to Succeeded, but only the clone controller applies the
// preallocation annotation — so when preallocation was requested we must
// also wait for that annotation before declaring completion.
func (p *HostClonePhase) hostCloneComplete(pvc *corev1.PersistentVolumeClaim) bool {
	anns := pvc.Annotations
	if p.Preallocation && anns[cc.AnnPreallocationApplied] != "true" {
		return false
	}
	return anns[cc.AnnPodPhase] == string(cdiv1.Succeeded)
}