* Fix progress metric registration and parsing

  Use default metric registration. We shouldn't use the controller-runtime
  registration, as we have no controller here and it will not register the
  metric correctly. Fix the metric parsing to match its new name; otherwise
  the DV progress will not be updated until it's 100%. Regression introduced
  in #3254.

  Signed-off-by: Arnon Gilboa <agilboa@redhat.com>

* Add kubevirt_cdi_import_progress_total metric

  Use it in the importer instead of kubevirt_cdi_clone_progress_total and
  fix the metric parsing accordingly.

  Signed-off-by: Arnon Gilboa <agilboa@redhat.com>

* Move ProgressFromClaim to host-clone

  Nobody else is using it.

  Signed-off-by: Arnon Gilboa <agilboa@redhat.com>

* Add ProgressMetric interface

  ProgressReader can now work with either the import or the clone progress
  metric. FIXME: consider removing the direct Add/Get and using them only
  via the interface.

  Signed-off-by: Arnon Gilboa <agilboa@redhat.com>

* Refactor ProgressMetric interface

  Signed-off-by: Arnon Gilboa <agilboa@redhat.com>

* Refactor progress parsing

  Signed-off-by: Arnon Gilboa <agilboa@redhat.com>

* Refer to metric names from the metrics package

  Signed-off-by: Arnon Gilboa <agilboa@redhat.com>

---------

Signed-off-by: Arnon Gilboa <agilboa@redhat.com>
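
The registration fix in the first commit comes down to which Prometheus
registry the metric lands in. A minimal sketch of the distinction, assuming a
plain client_golang counter (metric name, label, and function names here are
illustrative, not CDI's actual setup):

	import (
		"github.com/prometheus/client_golang/prometheus"
		crmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
	)

	var progress = prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "example_progress_total"},
		[]string{"ownerUID"},
	)

	func registerWrong() {
		// controller-runtime's registry is only served by a controller
		// manager's metrics endpoint; a standalone worker pod has no
		// manager, so the metric would never be scraped:
		crmetrics.Registry.MustRegister(progress)
	}

	func registerRight() {
		// the default registry is what promhttp's default handler serves,
		// which is what a standalone pod exposes:
		prometheus.MustRegister(progress)
	}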

package clone

import (
	"context"
	"fmt"
	"net/http"
	"strconv"
	"time"

	"github.com/go-logr/logr"

	corev1 "k8s.io/api/core/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/record"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
	cc "kubevirt.io/containerized-data-importer/pkg/controller/common"
	metrics "kubevirt.io/containerized-data-importer/pkg/monitoring/metrics/cdi-cloner"
)

// HostClonePhaseName is the name of the host clone phase
const HostClonePhaseName = "HostClone"

// HostClonePhase creates and monitors a dumb clone operation
type HostClonePhase struct {
	Owner             client.Object
	Namespace         string
	SourceName        string
	DesiredClaim      *corev1.PersistentVolumeClaim
	ImmediateBind     bool
	OwnershipLabel    string
	Preallocation     bool
	PriorityClassName string
	Client            client.Client
	Log               logr.Logger
	Recorder          record.EventRecorder
}
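
// A HostClonePhase is used like any other Phase in this package: construct it
// with its dependencies and call Reconcile until it returns a nil result. A
// minimal sketch (caller names and field values are illustrative, not taken
// from this file):
//
//	phase := &HostClonePhase{
//		Owner:        dataVolume,
//		Namespace:    "target-ns",
//		SourceName:   "source-pvc",
//		DesiredClaim: desiredClaim,
//		Client:       c,
//		Log:          log,
//		Recorder:     recorder,
//	}
//	result, err := phase.Reconcile(ctx)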

var _ Phase = &HostClonePhase{}

var _ StatusReporter = &HostClonePhase{}
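
// httpClient is shared by every HostClonePhase so that progress polling
// reuses a single transport; cc.BuildHTTPClient is assumed to configure
// TLS/certs for scraping the clone source pod's metrics endpoint.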
var httpClient *http.Client

func init() {
	httpClient = cc.BuildHTTPClient(httpClient)
}

// Name returns the name of the phase
func (p *HostClonePhase) Name() string {
	return HostClonePhaseName
}

// Status returns the phase status
func (p *HostClonePhase) Status(ctx context.Context) (*PhaseStatus, error) {
	result := &PhaseStatus{}
	pvc := &corev1.PersistentVolumeClaim{}
	exists, err := getResource(ctx, p.Client, p.Namespace, p.DesiredClaim.Name, pvc)
	if err != nil {
		return nil, err
	}

	if !exists {
		return result, nil
	}

	result.Annotations = pvc.Annotations

	podName := pvc.Annotations[cc.AnnCloneSourcePod]
	if podName == "" {
		return result, nil
	}

	args := &progressFromClaimArgs{
		Client:       p.Client,
		HTTPClient:   httpClient,
		Claim:        pvc,
		PodNamespace: p.Namespace,
		PodName:      podName,
		OwnerUID:     string(p.Owner.GetUID()),
	}

	progress, err := progressFromClaim(ctx, args)
	if err != nil {
		return nil, err
	}

	result.Progress = progress

	return result, nil
}

// progressFromClaimArgs are the args for progressFromClaim
type progressFromClaimArgs struct {
	Client       client.Client
	HTTPClient   *http.Client
	Claim        *corev1.PersistentVolumeClaim
	OwnerUID     string
	PodNamespace string
	PodName      string
}

// progressFromClaim returns the progress of the clone operation as reported
// by the clone source pod
func progressFromClaim(ctx context.Context, args *progressFromClaimArgs) (string, error) {
	// Just set 100.0% if pod is succeeded
	if args.Claim.Annotations[cc.AnnPodPhase] == string(corev1.PodSucceeded) {
		return cc.ProgressDone, nil
	}

	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: args.PodNamespace,
			Name:      args.PodName,
		},
	}
	if err := args.Client.Get(ctx, client.ObjectKeyFromObject(pod), pod); err != nil {
		if k8serrors.IsNotFound(err) {
			return "", nil
		}
		return "", err
	}

	// This will only work when the clone source pod is running
	if pod.Status.Phase != corev1.PodRunning {
		return "", nil
	}
	url, err := cc.GetMetricsURL(pod)
	if err != nil {
		return "", err
	}
	if url == "" {
		return "", nil
	}

	// We fetch the clone progress from the clone source pod metrics
	progressReport, err := cc.GetProgressReportFromURL(url, args.HTTPClient, metrics.CloneProgressMetricName, args.OwnerUID)
	if err != nil {
		return "", err
	}
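	// progressReport is the raw sample value for this owner's entry in the
	// clone progress metric; the scraped exposition looks roughly like
	// (label and value illustrative):
	//
	//	kubevirt_cdi_clone_progress_total{ownerUID="1234-abcd"} 42.5
	//
	// and a report of "42.5" is rendered as "42.50%" below.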
	if progressReport != "" {
		if f, err := strconv.ParseFloat(progressReport, 64); err == nil {
			return fmt.Sprintf("%.2f%%", f), nil
		}
	}

	return "", nil
}

// Reconcile creates the desired pvc and waits for the operation to complete
func (p *HostClonePhase) Reconcile(ctx context.Context) (*reconcile.Result, error) {
	actualClaim := &corev1.PersistentVolumeClaim{}
	exists, err := getResource(ctx, p.Client, p.Namespace, p.DesiredClaim.Name, actualClaim)
	if err != nil {
		return nil, err
	}

	if !exists {
		actualClaim, err = p.createClaim(ctx)
		if err != nil {
			return nil, err
		}
	}

	if !p.hostCloneComplete(actualClaim) {
		// requeue to update status
		return &reconcile.Result{RequeueAfter: 3 * time.Second}, nil
	}

	return nil, nil
}
func (p *HostClonePhase) createClaim(ctx context.Context) (*corev1.PersistentVolumeClaim, error) {
	claim := p.DesiredClaim.DeepCopy()

	claim.Namespace = p.Namespace
	cc.AddAnnotation(claim, cc.AnnPreallocationRequested, fmt.Sprintf("%t", p.Preallocation))
	cc.AddAnnotation(claim, cc.AnnOwnerUID, string(p.Owner.GetUID()))
	cc.AddAnnotation(claim, cc.AnnPodRestarts, "0")
	cc.AddAnnotation(claim, cc.AnnCloneRequest, fmt.Sprintf("%s/%s", p.Namespace, p.SourceName))
	cc.AddAnnotation(claim, cc.AnnPopulatorKind, cdiv1.VolumeCloneSourceRef)
	cc.AddAnnotation(claim, cc.AnnEventSourceKind, p.Owner.GetObjectKind().GroupVersionKind().Kind)
	cc.AddAnnotation(claim, cc.AnnEventSource, fmt.Sprintf("%s/%s", p.Owner.GetNamespace(), p.Owner.GetName()))
	if p.OwnershipLabel != "" {
		AddOwnershipLabel(p.OwnershipLabel, claim, p.Owner)
	}
	if p.ImmediateBind {
		cc.AddAnnotation(claim, cc.AnnImmediateBinding, "")
	}
	if p.PriorityClassName != "" {
		cc.AddAnnotation(claim, cc.AnnPriorityClassName, p.PriorityClassName)
	}

	if err := p.Client.Create(ctx, claim); err != nil {
		checkQuotaExceeded(p.Recorder, p.Owner, err)
		return nil, err
	}

	return claim, nil
}

func (p *HostClonePhase) hostCloneComplete(pvc *corev1.PersistentVolumeClaim) bool {
	// this is awfully lame
	// both the upload controller and clone controller update the PVC status to succeeded
	// but only the clone controller will set the preallocation annotation
	// so we have to wait for that
	if p.Preallocation && pvc.Annotations[cc.AnnPreallocationApplied] != "true" {
		return false
	}
	return pvc.Annotations[cc.AnnPodPhase] == string(cdiv1.Succeeded)
}