Mirror of https://github.com/kubevirt/containerized-data-importer.git, synced 2025-06-03 06:30:22 +00:00

* Handle labels length correctly
* Handle service name generation correctly
* Remove not needed labels
* Store import pod name in annotation
* Enable long DV name
* Handle name with dot when creating service/label name
* Test long names on import, upload and clone
* Store upload pod name in annotation
* Store importer scratch pvc name in annotation
* Quick fix for tests (need improvements)
* Cleanup handling scratch name
* Ensure pod/service name conflicts are handled
* Handle client errors when trying to get the import pod
* Style improvements, and other code review fixes
* Store clone source pod name in an annotation
* Correct name initialization and tests
* Do not init name if pod already exists. It is not needed. The situation of having a pod but no name in the annotation can happen after an upgrade, when a legacy pvc and pod already exist but the clone operation has not finished. But when we already have the pod, the code (currently) does not need the name from the annotation.
* Cleanup scratch name handling
* Use constant for max dv name in validation
* Simplify clone source pod name initialization

Each commit signed-off-by: Bartosz Rybacki <brybacki@redhat.com>
289 lines · 10 KiB · Go
package utils

import (
	"fmt"
	"strings"
	"time"

	"github.com/onsi/ginkgo"
	k8sv1 "k8s.io/api/core/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"

	"kubevirt.io/containerized-data-importer/pkg/util/naming"
)

const (
	// PodWaitForTime is the time to wait for Pod operations to complete
	PodWaitForTime = defaultPollPeriod

	podCreateTime = defaultPollPeriod
	podDeleteTime = defaultPollPeriod
)

// CreateExecutorPodWithPVC creates a Pod with the passed in PVC mounted under /pvc. You can then use the executor utilities to
// run commands against the PVC through this Pod.
func CreateExecutorPodWithPVC(clientSet *kubernetes.Clientset, podName, namespace string, pvc *k8sv1.PersistentVolumeClaim) (*k8sv1.Pod, error) {
	return CreatePod(clientSet, namespace, newExecutorPodWithPVC(podName, pvc))
}

// CreateExecutorPodWithPVCSpecificNode creates a Pod on a specific node with the passed in PVC mounted under /pvc. You can then use the executor utilities to
// run commands against the PVC through this Pod.
func CreateExecutorPodWithPVCSpecificNode(clientSet *kubernetes.Clientset, podName, namespace string, pvc *k8sv1.PersistentVolumeClaim, node string) (*k8sv1.Pod, error) {
	pod := newExecutorPodWithPVC(podName, pvc)
	pod.Spec.NodeSelector = map[string]string{
		"kubernetes.io/hostname": node,
	}
	return CreatePod(clientSet, namespace, pod)
}

// CreatePod calls the Kubernetes API to create a Pod
func CreatePod(clientSet *kubernetes.Clientset, namespace string, podDef *k8sv1.Pod) (*k8sv1.Pod, error) {
	var pod *k8sv1.Pod
	err := wait.PollImmediate(2*time.Second, podCreateTime, func() (bool, error) {
		var err error
		pod, err = clientSet.CoreV1().Pods(namespace).Create(podDef)
		if err != nil {
			return false, err
		}
		return true, nil
	})
	return pod, err
}
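
// Illustrative usage sketch (not part of the original file): create an
// executor pod and wait for it to reach Running. The "cdi-test" namespace
// is an assumed value, and the PVC is assumed to exist already.
func exampleCreateAndWait(clientSet *kubernetes.Clientset, pvc *k8sv1.PersistentVolumeClaim) error {
	pod, err := CreateExecutorPodWithPVC(clientSet, "example-executor", "cdi-test", pvc)
	if err != nil {
		return err
	}
	return WaitTimeoutForPodReady(clientSet, pod.Name, "cdi-test", PodWaitForTime)
}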

// DeletePod deletes the passed in Pod from the passed in Namespace
func DeletePod(clientSet *kubernetes.Clientset, pod *k8sv1.Pod, namespace string) error {
	return wait.PollImmediate(2*time.Second, podDeleteTime, func() (bool, error) {
		err := clientSet.CoreV1().Pods(namespace).Delete(pod.GetName(), &metav1.DeleteOptions{})
		if err != nil {
			// a pod that is already gone counts as a successful delete;
			// otherwise keep polling until the delete goes through
			if k8serrors.IsNotFound(err) {
				return true, nil
			}
			return false, nil
		}
		return true, nil
	})
}

// NewPodWithPVC creates a new pod that mounts the given PVC
func NewPodWithPVC(podName, cmd string, pvc *k8sv1.PersistentVolumeClaim) *k8sv1.Pod {
	volumeName := naming.GetLabelNameFromResourceName(pvc.GetName())
	pod := &k8sv1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
			Annotations: map[string]string{
				"cdi.kubevirt.io/testing": podName,
			},
		},
		Spec: k8sv1.PodSpec{
			RestartPolicy: k8sv1.RestartPolicyNever,
			Containers: []k8sv1.Container{
				{
					Name:    "runner",
					Image:   "kubevirt/cdi-importer:latest",
					Command: []string{"/bin/sh", "-c", cmd},
					Resources: k8sv1.ResourceRequirements{
						Limits: map[k8sv1.ResourceName]resource.Quantity{
							k8sv1.ResourceCPU:    *resource.NewQuantity(0, resource.DecimalSI),
							k8sv1.ResourceMemory: *resource.NewQuantity(0, resource.DecimalSI),
						},
						Requests: map[k8sv1.ResourceName]resource.Quantity{
							k8sv1.ResourceCPU:    *resource.NewQuantity(0, resource.DecimalSI),
							k8sv1.ResourceMemory: *resource.NewQuantity(0, resource.DecimalSI),
						},
					},
				},
			},
			Volumes: []k8sv1.Volume{
				{
					Name: volumeName,
					VolumeSource: k8sv1.VolumeSource{
						PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{
							ClaimName: pvc.GetName(),
						},
					},
				},
			},
		},
	}

	// Block-mode PVCs are attached as raw devices; filesystem-mode PVCs are mounted.
	volumeMode := pvc.Spec.VolumeMode
	if volumeMode != nil && *volumeMode == k8sv1.PersistentVolumeBlock {
		pod.Spec.Containers[0].VolumeDevices = addVolumeDevices(pvc, volumeName)
	} else {
		pod.Spec.Containers[0].VolumeMounts = addVolumeMounts(pvc, volumeName)
	}
	return pod
}
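
// Illustrative sketch (not part of the original file): NewPodWithPVC picks raw
// device attachment vs. filesystem mount based on the PVC's volumeMode, so a
// caller only has to build the PVC. The PVC and pod names here are made-up
// example values.
func exampleBlockModePod() *k8sv1.Pod {
	blockMode := k8sv1.PersistentVolumeBlock
	pvc := &k8sv1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "example-block-pvc"},
		Spec:       k8sv1.PersistentVolumeClaimSpec{VolumeMode: &blockMode},
	}
	// The resulting pod exposes the PVC at DefaultPvcMountPath as a VolumeDevice.
	return NewPodWithPVC("example-block-pod", "dd if=/dev/null", pvc)
}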

func addVolumeDevices(pvc *k8sv1.PersistentVolumeClaim, volumeName string) []k8sv1.VolumeDevice {
	volumeDevices := []k8sv1.VolumeDevice{
		{
			Name:       volumeName,
			DevicePath: DefaultPvcMountPath,
		},
	}
	return volumeDevices
}

// addVolumeMounts is called for pods using a PV with filesystem volume mode
func addVolumeMounts(pvc *k8sv1.PersistentVolumeClaim, volumeName string) []k8sv1.VolumeMount {
	volumeMounts := []k8sv1.VolumeMount{
		{
			Name:      volumeName,
			MountPath: DefaultPvcMountPath,
		},
	}
	return volumeMounts
}

// FindPodBysuffix finds the first pod whose name ends with the passed in suffix. Returns an error if multiple matching pods are found.
func FindPodBysuffix(clientSet *kubernetes.Clientset, namespace, suffix, labelSelector string) (*k8sv1.Pod, error) {
	return findPodByCompFunc(clientSet, namespace, suffix, labelSelector, strings.HasSuffix)
}

// FindPodByPrefix finds the first pod whose name starts with the passed in prefix. Returns an error if multiple matching pods are found.
func FindPodByPrefix(clientSet *kubernetes.Clientset, namespace, prefix, labelSelector string) (*k8sv1.Pod, error) {
	return findPodByCompFunc(clientSet, namespace, prefix, labelSelector, strings.HasPrefix)
}
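
// Illustrative usage sketch (not part of the original file): locating a CDI
// importer pod by name prefix. The "importer-" prefix and the label selector
// are assumed values for the sake of the example.
func exampleFindImporter(clientSet *kubernetes.Clientset, namespace string) (*k8sv1.Pod, error) {
	return FindPodByPrefix(clientSet, namespace, "importer-", "app=containerized-data-importer")
}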

func findPodByCompFunc(clientSet *kubernetes.Clientset, namespace, prefix, labelSelector string, compFunc func(string, string) bool) (*k8sv1.Pod, error) {
	var result k8sv1.Pod
	var foundPod bool
	err := wait.PollImmediate(2*time.Second, podCreateTime, func() (bool, error) {
		podList, err := clientSet.CoreV1().Pods(namespace).List(metav1.ListOptions{
			LabelSelector: labelSelector,
		})
		if err == nil {
			for _, pod := range podList.Items {
				if compFunc(pod.Name, prefix) {
					if !foundPod {
						foundPod = true
						result = pod
					} else {
						fmt.Fprintf(ginkgo.GinkgoWriter, "INFO: First pod name %s in namespace %s\n", result.Name, result.Namespace)
						fmt.Fprintf(ginkgo.GinkgoWriter, "INFO: Second pod name %s in namespace %s\n", pod.Name, pod.Namespace)
						return true, fmt.Errorf("multiple pods matching %q in namespace %q", prefix, namespace)
					}
				}
			}
		}
		return foundPod, nil
	})
	if !foundPod {
		return nil, fmt.Errorf("unable to find pod matching %s", prefix)
	}
	return &result, err
}

func newExecutorPodWithPVC(podName string, pvc *k8sv1.PersistentVolumeClaim) *k8sv1.Pod {
	return NewPodWithPVC(podName, "sleep 30; echo I am an executor pod;", pvc)
}

// WaitTimeoutForPodReady waits for the given pod to be created and ready
func WaitTimeoutForPodReady(clientSet *kubernetes.Clientset, podName, namespace string, timeout time.Duration) error {
	return WaitTimeoutForPodStatus(clientSet, podName, namespace, k8sv1.PodRunning, timeout)
}

// WaitTimeoutForPodSucceeded waits for pod to succeed
func WaitTimeoutForPodSucceeded(clientSet *kubernetes.Clientset, podName, namespace string, timeout time.Duration) error {
	return WaitTimeoutForPodStatus(clientSet, podName, namespace, k8sv1.PodSucceeded, timeout)
}

// WaitTimeoutForPodFailed waits for pod to fail
func WaitTimeoutForPodFailed(clientSet *kubernetes.Clientset, podName, namespace string, timeout time.Duration) error {
	return WaitTimeoutForPodStatus(clientSet, podName, namespace, k8sv1.PodFailed, timeout)
}

// WaitTimeoutForPodStatus waits for the given pod to be created and reach the expected status
func WaitTimeoutForPodStatus(clientSet *kubernetes.Clientset, podName, namespace string, status k8sv1.PodPhase, timeout time.Duration) error {
	return wait.PollImmediate(2*time.Second, timeout, podStatus(clientSet, podName, namespace, status))
}
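
// Illustrative sketch (not part of the original file): running a one-shot
// command pod to completion. The pod name, "cdi-test" namespace, and
// five-minute timeout are assumed values.
func exampleRunToCompletion(clientSet *kubernetes.Clientset, pvc *k8sv1.PersistentVolumeClaim) error {
	pod, err := CreatePod(clientSet, "cdi-test", NewPodWithPVC("example-oneshot", "ls /pvc", pvc))
	if err != nil {
		return err
	}
	return WaitTimeoutForPodSucceeded(clientSet, pod.Name, "cdi-test", 5*time.Minute)
}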

func podStatus(clientSet *kubernetes.Clientset, podName, namespace string, status k8sv1.PodPhase) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := clientSet.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
		if err != nil {
			if k8serrors.IsNotFound(err) {
				return false, nil
			}
			return false, err
		}
		fmt.Fprintf(ginkgo.GinkgoWriter, "INFO: Checking POD %s phase: %s\n", podName, string(pod.Status.Phase))
		return pod.Status.Phase == status, nil
	}
}

// PodGetNode returns the node on which a given pod is executing
func PodGetNode(clientSet *kubernetes.Clientset, podName, namespace string) (string, error) {
	pod, err := clientSet.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return pod.Spec.NodeName, nil
}

// WaitPodDeleted waits for a pod to no longer exist
// returns whether the pod is deleted along with any error
func WaitPodDeleted(clientSet *kubernetes.Clientset, podName, namespace string, timeout time.Duration) (bool, error) {
	var result bool
	err := wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		_, err := clientSet.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
		if err != nil {
			if k8serrors.IsNotFound(err) {
				result = true
				return true, nil
			}
			return false, err
		}
		return false, nil
	})
	return result, err
}
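
// Illustrative sketch (not part of the original file): tearing a pod down and
// blocking until the API server no longer knows about it.
func exampleDeleteAndWait(clientSet *kubernetes.Clientset, pod *k8sv1.Pod, namespace string) error {
	if err := DeletePod(clientSet, pod, namespace); err != nil {
		return err
	}
	deleted, err := WaitPodDeleted(clientSet, pod.Name, namespace, podDeleteTime)
	if err != nil {
		return err
	}
	if !deleted {
		return fmt.Errorf("pod %s still present after timeout", pod.Name)
	}
	return nil
}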

// IsExpectedNode waits to check if the specified pod is scheduled on the specified node
func IsExpectedNode(clientSet *kubernetes.Clientset, nodeName, podName, namespace string, timeout time.Duration) error {
	return wait.PollImmediate(2*time.Second, timeout, isExpectedNode(clientSet, nodeName, podName, namespace))
}
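
// Illustrative sketch (not part of the original file): pinning an executor pod
// to a node and verifying the scheduler honored the node selector. The
// "cdi-test" namespace is an assumed value.
func examplePinnedExecutor(clientSet *kubernetes.Clientset, pvc *k8sv1.PersistentVolumeClaim, nodeName string) error {
	pod, err := CreateExecutorPodWithPVCSpecificNode(clientSet, "example-pinned", "cdi-test", pvc, nodeName)
	if err != nil {
		return err
	}
	return IsExpectedNode(clientSet, nodeName, pod.Name, "cdi-test", PodWaitForTime)
}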

// isExpectedNode returns true if the specified pod is running on the specified nodeName, otherwise false
func isExpectedNode(clientSet *kubernetes.Clientset, nodeName, podName, namespace string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := clientSet.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
		if err != nil {
			if k8serrors.IsNotFound(err) {
				return false, nil
			}
			return false, err
		}
		fmt.Fprintf(ginkgo.GinkgoWriter, "INFO: Checking Node name: %s\n", pod.Spec.NodeName)
		return pod.Spec.NodeName == nodeName, nil
	}
}

// GetSchedulableNode returns the name of a schedulable node from the given node list, or nil if none is found
func GetSchedulableNode(nodes *k8sv1.NodeList) *string {
	for _, node := range nodes.Items {
		if node.Spec.Taints == nil {
			return &node.Name
		}
		schedulableNode := true
		for _, taint := range node.Spec.Taints {
			if taint.Effect == k8sv1.TaintEffectNoSchedule {
				schedulableNode = false
				break
			}
		}
		if schedulableNode {
			return &node.Name
		}
	}
	return nil
}
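
// Illustrative sketch (not part of the original file): combining
// GetSchedulableNode with a node-pinned executor pod. The "cdi-test"
// namespace and pod name are assumed values.
func exampleScheduleOnFirstFreeNode(clientSet *kubernetes.Clientset, nodes *k8sv1.NodeList, pvc *k8sv1.PersistentVolumeClaim) (*k8sv1.Pod, error) {
	nodeName := GetSchedulableNode(nodes)
	if nodeName == nil {
		return nil, fmt.Errorf("no schedulable node found")
	}
	return CreateExecutorPodWithPVCSpecificNode(clientSet, "example-scheduled", "cdi-test", pvc, *nodeName)
}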