#!/usr/bin/env bash

set -e

source cluster-sync/install.sh

ROOK_CEPH_VERSION=${ROOK_CEPH_VERSION:-v1.1.4}

function seed_images(){
  container=""
  container_alias=""
  images="${@:-${DOCKER_IMAGES}}"

  for arg in $images; do
      name=$(basename $arg)
      container="${container} registry:5000/${name}:${DOCKER_TAG}"
  done

  # We don't need to seed the nodes, but in the case of the default dev setup we'll just leave this here
  for i in $(seq 1 ${KUBEVIRT_NUM_NODES}); do
      ./cluster-up/ssh.sh "node$(printf "%02d" ${i})" "echo \"${container}\" | xargs \-\-max-args=1 sudo docker pull"
      # Temporary until image is updated with provisioner that sets this field
      # This field is required by buildah tool
      ./cluster-up/ssh.sh "node$(printf "%02d" ${i})" "sudo sysctl \-w user.max_user_namespaces=1024"
  done
}

function verify() {
  echo 'Wait until all nodes are ready'
  until [[ $(_kubectl get nodes --no-headers | wc -l) -eq $(_kubectl get nodes --no-headers | grep " Ready" | wc -l) ]]; do
    sleep 1
  done
  echo "cluster nodes are ready!"
}

function configure_storage() {
  echo "Storage already configured ..."
}

function configure_hpp() {
  # Prepare a host directory on every node for the hostpath provisioner and relabel it for container access.
  for i in $(seq 1 ${KUBEVIRT_NUM_NODES}); do
    ./cluster-up/ssh.sh "node$(printf "%02d" ${i})" "sudo mkdir -p /var/hpvolumes"
    ./cluster-up/ssh.sh "node$(printf "%02d" ${i})" "sudo chcon -t container_file_t -R /var/hpvolumes"
  done
  # Deploy the latest hostpath-provisioner-operator release and make its storage class the default.
  HPP_RELEASE=$(curl -s https://github.com/kubevirt/hostpath-provisioner-operator/releases/latest | grep -o "v[0-9]\.[0-9]*\.[0-9]*")
  _kubectl apply -f https://github.com/kubevirt/hostpath-provisioner-operator/releases/download/$HPP_RELEASE/namespace.yaml
  _kubectl apply -f https://github.com/kubevirt/hostpath-provisioner-operator/releases/download/$HPP_RELEASE/operator.yaml -n hostpath-provisioner
  _kubectl apply -f https://github.com/kubevirt/hostpath-provisioner-operator/releases/download/$HPP_RELEASE/hostpathprovisioner_cr.yaml -n hostpath-provisioner
  _kubectl apply -f https://github.com/kubevirt/hostpath-provisioner-operator/releases/download/$HPP_RELEASE/storageclass-wffc.yaml
  _kubectl patch storageclass hostpath-provisioner -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}

function configure_ceph() {
  # Configure ceph storage.
  _kubectl apply -f ./cluster-sync/external-snapshotter
  _kubectl apply -f ./cluster-sync/rook-ceph/common.yaml
  # Apply the SCC only on clusters that support SecurityContextConstraints (OpenShift).
  if _kubectl get securitycontextconstraints; then
    _kubectl apply -f ./cluster-sync/rook-ceph/scc.yaml
  fi
  _kubectl apply -f ./cluster-sync/rook-ceph/operator.yaml
  _kubectl apply -f ./cluster-sync/rook-ceph/cluster.yaml
  _kubectl apply -f ./cluster-sync/rook-ceph/pool.yaml

  # Wait for ceph, rechecking every 5s and giving up after 120 attempts (10 minutes).
  until _kubectl get cephblockpools -n rook-ceph replicapool -o jsonpath='{.status.phase}' | grep Ready; do
    ((count++)) && ((count == 120)) && echo "Ceph not ready in time" && exit 1
    if ! ((count % 6)); then
      _kubectl get pods -n rook-ceph
    fi
    echo "Waiting for Ceph to be Ready, sleeping 5s and rechecking"
    sleep 5
  done

  _kubectl patch storageclass rook-ceph-block -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}

function configure_nfs() {
  # Configure static nfs service and storage class, so we can create NFS PVs during test run.
  _kubectl apply -f ./cluster-sync/nfs/nfs-sc.yaml
  _kubectl apply -f ./cluster-sync/nfs/nfs-service.yaml -n $CDI_NAMESPACE
  _kubectl apply -f ./cluster-sync/nfs/nfs-server.yaml -n $CDI_NAMESPACE
  _kubectl patch storageclass nfs -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}

function configure_ember_lvm() {
  _kubectl apply -f ./cluster-sync/external-snapshotter
  _kubectl apply -f ./cluster-sync/ember/loop_back.yaml -n ember-csi-lvm
  set +e

  # Wait (up to 60s) for the loop-back device pod to be scheduled to a node.
  loopdeviceNode=$(_kubectl get pods -n ember-csi-lvm -l app=loop-back-lvm -o=jsonpath={.items[0].spec.nodeName})
  echo "loop back device node [$loopdeviceNode]"
  retry_counter=0
  while [[ $loopdeviceNode == "" ]] && [[ $retry_counter -lt 60 ]]; do
    retry_counter=$((retry_counter + 1))
    loopdeviceNode=$(_kubectl get pods -n ember-csi-lvm -l app=loop-back-lvm -o=jsonpath={.items[0].spec.nodeName})
    echo "Sleep 1s, waiting for loopback device pod node to be found [$loopdeviceNode]"
    sleep 1
  done
  echo "Loop back device pod is running on node $loopdeviceNode"

  # Wait (up to 60s) for the loop-back device pod to get an IP.
  podIp=$(_kubectl get pods -n ember-csi-lvm -l app=loop-back-lvm -o=jsonpath={.items[0].status.podIP})
  echo "loopback podIP: $podIp"
  retry_counter=0
  while [[ $podIp == "" ]] && [[ $retry_counter -lt 60 ]]; do
    retry_counter=$((retry_counter + 1))
    sleep 1
    podIp=$(_kubectl get pods -n ember-csi-lvm -l app=loop-back-lvm -o=jsonpath={.items[0].status.podIP})
    echo "loopback podIP: $podIp"
  done

  # Wait (up to 60s) for the loop-back device pod to complete (phase Succeeded).
  success=$(_kubectl get pod -n ember-csi-lvm -l app=loop-back-lvm -o=jsonpath={".items[0].status.phase"})
  retry_counter=0
  while [[ $success != "Succeeded" ]] && [[ $retry_counter -lt 60 ]]; do
    retry_counter=$((retry_counter + 1))
    sleep 1
    success=$(_kubectl get pod -n ember-csi-lvm -l app=loop-back-lvm -o=jsonpath={".items[0].status.phase"})
  done
  echo "Loop back device available, starting ember csi controller"
  _kubectl apply -f ./cluster-sync/ember/ember-csi-lvm.yaml -n ember-csi-lvm
  cat <