Update kubevirtci with new rook ceph, 1.32 lanes (#3624)

* Bump kvci to latest (new rook ceph)

Signed-off-by: Alex Kalenyuk <akalenyu@redhat.com>

* Switch testing lanes to 1.32/1.31

Signed-off-by: Alex Kalenyuk <akalenyu@redhat.com>

* Address some common make target failures

This addresses common, annoying flakes in make targets,
or adds logging so they can be debugged.

Signed-off-by: Alex Kalenyuk <akalenyu@redhat.com>
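The pattern behind that last bullet shows up in the script diffs further down (shell tracing plus a sync before post-processing generated files). A minimal sketch pieced together from those diffs, with targetDir standing in for the manifest output directory used there:

```bash
#!/usr/bin/env bash
# Trace every command so CI logs show exactly where a flaky target failed.
set -exuo pipefail

# ... generate manifests into "$targetDir" ...

# Flush pending writes before post-processing the generated files, so the
# cleanup below never runs against a partially written manifest.
sync
find "${targetDir}/" -type f -exec sed -i {} -e '${/^$/d;}' \;
```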

---------

Signed-off-by: Alex Kalenyuk <akalenyu@redhat.com>
Alex Kalenyuk, 2025-03-05 22:21:31 +02:00, committed by GitHub
parent c332ee0d32
commit 8949310eb2
30 changed files with 244 additions and 211 deletions

View File

@@ -18,7 +18,7 @@
#
set -ex
-export TARGET=k8s-1.30
+export TARGET=k8s-1.31
#ensure no hard coded cdi cr in tests.
export RANDOM_CR=true
export KUBEVIRT_STORAGE=rook-ceph-default

View File

@@ -18,7 +18,7 @@
#
set -ex
-export TARGET=k8s-1.30
+export TARGET=k8s-1.31
#ensure no hard coded cdi cr in tests.
export RANDOM_CR=true
export KUBEVIRT_STORAGE=rook-ceph-default

View File

@@ -18,7 +18,7 @@
#
set -ex
-export TARGET=k8s-1.31
+export TARGET=k8s-1.32
export KUBEVIRT_STORAGE=hpp
export KUBEVIRT_DEPLOY_PROMETHEUS=true
export CDI_E2E_FOCUS=Destructive

View File

@@ -18,7 +18,7 @@
#
set -ex
-export TARGET=k8s-1.31
+export TARGET=k8s-1.32
export KUBEVIRT_STORAGE=hpp
export CDI_E2E_SKIP=Destructive
export KUBEVIRT_DEPLOY_ISTIO=true

View File

@@ -18,7 +18,7 @@
#
set -ex
-export TARGET=k8s-1.31
+export TARGET=k8s-1.32
export KUBEVIRT_STORAGE=hpp
export CDI_E2E_SKIP=Destructive
automation/test.sh

View File

@@ -18,7 +18,7 @@
#
set -ex
-export TARGET=k8s-1.31
+export TARGET=k8s-1.32
export KUBEVIRT_DEPLOY_NFS_CSI=true
export KUBEVIRT_STORAGE=nfs
export CDI_E2E_SKIP=Destructive

View File

@@ -18,7 +18,7 @@
#
set -ex
-export TARGET=k8s-1.31
+export TARGET=k8s-1.32
export KUBEVIRT_STORAGE=hpp
export HPP_CLASSIC=true
export CDI_E2E_SKIP=Destructive

View File

@@ -18,7 +18,7 @@
#
set -ex
-export TARGET=k8s-1.30
+export TARGET=k8s-1.31
export KUBEVIRT_STORAGE=hpp
export KUBEVIRT_DEPLOY_PROMETHEUS=true
export CDI_E2E_SKIP=Destructive

View File

@@ -18,7 +18,7 @@
#
set -ex
-export TARGET=k8s-1.31
+export TARGET=k8s-1.32
export KUBEVIRT_STORAGE=hpp
export MULTI_UPGRADE=true
export CDI_E2E_SKIP=Destructive

View File

@@ -114,7 +114,8 @@ function _add_common_params() {
params=" --container-suffix=:$KUBEVIRTCI_CONTAINER_SUFFIX $params"
fi
-if [[ ${KUBEVIRT_SLIM} == "true" ]]; then
+# Currently, the s390x architecture supports only KUBEVIRT_SLIM.
+if [[ ${KUBEVIRT_SLIM} == "true" || $(uname -m) == "s390x" ]]; then
params=" --slim $params"
fi
fi

View File

@@ -1,9 +0,0 @@
#!/usr/bin/env bash
set -e
if [ "${KUBEVIRT_CGROUPV2}" == "true" ]; then
export KUBEVIRT_PROVIDER_EXTRA_ARGS="${KUBEVIRT_PROVIDER_EXTRA_ARGS} --kernel-args='systemd.unified_cgroup_hierarchy=1'"
fi
# shellcheck disable=SC1090
source "${KUBEVIRTCI_PATH}/cluster/k8s-provider-common.sh"

View File

@@ -39,23 +39,6 @@ function wait_for_kwok_ready() {
fi
}
-function configure_cpu_manager() {
-if [ ${KUBEVIRT_CPU_MANAGER_POLICY} == "static" ]; then
-for node in $($kubectl get nodes -l "node-role.kubernetes.io/worker" --no-headers -o custom-columns=":metadata.name" | tr -d '\r'); do
-# FIXME Replace with kubelet config drop ins once all providers are using k8s >= 1.28
-# https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/#kubelet-conf-d
-$kubectl drain ${node}
-$ssh ${node} -- sudo systemctl stop kubelet
-# FIXME ${ssh} is broken when using HereDocs, fix and replace this mess if possible.
-# https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#configuration
-$ssh ${node} -- "sudo rm -f /var/lib/kubelet/cpu_manager_state && sudo echo -e 'cpuManagerPolicy: static\nkubeReserved:\n cpu: \"1\"\n memory: \"1Gi\"\ncpuManagerPolicyOptions:\n full-pcpus-only: \"true\"' | sudo tee -a /var/lib/kubelet/config.yaml && sudo sed -i 's/cpuManagerReconcilePeriod\:\ 0s/cpuManagerReconcilePeriod\:\ 5s/g' /var/lib/kubelet/config.yaml"
-$ssh ${node} -- sudo systemctl start kubelet
-$kubectl label --overwrite node/${node} cpumanager=true
-$kubectl uncordon ${node}
-done
-fi
-}
function up() {
params=$(_add_common_params)
if echo "$params" | grep -q ERROR; then
@@ -90,7 +73,6 @@ function up() {
$kubectl label node -l $label node-role.kubernetes.io/worker=''
configure_prometheus
-configure_cpu_manager
deploy_kwok

View File

@@ -37,6 +37,7 @@ function up() {
cp $KIND_MANIFESTS_DIR/kind.yaml ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml
_add_kubeadm_cpu_manager_config_patch
_add_extra_mounts
+_add_extra_portmapping
export CONFIG_WORKER_CPU_MANAGER=true
kind_up

View File

@@ -0,0 +1,44 @@
# K8S with mdev support in a Kind cluster
Provides a pre-deployed k8s cluster that runs using [kind](https://github.com/kubernetes-sigs/kind) The cluster is completely ephemeral and is recreated on every cluster restart.
The KubeVirt containers are built on the local machine and are then pushed to a registry which is exposed at
`localhost:5000`.
## Bringing the cluster up
The following needs to be executed as root. Please refer to the name of the directory to get the kind version.
```bash
export KUBEVIRT_PROVIDER=kind-x.yz
make cluster-up
```
The cluster can be accessed as usual:
```bash
$ cluster-up/kubectl.sh get nodes
NAME STATUS ROLES AGE
kind-x.yz-control-plane Ready master 6m14s
```
## Bringing the cluster down
```bash
make cluster-down
```
This destroys the whole cluster.
## Setting a custom kind version
In order to use a custom kind image / kind version,
export KIND_NODE_IMAGE, KIND_VERSION before running cluster-up.
For example in order to use kind 0.9.0 (which is based on k8s-1.19.1) use:
```bash
export KIND_NODE_IMAGE="kindest/node:v1.19.1@sha256:98cf5288864662e37115e362b23e4369c8c4a408f99cbc06e58ac30ddc721600"
export KIND_VERSION="0.9.0"
```
This allows users to test or use custom images / different kind versions before making them official.
See https://github.com/kubernetes-sigs/kind/releases for details about node images according to the kind version.
- In order to use `make cluster-down` please make sure the right `CLUSTER_NAME` is exported.
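For instance, tearing down a cluster that was brought up under a non-default name (the provider directory name below is an assumption based on this commit's new kind-1.31 provider; the cluster name is only illustrative):

```bash
# CLUSTER_NAME must match the name that was exported at cluster-up time.
export KUBEVIRT_PROVIDER=kind-1.31      # assumed provider directory name
export CLUSTER_NAME=my-kind-cluster     # illustrative non-default name
make cluster-down
```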

View File

@@ -0,0 +1,47 @@
{
"Description": "DEFAULT",
"UUID": "",
"Version": "v0.56.9",
"ResultsDir": "/tmp/sonobuoy/results",
"Resources": null,
"Filters": {
"Namespaces": ".*",
"LabelSelector": ""
},
"Limits": {
"PodLogs": {
"Namespaces": "kube-system",
"SonobuoyNamespace": true,
"FieldSelectors": [],
"LabelSelector": "",
"Previous": false,
"SinceSeconds": null,
"SinceTime": null,
"Timestamps": false,
"TailLines": null,
"LimitBytes": null
}
},
"QPS": 30,
"Burst": 50,
"Server": {
"bindaddress": "0.0.0.0",
"bindport": 8080,
"advertiseaddress": "",
"timeoutseconds": 21600
},
"Plugins": null,
"PluginSearchPath": [
"./plugins.d",
"/etc/sonobuoy/plugins.d",
"~/sonobuoy/plugins.d"
],
"Namespace": "sonobuoy",
"WorkerImage": "sonobuoy/sonobuoy:v0.56.9",
"ImagePullPolicy": "IfNotPresent",
"ImagePullSecrets": "",
"AggregatorPermissions": "clusterAdmin",
"ServiceAccountName": "sonobuoy-serviceaccount",
"ProgressUpdatesPort": "8099",
"SecurityContextMode": "nonroot"
}

View File

@@ -0,0 +1 @@
kindest/node:v1.31.2@sha256:18fbefc20a7113353c7b75b5c869d7145a6abd6269154825872dc59c1329912e

View File

@@ -0,0 +1,54 @@
#!/usr/bin/env bash
set -e
DEFAULT_CLUSTER_NAME="kind-1.31"
DEFAULT_HOST_PORT=5000
ALTERNATE_HOST_PORT=5001
export CLUSTER_NAME=${CLUSTER_NAME:-$DEFAULT_CLUSTER_NAME}
if [ $CLUSTER_NAME == $DEFAULT_CLUSTER_NAME ]; then
export HOST_PORT=$DEFAULT_HOST_PORT
else
export HOST_PORT=$ALTERNATE_HOST_PORT
fi
function set_kind_params() {
version=$(cat "${KUBEVIRTCI_PATH}/cluster/$KUBEVIRT_PROVIDER/version")
export KIND_VERSION="${KIND_VERSION:-$version}"
image=$(cat "${KUBEVIRTCI_PATH}/cluster/$KUBEVIRT_PROVIDER/image")
export KIND_NODE_IMAGE="${KIND_NODE_IMAGE:-$image}"
}
function configure_registry_proxy() {
[ "$CI" != "true" ] && return
echo "Configuring cluster nodes to work with CI mirror-proxy..."
local -r ci_proxy_hostname="docker-mirror-proxy.kubevirt-prow.svc"
local -r kind_binary_path="${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kind"
local -r configure_registry_proxy_script="${KUBEVIRTCI_PATH}/cluster/kind/configure-registry-proxy.sh"
KIND_BIN="$kind_binary_path" PROXY_HOSTNAME="$ci_proxy_hostname" $configure_registry_proxy_script
}
function up() {
cp $KIND_MANIFESTS_DIR/kind.yaml ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml
_add_kubeadm_cpu_manager_config_patch
_add_extra_mounts
_add_extra_portmapping
export CONFIG_WORKER_CPU_MANAGER=true
kind_up
configure_registry_proxy
# remove the rancher.io kind default storageClass
_kubectl delete sc standard
echo "$KUBEVIRT_PROVIDER cluster '$CLUSTER_NAME' is ready"
}
set_kind_params
source ${KUBEVIRTCI_PATH}/cluster/kind/common.sh

View File

@@ -0,0 +1 @@
0.25.0

View File

@@ -62,6 +62,7 @@ function deploy_sriov() {
function up() {
cp $KIND_MANIFESTS_DIR/kind.yaml ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml
export CONFIG_WORKER_CPU_MANAGER=true
+export CONFIG_TOPOLOGY_MANAGER_POLICY="single-numa-node"
kind_up
configure_registry_proxy

View File

@@ -6,22 +6,22 @@ resources:
- sriov-cni-daemonset.yaml
- sriovdp-daemonset.yaml
- sriovdp-config.yaml
-patchesJson6902:
+patches:
- target:
group: apps
version: v1
kind: DaemonSet
-name: kube-sriov-cni-ds-amd64
+name: kube-sriov-cni-ds
path: patch-node-selector.yaml
- target:
group: apps
version: v1
kind: DaemonSet
-name: kube-sriov-device-plugin-amd64
+name: kube-sriov-device-plugin
path: patch-node-selector.yaml
- target:
group: apps
version: v1
kind: DaemonSet
-name: kube-sriov-device-plugin-amd64
+name: kube-sriov-device-plugin
path: patch-sriovdp-resource-prefix.yaml

View File

@ -1,3 +1,9 @@
- op: add
path: /spec/template/spec/nodeSelector
value: {}
- op: add
path: /spec/template/spec/nodeSelector/$LABEL_KEY
value: "$LABEL_VALUE"
- op: test
path: /spec/template/spec/nodeSelector/$LABEL_KEY
value: "$LABEL_VALUE"

View File

@@ -2,7 +2,7 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
-name: kube-sriov-cni-ds-amd64
+name: kube-sriov-cni-ds
namespace: kube-system
labels:
tier: node
@@ -18,15 +18,16 @@ spec:
tier: node
app: sriov-cni
spec:
-nodeSelector:
-kubernetes.io/arch: amd64
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
+- key: node-role.kubernetes.io/control-plane
+operator: Exists
+effect: NoSchedule
containers:
- name: kube-sriov-cni
-image: ghcr.io/k8snetworkplumbingwg/sriov-cni:v2.7.0
+image: ghcr.io/k8snetworkplumbingwg/sriov-cni:v2.9.0
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false

View File

@@ -9,7 +9,7 @@ metadata:
apiVersion: apps/v1
kind: DaemonSet
metadata:
-name: kube-sriov-device-plugin-amd64
+name: kube-sriov-device-plugin
namespace: kube-system
labels:
tier: node
@@ -26,16 +26,10 @@ spec:
app: sriovdp
spec:
hostNetwork: true
-nodeSelector:
-beta.kubernetes.io/arch: amd64
-tolerations:
-- key: node-role.kubernetes.io/control-plane
-operator: Exists
-effect: NoSchedule
serviceAccountName: sriov-device-plugin
containers:
- name: kube-sriovdp
-image: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:v3.4.0
+image: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:v3.9.0
imagePullPolicy: IfNotPresent
args:
- --log-dir=sriovdp
@@ -51,7 +45,10 @@ spec:
memory: "200Mi"
volumeMounts:
- name: devicesock
-mountPath: /var/lib/kubelet/
+mountPath: /var/lib/kubelet/device-plugins
readOnly: false
+- name: plugins-registry
+mountPath: /var/lib/kubelet/plugins_registry
+readOnly: false
- name: log
mountPath: /var/log
@@ -62,150 +59,10 @@ spec:
volumes:
- name: devicesock
hostPath:
-path: /var/lib/kubelet/
-- name: log
-hostPath:
-path: /var/log
-- name: device-info
-hostPath:
-path: /var/run/k8s.cni.cncf.io/devinfo/dp
-type: DirectoryOrCreate
-- name: config-volume
-configMap:
-name: sriovdp-config
-items:
-- key: config.json
-path: config.json
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
-name: kube-sriov-device-plugin-ppc64le
-namespace: kube-system
-labels:
-tier: node
-app: sriovdp
-spec:
-selector:
-matchLabels:
-name: sriov-device-plugin
-template:
-metadata:
-labels:
-name: sriov-device-plugin
-tier: node
-app: sriovdp
-spec:
-hostNetwork: true
-nodeSelector:
-beta.kubernetes.io/arch: ppc64le
-tolerations:
-- key: node-role.kubernetes.io/control-plane
-operator: Exists
-effect: NoSchedule
-serviceAccountName: sriov-device-plugin
-containers:
-- name: kube-sriovdp
-image: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:latest-ppc64le
-imagePullPolicy: IfNotPresent
-args:
-- --log-dir=sriovdp
-- --log-level=10
-securityContext:
-privileged: true
-resources:
-requests:
-cpu: "250m"
-memory: "40Mi"
-limits:
-cpu: 1
-memory: "200Mi"
-volumeMounts:
-- name: devicesock
-mountPath: /var/lib/kubelet/
-readOnly: false
-- name: log
-mountPath: /var/log
-- name: config-volume
-mountPath: /etc/pcidp
-- name: device-info
-mountPath: /var/run/k8s.cni.cncf.io/devinfo/dp
-volumes:
-- name: devicesock
-hostPath:
-path: /var/lib/kubelet/
-- name: log
-hostPath:
-path: /var/log
-- name: device-info
-hostPath:
-path: /var/run/k8s.cni.cncf.io/devinfo/dp
-type: DirectoryOrCreate
-- name: config-volume
-configMap:
-name: sriovdp-config
-items:
-- key: config.json
-path: config.json
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
-name: kube-sriov-device-plugin-arm64
-namespace: kube-system
-labels:
-tier: node
-app: sriovdp
-spec:
-selector:
-matchLabels:
-name: sriov-device-plugin
-template:
-metadata:
-labels:
-name: sriov-device-plugin
-tier: node
-app: sriovdp
-spec:
-hostNetwork: true
-nodeSelector:
-beta.kubernetes.io/arch: arm64
-tolerations:
-- key: node-role.kubernetes.io/control-plane
-operator: Exists
-effect: NoSchedule
-serviceAccountName: sriov-device-plugin
-containers:
-- name: kube-sriovdp
-image: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:latest-arm64
-imagePullPolicy: IfNotPresent
-args:
-- --log-dir=sriovdp
-- --log-level=10
-securityContext:
-privileged: true
-resources:
-requests:
-cpu: "250m"
-memory: "40Mi"
-limits:
-cpu: 1
-memory: "200Mi"
-volumeMounts:
-- name: devicesock
-mountPath: /var/lib/kubelet/
-readOnly: false
-- name: log
-mountPath: /var/log
-- name: config-volume
-mountPath: /etc/pcidp
-- name: device-info
-mountPath: /var/run/k8s.cni.cncf.io/devinfo/dp
-volumes:
-- name: devicesock
-hostPath:
-path: /var/lib/kubelet/
+path: /var/lib/kubelet/device-plugins
+- name: plugins-registry
+hostPath:
+path: /var/lib/kubelet/plugins_registry
- name: log
hostPath:
path: /var/log

View File

@@ -69,6 +69,11 @@ export CRI_BIN=${CRI_BIN:-$(detect_cri)}
fi
${kubectl} wait -n kubevirt kv kubevirt --for condition=Available --timeout 15m
+if [[ "$KUBEVIRT_PROVIDER" =~ "sriov" ]]; then
+# Some SR-IOV tests require Kubevirt CPUManager feature
+${kubectl} patch kubevirts -n kubevirt kubevirt --type=json -p='[{"op": "replace", "path": "/spec/configuration/developerConfiguration/featureGates","value": ["CPUManager"]}]'
+fi
echo "Run latest nighly build Kubevirt conformance tests"
kubevirt_plugin="--plugin ${nightly_build_base_url}/${latest}/conformance.yaml"
SONOBUOY_EXTRA_ARGS="${SONOBUOY_EXTRA_ARGS} ${kubevirt_plugin}"

View File

@@ -12,6 +12,11 @@ CONFIG_WORKER_CPU_MANAGER=${CONFIG_WORKER_CPU_MANAGER:-false}
# avaliable value: ipv4, ipv6, dual
IPFAMILY=${IPFAMILY}
+# setup the port mapping for kind cluster, this is needed for some e2e tests
+# KIND_PORT_MAPPING=cluster_port:host_port e.g. KIND_PORT_MAPPING=30001:30002
+# only one port mapping allowed
+KIND_PORT_MAPPING=${KIND_PORT_MAPPING}
# check CPU arch
PLATFORM=$(uname -m)
case ${PLATFORM} in
@@ -93,7 +98,11 @@ function _insecure-registry-config-cmd() {
# this works since the nodes use the same names as containers
function _ssh_into_node() {
-${CRI_BIN} exec -it "$1" bash
+if [[ $2 != "" ]]; then
+${CRI_BIN} exec "$@"
+else
+${CRI_BIN} exec -it "$1" bash
+fi
}
function _run_registry() {
@@ -259,6 +268,22 @@ EOF
fi
}
+function _add_extra_portmapping() {
+if [[ "$KIND_PORT_MAPPING" != "" ]]; then
+container_port=$(echo "$KIND_PORT_MAPPING" | awk -F: '{print $1}')
+host_port=$(echo "$KIND_PORT_MAPPING" | awk -F: '{print $2}')
+if [[ -z "$container_port" || -z "$host_port" ]]; then
+echo "Invalid KIND_PORT_MAPPING format. Expected 'container_port:host_port'."
+exit 1
+fi
+cat <<EOF >> ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml
+extraPortMappings:
+- containerPort: $container_port
+hostPort: $host_port
+EOF
+fi
+}
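A usage sketch for the new port-mapping hook; the port numbers are only illustrative, and the rendered YAML is pieced together from the heredoc above rather than captured output:

```bash
# Map port 30001 inside the kind node to host port 30002 (example values).
export KIND_PORT_MAPPING=30001:30002
make cluster-up

# _add_extra_portmapping then appends roughly this to the generated kind.yaml:
#   extraPortMappings:
#   - containerPort: 30001
#     hostPort: 30002
```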
function _add_kubeadm_cpu_manager_config_patch() {
cat << EOF >> ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml
kubeadmConfigPatches:
@@ -266,7 +291,6 @@ function _add_kubeadm_cpu_manager_config_patch() {
kind: JoinConfiguration
nodeRegistration:
kubeletExtraArgs:
-"feature-gates": "CPUManager=true"
"cpu-manager-policy": "static"
"kube-reserved": "cpu=500m"
"system-reserved": "cpu=500m"
@@ -286,11 +310,17 @@ EOF
done
}
-function _add_kubeadm_config_patches() {
-if [ $KUBEVIRT_WITH_KIND_ETCD_IN_MEMORY == "true" ]; then
-cat <<EOF >> ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml
+function _add_kubeadm_config_patches_header() {
+cat <<EOF >> ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml
kubeadmConfigPatches:
- |
+EOF
+}
+function _add_kubeadm_config_patches() {
+_add_kubeadm_config_patches_header
+if [ $KUBEVIRT_WITH_KIND_ETCD_IN_MEMORY == "true" ]; then
+cat <<EOF >> ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml
kind: ClusterConfiguration
metadata:
name: config
@@ -298,8 +328,16 @@ kubeadmConfigPatches:
local:
dataDir: $ETCD_IN_MEMORY_DATA_DIR
EOF
-echo "KIND cluster etcd data will be mounted to RAM on kind nodes: $ETCD_IN_MEMORY_DATA_DIR"
-fi
+echo "KIND cluster etcd data will be mounted to RAM on kind nodes: $ETCD_IN_MEMORY_DATA_DIR"
+fi
+if [[ -n "$CONFIG_TOPOLOGY_MANAGER_POLICY" ]]; then
+cat <<EOF >> ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml
+---
+kind: KubeletConfiguration
+topologyManagerPolicy: ${CONFIG_TOPOLOGY_MANAGER_POLICY}
+---
+EOF
+fi
}
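A sketch of what the new topology-manager branch renders; the policy value mirrors the sriov provider change earlier in this commit, and the YAML layout is approximate rather than verbatim output:

```bash
# Any kubelet topology manager policy works; the sriov provider sets this one.
export CONFIG_TOPOLOGY_MANAGER_POLICY="single-numa-node"

# _add_kubeadm_config_patches_header plus the branch above append roughly:
#   kubeadmConfigPatches:
#   - |
#     ---
#     kind: KubeletConfiguration
#     topologyManagerPolicy: single-numa-node
#     ---
```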
function _setup_ipfamily() {
@@ -314,7 +352,9 @@ EOF
function _prepare_kind_config() {
_add_workers
-_add_kubeadm_config_patches
+if [[ "$KUBEVIRT_WITH_KIND_ETCD_IN_MEMORY" == "true" || -n "$CONFIG_TOPOLOGY_MANAGER_POLICY" ]]; then
+_add_kubeadm_config_patches
+fi
_setup_ipfamily
echo "Final KIND config:"
cat ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml

View File

@@ -40,7 +40,6 @@ KUBEVIRT_CUSTOM_CDI_VERSION=${KUBEVIRT_CUSTOM_CDI_VERSION}
KUBEVIRT_SWAP_ON=${KUBEVIRT_SWAP_ON:-false}
KUBEVIRT_KSM_ON=${KUBEVIRT_KSM_ON:-false}
KUBEVIRT_UNLIMITEDSWAP=${KUBEVIRT_UNLIMITEDSWAP:-false}
-KUBEVIRT_CPU_MANAGER_POLICY=${KUBEVIRT_CPU_MANAGER_POLICY:-none}
KUBVIRT_WITH_CNAO_SKIP_CONFIG=${KUBVIRT_WITH_CNAO_SKIP_CONFIG:-false}
# If on a developer setup, expose ocp on 8443, so that the openshift web console can be used (the port is important because of auth redirects)
@@ -55,4 +54,4 @@ provider_prefix=${JOB_NAME:-${KUBEVIRT_PROVIDER}}${EXECUTOR_NUMBER}
job_prefix=${JOB_NAME:-kubevirt}${EXECUTOR_NUMBER}
mkdir -p $KUBEVIRTCI_CONFIG_PATH/$KUBEVIRT_PROVIDER
-KUBEVIRTCI_TAG=2411261507-4d4c8fe3
+KUBEVIRTCI_TAG=2502072021-f3ed3dc0

View File

@@ -12,7 +12,7 @@
#See the License for the specific language governing permissions and
#limitations under the License.
-set -euo pipefail
+set -exuo pipefail
script_dir="$(cd "$(dirname "$0")" && pwd -P)"
source "${script_dir}"/common.sh

View File

@@ -12,7 +12,7 @@
#See the License for the specific language governing permissions and
#limitations under the License.
-set -euo pipefail
+set -exuo pipefail
script_dir="$(cd "$(dirname "$0")" && pwd -P)"
source "${script_dir}"/common.sh

View File

@@ -70,6 +70,7 @@ function generateResourceManifest() {
-pull-policy="{{ pull_policy }}" \
-namespace="{{ cdi_namespace }}"
) 1>>"${targetDir}/"$manifestNamej2
+sync
# Remove empty lines at the end of files which are added by go templating
find ${targetDir}/ -type f -exec sed -i {} -e '${/^$/d;}' \;
@@ -156,6 +157,7 @@ function populateResourceManifest() {
-namespace="{{ cdi_namespace }}" \
-generated-manifests-path=${generatedManifests}
) 1>>"${tmplTargetDir}/"$outfile".j2"
+sync
# Remove empty lines at the end of files which are added by go templating
find ${targetDir}/ -type f -exec sed -i {} -e '${/^$/d;}' \;

View File

@@ -17,7 +17,7 @@
SCRIPT_ROOT="$(cd "$(dirname $0)/../" && pwd -P)"
# the kubevirtci tag to vendor from (https://github.com/kubevirt/kubevirtci/tags)
-kubevirtci_release_tag=2411261507-4d4c8fe3
+kubevirtci_release_tag=2502072021-f3ed3dc0
# remove previous cluster-up dir entirely before vendoring
rm -rf ${SCRIPT_ROOT}/cluster-up