Update kubevirtci images to refresh certificates. (#973)

Signed-off-by: Alexander Wels <awels@redhat.com>
Alexander Wels 2019-09-24 19:08:24 -04:00 committed by GitHub
parent 4d79201788
commit f8dad13db6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
30 changed files with 171 additions and 285 deletions

View File

@ -1,11 +0,0 @@
apiVersion: local.storage.openshift.io/v1
kind: LocalVolume
metadata:
name: local-disks
spec:
storageClassDevices:
- storageClassName: local-sc
volumeMode: Filesystem
fsType: ext4
devicePaths:
- /mnt/local-storage/local-sc

View File

@ -1,13 +0,0 @@
#!/usr/bin/env bash
if [ ! -e /mnt/local-storage/local/disk1 ]; then
# Create local-volume directories
for i in {1..10}
do
sudo mkdir -p /var/local/kubevirt-storage/local-volume/disk${i}
sudo mkdir -p /mnt/local-storage/local-sc/disk${i}
sudo mount --bind /var/local/kubevirt-storage/local-volume/disk${i} /mnt/local-storage/local-sc/disk${i}
done
sudo chmod -R 777 /var/local/kubevirt-storage/local-volume
# Setup selinux permissions to local volume directories.
sudo chcon -R unconfined_u:object_r:svirt_sandbox_file_t:s0 /mnt/local-storage/
fi

View File

@ -1,35 +0,0 @@
apiVersion: operators.coreos.com/v1alpha2
kind: OperatorGroup
metadata:
name: local-operator-group
namespace: local-storage
spec:
targetNamespaces:
- local-storage
---
apiVersion: operators.coreos.com/v1alpha1
kind: CatalogSource
metadata:
name: local-storage-manifests
namespace: local-storage
spec:
sourceType: grpc
image: quay.io/gnufied/local-registry:v4.2.0
displayName: Local Storage Operator
publisher: Red Hat
description: An operator to manage local volumes
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
name: local-storage-subscription
namespace: local-storage
spec:
channel: stable
name: local-storage-operator
source: local-storage-manifests
sourceNamespace: local-storage

View File

@ -7,44 +7,7 @@ function seed_images(){
}
function configure_local_storage() {
#Check if we have already configured local storage, if so skip this step.
NS="$(_kubectl get namespace local-storage --no-headers -o custom-columns=name:.metadata.name --ignore-not-found)"
if [ "$NS" == "" ]; then
# local storage namespace doesn't exist, assume that we need to install local storage.
nodes=("master-0" "worker-0")
for n in "${nodes[@]}"
do
./cluster-up/ssh.sh $n < cluster-sync/$KUBEVIRT_PROVIDER/create-local-storage-volumes.sh
done
#Create the local-storage namespace
_kubectl new-project local-storage
#Create the olm provisioner operator
_kubectl create -f cluster-sync/$KUBEVIRT_PROVIDER/local-storage-operator.yaml
set +e
_kubectl get LocalVolume
while [ $? == 1 ]
do
sleep 5
_kubectl get LocalVolume
done
#Create the cr object.
_kubectl create -f cluster-sync/$KUBEVIRT_PROVIDER/create-local-storage-cr.yaml
SC="$(_kubectl get sc local-sc --no-headers -o custom-columns=name:.metadata.name --ignore-not-found)"
while [ "$SC" == "" ]
do
sleep 5
SC="$(_kubectl get sc local-sc --no-headers -o custom-columns=name:.metadata.name --ignore-not-found)"
done
#Set the default storage class.
_kubectl patch storageclass local-sc -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
#Switch back to default project
_kubectl project default
set -e
fi
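# Mark the cluster-provided "local" storage class as the default.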
_kubectl patch storageclass local -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}

View File

@ -51,7 +51,8 @@ install_cdi
wait_cdi_crd_installed $CDI_INSTALL_TIMEOUT
_kubectl apply -f "./_out/manifests/release/cdi-cr.yaml"
_kubectl wait cdis.cdi.kubevirt.io/cdi --for=condition=Available --timeout=240s
echo "Waiting 480 seconds for CDI to become available"
_kubectl wait cdis.cdi.kubevirt.io/cdi --for=condition=Available --timeout=480s
# If we are upgrading, verify our current value.
if [ ! -z $UPGRADE_FROM ]; then

View File

@ -3,7 +3,7 @@
Provides a pre-deployed Kubernetes with version 1.11.0 purely in docker
containers with qemu. The provided VMs are completely ephemeral and are
recreated on every cluster restart. The KubeVirt containers are built on the
local machine and are the pushed to a registry which is exposed at
local machine and are then pushed to a registry which is exposed at
`localhost:5000`.
## Bringing the cluster up

View File

@ -3,7 +3,7 @@
Provides a pre-deployed Kubernetes with version 1.13.3 purely in docker
containers with qemu. The provided VMs are completely ephemeral and are
recreated on every cluster restart. The KubeVirt containers are built on the
local machine and are the pushed to a registry which is exposed at
local machine and are then pushed to a registry which is exposed at
`localhost:5000`.
## Bringing the cluster up

View File

@ -0,0 +1,45 @@
# Kubernetes 1.14.6 in ephemeral containers
Provides a pre-deployed Kubernetes with version 1.14.6 purely in docker
containers with qemu. The provided VMs are completely ephemeral and are
recreated on every cluster restart. The KubeVirt containers are built on the
local machine and are then pushed to a registry which is exposed at
`localhost:5000`.
## Bringing the cluster up
```bash
export KUBEVIRT_PROVIDER=k8s-1.14.6
export KUBEVIRT_NUM_NODES=2 # master + one node
make cluster-up
```
The cluster can be accessed as usual:
```bash
$ cluster/kubectl.sh get nodes
NAME STATUS ROLES AGE VERSION
node01 NotReady master 31s v1.14.6
node02 NotReady <none> 5s v1.14.6
```
## Bringing the cluster down
```bash
export KUBEVIRT_PROVIDER=k8s-1.14.6
make cluster-down
```
This destroys the whole cluster. Recreating the cluster is fast, since k8s is
already pre-deployed. The only state which is kept is the state of the local
docker registry.
## Destroying the docker registry state
The docker registry survives a `make cluster-down`. Its state is stored in a
docker volume called `kubevirt_registry`. If the volume gets too big or
contains corrupt data, it can be deleted with
```bash
docker volume rm kubevirt_registry
```
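If you are unsure whether the volume can safely be removed, standard docker
commands can first show whether it exists and how much space it uses:
```bash
docker volume inspect kubevirt_registry
docker system df -v | grep kubevirt_registry
```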

View File

@ -0,0 +1,24 @@
#!/usr/bin/env bash
set -e
image="k8s-1.14.6@sha256:ec29c07c94fce22f37a448cb85ca1fb9215d1854f52573316752d19a1c88bcb3"
source ${KUBEVIRTCI_PATH}/cluster/ephemeral-provider-common.sh
function up() {
${_cli} run $(_add_common_params)
# Copy k8s config and kubectl
${_cli} scp --prefix $provider_prefix /usr/bin/kubectl - >${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl
chmod u+x ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl
${_cli} scp --prefix $provider_prefix /etc/kubernetes/admin.conf - >${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubeconfig
# Set server and disable tls check
export KUBECONFIG=${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubeconfig
${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl config set-cluster kubernetes --server=https://$(_main_ip):$(_port k8s)
${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl config set-cluster kubernetes --insecure-skip-tls-verify=true
# Make sure that local config is correct
prepare_config
}

View File

@ -3,7 +3,7 @@
Provides a pre-deployed Kubernetes with version 1.15.1 purely in docker
containers with qemu. The provided VMs are completely ephemeral and are
recreated on every cluster restart. The KubeVirt containers are built on the
local machine and are the pushed to a registry which is exposed at
local machine and are then pushed to a registry which is exposed at
`localhost:5000`.
## Bringing the cluster up

View File

@ -3,7 +3,7 @@
Provides a pre-deployed Kubernetes with version 1.11.1 purely in docker
containers with qemu. The provided VMs are completely ephemeral and are
recreated on every cluster restart. The KubeVirt containers are built on the
local machine and are the pushed to a registry which is exposed at
local machine and are then pushed to a registry which is exposed at
`localhost:5000`.
## Bringing the cluster up

View File

@ -3,7 +3,7 @@
Provides a pre-deployed Kubernetes with version 1.13.3 purely in docker
containers with qemu. The provided VMs are completely ephemeral and are
recreated on every cluster restart. The KubeVirt containers are built on the
local machine and are the pushed to a registry which is exposed at
local machine and are then pushed to a registry which is exposed at
`localhost:5000`.
## Bringing the cluster up

View File

@ -1,7 +1,7 @@
# K8S 1.14.2 in a Kind cluster
Provides a pre-deployed k8s cluster with version 1.14.2 that runs using [kind](https://github.com/kubernetes-sigs/kind). The cluster is completely ephemeral and is recreated on every cluster restart.
The KubeVirt containers are built on the local machine and are the pushed to a registry which is exposed at
The KubeVirt containers are built on the local machine and are then pushed to a registry which is exposed at
`localhost:5000`.

View File

@ -1,7 +1,7 @@
# K8S 1.14.2 with sriov in a Kind cluster
Provides a pre-deployed k8s cluster with version 1.14.2 that runs using [kind](https://github.com/kubernetes-sigs/kind). The cluster is completely ephemeral and is recreated on every cluster restart.
The KubeVirt containers are built on the local machine and are the pushed to a registry which is exposed at
The KubeVirt containers are built on the local machine and are then pushed to a registry which is exposed at
`localhost:5000`.
This version also expects sriov-enabled NICs on the current host, and will move all the physical and virtual interfaces into the `kind` cluster's master node so that they can be used through multus.

View File

@ -1,33 +1,36 @@
#!/bin/bash -e
set -x
CONTROL_PLANE_CMD="docker exec -it -d ${CLUSTER_NAME}-control-plane"
MANIFESTS_DIR="${KUBEVIRTCI_PATH}/cluster/$KUBEVIRT_PROVIDER/manifests"
OPERATOR_GIT_HASH=b3ab84a316e16df392fbe9e07dbe0667ad075855
function wait_containers_ready {
echo "Waiting for all containers to become ready ..."
kubectl wait --for=condition=Ready pod --all -n kube-system --timeout 12m
# Not using kubectl wait: with the sriov operator the pods get restarted a couple of times,
# so polling the pod status directly is more reliable.
function wait_pods_ready {
while [ -n "$(kubectl get pods --all-namespaces -o'custom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers | grep false)" ]; do
echo "Waiting for all pods to become ready ..."
kubectl get pods --all-namespaces -o'custom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
sleep 10
done
}
function enable_vfio {
mount -o remount,rw /sys #need this to move devices to vfio drivers
for file in $(find /sys/devices/ -name *sriov_totalvfs*); do
pfroot=$(dirname $file)
# enable all enabled VFs. If it fails means that sysfs is not supported on that device and we pass
cat $file > $pfroot/sriov_numvfs || continue
# bind all VFs with vfio
for virtfn in $(ls -d $pfroot/virtfn*); do
pciid=$(basename $(readlink $virtfn))
if [ -e $virtfn/driver/unbind ]; then
echo $pciid > $virtfn/driver/unbind
function deploy_sriov_operator {
OPERATOR_PATH=${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/sriov-network-operator-${OPERATOR_GIT_HASH}
if [[ ! -d $OPERATOR_PATH ]]; then
curl -L https://github.com/openshift/sriov-network-operator/archive/${OPERATOR_GIT_HASH}/sriov-network-operator.tar.gz | tar xz -C ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/
fi
echo $(lspci -n -s $pciid | sed 's/:/ /g' | awk '{print $4 " " $5}') > /sys/bus/pci/drivers/vfio-pci/new_id
done
done
}
enable_vfio
pushd $OPERATOR_PATH
# TODO: right now in CI we need to use the upstream sriov cni in order to have
# https://github.com/intel/sriov-cni/pull/88 available. This can be removed once the feature
# is merged in the openshift sriov operator. We need the latest image since that feature is not tagged yet.
sed -i '/SRIOV_CNI_IMAGE/!b;n;c\ value: nfvpe\/sriov-cni' ./deploy/operator.yaml
# on prow nodes the default shell is dash and some commands do not work
make deploy-setup-k8s SHELL=/bin/bash OPERATOR_EXEC=kubectl
popd
}
#move the pf to the node
mkdir -p /var/run/netns/
@ -36,42 +39,40 @@ ln -sf /proc/$pid/ns/net "/var/run/netns/${CLUSTER_NAME}-control-plane"
sriov_pfs=( /sys/class/net/*/device/sriov_numvfs )
counter=0
for ifs in "${sriov_pfs[@]}"; do
ifs_name="${ifs%%/device/*}"
ifs_name="${ifs_name##*/}"
if [[ "$counter" -eq 0 ]]; then
# These values are used to populate the network definition policy yaml.
# We need the number of VFs because if this value is not set equal to the total,
# the sriov operator will trigger a node reboot on Mellanox NICs to update the firmware.
export FIRST_PF="$ifs_name"
export FIRST_PF_NUM_VFS=$(cat /sys/class/net/"$FIRST_PF"/device/sriov_totalvfs)
fi
ip link set "$ifs_name" netns "${CLUSTER_NAME}-control-plane"
counter=$((counter+1))
done
# deploy multus
kubectl create -f $MANIFESTS_DIR/multus.yaml
# deploy sriov cni
kubectl create -f $MANIFESTS_DIR/sriov-crd.yaml
kubectl create -f $MANIFESTS_DIR/sriov-cni-daemonset.yaml
# deploy sriov device plugin
function configure-sriovdp() {
local cmd_context="${1}" # context to run command e.g. sudo, docker exec
${cmd_context} "mkdir -p /etc/pcidp"
${cmd_context} "$(sriovdp-config-cmd)"
}
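# Generate /etc/pcidp/config.json locally, then print a command that recreates
# the same file wherever it is executed (e.g. inside the control-plane container).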
function sriovdp-config-cmd() {
${KUBEVIRTCI_PATH}/cluster/$KUBEVIRT_PROVIDER/sriovdp_setup.sh
echo "cat <<EOF > /etc/pcidp/config.json
$(cat /etc/pcidp/config.json)
EOF
"
}
configure-sriovdp "${CONTROL_PLANE_CMD} bash -c"
kubectl apply -f $MANIFESTS_DIR/sriovdp-daemonset.yaml
# give them some time to create pods before checking pod status
sleep 10
# make sure all containers are ready
wait_containers_ready
wait_pods_ready
${CONTROL_PLANE_CMD} mount -o remount,rw /sys # kind remounts it as readonly when it starts, we need it to be writeable
deploy_sriov_operator
kubectl label node sriov-control-plane node-role.kubernetes.io/worker=
kubectl label node sriov-control-plane sriov=true
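# Render the node policy with the PF name and VF count exported above.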
envsubst < $MANIFESTS_DIR/network_config_policy.yaml | kubectl create -f -
wait_pods_ready
${CONTROL_PLANE_CMD} chmod 666 /dev/vfio/vfio
${CONTROL_PLANE_CMD} mount -o remount,rw /sys # kind remounts it as readonly when it starts, we need it to be writeable

View File

@ -0,0 +1,16 @@
apiVersion: sriovnetwork.openshift.io/v1
kind: SriovNetworkNodePolicy
metadata:
name: policy-1
namespace: sriov-network-operator
spec:
deviceType: vfio-pci
mtu: 1500
nodeSelector:
sriov: "true"
numVfs: $FIRST_PF_NUM_VFS
nicSelector:
pfNames:
- $FIRST_PF
priority: 90
resourceName: sriov_net

View File

@ -1,42 +0,0 @@
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-sriov-cni-ds-amd64
namespace: kube-system
labels:
tier: node
app: sriov-cni
spec:
template:
metadata:
labels:
tier: node
app: sriov-cni
spec:
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: amd64
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
containers:
- name: kube-sriov-cni
image: nfvpe/sriov-cni:latest
securityContext:
privileged: true
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
volumeMounts:
- name: cnibin
mountPath: /host/opt/cni/bin
volumes:
- name: cnibin
hostPath:
path: /opt/cni/bin

View File

@ -1,19 +0,0 @@
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: sriov-net1
annotations:
k8s.v1.cni.cncf.io/resourceName: intel.com/sriov
spec:
config: '{
"type": "sriov",
"name": "sriov-network",
"ipam": {
"type": "host-local",
"subnet": "10.56.217.0/24",
"routes": [{
"dst": "0.0.0.0/0"
}],
"gateway": "10.56.217.1"
}
}'

View File

@ -1,57 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: sriov-device-plugin
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-sriov-device-plugin-amd64
namespace: kube-system
labels:
tier: node
app: sriovdp
spec:
template:
metadata:
labels:
tier: node
app: sriovdp
spec:
nodeSelector:
beta.kubernetes.io/arch: amd64
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
serviceAccountName: sriov-device-plugin
containers:
- name: kube-sriovdp
image: nfvpe/sriov-device-plugin:latest
args:
- --log-level=10
securityContext:
privileged: false
volumeMounts:
- name: devicesock
mountPath: /var/lib/kubelet/device-plugins/
readOnly: false
- name: sysfs
mountPath: /sys
readOnly: true
- name: config
mountPath: /etc/pcidp/config.json
readOnly: true
volumes:
- name: devicesock
hostPath:
path: /var/lib/kubelet/device-plugins/
- name: sysfs
hostPath:
path: /sys
- name: config
hostPath:
path: /etc/pcidp/config.json

View File

@ -1,7 +1,7 @@
# K8S in a Kind cluster
This folder serves as a base to spin up a k8s cluster using [kind](https://github.com/kubernetes-sigs/kind). The cluster is completely ephemeral and is recreated on every cluster restart.
The KubeVirt containers are built on the local machine and are the pushed to a registry which is exposed at
The KubeVirt containers are built on the local machine and are then pushed to a registry which is exposed at
`localhost:5000`.
A kind cluster must specify:

View File

@ -6,6 +6,7 @@ NODE_CMD="docker exec -it -d "
export KIND_MANIFESTS_DIR="${KUBEVIRTCI_PATH}/cluster/kind/manifests"
export KIND_NODE_CLI="docker exec -it "
export KUBEVIRTCI_PATH
export KUBEVIRTCI_CONFIG_PATH
function _wait_kind_up {
echo "Waiting for kind to be ready ..."

View File

@ -3,7 +3,7 @@
Provides a pre-deployed OKD with version 4.1 purely in docker
containers with libvirt. The provided VMs are completely ephemeral and are
recreated on every cluster restart. The KubeVirt containers are built on the
local machine and are the pushed to a registry which is exposed at
local machine and are then pushed to a registry which is exposed at
`localhost:5000`.
## Bringing the cluster up

View File

@ -2,7 +2,7 @@
set -e
image="okd-4.1@sha256:84ab0dcc301b1e5c801de4f30e9950641f65a39a8756663d02da6de70e22e19e"
image="okd-4.1@sha256:03b08bf66bf33c3ae1a1f63f1184761535513395e7b9c4cd496e22fc1eb2206b"
source ${KUBEVIRTCI_PATH}/cluster/ephemeral-provider-common.sh

View File

@ -3,7 +3,7 @@
Provides a pre-deployed OpenShift Origin with version 3.10.0 with CRI-O support purely in docker
containers with qemu. The provided VMs are completely ephemeral and are
recreated on every cluster restart. The KubeVirt containers are built on the
local machine and are the pushed to a registry which is exposed at
local machine and are then pushed to a registry which is exposed at
`localhost:5000`.
## Bringing the cluster up

View File

@ -3,7 +3,7 @@
Provides a pre-deployed OpenShift Origin with version 3.11.0 purely in docker
containers with qemu. The provided VMs are completely ephemeral and are
recreated on every cluster restart. The KubeVirt containers are built on the
local machine and are the pushed to a registry which is exposed at
local machine and are then pushed to a registry which is exposed at
`localhost:5000`.
## Bringing the cluster up

View File

@ -3,7 +3,7 @@
Provides a pre-deployed OpenShift Origin with version 3.10.0 purely in docker
containers with qemu. The provided VMs are completely ephemeral and are
recreated on every cluster restart. The KubeVirt containers are built on the
local machine and are the pushed to a registry which is exposed at
local machine and are then pushed to a registry which is exposed at
`localhost:5000`.
## Bringing the cluster up

View File

@ -11,3 +11,10 @@ fi
source ${KUBEVIRTCI_PATH}hack/common.sh
source ${KUBEVIRTCI_PATH}cluster/$KUBEVIRT_PROVIDER/provider.sh
up
# check if the environment has a corrupted host
if [[ $(${KUBEVIRTCI_PATH}kubectl.sh get nodes | grep localhost) != "" ]]; then
echo "The environment has a corrupted host"
exit 1
fi

View File

@ -23,11 +23,11 @@ source hack/build/common.sh
parseTestOpts "${@}"
if [ -f "${TESTS_OUT_DIR}/tests.test" ]; then
test_command="${TESTS_OUT_DIR}/tests.test -test.timeout 120m ${test_args}"
test_command="${TESTS_OUT_DIR}/tests.test -test.timeout 180m ${test_args}"
echo "${test_command}"
(cd ${CDI_DIR}/tests; ${test_command})
else
test_command="go test -v -coverprofile=.coverprofile -test.timeout 120m ${pkgs} ${test_args:+-args $test_args}"
test_command="go test -v -coverprofile=.coverprofile -test.timeout 180m ${pkgs} ${test_args:+-args $test_args}"
echo "${test_command}"
${test_command}
fi

View File

@ -49,7 +49,7 @@ ${CODEGEN_PKG}/generate-groups.sh "client,informer,lister" \
${SCRIPT_ROOT}/bin/openapi-spec-generator > ${SCRIPT_ROOT}/api/openapi-spec/swagger.json
# the kubevirtci commit hash to vendor from
kubevirtci_git_hash=93616a62834cc35d1fa74b118f23320408038952
kubevirtci_git_hash=db8c24bf830bb927f01829e6c9f083627fe6b832
# remove previous cluster-up dir entirely before vendoring
rm -rf cluster-up

View File

@ -148,6 +148,11 @@ var _ = Describe("Validate creating multiple clones of same source Data Volume",
By("Calculating the md5sum of the source data volume")
md5sum, err := f.RunCommandAndCaptureOutput(utils.PersistentVolumeClaimFromDataVolume(sourceDv), "md5sum "+testBaseDir)
retry := 0
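// Retry the checksum a few times in case the exec into the source pod fails transiently.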
for err != nil && retry < 10 {
retry++
md5sum, err = f.RunCommandAndCaptureOutput(utils.PersistentVolumeClaimFromDataVolume(sourceDv), "md5sum "+testBaseDir)
}
Expect(err).ToNot(HaveOccurred())
fmt.Fprintf(GinkgoWriter, "INFO: MD5SUM for source is: %s\n", md5sum[:32])