Update external provider to allow for hpp and ceph storage. (#1318)

* Add support for external openshift cluster, in particular CRC.
make cluster-sync, and functional tests should all work.
Added documentation on how to enable CRC to work.

Signed-off-by: Alexander Wels <awels@redhat.com>

* Updates based on review.

Signed-off-by: Alexander Wels <awels@redhat.com>

* Changes based on review comments:
- removed registry in favor of making people use an external registry.
- added ceph for external provider.

Signed-off-by: Alexander Wels <awels@redhat.com>

* Fix review comments

Signed-off-by: Alexander Wels <awels@redhat.com>
This commit is contained in:
Alexander Wels 2020-08-06 09:41:52 -04:00 committed by GitHub
parent dbab72c93e
commit c5f8d92d3b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 184 additions and 27 deletions

View File

@ -49,12 +49,9 @@ gazelle_dependencies()
# bazel docker rules
http_archive(
name = "io_bazel_rules_docker",
sha256 = "d0b345518236e240d513fe0f59f6d3da274f035480273a7eb00af7d216ae2a06",
strip_prefix = "rules_docker-0.11.1",
urls = [
"https://github.com/bazelbuild/rules_docker/releases/download/v0.11.1/rules_docker-v0.11.1.tar.gz",
"https://storage.googleapis.com/builddeps/d0b345518236e240d513fe0f59f6d3da274f035480273a7eb00af7d216ae2a06",
],
sha256 = "4521794f0fba2e20f3bf15846ab5e01d5332e587e9ce81629c7f96c793bb7036",
strip_prefix = "rules_docker-0.14.4",
urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.14.4/rules_docker-v0.14.4.tar.gz"],
)
load(
@ -75,6 +72,9 @@ load("@io_bazel_rules_docker//repositories:deps.bzl", container_deps = "deps")
container_deps()
load("@io_bazel_rules_docker//repositories:pip_repositories.bzl", "pip_deps")
pip_deps()
# RPM rules
http_archive(
name = "io_bazel_rules_container_rpm",

View File

@ -53,6 +53,9 @@ function configure_ceph() {
# Configure ceph storage.
_kubectl apply -f ./cluster-sync/external-snapshotter
_kubectl apply -f ./cluster-sync/rook-ceph/common.yaml
if _kubectl get securitycontextconstraints; then
_kubectl apply -f ./cluster-sync/rook-ceph/scc.yaml
fi
_kubectl apply -f ./cluster-sync/rook-ceph/operator.yaml
_kubectl apply -f ./cluster-sync/rook-ceph/cluster.yaml
_kubectl apply -f ./cluster-sync/rook-ceph/pool.yaml

View File

@ -1,6 +1,6 @@
#!/usr/bin/env bash
source cluster-sync/install.sh
source cluster-sync/ephemeral_provider.sh
function _kubectl(){
kubectl "$@"
@ -16,11 +16,25 @@ function verify() {
function up() {
echo "using external provider"
echo "External provider"
}
function configure_storage() {
echo "Local storage not needed for external provider..."
if [[ $KUBEVIRT_STORAGE == "hpp" ]] ; then
_kubectl apply -f ./cluster-sync/external/resources/machineconfig-worker.yaml
echo "Installing hostpath provisioner storage, please ensure /var/hpvolumes exists and has the right SELinux labeling"
HPP_RELEASE=$(curl -s https://github.com/kubevirt/hostpath-provisioner-operator/releases/latest | grep -o "v[0-9]\.[0-9]*\.[0-9]*")
_kubectl apply -f https://github.com/kubevirt/hostpath-provisioner-operator/releases/download/$HPP_RELEASE/namespace.yaml
_kubectl apply -f https://github.com/kubevirt/hostpath-provisioner-operator/releases/download/$HPP_RELEASE/operator.yaml -n hostpath-provisioner
_kubectl apply -f https://github.com/kubevirt/hostpath-provisioner-operator/releases/download/$HPP_RELEASE/hostpathprovisioner_cr.yaml -n hostpath-provisioner
_kubectl apply -f https://github.com/kubevirt/hostpath-provisioner-operator/releases/download/$HPP_RELEASE/storageclass-wffc.yaml
_kubectl patch storageclass hostpath-provisioner -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
elif [[ $KUBEVIRT_STORAGE == "ceph" ]] ; then
echo "Installing ceph storage"
configure_ceph
else
echo "Local storage not needed for external provider..."
fi
}

View File

@ -0,0 +1,81 @@
# scc for the Rook and Ceph daemons
kind: SecurityContextConstraints
apiVersion: security.openshift.io/v1
metadata:
name: rook-ceph
allowPrivilegedContainer: true
allowHostNetwork: true
allowHostDirVolumePlugin: true
priority:
allowedCapabilities: []
allowHostPorts: true
allowHostPID: true
allowHostIPC: true
readOnlyRootFilesystem: false
requiredDropCapabilities: []
defaultAddCapabilities: []
runAsUser:
type: RunAsAny
seLinuxContext:
type: MustRunAs
fsGroup:
type: MustRunAs
supplementalGroups:
type: RunAsAny
allowedFlexVolumes:
- driver: "ceph.rook.io/rook"
- driver: "ceph.rook.io/rook-ceph"
volumes:
- configMap
- downwardAPI
- emptyDir
- flexVolume
- hostPath
- persistentVolumeClaim
- projected
- secret
users:
# A user needs to be added for each rook service account.
# This assumes running in the default sample "rook-ceph" namespace.
# If other namespaces or service accounts are configured, they need to be updated here.
- system:serviceaccount:rook-ceph:rook-ceph-system
- system:serviceaccount:rook-ceph:default
- system:serviceaccount:rook-ceph:rook-ceph-mgr
- system:serviceaccount:rook-ceph:rook-ceph-osd
---
# scc for the CSI driver
kind: SecurityContextConstraints
apiVersion: security.openshift.io/v1
metadata:
name: rook-ceph-csi
allowPrivilegedContainer: true
allowHostNetwork: true
allowHostDirVolumePlugin: true
priority:
allowedCapabilities: ['*']
allowHostPorts: true
allowHostPID: true
allowHostIPC: true
readOnlyRootFilesystem: false
requiredDropCapabilities: []
defaultAddCapabilities: []
runAsUser:
type: RunAsAny
seLinuxContext:
type: RunAsAny
fsGroup:
type: RunAsAny
supplementalGroups:
type: RunAsAny
allowedFlexVolumes:
- driver: "ceph.rook.io/rook"
- driver: "ceph.rook.io/rook-ceph"
volumes: ['*']
users:
# A user needs to be added for each rook service account.
# This assumes running in the default sample "rook-ceph" namespace.
# If other namespaces or service accounts are configured, they need to be updated here.
- system:serviceaccount:rook-ceph:rook-csi-rbd-plugin-sa
- system:serviceaccount:rook-ceph:rook-csi-rbd-provisioner-sa
- system:serviceaccount:rook-ceph:rook-csi-cephfs-plugin-sa
- system:serviceaccount:rook-ceph:rook-csi-cephfs-provisioner-sa

View File

@ -93,7 +93,7 @@ _rsync() {
}
echo "Rsyncing ${CDI_DIR} to container"
# Copy kubevirt into the persistent docker volume
# Copy CDI into the persistent docker volume
_rsync \
--delete \
--exclude 'bazel-bin' \
@ -120,7 +120,6 @@ else
fi
# Ensure that a bazel server is running
if [ -z "$(docker ps --format '{{.Names}}' | grep ${BAZEL_BUILDER_SERVER})" ]; then
if [ "$KUBEVIRTCI_RUNTIME" = "podman" ]; then
docker run --network host -d ${volumes} --security-opt label=disable --name ${BAZEL_BUILDER_SERVER} -w "/root/go/src/kubevirt.io/containerized-data-importer" --rm ${BUILDER_IMAGE} hack/build/bazel-server.sh

View File

@ -30,14 +30,23 @@ KUBEVIRTCI_CONFIG_PATH="$(
# functional testing
BASE_PATH=${KUBEVIRTCI_CONFIG_PATH:-$PWD}
KUBECONFIG=${BASE_PATH}/$KUBEVIRT_PROVIDER/.kubeconfig
KUBECTL=${BASE_PATH}/$KUBEVIRT_PROVIDER/.kubectl
KUBECONFIG=${KUBECONFIG:-$BASE_PATH/$KUBEVIRT_PROVIDER/.kubeconfig}
GOCLI=${GOCLI:-${CDI_DIR}/cluster-up/cli.sh}
KUBE_MASTER_URL=${KUBE_MASTER_URL:-""}
CDI_NAMESPACE=${CDI_NAMESPACE:-cdi}
SNAPSHOT_SC=${SNAPSHOT_SC:-rook-ceph-block}
BLOCK_SC=${BLOCK_SC:-rook-ceph-block}
if [ -z "${KUBECTL+x}" ]; then
kubevirtci_kubectl="${BASE_PATH}/${KUBEVIRT_PROVIDER}/.kubectl"
if [ -e ${kubevirtci_kubectl} ]; then
KUBECTL=${kubevirtci_kubectl}
else
KUBECTL=$(which kubectl)
fi
fi
# parseTestOpts sets 'pkgs' and test_args
parseTestOpts "${@}"
@ -73,4 +82,5 @@ if [ $retry_counter -eq $MAX_CDI_WAIT_RETRY ]; then
fi
test_command="${TESTS_OUT_DIR}/tests.test -test.timeout 360m ${test_args}"
echo "$test_command"
(cd ${CDI_DIR}/tests; ${test_command})

View File

@ -137,6 +137,7 @@ var _ = Describe("[rfe_id:1347][crit:high][vendor:cnv-qe@redhat.com][level:compo
func ValidateRBACForResource(f *framework.Framework, expectedResults map[string]string, resource string, sa string) {
for verb, expectedRes := range expectedResults {
By(fmt.Sprintf("verifying cdi-sa "+resource+" rules, for verb %s", verb))
result, err := tests.RunKubectlCommand(f, "auth", "can-i", "--as", sa, verb, resource)
if expectedRes != "no" {
Expect(err).ToNot(HaveOccurred())

View File

@ -202,6 +202,7 @@ var _ = Describe("CDI storage class config tests", func() {
var _ = Describe("CDI ingress config tests, using manifests", func() {
var (
f = framework.NewFramework("cdiconfig-test")
routeStart = func() string { return fmt.Sprintf("%s-%s.", routeName, f.CdiInstallNs) }
manifestFile string
)
@ -213,6 +214,35 @@ var _ = Describe("CDI ingress config tests, using manifests", func() {
if !version.GE(minVersion) {
Skip(fmt.Sprintf("kubernetes version %s, doesn't support network ingress", version.String()))
}
cfg, err := clientcmd.BuildConfigFromFlags(f.Master, f.KubeConfig)
Expect(err).ToNot(HaveOccurred())
By("Checking if a route exists, we set that as default")
openshiftClient, err := route1client.NewForConfig(cfg)
Expect(err).ToNot(HaveOccurred())
_, err = openshiftClient.RouteV1().Routes(f.CdiInstallNs).Get("cdi-uploadproxy", metav1.GetOptions{})
if err == nil {
By("setting defaultURL to route")
Eventually(func() bool {
config, err := f.CdiClient.CdiV1beta1().CDIConfigs().Get(common.ConfigName, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
if config.Status.UploadProxyURL == nil {
return false
}
return strings.HasPrefix(*config.Status.UploadProxyURL, routeStart())
}, time.Second*30, time.Second).Should(BeTrue())
config, err := f.CdiClient.CdiV1beta1().CDIConfigs().Get(common.ConfigName, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
defaultUrl = *config.Status.UploadProxyURL
}
By("Making sure no url is set")
Eventually(func() string {
config, err := f.CdiClient.CdiV1beta1().CDIConfigs().Get(common.ConfigName, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
if config.Status.UploadProxyURL == nil {
return ""
}
return *config.Status.UploadProxyURL
}, time.Second*30, time.Second).Should(Equal(defaultUrl))
})
AfterEach(func() {

View File

@ -11,6 +11,7 @@ import (
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cdiv1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1beta1"
@ -97,6 +98,10 @@ var _ = Describe("[rfe_id:1277][crit:high][vendor:cnv-qe@redhat.com][level:compo
By("Deleting verifier pod")
err = f.K8sClient.CoreV1().Pods(f.Namespace.Name).Delete(utils.VerifierPodName, &metav1.DeleteOptions{})
Expect(err).ToNot(HaveOccurred())
Eventually(func() bool {
_, err := f.K8sClient.CoreV1().Pods(f.Namespace.Name).Get(utils.VerifierPodName, metav1.GetOptions{})
return k8serrors.IsNotFound(err)
}, 60, 1).Should(BeTrue())
// Create targetPvc in new NS.
targetDV := utils.NewCloningDataVolume("target-dv", "1G", pvc)
@ -733,9 +738,9 @@ var _ = Describe("Namespace with quota", func() {
})
It("Should fail to clone in namespace with quota when pods have higher requirements, then succeed when quota increased", func() {
err := f.UpdateCdiConfigResourceLimits(int64(1), int64(1024*1024*1024), int64(1), int64(1024*1024*1024))
err := f.UpdateCdiConfigResourceLimits(int64(0), int64(256*1024*1024), int64(0), int64(256*1024*1024))
Expect(err).NotTo(HaveOccurred())
err = f.CreateQuotaInNs(int64(1), int64(512*1024*1024), int64(1), int64(512*1024*1024))
err = f.CreateQuotaInNs(int64(1), int64(128*1024*1024), int64(2), int64(128*1024*1024))
Expect(err).NotTo(HaveOccurred())
smartApplicable := f.IsSnapshotStorageClassAvailable()
sc, err := f.K8sClient.StorageV1().StorageClasses().Get(f.SnapshotSCName, metav1.GetOptions{})
@ -764,7 +769,7 @@ var _ = Describe("Namespace with quota", func() {
Expect(err).NotTo(HaveOccurred())
return log
}, controllerSkipPVCCompleteTimeout, assertionPollInterval).Should(ContainSubstring(matchString))
err = f.UpdateQuotaInNs(int64(2), int64(2*1024*1024*1024), int64(2), int64(2*1024*1024*1024))
err = f.UpdateQuotaInNs(int64(1), int64(512*1024*1024), int64(4), int64(512*1024*1024))
Expect(err).NotTo(HaveOccurred())
utils.WaitForPersistentVolumeClaimPhase(f.K8sClient, f.Namespace.Name, v1.ClaimBound, targetDV.Name)
targetPvc, err := utils.WaitForPVC(f.K8sClient, dataVolume.Namespace, dataVolume.Name)
@ -795,9 +800,9 @@ var _ = Describe("Namespace with quota", func() {
})
It("Should fail clone data across namespaces, if a namespace doesn't have enough quota", func() {
err := f.UpdateCdiConfigResourceLimits(int64(2), int64(1024*1024*1024), int64(2), int64(1*1024*1024*1024))
err := f.UpdateCdiConfigResourceLimits(int64(0), int64(512*1024*1024), int64(1), int64(512*1024*1024))
Expect(err).NotTo(HaveOccurred())
err = f.CreateQuotaInNs(int64(1), int64(1024*1024*1024), int64(2), int64(2*1024*1024*1024))
err = f.CreateQuotaInNs(int64(1), int64(256*1024*1024), int64(2), int64(256*1024*1024))
Expect(err).NotTo(HaveOccurred())
pvcDef := utils.NewPVCDefinition(sourcePVCName, "500M", nil, nil)
pvcDef.Namespace = f.Namespace.Name
@ -816,7 +821,7 @@ var _ = Describe("Namespace with quota", func() {
By("Verify Quota was exceeded in logs")
targetPvc, err := utils.WaitForPVC(f.K8sClient, dataVolume.Namespace, dataVolume.Name)
Expect(err).ToNot(HaveOccurred())
matchString := fmt.Sprintf("\\\"%s-source-pod\\\" is forbidden: exceeded quota: test-quota, requested: requests.cpu=2, used: requests.cpu=0, limited: requests.cpu=1", targetPvc.GetUID())
matchString := fmt.Sprintf("\\\"%s-source-pod\\\" is forbidden: exceeded quota: test-quota, requested", targetPvc.GetUID())
Eventually(func() string {
log, err := RunKubectlCommand(f, "logs", f.ControllerPod.Name, "-n", f.CdiInstallNs)
Expect(err).NotTo(HaveOccurred())

View File

@ -427,9 +427,9 @@ var _ = Describe("Namespace with quota", func() {
})
It("Should fail to create import pod in namespace with quota, with resource limits higher in CDIConfig", func() {
err := f.UpdateCdiConfigResourceLimits(int64(2), int64(1024*1024*1024), int64(2), int64(1*1024*1024*1024))
err := f.UpdateCdiConfigResourceLimits(int64(2), int64(512*1024*1024), int64(2), int64(512*1024*1024))
Expect(err).ToNot(HaveOccurred())
err = f.CreateQuotaInNs(int64(1), int64(1024*1024*1024), int64(1), int64(2*1024*1024*1024))
err = f.CreateQuotaInNs(int64(1), int64(512*1024*1024), int64(1), int64(1024*1024*1024))
Expect(err).ToNot(HaveOccurred())
httpEp := fmt.Sprintf("http://%s:%d", utils.FileHostName+"."+f.CdiInstallNs, utils.HTTPNoAuthPort)
pvcAnn := map[string]string{
@ -456,9 +456,9 @@ var _ = Describe("Namespace with quota", func() {
})
It("Should fail to create import pod in namespace with quota, then succeed once the quota is large enough", func() {
err := f.UpdateCdiConfigResourceLimits(int64(1), int64(1024*1024*1024), int64(1), int64(1024*1024*1024))
err := f.UpdateCdiConfigResourceLimits(int64(1), int64(512*1024*1024), int64(1), int64(512*1024*1024))
Expect(err).ToNot(HaveOccurred())
err = f.CreateQuotaInNs(int64(1), int64(512*1024*1024), int64(1), int64(512*1024*1024))
err = f.CreateQuotaInNs(int64(1), int64(256*1024*1024), int64(1), int64(256*1024*1024))
Expect(err).ToNot(HaveOccurred())
httpEp := fmt.Sprintf("http://%s:%d", utils.FileHostName+"."+f.CdiInstallNs, utils.HTTPNoAuthPort)
pvcAnn := map[string]string{
@ -483,7 +483,7 @@ var _ = Describe("Namespace with quota", func() {
return log
}, controllerSkipPVCCompleteTimeout, assertionPollInterval).Should(ContainSubstring(matchString))
err = f.UpdateQuotaInNs(int64(2), int64(1024*1024*1024), int64(2), int64(1024*1024*1024))
err = f.UpdateQuotaInNs(int64(2), int64(512*1024*1024), int64(2), int64(512*1024*1024))
Expect(err).ToNot(HaveOccurred())
By("Verify the pod status is succeeded on the target PVC")

View File

@ -2,6 +2,7 @@ package tests_test
import (
"flag"
"fmt"
"testing"
"github.com/onsi/ginkgo"
@ -37,6 +38,7 @@ func TestTests(t *testing.T) {
// cannot work when called during test tree construction.
func BuildTestSuite() {
BeforeSuite(func() {
fmt.Fprintf(ginkgo.GinkgoWriter, "Reading parameters\n")
// Read flags, and configure client instances
framework.ClientsInstance.KubectlPath = *kubectlPath
framework.ClientsInstance.OcPath = *ocPath
@ -47,6 +49,15 @@ func BuildTestSuite() {
framework.ClientsInstance.SnapshotSCName = *snapshotSCName
framework.ClientsInstance.BlockSCName = *blockSCName
fmt.Fprintf(ginkgo.GinkgoWriter, "Kubectl path: %s\n", framework.ClientsInstance.KubectlPath)
fmt.Fprintf(ginkgo.GinkgoWriter, "OC path: %s\n", framework.ClientsInstance.OcPath)
fmt.Fprintf(ginkgo.GinkgoWriter, "CDI install NS: %s\n", framework.ClientsInstance.CdiInstallNs)
fmt.Fprintf(ginkgo.GinkgoWriter, "Kubeconfig: %s\n", framework.ClientsInstance.KubeConfig)
fmt.Fprintf(ginkgo.GinkgoWriter, "Master: %s\n", framework.ClientsInstance.Master)
fmt.Fprintf(ginkgo.GinkgoWriter, "GO CLI path: %s\n", framework.ClientsInstance.GoCLIPath)
fmt.Fprintf(ginkgo.GinkgoWriter, "Snapshot SC: %s\n", framework.ClientsInstance.SnapshotSCName)
fmt.Fprintf(ginkgo.GinkgoWriter, "Block SC: %s\n", framework.ClientsInstance.BlockSCName)
restConfig, err := framework.ClientsInstance.LoadConfig()
if err != nil {
// Can't use Expect here due this being called outside of an It block, and Expect

View File

@ -509,9 +509,9 @@ var _ = Describe("Namespace with quota", func() {
})
It("Should fail to create upload pod in namespace with quota, and recover when quota fixed", func() {
err := f.UpdateCdiConfigResourceLimits(int64(2), int64(1024*1024*1024), int64(2), int64(1*1024*1024*1024))
err := f.UpdateCdiConfigResourceLimits(int64(0), int64(512*1024*1024), int64(2), int64(512*1024*1024))
Expect(err).ToNot(HaveOccurred())
err = f.CreateQuotaInNs(int64(1), int64(1024*1024*1024), int64(2), int64(2*1024*1024*1024))
err = f.CreateQuotaInNs(int64(1), int64(256*1024*1024), int64(2), int64(256*1024*1024))
Expect(err).ToNot(HaveOccurred())
By("Creating PVC with upload target annotation")
pvc, err = f.CreateBoundPVCFromDefinition(utils.UploadPVCDefinition())
@ -525,7 +525,7 @@ var _ = Describe("Namespace with quota", func() {
return log
}, controllerSkipPVCCompleteTimeout, assertionPollInterval).Should(ContainSubstring(matchString))
By("Updating the quota to be enough")
err = f.UpdateQuotaInNs(int64(2), int64(1024*1024*1024), int64(2), int64(2*1024*1024*1024))
err = f.UpdateQuotaInNs(int64(2), int64(512*1024*1024), int64(2), int64(1024*1024*1024))
Expect(err).ToNot(HaveOccurred())
By("Verify PVC annotation says ready")

View File

@ -43,6 +43,9 @@ func RunKubectlCommand(f *framework.Framework, args ...string) (string, error) {
if len(errb.String()) > 0 {
return errb.String(), err
}
// err will not always be nil when calling kubectl; this is expected when there are no results, for instance.
// Still return the value and let the caller decide what to do.
return string(stdOutBytes), err
}
return string(stdOutBytes), nil
}