Mirror of https://github.com/intel/intel-device-plugins-for-kubernetes.git (synced 2025-06-03 03:59:37 +00:00)
e2e: add tests for SGX Admission Webhook
Signed-off-by: Mikko Ylinen <mikko.ylinen@intel.com>
Commit 9b687401b8 (parent 578b60fd4c)
Makefile (2 changed lines)
@@ -66,7 +66,7 @@ test-with-kind:
 	@$(KIND) create cluster --name "intel-device-plugins" --kubeconfig $(e2e_tmp_dir)/kubeconfig --image "kindest/node:v1.19.0"
 	@$(KIND) load image-archive --name "intel-device-plugins" $(e2e_tmp_dir)/$(WEBHOOK_IMAGE_FILE)
 	$(KUBECTL) --kubeconfig=$(e2e_tmp_dir)/kubeconfig apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml
-	@$(GO) test -v ./test/e2e -args -kubeconfig $(e2e_tmp_dir)/kubeconfig -kubectl-path $(KUBECTL) -ginkgo.focus "Webhook" || rc=1; \
+	@$(GO) test -v ./test/e2e -args -kubeconfig $(e2e_tmp_dir)/kubeconfig -kubectl-path $(KUBECTL) -ginkgo.focus "FPGA Admission" || rc=1; \
 	$(KIND) delete cluster --name "intel-device-plugins"; \
 	rm -rf $(e2e_tmp_dir); \
 	exit $$rc
@@ -27,6 +27,7 @@ import (
 	_ "github.com/intel/intel-device-plugins-for-kubernetes/test/e2e/gpu"
 	_ "github.com/intel/intel-device-plugins-for-kubernetes/test/e2e/qat"
 	_ "github.com/intel/intel-device-plugins-for-kubernetes/test/e2e/sgx"
+	_ "github.com/intel/intel-device-plugins-for-kubernetes/test/e2e/sgxadmissionwebhook"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/component-base/logs"
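The blank import added above is what pulls the new package into the e2e binary: each test package registers its spec from an init() function, so importing it for side effects is enough for -ginkgo.focus to find it by the Describe text. A minimal sketch of that pattern, with a hypothetical package name:

// Illustrative only: how a test package (name hypothetical) makes itself
// visible to the suite so that a blank import in e2e_test.go pulls it in.
package examplewebhook

import "github.com/onsi/ginkgo"

func init() {
	// Runs when e2e_test.go blank-imports the package; the Describe text is
	// what -ginkgo.focus matches against.
	ginkgo.Describe("Example Admission Webhook", describe)
}

func describe() { /* specs go here */ }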
@@ -73,7 +73,7 @@ func checkPodMutation(f *framework.Framework, mappingsNamespace string, source,
 	}
 
 	ginkgo.By("deploying webhook")
-	utils.DeployFpgaWebhook(f, kustomizationPath)
+	_ = utils.DeployWebhook(f, kustomizationPath)
 
 	ginkgo.By("deploying mappings")
 	framework.RunKubectlOrDie(f.Namespace.Name, "apply", "-n", mappingsNamespace, "-f", filepath.Dir(kustomizationPath)+"/../mappings-collection.yaml")
test/e2e/sgxadmissionwebhook/sgxaadmissionwebhook.go (new file, 195 lines)
@@ -0,0 +1,195 @@
// Copyright 2021 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package sgxadmissionwebhook implements E2E tests for SGX admission webhook.
package sgxadmissionwebhook

import (
	"context"
	"reflect"

	"github.com/intel/intel-device-plugins-for-kubernetes/test/e2e/utils"
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/framework/kubectl"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

const (
	kustomizationYaml = "deployments/sgx_admissionwebhook/overlays/default-with-certmanager/kustomization.yaml"
)

func init() {
	ginkgo.Describe("SGX Admission Webhook", describe)
}

func describe() {
	f := framework.NewDefaultFramework("sgxwebhook")
	var webhook v1.Pod

	ginkgo.BeforeEach(func() {
		kustomizationPath, err := utils.LocateRepoFile(kustomizationYaml)
		if err != nil {
			framework.Failf("unable to locate %q: %v", kustomizationYaml, err)
		}
		webhook = utils.DeployWebhook(f, kustomizationPath)
	})

	ginkgo.It("checks the webhook pod is safely configured", func() {
		err := utils.TestContainersRunAsNonRoot([]v1.Pod{webhook})
		gomega.Expect(err).To(gomega.BeNil())
	})
	ginkgo.It("mutates created pods when no quote generation is needed", func() {
		ginkgo.By("submitting the pod")
		pod := submitPod(f, []string{"test"}, "")

		ginkgo.By("checking the container resources have been mutated")
		checkMutatedResources(f, pod.Spec.Containers[0].Resources, []v1.ResourceName{"sgx.intel.com/enclave"}, []v1.ResourceName{"sgx.intel.com/provision"})

		ginkgo.By("checking the pod total EPC size annotation is correctly set")
		gomega.Expect(pod.Annotations["sgx.intel.com/epc"]).To(gomega.Equal("1Mi"))
	})
	ginkgo.It("mutates created pods when the container contains the quote generation libraries", func() {
		ginkgo.By("submitting the pod")
		pod := submitPod(f, []string{"test"}, "test")

		ginkgo.By("checking the container resources have been mutated")
		checkMutatedResources(f, pod.Spec.Containers[0].Resources, []v1.ResourceName{"sgx.intel.com/enclave", "sgx.intel.com/provision"}, []v1.ResourceName{})

		ginkgo.By("checking the pod total EPC size annotation is correctly set")
		gomega.Expect(pod.Annotations["sgx.intel.com/epc"]).To(gomega.Equal("1Mi"))
	})
	ginkgo.It("mutates created pods when the container uses aesmd from a side-car container to generate quotes", func() {
		ginkgo.By("submitting the pod")
		pod := submitPod(f, []string{"test", "aesmd"}, "aesmd")
		ginkgo.By("checking the container resources have been mutated")
		checkMutatedResources(f, pod.Spec.Containers[0].Resources, []v1.ResourceName{"sgx.intel.com/enclave"}, []v1.ResourceName{"sgx.intel.com/provision"})
		checkMutatedResources(f, pod.Spec.Containers[1].Resources, []v1.ResourceName{"sgx.intel.com/enclave", "sgx.intel.com/provision"}, []v1.ResourceName{})
		ginkgo.By("checking the container volumes have been mutated")
		checkMutatedVolumes(f, pod, "aesmd-socket", v1.EmptyDirVolumeSource{})
		ginkgo.By("checking the container envvars have been mutated")
		gomega.Expect(pod.Spec.Containers[0].Env[0].Name).To(gomega.Equal("SGX_AESM_ADDR"))
		gomega.Expect(pod.Spec.Containers[0].Env[0].Value).To(gomega.Equal("1"))
		ginkgo.By("checking the pod total EPC size annotation is correctly set")
		gomega.Expect(pod.Annotations["sgx.intel.com/epc"]).To(gomega.Equal("2Mi"))
	})
	ginkgo.It("mutates created pods where one container uses host/daemonset aesmd to generate quotes", func() {
		ginkgo.By("submitting the pod")
		pod := submitPod(f, []string{"test"}, "aesmd")
		ginkgo.By("checking the container resources have been mutated")
		checkMutatedResources(f, pod.Spec.Containers[0].Resources, []v1.ResourceName{"sgx.intel.com/enclave"}, []v1.ResourceName{"sgx.intel.com/provision"})
		ginkgo.By("checking the container volumes have been mutated")
		checkMutatedVolumes(f, pod, "aesmd-socket", v1.HostPathVolumeSource{})
		ginkgo.By("checking the container envvars have been mutated")
		gomega.Expect(pod.Spec.Containers[0].Env[0].Name).To(gomega.Equal("SGX_AESM_ADDR"))
		gomega.Expect(pod.Spec.Containers[0].Env[0].Value).To(gomega.Equal("1"))
		ginkgo.By("checking the pod total EPC size annotation is correctly set")
		gomega.Expect(pod.Annotations["sgx.intel.com/epc"]).To(gomega.Equal("1Mi"))
	})
	ginkgo.It("mutates created pods where three containers use host/daemonset aesmd to generate quotes", func() {
		ginkgo.By("submitting the pod")
		pod := submitPod(f, []string{"test1", "test2", "test3"}, "aesmd")
		ginkgo.By("checking the container resources have been mutated")
		checkMutatedResources(f, pod.Spec.Containers[0].Resources, []v1.ResourceName{"sgx.intel.com/enclave"}, []v1.ResourceName{"sgx.intel.com/provision"})
		checkMutatedResources(f, pod.Spec.Containers[1].Resources, []v1.ResourceName{"sgx.intel.com/enclave"}, []v1.ResourceName{"sgx.intel.com/provision"})
		checkMutatedResources(f, pod.Spec.Containers[2].Resources, []v1.ResourceName{"sgx.intel.com/enclave"}, []v1.ResourceName{"sgx.intel.com/provision"})
		ginkgo.By("checking the container volumes have been mutated")
		checkMutatedVolumes(f, pod, "aesmd-socket", v1.HostPathVolumeSource{})
		ginkgo.By("checking the container envvars have been mutated")
		gomega.Expect(pod.Spec.Containers[0].Env[0].Name).To(gomega.Equal("SGX_AESM_ADDR"))
		gomega.Expect(pod.Spec.Containers[0].Env[0].Value).To(gomega.Equal("1"))
		gomega.Expect(pod.Spec.Containers[1].Env[0].Name).To(gomega.Equal("SGX_AESM_ADDR"))
		gomega.Expect(pod.Spec.Containers[1].Env[0].Value).To(gomega.Equal("1"))
		gomega.Expect(pod.Spec.Containers[2].Env[0].Name).To(gomega.Equal("SGX_AESM_ADDR"))
		gomega.Expect(pod.Spec.Containers[2].Env[0].Value).To(gomega.Equal("1"))
		ginkgo.By("checking the pod total EPC size annotation is correctly set")
		gomega.Expect(pod.Annotations["sgx.intel.com/epc"]).To(gomega.Equal("3Mi"))
	})
}

func checkMutatedVolumes(f *framework.Framework, pod *v1.Pod, volumeName string, volumeType interface{}) {
	switch reflect.TypeOf(volumeType).String() {
	case "v1.HostPathVolumeSource":
		gomega.Expect(pod.Spec.Volumes[0].HostPath).NotTo(gomega.BeNil())
		gomega.Expect(pod.Spec.Volumes[0].Name).To(gomega.Equal(volumeName))
	case "v1.EmptyDirVolumeSource":
		gomega.Expect(pod.Spec.Volumes[0].EmptyDir).NotTo(gomega.BeNil())
		gomega.Expect(pod.Spec.Volumes[0].Name).To(gomega.Equal(volumeName))
	}

	for _, c := range pod.Spec.Containers {
		gomega.Expect(c.VolumeMounts[0].Name).To(gomega.Equal(volumeName))
	}
}

func checkMutatedResources(f *framework.Framework, r v1.ResourceRequirements, expectedResources, forbiddenResources []v1.ResourceName) {
	for _, res := range expectedResources {
		q, ok := r.Limits[res]
		if !ok {
			framework.DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
			kubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
			framework.Fail("the pod has missing resources")
		}
		gomega.Expect(q.String()).To(gomega.Equal("1"))
	}
	for _, res := range forbiddenResources {
		_, ok := r.Limits[res]
		if ok {
			framework.DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
			kubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
			framework.Fail("the pod has extra resources")
		}
	}
}

func submitPod(f *framework.Framework, containerNames []string, quoteProvider string) *v1.Pod {
	containers := make([]v1.Container, 0)

	for _, c := range containerNames {
		containers = append(containers, v1.Container{
			Name:  c,
			Image: imageutils.GetPauseImageName(),
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{"sgx.intel.com/epc": resource.MustParse("1Mi")},
				Limits:   v1.ResourceList{"sgx.intel.com/epc": resource.MustParse("1Mi")},
			},
		})
	}

	disabled := false

	podSpec := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "webhook-tester-pod",
			Annotations: map[string]string{
				"sgx.intel.com/quote-provider": quoteProvider,
			},
		},
		Spec: v1.PodSpec{
			AutomountServiceAccountToken: &disabled,
			Containers:                   containers,
		},
	}

	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(),
		podSpec, metav1.CreateOptions{})

	framework.ExpectNoError(err, "pod Create API error")

	return pod
}
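Taken together, the specs above describe the mutation the webhook is expected to perform on a pod that requests SGX EPC and sets the sgx.intel.com/quote-provider annotation. The following sketch only restates those expectations in one place; it is illustrative and not code from this commit:

// Illustrative summary of the assertions above for a single container that
// requests 1Mi of EPC and uses a host/daemonset aesmd quote provider.
package main

import "fmt"

func main() {
	expectedLimits := map[string]string{
		// the specs expect an enclave device limit of exactly "1";
		// sgx.intel.com/provision is only expected when the container
		// itself carries the quote generation libraries
		"sgx.intel.com/enclave": "1",
	}
	expectedEnv := map[string]string{
		// env var the specs expect on each mutated app container
		"SGX_AESM_ADDR": "1",
	}
	expectedAnnotations := map[string]string{
		// pod-level total EPC across all containers (1Mi per container here)
		"sgx.intel.com/epc": "1Mi",
	}
	// aesmd-socket volume: HostPath for host aesmd, EmptyDir for a side-car
	expectedVolume := "aesmd-socket"

	fmt.Println(expectedLimits, expectedEnv, expectedAnnotations, expectedVolume)
}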
@@ -126,14 +126,14 @@ func CreateKustomizationOverlay(namespace, base, overlay string) error {
 	return os.WriteFile(overlay+"/kustomization.yaml", []byte(content), 0600)
 }
 
-// DeployFpgaWebhook deploys FPGA admission webhook to a framework-specific namespace.
-func DeployFpgaWebhook(f *framework.Framework, kustomizationPath string) {
+// DeployWebhook deploys an admission webhook to a framework-specific namespace.
+func DeployWebhook(f *framework.Framework, kustomizationPath string) v1.Pod {
 	if _, err := e2epod.WaitForPodsWithLabelRunningReady(f.ClientSet, "cert-manager",
 		labels.Set{"app.kubernetes.io/name": "cert-manager"}.AsSelector(), 1 /* one replica */, 10*time.Second); err != nil {
 		framework.Failf("unable to detect running cert-manager: %v", err)
 	}
 
-	tmpDir, err := os.MkdirTemp("", "fpgawebhooke2etest-"+f.Namespace.Name)
+	tmpDir, err := os.MkdirTemp("", "webhooke2etest-"+f.Namespace.Name)
 	if err != nil {
 		framework.Failf("unable to create temp directory: %v", err)
 	}
@@ -145,10 +145,31 @@ func DeployFpgaWebhook(f *framework.Framework, kustomizationPath string) {
 	}
 
 	framework.RunKubectlOrDie(f.Namespace.Name, "apply", "-k", tmpDir)
-	if _, err = e2epod.WaitForPodsWithLabelRunningReady(f.ClientSet, f.Namespace.Name,
-		labels.Set{"control-plane": "controller-manager"}.AsSelector(), 1 /* one replica */, 10*time.Second); err != nil {
+	podList, err := e2epod.WaitForPodsWithLabelRunningReady(f.ClientSet, f.Namespace.Name,
+		labels.Set{"control-plane": "controller-manager"}.AsSelector(), 1 /* one replica */, 10*time.Second)
+	if err != nil {
 		framework.DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
 		kubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
 		framework.Failf("unable to wait for all pods to be running and ready: %v", err)
 	}
+	return podList.Items[0]
+}
+
+// TestContainersRunAsNonRoot checks that all containers within the Pods run
+// with non-root UID/GID.
+func TestContainersRunAsNonRoot(pods []v1.Pod) error {
+	for _, p := range pods {
+		for _, c := range append(p.Spec.InitContainers, p.Spec.Containers...) {
+			if !*c.SecurityContext.RunAsNonRoot {
+				return fmt.Errorf("%s (container: %s): RunAsNonRoot is not true", p.Name, c.Name)
+			}
+			if *c.SecurityContext.RunAsGroup == 0 {
+				return fmt.Errorf("%s (container: %s): RunAsGroup is root (0)", p.Name, c.Name)
+			}
+			if *c.SecurityContext.RunAsUser == 0 {
+				return fmt.Errorf("%s (container: %s): RunAsUser is root (0)", p.Name, c.Name)
+			}
+		}
+	}
+	return nil
 }
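Because DeployWebhook now returns the webhook pod, a caller can deploy the webhook and immediately audit it with the new TestContainersRunAsNonRoot helper, which is how the SGX package above uses it. A hedged usage sketch, with a placeholder kustomization path and spec text:

// Hypothetical caller of the generalized helpers; only the helper calls
// mirror this commit, the rest is placeholder scaffolding.
package examplewebhook

import (
	"github.com/intel/intel-device-plugins-for-kubernetes/test/e2e/utils"
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

func describeDeployment() {
	f := framework.NewDefaultFramework("examplewebhook")
	var webhook v1.Pod

	ginkgo.BeforeEach(func() {
		// Placeholder path; real callers resolve their own overlay with LocateRepoFile.
		kustomizationPath, err := utils.LocateRepoFile("deployments/example_webhook/kustomization.yaml")
		if err != nil {
			framework.Failf("unable to locate kustomization: %v", err)
		}
		// Applies the overlay, waits for the controller-manager pod, and returns it.
		webhook = utils.DeployWebhook(f, kustomizationPath)
	})

	ginkgo.It("runs the webhook containers as non-root", func() {
		gomega.Expect(utils.TestContainersRunAsNonRoot([]v1.Pod{webhook})).To(gomega.BeNil())
	})
}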