mirror of
https://github.com/intel/intel-device-plugins-for-kubernetes.git
synced 2025-06-03 03:59:37 +00:00
e2e: fix AfterEach to be in the correct location
'AfterEach' was added to clean up the plugin pod deployed by 'BeforeEach', preventing failures caused by a leftover plugin pod. However, if 'AfterEach' is placed inside a 'Context', the same problem can still occur. Since the current e2e tests generally have only one 'Context', the problem was not visible, but the logic is still flawed and would cause the same failures if more 'Contexts' were added. Therefore, this commit moves 'AfterEach' to the correct location. Signed-off-by: Hyeongju Johannes Lee <hyeongju.lee@intel.com>
This commit is contained in:
parent
3744e09a1b
commit
e3e6e215b7
@ -85,14 +85,6 @@ func describe() {
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.Context("When DSA resources are available", func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
ginkgo.By("checking if the resource is allocatable")
|
||||
if err := utils.WaitForNodesWithResource(f.ClientSet, "dsa.intel.com/wq-user-dedicated", 300*time.Second); err != nil {
|
||||
framework.Failf("unable to wait for nodes to have positive allocatable resource: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.AfterEach(func() {
|
||||
ginkgo.By("undeploying DSA plugin")
|
||||
e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "delete", "-k", filepath.Dir(kustomizationPath))
|
||||
@ -101,6 +93,14 @@ func describe() {
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.Context("When DSA resources are available", func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
ginkgo.By("checking if the resource is allocatable")
|
||||
if err := utils.WaitForNodesWithResource(f.ClientSet, "dsa.intel.com/wq-user-dedicated", 300*time.Second); err != nil {
|
||||
framework.Failf("unable to wait for nodes to have positive allocatable resource: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.It("deploys a demo app", func() {
|
||||
e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-f", demoPath)
|
||||
|
||||
|
@ -85,14 +85,6 @@ func describeQatDpdkPlugin() {
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.Context("When QAT Gen4 resources are available", func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
ginkgo.By("checking if the resource is allocatable")
|
||||
if err := utils.WaitForNodesWithResource(f.ClientSet, "qat.intel.com/cy", 30*time.Second); err != nil {
|
||||
framework.Failf("unable to wait for nodes to have positive allocatable resource: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.AfterEach(func() {
|
||||
ginkgo.By("undeploying QAT plugin")
|
||||
e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "delete", "-k", filepath.Dir(kustomizationPath))
|
||||
@ -101,6 +93,14 @@ func describeQatDpdkPlugin() {
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.Context("When QAT Gen4 resources are available", func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
ginkgo.By("checking if the resource is allocatable")
|
||||
if err := utils.WaitForNodesWithResource(f.ClientSet, "qat.intel.com/cy", 30*time.Second); err != nil {
|
||||
framework.Failf("unable to wait for nodes to have positive allocatable resource: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.It("deploys a crypto pod requesting QAT resources", func() {
|
||||
ginkgo.By("submitting a crypto pod requesting QAT resources")
|
||||
e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-f", opensslTestYamlPath)
|
||||
@ -113,6 +113,7 @@ func describeQatDpdkPlugin() {
|
||||
framework.Logf("cpa_sample_code output:\n %s", output)
|
||||
})
|
||||
})
|
||||
|
||||
ginkgo.Context("When QAT Gen2 resources are available", func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
ginkgo.By("checking if the resource is allocatable")
|
||||
|
@ -71,14 +71,6 @@ func describeQatKernelPlugin() {
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.Context("When QAT resources are available", func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
ginkgo.By("checking if the resource is allocatable")
|
||||
if err := utils.WaitForNodesWithResource(f.ClientSet, "qat.intel.com/cy1_dc0", 30*time.Second); err != nil {
|
||||
framework.Failf("unable to wait for nodes to have positive allocatable resource: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.AfterEach(func() {
|
||||
ginkgo.By("undeploying QAT plugin")
|
||||
e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "delete", "-f", yamlPath)
|
||||
@ -87,6 +79,14 @@ func describeQatKernelPlugin() {
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.Context("When QAT resources are available", func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
ginkgo.By("checking if the resource is allocatable")
|
||||
if err := utils.WaitForNodesWithResource(f.ClientSet, "qat.intel.com/cy1_dc0", 30*time.Second); err != nil {
|
||||
framework.Failf("unable to wait for nodes to have positive allocatable resource: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.It("deploys a pod requesting QAT resources", func() {
|
||||
ginkgo.By("submitting a pod requesting QAT resources")
|
||||
podSpec := &v1.Pod{
|
||||
|
Loading…
Reference in New Issue
Block a user