Mirror of https://github.com/intel/intel-device-plugins-for-kubernetes.git, synced 2025-06-03 03:59:37 +00:00
e2e: use utils.GetFormattedErrorAndLog for logging demo pods
This makes a demo pod's log get printed only when the pod did not run successfully.
Signed-off-by: Hyeongju Johannes Lee <hyeongju.lee@intel.com>
This commit is contained in:
parent 4b26ead3ac
commit 32c7c370ef
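The diff below passes the demo pod's log to gomega as the optional failure description, so the log is printed only when the success expectation fails. For context, here is a minimal sketch of a log-fetching helper along the lines of the utils.GetPodLogs calls used below; the name, signature, and message formatting are assumptions, not necessarily the repository's actual helper in test/e2e/utils:

package utils

import (
	"context"
	"fmt"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// GetPodLogs fetches a container's log and wraps it in a message that can be
// handed to gomega.Expect(err).To(gomega.BeNil(), msg) as the failure
// description. Sketch only: the real helper may differ.
func GetPodLogs(ctx context.Context, f *framework.Framework, podName, containerName string) string {
	log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, containerName)
	if err != nil {
		return fmt.Sprintf("unable to get log from pod %q: %v", podName, err)
	}

	return fmt.Sprintf("log output of the failed pod %q:\n%s", podName, log)
}

Because the string is used only as gomega's failure description, it is shown exclusively when err is non-nil, i.e. when the demo pod did not run successfully.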
@@ -22,6 +22,7 @@ import (

	"github.com/intel/intel-device-plugins-for-kubernetes/test/e2e/utils"
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/kubernetes/test/e2e/framework"
@@ -119,15 +120,7 @@ func runDemoApp(ctx context.Context, function, yaml string, f *framework.Framewo
	e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-f", demoPath)

	ginkgo.By("waiting for the DLB demo to succeed")
-	e2epod.NewPodClient(f).WaitForSuccess(ctx, podName, 200*time.Second)
-
-	ginkgo.By("getting workload log")
-
-	log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, podName)
-
-	if err != nil {
-		framework.Failf("unable to get log from pod: %v", err)
-	}
-
-	framework.Logf("log output: %s", log)
+	err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, podName, f.Namespace.Name, 200*time.Second)
+	gomega.Expect(err).To(gomega.BeNil(), utils.GetPodLogs(ctx, f, podName, podName))
}
@@ -21,6 +21,7 @@ import (

	"github.com/intel/intel-device-plugins-for-kubernetes/test/e2e/utils"
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/kubernetes/test/e2e/framework"
	e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
@@ -106,16 +107,8 @@ func describe() {
			e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-f", demoPath)

			ginkgo.By("waiting for the DSA demo to succeed")
-			e2epod.NewPodClient(f).WaitForSuccess(ctx, podName, 200*time.Second)
-
-			ginkgo.By("getting workload log")
-			log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, podName)
-
-			if err != nil {
-				framework.Failf("unable to get log from pod: %v", err)
-			}
-
-			framework.Logf("log output: %s", log)
+			err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, podName, f.Namespace.Name, 200*time.Second)
+			gomega.Expect(err).To(gomega.BeNil(), utils.GetPodLogs(ctx, f, podName, podName))
		})
	})
})
@@ -24,6 +24,7 @@ import (

	"github.com/intel/intel-device-plugins-for-kubernetes/test/e2e/utils"
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -97,7 +98,7 @@ func runTestCase(ctx context.Context, fmw *framework.Framework, pluginKustomizat

	ginkgo.By("checking if the resource is allocatable")

-	if err := utils.WaitForNodesWithResource(ctx, fmw.ClientSet, resource, 30*time.Second); err != nil {
+	if err = utils.WaitForNodesWithResource(ctx, fmw.ClientSet, resource, 30*time.Second); err != nil {
		framework.Failf("unable to wait for nodes to have positive allocatable resource: %v", err)
	}

@@ -109,11 +110,9 @@ func runTestCase(ctx context.Context, fmw *framework.Framework, pluginKustomizat
	pod := createPod(ctx, fmw, fmt.Sprintf("fpgaplugin-%s-%s-%s-correct", pluginMode, cmd1, cmd2), resource, image, []string{cmd1, "-S0"})

	ginkgo.By("waiting the pod to finish successfully")
-	e2epod.NewPodClient(fmw).WaitForSuccess(ctx, pod.ObjectMeta.Name, 60*time.Second)
-	// If WaitForSuccess fails, ginkgo doesn't show the logs of the failed container.
-	// Replacing WaitForSuccess with WaitForFinish + 'kubelet logs' would show the logs
-	//fmw.PodClient().WaitForFinish(pod.ObjectMeta.Name, 60*time.Second)
-	//framework.RunKubectlOrDie(fmw.Namespace.Name, "logs", pod.ObjectMeta.Name)

+	err = e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, fmw.ClientSet, pod.ObjectMeta.Name, fmw.Namespace.Name, 60*time.Second)
+	gomega.Expect(err).To(gomega.BeNil(), utils.GetPodLogs(ctx, fmw, pod.ObjectMeta.Name, "testcontainer"))

	ginkgo.By("submitting a pod requesting incorrect FPGA resources")

@@ -21,6 +21,7 @@ import (

	"github.com/intel/intel-device-plugins-for-kubernetes/test/e2e/utils"
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/kubernetes/test/e2e/framework"
	e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
@@ -106,16 +107,8 @@ func describe() {
			e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-f", demoPath)

			ginkgo.By("waiting for the IAA demo to succeed")
-			e2epod.NewPodClient(f).WaitForSuccess(ctx, podName, 300*time.Second)
-
-			ginkgo.By("getting workload log")
-			log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, podName)
-
-			if err != nil {
-				framework.Failf("unable to get log from pod: %v", err)
-			}
-
-			framework.Logf("log output: %s", log)
+			err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, podName, f.Namespace.Name, 300*time.Second)
+			gomega.Expect(err).To(gomega.BeNil(), utils.GetPodLogs(ctx, f, podName, podName))
		})
	})
})
@@ -22,6 +22,7 @@ import (

	"github.com/intel/intel-device-plugins-for-kubernetes/test/e2e/utils"
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -38,6 +39,7 @@ const (
	compressTestYaml = "deployments/qat_dpdk_app/test-compress1/kustomization.yaml"
	cryptoTestYaml = "deployments/qat_dpdk_app/test-crypto1/kustomization.yaml"
	cryptoTestGen4Yaml = "deployments/qat_dpdk_app/test-crypto1-gen4/kustomization.yaml"
+	demoPodContainerName = "crypto-perf"
)

const (
@@ -135,12 +137,8 @@ func describeQatDpdkPlugin() {
			e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-k", filepath.Dir(cryptoTestGen4YamlPath))

			ginkgo.By("waiting the crypto pod to finish successfully")
-
-			e2epod.NewPodClient(f).WaitForSuccess(ctx, "qat-dpdk-test-crypto-perf-tc1-gen4", 300*time.Second)
-
-			output, _ := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, "qat-dpdk-test-crypto-perf-tc1-gen4", "crypto-perf")
-
-			framework.Logf("crypto-perf output:\n %s", output)
+			err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, "qat-dpdk-test-crypto-perf-tc1-gen4", f.Namespace.Name, 300*time.Second)
+			gomega.Expect(err).To(gomega.BeNil(), utils.GetPodLogs(ctx, f, "qat-dpdk-test-crypto-perf-tc1-gen4", "crypto-perf"))
		})
	})

@@ -169,7 +167,9 @@ func describeQatDpdkPlugin() {
			e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-k", filepath.Dir(cryptoTestYamlPath))

			ginkgo.By("waiting the crypto pod to finish successfully")
-			e2epod.NewPodClient(f).WaitForSuccess(ctx, "qat-dpdk-test-crypto-perf-tc1", 60*time.Second)
+			demoPodName := "qat-dpdk-test-crypto-perf-tc1"
+			err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, demoPodName, f.Namespace.Name, 60*time.Second)
+			gomega.Expect(err).To(gomega.BeNil(), utils.GetPodLogs(ctx, f, demoPodName, demoPodContainerName))
		})

		ginkgo.It("deploys a compress pod requesting QAT resources", func(ctx context.Context) {
@@ -177,7 +177,9 @@ func describeQatDpdkPlugin() {
			e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-k", filepath.Dir(compressTestYamlPath))

			ginkgo.By("waiting the compress pod to finish successfully")
-			e2epod.NewPodClient(f).WaitForSuccess(ctx, "qat-dpdk-test-compress-perf-tc1", 60*time.Second)
+			demoPodName := "qat-dpdk-test-compress-perf-tc1"
+			err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, demoPodName, f.Namespace.Name, 60*time.Second)
+			gomega.Expect(err).To(gomega.BeNil(), utils.GetPodLogs(ctx, f, demoPodName, demoPodContainerName))
		})
	})
}
@@ -210,9 +212,7 @@ func runCpaSampleCode(ctx context.Context, f *framework.Framework, runTests int,
	framework.ExpectNoError(err, "pod Create API error")

	ginkgo.By("waiting the cpa_sample_code pod for the resource" + resourceName.String() + "to finish successfully")
-	e2epod.NewPodClient(f).WaitForSuccess(ctx, pod.ObjectMeta.Name, 300*time.Second)
-
-	output, _ := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, pod.Spec.Containers[0].Name)
-
-	framework.Logf("cpa_sample_code output:\n %s", output)
+	err = e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, pod.ObjectMeta.Name, f.Namespace.Name, 300*time.Second)
+	gomega.Expect(err).To(gomega.BeNil(), utils.GetPodLogs(ctx, f, pod.ObjectMeta.Name, pod.Spec.Containers[0].Name))
}
@@ -21,6 +21,7 @@ import (

	"github.com/intel/intel-device-plugins-for-kubernetes/test/e2e/utils"
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -115,8 +116,8 @@ func describe() {
			framework.ExpectNoError(err, "pod Create API error")

			ginkgo.By("waiting the pod to finish successfully")
-
-			e2epod.NewPodClient(f).WaitForSuccess(ctx, pod.ObjectMeta.Name, 60*time.Second)
+			err = e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, pod.ObjectMeta.Name, f.Namespace.Name, 60*time.Second)
+			gomega.Expect(err).To(gomega.BeNil(), utils.GetPodLogs(ctx, f, pod.ObjectMeta.Name, "testcontainer"))
		})
	})
