fix codegen

Igor Velichkovich 2024-05-23 14:11:37 -07:00
parent d1fe58b7a9
commit 50b8e8829a
8 changed files with 20 additions and 16 deletions

View File

@@ -32,14 +32,14 @@ func isIPPoolAllocationsEmpty(ctx context.Context, k8sIPAM *kubeClient.Kubernete
}
}
-func isIPPoolAllocationsEmptyForNodeSlices(k8sIPAM *kubeClient.KubernetesIPAM, ipPoolCIDR string, clientInfo *ClientInfo) wait.ConditionFunc {
-return func() (bool, error) {
-nodes, err := clientInfo.Client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+func isIPPoolAllocationsEmptyForNodeSlices(ctx context.Context, k8sIPAM *kubeClient.KubernetesIPAM, ipPoolCIDR string, clientInfo *ClientInfo) wait.ConditionWithContextFunc {
+return func(context.Context) (bool, error) {
+nodes, err := clientInfo.Client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
}
for _, node := range nodes.Items {
-ipPool, err := k8sIPAM.GetIPPool(context.Background(), kubeClient.PoolIdentifier{NodeName: node.Name, IpRange: ipPoolCIDR, NetworkName: k8sIPAM.Config.NetworkName})
+ipPool, err := k8sIPAM.GetIPPool(ctx, kubeClient.PoolIdentifier{NodeName: node.Name, IpRange: ipPoolCIDR, NetworkName: k8sIPAM.Config.NetworkName})
if err != nil {
if err.Error() == "k8s pool initialized" {
continue
@@ -64,6 +64,6 @@ func WaitForZeroIPPoolAllocations(ctx context.Context, k8sIPAM *kubeClient.Kuber
// WaitForZeroIPPoolAllocationsAcrossNodeSlices polls up to timeout seconds for IP pool allocations to be gone from the Kubernetes cluster.
// Returns an error if any IP pool allocations remain after time limit, or if GETing IP pools causes an error.
-func WaitForZeroIPPoolAllocationsAcrossNodeSlices(k8sIPAM *kubeClient.KubernetesIPAM, ipPoolCIDR string, timeout time.Duration, clientInfo *ClientInfo) error {
-return wait.PollImmediate(time.Second, timeout, isIPPoolAllocationsEmptyForNodeSlices(k8sIPAM, ipPoolCIDR, clientInfo))
+func WaitForZeroIPPoolAllocationsAcrossNodeSlices(ctx context.Context, k8sIPAM *kubeClient.KubernetesIPAM, ipPoolCIDR string, timeout time.Duration, clientInfo *ClientInfo) error {
+return wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, isIPPoolAllocationsEmptyForNodeSlices(ctx, k8sIPAM, ipPoolCIDR, clientInfo))
}
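For reference, a minimal sketch of the context-aware polling pattern the new signatures adopt: wait.ConditionWithContextFunc plus wait.PollUntilContextTimeout (with immediate=true) in place of wait.ConditionFunc plus wait.PollImmediate. The condition below is an illustrative stand-in, not code from this repo.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// succeedAfter returns an illustrative wait.ConditionWithContextFunc that
// reports success once it has been polled n times. A real condition would use
// the context for API calls, e.g. client.CoreV1().Nodes().List(ctx, ...).
func succeedAfter(n int) wait.ConditionWithContextFunc {
	attempts := 0
	return func(ctx context.Context) (bool, error) {
		attempts++
		return attempts >= n, nil
	}
}

func main() {
	ctx := context.Background()

	// Poll every second for up to 10 seconds; the `true` argument requests an
	// immediate first check, mirroring the old wait.PollImmediate behaviour.
	if err := wait.PollUntilContextTimeout(ctx, time.Second, 10*time.Second, true, succeedAfter(3)); err != nil {
		fmt.Println("condition never became true:", err)
		return
	}
	fmt.Println("condition satisfied")
}
```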

View File

@@ -3,6 +3,7 @@ package client
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/api/errors"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -23,13 +24,16 @@ func GetNodeSubnet(cs *ClientInfo, nodeName, sliceName, namespace string) (strin
}
func WaitForNodeSliceReady(ctx context.Context, cs *ClientInfo, namespace, nodeSliceName string, timeout time.Duration) error {
-return wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, isNodeSliceReady(ctx, cs, nodeSliceName, namespace))
+return wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, isNodeSliceReady(ctx, cs, namespace, nodeSliceName))
}
func isNodeSliceReady(ctx context.Context, cs *ClientInfo, namespace, nodeSliceName string) wait.ConditionWithContextFunc {
return func(context.Context) (bool, error) {
_, err := cs.WbClient.WhereaboutsV1alpha1().NodeSlicePools(namespace).Get(ctx, nodeSliceName, metav1.GetOptions{})
if err != nil {
+if errors.IsNotFound(err) {
+return false, nil
+}
return false, err
}
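A small sketch of the error handling added above: inside a poll condition, a NotFound error means the object simply is not there yet, so the condition returns (false, nil) to keep polling, while any other error aborts the poll. The getter below is an illustrative stand-in for the typed client call, not the repo's API.

```go
package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
)

// getResource is an illustrative stand-in for a typed client Get call.
func getResource(ctx context.Context, exists bool) error {
	if !exists {
		// Simulate the API server reporting that the object does not exist yet.
		return apierrors.NewNotFound(schema.GroupResource{Group: "whereabouts.cni.cncf.io", Resource: "nodeslicepools"}, "demo")
	}
	return nil
}

// isReady keeps polling on NotFound and fails the poll on any other error.
func isReady(exists *bool) wait.ConditionWithContextFunc {
	return func(ctx context.Context) (bool, error) {
		if err := getResource(ctx, *exists); err != nil {
			if apierrors.IsNotFound(err) {
				return false, nil // object not created yet; poll again
			}
			return false, err // unexpected error; stop polling
		}
		return true, nil
	}
}

func main() {
	exists := false
	cond := isReady(&exists)

	ok, err := cond(context.Background())
	fmt.Println(ok, err) // false <nil>: NotFound is treated as "retry"

	exists = true
	ok, err = cond(context.Background())
	fmt.Println(ok, err) // true <nil>
}
```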

View File

@@ -2,6 +2,7 @@ package whereabouts_e2e
import (
"context"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
"testing"
"time"
@@ -21,7 +22,6 @@ import (
"github.com/k8snetworkplumbingwg/whereabouts/e2e/retrievers"
testenv "github.com/k8snetworkplumbingwg/whereabouts/e2e/testenvironment"
"github.com/k8snetworkplumbingwg/whereabouts/e2e/util"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/storage"
wbstorage "github.com/k8snetworkplumbingwg/whereabouts/pkg/storage/kubernetes"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/types"

View File

@@ -23,10 +23,10 @@ func (pc *NodeSliceChecker) MissingIPs() []string {
var mismatchedIPs []string
for _, pod := range pc.podList {
podIPs, err := retrievers.SecondaryIfaceIPValue(&pod)
-podIP := podIPs[len(podIPs)-1]
if err != nil {
return []string{}
}
+podIP := podIPs[len(podIPs)-1]
var found bool
for _, pool := range pc.ipPools {

View File

@@ -122,7 +122,7 @@ func CheckZeroIPPoolAllocationsAndReplicas(ctx context.Context, clientInfo *wbte
return err
}
} else {
-if err = wbtestclient.WaitForZeroIPPoolAllocationsAcrossNodeSlices(k8sIPAM, ipPoolCIDR, zeroIPPoolTimeout, clientInfo); err != nil {
+if err = wbtestclient.WaitForZeroIPPoolAllocationsAcrossNodeSlices(ctx, k8sIPAM, ipPoolCIDR, zeroIPPoolTimeout, clientInfo); err != nil {
return err
}
}
@@ -175,7 +175,6 @@ func MacvlanNetworkWithWhereaboutsIPAMNetwork(networkName string, namespaceName
}
func MacvlanNetworkWithNodeSlice(networkName, namespaceName, ipRange, poolName, sliceSize string) *nettypes.NetworkAttachmentDefinition {
//TODO: fails without leader timeouts set
macvlanConfig := fmt.Sprintf(`{
"cniVersion": "0.3.0",
"disableCheck": true,

View File

@@ -98,10 +98,13 @@ trap "rm /tmp/whereabouts-img.tar || true" EXIT
kind load image-archive --name "$KIND_CLUSTER_NAME" /tmp/whereabouts-img.tar
echo "## install whereabouts"
for file in "daemonset-install.yaml" "whereabouts.cni.cncf.io_ippools.yaml" "whereabouts.cni.cncf.io_overlappingrangeipreservations.yaml" "node-slice-controller.yaml"; do
for file in "daemonset-install.yaml" "whereabouts.cni.cncf.io_ippools.yaml" "whereabouts.cni.cncf.io_overlappingrangeipreservations.yaml" "whereabouts.cni.cncf.io_nodeslicepools.yaml"; do
# insert 'imagePullPolicy: Never' under the container 'image' so it is certain that the image used
# by the daemonset is the one loaded into KinD and not one pulled from a repo
sed '/ image:/a\ imagePullPolicy: Never' "$ROOT/doc/crds/$file" | retry kubectl apply -f -
done
+# deployment has an extra tab for the sed so doing out of the loop
+sed '/ image:/a\ imagePullPolicy: Never' "$ROOT/doc/crds/node-slice-controller.yaml" | retry kubectl apply -f -
retry kubectl wait -n kube-system --for=condition=ready -l app=whereabouts pod --timeout=$TIMEOUT_K8
retry kubectl wait -n kube-system --for=condition=ready -l app=whereabouts-controller pod --timeout=$TIMEOUT_K8
echo "## done"

View File

@@ -23,7 +23,6 @@ import (
v1alpha1 "github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
@@ -35,9 +34,9 @@ type FakeNodeSlicePools struct {
ns string
}
-var nodeslicepoolsResource = schema.GroupVersionResource{Group: "whereabouts.cni.cncf.io", Version: "v1alpha1", Resource: "nodeslicepools"}
+var nodeslicepoolsResource = v1alpha1.SchemeGroupVersion.WithResource("nodeslicepools")
-var nodeslicepoolsKind = schema.GroupVersionKind{Group: "whereabouts.cni.cncf.io", Version: "v1alpha1", Kind: "NodeSlicePool"}
+var nodeslicepoolsKind = v1alpha1.SchemeGroupVersion.WithKind("NodeSlicePool")
// Get takes name of the nodeSlicePool, and returns the corresponding nodeSlicePool object, and an error if there is any.
func (c *FakeNodeSlicePools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NodeSlicePool, err error) {
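The regenerated fake client derives its GroupVersionResource and GroupVersionKind from the API package's SchemeGroupVersion instead of hand-written schema literals. A minimal sketch showing the two forms produce identical values (the GroupVersion below is written out literally in place of the generated v1alpha1.SchemeGroupVersion variable):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Stand-in for the generated package's SchemeGroupVersion variable.
	groupVersion := schema.GroupVersion{Group: "whereabouts.cni.cncf.io", Version: "v1alpha1"}

	// Old style: spell out the full GroupVersionResource / GroupVersionKind.
	literalGVR := schema.GroupVersionResource{Group: "whereabouts.cni.cncf.io", Version: "v1alpha1", Resource: "nodeslicepools"}
	literalGVK := schema.GroupVersionKind{Group: "whereabouts.cni.cncf.io", Version: "v1alpha1", Kind: "NodeSlicePool"}

	// New style: derive them from the group/version, as the regenerated code does.
	derivedGVR := groupVersion.WithResource("nodeslicepools")
	derivedGVK := groupVersion.WithKind("NodeSlicePool")

	fmt.Println(literalGVR == derivedGVR) // true
	fmt.Println(literalGVK == derivedGVK) // true
}
```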

View File

@@ -200,7 +200,6 @@ func (f *fixture) newController(ctx context.Context) (*Controller, informers.Sha
f.nadClient,
kubeInformerFactory.Core().V1().Nodes(),
whereaboutsInformerFactory.Whereabouts().V1alpha1().NodeSlicePools(),
-whereaboutsInformerFactory.Whereabouts().V1alpha1().IPPools(),
nadInformerFactory.K8sCniCncfIo().V1().NetworkAttachmentDefinitions(),
true)