fast node slice initial implementation

Igor Velichkovich 2024-04-16 17:00:52 -07:00
parent c5e45aa58c
commit 9a9a3a01af
38 changed files with 3721 additions and 66 deletions

2
.gitignore vendored
View File

@ -10,6 +10,8 @@
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
.idea
kind/
bin/
/github.com/

View File

@ -9,5 +9,6 @@ FROM alpine:latest
LABEL org.opencontainers.image.source https://github.com/k8snetworkplumbingwg/whereabouts
COPY --from=0 /go/src/github.com/k8snetworkplumbingwg/whereabouts/bin/whereabouts .
COPY --from=0 /go/src/github.com/k8snetworkplumbingwg/whereabouts/bin/ip-control-loop .
COPY --from=0 /go/src/github.com/k8snetworkplumbingwg/whereabouts/bin/node-slice-controller .
COPY script/install-cni.sh .
CMD ["/install-cni.sh"]

View File

@ -0,0 +1,88 @@
package main
import (
"flag"
"time"
nadclient "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned"
nadinformers "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
clientset "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/clientset/versioned"
informers "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/informers/externalversions"
node_controller "github.com/k8snetworkplumbingwg/whereabouts/pkg/node-controller"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/node-controller/signals"
)
var (
masterURL string
kubeconfig string
)
// TODO: leader election
func main() {
klog.InitFlags(nil)
flag.Parse()
// set up signals so we handle the shutdown signal gracefully
ctx := signals.SetupSignalHandler()
logger := klog.FromContext(ctx)
cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
if err != nil {
logger.Error(err, "Error building kubeconfig")
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
}
kubeClient, err := kubernetes.NewForConfig(cfg)
if err != nil {
logger.Error(err, "Error building kubernetes clientset")
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
}
whereaboutsClient, err := clientset.NewForConfig(cfg)
if err != nil {
logger.Error(err, "Error building kubernetes clientset")
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
}
nadClient, err := nadclient.NewForConfig(cfg)
if err != nil {
logger.Error(err, "Error building kubernetes clientset")
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
}
kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30)
whereaboutsInformerFactory := informers.NewSharedInformerFactory(whereaboutsClient, time.Second*30)
nadInformerFactory := nadinformers.NewSharedInformerFactory(nadClient, time.Second*30)
controller := node_controller.NewController(
ctx,
kubeClient,
whereaboutsClient,
nadClient,
kubeInformerFactory.Core().V1().Nodes(),
whereaboutsInformerFactory.Whereabouts().V1alpha1().NodeSlicePools(),
nadInformerFactory.K8sCniCncfIo().V1().NetworkAttachmentDefinitions(),
false,
)
// notice that there is no need to run Start methods in a separate goroutine (i.e. go kubeInformerFactory.Start(ctx.Done())).
// Start method is non-blocking and runs all registered informers in a dedicated goroutine.
kubeInformerFactory.Start(ctx.Done())
whereaboutsInformerFactory.Start(ctx.Done())
nadInformerFactory.Start(ctx.Done())
if err = controller.Run(ctx, 1); err != nil {
logger.Error(err, "Error running controller")
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
}
}
func init() {
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
}

View File

@ -28,6 +28,7 @@ rules:
resources:
- ippools
- overlappingrangeipreservations
- nodeslicepools
verbs:
- get
- list
@ -48,11 +49,14 @@ rules:
verbs:
- list
- watch
- get
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups: ["k8s.cni.cncf.io"]
resources:
- network-attachment-definitions

View File

@ -0,0 +1,92 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: whereabouts-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
app: whereabouts-controller
template:
metadata:
labels:
app: whereabouts-controller
spec:
containers:
- command:
- /node-slice-controller
env:
- name: NODENAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: WHEREABOUTS_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
image: ghcr.io/k8snetworkplumbingwg/whereabouts:latest
name: whereabouts
resources:
limits:
cpu: 100m
memory: 200Mi
requests:
cpu: 100m
memory: 100Mi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cnibin
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- mountPath: /cron-schedule
name: cron-scheduler-configmap
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-6kd6k
readOnly: true
preemptionPolicy: PreemptLowerPriority
priority: 0
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: whereabouts
serviceAccountName: whereabouts
terminationGracePeriodSeconds: 30
volumes:
- hostPath:
path: /opt/cni/bin
type: ""
name: cnibin
- hostPath:
path: /etc/cni/net.d
type: ""
name: cni-net-dir
- configMap:
defaultMode: 484
items:
- key: cron-expression
path: config
name: whereabouts-config
name: cron-scheduler-configmap
- name: kube-api-access-6kd6k
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace

View File

@ -0,0 +1,76 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.4.1
creationTimestamp: null
name: nodeslicepools.whereabouts.cni.cncf.io
spec:
group: whereabouts.cni.cncf.io
names:
kind: NodeSlicePool
listKind: NodeSlicePoolList
plural: nodeslicepools
singular: nodeslicepool
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: NodeSlicePool is the Schema for the nodeslicepools API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: NodeSlicePoolSpec defines the desired state of NodeSlicePool
properties:
range:
description: Range is a RFC 4632/4291-style string that represents
an IP address and prefix length in CIDR notation; it is the entire
range from which each node is allocated a subset
type: string
sliceSize:
type: string
required:
- range
- sliceSize
type: object
status:
description: NodeSlicePoolStatus defines the observed state of NodeSlicePool
properties:
allocations:
items:
properties:
nodeName:
type: string
sliceRange:
type: string
required:
- nodeName
- sliceRange
type: object
type: array
required:
- allocations
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

View File

@ -10,6 +10,7 @@ import (
"time"
kubeClient "github.com/k8snetworkplumbingwg/whereabouts/pkg/storage/kubernetes"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
)
@ -31,8 +32,38 @@ func isIPPoolAllocationsEmpty(ctx context.Context, k8sIPAM *kubeClient.Kubernete
}
}
func isIPPoolAllocationsEmptyForNodeSlices(k8sIPAM *kubeClient.KubernetesIPAM, ipPoolCIDR string, clientInfo *ClientInfo) wait.ConditionFunc {
return func() (bool, error) {
nodes, err := clientInfo.Client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
for _, node := range nodes.Items {
ipPool, err := k8sIPAM.GetIPPool(context.Background(), kubeClient.PoolIdentifier{NodeName: node.Name, IpRange: ipPoolCIDR, NetworkName: k8sIPAM.Config.NetworkName})
if err != nil {
if err.Error() == "k8s pool initialized" {
continue
} else {
return false, err
}
}
if len(ipPool.Allocations()) != 0 {
return false, nil
}
}
return true, nil
}
}
// WaitForZeroIPPoolAllocations polls up to timeout seconds for IP pool allocations to be gone from the Kubernetes cluster.
// Returns an error if any IP pool allocations remain after time limit, or if GETing IP pools causes an error.
func WaitForZeroIPPoolAllocations(ctx context.Context, k8sIPAM *kubeClient.KubernetesIPAM, ipPoolCIDR string, timeout time.Duration) error {
return wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, isIPPoolAllocationsEmpty(ctx, k8sIPAM, ipPoolCIDR))
}
// WaitForZeroIPPoolAllocationsAcrossNodeSlices polls up to timeout seconds for IP pool allocations to be gone from the Kubernetes cluster.
// Returns an error if any IP pool allocations remain after time limit, or if GETing IP pools causes an error.
func WaitForZeroIPPoolAllocationsAcrossNodeSlices(k8sIPAM *kubeClient.KubernetesIPAM, ipPoolCIDR string, timeout time.Duration, clientInfo *ClientInfo) error {
return wait.PollImmediate(time.Second, timeout, isIPPoolAllocationsEmptyForNodeSlices(k8sIPAM, ipPoolCIDR, clientInfo))
}

View File

@ -0,0 +1,38 @@
package client
import (
"context"
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
)
func GetNodeSubnet(cs *ClientInfo, nodeName, sliceName, namespace string) (string, error) {
slice, err := cs.WbClient.WhereaboutsV1alpha1().NodeSlicePools(namespace).Get(context.TODO(), sliceName, metav1.GetOptions{})
if err != nil {
return "", err
}
for _, allocation := range slice.Status.Allocations {
if allocation.NodeName == nodeName {
return allocation.SliceRange, nil
}
}
return "", fmt.Errorf("slice range not found for node")
}
func WaitForNodeSliceReady(ctx context.Context, cs *ClientInfo, namespace, nodeSliceName string, timeout time.Duration) error {
return wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, isNodeSliceReady(ctx, cs, namespace, nodeSliceName))
}
func isNodeSliceReady(ctx context.Context, cs *ClientInfo, namespace, nodeSliceName string) wait.ConditionWithContextFunc {
return func(context.Context) (bool, error) {
_, err := cs.WbClient.WhereaboutsV1alpha1().NodeSlicePools(namespace).Get(ctx, nodeSliceName, metav1.GetOptions{})
if err != nil {
return false, err
}
return true, nil
}
}
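For orientation, a hedged usage sketch of the two helpers above (not part of this commit); the node name "worker-1" is hypothetical, while the namespace, network name, and timeout mirror the node-slice e2e constants.
// Illustrative only: wait for the NodeSlicePool to exist, then look up the
// slice assigned to a (hypothetical) node.
func exampleNodeSubnet(ctx context.Context, cs *ClientInfo) (string, error) {
	if err := WaitForNodeSliceReady(ctx, cs, "default", "wa-nad", 5*time.Second); err != nil {
		return "", err // the NodeSlicePool was never created
	}
	return GetNodeSubnet(cs, "worker-1", "wa-nad", "default") // e.g. "10.0.0.0/20"
}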

View File

@ -2,10 +2,12 @@ package client
import (
"context"
"fmt"
"time"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
@ -14,13 +16,15 @@ import (
netclient "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1"
"github.com/k8snetworkplumbingwg/whereabouts/e2e/entities"
whereaboutscnicncfiov1alpha1 "github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
wbclient "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/clientset/versioned"
)
const (
createTimeout = 10 * time.Second
deleteTimeout = 2 * createTimeout
rsCreateTimeout = 600 * time.Second
nodeSliceCreateTimeout = 5 * time.Second
)
type statefulSetPredicate func(statefulSet *appsv1.StatefulSet, expectedReplicas int) bool
@ -53,6 +57,18 @@ func NewClientInfo(config *rest.Config) (*ClientInfo, error) {
}, nil
}
func (c *ClientInfo) GetNodeSlicePool(name string, namespace string) (*whereaboutscnicncfiov1alpha1.NodeSlicePool, error) {
err := WaitForNodeSliceReady(context.TODO(), c, namespace, name, nodeSliceCreateTimeout)
if err != nil {
return nil, err
}
nodeslice, err := c.WbClient.WhereaboutsV1alpha1().NodeSlicePools(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return nodeslice, nil
}
func (c *ClientInfo) AddNetAttachDef(netattach *nettypes.NetworkAttachmentDefinition) (*nettypes.NetworkAttachmentDefinition, error) {
return c.NetClient.NetworkAttachmentDefinitions(netattach.ObjectMeta.Namespace).Create(context.TODO(), netattach, metav1.CreateOptions{})
}
@ -61,6 +77,14 @@ func (c *ClientInfo) DelNetAttachDef(netattach *nettypes.NetworkAttachmentDefini
return c.NetClient.NetworkAttachmentDefinitions(netattach.ObjectMeta.Namespace).Delete(context.TODO(), netattach.Name, metav1.DeleteOptions{})
}
func (c *ClientInfo) NodeSliceDeleted(name, namespace string) error {
_, err := c.WbClient.WhereaboutsV1alpha1().NodeSlicePools(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err == nil || !errors.IsNotFound(err) {
return fmt.Errorf("expected not found nodeslice")
}
return nil
}
func (c *ClientInfo) ProvisionPod(podName string, namespace string, label, annotations map[string]string) (*corev1.Pod, error) {
ctx := context.Background()
pod := entities.PodObject(podName, namespace, label, annotations)

353
e2e/e2e_node_slice_test.go Normal file
View File

@ -0,0 +1,353 @@
package whereabouts_e2e
import (
"context"
"testing"
"time"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
v1 "k8s.io/api/apps/v1"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"
nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
wbtestclient "github.com/k8snetworkplumbingwg/whereabouts/e2e/client"
"github.com/k8snetworkplumbingwg/whereabouts/e2e/entities"
"github.com/k8snetworkplumbingwg/whereabouts/e2e/poolconsistency"
"github.com/k8snetworkplumbingwg/whereabouts/e2e/retrievers"
testenv "github.com/k8snetworkplumbingwg/whereabouts/e2e/testenvironment"
"github.com/k8snetworkplumbingwg/whereabouts/e2e/util"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/storage"
wbstorage "github.com/k8snetworkplumbingwg/whereabouts/pkg/storage/kubernetes"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/types"
)
func TestWhereaboutsE2ENodeSlice(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "whereabouts-e2e-node-slice")
}
var _ = Describe("Whereabouts node slice functionality", func() {
Context("Test setup", func() {
const (
testNamespace = "default"
ipv4TestRange = "10.0.0.0/8"
sliceSize = "/20" // tests will depend on subnets being > node count of test environment
testNetworkName = "wa-nad"
subnets = 4096
rsName = "whereabouts-scale-test"
)
var (
clientInfo *wbtestclient.ClientInfo
testConfig *testenv.Configuration
netAttachDef *nettypes.NetworkAttachmentDefinition
replicaSet *v1.ReplicaSet
pod *core.Pod
)
BeforeEach(func() {
var (
config *rest.Config
err error
)
testConfig, err = testenv.NewConfig()
Expect(err).NotTo(HaveOccurred())
config, err = util.ClusterConfig()
Expect(err).NotTo(HaveOccurred())
clientInfo, err = wbtestclient.NewClientInfo(config)
Expect(err).NotTo(HaveOccurred())
netAttachDef = util.MacvlanNetworkWithNodeSlice(testNetworkName, testNamespace, ipv4TestRange, testNetworkName, sliceSize)
By("creating a NetworkAttachmentDefinition for whereabouts")
_, err = clientInfo.AddNetAttachDef(netAttachDef)
Expect(err).NotTo(HaveOccurred())
By("checking node slices have been allocated and nodes are assigned")
Expect(util.ValidateNodeSlicePoolSlicesCreatedAndNodesAssigned(testNetworkName, testNamespace, subnets, clientInfo)).To(Succeed())
})
AfterEach(func() {
Expect(clientInfo.DelNetAttachDef(netAttachDef)).To(Succeed())
time.Sleep(1 * time.Second)
Expect(clientInfo.NodeSliceDeleted(testNetworkName, testNamespace)).To(Succeed())
})
Context("Single pod tests node slice", func() {
BeforeEach(func() {
const singlePodName = "whereabouts-basic-test"
var err error
By("creating a pod with whereabouts net-attach-def")
pod, err = clientInfo.ProvisionPod(
singlePodName,
testNamespace,
util.PodTierLabel(singlePodName),
entities.PodNetworkSelectionElements(testNetworkName),
)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
By("deleting pod with whereabouts net-attach-def")
Expect(clientInfo.DeletePod(pod)).To(Succeed())
})
It("allocates a single pod within the correct IP range", func() {
By("checking pod IP is within whereabouts IPAM range")
secondaryIfaceIPs, err := retrievers.SecondaryIfaceIPValue(pod, "net1")
Expect(err).NotTo(HaveOccurred())
Expect(secondaryIfaceIPs).NotTo(BeEmpty())
Expect(util.InNodeRange(clientInfo, pod.Spec.NodeName, testNetworkName, testNamespace, secondaryIfaceIPs[0])).To(Succeed())
})
})
Context("Replicaset tests node slice", func() {
const (
emptyReplicaSet = 0
rsSteadyTimeout = 1200 * time.Second
)
var k8sIPAM *wbstorage.KubernetesIPAM
BeforeEach(func() {
By("creating a replicaset with whereabouts net-attach-def")
var err error
const ipPoolNamespace = "kube-system"
k8sIPAM, err = wbstorage.NewKubernetesIPAMWithNamespace("", "", types.IPAMConfig{
Kubernetes: types.KubernetesConfig{
KubeConfigPath: testConfig.KubeconfigPath,
},
NodeSliceSize: sliceSize,
NetworkName: testNetworkName,
Namespace: testNamespace,
}, ipPoolNamespace)
Expect(err).NotTo(HaveOccurred())
replicaSet, err = clientInfo.ProvisionReplicaSet(
rsName,
testNamespace,
emptyReplicaSet,
util.PodTierLabel(rsName),
entities.PodNetworkSelectionElements(testNetworkName),
)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
By("removing replicas and expecting 0 IP pool allocations")
Expect(
util.CheckZeroIPPoolAllocationsAndReplicas(
context.TODO(), clientInfo, k8sIPAM, rsName, testNamespace, ipv4TestRange, testNetworkName)).To(Succeed())
By("deleting replicaset with whereabouts net-attach-def")
Expect(clientInfo.DeleteReplicaSet(replicaSet)).To(Succeed())
})
It("allocates each IP pool entry with a unique pod IP", func() {
By("creating max number of pods and checking IP Pool validity")
for i := 0; i < testConfig.NumberOfIterations; i++ {
Expect(
util.CheckZeroIPPoolAllocationsAndReplicas(
context.TODO(), clientInfo, k8sIPAM, rsName, testNamespace, ipv4TestRange, testNetworkName)).To(Succeed())
allPods, err := clientInfo.Client.CoreV1().Pods(core.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
replicaSet, err = clientInfo.UpdateReplicaSet(
entities.ReplicaSetObject(
testConfig.MaxReplicas(allPods.Items),
rsName,
testNamespace,
util.PodTierLabel(rsName),
entities.PodNetworkSelectionElements(testNetworkName),
))
Expect(err).NotTo(HaveOccurred())
Expect(
wbtestclient.WaitForReplicaSetSteadyState(
context.TODO(),
clientInfo.Client,
testNamespace,
entities.ReplicaSetQuery(rsName),
replicaSet,
rsSteadyTimeout)).To(Succeed())
podList, err := wbtestclient.ListPods(context.TODO(), clientInfo.Client, testNamespace, entities.ReplicaSetQuery(rsName))
Expect(err).NotTo(HaveOccurred())
Expect(podList.Items).NotTo(BeEmpty())
nodes, err := clientInfo.Client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(nodes.Items).NotTo(BeEmpty())
ipPools := []storage.IPPool{}
for _, node := range nodes.Items {
nodeSliceRange, err := wbstorage.GetNodeSlicePoolRange(context.TODO(), k8sIPAM, node.Name)
Expect(err).NotTo(HaveOccurred())
ipPool, err := k8sIPAM.GetIPPool(context.Background(), wbstorage.PoolIdentifier{IpRange: nodeSliceRange, NetworkName: testNetworkName, NodeName: node.Name})
if err == nil {
ipPools = append(ipPools, ipPool)
}
}
Expect(poolconsistency.NewNodeSliceConsistencyCheck(ipPools, podList.Items).MissingIPs()).To(BeEmpty())
Expect(poolconsistency.NewNodeSliceConsistencyCheck(ipPools, podList.Items).StaleIPs()).To(BeEmpty())
}
})
})
Context("stateful set tests", func() {
const (
initialReplicaNumber = 20
ipPoolNamespace = "kube-system"
namespace = "default"
serviceName = "web"
selector = "app=" + serviceName
statefulSetName = "statefulthingy"
)
podList := func(podList *core.PodList) []core.Pod { return podList.Items }
var k8sIPAM *wbstorage.KubernetesIPAM
Context("regular sized network", func() {
BeforeEach(func() {
var err error
_, err = clientInfo.ProvisionStatefulSet(statefulSetName, namespace, serviceName, initialReplicaNumber, testNetworkName)
Expect(err).NotTo(HaveOccurred())
Expect(
clientInfo.Client.CoreV1().Pods(namespace).List(
context.TODO(), metav1.ListOptions{LabelSelector: selector})).To(
WithTransform(podList, HaveLen(initialReplicaNumber)))
const ipPoolNamespace = "kube-system"
k8sIPAM, err = wbstorage.NewKubernetesIPAMWithNamespace("", "", types.IPAMConfig{
Kubernetes: types.KubernetesConfig{
KubeConfigPath: testConfig.KubeconfigPath,
},
NodeSliceSize: sliceSize,
NetworkName: testNetworkName,
Namespace: testNamespace,
}, ipPoolNamespace)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
Expect(clientInfo.DeleteStatefulSet(namespace, serviceName, selector)).To(Succeed())
Expect(
clientInfo.Client.CoreV1().Pods(namespace).List(
context.TODO(), metav1.ListOptions{LabelSelector: selector})).To(
WithTransform(podList, BeEmpty()),
"cannot have leaked pods in the system")
poolAllocations := func(ipPool *v1alpha1.IPPool) map[string]v1alpha1.IPAllocation {
return ipPool.Spec.Allocations
}
nodes, err := clientInfo.Client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(nodes.Items).NotTo(BeEmpty())
for _, node := range nodes.Items {
Expect(
clientInfo.WbClient.WhereaboutsV1alpha1().IPPools(ipPoolNamespace).Get(
context.TODO(),
wbstorage.IPPoolName(wbstorage.PoolIdentifier{IpRange: ipv4TestRange, NetworkName: testNetworkName, NodeName: node.Name}),
metav1.GetOptions{})).To(
WithTransform(poolAllocations, BeEmpty()),
"cannot have leaked IPAllocations in the system")
}
})
It("IPPools feature allocations", func() {
nodes, err := clientInfo.Client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(nodes.Items).NotTo(BeEmpty())
ipPools := []storage.IPPool{}
podList, err := clientInfo.Client.CoreV1().Pods(testNamespace).List(context.TODO(), metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(podList.Items).NotTo(BeEmpty())
for _, node := range nodes.Items {
nodeSliceRange, err := wbstorage.GetNodeSlicePoolRange(context.TODO(), k8sIPAM, node.Name)
Expect(err).NotTo(HaveOccurred())
ipPool, err := k8sIPAM.GetIPPool(context.Background(), wbstorage.PoolIdentifier{IpRange: nodeSliceRange, NetworkName: testNetworkName, NodeName: node.Name})
if err == nil {
ipPools = append(ipPools, ipPool)
}
}
Expect(poolconsistency.NewNodeSliceConsistencyCheck(ipPools, podList.Items).MissingIPs()).To(BeEmpty())
totalAllocations := 0
for _, node := range nodes.Items {
nodeSliceRange, err := wbstorage.GetNodeSlicePoolRange(context.TODO(), k8sIPAM, node.Name)
Expect(err).NotTo(HaveOccurred())
ipPool, err := clientInfo.WbClient.WhereaboutsV1alpha1().IPPools(ipPoolNamespace).Get(context.TODO(),
wbstorage.IPPoolName(wbstorage.PoolIdentifier{IpRange: nodeSliceRange, NetworkName: testNetworkName, NodeName: node.Name}),
metav1.GetOptions{})
// error is okay because pod may not land on every node
if err == nil {
totalAllocations = totalAllocations + len(ipPool.Spec.Allocations)
}
}
Expect(totalAllocations).To(Equal(initialReplicaNumber))
})
table.DescribeTable("stateful sets scale up / down", func(testSetup func(int), instanceDelta int) {
const scaleTimeout = util.CreatePodTimeout * 6
testSetup(instanceDelta)
Eventually(func() (int, error) {
totalAllocations := 0
nodes, err := clientInfo.Client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(nodes.Items).NotTo(BeEmpty())
for _, node := range nodes.Items {
nodeSliceRange, err := wbstorage.GetNodeSlicePoolRange(context.TODO(), k8sIPAM, node.Name)
Expect(err).NotTo(HaveOccurred())
ipPool, err := clientInfo.WbClient.WhereaboutsV1alpha1().IPPools(ipPoolNamespace).Get(context.TODO(),
wbstorage.IPPoolName(wbstorage.PoolIdentifier{IpRange: nodeSliceRange, NetworkName: testNetworkName, NodeName: node.Name}),
metav1.GetOptions{})
// error is okay because pod may not land on every node
if err == nil {
totalAllocations = totalAllocations + len(ipPool.Spec.Allocations)
}
}
return totalAllocations, nil
}, scaleTimeout).Should(
Equal(initialReplicaNumber), "we should have one allocation for each live pod")
},
table.Entry("scale up then down 5 replicas", func(deltaInstances int) {
Expect(clientInfo.ScaleStatefulSet(serviceName, namespace, deltaInstances)).To(Succeed())
Expect(clientInfo.ScaleStatefulSet(serviceName, namespace, -deltaInstances)).To(Succeed())
}, 5),
table.Entry("scale up then down 10 replicas", func(deltaInstances int) {
Expect(clientInfo.ScaleStatefulSet(serviceName, namespace, deltaInstances)).To(Succeed())
Expect(clientInfo.ScaleStatefulSet(serviceName, namespace, -deltaInstances)).To(Succeed())
}, 10),
table.Entry("scale up then down 20 replicas", func(deltaInstances int) {
Expect(clientInfo.ScaleStatefulSet(serviceName, namespace, deltaInstances)).To(Succeed())
Expect(clientInfo.ScaleStatefulSet(serviceName, namespace, -deltaInstances)).To(Succeed())
}, 20),
table.Entry("scale down then up 5 replicas", func(deltaInstances int) {
Expect(clientInfo.ScaleStatefulSet(serviceName, namespace, -deltaInstances)).To(Succeed())
Expect(clientInfo.ScaleStatefulSet(serviceName, namespace, deltaInstances)).To(Succeed())
}, 5),
table.Entry("scale down then up 10 replicas", func(deltaInstances int) {
Expect(clientInfo.ScaleStatefulSet(serviceName, namespace, -deltaInstances)).To(Succeed())
Expect(clientInfo.ScaleStatefulSet(serviceName, namespace, deltaInstances)).To(Succeed())
}, 10),
table.Entry("scale down then up 20 replicas", func(deltaInstances int) {
Expect(clientInfo.ScaleStatefulSet(serviceName, namespace, -deltaInstances)).To(Succeed())
Expect(clientInfo.ScaleStatefulSet(serviceName, namespace, deltaInstances)).To(Succeed())
}, 20),
)
})
})
})
})

View File

@ -3,6 +3,8 @@ package whereabouts_e2e
import (
"context"
"fmt"
"github.com/k8snetworkplumbingwg/whereabouts/e2e/util"
"k8s.io/client-go/tools/clientcmd"
"net"
"os"
"sort"
@ -14,14 +16,12 @@ import (
"github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
v1 "k8s.io/api/apps/v1"
core "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
wbtestclient "github.com/k8snetworkplumbingwg/whereabouts/e2e/client"
"github.com/k8snetworkplumbingwg/whereabouts/e2e/entities"
@ -72,13 +72,13 @@ var _ = Describe("Whereabouts functionality", func() {
testConfig, err = testenv.NewConfig()
Expect(err).NotTo(HaveOccurred())
config, err = clusterConfig()
config, err = util.ClusterConfig()
Expect(err).NotTo(HaveOccurred())
clientInfo, err = wbtestclient.NewClientInfo(config)
Expect(err).NotTo(HaveOccurred())
netAttachDef = macvlanNetworkWithWhereaboutsIPAMNetwork(testNetworkName, testNamespace, ipv4TestRange, []string{}, wbstorage.UnnamedNetwork, true)
netAttachDef = util.MacvlanNetworkWithWhereaboutsIPAMNetwork(testNetworkName, testNamespace, ipv4TestRange, []string{}, wbstorage.UnnamedNetwork, true)
By("creating a NetworkAttachmentDefinition for whereabouts")
_, err = clientInfo.AddNetAttachDef(netAttachDef)
@ -103,7 +103,7 @@ var _ = Describe("Whereabouts functionality", func() {
pod, err = clientInfo.ProvisionPod(
singlePodName,
testNamespace,
podTierLabel(singlePodName),
util.PodTierLabel(singlePodName),
entities.PodNetworkSelectionElements(testNetworkName),
)
Expect(err).NotTo(HaveOccurred())
@ -176,7 +176,7 @@ var _ = Describe("Whereabouts functionality", func() {
const dualstackPodName = "whereabouts-dualstack-test"
var err error
netAttachDefDualStack = macvlanNetworkWithWhereaboutsIPAMNetwork(
netAttachDefDualStack = util.MacvlanNetworkWithWhereaboutsIPAMNetwork(
testDualStackNetworkName,
testNamespace,
"",
@ -190,7 +190,7 @@ var _ = Describe("Whereabouts functionality", func() {
pod, err = clientInfo.ProvisionPod(
dualstackPodName,
testNamespace,
podTierLabel(dualstackPodName),
util.PodTierLabel(dualstackPodName),
entities.PodNetworkSelectionElements(testDualStackNetworkName),
)
Expect(err).NotTo(HaveOccurred())
@ -208,8 +208,8 @@ var _ = Describe("Whereabouts functionality", func() {
secondaryIfaceIPs, err := retrievers.SecondaryIfaceIPValue(pod, "net1")
Expect(err).NotTo(HaveOccurred())
Expect(secondaryIfaceIPs).To(HaveLen(2))
Expect(inRange(dualStackIPv4Range, secondaryIfaceIPs[0])).To(Succeed())
Expect(inRange(dualStackIPv6Range, secondaryIfaceIPs[1])).To(Succeed())
Expect(util.InRange(dualStackIPv4Range, secondaryIfaceIPs[0])).To(Succeed())
Expect(util.InRange(dualStackIPv6Range, secondaryIfaceIPs[1])).To(Succeed())
})
})
@ -218,7 +218,7 @@ var _ = Describe("Whereabouts functionality", func() {
const dualstackPodName = "whereabouts-dualstack-test"
var err error
netAttachDefDualStack = macvlanNetworkWithWhereaboutsIPAMNetwork(
netAttachDefDualStack = util.MacvlanNetworkWithWhereaboutsIPAMNetwork(
testDualStackNetworkName,
testNamespace,
ipv4TestRange,
@ -232,7 +232,7 @@ var _ = Describe("Whereabouts functionality", func() {
pod, err = clientInfo.ProvisionPod(
dualstackPodName,
testNamespace,
podTierLabel(dualstackPodName),
util.PodTierLabel(dualstackPodName),
entities.PodNetworkSelectionElements(testDualStackNetworkName),
)
Expect(err).NotTo(HaveOccurred())
@ -250,9 +250,9 @@ var _ = Describe("Whereabouts functionality", func() {
secondaryIfaceIPs, err := retrievers.SecondaryIfaceIPValue(pod, "net1")
Expect(err).NotTo(HaveOccurred())
Expect(secondaryIfaceIPs).To(HaveLen(3))
Expect(inRange(ipv4TestRange, secondaryIfaceIPs[0])).To(Succeed())
Expect(inRange(dualStackIPv4Range, secondaryIfaceIPs[1])).To(Succeed())
Expect(inRange(dualStackIPv6Range, secondaryIfaceIPs[2])).To(Succeed())
Expect(util.InRange(ipv4TestRange, secondaryIfaceIPs[0])).To(Succeed())
Expect(util.InRange(dualStackIPv4Range, secondaryIfaceIPs[1])).To(Succeed())
Expect(util.InRange(dualStackIPv6Range, secondaryIfaceIPs[2])).To(Succeed())
})
})
})
@ -281,7 +281,7 @@ var _ = Describe("Whereabouts functionality", func() {
rsName,
testNamespace,
emptyReplicaSet,
podTierLabel(rsName),
util.PodTierLabel(rsName),
entities.PodNetworkSelectionElements(testNetworkName),
)
Expect(err).NotTo(HaveOccurred())
@ -290,7 +290,7 @@ var _ = Describe("Whereabouts functionality", func() {
AfterEach(func() {
By("removing replicas and expecting 0 IP pool allocations")
Expect(
checkZeroIPPoolAllocationsAndReplicas(
util.CheckZeroIPPoolAllocationsAndReplicas(
ctx, clientInfo, k8sIPAM, rsName, testNamespace, ipPoolCIDR, testNetworkName)).To(Succeed())
By("deleting replicaset with whereabouts net-attach-def")
@ -301,7 +301,7 @@ var _ = Describe("Whereabouts functionality", func() {
By("creating max number of pods and checking IP Pool validity")
for i := 0; i < testConfig.NumberOfIterations; i++ {
Expect(
checkZeroIPPoolAllocationsAndReplicas(
util.CheckZeroIPPoolAllocationsAndReplicas(
ctx, clientInfo, k8sIPAM, rsName, testNamespace, ipPoolCIDR, testNetworkName)).To(Succeed())
allPods, err := clientInfo.Client.CoreV1().Pods(core.NamespaceAll).List(ctx, metav1.ListOptions{})
@ -312,7 +312,7 @@ var _ = Describe("Whereabouts functionality", func() {
testConfig.MaxReplicas(allPods.Items),
rsName,
testNamespace,
podTierLabel(rsName),
util.PodTierLabel(rsName),
entities.PodNetworkSelectionElements(testNetworkName),
))
Expect(err).NotTo(HaveOccurred())
@ -388,7 +388,7 @@ var _ = Describe("Whereabouts functionality", func() {
})
table.DescribeTable("stateful sets scale up / down", func(testSetup func(int), instanceDelta int) {
const scaleTimeout = createPodTimeout * 6
const scaleTimeout = util.CreatePodTimeout * 6
testSetup(instanceDelta)
@ -446,7 +446,7 @@ var _ = Describe("Whereabouts functionality", func() {
BeforeEach(func() {
var err error
tinyNetwork, err = clientInfo.AddNetAttachDef(
macvlanNetworkWithWhereaboutsIPAMNetwork(networkName, namespace, rangeWithTwoIPs, []string{}, wbstorage.UnnamedNetwork, true))
util.MacvlanNetworkWithWhereaboutsIPAMNetwork(networkName, namespace, rangeWithTwoIPs, []string{}, wbstorage.UnnamedNetwork, true))
Expect(err).NotTo(HaveOccurred())
_, err = clientInfo.ProvisionStatefulSet(statefulSetName, namespace, serviceName, replicaNumber, networkName)
@ -508,7 +508,7 @@ var _ = Describe("Whereabouts functionality", func() {
time.Second,
wbtestclient.IsStatefulSetDegradedPredicate)).Should(Succeed())
scaleUpTimeout := 2 * createPodTimeout
scaleUpTimeout := 2 * util.CreatePodTimeout
Expect(wbtestclient.WaitForStatefulSetCondition(
ctx,
clientInfo.Client,
@ -526,7 +526,6 @@ var _ = Describe("Whereabouts functionality", func() {
metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(ipPool.Spec.Allocations).NotTo(BeEmpty())
Expect(allocationForPodRef(podRef, *ipPool)[0].ContainerID).NotTo(Equal(containerID))
Expect(allocationForPodRef(podRef, *ipPool)[0].PodRef).To(Equal(podRef))
})
@ -677,7 +676,7 @@ var _ = Describe("Whereabouts functionality", func() {
When(fmt.Sprintf("a second net-attach-definition with \"enable_overlapping_ranges\": %t is created",
enableOverlappingRanges), func() {
BeforeEach(func() {
netAttachDef2 = macvlanNetworkWithWhereaboutsIPAMNetwork(testNetwork2Name, testNamespace,
netAttachDef2 = util.MacvlanNetworkWithWhereaboutsIPAMNetwork(testNetwork2Name, testNamespace,
ipv4TestRangeOverlapping, []string{}, "", false)
By("creating a second NetworkAttachmentDefinition for whereabouts")
@ -700,7 +699,7 @@ var _ = Describe("Whereabouts functionality", func() {
pod, err = clientInfo.ProvisionPod(
singlePodName,
testNamespace,
podTierLabel(singlePodName),
util.PodTierLabel(singlePodName),
entities.PodNetworkSelectionElements(testNetworkName),
)
Expect(err).NotTo(HaveOccurred())
@ -709,7 +708,7 @@ var _ = Describe("Whereabouts functionality", func() {
pod2, err = clientInfo.ProvisionPod(
singlePod2Name,
testNamespace,
podTierLabel(singlePodName),
util.PodTierLabel(singlePodName),
entities.PodNetworkSelectionElements(testNetwork2Name),
)
Expect(err).NotTo(HaveOccurred())
@ -765,9 +764,9 @@ var _ = Describe("Whereabouts functionality", func() {
err error
)
netAttachDef2 = macvlanNetworkWithWhereaboutsIPAMNetwork(testNetwork2Name, testNamespace,
netAttachDef2 = util.MacvlanNetworkWithWhereaboutsIPAMNetwork(testNetwork2Name, testNamespace,
ipv4TestRange, []string{}, namedNetworkName, true)
netAttachDef3 = macvlanNetworkWithWhereaboutsIPAMNetwork(testNetwork3Name, testNamespace,
netAttachDef3 = util.MacvlanNetworkWithWhereaboutsIPAMNetwork(testNetwork3Name, testNamespace,
ipv4TestRangeOverlapping, []string{}, namedNetworkName, true)
By("creating a second NetworkAttachmentDefinition for whereabouts")
@ -796,7 +795,7 @@ var _ = Describe("Whereabouts functionality", func() {
pod, err = clientInfo.ProvisionPod(
singlePodName,
testNamespace,
podTierLabel(singlePodName),
util.PodTierLabel(singlePodName),
entities.PodNetworkSelectionElements(testNetworkName),
)
Expect(err).NotTo(HaveOccurred())
@ -805,7 +804,7 @@ var _ = Describe("Whereabouts functionality", func() {
pod2, err = clientInfo.ProvisionPod(
singlePod2Name,
testNamespace,
podTierLabel(singlePodName),
util.PodTierLabel(singlePodName),
entities.PodNetworkSelectionElements(testNetwork2Name),
)
Expect(err).NotTo(HaveOccurred())
@ -814,7 +813,7 @@ var _ = Describe("Whereabouts functionality", func() {
pod3, err = clientInfo.ProvisionPod(
singlePod3Name,
testNamespace,
podTierLabel(singlePodName),
util.PodTierLabel(singlePodName),
entities.PodNetworkSelectionElements(testNetwork3Name),
)
Expect(err).NotTo(HaveOccurred())

View File

@ -0,0 +1,75 @@
package poolconsistency
import (
corev1 "k8s.io/api/core/v1"
"github.com/k8snetworkplumbingwg/whereabouts/e2e/retrievers"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/storage"
)
type NodeSliceChecker struct {
ipPools []storage.IPPool
podList []corev1.Pod
}
func NewNodeSliceConsistencyCheck(ipPools []storage.IPPool, podList []corev1.Pod) *NodeSliceChecker {
return &NodeSliceChecker{
ipPools: ipPools,
podList: podList,
}
}
func (pc *NodeSliceChecker) MissingIPs() []string {
var mismatchedIPs []string
for _, pod := range pc.podList {
podIPs, err := retrievers.SecondaryIfaceIPValue(&pod)
if err != nil || len(podIPs) == 0 {
return []string{}
}
podIP := podIPs[len(podIPs)-1]
var found bool
for _, pool := range pc.ipPools {
for _, allocation := range pool.Allocations() {
reservedIP := allocation.IP.String()
if reservedIP == podIP {
found = true
break
}
}
}
if !found {
mismatchedIPs = append(mismatchedIPs, podIP)
}
}
return mismatchedIPs
}
func (pc *NodeSliceChecker) StaleIPs() []string {
var staleIPs []string
for _, pool := range pc.ipPools {
for _, allocation := range pool.Allocations() {
reservedIP := allocation.IP.String()
found := false
for _, pod := range pc.podList {
podIPs, err := retrievers.SecondaryIfaceIPValue(&pod)
if err != nil || len(podIPs) == 0 {
continue
}
podIP := podIPs[len(podIPs)-1]
if reservedIP == podIP {
found = true
break
}
}
if !found {
staleIPs = append(staleIPs, allocation.IP.String())
}
}
}
return staleIPs
}

235
e2e/util/util.go Normal file
View File

@ -0,0 +1,235 @@
package util
import (
"context"
"fmt"
"net"
"os"
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
wbtestclient "github.com/k8snetworkplumbingwg/whereabouts/e2e/client"
"github.com/k8snetworkplumbingwg/whereabouts/e2e/entities"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
wbstorage "github.com/k8snetworkplumbingwg/whereabouts/pkg/storage/kubernetes"
)
const (
CreatePodTimeout = 10 * time.Second
)
func AllocationForPodRef(podRef string, ipPool v1alpha1.IPPool) *v1alpha1.IPAllocation {
for _, allocation := range ipPool.Spec.Allocations {
if allocation.PodRef == podRef {
return &allocation
}
}
return nil
}
func ClusterConfig() (*rest.Config, error) {
const kubeconfig = "KUBECONFIG"
kubeconfigPath, found := os.LookupEnv(kubeconfig)
if !found {
return nil, fmt.Errorf("must provide the path to the kubeconfig via the `KUBECONFIG` env variable")
}
config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
if err != nil {
return nil, err
}
return config, nil
}
func PodTierLabel(podTier string) map[string]string {
const tier = "tier"
return map[string]string{tier: podTier}
}
// ValidateNodeSlicePoolSlicesCreatedAndNodesAssigned checks that the expected number of subnets has been created and that each node has a unique allocation.
// NOTE: this requires that there are no more nodes than subnets in the nodeslicepool.
func ValidateNodeSlicePoolSlicesCreatedAndNodesAssigned(nodesliceName string, nodeSliceNamespace string, expectedSubnets int, clientInfo *wbtestclient.ClientInfo) error {
nodeSlice, err := clientInfo.GetNodeSlicePool(nodesliceName, nodeSliceNamespace)
if err != nil {
return err
}
// Should create subnets
if len(nodeSlice.Status.Allocations) != expectedSubnets {
return fmt.Errorf("expected allocations %v but got allocations %v", expectedSubnets, len(nodeSlice.Status.Allocations))
}
// Each subnet should have a unique range
allocationMap := map[string]struct{}{}
nodeMap := map[string]struct{}{}
for _, allocation := range nodeSlice.Status.Allocations {
if _, ok := allocationMap[allocation.SliceRange]; ok {
return fmt.Errorf("duplicate subnet %v in slice allocations", allocation.SliceRange)
}
if _, ok := nodeMap[allocation.NodeName]; allocation.NodeName != "" && ok {
return fmt.Errorf("node %v is assigned to more than one slice", allocation.NodeName)
}
allocationMap[allocation.SliceRange] = struct{}{}
nodeMap[allocation.NodeName] = struct{}{}
}
// All nodes should be assigned exactly one time
nodes, err := clientInfo.Client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return err
}
for _, node := range nodes.Items {
if _, ok := nodeMap[node.Name]; !ok {
//TODO: should control-plane nodes be excluded?
return fmt.Errorf("node %v not assigned to a slice", node.Name)
}
}
return nil
}
// CheckZeroIPPoolAllocationsAndReplicas waits for all replicas to be removed from the ReplicaSet, then checks that there are zero IP pool allocations.
func CheckZeroIPPoolAllocationsAndReplicas(ctx context.Context, clientInfo *wbtestclient.ClientInfo, k8sIPAM *wbstorage.KubernetesIPAM, rsName, namespace string, ipPoolCIDR string, networkNames ...string) error {
const (
emptyReplicaSet = 0
rsSteadyTimeout = 1200 * time.Second
zeroIPPoolTimeout = 2 * time.Minute
)
var err error
replicaSet, err := clientInfo.UpdateReplicaSet(
entities.ReplicaSetObject(
emptyReplicaSet,
rsName,
namespace,
PodTierLabel(rsName),
entities.PodNetworkSelectionElements(networkNames...),
))
if err != nil {
return err
}
matchingLabel := entities.ReplicaSetQuery(rsName)
if err = wbtestclient.WaitForReplicaSetSteadyState(ctx, clientInfo.Client, namespace, matchingLabel, replicaSet, rsSteadyTimeout); err != nil {
return err
}
if k8sIPAM.Config.NodeSliceSize == "" {
if err = wbtestclient.WaitForZeroIPPoolAllocations(ctx, k8sIPAM, ipPoolCIDR, zeroIPPoolTimeout); err != nil {
return err
}
} else {
if err = wbtestclient.WaitForZeroIPPoolAllocationsAcrossNodeSlices(k8sIPAM, ipPoolCIDR, zeroIPPoolTimeout, clientInfo); err != nil {
return err
}
}
return nil
}
// GenerateNetAttachDefSpec returns a NetworkAttachmentDefinition object configured with the provided parameters.
func GenerateNetAttachDefSpec(name, namespace, config string) *nettypes.NetworkAttachmentDefinition {
return &nettypes.NetworkAttachmentDefinition{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "NetworkAttachmentDefinition",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: nettypes.NetworkAttachmentDefinitionSpec{
Config: config,
},
}
}
func MacvlanNetworkWithWhereaboutsIPAMNetwork(networkName string, namespaceName string, ipRange string, ipRanges []string, poolName string, enableOverlappingRanges bool) *nettypes.NetworkAttachmentDefinition {
macvlanConfig := fmt.Sprintf(`{
"cniVersion": "0.3.0",
"disableCheck": true,
"plugins": [
{
"type": "macvlan",
"master": "eth0",
"mode": "bridge",
"ipam": {
"type": "whereabouts",
"leader_lease_duration": 1500,
"leader_renew_deadline": 1000,
"leader_retry_period": 500,
"range": "%s",
"ipRanges": %s,
"log_level": "debug",
"log_file": "/tmp/wb",
"network_name": "%s",
"enable_overlapping_ranges": %v
}
}
]
}`, ipRange, CreateIPRanges(ipRanges), poolName, enableOverlappingRanges)
return GenerateNetAttachDefSpec(networkName, namespaceName, macvlanConfig)
}
func MacvlanNetworkWithNodeSlice(networkName, namespaceName, ipRange, poolName, sliceSize string) *nettypes.NetworkAttachmentDefinition {
//TODO: fails without leader timeouts set
macvlanConfig := fmt.Sprintf(`{
"cniVersion": "0.3.0",
"disableCheck": true,
"plugins": [
{
"type": "macvlan",
"master": "eth0",
"mode": "bridge",
"ipam": {
"type": "whereabouts",
"leader_lease_duration": 1500,
"leader_renew_deadline": 1000,
"leader_retry_period": 500,
"namespace": "%s",
"range": "%s",
"log_level": "debug",
"log_file": "/tmp/wb",
"network_name": "%s",
"node_slice_size": "%s"
}
}
]
}`, namespaceName, ipRange, poolName, sliceSize)
return GenerateNetAttachDefSpec(networkName, namespaceName, macvlanConfig)
}
func InNodeRange(clientInfo *wbtestclient.ClientInfo, nodeName, sliceName, namespace, ip string) error {
cidrRange, err := wbtestclient.GetNodeSubnet(clientInfo, nodeName, sliceName, namespace)
if err != nil {
return err
}
return InRange(cidrRange, ip)
}
func InRange(cidr string, ip string) error {
_, cidrRange, err := net.ParseCIDR(cidr)
if err != nil {
return err
}
if cidrRange.Contains(net.ParseIP(ip)) {
return nil
}
return fmt.Errorf("ip [%s] is NOT in range %s", ip, cidr)
}
func CreateIPRanges(ranges []string) string {
formattedRanges := []string{}
for _, ipRange := range ranges {
singleRange := fmt.Sprintf(`{"range": "%s"}`, ipRange)
formattedRanges = append(formattedRanges, singleRange)
}
ipRanges := "[" + strings.Join(formattedRanges[:], ",") + "]"
return ipRanges
}
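A small, hedged sketch (not part of this commit) of how the helpers above behave, reusing the node-slice e2e values; the sample IP and the /24 range are illustrative.
// Illustrative only.
func exampleUtilHelpers() {
	// CreateIPRanges renders the ipRanges JSON array embedded in the NAD config:
	// prints [{"range": "10.0.0.0/24"}]
	fmt.Println(CreateIPRanges([]string{"10.0.0.0/24"}))

	// InRange returns nil when the IP falls inside the CIDR.
	fmt.Println(InRange("10.0.16.0/20", "10.0.17.5")) // <nil>

	// MacvlanNetworkWithNodeSlice renders a NAD whose whereabouts IPAM section
	// carries namespace, range, network_name and node_slice_size.
	nad := MacvlanNetworkWithNodeSlice("wa-nad", "default", "10.0.0.0/8", "wa-nad", "/20")
	fmt.Println(nad.Spec.Config)
}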

View File

@ -46,3 +46,5 @@ GLDFLAGS="${GLDFLAGS} ${VERSION_LDFLAGS}"
CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} ${GO} build ${GOFLAGS} -ldflags "${GLDFLAGS}" -o bin/${cmd} cmd/${cmd}.go
CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} ${GO} build ${GOFLAGS} -ldflags "${GLDFLAGS}" -o bin/ip-control-loop cmd/controlloop/*.go
CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} ${GO} build ${GOFLAGS} -ldflags "${GLDFLAGS}" -o bin/node-slice-controller cmd/nodeslicecontroller/*.go

View File

@ -98,7 +98,7 @@ trap "rm /tmp/whereabouts-img.tar || true" EXIT
kind load image-archive --name "$KIND_CLUSTER_NAME" /tmp/whereabouts-img.tar
echo "## install whereabouts"
for file in "daemonset-install.yaml" "whereabouts.cni.cncf.io_ippools.yaml" "whereabouts.cni.cncf.io_overlappingrangeipreservations.yaml"; do
for file in "daemonset-install.yaml" "whereabouts.cni.cncf.io_ippools.yaml" "whereabouts.cni.cncf.io_overlappingrangeipreservations.yaml" "node-slice-controller.yaml"; do
# insert 'imagePullPolicy: Never' under the container 'image' so it is certain that the image used
# by the daemonset is the one loaded into KinD and not one pulled from a repo
sed '/ image:/a\ imagePullPolicy: Never' "$ROOT/doc/crds/$file" | retry kubectl apply -f -

View File

@ -0,0 +1,57 @@
package v1alpha1
import (
"net"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NodeSlicePoolSpec defines the desired state of NodeSlicePool
type NodeSlicePoolSpec struct {
// Range is a RFC 4632/4291-style string that represents an IP address and prefix length in CIDR notation;
// it is the entire range from which each node is allocated a subset.
Range string `json:"range"`
// SliceSize is the size of subnets or slices of the range that each node will be assigned
SliceSize string `json:"sliceSize"`
}
// NodeSlicePoolStatus defines the observed state of NodeSlicePool
type NodeSlicePoolStatus struct {
// Allocations holds the allocations of nodes to slices
Allocations []NodeSliceAllocation `json:"allocations"`
}
type NodeSliceAllocation struct {
// NodeName is the name of the node assigned to this slice; an empty node name means the slice is available for assignment
NodeName string `json:"nodeName"`
// SliceRange is the subnet of this slice
SliceRange string `json:"sliceRange"`
}
// ParseCIDR parses the Range of the NodeSlicePool
func (i NodeSlicePool) ParseCIDR() (net.IP, *net.IPNet, error) {
return net.ParseCIDR(i.Spec.Range)
}
// +genclient
// +kubebuilder:object:root=true
// NodeSlicePool is the Schema for the nodeslicepools API
type NodeSlicePool struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec NodeSlicePoolSpec `json:"spec,omitempty"`
Status NodeSlicePoolStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// NodeSlicePoolList contains a list of NodeSlicePool
type NodeSlicePoolList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []NodeSlicePool `json:"items"`
}
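For orientation, a minimal illustrative sketch (not part of this commit) of a NodeSlicePool populated the way the types above describe, reusing the range and slice size from the e2e tests; the node name "worker-1" is hypothetical.
// Illustrative only: the /8 range is carved into /20 slices; one slice is
// assigned to a (hypothetical) node and one is still available (empty NodeName).
func exampleNodeSlicePool() NodeSlicePool {
	return NodeSlicePool{
		ObjectMeta: metav1.ObjectMeta{Name: "wa-nad", Namespace: "default"},
		Spec: NodeSlicePoolSpec{
			Range:     "10.0.0.0/8", // entire range managed by this pool
			SliceSize: "/20",        // each node receives one /20 subnet
		},
		Status: NodeSlicePoolStatus{
			Allocations: []NodeSliceAllocation{
				{NodeName: "worker-1", SliceRange: "10.0.0.0/20"},
				{NodeName: "", SliceRange: "10.0.16.0/20"},
			},
		},
	}
}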

View File

@ -58,6 +58,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&IPPoolList{},
&OverlappingRangeIPReservation{},
&OverlappingRangeIPReservationList{},
&NodeSlicePool{},
&NodeSlicePoolList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil

View File

@ -103,6 +103,115 @@ func (in *IPPoolSpec) DeepCopy() *IPPoolSpec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSliceAllocation) DeepCopyInto(out *NodeSliceAllocation) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSliceAllocation.
func (in *NodeSliceAllocation) DeepCopy() *NodeSliceAllocation {
if in == nil {
return nil
}
out := new(NodeSliceAllocation)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSlicePool) DeepCopyInto(out *NodeSlicePool) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSlicePool.
func (in *NodeSlicePool) DeepCopy() *NodeSlicePool {
if in == nil {
return nil
}
out := new(NodeSlicePool)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeSlicePool) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSlicePoolList) DeepCopyInto(out *NodeSlicePoolList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]NodeSlicePool, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSlicePoolList.
func (in *NodeSlicePoolList) DeepCopy() *NodeSlicePoolList {
if in == nil {
return nil
}
out := new(NodeSlicePoolList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeSlicePoolList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSlicePoolSpec) DeepCopyInto(out *NodeSlicePoolSpec) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSlicePoolSpec.
func (in *NodeSlicePoolSpec) DeepCopy() *NodeSlicePoolSpec {
if in == nil {
return nil
}
out := new(NodeSlicePoolSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSlicePoolStatus) DeepCopyInto(out *NodeSlicePoolStatus) {
*out = *in
if in.Allocations != nil {
in, out := &in.Allocations, &out.Allocations
*out = make([]NodeSliceAllocation, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSlicePoolStatus.
func (in *NodeSlicePoolStatus) DeepCopy() *NodeSlicePoolStatus {
if in == nil {
return nil
}
out := new(NodeSlicePoolStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OverlappingRangeIPReservation) DeepCopyInto(out *OverlappingRangeIPReservation) {
*out = *in

View File

@ -0,0 +1,141 @@
/*
Copyright 2024 The Kubernetes Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
v1alpha1 "github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeNodeSlicePools implements NodeSlicePoolInterface
type FakeNodeSlicePools struct {
Fake *FakeWhereaboutsV1alpha1
ns string
}
var nodeslicepoolsResource = schema.GroupVersionResource{Group: "whereabouts.cni.cncf.io", Version: "v1alpha1", Resource: "nodeslicepools"}
var nodeslicepoolsKind = schema.GroupVersionKind{Group: "whereabouts.cni.cncf.io", Version: "v1alpha1", Kind: "NodeSlicePool"}
// Get takes name of the nodeSlicePool, and returns the corresponding nodeSlicePool object, and an error if there is any.
func (c *FakeNodeSlicePools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NodeSlicePool, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(nodeslicepoolsResource, c.ns, name), &v1alpha1.NodeSlicePool{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.NodeSlicePool), err
}
// List takes label and field selectors, and returns the list of NodeSlicePools that match those selectors.
func (c *FakeNodeSlicePools) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.NodeSlicePoolList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(nodeslicepoolsResource, nodeslicepoolsKind, c.ns, opts), &v1alpha1.NodeSlicePoolList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1alpha1.NodeSlicePoolList{ListMeta: obj.(*v1alpha1.NodeSlicePoolList).ListMeta}
for _, item := range obj.(*v1alpha1.NodeSlicePoolList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested nodeSlicePools.
func (c *FakeNodeSlicePools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(nodeslicepoolsResource, c.ns, opts))
}
// Create takes the representation of a nodeSlicePool and creates it. Returns the server's representation of the nodeSlicePool, and an error, if there is any.
func (c *FakeNodeSlicePools) Create(ctx context.Context, nodeSlicePool *v1alpha1.NodeSlicePool, opts v1.CreateOptions) (result *v1alpha1.NodeSlicePool, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(nodeslicepoolsResource, c.ns, nodeSlicePool), &v1alpha1.NodeSlicePool{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.NodeSlicePool), err
}
// Update takes the representation of a nodeSlicePool and updates it. Returns the server's representation of the nodeSlicePool, and an error, if there is any.
func (c *FakeNodeSlicePools) Update(ctx context.Context, nodeSlicePool *v1alpha1.NodeSlicePool, opts v1.UpdateOptions) (result *v1alpha1.NodeSlicePool, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(nodeslicepoolsResource, c.ns, nodeSlicePool), &v1alpha1.NodeSlicePool{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.NodeSlicePool), err
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakeNodeSlicePools) UpdateStatus(ctx context.Context, nodeSlicePool *v1alpha1.NodeSlicePool, opts v1.UpdateOptions) (*v1alpha1.NodeSlicePool, error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateSubresourceAction(nodeslicepoolsResource, "status", c.ns, nodeSlicePool), &v1alpha1.NodeSlicePool{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.NodeSlicePool), err
}
// Delete takes name of the nodeSlicePool and deletes it. Returns an error if one occurs.
func (c *FakeNodeSlicePools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteActionWithOptions(nodeslicepoolsResource, c.ns, name, opts), &v1alpha1.NodeSlicePool{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakeNodeSlicePools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(nodeslicepoolsResource, c.ns, listOpts)
_, err := c.Fake.Invokes(action, &v1alpha1.NodeSlicePoolList{})
return err
}
// Patch applies the patch and returns the patched nodeSlicePool.
func (c *FakeNodeSlicePools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NodeSlicePool, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(nodeslicepoolsResource, c.ns, name, pt, data, subresources...), &v1alpha1.NodeSlicePool{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.NodeSlicePool), err
}
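
A minimal sketch (not part of this commit) of how the generated fake clientset can back a unit test for the NodeSlicePool client above; the test name, object names, and namespace are made up, and the import paths are the ones this commit uses elsewhere.

package v1alpha1_test

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
	"github.com/k8snetworkplumbingwg/whereabouts/pkg/client/clientset/versioned/fake"
)

func TestFakeNodeSlicePoolCreateGet(t *testing.T) {
	// NewSimpleClientset returns a clientset backed by an in-memory object tracker.
	client := fake.NewSimpleClientset()

	pool := &v1alpha1.NodeSlicePool{
		ObjectMeta: metav1.ObjectMeta{Name: "pool-a", Namespace: "default"},
		Spec:       v1alpha1.NodeSlicePoolSpec{Range: "10.0.0.0/8", SliceSize: "/10"},
	}
	if _, err := client.WhereaboutsV1alpha1().NodeSlicePools("default").Create(context.TODO(), pool, metav1.CreateOptions{}); err != nil {
		t.Fatalf("create: %v", err)
	}

	got, err := client.WhereaboutsV1alpha1().NodeSlicePools("default").Get(context.TODO(), "pool-a", metav1.GetOptions{})
	if err != nil || got.Spec.SliceSize != "/10" {
		t.Fatalf("get: %v", err)
	}
}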

View File

@ -31,6 +31,10 @@ func (c *FakeWhereaboutsV1alpha1) IPPools(namespace string) v1alpha1.IPPoolInter
return &FakeIPPools{c, namespace}
}
func (c *FakeWhereaboutsV1alpha1) NodeSlicePools(namespace string) v1alpha1.NodeSlicePoolInterface {
return &FakeNodeSlicePools{c, namespace}
}
func (c *FakeWhereaboutsV1alpha1) OverlappingRangeIPReservations(namespace string) v1alpha1.OverlappingRangeIPReservationInterface {
return &FakeOverlappingRangeIPReservations{c, namespace}
}

View File

@ -19,4 +19,6 @@ package v1alpha1
type IPPoolExpansion interface{}
type NodeSlicePoolExpansion interface{}
type OverlappingRangeIPReservationExpansion interface{}

View File

@ -0,0 +1,194 @@
/*
Copyright 2024 The Kubernetes Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
"time"
v1alpha1 "github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
scheme "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// NodeSlicePoolsGetter has a method to return a NodeSlicePoolInterface.
// A group's client should implement this interface.
type NodeSlicePoolsGetter interface {
NodeSlicePools(namespace string) NodeSlicePoolInterface
}
// NodeSlicePoolInterface has methods to work with NodeSlicePool resources.
type NodeSlicePoolInterface interface {
Create(ctx context.Context, nodeSlicePool *v1alpha1.NodeSlicePool, opts v1.CreateOptions) (*v1alpha1.NodeSlicePool, error)
Update(ctx context.Context, nodeSlicePool *v1alpha1.NodeSlicePool, opts v1.UpdateOptions) (*v1alpha1.NodeSlicePool, error)
UpdateStatus(ctx context.Context, nodeSlicePool *v1alpha1.NodeSlicePool, opts v1.UpdateOptions) (*v1alpha1.NodeSlicePool, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.NodeSlicePool, error)
List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.NodeSlicePoolList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NodeSlicePool, err error)
NodeSlicePoolExpansion
}
// nodeSlicePools implements NodeSlicePoolInterface
type nodeSlicePools struct {
client rest.Interface
ns string
}
// newNodeSlicePools returns a NodeSlicePools
func newNodeSlicePools(c *WhereaboutsV1alpha1Client, namespace string) *nodeSlicePools {
return &nodeSlicePools{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the nodeSlicePool, and returns the corresponding nodeSlicePool object, and an error if there is any.
func (c *nodeSlicePools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NodeSlicePool, err error) {
result = &v1alpha1.NodeSlicePool{}
err = c.client.Get().
Namespace(c.ns).
Resource("nodeslicepools").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of NodeSlicePools that match those selectors.
func (c *nodeSlicePools) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.NodeSlicePoolList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha1.NodeSlicePoolList{}
err = c.client.Get().
Namespace(c.ns).
Resource("nodeslicepools").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested nodeSlicePools.
func (c *nodeSlicePools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("nodeslicepools").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a nodeSlicePool and creates it. Returns the server's representation of the nodeSlicePool, and an error, if there is any.
func (c *nodeSlicePools) Create(ctx context.Context, nodeSlicePool *v1alpha1.NodeSlicePool, opts v1.CreateOptions) (result *v1alpha1.NodeSlicePool, err error) {
result = &v1alpha1.NodeSlicePool{}
err = c.client.Post().
Namespace(c.ns).
Resource("nodeslicepools").
VersionedParams(&opts, scheme.ParameterCodec).
Body(nodeSlicePool).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a nodeSlicePool and updates it. Returns the server's representation of the nodeSlicePool, and an error, if there is any.
func (c *nodeSlicePools) Update(ctx context.Context, nodeSlicePool *v1alpha1.NodeSlicePool, opts v1.UpdateOptions) (result *v1alpha1.NodeSlicePool, err error) {
result = &v1alpha1.NodeSlicePool{}
err = c.client.Put().
Namespace(c.ns).
Resource("nodeslicepools").
Name(nodeSlicePool.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(nodeSlicePool).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *nodeSlicePools) UpdateStatus(ctx context.Context, nodeSlicePool *v1alpha1.NodeSlicePool, opts v1.UpdateOptions) (result *v1alpha1.NodeSlicePool, err error) {
result = &v1alpha1.NodeSlicePool{}
err = c.client.Put().
Namespace(c.ns).
Resource("nodeslicepools").
Name(nodeSlicePool.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(nodeSlicePool).
Do(ctx).
Into(result)
return
}
// Delete takes name of the nodeSlicePool and deletes it. Returns an error if one occurs.
func (c *nodeSlicePools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("nodeslicepools").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *nodeSlicePools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("nodeslicepools").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched nodeSlicePool.
func (c *nodeSlicePools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NodeSlicePool, err error) {
result = &v1alpha1.NodeSlicePool{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("nodeslicepools").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
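
As a usage sketch only (not included in this commit): once the typed client above is generated, NodeSlicePools can be read like any other typed resource. The in-cluster config and the "kube-system" namespace are assumptions for illustration.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"

	clientset "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/clientset/versioned"
)

func main() {
	// assumes the binary runs inside a cluster; out of cluster, build the config from a kubeconfig instead
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client, err := clientset.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// list the NodeSlicePools in the assumed namespace and print their slice layout
	pools, err := client.WhereaboutsV1alpha1().NodeSlicePools("kube-system").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, p := range pools.Items {
		fmt.Printf("%s: range=%s sliceSize=%s\n", p.Name, p.Spec.Range, p.Spec.SliceSize)
	}
}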

View File

@ -28,6 +28,7 @@ import (
type WhereaboutsV1alpha1Interface interface {
RESTClient() rest.Interface
IPPoolsGetter
NodeSlicePoolsGetter
OverlappingRangeIPReservationsGetter
}
@ -40,6 +41,10 @@ func (c *WhereaboutsV1alpha1Client) IPPools(namespace string) IPPoolInterface {
return newIPPools(c, namespace)
}
func (c *WhereaboutsV1alpha1Client) NodeSlicePools(namespace string) NodeSlicePoolInterface {
return newNodeSlicePools(c, namespace)
}
func (c *WhereaboutsV1alpha1Client) OverlappingRangeIPReservations(namespace string) OverlappingRangeIPReservationInterface {
return newOverlappingRangeIPReservations(c, namespace)
}

View File

@ -54,6 +54,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
// Group=whereabouts.cni.cncf.io, Version=v1alpha1
case v1alpha1.SchemeGroupVersion.WithResource("ippools"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Whereabouts().V1alpha1().IPPools().Informer()}, nil
case v1alpha1.SchemeGroupVersion.WithResource("nodeslicepools"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Whereabouts().V1alpha1().NodeSlicePools().Informer()}, nil
case v1alpha1.SchemeGroupVersion.WithResource("overlappingrangeipreservations"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Whereabouts().V1alpha1().OverlappingRangeIPReservations().Informer()}, nil

View File

@ -25,6 +25,8 @@ import (
type Interface interface {
// IPPools returns a IPPoolInformer.
IPPools() IPPoolInformer
// NodeSlicePools returns a NodeSlicePoolInformer.
NodeSlicePools() NodeSlicePoolInformer
// OverlappingRangeIPReservations returns a OverlappingRangeIPReservationInformer.
OverlappingRangeIPReservations() OverlappingRangeIPReservationInformer
}
@ -45,6 +47,11 @@ func (v *version) IPPools() IPPoolInformer {
return &iPPoolInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// NodeSlicePools returns a NodeSlicePoolInformer.
func (v *version) NodeSlicePools() NodeSlicePoolInformer {
return &nodeSlicePoolInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// OverlappingRangeIPReservations returns a OverlappingRangeIPReservationInformer.
func (v *version) OverlappingRangeIPReservations() OverlappingRangeIPReservationInformer {
return &overlappingRangeIPReservationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}

View File

@ -0,0 +1,89 @@
/*
Copyright 2024 The Kubernetes Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
time "time"
whereaboutscnicncfiov1alpha1 "github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
versioned "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/clientset/versioned"
internalinterfaces "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/informers/externalversions/internalinterfaces"
v1alpha1 "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/listers/whereabouts.cni.cncf.io/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// NodeSlicePoolInformer provides access to a shared informer and lister for
// NodeSlicePools.
type NodeSlicePoolInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1alpha1.NodeSlicePoolLister
}
type nodeSlicePoolInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewNodeSlicePoolInformer constructs a new informer for NodeSlicePool type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewNodeSlicePoolInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredNodeSlicePoolInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredNodeSlicePoolInformer constructs a new informer for NodeSlicePool type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredNodeSlicePoolInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.WhereaboutsV1alpha1().NodeSlicePools(namespace).List(context.TODO(), options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.WhereaboutsV1alpha1().NodeSlicePools(namespace).Watch(context.TODO(), options)
},
},
&whereaboutscnicncfiov1alpha1.NodeSlicePool{},
resyncPeriod,
indexers,
)
}
func (f *nodeSlicePoolInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredNodeSlicePoolInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *nodeSlicePoolInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&whereaboutscnicncfiov1alpha1.NodeSlicePool{}, f.defaultInformer)
}
func (f *nodeSlicePoolInformer) Lister() v1alpha1.NodeSlicePoolLister {
return v1alpha1.NewNodeSlicePoolLister(f.Informer().GetIndexer())
}
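
A short sketch (not part of the commit) of the NewFilteredNodeSlicePoolInformer constructor above, for the case where a standalone informer should only watch objects matching a label selector; the "app=example" selector, helper name, and package name are placeholders.

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"

	versioned "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/clientset/versioned"
	wbinformer "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/informers/externalversions/whereabouts.cni.cncf.io/v1alpha1"
)

// newFilteredPoolInformer builds a standalone informer restricted by a label selector.
// The shared informer factory is still preferable when the informer is reused across controllers.
func newFilteredPoolInformer(client versioned.Interface) cache.SharedIndexInformer {
	tweak := func(opts *metav1.ListOptions) {
		opts.LabelSelector = "app=example" // placeholder selector
	}
	return wbinformer.NewFilteredNodeSlicePoolInformer(
		client,
		metav1.NamespaceAll,
		30*time.Second,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		tweak,
	)
}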

View File

@ -25,6 +25,14 @@ type IPPoolListerExpansion interface{}
// IPPoolNamespaceLister.
type IPPoolNamespaceListerExpansion interface{}
// NodeSlicePoolListerExpansion allows custom methods to be added to
// NodeSlicePoolLister.
type NodeSlicePoolListerExpansion interface{}
// NodeSlicePoolNamespaceListerExpansion allows custom methods to be added to
// NodeSlicePoolNamespaceLister.
type NodeSlicePoolNamespaceListerExpansion interface{}
// OverlappingRangeIPReservationListerExpansion allows custom methods to be added to
// OverlappingRangeIPReservationLister.
type OverlappingRangeIPReservationListerExpansion interface{}

View File

@ -0,0 +1,98 @@
/*
Copyright 2024 The Kubernetes Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha1 "github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// NodeSlicePoolLister helps list NodeSlicePools.
// All objects returned here must be treated as read-only.
type NodeSlicePoolLister interface {
// List lists all NodeSlicePools in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1alpha1.NodeSlicePool, err error)
// NodeSlicePools returns an object that can list and get NodeSlicePools.
NodeSlicePools(namespace string) NodeSlicePoolNamespaceLister
NodeSlicePoolListerExpansion
}
// nodeSlicePoolLister implements the NodeSlicePoolLister interface.
type nodeSlicePoolLister struct {
indexer cache.Indexer
}
// NewNodeSlicePoolLister returns a new NodeSlicePoolLister.
func NewNodeSlicePoolLister(indexer cache.Indexer) NodeSlicePoolLister {
return &nodeSlicePoolLister{indexer: indexer}
}
// List lists all NodeSlicePools in the indexer.
func (s *nodeSlicePoolLister) List(selector labels.Selector) (ret []*v1alpha1.NodeSlicePool, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.NodeSlicePool))
})
return ret, err
}
// NodeSlicePools returns an object that can list and get NodeSlicePools.
func (s *nodeSlicePoolLister) NodeSlicePools(namespace string) NodeSlicePoolNamespaceLister {
return nodeSlicePoolNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// NodeSlicePoolNamespaceLister helps list and get NodeSlicePools.
// All objects returned here must be treated as read-only.
type NodeSlicePoolNamespaceLister interface {
// List lists all NodeSlicePools in the indexer for a given namespace.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1alpha1.NodeSlicePool, err error)
// Get retrieves the NodeSlicePool from the indexer for a given namespace and name.
// Objects returned here must be treated as read-only.
Get(name string) (*v1alpha1.NodeSlicePool, error)
NodeSlicePoolNamespaceListerExpansion
}
// nodeSlicePoolNamespaceLister implements the NodeSlicePoolNamespaceLister
// interface.
type nodeSlicePoolNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all NodeSlicePools in the indexer for a given namespace.
func (s nodeSlicePoolNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.NodeSlicePool, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.NodeSlicePool))
})
return ret, err
}
// Get retrieves the NodeSlicePool from the indexer for a given namespace and name.
func (s nodeSlicePoolNamespaceLister) Get(name string) (*v1alpha1.NodeSlicePool, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1alpha1.Resource("nodeslicepool"), name)
}
return obj.(*v1alpha1.NodeSlicePool), nil
}
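
And a small companion sketch (also not in the commit) showing how a controller reads from the lister above instead of hitting the API server; the helper name and namespace argument are hypothetical.

package example

import (
	"k8s.io/apimachinery/pkg/labels"

	"github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
	listers "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/listers/whereabouts.cni.cncf.io/v1alpha1"
)

// poolsFor returns the cached NodeSlicePools in the given namespace.
// The lister is normally obtained from the shared informer's Lister() method.
func poolsFor(lister listers.NodeSlicePoolLister, namespace string) ([]*v1alpha1.NodeSlicePool, error) {
	return lister.NodeSlicePools(namespace).List(labels.Everything())
}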

View File

@ -109,6 +109,7 @@ func LoadIPAMConfig(bytes []byte, envArgs string, extraConfigPaths ...string) (*
} else {
firstip, ipNet, err := netutils.ParseCIDRSloppy(n.IPAM.IPRanges[idx].Range)
if err != nil {
logging.Debugf("invalid cidr error on range %v, within ranges %v", n.IPAM.IPRanges[idx].Range, n.IPAM.IPRanges)
return nil, "", fmt.Errorf("invalid CIDR %s: %s", n.IPAM.IPRanges[idx].Range, err)
}
n.IPAM.IPRanges[idx].Range = ipNet.String()

View File

@ -1,9 +1,13 @@
package iphelpers
import (
"encoding/binary"
"errors"
"fmt"
"math"
"net"
"strconv"
"strings"
)
// CompareIPs reports whether out of 2 given IPs, ipX and ipY, ipY is smaller (-1), the same (0) or larger (1).
@ -25,6 +29,60 @@ func CompareIPs(ipX net.IP, ipY net.IP) int {
return 0
}
// DivideRangeBySize takes an IP range, e.g. 11.0.0.0/8, and a slice size, e.g. /24,
// and returns the list of CIDRs that divide the input range into subnets of that size.
func DivideRangeBySize(inputNetwork string, sliceSizeString string) ([]string, error) {
// Remove "/" from the start of the sliceSize
sliceSizeString = strings.TrimPrefix(sliceSizeString, "/")
sliceSize, err := strconv.Atoi(sliceSizeString)
if err != nil {
return nil, fmt.Errorf("invalid slice size %q: %v", sliceSizeString, err)
}
ip, ipNet, err := net.ParseCIDR(inputNetwork)
if err != nil {
return nil, err
}
if !ip.Equal(ipNet.IP) {
return nil, errors.New("netCIDR is not a valid network address")
}
netMaskSize, _ := ipNet.Mask.Size()
if netMaskSize > sliceSize {
return nil, errors.New("sliceSize must be greater than or equal to netMaskSize")
}
totalSubnetsInNetwork := math.Pow(2, float64(sliceSize)-float64(netMaskSize))
totalHostsInSubnet := math.Pow(2, 32-float64(sliceSize))
subnetIntAddresses := make([]uint32, int(totalSubnetsInNetwork))
// first subnet address is same as the network address
subnetIntAddresses[0] = ip2int(ip.To4())
for i := 1; i < int(totalSubnetsInNetwork); i++ {
subnetIntAddresses[i] = subnetIntAddresses[i-1] + uint32(totalHostsInSubnet)
}
subnetCIDRs := make([]string, 0)
for _, sia := range subnetIntAddresses {
subnetCIDRs = append(
subnetCIDRs,
int2ip(sia).String()+"/"+strconv.Itoa(int(sliceSize)),
)
}
return subnetCIDRs, nil
}
func ip2int(ip net.IP) uint32 {
if len(ip) == 16 {
panic("cannot convert IPv6 into uint32")
}
return binary.BigEndian.Uint32(ip)
}
func int2ip(nn uint32) net.IP {
ip := make(net.IP, 4)
binary.BigEndian.PutUint32(ip, nn)
return ip
}
// IsIPInRange returns true if a given IP is within the continuous range of start and end IP (inclusively).
func IsIPInRange(in net.IP, start net.IP, end net.IP) (bool, error) {
if in == nil || start == nil || end == nil {

View File

@ -923,3 +923,69 @@ var _ = Describe("IPAddOffset operations", func() {
Expect(fmt.Sprint(newIP)).To(Equal("2000::1:0"))
})
})
func TestDivideRangeBySize(t *testing.T) {
cases := []struct {
name string
netRange string
sliceSize string
expectedResult []string
expectError bool
}{
{
name: "Network divided by same size slice",
netRange: "10.0.0.0/8",
sliceSize: "/8",
expectedResult: []string{"10.0.0.0/8"},
},
{
name: "Network divided /8 by /10",
netRange: "10.0.0.0/8",
sliceSize: "/10",
expectedResult: []string{"10.0.0.0/10", "10.64.0.0/10", "10.128.0.0/10", "10.192.0.0/10"},
},
{
name: "Network divided /10 by /8",
netRange: "10.0.0.0/10",
sliceSize: "/8",
expectError: true,
},
{
name: "Network divided /8 by /11",
netRange: "10.0.0.0/8",
sliceSize: "/11",
expectedResult: []string{"10.0.0.0/11", "10.32.0.0/11", "10.64.0.0/11", "10.96.0.0/11", "10.128.0.0/11", "10.160.0.0/11", "10.192.0.0/11", "10.224.0.0/11"},
},
{
name: "Network divided /10 by /12",
netRange: "10.0.0.0/10",
sliceSize: "/12",
expectedResult: []string{"10.0.0.0/12", "10.16.0.0/12", "10.32.0.0/12", "10.48.0.0/12"},
},
{
name: "Network divided /8 by /10 without / in slice",
netRange: "10.0.0.0/8",
sliceSize: "10",
expectedResult: []string{"10.0.0.0/10", "10.64.0.0/10", "10.128.0.0/10", "10.192.0.0/10"},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
result, err := DivideRangeBySize(tc.netRange, tc.sliceSize)
if err != nil && !tc.expectError {
t.Errorf("unexpected error: %v", err)
}
if err == nil && tc.expectError {
t.Fatalf("expected error but did not get it")
}
if len(result) != len(tc.expectedResult) {
t.Fatalf("Expected result: %s, got result: %s", tc.expectedResult, result)
}
for i := range result {
if result[i] != tc.expectedResult[i] {
t.Fatalf("Expected result: %s, got result: %s", tc.expectedResult, result)
}
}
})
}
}
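
To make DivideRangeBySize concrete outside the test harness, a tiny sketch (not part of the commit) that prints the node slices produced when a /8 network is carved into /10 slices; the expected output matches the "/8 by /10" case in the table above.

package main

import (
	"fmt"

	"github.com/k8snetworkplumbingwg/whereabouts/pkg/iphelpers"
)

func main() {
	// Divides 10.0.0.0/8 into /10 node slices:
	// 10.0.0.0/10, 10.64.0.0/10, 10.128.0.0/10, 10.192.0.0/10
	subnets, err := iphelpers.DivideRangeBySize("10.0.0.0/8", "/10")
	if err != nil {
		panic(err)
	}
	for _, s := range subnets {
		fmt.Println(s)
	}
}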

View File

@ -0,0 +1,612 @@
package node_controller
import (
"context"
"fmt"
"sort"
"time"
"golang.org/x/time/rate"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
coreinformers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
cncfV1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
nadclient "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned"
nadinformers "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1"
nadlisters "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
clientset "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/clientset/versioned"
whereaboutsInformers "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/informers/externalversions/whereabouts.cni.cncf.io/v1alpha1"
whereaboutsListers "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/listers/whereabouts.cni.cncf.io/v1alpha1"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/config"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/iphelpers"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/types"
)
const controllerAgentName = "node-controller"
const (
whereaboutsConfigPath = "/etc/cni/net.d/whereabouts.d/whereabouts.conf"
)
// Controller is the controller implementation for the node-slice controller,
// reconciling NetworkAttachmentDefinitions and Nodes into NodeSlicePools
type Controller struct {
// kubeclientset is a standard kubernetes clientset
kubeclientset kubernetes.Interface
// whereaboutsclientset is a clientset for the whereabouts API group
whereaboutsclientset clientset.Interface
nadclientset nadclient.Interface
nodeLister corelisters.NodeLister
nodeInformer coreinformers.NodeInformer
nodesSynced cache.InformerSynced
nodeSlicePoolLister whereaboutsListers.NodeSlicePoolLister
nodeSlicePoolInformer whereaboutsInformers.NodeSlicePoolInformer
nodeSlicePoolSynced cache.InformerSynced
nadInformer nadinformers.NetworkAttachmentDefinitionInformer
nadLister nadlisters.NetworkAttachmentDefinitionLister
nadSynced cache.InformerSynced
// workqueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workqueue workqueue.RateLimitingInterface
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
// For testing: sort nodes before assigning slices so results are deterministic
sortResults bool
}
// NewController returns a new node-slice controller
func NewController(
ctx context.Context,
kubeclientset kubernetes.Interface,
whereaboutsclientset clientset.Interface,
nadclientset nadclient.Interface,
nodeInformer coreinformers.NodeInformer,
nodeSlicePoolInformer whereaboutsInformers.NodeSlicePoolInformer,
nadInformer nadinformers.NetworkAttachmentDefinitionInformer,
sortResults bool,
) *Controller {
logger := klog.FromContext(ctx)
logger.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartStructuredLogging(0)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
ratelimiter := workqueue.NewMaxOfRateLimiter(
workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(50), 300)},
)
c := &Controller{
kubeclientset: kubeclientset,
nodeLister: nodeInformer.Lister(),
nodeInformer: nodeInformer,
nodesSynced: nodeInformer.Informer().HasSynced,
whereaboutsclientset: whereaboutsclientset,
nodeSlicePoolLister: nodeSlicePoolInformer.Lister(),
nodeSlicePoolInformer: nodeSlicePoolInformer,
nodeSlicePoolSynced: nodeSlicePoolInformer.Informer().HasSynced,
nadclientset: nadclientset,
nadInformer: nadInformer,
nadLister: nadInformer.Lister(),
nadSynced: nadInformer.Informer().HasSynced,
workqueue: workqueue.NewRateLimitingQueue(ratelimiter),
recorder: recorder,
sortResults: sortResults,
}
logger.Info("Setting up event handlers")
nadInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.onNadEvent,
UpdateFunc: func(old, cur interface{}) {
c.onNadEvent(cur)
},
DeleteFunc: c.onNadEvent,
})
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.requeueNADs,
UpdateFunc: func(old, cur interface{}) {
c.requeueNADs(cur)
},
DeleteFunc: c.requeueNADs,
})
nodeSlicePoolInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.requeueNADs,
UpdateFunc: func(old, cur interface{}) {
c.requeueNADs(cur)
},
DeleteFunc: c.requeueNADs,
})
return c
}
func (c *Controller) onNadEvent(obj interface{}) {
klog.Infof("handling network attachment definition event")
var object metav1.Object
var ok bool
if object, ok = obj.(metav1.Object); !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type"))
return
}
object, ok = tombstone.Obj.(metav1.Object)
if !ok {
utilruntime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type"))
return
}
}
key, err := cache.MetaNamespaceKeyFunc(object)
klog.Info(key)
if err != nil {
utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err))
return
}
c.workqueue.Add(key)
}
// TODO: we may want to require nodes to have an annotation similar to what pods have to receive a slice
// in this case we get all applicable NADs for the node rather than requeuing all
// same applies to other node event handlers
func (c *Controller) requeueNADs(obj interface{}) {
nadlist, err := c.nadLister.List(labels.Everything())
if err != nil {
utilruntime.HandleError(fmt.Errorf("couldn't get network-attachment-definition list from informer: %v", err))
return
}
for _, nad := range nadlist {
key, err := cache.MetaNamespaceKeyFunc(nad)
if err != nil {
utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", nad, err))
return
}
c.workqueue.Add(key)
}
}
// Run will set up the event handlers for types we are interested in, as well
// as syncing informer caches and starting workers. It will block until stopCh
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *Controller) Run(ctx context.Context, workers int) error {
defer utilruntime.HandleCrash()
defer c.workqueue.ShutDown()
logger := klog.FromContext(ctx)
// Start the informer factories to begin populating the informer caches
logger.Info("Starting node-slice controller")
// Wait for the caches to be synced before starting workers
logger.Info("Waiting for informer caches to sync")
if ok := cache.WaitForCacheSync(ctx.Done(), c.nodesSynced); !ok {
return fmt.Errorf("failed to wait for nodes caches to sync")
}
if ok := cache.WaitForCacheSync(ctx.Done(), c.nodeSlicePoolSynced); !ok {
return fmt.Errorf("failed to wait for nodeslices caches to sync")
}
if ok := cache.WaitForCacheSync(ctx.Done(), c.nadSynced); !ok {
return fmt.Errorf("failed to wait for nad caches to sync")
}
logger.Info("Starting workers", "count", workers)
// Launch the requested number of workers to process work items
for i := 0; i < workers; i++ {
go wait.UntilWithContext(ctx, c.runWorker, time.Second)
}
logger.Info("Started workers")
<-ctx.Done()
logger.Info("Shutting down workers")
return nil
}
// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *Controller) runWorker(ctx context.Context) {
for c.processNextWorkItem(ctx) {
}
}
// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
func (c *Controller) processNextWorkItem(ctx context.Context) bool {
obj, shutdown := c.workqueue.Get()
logger := klog.FromContext(ctx)
if shutdown {
return false
}
// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
// not call Forget if a transient error occurs, instead the item is
// put back on the workqueue and attempted again after a back-off
// period.
defer c.workqueue.Done(obj)
var key string
var ok bool
// We expect strings to come off the workqueue. These are of the
// form namespace/name. We do this as the delayed nature of the
// workqueue means the items in the informer cache may actually be
// more up to date than when the item was initially put onto the
// workqueue.
if key, ok = obj.(string); !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
}
// Run the syncHandler, passing it the namespace/name string of the
// network-attachment-definition to be synced.
if err := c.syncHandler(ctx, key); err != nil {
// Put the item back on the workqueue to handle any transient errors.
c.workqueue.AddRateLimited(key)
return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
logger.Info("Successfully synced", "resourceName", key)
return nil
}(obj)
if err != nil {
utilruntime.HandleError(err)
return true
}
return true
}
// syncHandler compares the actual state with the desired state and attempts to
// converge the two, creating or updating the NodeSlicePool (including its Status
// allocations) for the given network-attachment-definition.
func (c *Controller) syncHandler(ctx context.Context, key string) error {
// Convert the namespace/name string into a distinct namespace and name
logger := klog.LoggerWithValues(klog.FromContext(ctx), "resourceName", key)
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil
}
err = c.checkForMultiNadMismatch(name, namespace)
if err != nil {
return err
}
nad, err := c.nadLister.NetworkAttachmentDefinitions(namespace).Get(name)
if err != nil {
if !errors.IsNotFound(err) {
return err
}
// the NAD does not exist, so it must have been deleted; clean up any NodeSlicePools it owned.
// if the controller is down during the delete this can be missed, similar to endpoints, see kubernetes #6877
nodeSlices, err := c.nodeSlicePoolLister.List(labels.Everything())
if err != nil {
return nil
}
for _, nodeSlice := range nodeSlices {
if hasOwnerRef(nodeSlice, name) {
if len(nodeSlice.OwnerReferences) == 1 {
//this is the last NAD owning this so delete
err = c.whereaboutsclientset.WhereaboutsV1alpha1().NodeSlicePools(namespace).Delete(ctx, name, metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
return err
}
}
}
}
return nil
}
// the NAD exists; check whether its node slice range or slice size changed
ipamConf, err := ipamConfiguration(nad, "")
if err != nil {
return err
}
// This is to support several NADs and interfaces on the same network
logger.Info(fmt.Sprintf("%v", ipamConf))
logger.Info(fmt.Sprintf("slicesize: %v", ipamConf.NodeSliceSize))
if ipamConf.NodeSliceSize == "" || len(ipamConf.IPRanges) == 0 {
logger.Info("skipping node slice update for network-attachment-definition due to missing node slice or range configuration",
"network-attachment-definition", klog.KRef(namespace, name))
return nil
}
logger.Info("About to update node slices for network-attachment-definition",
"network-attachment-definition", klog.KRef(namespace, name))
currentNodeSlicePool, err := c.nodeSlicePoolLister.NodeSlicePools(namespace).Get(getSliceName(ipamConf))
if err != nil {
logger.Info("node slice pool does not exist, creating")
if !errors.IsNotFound(err) {
return err
}
//Create
nodeslice := &v1alpha1.NodeSlicePool{
TypeMeta: metav1.TypeMeta{
Kind: "NodeSlicePool",
APIVersion: "whereabouts.cni.cncf.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: getSliceName(ipamConf),
Namespace: namespace,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(nad, cncfV1.SchemeGroupVersion.WithKind("NetworkAttachmentDefinition")),
},
},
// only supports single range with node slice
Spec: v1alpha1.NodeSlicePoolSpec{
Range: ipamConf.IPRanges[0].Range,
SliceSize: ipamConf.NodeSliceSize,
},
}
allocations := []v1alpha1.NodeSliceAllocation{}
logger.Info(fmt.Sprintf("node slice: %v\n", nodeslice))
//TODO: handle case when full, we could fire an event
subnets, err := iphelpers.DivideRangeBySize(nodeslice.Spec.Range, ipamConf.NodeSliceSize)
if err != nil {
return err
}
logger.Info(fmt.Sprintf("subnets: %v\n", subnets))
for _, subnet := range subnets {
allocations = append(allocations, v1alpha1.NodeSliceAllocation{
SliceRange: subnet,
})
}
nodes, err := c.getNodeList()
if err != nil {
return err
}
for _, node := range nodes {
logger.Info(fmt.Sprintf("assigning node to slice: %v\n", node.Name))
assignNodeToSlice(allocations, node.Name)
}
nodeslice.Status = v1alpha1.NodeSlicePoolStatus{
Allocations: allocations,
}
logger.Info(fmt.Sprintf("final allocations: %v\n", allocations))
_, err = c.whereaboutsclientset.WhereaboutsV1alpha1().NodeSlicePools(namespace).Create(ctx, nodeslice, metav1.CreateOptions{})
if err != nil {
return err
}
} else {
nodeslice := currentNodeSlicePool.DeepCopy()
// make sure if multiple NADs act on this NodeSlicePool they are all listed as owners
nadIsOwner := false
for _, ownerRef := range nodeslice.OwnerReferences {
if ownerRef.Name == name {
nadIsOwner = true
}
}
if !nadIsOwner {
nodeslice.OwnerReferences = append(nodeslice.OwnerReferences, getAuxiliaryOwnerRef(nad))
}
logger.Info(fmt.Sprintf("owner references: %v\n", nodeslice.OwnerReferences))
// node slice currently exists
if currentNodeSlicePool.Spec.SliceSize != ipamConf.NodeSliceSize ||
currentNodeSlicePool.Spec.Range != ipamConf.IPRanges[0].Range {
logger.Info("network-attachment-definition range or slice size changed, re-allocating node slices")
// slices have changed so redo the slicing and reassign nodes
subnets, err := iphelpers.DivideRangeBySize(ipamConf.IPRanges[0].Range, ipamConf.NodeSliceSize)
if err != nil {
return err
}
allocations := []v1alpha1.NodeSliceAllocation{}
for _, subnet := range subnets {
allocations = append(allocations, v1alpha1.NodeSliceAllocation{
SliceRange: subnet,
})
}
nodes, err := c.getNodeList()
if err != nil {
return err
}
for _, node := range nodes {
assignNodeToSlice(allocations, node.Name)
}
nodeslice.Status = v1alpha1.NodeSlicePoolStatus{
Allocations: allocations,
}
_, err = c.whereaboutsclientset.WhereaboutsV1alpha1().NodeSlicePools(namespace).Update(ctx, nodeslice, metav1.UpdateOptions{})
if err != nil {
return err
}
} else {
logger.Info("node slice exists and range configuration did not change, ensuring nodes assigned")
//slices have not changed so only make sure all nodes are assigned
allocations := nodeslice.Status.Allocations
nodes, err := c.getNodeList()
if err != nil {
return err
}
for _, node := range nodes {
assignNodeToSlice(allocations, node.Name)
}
removeUnusedNodes(allocations, nodes)
nodeslice.Status.Allocations = allocations
_, err = c.whereaboutsclientset.WhereaboutsV1alpha1().NodeSlicePools(namespace).Update(ctx, nodeslice, metav1.UpdateOptions{})
if err != nil {
logger.Info(fmt.Sprintf("Error updating NSP with no changes: %v", err))
return err
}
}
}
//TODO: recorder events
//c.recorder.Event(foo, corev1.EventTypeNormal, SuccessSynced, MessageResourceSynced)
return nil
}
func (c *Controller) getNodeList() ([]*corev1.Node, error) {
nodes, err := c.nodeLister.List(labels.Everything())
if err != nil {
return nil, err
}
if !c.sortResults {
return nodes, nil
}
sort.Slice(nodes, func(i, j int) bool {
return nodes[i].Name < nodes[j].Name
})
return nodes, nil
}
// since multiple NADs can share the same network (matching network_name), make sure their range and node slice size settings match
func (c *Controller) checkForMultiNadMismatch(name, namespace string) error {
nad, err := c.nadLister.NetworkAttachmentDefinitions(namespace).Get(name)
if err != nil {
if !errors.IsNotFound(err) {
return err
}
return nil
}
ipamConf, err := ipamConfiguration(nad, "")
if err != nil {
return err
}
nadList, err := c.nadLister.List(labels.Everything())
if err != nil {
return err
}
for _, additionalNad := range nadList {
additionalIpamConf, err := ipamConfiguration(additionalNad, "")
if err != nil {
return err
}
if !checkIpamConfMatch(ipamConf, additionalIpamConf) {
return fmt.Errorf("found IPAM conf mismatch for network-attachment-definitions with same network name")
}
}
return nil
}
func checkIpamConfMatch(conf1, conf2 *types.IPAMConfig) bool {
if conf1.NetworkName == conf2.NetworkName {
return conf1.IPRanges[0].Range == conf2.IPRanges[0].Range && conf1.NodeSliceSize == conf2.NodeSliceSize
}
return true
}
func hasOwnerRef(nodeSlice *v1alpha1.NodeSlicePool, name string) bool {
for _, ownerRef := range nodeSlice.OwnerReferences {
if ownerRef.Name == name {
return true
}
}
return false
}
func getSliceName(ipamConf *types.IPAMConfig) string {
sliceName := ipamConf.Name
if ipamConf.NetworkName != "" {
sliceName = ipamConf.NetworkName
}
return sliceName
}
// since multiple nads can share a nodeslicepool we need to set multiple owner refs but only
// one controller owner ref
func getAuxiliaryOwnerRef(nad *cncfV1.NetworkAttachmentDefinition) metav1.OwnerReference {
return metav1.OwnerReference{
APIVersion: nad.APIVersion,
Kind: nad.Kind,
Name: nad.Name,
UID: nad.UID,
}
}
func removeUnusedNodes(allocations []v1alpha1.NodeSliceAllocation, nodes []*corev1.Node) {
// build a set of node names for fast lookup; we only care about keys, so empty struct values take no memory
nodeMap := make(map[string]struct{}, len(nodes))
for _, node := range nodes {
nodeMap[node.Name] = struct{}{}
}
for i, allocation := range allocations {
if allocation.NodeName != "" {
if _, ok := nodeMap[allocation.NodeName]; !ok {
allocations[i] = v1alpha1.NodeSliceAllocation{
SliceRange: allocation.SliceRange,
}
}
}
}
}
func ipamConfiguration(nad *cncfV1.NetworkAttachmentDefinition, mountPath string) (*types.IPAMConfig, error) {
mounterWhereaboutsConfigFilePath := mountPath + whereaboutsConfigPath
ipamConfig, err := config.LoadIPAMConfiguration([]byte(nad.Spec.Config), "", mounterWhereaboutsConfigFilePath)
if err != nil {
return nil, err
}
return ipamConfig, nil
}
func assignNodeToSlice(allocations []v1alpha1.NodeSliceAllocation, nodeName string) {
if nodeHasAllocation(allocations, nodeName) {
return
}
for i, allocation := range allocations {
if allocation.NodeName == "" {
allocations[i] = v1alpha1.NodeSliceAllocation{
SliceRange: allocation.SliceRange,
NodeName: nodeName,
}
return
}
}
}
func nodeHasAllocation(allocations []v1alpha1.NodeSliceAllocation, nodeName string) bool {
for _, allocation := range allocations {
if allocation.NodeName == nodeName {
return true
}
}
return false
}
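
The allocation bookkeeping in the controller above is simple enough to illustrate standalone. The sketch below is not the controller's code: the allocation type is a local stand-in for v1alpha1.NodeSliceAllocation, and the two helpers only mirror what assignNodeToSlice and removeUnusedNodes do, namely that a node takes the first empty slice and that a departed node's slice is freed while keeping its range.

package main

import "fmt"

// allocation is a local stand-in for v1alpha1.NodeSliceAllocation.
type allocation struct {
	SliceRange string
	NodeName   string
}

// assign places a node in the first unassigned slice, mirroring assignNodeToSlice.
func assign(allocs []allocation, node string) {
	for _, a := range allocs {
		if a.NodeName == node {
			return // already assigned
		}
	}
	for i, a := range allocs {
		if a.NodeName == "" {
			allocs[i] = allocation{SliceRange: a.SliceRange, NodeName: node}
			return
		}
	}
}

// prune clears slices whose node no longer exists, mirroring removeUnusedNodes.
func prune(allocs []allocation, nodes map[string]struct{}) {
	for i, a := range allocs {
		if a.NodeName == "" {
			continue
		}
		if _, ok := nodes[a.NodeName]; !ok {
			allocs[i] = allocation{SliceRange: a.SliceRange}
		}
	}
}

func main() {
	allocs := []allocation{
		{SliceRange: "10.0.0.0/10"},
		{SliceRange: "10.64.0.0/10"},
		{SliceRange: "10.128.0.0/10"},
		{SliceRange: "10.192.0.0/10"},
	}
	assign(allocs, "node1")
	assign(allocs, "node2")
	// node1 leaves the cluster; only node2 remains
	prune(allocs, map[string]struct{}{"node2": {}})
	fmt.Printf("%+v\n", allocs)
	// node1's slice is freed but keeps its range; node2 stays on 10.64.0.0/10
}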

View File

@ -0,0 +1,972 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node_controller
import (
"context"
"fmt"
k8snetplumbersv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/tools/cache"
"os"
"reflect"
"testing"
"time"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/diff"
kubeinformers "k8s.io/client-go/informers"
k8sfake "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/record"
k8snetplumbersv1fake "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/fake"
nadinformers "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/client/clientset/versioned/fake"
informers "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/informers/externalversions"
)
var (
alwaysReady = func() bool { return true }
noResyncPeriodFunc = func() time.Duration { return 0 }
)
type fixture struct {
t *testing.T
whereaboutsclient *fake.Clientset
kubeclient *k8sfake.Clientset
nadClient *k8snetplumbersv1fake.Clientset
// Objects to put in the store.
nadLister []*k8snetplumbersv1.NetworkAttachmentDefinition
nodeSlicePoolLister []*v1alpha1.NodeSlicePool
nodeLister []*v1.Node
// Actions expected to happen on the client.
whereaboutsactions []core.Action
// Objects from here preloaded into NewSimpleFake.
kubeobjects []runtime.Object
whereaboutsObjects []runtime.Object
nadObjects []runtime.Object
}
func newFixture(t *testing.T) *fixture {
f := &fixture{}
f.t = t
f.whereaboutsObjects = []runtime.Object{}
f.kubeobjects = []runtime.Object{}
f.nadObjects = []runtime.Object{}
return f
}
func newNad(name string, networkName string, networkRange string, sliceSize string) *k8snetplumbersv1.NetworkAttachmentDefinition {
return &k8snetplumbersv1.NetworkAttachmentDefinition{
TypeMeta: metav1.TypeMeta{
APIVersion: k8snetplumbersv1.SchemeGroupVersion.String(),
Kind: "NetworkAttachmentDefinition",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: metav1.NamespaceDefault,
},
Spec: k8snetplumbersv1.NetworkAttachmentDefinitionSpec{
Config: fmt.Sprintf(`
{
"cniVersion": "0.3.1",
"name": "test-name",
"plugins":
[
{
"type": "macvlan",
"master": "test",
"mode": "bridge",
"mtu": "mtu",
"ipam":
{
"configuration_path": "/tmp/whereabouts.conf",
"type": "whereabouts",
"range": "%s",
"node_slice_size": "%s",
"network_name": "%s",
"enable_overlapping_ranges": false
}
}
]
}`, networkRange, sliceSize, networkName),
},
}
}
func getOwnerRefs(nads []*k8snetplumbersv1.NetworkAttachmentDefinition) []metav1.OwnerReference {
if len(nads) == 1 {
return []metav1.OwnerReference{
*metav1.NewControllerRef(nads[0], k8snetplumbersv1.SchemeGroupVersion.WithKind("NetworkAttachmentDefinition")),
}
} else if len(nads) > 1 {
refs := []metav1.OwnerReference{
*metav1.NewControllerRef(nads[0], k8snetplumbersv1.SchemeGroupVersion.WithKind("NetworkAttachmentDefinition")),
}
for i, nad := range nads {
if i == 0 {
continue
}
refs = append(refs, metav1.OwnerReference{
APIVersion: nad.APIVersion,
Kind: nad.Kind,
Name: nad.Name,
UID: nad.UID,
})
}
return refs
}
return []metav1.OwnerReference{}
}
func newNodeSlicePool(name string, rangeSize string, sliceSize string, status v1alpha1.NodeSlicePoolStatus, nad ...*k8snetplumbersv1.NetworkAttachmentDefinition) *v1alpha1.NodeSlicePool {
return &v1alpha1.NodeSlicePool{
TypeMeta: metav1.TypeMeta{
APIVersion: v1alpha1.SchemeGroupVersion.String(),
Kind: "NodeSlicePool",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: metav1.NamespaceDefault,
OwnerReferences: getOwnerRefs(nad),
},
Spec: v1alpha1.NodeSlicePoolSpec{
Range: rangeSize,
SliceSize: sliceSize,
},
Status: status,
}
}
func newNode(name string) *v1.Node {
return &v1.Node{
TypeMeta: metav1.TypeMeta{
APIVersion: v1.SchemeGroupVersion.String(),
Kind: "Node",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: metav1.NamespaceDefault,
},
}
}
func (f *fixture) newController(ctx context.Context) (*Controller, informers.SharedInformerFactory, kubeinformers.SharedInformerFactory, nadinformers.SharedInformerFactory) {
f.whereaboutsclient = fake.NewSimpleClientset(f.whereaboutsObjects...)
f.kubeclient = k8sfake.NewSimpleClientset(f.kubeobjects...)
f.nadClient = k8snetplumbersv1fake.NewSimpleClientset()
// We have to manually create the NAD resources in the tracker because
// k8s.io/client-go/testing/fixture.go uses meta.UnsafeGuessKindToResource(gvk) to convert the GVK to a GVR,
// so the tracker ends up holding 'networkattachmentdefinitions' instead of 'network-attachment-definitions',
// which makes the informer observe deletes because no 'network-attachment-definitions' exist in the tracker.
for _, nad := range f.nadObjects {
//TODO: clean way to set GVR
f.nadClient.Tracker().Create(schema.GroupVersionResource{
Group: "k8s.cni.cncf.io",
Version: "v1",
Resource: "network-attachment-definitions",
}, nad, "default")
}
whereaboutsInformerFactory := informers.NewSharedInformerFactory(f.whereaboutsclient, noResyncPeriodFunc())
kubeInformerFactory := kubeinformers.NewSharedInformerFactory(f.kubeclient, noResyncPeriodFunc())
nadInformerFactory := nadinformers.NewSharedInformerFactory(f.nadClient, noResyncPeriodFunc())
c := NewController(
ctx,
f.kubeclient,
f.whereaboutsclient,
f.nadClient,
kubeInformerFactory.Core().V1().Nodes(),
whereaboutsInformerFactory.Whereabouts().V1alpha1().NodeSlicePools(),
nadInformerFactory.K8sCniCncfIo().V1().NetworkAttachmentDefinitions(),
true)
//TODO: add sync for IP Pool or remove IP pool if not used
c.nadSynced = alwaysReady
c.nodesSynced = alwaysReady
c.nodeSlicePoolSynced = alwaysReady
c.recorder = &record.FakeRecorder{}
for _, node := range f.nodeLister {
err := kubeInformerFactory.Core().V1().Nodes().Informer().GetIndexer().Add(node)
if err != nil {
f.t.Error("error adding nodes to informer mock")
}
}
for _, nad := range f.nadLister {
err := nadInformerFactory.K8sCniCncfIo().V1().NetworkAttachmentDefinitions().Informer().GetIndexer().Add(nad)
if err != nil {
f.t.Error("error adding nads to informer mock")
}
}
for _, nodeSlicePool := range f.nodeSlicePoolLister {
err := whereaboutsInformerFactory.Whereabouts().V1alpha1().NodeSlicePools().Informer().GetIndexer().Add(nodeSlicePool)
if err != nil {
f.t.Error("error adding nodeslicepools to informer mock")
}
}
return c, whereaboutsInformerFactory, kubeInformerFactory, nadInformerFactory
}
func (f *fixture) run(ctx context.Context, name string) {
//requires conf file to run
globalconf := `{
"datastore": "kubernetes",
"kubernetes": {
"kubeconfig": "/etc/cni/net.d/whereabouts.d/whereabouts.kubeconfig"
},
"log_file": "/tmp/whereabouts.log",
"log_level": "debug",
"gateway": "192.168.5.5"
}`
err := os.WriteFile("/tmp/whereabouts.conf", []byte(globalconf), 0755)
if err != nil {
f.t.Error("error writing /tmp/whereabouts.conf")
}
f.runController(ctx, name, true, false)
}
func (f *fixture) runExpectError(ctx context.Context, name string) {
f.runController(ctx, name, true, true)
}
func (f *fixture) runController(ctx context.Context, nadName string, startInformers bool, expectError bool) {
c, whereaboutsInformer, kubeInformer, nadInformer := f.newController(ctx)
if startInformers {
whereaboutsInformer.Start(ctx.Done())
kubeInformer.Start(ctx.Done())
nadInformer.Start(ctx.Done())
}
err := c.syncHandler(ctx, nadName)
if !expectError && err != nil {
f.t.Errorf("error syncing nad: %v", err)
} else if expectError && err == nil {
f.t.Error("expected error syncing nad, got nil")
}
whereaboutsActions := filterInformerActions(f.whereaboutsclient.Actions())
for i, action := range whereaboutsActions {
if len(f.whereaboutsactions) < i+1 {
f.t.Errorf("%d unexpected actions: %+v", len(whereaboutsActions)-len(f.whereaboutsactions), whereaboutsActions[i:])
break
}
expectedAction := f.whereaboutsactions[i]
checkAction(expectedAction, action, f.t)
}
if len(f.whereaboutsactions) > len(whereaboutsActions) {
f.t.Errorf("%d additional expected actions:%+v", len(f.whereaboutsactions)-len(whereaboutsActions), f.whereaboutsactions[len(whereaboutsActions):])
}
}
// checkAction verifies that expected and actual actions are equal and both have
// same attached resources
func checkAction(expected, actual core.Action, t *testing.T) {
if !(expected.Matches(actual.GetVerb(), actual.GetResource().Resource) && actual.GetSubresource() == expected.GetSubresource()) {
t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expected, actual)
return
}
if reflect.TypeOf(actual) != reflect.TypeOf(expected) {
t.Errorf("Action has wrong type. Expected: %T. Got: %T", expected, actual)
return
}
switch a := actual.(type) {
case core.CreateActionImpl:
e, _ := expected.(core.CreateActionImpl)
expObject := e.GetObject()
object := a.GetObject()
if !reflect.DeepEqual(expObject, object) {
t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object))
}
case core.UpdateActionImpl:
e, _ := expected.(core.UpdateActionImpl)
expObject := e.GetObject()
object := a.GetObject()
if !reflect.DeepEqual(expObject, object) {
t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object))
}
case core.PatchActionImpl:
e, _ := expected.(core.PatchActionImpl)
expPatch := e.GetPatch()
patch := a.GetPatch()
if !reflect.DeepEqual(expPatch, patch) {
t.Errorf("Action %s %s has wrong patch\nDiff:\n %s",
a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expPatch, patch))
}
case core.DeleteActionImpl:
e, _ := expected.(core.DeleteActionImpl)
expName := e.GetName()
name := a.GetName()
expNamespace := e.GetNamespace()
namespace := a.GetNamespace()
if expName != name || expNamespace != namespace {
t.Errorf("Action %s %s has wrong namespace or name. Expected %s/%s, actual %s/%s",
a.GetVerb(), a.GetResource().Resource, expNamespace, expName, namespace, name)
}
default:
t.Errorf("Uncaptured Action %s %s, you should explicitly add a case to capture it",
actual.GetVerb(), actual.GetResource().Resource)
}
}
// filterInformerActions filters list and watch actions for testing resources.
// Since list and watch don't change resource state we can filter it to lower
// noise level in our tests.
func filterInformerActions(actions []core.Action) []core.Action {
ret := []core.Action{}
for _, action := range actions {
if len(action.GetNamespace()) == 0 &&
(action.Matches("list", "network-attachment-definitions") ||
action.Matches("watch", "network-attachment-definitions") ||
action.Matches("list", "nodeslicepools") ||
action.Matches("watch", "nodeslicepools") ||
action.Matches("list", "nodes") ||
action.Matches("watch", "nodes") ||
action.Matches("list", "ippools") ||
action.Matches("watch", "ippools")) {
continue
}
ret = append(ret, action)
}
return ret
}
func (f *fixture) expectNodeSlicePoolCreateAction(nodeSlicePool *v1alpha1.NodeSlicePool) {
f.whereaboutsactions = append(f.whereaboutsactions, core.NewCreateAction(schema.GroupVersionResource{Resource: "nodeslicepools"}, nodeSlicePool.Namespace, nodeSlicePool))
}
func (f *fixture) expectNodeSlicePoolUpdateAction(nodeSlicePool *v1alpha1.NodeSlicePool) {
f.whereaboutsactions = append(f.whereaboutsactions, core.NewUpdateAction(schema.GroupVersionResource{Resource: "nodeslicepools"}, nodeSlicePool.Namespace, nodeSlicePool))
}
func (f *fixture) expectNodeSlicePoolDeleteAction(nodeSlicePool *v1alpha1.NodeSlicePool) {
f.whereaboutsactions = append(f.whereaboutsactions, core.NewDeleteAction(schema.GroupVersionResource{Resource: "nodeslicepools"}, nodeSlicePool.Namespace, nodeSlicePool.Name))
}
// TestCreatesNodeSlicePoolsNoNodes tests nad creation results in a new nodeslicepool being created correctly when no nodes in cluster
func TestCreatesNodeSlicePoolsNoNodes(t *testing.T) {
f := newFixture(t)
nad := newNad("test", "test", "10.0.0.0/8", "/10")
nodeSlicePool := newNodeSlicePool("test", "10.0.0.0/8", "/10",
v1alpha1.NodeSlicePoolStatus{
Allocations: []v1alpha1.NodeSliceAllocation{
{
NodeName: "",
SliceRange: "10.0.0.0/10",
},
{
NodeName: "",
SliceRange: "10.64.0.0/10",
},
{
NodeName: "",
SliceRange: "10.128.0.0/10",
},
{
NodeName: "",
SliceRange: "10.192.0.0/10",
},
},
}, nad)
f.nadLister = append(f.nadLister, nad)
f.nadObjects = append(f.nadObjects, nad)
f.expectNodeSlicePoolCreateAction(nodeSlicePool)
f.run(context.TODO(), getKey(nad, t))
}
// TestCreatesNodeSlicePoolsWithNodes tests that a new NAD with existing nodes results in a NodeSlicePool being created correctly
func TestCreatesNodeSlicePoolsWithNodes(t *testing.T) {
f := newFixture(t)
nad := newNad("test", "test", "10.0.0.0/8", "/10")
node1 := newNode("node1")
node2 := newNode("node2")
nodeSlicePool := newNodeSlicePool("test", "10.0.0.0/8", "/10",
v1alpha1.NodeSlicePoolStatus{
Allocations: []v1alpha1.NodeSliceAllocation{
{
NodeName: "node1",
SliceRange: "10.0.0.0/10",
},
{
NodeName: "node2",
SliceRange: "10.64.0.0/10",
},
{
NodeName: "",
SliceRange: "10.128.0.0/10",
},
{
NodeName: "",
SliceRange: "10.192.0.0/10",
},
},
}, nad)
f.nadLister = append(f.nadLister, nad)
f.nodeLister = append(f.nodeLister, node1, node2)
f.kubeobjects = append(f.kubeobjects, node1, node2)
f.nadObjects = append(f.nadObjects, nad)
f.expectNodeSlicePoolCreateAction(nodeSlicePool)
f.run(context.TODO(), getKey(nad, t))
}
// TestDoNothing checks that no action is taken when no NAD exists
func TestDoNothing(t *testing.T) {
f := newFixture(t)
nad := newNad("test", "test", "10.0.0.0/8", "/10")
node1 := newNode("node1")
node2 := newNode("node2")
f.nodeLister = append(f.nodeLister, node1, node2)
f.kubeobjects = append(f.kubeobjects, node1, node2)
f.run(context.TODO(), getKey(nad, t))
}
// TestNodeJoins tests that a node is assigned a slice in the NodeSlicePool after the node is added
func TestNodeJoins(t *testing.T) {
f := newFixture(t)
nad := newNad("test", "test", "10.0.0.0/8", "/10")
node1 := newNode("node1")
nodeSlicePool := newNodeSlicePool("test", "10.0.0.0/8", "/10",
v1alpha1.NodeSlicePoolStatus{
Allocations: []v1alpha1.NodeSliceAllocation{
{
NodeName: "",
SliceRange: "10.0.0.0/10",
},
{
NodeName: "",
SliceRange: "10.64.0.0/10",
},
{
NodeName: "",
SliceRange: "10.128.0.0/10",
},
{
NodeName: "",
SliceRange: "10.192.0.0/10",
},
},
}, nad)
expectedNodeSlicePool := newNodeSlicePool("test", "10.0.0.0/8", "/10",
v1alpha1.NodeSlicePoolStatus{
Allocations: []v1alpha1.NodeSliceAllocation{
{
NodeName: "node1",
SliceRange: "10.0.0.0/10",
},
{
NodeName: "",
SliceRange: "10.64.0.0/10",
},
{
NodeName: "",
SliceRange: "10.128.0.0/10",
},
{
NodeName: "",
SliceRange: "10.192.0.0/10",
},
},
}, nad)
f.nadLister = append(f.nadLister, nad)
f.nodeSlicePoolLister = append(f.nodeSlicePoolLister, nodeSlicePool)
f.whereaboutsObjects = append(f.whereaboutsObjects, nodeSlicePool)
f.kubeobjects = append(f.kubeobjects, node1)
f.nodeLister = append(f.nodeLister, node1)
f.nadObjects = append(f.nadObjects, nad)
f.expectNodeSlicePoolUpdateAction(expectedNodeSlicePool)
f.run(context.TODO(), getKey(nad, t))
}
// TestNodeLeaves tests that a node's slice is released from the NodeSlicePool after the node no longer exists
func TestNodeLeaves(t *testing.T) {
f := newFixture(t)
nad := newNad("test", "test", "10.0.0.0/8", "/10")
nodeSlicePool := newNodeSlicePool("test", "10.0.0.0/8", "/10",
v1alpha1.NodeSlicePoolStatus{
Allocations: []v1alpha1.NodeSliceAllocation{
{
NodeName: "node1",
SliceRange: "10.0.0.0/10",
},
{
NodeName: "",
SliceRange: "10.64.0.0/10",
},
{
NodeName: "",
SliceRange: "10.128.0.0/10",
},
{
NodeName: "",
SliceRange: "10.192.0.0/10",
},
},
}, nad)
expectedNodeSlicePool := newNodeSlicePool("test", "10.0.0.0/8", "/10",
v1alpha1.NodeSlicePoolStatus{
Allocations: []v1alpha1.NodeSliceAllocation{
{
NodeName: "",
SliceRange: "10.0.0.0/10",
},
{
NodeName: "",
SliceRange: "10.64.0.0/10",
},
{
NodeName: "",
SliceRange: "10.128.0.0/10",
},
{
NodeName: "",
SliceRange: "10.192.0.0/10",
},
},
}, nad)
f.nadLister = append(f.nadLister, nad)
f.nadObjects = append(f.nadObjects, nad)
f.nodeSlicePoolLister = append(f.nodeSlicePoolLister, nodeSlicePool)
f.whereaboutsObjects = append(f.whereaboutsObjects, nodeSlicePool)
f.expectNodeSlicePoolUpdateAction(expectedNodeSlicePool)
f.run(context.TODO(), getKey(nad, t))
}
// TestNadDelete tests the deletion of NodeSlicePool after its only owning NAD is deleted
func TestNadDelete(t *testing.T) {
f := newFixture(t)
nad := newNad("test", "test", "10.0.0.0/8", "/10")
node1 := newNode("node1")
node2 := newNode("node2")
nodeSlicePool := newNodeSlicePool("test", "10.0.0.0/8", "/10",
v1alpha1.NodeSlicePoolStatus{
Allocations: []v1alpha1.NodeSliceAllocation{
{
NodeName: "node1",
SliceRange: "10.0.0.0/10",
},
{
NodeName: "node2",
SliceRange: "10.64.0.0/10",
},
{
NodeName: "",
SliceRange: "10.128.0.0/10",
},
{
NodeName: "",
SliceRange: "10.192.0.0/10",
},
},
}, nad)
f.nodeLister = append(f.nodeLister, node1, node2)
f.kubeobjects = append(f.kubeobjects, node1, node2)
f.nadObjects = append(f.nadObjects, nad)
f.nodeSlicePoolLister = append(f.nodeSlicePoolLister, nodeSlicePool)
f.whereaboutsObjects = append(f.whereaboutsObjects, nodeSlicePool)
f.expectNodeSlicePoolDeleteAction(nodeSlicePool)
f.run(context.TODO(), getKey(nad, t))
}
// TestUpdateNoImpactfulChange tests a change to a NAD with an existing NodeSlicePool where the change does
// not cause a reslicing of the pool
func TestUpdateNoImpactfulChange(t *testing.T) {
f := newFixture(t)
nad := newNad("test2", "test", "10.0.0.0/8", "/10")
node1 := newNode("node1")
node2 := newNode("node2")
nodeSlicePool := newNodeSlicePool("test", "10.0.0.0/8", "/10",
v1alpha1.NodeSlicePoolStatus{
Allocations: []v1alpha1.NodeSliceAllocation{
{
NodeName: "node1",
SliceRange: "10.0.0.0/10",
},
{
NodeName: "node2",
SliceRange: "10.64.0.0/10",
},
{
NodeName: "",
SliceRange: "10.128.0.0/10",
},
{
NodeName: "",
SliceRange: "10.192.0.0/10",
},
},
}, nad)
f.nodeLister = append(f.nodeLister, node1, node2)
f.kubeobjects = append(f.kubeobjects, node1, node2)
f.nadLister = append(f.nadLister, nad)
f.nadObjects = append(f.nadObjects, nad)
f.nodeSlicePoolLister = append(f.nodeSlicePoolLister, nodeSlicePool)
f.whereaboutsObjects = append(f.whereaboutsObjects, nodeSlicePool)
f.run(context.TODO(), getKey(nad, t))
}
// TestUpdateRangeChangeAndSliceChange tests an update where both the range and the slice size change
func TestUpdateRangeChangeAndSliceChange(t *testing.T) {
f := newFixture(t)
nad := newNad("test", "test", "10.0.0.0/10", "/12")
node1 := newNode("node1")
node2 := newNode("node2")
nodeSlicePool := newNodeSlicePool("test", "10.0.0.0/8", "/10",
v1alpha1.NodeSlicePoolStatus{
Allocations: []v1alpha1.NodeSliceAllocation{
{
NodeName: "node1",
SliceRange: "10.0.0.0/10",
},
{
NodeName: "node2",
SliceRange: "10.64.0.0/10",
},
{
NodeName: "",
SliceRange: "10.128.0.0/10",
},
{
NodeName: "",
SliceRange: "10.192.0.0/10",
},
},
}, nad)
expectedNodeSlicePool := newNodeSlicePool("test", "10.0.0.0/10", "/12",
v1alpha1.NodeSlicePoolStatus{
Allocations: []v1alpha1.NodeSliceAllocation{
{
NodeName: "node1",
SliceRange: "10.0.0.0/12",
},
{
NodeName: "node2",
SliceRange: "10.16.0.0/12",
},
{
NodeName: "",
SliceRange: "10.32.0.0/12",
},
{
NodeName: "",
SliceRange: "10.48.0.0/12",
},
},
}, nad)
f.nodeLister = append(f.nodeLister, node1, node2)
f.kubeobjects = append(f.kubeobjects, node1, node2)
f.nadLister = append(f.nadLister, nad)
f.nadObjects = append(f.nadObjects, nad)
f.nodeSlicePoolLister = append(f.nodeSlicePoolLister, nodeSlicePool)
f.whereaboutsObjects = append(f.whereaboutsObjects, nodeSlicePool)
f.expectNodeSlicePoolUpdateAction(expectedNodeSlicePool)
f.run(context.TODO(), getKey(nad, t))
}
// TestUpdateRangeChangeChange tests an update where the range changes
func TestUpdateRangeChangeChange(t *testing.T) {
f := newFixture(t)
nad := newNad("test", "test", "11.0.0.0/8", "/10")
node1 := newNode("node1")
node2 := newNode("node2")
nodeSlicePool := newNodeSlicePool("test", "10.0.0.0/8", "/10",
v1alpha1.NodeSlicePoolStatus{
Allocations: []v1alpha1.NodeSliceAllocation{
{
NodeName: "node1",
SliceRange: "10.0.0.0/10",
},
{
NodeName: "node2",
SliceRange: "10.64.0.0/10",
},
{
NodeName: "",
SliceRange: "10.128.0.0/10",
},
{
NodeName: "",
SliceRange: "10.192.0.0/10",
},
},
}, nad)
expectedNodeSlicePool := newNodeSlicePool("test", "11.0.0.0/8", "/10",
v1alpha1.NodeSlicePoolStatus{
Allocations: []v1alpha1.NodeSliceAllocation{
{
NodeName: "node1",
SliceRange: "11.0.0.0/10",
},
{
NodeName: "node2",
SliceRange: "11.64.0.0/10",
},
{
NodeName: "",
SliceRange: "11.128.0.0/10",
},
{
NodeName: "",
SliceRange: "11.192.0.0/10",
},
},
}, nad)
f.nodeLister = append(f.nodeLister, node1, node2)
f.kubeobjects = append(f.kubeobjects, node1, node2)
f.nadLister = append(f.nadLister, nad)
f.nadObjects = append(f.nadObjects, nad)
f.nodeSlicePoolLister = append(f.nodeSlicePoolLister, nodeSlicePool)
f.whereaboutsObjects = append(f.whereaboutsObjects, nodeSlicePool)
f.expectNodeSlicePoolUpdateAction(expectedNodeSlicePool)
f.run(context.TODO(), getKey(nad, t))
}
// TestUpdateChangeSliceChange tests an update where the slice size changes
func TestUpdateChangeSliceChange(t *testing.T) {
f := newFixture(t)
nad := newNad("test", "test", "10.0.0.0/8", "/11")
node1 := newNode("node1")
node2 := newNode("node2")
nodeSlicePool := newNodeSlicePool("test", "10.0.0.0/8", "/10",
v1alpha1.NodeSlicePoolStatus{
Allocations: []v1alpha1.NodeSliceAllocation{
{
NodeName: "node1",
SliceRange: "10.0.0.0/10",
},
{
NodeName: "node2",
SliceRange: "10.64.0.0/10",
},
{
NodeName: "",
SliceRange: "10.128.0.0/10",
},
{
NodeName: "",
SliceRange: "10.192.0.0/10",
},
},
}, nad)
expectedNodeSlicePool := newNodeSlicePool("test", "10.0.0.0/8", "/11",
v1alpha1.NodeSlicePoolStatus{
Allocations: []v1alpha1.NodeSliceAllocation{
{
NodeName: "node1",
SliceRange: "10.0.0.0/11",
},
{
NodeName: "node2",
SliceRange: "10.32.0.0/11",
},
{
NodeName: "",
SliceRange: "10.64.0.0/11",
},
{
NodeName: "",
SliceRange: "10.96.0.0/11",
},
{
NodeName: "",
SliceRange: "10.128.0.0/11",
},
{
NodeName: "",
SliceRange: "10.160.0.0/11",
},
{
NodeName: "",
SliceRange: "10.192.0.0/11",
},
{
NodeName: "",
SliceRange: "10.224.0.0/11",
},
},
}, nad)
f.nodeLister = append(f.nodeLister, node1, node2)
f.kubeobjects = append(f.kubeobjects, node1, node2)
f.nadLister = append(f.nadLister, nad)
f.nadObjects = append(f.nadObjects, nad)
f.nodeSlicePoolLister = append(f.nodeSlicePoolLister, nodeSlicePool)
f.whereaboutsObjects = append(f.whereaboutsObjects, nodeSlicePool)
f.expectNodeSlicePoolUpdateAction(expectedNodeSlicePool)
f.run(context.TODO(), getKey(nad, t))
}
// TestMultipleNadsSameNetworkName tests that if a NAD and NodeSlicePool already exist and a new NAD with the same
// network name is created, the new owner reference is appended to the pool
func TestMultipleNadsSameNetworkName(t *testing.T) {
f := newFixture(t)
nad1 := newNad("test1", "test", "10.0.0.0/8", "/10")
nad2 := newNad("test2", "test", "10.0.0.0/8", "/10")
node1 := newNode("node1")
node2 := newNode("node2")
nodeSlicePool := newNodeSlicePool("test", "10.0.0.0/8", "/10",
v1alpha1.NodeSlicePoolStatus{
Allocations: []v1alpha1.NodeSliceAllocation{
{
NodeName: "node1",
SliceRange: "10.0.0.0/10",
},
{
NodeName: "node2",
SliceRange: "10.64.0.0/10",
},
{
NodeName: "",
SliceRange: "10.128.0.0/10",
},
{
NodeName: "",
SliceRange: "10.192.0.0/10",
},
},
}, nad1)
expectedNodeSlicePool := newNodeSlicePool("test", "10.0.0.0/8", "/10",
v1alpha1.NodeSlicePoolStatus{
Allocations: []v1alpha1.NodeSliceAllocation{
{
NodeName: "node1",
SliceRange: "10.0.0.0/10",
},
{
NodeName: "node2",
SliceRange: "10.64.0.0/10",
},
{
NodeName: "",
SliceRange: "10.128.0.0/10",
},
{
NodeName: "",
SliceRange: "10.192.0.0/10",
},
},
}, nad1, nad2)
f.nadObjects = append(f.nadObjects, nad1, nad2)
f.nadLister = append(f.nadLister, nad1, nad2)
f.kubeobjects = append(f.kubeobjects, node1, node2)
f.nodeLister = append(f.nodeLister, node1, node2)
f.nodeSlicePoolLister = append(f.nodeSlicePoolLister, nodeSlicePool)
f.whereaboutsObjects = append(f.whereaboutsObjects, nodeSlicePool)
f.expectNodeSlicePoolUpdateAction(expectedNodeSlicePool)
f.run(context.TODO(), getKey(nad2, t))
}
// TestMultipleNadsSameNetworkNameDeleteOneNad tests that nothing is done if multiple NADs share ownership of a
// NodeSlicePool and one of them is deleted
func TestMultipleNadsSameNetworkNameDeleteOneNad(t *testing.T) {
f := newFixture(t)
nad1 := newNad("test1", "test", "10.0.0.0/8", "/10")
nad2 := newNad("test2", "test", "10.0.0.0/8", "/10")
node1 := newNode("node1")
node2 := newNode("node2")
nodeSlicePool := newNodeSlicePool("test", "10.0.0.0/8", "/10",
v1alpha1.NodeSlicePoolStatus{
Allocations: []v1alpha1.NodeSliceAllocation{
{
NodeName: "node1",
SliceRange: "10.0.0.0/10",
},
{
NodeName: "node2",
SliceRange: "10.64.0.0/10",
},
{
NodeName: "",
SliceRange: "10.128.0.0/10",
},
{
NodeName: "",
SliceRange: "10.192.0.0/10",
},
},
}, nad1, nad2)
f.nadObjects = append(f.nadObjects, nad1)
f.nadLister = append(f.nadLister, nad1)
f.kubeobjects = append(f.kubeobjects, node1, node2)
f.nodeSlicePoolLister = append(f.nodeSlicePoolLister, nodeSlicePool)
f.whereaboutsObjects = append(f.whereaboutsObjects, nodeSlicePool)
f.nodeLister = append(f.nodeLister, node1, node2)
f.run(context.TODO(), getKey(nad2, t))
}
// TestTwoNetworksRangeAndSliceMismatch tests that an error is returned if multiple NADs share a network name with differing configs
func TestTwoNetworksRangeAndSliceMismatch(t *testing.T) {
f := newFixture(t)
nad1 := newNad("test1", "test", "10.0.0.0/8", "/10")
nad2 := newNad("test2", "test", "10.0.0.0/8", "/8")
node1 := newNode("node1")
node2 := newNode("node2")
f.nadObjects = append(f.nadObjects, nad1, nad2)
f.nadLister = append(f.nadLister, nad1, nad2)
f.kubeobjects = append(f.kubeobjects, node1, node2)
f.nodeLister = append(f.nodeLister, node1, node2)
f.runExpectError(context.TODO(), getKey(nad2, t))
}
func getKey(nad *k8snetplumbersv1.NetworkAttachmentDefinition, t *testing.T) string {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(nad)
if err != nil {
t.Errorf("Unexpected error getting key for nad %v: %v", nad.Name, err)
return ""
}
return key
}
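// For reference, the expected allocation counts in the tests above follow from the slice arithmetic:
// a /8 range carved into /10 slices yields 1 << (10 - 8) = 4 allocations, while /11 slices yield
// 1 << (11 - 8) = 8. A minimal sketch of that calculation (illustrative only, assuming already
// validated prefix lengths; not the controller's actual slicing code):
//
// func sliceCount(rangePrefix, slicePrefix int) int {
// 	return 1 << (slicePrefix - rangePrefix) // e.g. 1 << (10-8) == 4, 1 << (11-8) == 8
// }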

View File

@ -0,0 +1,28 @@
package signals
import (
"context"
"os"
"os/signal"
)
var onlyOneSignalHandler = make(chan struct{})
// SetupSignalHandler registers for SIGTERM and SIGINT. A context is returned
// which is cancelled on one of these signals. If a second signal is caught,
// the program is terminated with exit code 1.
func SetupSignalHandler() context.Context {
close(onlyOneSignalHandler) // panics when called twice
c := make(chan os.Signal, 2)
ctx, cancel := context.WithCancel(context.Background())
signal.Notify(c, shutdownSignals...)
go func() {
<-c
cancel()
<-c
os.Exit(1) // second signal. Exit directly.
}()
return ctx
}

View File

@ -0,0 +1,8 @@
package signals
import (
"os"
"syscall"
)
var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM}

View File

@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"net"
"os"
"strconv"
"strings"
"sync"
@ -82,6 +83,7 @@ func NewKubernetesIPAMWithNamespace(containerID, ifName string, ipamConf whereab
type PoolIdentifier struct {
IpRange string
NetworkName string
NodeName string
}
// GetIPPool returns a storage.IPPool for the given range
@ -101,6 +103,36 @@ func (i *KubernetesIPAM) GetIPPool(ctx context.Context, poolIdentifier PoolIdent
return &KubernetesIPPool{i.client, firstIP, pool}, nil
}
func IPPoolName(poolIdentifier PoolIdentifier) string {
if poolIdentifier.NodeName != "" {
// fast node range naming convention
if poolIdentifier.NetworkName == UnnamedNetwork {
return fmt.Sprintf("%v-%v", poolIdentifier.NodeName, normalizeRange(poolIdentifier.IpRange))
} else {
return fmt.Sprintf("%v-%v-%v", poolIdentifier.NetworkName, poolIdentifier.NodeName, normalizeRange(poolIdentifier.IpRange))
}
} else {
// default naming convention
if poolIdentifier.NetworkName == UnnamedNetwork {
return normalizeRange(poolIdentifier.IpRange)
} else {
return fmt.Sprintf("%s-%s", poolIdentifier.NetworkName, normalizeRange(poolIdentifier.IpRange))
}
}
}
func normalizeRange(ipRange string) string {
// v6 filter
if ipRange[len(ipRange)-1] == ':' {
ipRange = ipRange + "0"
}
normalized := strings.ReplaceAll(ipRange, ":", "-")
// replace subnet cidr slash
normalized = strings.ReplaceAll(normalized, "/", "-")
return normalized
}
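// Worked examples (derived from the function above): the trailing-':' branch appears to guard ranges
// given as a bare network ending in "::", e.g. "fd00::" becomes "fd00::0" and normalizes to "fd00--0";
// a range with a prefix length such as "fd00::/64" skips that branch and normalizes to "fd00---64".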
func (i *KubernetesIPAM) getPool(ctx context.Context, name string, iprange string) (*whereaboutsv1alpha1.IPPool, error) {
ctxWithTimeout, cancel := context.WithTimeout(ctx, storage.RequestTimeout)
defer cancel()
@ -128,26 +160,6 @@ func (i *KubernetesIPAM) getPool(ctx context.Context, name string, iprange strin
return pool, nil
}
func IPPoolName(poolIdentifier PoolIdentifier) string {
if poolIdentifier.NetworkName == UnnamedNetwork {
return normalizeRange(poolIdentifier.IpRange)
} else {
return fmt.Sprintf("%s-%s", poolIdentifier.NetworkName, normalizeRange(poolIdentifier.IpRange))
}
}
func normalizeRange(ipRange string) string {
// v6 filter
if ipRange[len(ipRange)-1] == ':' {
ipRange = ipRange + "0"
}
normalized := strings.ReplaceAll(ipRange, ":", "-")
// replace subnet cidr slash
normalized = strings.ReplaceAll(normalized, "/", "-")
return normalized
}
// Status tests connectivity to the kubernetes backend
func (i *KubernetesIPAM) Status(ctx context.Context) error {
_, err := i.client.WhereaboutsV1alpha1().IPPools(i.namespace).List(ctx, metav1.ListOptions{})
@ -343,9 +355,36 @@ func NormalizeIP(ip net.IP, networkName string) string {
return normalizedIP
}
// TODO: what's the best way to discover the node name? this should work in both controller pod and whereabouts host process
func getNodeName() (string, error) {
envName := os.Getenv("NODENAME")
if envName != "" {
return strings.TrimSpace(envName), nil
}
file, err := os.Open("/etc/hostname")
if err != nil {
logging.Errorf("Error opening file /etc/hostname: %v", err)
return "", err
}
defer file.Close()
// Read the contents of the file
data := make([]byte, 1024) // Adjust the buffer size as needed
n, err := file.Read(data)
if err != nil {
logging.Errorf("Error reading file /etc/hostname: %v", err)
}
// Convert bytes to string
hostname := string(data[:n])
hostname = strings.TrimSpace(hostname)
logging.Debugf("discovered current hostname as: %s", hostname)
return hostname, nil
}
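// One possible way to address the TODO above is to inject the node name via the Kubernetes downward
// API so the NODENAME env var is always set for both the controller pod and the host process. A
// minimal sketch using k8s.io/api/core/v1 types (illustrative only; the actual manifests may differ):
//
// env := corev1.EnvVar{
// 	Name: "NODENAME",
// 	ValueFrom: &corev1.EnvVarSource{
// 		FieldRef: &corev1.ObjectFieldSelector{FieldPath: "spec.nodeName"},
// 	},
// }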
// newLeaderElector creates a new leaderelection.LeaderElector and associated
// channels by which to observe elections and depositions.
func newLeaderElector(clientset kubernetes.Interface, namespace string, podNamespace string, podID string, leaseDuration int, renewDeadline int, retryPeriod int) (*leaderelection.LeaderElector, chan struct{}, chan struct{}) {
func newLeaderElector(ctx context.Context, clientset kubernetes.Interface, namespace string, ipamConf *KubernetesIPAM) (*leaderelection.LeaderElector, chan struct{}, chan struct{}) {
//log.WithField("context", "leaderelection")
// leaderOK will block gRPC startup until it's closed.
leaderOK := make(chan struct{})
@ -353,14 +392,31 @@ func newLeaderElector(clientset kubernetes.Interface, namespace string, podNames
// we are deposed as leader so that we can clean up.
deposed := make(chan struct{})
leaseName := "whereabouts"
if ipamConf.Config.NodeSliceSize != "" {
// we lock per IP Pool so just use the pool name for the lease name
hostname, err := getNodeName()
if err != nil {
logging.Errorf("Failed to create leader elector: %v", err)
return nil, leaderOK, deposed
}
nodeSliceRange, err := GetNodeSlicePoolRange(ctx, ipamConf, hostname)
if err != nil {
logging.Errorf("Failed to create leader elector: %v", err)
return nil, leaderOK, deposed
}
leaseName = IPPoolName(PoolIdentifier{IpRange: nodeSliceRange, NodeName: hostname, NetworkName: ipamConf.Config.NetworkName})
}
logging.Debugf("using lease with name: %v", leaseName)
var rl = &resourcelock.LeaseLock{
LeaseMeta: metav1.ObjectMeta{
Name: "whereabouts",
Name: leaseName,
Namespace: namespace,
},
Client: clientset.CoordinationV1(),
LockConfig: resourcelock.ResourceLockConfig{
Identity: fmt.Sprintf("%s/%s", podNamespace, podID),
Identity: fmt.Sprintf("%s/%s", ipamConf.Config.PodNamespace, ipamConf.Config.PodName),
},
}
@ -368,9 +424,9 @@ func newLeaderElector(clientset kubernetes.Interface, namespace string, podNames
// !bang
le, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
Lock: rl,
LeaseDuration: time.Duration(leaseDuration) * time.Millisecond,
RenewDeadline: time.Duration(renewDeadline) * time.Millisecond,
RetryPeriod: time.Duration(retryPeriod) * time.Millisecond,
LeaseDuration: time.Duration(ipamConf.Config.LeaderLeaseDuration) * time.Millisecond,
RenewDeadline: time.Duration(ipamConf.Config.LeaderRenewDeadline) * time.Millisecond,
RetryPeriod: time.Duration(ipamConf.Config.LeaderRetryPeriod) * time.Millisecond,
ReleaseOnCancel: true,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: func(_ context.Context) {
@ -401,7 +457,7 @@ func IPManagement(ctx context.Context, mode int, ipamConf whereaboutstypes.IPAMC
}
// setup leader election
le, leader, deposed := newLeaderElector(client.clientSet, client.namespace, ipamConf.PodNamespace, ipamConf.PodName, ipamConf.LeaderLeaseDuration, ipamConf.LeaderRenewDeadline, ipamConf.LeaderRetryPeriod)
le, leader, deposed := newLeaderElector(ctx, client.clientSet, client.namespace, client)
var wg sync.WaitGroup
wg.Add(2)
@ -448,13 +504,36 @@ func IPManagement(ctx context.Context, mode int, ipamConf whereaboutstypes.IPAMC
leCancel()
result <- (<-res)
}()
wg.Wait()
close(stopM)
logging.Debugf("IPManagement: %v, %v", newips, err)
return newips, err
}
func GetNodeSlicePoolRange(ctx context.Context, ipam *KubernetesIPAM, nodeName string) (string, error) {
logging.Debugf("ipam namespace is %v", ipam.namespace)
nodeSlice, err := ipam.client.WhereaboutsV1alpha1().NodeSlicePools(ipam.Config.Namespace).Get(ctx, getNodeSliceName(ipam), metav1.GetOptions{})
if err != nil {
logging.Errorf("error getting node slice %s/%s %v", ipam.Config.Namespace, getNodeSliceName(ipam), err)
return "", err
}
for _, allocation := range nodeSlice.Status.Allocations {
if allocation.NodeName == nodeName {
logging.Debugf("found matching node slice allocation for hostname %v: %v", nodeName, allocation)
return allocation.SliceRange, nil
}
}
logging.Errorf("error finding node within node slice allocations")
return "", fmt.Errorf("no allocated node slice for node")
}
func getNodeSliceName(ipam *KubernetesIPAM) string {
if ipam.Config.NetworkName == UnnamedNetwork {
return ipam.Config.Name
}
return ipam.Config.NetworkName
}
// IPManagementKubernetesUpdate manages k8s updates
func IPManagementKubernetesUpdate(ctx context.Context, mode int, ipam *KubernetesIPAM, ipamConf whereaboutstypes.IPAMConfig) ([]net.IPNet, error) {
logging.Debugf("IPManagement -- mode: %d / containerID: %q / podRef: %q / ifName: %q ", mode, ipam.containerID, ipamConf.GetPodRef(), ipam.IfName)
@ -494,14 +573,47 @@ func IPManagementKubernetesUpdate(ctx context.Context, mode int, ipam *Kubernete
default:
// retry the IPAM loop if the context has not been cancelled
}
overlappingrangestore, err = ipam.GetOverlappingRangeStore()
if err != nil {
logging.Errorf("IPAM error getting OverlappingRangeStore: %v", err)
return newips, err
}
pool, err = ipam.GetIPPool(requestCtx, PoolIdentifier{IpRange: ipRange.Range, NetworkName: ipamConf.NetworkName})
poolIdentifier := PoolIdentifier{IpRange: ipRange.Range, NetworkName: ipamConf.NetworkName}
if ipamConf.NodeSliceSize != "" {
hostname, err := getNodeName()
if err != nil {
logging.Errorf("Failed to get node hostname: %v", err)
return newips, err
}
poolIdentifier.NodeName = hostname
nodeSliceRange, err := GetNodeSlicePoolRange(ctx, ipam, hostname)
if err != nil {
return newips, err
}
_, ipNet, err := net.ParseCIDR(nodeSliceRange)
if err != nil {
logging.Errorf("Error parsing node slice cidr to net.IPNet: %v", err)
return newips, err
}
poolIdentifier.IpRange = nodeSliceRange
rangeStart, err := iphelpers.FirstUsableIP(*ipNet)
if err != nil {
logging.Errorf("Error parsing node slice cidr to range start: %v", err)
return newips, err
}
rangeEnd, err := iphelpers.LastUsableIP(*ipNet)
if err != nil {
logging.Errorf("Error parsing node slice cidr to range start: %v", err)
return newips, err
}
ipRange = whereaboutstypes.RangeConfiguration{
Range: nodeSliceRange,
RangeStart: rangeStart,
RangeEnd: rangeEnd,
}
}
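// Worked example, assuming iphelpers excludes the network and broadcast addresses: if
// GetNodeSlicePoolRange returns 10.64.0.0/10 for this node, the pool identifier's range becomes
// 10.64.0.0/10, RangeStart resolves to 10.64.0.1, and RangeEnd to 10.127.255.254.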
logging.Debugf("using pool identifier: %v", poolIdentifier)
pool, err = ipam.GetIPPool(requestCtx, poolIdentifier)
if err != nil {
logging.Errorf("IPAM error reading pool allocations (attempt: %d): %v", j, err)
if e, ok := err.(storage.Temporary); ok && e.Temporary() {

View File

@ -0,0 +1,54 @@
package kubernetes
import "testing"
func TestIPPoolName(t *testing.T) {
cases := []struct {
name string
poolIdentifier PoolIdentifier
expectedResult string
}{
{
name: "No node name, unnamed network",
poolIdentifier: PoolIdentifier{
NetworkName: UnnamedNetwork,
IpRange: "10.0.0.0/8",
},
expectedResult: "10.0.0.0-8",
},
{
name: "No node name, named network",
poolIdentifier: PoolIdentifier{
NetworkName: "test",
IpRange: "10.0.0.0/8",
},
expectedResult: "test-10.0.0.0-8",
},
{
name: "Node name, unnamed network",
poolIdentifier: PoolIdentifier{
NetworkName: UnnamedNetwork,
NodeName: "testnode",
IpRange: "10.0.0.0/8",
},
expectedResult: "testnode-10.0.0.0-8",
},
{
name: "Node name, named network",
poolIdentifier: PoolIdentifier{
NetworkName: "testnetwork",
NodeName: "testnode",
IpRange: "10.0.0.0/8",
},
expectedResult: "testnetwork-testnode-10.0.0.0-8",
},
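{
// illustrative IPv6 case, assuming ':' and '/' are both replaced by '-' per normalizeRange
name: "Node name, unnamed network, IPv6 range",
poolIdentifier: PoolIdentifier{
NetworkName: UnnamedNetwork,
NodeName: "testnode",
IpRange: "fd00::/64",
},
expectedResult: "testnode-fd00---64",
},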
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
result := IPPoolName(tc.poolIdentifier)
if result != tc.expectedResult {
t.Errorf("Expected result: %s, got result: %s", tc.expectedResult, result)
}
})
}
}

View File

@ -54,6 +54,8 @@ type IPAMConfig struct {
OmitRanges []string `json:"exclude,omitempty"`
DNS cnitypes.DNS `json:"dns"`
Range string `json:"range"`
NodeSliceSize string `json:"node_slice_size"`
Namespace string `json:"namespace"` //TODO: best way to get namespace of the NAD?
RangeStart net.IP `json:"range_start,omitempty"`
RangeEnd net.IP `json:"range_end,omitempty"`
GatewayStr string `json:"gateway"`
@ -81,6 +83,8 @@ func (ic *IPAMConfig) UnmarshalJSON(data []byte) error {
Datastore string `json:"datastore"`
Addresses []Address `json:"addresses,omitempty"`
IPRanges []RangeConfiguration `json:"ipRanges"`
NodeSliceSize string `json:"node_slice_size"`
Namespace string `json:"namespace"` //TODO: best way to get namespace of the NAD?
OmitRanges []string `json:"exclude,omitempty"`
DNS cnitypes.DNS `json:"dns"`
Range string `json:"range"`
@ -126,8 +130,10 @@ func (ic *IPAMConfig) UnmarshalJSON(data []byte) error {
OmitRanges: ipamConfigAlias.OmitRanges,
DNS: ipamConfigAlias.DNS,
Range: ipamConfigAlias.Range,
Namespace: ipamConfigAlias.Namespace,
RangeStart: backwardsCompatibleIPAddress(ipamConfigAlias.RangeStart),
RangeEnd: backwardsCompatibleIPAddress(ipamConfigAlias.RangeEnd),
NodeSliceSize: ipamConfigAlias.NodeSliceSize,
GatewayStr: ipamConfigAlias.GatewayStr,
LeaderLeaseDuration: ipamConfigAlias.LeaderLeaseDuration,
LeaderRenewDeadline: ipamConfigAlias.LeaderRenewDeadline,