Mirror of https://github.com/intel/intel-device-plugins-for-kubernetes.git
operator: dsa: Add provisioning configurability
The provisioning config can optionally be stored in the ProvisioningConfig ConfigMap, which is then passed to the initcontainer through a volume mount. Node-specific configuration is also possible: the node name is passed into the initcontainer's environment via NODE_NAME, and a node-specific profile is supplied through the same ConfigMap volume mount.

Signed-off-by: Oleg Zhurakivskyy <oleg.zhurakivskyy@intel.com>
parent ebb5f4987d
commit 594a696879
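
As a hedged illustration of how the new field is meant to be used, a DsaDevicePlugin resource could reference the ConfigMap roughly as sketched below. The provisioningConfig and initImage field names, the intel-dsa-config ConfigMap name, and the intel/intel-dsa-initcontainer:devel image are taken from this commit; the apiVersion, metadata, and the remaining spec values are assumptions for the example only.

apiVersion: deviceplugin.intel.com/v1
kind: DsaDevicePlugin
metadata:
  name: dsadeviceplugin-sample                    # assumed example name
spec:
  image: intel/intel-dsa-plugin:devel             # assumed plugin image
  initImage: intel/intel-dsa-initcontainer:devel  # init image referenced in this commit
  provisioningConfig: intel-dsa-config            # ConfigMap added by this commit
  sharedDevNum: 10                                # illustrative value

With provisioningConfig set, the operator mounts the named ConfigMap at /dsa-init/conf in the initcontainer, where the init script prefers conf/dsa-$NODE_NAME.conf over conf/dsa.conf.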
@@ -2,9 +2,6 @@

set -euo pipefail

ndev=$(accel-config list --idle | jq '.[].dev' | grep -c dsa)
nwq=4

function cmd() {
    echo "$@"
@@ -12,11 +9,26 @@ function cmd() {
    "${@}"
}

for i in $(accel-config list | jq '.[].dev' | grep dsa | sed 's/\"//g'); do
    cmd accel-config disable-device "$i"
done

ndev=$(accel-config list --idle | jq '.[].dev' | grep -c dsa)
nwq=4

for (( i = 0; i < ndev; i++ )); do
    dev="dsa${i}"

    sed "s/X/${i}/g" < dsa.conf > $dev.conf
    config="dsa.conf"

    [ -f "conf/dsa.conf" ] && config="conf/dsa.conf"
    [ -f "conf/dsa-$NODE_NAME.conf" ] && config="conf/dsa-$NODE_NAME.conf"

    sed "s/X/${i}/g" < "$config" > $dev.conf

    cmd accel-config load-config -c "$dev.conf"
@@ -0,0 +1,224 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: intel-dsa-config
  namespace: inteldeviceplugins-system
data:
  dsa.conf: |
    [
      {
        "dev":"dsaX",
        "token_limit":0,
        "groups":[
          {
            "dev":"groupX.0",
            "tokens_reserved":0,
            "use_token_limit":0,
            "tokens_allowed":8,
            "grouped_workqueues":[
              {
                "dev":"wqX.0",
                "mode":"dedicated",
                "size":16,
                "group_id":0,
                "priority":10,
                "block_on_fault":1,
                "type":"user",
                "name":"appX0",
                "threshold":15
              }
            ],
            "grouped_engines":[
              {
                "dev":"engineX.0",
                "group_id":0
              },
            ]
          },
          {
            "dev":"groupX.1",
            "tokens_reserved":0,
            "use_token_limit":0,
            "tokens_allowed":8,
            "grouped_workqueues":[
              {
                "dev":"wqX.1",
                "mode":"dedicated",
                "size":16,
                "group_id":1,
                "priority":10,
                "block_on_fault":1,
                "type":"user",
                "name":"appX1",
                "threshold":15
              }
            ],
            "grouped_engines":[
              {
                "dev":"engineX.1",
                "group_id":1
              },
            ]
          },
          {
            "dev":"groupX.2",
            "tokens_reserved":0,
            "use_token_limit":0,
            "tokens_allowed":8,
            "grouped_workqueues":[
              {
                "dev":"wqX.2",
                "mode":"dedicated",
                "size":16,
                "group_id":2,
                "priority":10,
                "block_on_fault":1,
                "type":"user",
                "name":"appX2",
                "threshold":15
              }
            ],
            "grouped_engines":[
              {
                "dev":"engineX.2",
                "group_id":2
              },
            ]
          },
          {
            "dev":"groupX.3",
            "tokens_reserved":0,
            "use_token_limit":0,
            "tokens_allowed":8,
            "grouped_workqueues":[
              {
                "dev":"wqX.3",
                "mode":"dedicated",
                "size":16,
                "group_id":3,
                "priority":10,
                "block_on_fault":1,
                "type":"user",
                "name":"appX3",
                "threshold":15
              }
            ],
            "grouped_engines":[
              {
                "dev":"engineX.3",
                "group_id":3
              },
            ]
          },
        ]
      }
    ]
  dsa-node1.conf: |
    [
      {
        "dev":"dsaX",
        "token_limit":0,
        "groups":[
          {
            "dev":"groupX.0",
            "tokens_reserved":0,
            "use_token_limit":0,
            "tokens_allowed":8,
            "grouped_workqueues":[
              {
                "dev":"wqX.0",
                "mode":"shared",
                "size":16,
                "group_id":0,
                "priority":10,
                "block_on_fault":1,
                "type":"user",
                "name":"appX0",
                "threshold":15
              }
            ],
            "grouped_engines":[
              {
                "dev":"engineX.0",
                "group_id":0
              },
            ]
          },
          {
            "dev":"groupX.1",
            "tokens_reserved":0,
            "use_token_limit":0,
            "tokens_allowed":8,
            "grouped_workqueues":[
              {
                "dev":"wqX.1",
                "mode":"shared",
                "size":16,
                "group_id":1,
                "priority":10,
                "block_on_fault":1,
                "type":"user",
                "name":"appX1",
                "threshold":15
              }
            ],
            "grouped_engines":[
              {
                "dev":"engineX.1",
                "group_id":1
              },
            ]
          },
          {
            "dev":"groupX.2",
            "tokens_reserved":0,
            "use_token_limit":0,
            "tokens_allowed":8,
            "grouped_workqueues":[
              {
                "dev":"wqX.2",
                "mode":"shared",
                "size":16,
                "group_id":2,
                "priority":10,
                "block_on_fault":1,
                "type":"user",
                "name":"appX2",
                "threshold":15
              }
            ],
            "grouped_engines":[
              {
                "dev":"engineX.2",
                "group_id":2
              },
            ]
          },
          {
            "dev":"groupX.3",
            "tokens_reserved":0,
            "use_token_limit":0,
            "tokens_allowed":8,
            "grouped_workqueues":[
              {
                "dev":"wqX.3",
                "mode":"shared",
                "size":16,
                "group_id":3,
                "priority":10,
                "block_on_fault":1,
                "type":"user",
                "name":"appX3",
                "threshold":15
              }
            ],
            "grouped_engines":[
              {
                "dev":"engineX.3",
                "group_id":3
              },
            ]
          },
        ]
      }
    ]
@@ -7,13 +7,23 @@ spec:
    spec:
      initContainers:
      - name: intel-dsa-initcontainer
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        image: intel/intel-dsa-initcontainer:devel
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /sys/devices
          name: sys-devices
        - mountPath: /dsa-init/conf
          name: intel-dsa-config-volume
      volumes:
      - name: sys-devices
        hostPath:
          path: /sys/devices
      - name: intel-dsa-config-volume
        configMap:
          name: intel-dsa-config
@@ -68,6 +68,10 @@ spec:
                description: NodeSelector provides a simple way to constrain device
                  plugin pods to nodes with particular labels.
                type: object
              provisioningConfig:
                description: ProvisioningConfig is a ConfigMap used to pass the configuration
                  into DSA initcontainer.
                type: string
              sharedDevNum:
                description: SharedDevNum is a number of containers that can share
                  the same DSA device.
@@ -34,6 +34,9 @@ type DsaDevicePluginSpec struct {
	// InitImage is an initcontainer image to configure and enable DSA devices and workqueues with accel-config utility
	InitImage string `json:"InitImage,omitempty"`

	// ProvisioningConfig is a ConfigMap used to pass the configuration into DSA initcontainer.
	ProvisioningConfig string `json:"provisioningConfig,omitempty"`

	// SharedDevNum is a number of containers that can share the same DSA device.
	// +kubebuilder:validation:Minimum=1
	SharedDevNum int `json:"sharedDevNum,omitempty"`
@@ -90,6 +90,10 @@ func (r *DsaDevicePlugin) validatePlugin() error {
		return err
	}

	if len(r.Spec.ProvisioningConfig) > 0 && len(r.Spec.InitImage) == 0 {
		return errors.Errorf("ProvisioningConfig is set with no InitImage")
	}

	if len(r.Spec.InitImage) > 0 {
		return validatePluginImage(r.Spec.InitImage, "intel-dsa-initcontainer", dsaMinVersion)
	}
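
For illustration of the new check only: a resource like the sketch below sets provisioningConfig without initImage and would be rejected by validatePlugin() with the error above (the apiVersion, metadata, and image value are assumptions, not part of the commit).

apiVersion: deviceplugin.intel.com/v1
kind: DsaDevicePlugin
metadata:
  name: dsadeviceplugin-invalid         # assumed example name
spec:
  image: intel/intel-dsa-plugin:devel   # assumed plugin image
  provisioningConfig: intel-dsa-config  # set without initImage, so the webhook returns an error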
@@ -82,6 +82,16 @@ func setInitContainer(spec *v1.PodSpec, imageName string) {
		Image:           imageName,
		ImagePullPolicy: "IfNotPresent",
		Name:            "intel-dsa-initcontainer",
		Env: []v1.EnvVar{
			{
				Name: "NODE_NAME",
				ValueFrom: &v1.EnvVarSource{
					FieldRef: &v1.ObjectFieldSelector{
						FieldPath: "spec.nodeName",
					},
				},
			},
		},
		SecurityContext: &v1.SecurityContext{
			Privileged: &yes,
		},
@@ -225,6 +235,25 @@ func (c *controller) NewDaemonSet(rawObj client.Object) *apps.DaemonSet {
				},
			},
		})

		if devicePlugin.Spec.ProvisioningConfig != "" {
			daemonSet.Spec.Template.Spec.Volumes = append(daemonSet.Spec.Template.Spec.Volumes, v1.Volume{
				Name: "intel-dsa-config-volume",
				VolumeSource: v1.VolumeSource{
					ConfigMap: &v1.ConfigMapVolumeSource{
						LocalObjectReference: v1.LocalObjectReference{Name: devicePlugin.Spec.ProvisioningConfig}},
				},
			})

			for i, initcontainer := range daemonSet.Spec.Template.Spec.InitContainers {
				if initcontainer.Name == "intel-dsa-initcontainer" {
					daemonSet.Spec.Template.Spec.InitContainers[i].VolumeMounts = append(daemonSet.Spec.Template.Spec.InitContainers[i].VolumeMounts, v1.VolumeMount{
						Name:      "intel-dsa-config-volume",
						MountPath: "/dsa-init/conf",
					})
				}
			}
		}
	}

	return &daemonSet