sgx: add NFD EPC source, README and deployment YAMLs

Signed-off-by: Mikko Ylinen <mikko.ylinen@intel.com>
Authored by Mikko Ylinen (2020-03-26 15:54:54 +02:00), committed by Ismo Puustinen
parent dd50254146
commit a5f648077e
13 changed files with 568 additions and 0 deletions


@@ -72,6 +72,8 @@ jobs:
- intel-qat-plugin
- intel-vpu-plugin
- intel-deviceplugin-operator
- intel-sgx-plugin
- intel-sgx-initcontainer
# Demo images
- crypto-perf

Dockerfile for the intel-sgx-initcontainer image (new file):

@@ -0,0 +1,53 @@
# CLEAR_LINUX_BASE and CLEAR_LINUX_VERSION can be used to make the build
# reproducible by choosing an image by its hash and installing an OS version
# with --version=:
# CLEAR_LINUX_BASE=clearlinux@sha256:b8e5d3b2576eb6d868f8d52e401f678c873264d349e469637f98ee2adf7b33d4
# CLEAR_LINUX_VERSION="--version=29970"
#
# This is used on release branches before tagging a stable version.
# The master branch defaults to using the latest Clear Linux.
ARG CLEAR_LINUX_BASE=clearlinux/golang:latest
FROM ${CLEAR_LINUX_BASE} as builder
ARG CLEAR_LINUX_VERSION=
RUN swupd update --no-boot-update ${CLEAR_LINUX_VERSION}
ARG DIR=/intel-device-plugins-for-kubernetes
ARG GO111MODULE=on
WORKDIR $DIR
COPY . .
RUN mkdir /install_root \
&& swupd os-install \
${CLEAR_LINUX_VERSION} \
--path /install_root \
--statedir /swupd-state \
--bundles=rsync \
--no-boot-update \
&& rm -rf /install_root/var/lib/swupd/*
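# NOTE: the rsync bundle installed above ends up in the final image and
# provides the /bin/sh and rsync that the generated deploy.sh below needs.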
# Build NFD Feature Detector Hook
RUN cd $DIR/cmd/sgx_epchook && \
GO111MODULE=${GO111MODULE} go install && \
chmod a+x /go/bin/sgx_epchook && \
cd $DIR && \
install -D ${DIR}/LICENSE /install_root/usr/local/share/package-licenses/intel-device-plugins-for-kubernetes/LICENSE && \
scripts/copy-modules-licenses.sh ./cmd/sgx_epchook /install_root/usr/local/share/package-licenses/
FROM scratch as final
COPY --from=builder /install_root /
ARG NFD_HOOK=intel-sgx-epchook
ARG SRC_DIR=/usr/local/bin/sgx-sw
ARG DST_DIR=/etc/kubernetes/node-feature-discovery/source.d/
COPY --from=builder /go/bin/sgx_epchook $SRC_DIR/$NFD_HOOK
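# Generate a self-removing installer script: when the initcontainer runs,
# deploy.sh rsyncs the hook into the NFD source.d directory mounted from the
# host and then removes the copied script from the destination so that NFD
# does not try to execute it as a hook.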
RUN echo -e "#!/bin/sh\n\
rsync -a $SRC_DIR/ $DST_DIR\n\
rm $DST_DIR/deploy.sh\
">> $SRC_DIR/deploy.sh && chmod +x $SRC_DIR/deploy.sh
ENTRYPOINT [ "/usr/local/bin/sgx-sw/deploy.sh" ]

Dockerfile for the intel-sgx-plugin image (new file):

@@ -0,0 +1,38 @@
# CLEAR_LINUX_BASE and CLEAR_LINUX_VERSION can be used to make the build
# reproducible by choosing an image by its hash and installing an OS version
# with --version=:
# CLEAR_LINUX_BASE=clearlinux@sha256:b8e5d3b2576eb6d868f8d52e401f678c873264d349e469637f98ee2adf7b33d4
# CLEAR_LINUX_VERSION="--version=29970"
#
# This is used on release branches before tagging a stable version.
# The master branch defaults to using the latest Clear Linux.
ARG CLEAR_LINUX_BASE=clearlinux/golang:latest
FROM ${CLEAR_LINUX_BASE} as builder
ARG CLEAR_LINUX_VERSION=
RUN swupd update --no-boot-update ${CLEAR_LINUX_VERSION}
ARG DIR=/intel-device-plugins-for-kubernetes
ARG GO111MODULE=on
WORKDIR $DIR
COPY . .
RUN mkdir /install_root \
&& swupd os-install \
${CLEAR_LINUX_VERSION} \
--path /install_root \
--statedir /swupd-state \
--no-boot-update \
&& rm -rf /install_root/var/lib/swupd/*
RUN cd cmd/sgx_plugin && GO111MODULE=${GO111MODULE} go install && cd -
RUN chmod a+x /go/bin/sgx_plugin \
&& install -D /go/bin/sgx_plugin /install_root/usr/local/bin/intel_sgx_device_plugin \
&& install -D ${DIR}/LICENSE /install_root/usr/local/share/package-licenses/intel-device-plugins-for-kubernetes/LICENSE \
&& scripts/copy-modules-licenses.sh ./cmd/sgx_plugin /install_root/usr/local/share/package-licenses/
FROM scratch as final
COPY --from=builder /install_root /
ENTRYPOINT ["/usr/local/bin/intel_sgx_device_plugin"]

cmd/sgx_epchook/main.go (new file):

@@ -0,0 +1,33 @@
// Copyright 2020 Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"fmt"

	"github.com/klauspost/cpuid"
)

// main implements an NFD feature hook: it prints the node's total SGX EPC
// size to stdout as a "sgx.intel.com/epc=<bytes>" extended resource.
func main() {
	if cpuid.CPU.SGX.Available {
		var total uint64
		for _, s := range cpuid.CPU.SGX.EPCSections {
			total += s.EPCSize
		}
		if total != 0 {
			fmt.Printf("sgx.intel.com/epc=%d", total)
		}
	}
}
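For reference, a hypothetical run of the deployed hook on a node with a single 94 MiB EPC section (the same size as in the README's example output below) prints the extended resource without a trailing newline:

```bash
$ /etc/kubernetes/node-feature-discovery/source.d/intel-sgx-epchook
sgx.intel.com/epc=98566144
```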

cmd/sgx_plugin/README.md (new file):

@@ -0,0 +1,147 @@
# Intel Software Guard Extensions (SGX) device plugin for Kubernetes
# Table of Contents
* [Introduction](#introduction)
    * [Modes and Configuration options](#modes-and-configuration-options)
* [Installation](#installation)
    * [Prerequisites](#prerequisites)
    * [Getting the source code](#getting-the-source-code)
    * [Verify node kubelet config](#verify-node-kubelet-config)
    * [Deploying as a DaemonSet](#deploying-as-a-daemonset)
        * [Build the plugin and EPC source images](#build-the-plugin-and-epc-source-images)
        * [Deploy the DaemonSet](#deploy-the-daemonset)
        * [Verify SGX device plugin is registered on master](#verify-sgx-device-plugin-is-registered-on-master)
    * [Deploying by hand](#deploying-by-hand)
        * [Build SGX device plugin](#build-sgx-device-plugin)
        * [Deploy SGX plugin](#deploy-sgx-plugin)
# Introduction
**Note:** This component is still a work in progress. The SGX device plugin can already be tested to run simple enclaves,
but the full end-to-end deployment (including SGX remote attestation) is not yet finished. See
the open issues for details.

This Intel SGX device plugin provides support for Intel SGX trusted execution environments (TEEs) under Kubernetes.
## Modes and Configuration options
The SGX plugin can take a number of command line arguments, summarised in the following table:
| Flag | Argument | Meaning |
|:---- |:-------- |:------- |
| -enclave-limit | int | the number of containers per node allowed to use `/dev/sgx/enclave` (default: `20`) |
| -provision-limit | int | the number of containers per node allowed to use `/dev/sgx/provision` (default: `20`) |
The plugin also accepts a number of other arguments related to logging. Please use the `-h` option to see
the complete list of logging-related options.
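As a minimal sketch of overriding the defaults (assuming the DaemonSet YAML added in this commit, which does not set `args` by default; the values here are illustrative):

```yaml
containers:
- name: intel-sgx-plugin
  image: intel/intel-sgx-plugin:devel
  args:
  - "-enclave-limit=50"
  - "-provision-limit=1"
```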
# Installation
The below sections cover how to obtain, build and install this component.
The component can be installed either using a DaemonSet or running 'by hand' on each node.
## Prerequisites
The component has the same basic dependencies as the
[generic plugin framework dependencies](../../README.md#about).
The SGX plugin requires the Linux kernel SGX drivers to be available. These drivers
are currently available as RFC patches on the Linux Kernel Mailing List.
## Getting the source code
```bash
$ mkdir -p $(go env GOPATH)/src/github.com/intel
$ git clone https://github.com/intel/intel-device-plugins-for-kubernetes $(go env GOPATH)/src/github.com/intel/intel-device-plugins-for-kubernetes
```
## Verify node kubelet config
Every node that will be running the plugin must have the
[kubelet device-plugins](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/)
configured. For each node, check that the kubelet device plugin socket exists:
```bash
$ ls /var/lib/kubelet/device-plugins/kubelet.sock
/var/lib/kubelet/device-plugins/kubelet.sock
```
## Deploying as a DaemonSet
To deploy the plugin as a DaemonSet, you first need to build a container image for the plugin and
ensure that it is visible to your nodes.
### Build the plugin and EPC source images
The following uses `docker` to build local container images called `intel/intel-sgx-plugin`
and `intel/intel-sgx-initcontainer` with the tag `devel`. The image build tool can be changed from the
default `docker` by setting the `BUILDER` argument in the [Makefile](../../Makefile).
```bash
$ cd $(go env GOPATH)/src/github.com/intel/intel-device-plugins-for-kubernetes
$ make intel-sgx-plugin
...
Successfully tagged intel/intel-sgx-plugin:devel
$ make intel-sgx-initcontainer
...
Successfully tagged intel/intel-sgx-initcontainer:devel
```
### Deploy the DaemonSet
Deploying the plugin involves deploying the
[NFD EPC Source InitContainer Job](../../deployments/sgx_plugin/base/intel-sgx-hookinstall.yaml), the
[DaemonSet YAML](../../deployments/sgx_plugin/base/intel-sgx-plugin.yaml), and node-feature-discovery
with the necessary configuration.
There is a kustomization for deploying everything:
```bash
$ cd $(go env GOPATH)/src/github.com/intel/intel-device-plugins-for-kubernetes
$ kubectl apply -k deployments/sgx_plugin/overlays/nfd
```
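Once applied, you can check that the pods came up (pod names and ages below are illustrative and will vary):

```bash
$ kubectl get pods -n node-feature-discovery
NAME                          READY   STATUS    RESTARTS   AGE
nfd-master-5f4b6b8d65-xxxxx   1/1     Running   0          1m
nfd-worker-xxxxx              1/1     Running   0          1m
$ kubectl get pods -l app=intel-sgx-plugin
NAME                     READY   STATUS    RESTARTS   AGE
intel-sgx-plugin-xxxxx   1/1     Running   0          1m
```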
### Verify SGX device plugin is registered on master
Verification of the plugin deployment and detection of SGX hardware can be confirmed by
examining the resource allocations on the nodes:
```bash
$ kubectl describe node <node name> | grep sgx.intel.com
nfd.node.kubernetes.io/extended-resources: sgx.intel.com/epc
sgx.intel.com/enclave: 20
sgx.intel.com/epc: 98566144
sgx.intel.com/provision: 20
sgx.intel.com/enclave: 20
sgx.intel.com/epc: 98566144
sgx.intel.com/provision: 20
sgx.intel.com/enclave 1 1
sgx.intel.com/epc 400 400
sgx.intel.com/provision 1 1
```
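The last three lines above come from a pod that has requested SGX resources. A hypothetical pod spec producing such an allocation (the pod name is made up and the image name is a placeholder):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: sgx-demo                      # hypothetical name
spec:
  containers:
  - name: sgx-demo
    image: <your-sgx-workload-image>  # placeholder image
    resources:
      limits:
        sgx.intel.com/enclave: 1
        sgx.intel.com/epc: "400"
        sgx.intel.com/provision: 1
```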
## Deploying by hand
For development purposes, it is sometimes convenient to deploy the plugin 'by hand' on a node.
In this case, you do not need to build the complete container image, and can build just the plugin.
### Build SGX device plugin
```bash
$ cd $(go env GOPATH)/src/github.com/intel/intel-device-plugins-for-kubernetes
$ make sgx_plugin
```
### Deploy SGX plugin
Deploy the plugin on a node by running it as `root`. The below is just an example - modify the
parameters as necessary for your setup:
```bash
$ sudo $(go env GOPATH)/src/github.com/intel/intel-device-plugins-for-kubernetes/cmd/sgx_plugin/sgx_plugin \
-enclave-limit 50 -provision-limit 1 -v 2
I0626 20:33:01.414446 964346 server.go:219] Start server for provision at: /var/lib/kubelet/device-plugins/sgx.intel.com-provision.sock
I0626 20:33:01.414640 964346 server.go:219] Start server for enclave at: /var/lib/kubelet/device-plugins/sgx.intel.com-enclave.sock
I0626 20:33:01.417315 964346 server.go:237] Device plugin for provision registered
I0626 20:33:01.417748 964346 server.go:237] Device plugin for enclave registered
```

deployments/sgx_plugin/base/intel-sgx-hookinstall.yaml (new file):

@@ -0,0 +1,27 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: intel-sgx-hookinstall-job
  labels:
    jobgroup: intel-sgx-hookinstall-job
spec:
  template:
    metadata:
      labels:
        jobgroup: intel-sgx-hookinstall-job
    spec:
      restartPolicy: Never
      containers:
      - name: intel-sgx-hookinstall-job
        image: intel/intel-sgx-initcontainer:devel
        imagePullPolicy: IfNotPresent
        securityContext:
          readOnlyRootFilesystem: true
        volumeMounts:
        - mountPath: /etc/kubernetes/node-feature-discovery/source.d/
          name: nfd-source-hooks
      volumes:
      - name: nfd-source-hooks
        hostPath:
          path: /etc/kubernetes/node-feature-discovery/source.d/
          type: DirectoryOrCreate
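Once the Job has run, the hook installation can be checked as follows (output is illustrative):

```bash
$ kubectl get job intel-sgx-hookinstall-job
NAME                        COMPLETIONS   DURATION   AGE
intel-sgx-hookinstall-job   1/1           4s         1m
$ ls /etc/kubernetes/node-feature-discovery/source.d/  # on the node
intel-sgx-epchook
```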

deployments/sgx_plugin/base/intel-sgx-plugin.yaml (new file):

@@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: intel-sgx-plugin
  labels:
    app: intel-sgx-plugin
spec:
  selector:
    matchLabels:
      app: intel-sgx-plugin
  template:
    metadata:
      labels:
        app: intel-sgx-plugin
    spec:
      containers:
      - name: intel-sgx-plugin
        image: intel/intel-sgx-plugin:devel
        securityContext:
          readOnlyRootFilesystem: true
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: kubeletsockets
          mountPath: /var/lib/kubelet/device-plugins
        - name: sgxdevices
          mountPath: /dev/sgx
          readOnly: true
      volumes:
      - name: kubeletsockets
        hostPath:
          path: /var/lib/kubelet/device-plugins
      - name: sgxdevices
        hostPath:
          path: /dev/sgx
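Note that the DaemonSet mounts `/dev/sgx` from the host, so the kernel SGX driver must already expose the device nodes there. With the RFC driver the check might look like:

```bash
$ ls /dev/sgx
enclave  provision
```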

deployments/sgx_plugin/base/kustomization.yaml (new file):

@@ -0,0 +1,3 @@
resources:
- intel-sgx-hookinstall.yaml
- intel-sgx-plugin.yaml

deployments/sgx_plugin/kustomization.yaml (new file):

@@ -0,0 +1,2 @@
bases:
- base

deployments/sgx_plugin/overlays/nfd/kustomization.yaml (new file):

@@ -0,0 +1,14 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
generatorOptions:
  disableNameSuffixHash: true
bases:
- ../../base
resources:
- nfd-master.yaml
- nfd-worker-daemonset.yaml
configMapGenerator:
- name: nfd-worker-config
  namespace: node-feature-discovery
  files:
  - nfd-worker.conf

deployments/sgx_plugin/overlays/nfd/nfd-master.yaml (new file):

@@ -0,0 +1,124 @@
apiVersion: v1
kind: Namespace
metadata:
  name: node-feature-discovery # NFD namespace
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfd-master
  namespace: node-feature-discovery
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: nfd-master
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  # access to nodes/status is needed by the --resource-labels command line
  # flag, which this deployment uses to create extended resources
  - nodes/status
  verbs:
  - get
  - patch
  - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: nfd-master
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nfd-master
subjects:
- kind: ServiceAccount
  name: nfd-master
  namespace: node-feature-discovery
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nfd-master
  name: nfd-master
  namespace: node-feature-discovery
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfd-master
  template:
    metadata:
      labels:
        app: nfd-master
    spec:
      serviceAccount: nfd-master
      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 1
            preference:
              matchExpressions:
              - key: "node-role.kubernetes.io/master"
                operator: In
                values: [""]
      tolerations:
      - key: "node-role.kubernetes.io/master"
        operator: "Equal"
        value: ""
        effect: "NoSchedule"
      containers:
      - env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        image: quay.io/kubernetes_incubator/node-feature-discovery:v0.6.0
        name: nfd-master
        command:
        - "nfd-master"
        args:
        - "--resource-labels=sgx.intel.com/epc"
        - "--extra-label-ns=sgx.intel.com"
        ## Enable TLS authentication
        ## The example below assumes that the root certificate named ca.crt is
        ## stored in a ConfigMap named nfd-ca-cert, and the TLS authentication
        ## credentials are stored in a TLS Secret named nfd-master-cert.
        ## Additional hardening can be enabled by specifying --verify-node-name
        ## in args, in which case every nfd-worker requires an individual
        ## node-specific TLS certificate.
        # args:
        # - "--ca-file=/etc/kubernetes/node-feature-discovery/trust/ca.crt"
        # - "--key-file=/etc/kubernetes/node-feature-discovery/certs/tls.key"
        # - "--cert-file=/etc/kubernetes/node-feature-discovery/certs/tls.crt"
        # volumeMounts:
        # - name: nfd-ca-cert
        #   mountPath: "/etc/kubernetes/node-feature-discovery/trust"
        #   readOnly: true
        # - name: nfd-master-cert
        #   mountPath: "/etc/kubernetes/node-feature-discovery/certs"
        #   readOnly: true
      # volumes:
      # - name: nfd-ca-cert
      #   configMap:
      #     name: nfd-ca-cert
      # - name: nfd-master-cert
      #   secret:
      #     secretName: nfd-master-cert
---
apiVersion: v1
kind: Service
metadata:
  name: nfd-master
  namespace: node-feature-discovery
spec:
  selector:
    app: nfd-master
  ports:
  - protocol: TCP
    port: 8080
  type: ClusterIP

deployments/sgx_plugin/overlays/nfd/nfd-worker-daemonset.yaml (new file):

@@ -0,0 +1,85 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app: nfd-worker
  name: nfd-worker
  namespace: node-feature-discovery
spec:
  selector:
    matchLabels:
      app: nfd-worker
  template:
    metadata:
      labels:
        app: nfd-worker
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        image: quay.io/kubernetes_incubator/node-feature-discovery:v0.6.0
        name: nfd-worker
        command:
        - "nfd-worker"
        args:
        - "--sleep-interval=60s"
        - "--server=nfd-master:8080"
        ## Enable TLS authentication (1/3)
        ## The example below assumes that the root certificate named ca.crt is
        ## stored in a ConfigMap named nfd-ca-cert, and the TLS authentication
        ## credentials are stored in a TLS Secret named nfd-worker-cert
        # - "--ca-file=/etc/kubernetes/node-feature-discovery/trust/ca.crt"
        # - "--key-file=/etc/kubernetes/node-feature-discovery/certs/tls.key"
        # - "--cert-file=/etc/kubernetes/node-feature-discovery/certs/tls.crt"
        volumeMounts:
        - name: host-boot
          mountPath: "/host-boot"
          readOnly: true
        - name: host-os-release
          mountPath: "/host-etc/os-release"
          readOnly: true
        - name: host-sys
          mountPath: "/host-sys"
        - name: source-d
          mountPath: "/etc/kubernetes/node-feature-discovery/source.d/"
        - name: features-d
          mountPath: "/etc/kubernetes/node-feature-discovery/features.d/"
        - name: nfd-worker-config-cm
          mountPath: "/etc/kubernetes/node-feature-discovery/"
        ## Enable TLS authentication (2/3)
        # - name: nfd-ca-cert
        #   mountPath: "/etc/kubernetes/node-feature-discovery/trust"
        #   readOnly: true
        # - name: nfd-worker-cert
        #   mountPath: "/etc/kubernetes/node-feature-discovery/certs"
        #   readOnly: true
      volumes:
      - name: host-boot
        hostPath:
          path: "/boot"
      - name: host-os-release
        hostPath:
          path: "/etc/os-release"
      - name: host-sys
        hostPath:
          path: "/sys"
      - name: source-d
        hostPath:
          path: "/etc/kubernetes/node-feature-discovery/source.d/"
      - name: features-d
        hostPath:
          path: "/etc/kubernetes/node-feature-discovery/features.d/"
      - name: nfd-worker-config-cm
        configMap:
          name: nfd-worker-config
      ## Enable TLS authentication (3/3)
      # - name: nfd-ca-cert
      #   configMap:
      #     name: nfd-ca-cert
      # - name: nfd-worker-cert
      #   secret:
      #     secretName: nfd-worker-cert

deployments/sgx_plugin/overlays/nfd/nfd-worker.conf (new file):

@@ -0,0 +1,6 @@
sources:
  cpu:
    cpuid:
      attributeWhitelist:
      - "SGX"
      - "SGXLC"