initial commit for vm-import-controller

Gaurav Mehta 2022-08-10 11:08:50 +10:00
commit 112008bb81
50 changed files with 8292 additions and 0 deletions

94
README.md Normal file

@ -0,0 +1,94 @@
# vm-import-controller
vm-import-controller is an add-on that helps migrate VM workloads from other source clusters to an existing Harvester cluster.
Currently, the following source providers will be supported:
* vmware
* openstack
## API
The vm-import-controller introduces two CRDs:
### Sources
Sources allow users to define valid source clusters.
For example:
```yaml
apiVersion: source.harvesterhci.io/v1beta1
kind: Vmware
metadata:
  name: vcsim
  namespace: default
spec:
  endpoint: "https://vcsim/sdk"
  dc: "DCO"
  credentials:
    name: vsphere-credentials
    namespace: default
```
The referenced secret contains the credentials for the vCenter endpoint:
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: vsphere-credentials
  namespace: default
stringData:
  "username": "user"
  "password": "password"
```
As part of the reconcile process, the controller will log in to the vCenter endpoint and verify that the `dc` specified in the source spec is valid.
Once this check passes, the source is marked ready and can be used for VM migrations:
```shell
$ kubectl get vmware.source
NAME STATUS
vcsim clusterReady
```
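The readiness check above is essentially a vCenter login followed by a datacenter lookup. The snippet below is a minimal sketch of that idea using govmomi (already a dependency of this module); the helper name, its arguments, and the insecure-TLS flag are illustrative assumptions, not the controller's actual code.
```go
package vmware

import (
	"context"
	"fmt"
	"net/url"

	"github.com/vmware/govmomi"
	"github.com/vmware/govmomi/find"
	"github.com/vmware/govmomi/vim25/soap"
)

// checkDatacenter is a hypothetical helper: log in to the vCenter endpoint
// and verify that the datacenter named in the source spec exists.
func checkDatacenter(ctx context.Context, endpoint, dc, username, password string) error {
	u, err := soap.ParseURL(endpoint)
	if err != nil {
		return fmt.Errorf("error parsing endpoint %q: %v", endpoint, err)
	}
	u.User = url.UserPassword(username, password)

	// The second argument allows self-signed certificates; illustration only.
	c, err := govmomi.NewClient(ctx, u, true)
	if err != nil {
		return fmt.Errorf("error logging in to vCenter: %v", err)
	}
	defer c.Logout(ctx)

	// A failed lookup means the `dc` in the source spec is not valid.
	if _, err := find.NewFinder(c.Client).Datacenter(ctx, dc); err != nil {
		return fmt.Errorf("datacenter %q not found: %v", dc, err)
	}
	return nil
}
```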
### ImportJob
The ImportJob CRD provides a way for users to define the source VM and map it to the source cluster from which the VM export and import is performed.
A sample import job looks as follows:
```yaml
apiVersion: importjob.harvesterhci.io/v1beta1
kind: VirtualMachine
metadata:
  name: alpine-export-test
  namespace: default
spec:
  virtualMachineName: "alpine-export-test"
  networkMapping:
  - sourceNetwork: "dvSwitch 1"
    destinationNetwork: "default/vlan1"
  - sourceNetwork: "dvSwitch 2"
    destinationNetwork: "default/vlan2"
  sourceCluster:
    name: vcsim
    namespace: default
    kind: Vmware
    apiVersion: source.harvesterhci.io/v1beta1
```
This will trigger the controller to export the VM named "alpine-export-test" from the VMware source vcsim, process it, and recreate it in the Harvester cluster.
This can take a while depending on the size of the virtual machine, but users should see a `VirtualMachineImage` created for each disk in the defined virtual machine.
The list of items in `networkMapping` defines how the source network interfaces are mapped to Harvester networks.
If a match is not found, each unmatched network interface is attached to the default `managementNetwork`.
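As a rough illustration of the mapping rule above, the lookup behaves like the sketch below; the helper itself is hypothetical, and only the `NetworkMapping` shape mirrors the ImportJob API.
```go
package main

import "fmt"

// NetworkMapping mirrors the importjob.harvesterhci.io/v1beta1 type: a source
// network name mapped to a Harvester network in "namespace/name" form.
type NetworkMapping struct {
	SourceNetwork      string
	DestinationNetwork string
}

// lookupNetwork is a hypothetical helper: return the mapped Harvester network
// for a source interface, or report that no mapping entry matched.
func lookupNetwork(mappings []NetworkMapping, sourceNetwork string) (string, bool) {
	for _, m := range mappings {
		if m.SourceNetwork == sourceNetwork {
			return m.DestinationNetwork, true
		}
	}
	return "", false
}

func main() {
	mappings := []NetworkMapping{
		{SourceNetwork: "dvSwitch 1", DestinationNetwork: "default/vlan1"},
		{SourceNetwork: "dvSwitch 2", DestinationNetwork: "default/vlan2"},
	}
	if dest, ok := lookupNetwork(mappings, "dvSwitch 3"); ok {
		fmt.Println("mapped to", dest)
	} else {
		fmt.Println("no mapping found; the interface falls back to the management network")
	}
}
```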
Once the virtual machine has been imported successfully, the object will reflect the status:
```shell
$ kubectl get virtualmachine.importjob
NAME STATUS
alpine-export-test virtualMachineRunning
```
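For each exported disk, the controller (later in this commit) submits a Harvester `VirtualMachineImage` that points back at the controller's built-in HTTP server. The condensed sketch below mirrors that code; the helper name and the `serverAddress`/`port` parameters are placeholders for the controller's internal `server.Address()`/`server.DefaultPort()`.
```go
package example

import (
	"fmt"

	harvesterv1beta1 "github.com/harvester/harvester/pkg/apis/harvesterhci.io/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// buildImage condenses what the import controller does per exported disk:
// create a VirtualMachineImage with a "download" source pointing at the
// disk served over HTTP by the vm-import-controller itself.
func buildImage(vmNamespace, vmName, diskName, serverAddress string, port int) *harvesterv1beta1.VirtualMachineImage {
	return &harvesterv1beta1.VirtualMachineImage{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "image-",
			Namespace:    vmNamespace,
		},
		Spec: harvesterv1beta1.VirtualMachineImageSpec{
			DisplayName: fmt.Sprintf("vm-import-%s-%s", vmName, diskName),
			URL:         fmt.Sprintf("http://%s:%d/%s", serverAddress, port, diskName),
			SourceType:  "download",
		},
	}
}
```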

4
generate.go Normal file

@ -0,0 +1,4 @@
//go:generate go run pkg/codegen/cleanup/main.go
//go:generate go run pkg/codegen/main.go
package main

160
go.mod Normal file

@ -0,0 +1,160 @@
module github.com/harvester/vm-import-controller
go 1.18
require (
github.com/harvester/harvester v1.0.2
github.com/onsi/ginkgo/v2 v2.1.4
github.com/onsi/gomega v1.19.0
github.com/ory/dockertest/v3 v3.9.1
github.com/rancher/lasso v0.0.0-20210709145333-6c6cd7fd6607
github.com/rancher/wrangler v1.0.0
github.com/sirupsen/logrus v1.9.0
github.com/stretchr/testify v1.7.1
github.com/vmware/govmomi v0.29.0
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
k8s.io/api v0.24.2
k8s.io/apiextensions-apiserver v0.24.2
k8s.io/apimachinery v0.24.2
k8s.io/client-go v12.0.0+incompatible
kubevirt.io/api v0.0.0-20220430221853-33880526e414
kubevirt.io/kubevirt v0.49.0
sigs.k8s.io/controller-runtime v0.12.2
)
require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
github.com/containerd/containerd v1.6.6 // indirect
github.com/containerd/continuity v0.3.0 // indirect
github.com/coreos/prometheus-operator v0.38.1-0.20200424145508-7e176fda06cc // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/cli v20.10.14+incompatible // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/docker v20.10.7+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/emicklei/go-restful v2.15.0+incompatible // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-kit/kit v0.10.0 // indirect
github.com/go-logfmt/logfmt v0.5.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/zapr v1.2.0 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.6 // indirect
github.com/go-openapi/spec v0.20.3 // indirect
github.com/go-openapi/swag v0.21.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/mock v1.6.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.6 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/gorilla/websocket v1.4.2 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/k8snetworkplumbingwg/network-attachment-definition-client v0.0.0-20200331171230-d50e42f2b669 // indirect
github.com/kubernetes-csi/external-snapshotter/v2 v2.1.1 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mitchellh/mapstructure v1.4.2 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
github.com/opencontainers/runc v1.1.2 // indirect
github.com/openshift/api v0.0.0 // indirect
github.com/openshift/client-go v0.0.0 // indirect
github.com/openshift/custom-resource-status v1.1.2 // indirect
github.com/pborman/uuid v1.2.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.1 // indirect
golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/tools v0.1.10 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
google.golang.org/grpc v1.43.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/code-generator v0.24.2 // indirect
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 // indirect
k8s.io/klog/v2 v2.60.1 // indirect
k8s.io/kube-aggregator v0.24.0 // indirect
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
kubevirt.io/client-go v0.49.0 // indirect
kubevirt.io/containerized-data-importer-api v1.47.0 // indirect
kubevirt.io/controller-lifecycle-operator-sdk v0.2.1 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
replace (
github.com/docker/distribution => github.com/docker/distribution v0.0.0-20191216044856-a8371794149d
github.com/docker/docker => github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce
github.com/openshift/api => github.com/openshift/api v0.0.0-20191219222812-2987a591a72c
github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20200521150516-05eb9880269c
github.com/operator-framework/operator-lifecycle-manager => github.com/operator-framework/operator-lifecycle-manager v0.0.0-20190128024246-5eb7ae5bdb7a
github.com/rancher/rancher/pkg/apis => github.com/rancher/rancher/pkg/apis v0.0.0-20211208233239-77392a65423d
github.com/rancher/rancher/pkg/client => github.com/rancher/rancher/pkg/client v0.0.0-20211208233239-77392a65423d
k8s.io/api => k8s.io/api v0.20.2 // Dropped to v0.20.2 to handle kubevirt deps for installing CRDs
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.21.5
k8s.io/apimachinery => k8s.io/apimachinery v0.21.5
k8s.io/apiserver => k8s.io/apiserver v0.21.5
k8s.io/cli-runtime => k8s.io/cli-runtime v0.21.5
k8s.io/client-go => k8s.io/client-go v0.20.2 // Dropped to v0.20.2 to handle kubevirt deps for installing CRDs
k8s.io/cloud-provider => k8s.io/cloud-provider v0.21.5
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.21.5
k8s.io/code-generator => k8s.io/code-generator v0.21.5
k8s.io/component-base => k8s.io/component-base v0.21.5
k8s.io/component-helpers => k8s.io/component-helpers v0.21.5
k8s.io/controller-manager => k8s.io/controller-manager v0.21.5
k8s.io/cri-api => k8s.io/cri-api v0.21.5
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.21.5
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.21.5
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.21.5
k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7
k8s.io/kube-proxy => k8s.io/kube-proxy v0.21.5
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.21.5
k8s.io/kubectl => k8s.io/kubectl v0.21.5
k8s.io/kubelet => k8s.io/kubelet v0.21.5
k8s.io/kubernetes => k8s.io/kubernetes v1.21.5
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.21.5
k8s.io/metrics => k8s.io/metrics v0.21.5
k8s.io/mount-utils => k8s.io/mount-utils v0.21.5
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.21.5
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.5
kubevirt.io/api => github.com/kubevirt/api v0.49.0
kubevirt.io/client-go => github.com/kubevirt/client-go v0.49.0
kubevirt.io/containerized-data-importer-api => kubevirt.io/containerized-data-importer-api v1.41.0
sigs.k8s.io/structured-merge-diff => sigs.k8s.io/structured-merge-diff v0.0.0-20190302045857-e85c7b244fd2
)

2855
go.sum Normal file

File diff suppressed because it is too large.

50
main.go Normal file

@ -0,0 +1,50 @@
package main
import (
"log"
harvesterv1beta1 "github.com/harvester/harvester/pkg/apis/harvesterhci.io/v1beta1"
source "github.com/harvester/vm-import-controller/pkg/apis/source.harvesterhci.io/v1beta1"
"github.com/harvester/vm-import-controller/pkg/controllers"
"github.com/harvester/vm-import-controller/pkg/server"
"github.com/rancher/wrangler/pkg/signals"
"golang.org/x/sync/errgroup"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/clientcmd"
kubevirtv1 "kubevirt.io/api/core/v1"
)
func init() {
scheme := runtime.NewScheme()
source.AddToScheme(scheme)
harvesterv1beta1.AddToScheme(scheme)
kubevirtv1.AddToScheme(scheme)
}
func main() {
ctx := signals.SetupSignalContext()
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
configOverrides := &clientcmd.ConfigOverrides{}
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
config, err := kubeConfig.ClientConfig()
if err != nil {
log.Fatal(err)
}
eg, egctx := errgroup.WithContext(ctx)
eg.Go(func() error {
return controllers.Start(egctx, config)
})
eg.Go(func() error {
return server.NewServer(egctx)
})
err = eg.Wait()
if err != nil {
log.Fatal(err)
}
}


@ -0,0 +1,21 @@
package common
import (
"github.com/rancher/wrangler/pkg/condition"
v1 "k8s.io/api/core/v1"
)
type Condition struct {
// Type of the condition.
Type condition.Cond `json:"type"`
// Status of the condition, one of True, False, Unknown.
Status v1.ConditionStatus `json:"status"`
// The last time this condition was updated.
LastUpdateTime string `json:"lastUpdateTime,omitempty"`
// Last time the condition transitioned from one status to another.
LastTransitionTime string `json:"lastTransitionTime,omitempty"`
// The reason for the condition's last transition.
Reason string `json:"reason,omitempty"`
// Human-readable message indicating details about last transition
Message string `json:"message,omitempty"`
}
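The controllers later in this commit manipulate this type through helpers such as `util.ConditionExists` and `util.MergeConditions`, which are not shown in this view; the sketch below only guesses at their shape from the call sites and is not the package's actual implementation.
```go
package util

import (
	"github.com/harvester/vm-import-controller/pkg/apis/common"
	"github.com/rancher/wrangler/pkg/condition"
	v1 "k8s.io/api/core/v1"
)

// ConditionExists reports whether a condition of the given type is present
// with the given status (signature inferred from call sites only).
func ConditionExists(conditions []common.Condition, c condition.Cond, status v1.ConditionStatus) bool {
	for _, cond := range conditions {
		if cond.Type == c && cond.Status == status {
			return true
		}
	}
	return false
}

// MergeConditions folds newly generated conditions into the existing list.
// The real helper may also replace or de-duplicate entries; this is a sketch.
func MergeConditions(existing, added []common.Condition) []common.Condition {
	return append(existing, added...)
}
```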


@ -0,0 +1,21 @@
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
// +k8s:deepcopy-gen=package
// +groupName=importjob.harvesterhci.io
package v1beta1


@ -0,0 +1,75 @@
package v1beta1
import (
"github.com/harvester/vm-import-controller/pkg/apis/common"
"github.com/rancher/wrangler/pkg/condition"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type VirtualMachine struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec VirtualMachineImportSpec `json:"spec"`
Status VirtualMachineImportStatus `json:"status,omitempty"`
}
// VirtualMachineImportSpec is used to create kubevirt VirtualMachines by exporting VMs from source clusters.
type VirtualMachineImportSpec struct {
SourceCluster corev1.ObjectReference `json:"sourceCluster"`
VirtualMachineName string `json:"virtualMachineName"`
Folder string `json:"folder,omitempty"`
Mapping []NetworkMapping `json:"networkMapping,omitempty"` // If empty, the new VirtualMachine will be mapped to the management network
}
// VirtualMachineImportStatus tracks the status of the VirtualMachine export from source and import into the Harvester cluster
type VirtualMachineImportStatus struct {
Status ImportStatus `json:"importStatus,omitempty"`
DiskImportStatus []DiskInfo `json:"diskImportStatus,omitempty"`
ImportConditions []common.Condition `json:"importConditions,omitempty"`
NewVirtualMachine string `json:"newVirtualMachine,omitempty"`
}
// DiskInfo contains the information about an associated disk in the import source.
// VMs may have multiple disks, and each disk will be represented as a DiskInfo object.
// DiskInfo is used to track the following tasks:
// * disk format conversion
// * path to the temporary disk location
// * http route to the temporary disk path, as this will be exposed as a URL for the VirtualMachineImage
// * virtualmachineimage created from the disk route and associated file
// * conditions to track the progress of disk conversion and virtualmachineimport progress
type DiskInfo struct {
Name string `json:"diskName"`
DiskSize int64 `json:"diskSize"`
DiskLocalPath string `json:"diskLocalPath,omitempty"`
DiskRoute string `json:"diskRoute,omitempty"`
VirtualMachineImage string `json:"VirtualMachineImage,omitempty"`
DiskConditions []common.Condition `json:"diskConditions,omitempty"`
}
type NetworkMapping struct {
SourceNetwork string `json:"sourceNetwork"`
DestinationNetwork string `json:"destinationNetwork"`
}
type ImportStatus string
const (
SourceReady ImportStatus = "sourceReady"
DisksExported ImportStatus = "disksExported"
DiskImagesSubmitted ImportStatus = "diskImageSubmitted"
DiskImagesReady ImportStatus = "diskImagesReady"
DiskImagesFailed ImportStatus = "diskImageFailed"
VirtualMachineCreated ImportStatus = "virtualMachineCreated"
VirtualMachineRunning ImportStatus = "virtualMachineRunning"
VirtualMachinePoweringOff condition.Cond = "VMPoweringOff"
VirtualMachinePoweredOff condition.Cond = "VMPoweredOff"
VirtualMachineExported condition.Cond = "VMExported"
VirtualMachineImageSubmitted condition.Cond = "VirtualMachineImageSubmitted"
VirtualMachineImageReady condition.Cond = "VirtualMachineImageReady"
VirtualMachineImageFailed condition.Cond = "VirtualMachineImageFailed"
)


@ -0,0 +1,175 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
package v1beta1
import (
common "github.com/harvester/vm-import-controller/pkg/apis/common"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DiskInfo) DeepCopyInto(out *DiskInfo) {
*out = *in
if in.DiskConditions != nil {
in, out := &in.DiskConditions, &out.DiskConditions
*out = make([]common.Condition, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskInfo.
func (in *DiskInfo) DeepCopy() *DiskInfo {
if in == nil {
return nil
}
out := new(DiskInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkMapping) DeepCopyInto(out *NetworkMapping) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkMapping.
func (in *NetworkMapping) DeepCopy() *NetworkMapping {
if in == nil {
return nil
}
out := new(NetworkMapping)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualMachine) DeepCopyInto(out *VirtualMachine) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachine.
func (in *VirtualMachine) DeepCopy() *VirtualMachine {
if in == nil {
return nil
}
out := new(VirtualMachine)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VirtualMachine) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualMachineImportSpec) DeepCopyInto(out *VirtualMachineImportSpec) {
*out = *in
out.SourceCluster = in.SourceCluster
if in.Mapping != nil {
in, out := &in.Mapping, &out.Mapping
*out = make([]NetworkMapping, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineImportSpec.
func (in *VirtualMachineImportSpec) DeepCopy() *VirtualMachineImportSpec {
if in == nil {
return nil
}
out := new(VirtualMachineImportSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualMachineImportStatus) DeepCopyInto(out *VirtualMachineImportStatus) {
*out = *in
if in.DiskImportStatus != nil {
in, out := &in.DiskImportStatus, &out.DiskImportStatus
*out = make([]DiskInfo, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ImportConditions != nil {
in, out := &in.ImportConditions, &out.ImportConditions
*out = make([]common.Condition, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineImportStatus.
func (in *VirtualMachineImportStatus) DeepCopy() *VirtualMachineImportStatus {
if in == nil {
return nil
}
out := new(VirtualMachineImportStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualMachineList) DeepCopyInto(out *VirtualMachineList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]VirtualMachine, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineList.
func (in *VirtualMachineList) DeepCopy() *VirtualMachineList {
if in == nil {
return nil
}
out := new(VirtualMachineList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VirtualMachineList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}


@ -0,0 +1,42 @@
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
// +k8s:deepcopy-gen=package
// +groupName=importjob.harvesterhci.io
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// VirtualMachineList is a list of VirtualMachine resources
type VirtualMachineList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []VirtualMachine `json:"items"`
}
func NewVirtualMachine(namespace, name string, obj VirtualMachine) *VirtualMachine {
obj.APIVersion, obj.Kind = SchemeGroupVersion.WithKind("VirtualMachine").ToAPIVersionAndKind()
obj.Name = name
obj.Namespace = namespace
return &obj
}


@ -0,0 +1,60 @@
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
// +k8s:deepcopy-gen=package
// +groupName=importjob.harvesterhci.io
package v1beta1
import (
importjob "github.com/harvester/vm-import-controller/pkg/apis/importjob.harvesterhci.io"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
VirtualMachineResourceName = "virtualmachines"
)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: importjob.GroupName, Version: "v1beta1"}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&VirtualMachine{},
&VirtualMachineList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}


@ -0,0 +1,24 @@
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
package importjob
const (
// Package-wide consts from generator "zz_generated_register".
GroupName = "importjob.harvesterhci.io"
)


@ -0,0 +1,29 @@
package v1beta1
import (
"context"
importjob "github.com/harvester/vm-import-controller/pkg/apis/importjob.harvesterhci.io/v1beta1"
corev1 "k8s.io/api/core/v1"
kubevirt "kubevirt.io/api/core/v1"
)
type SourceInterface interface {
ClusterStatus() ClusterStatus
SecretReference() corev1.SecretReference
GenerateClient(ctx context.Context, secret *corev1.Secret) (VirtualMachineOperations, error)
}
type VirtualMachineOperations interface {
// ExportVirtualMachine is responsible for generating the raw images for each disk associated with the VirtualMachine.
// Any image format conversion will be performed by the VM operation.
ExportVirtualMachine(vm *importjob.VirtualMachine) error
// PowerOffVirtualMachine is responsible for powering off the virtual machine.
PowerOffVirtualMachine(vm *importjob.VirtualMachine) error
// IsPoweredOff checks the power status of the VM and returns true if the machine is powered off.
IsPoweredOff(vm *importjob.VirtualMachine) (bool, error)
GenerateVirtualMachine(vm *importjob.VirtualMachine) (*kubevirt.VirtualMachine, error)
}
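Taken together with the import controller further down in this commit, a consumer of this interface follows a power off, wait, export, generate sequence. The condensed loop below is a hypothetical illustration of that flow; the real controller spreads it across reconcile passes and records conditions rather than blocking.
```go
package example

import (
	"context"
	"fmt"
	"time"

	importjob "github.com/harvester/vm-import-controller/pkg/apis/importjob.harvesterhci.io/v1beta1"
	source "github.com/harvester/vm-import-controller/pkg/apis/source.harvesterhci.io/v1beta1"
	kubevirt "kubevirt.io/api/core/v1"
)

// migrate is a hypothetical, condensed driver for VirtualMachineOperations:
// power the source VM off, wait until it reports powered off, export its
// disks, then generate the kubevirt VirtualMachine definition.
func migrate(ctx context.Context, vmo source.VirtualMachineOperations, vm *importjob.VirtualMachine) (*kubevirt.VirtualMachine, error) {
	if err := vmo.PowerOffVirtualMachine(vm); err != nil {
		return nil, fmt.Errorf("error powering off source vm: %v", err)
	}
	for {
		off, err := vmo.IsPoweredOff(vm)
		if err != nil {
			return nil, fmt.Errorf("error checking power state: %v", err)
		}
		if off {
			break
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(10 * time.Second):
		}
	}
	if err := vmo.ExportVirtualMachine(vm); err != nil {
		return nil, fmt.Errorf("error exporting source vm: %v", err)
	}
	return vmo.GenerateVirtualMachine(vm)
}
```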


@ -0,0 +1,21 @@
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
// +k8s:deepcopy-gen=package
// +groupName=source.harvesterhci.io
package v1beta1


@ -0,0 +1,45 @@
package v1beta1
import (
"context"
"github.com/harvester/vm-import-controller/pkg/apis/common"
"github.com/harvester/vm-import-controller/pkg/source/openstack"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Openstack struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec OpenstackSpec `json:"spec"`
Status OpenStackStatus `json:"status,omitempty"`
}
type OpenstackSpec struct {
EndpointAddress string `json:"endpoint"`
Project string `json:"dc"`
Credentials corev1.SecretReference `json:"credentials"`
}
type OpenStackStatus struct {
Status ClusterStatus `json:"status,omitempty"`
// +optional
Conditions []common.Condition `json:"conditions,omitempty"`
}
func (o *Openstack) ClusterStatus() ClusterStatus {
return o.Status.Status
}
func (o *Openstack) GenerateClient(ctx context.Context, secret *corev1.Secret) (VirtualMachineOperations, error) {
return openstack.NewClient(ctx, o.Spec.EndpointAddress, o.Spec.Project, secret)
}
func (o *Openstack) SecretReference() corev1.SecretReference {
return o.Spec.Credentials
}


@ -0,0 +1,54 @@
package v1beta1
import (
"context"
"github.com/harvester/vm-import-controller/pkg/apis/common"
"github.com/harvester/vm-import-controller/pkg/source/vmware"
"github.com/rancher/wrangler/pkg/condition"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type ClusterStatus string
const (
ClusterReady ClusterStatus = "clusterReady"
ClusterNotReady ClusterStatus = "clusterNotReady"
ClusterReadyCondition condition.Cond = "ClusterReady"
ClusterErrorCondition condition.Cond = "ClusterError"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Vmware struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec VmwareClusterSpec `json:"spec"`
Status VmwareClusterStatus `json:"status,omitempty"`
}
type VmwareClusterSpec struct {
EndpointAddress string `json:"endpoint"`
Datacenter string `json:"dc"`
Credentials corev1.SecretReference `json:"credentials"`
}
type VmwareClusterStatus struct {
Status ClusterStatus `json:"status,omitempty"`
// +optional
Conditions []common.Condition `json:"conditions,omitempty"`
}
func (v *Vmware) ClusterStatus() ClusterStatus {
return v.Status.Status
}
func (v *Vmware) GenerateClient(ctx context.Context, secret *corev1.Secret) (VirtualMachineOperations, error) {
return vmware.NewClient(ctx, v.Spec.EndpointAddress, v.Spec.Datacenter, secret)
}
func (v *Vmware) SecretReference() corev1.SecretReference {
return v.Spec.Credentials
}


@ -0,0 +1,225 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
package v1beta1
import (
common "github.com/harvester/vm-import-controller/pkg/apis/common"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OpenStackStatus) DeepCopyInto(out *OpenStackStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]common.Condition, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackStatus.
func (in *OpenStackStatus) DeepCopy() *OpenStackStatus {
if in == nil {
return nil
}
out := new(OpenStackStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Openstack) DeepCopyInto(out *Openstack) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Openstack.
func (in *Openstack) DeepCopy() *Openstack {
if in == nil {
return nil
}
out := new(Openstack)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Openstack) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OpenstackList) DeepCopyInto(out *OpenstackList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Openstack, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenstackList.
func (in *OpenstackList) DeepCopy() *OpenstackList {
if in == nil {
return nil
}
out := new(OpenstackList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *OpenstackList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OpenstackSpec) DeepCopyInto(out *OpenstackSpec) {
*out = *in
out.Credentials = in.Credentials
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenstackSpec.
func (in *OpenstackSpec) DeepCopy() *OpenstackSpec {
if in == nil {
return nil
}
out := new(OpenstackSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Vmware) DeepCopyInto(out *Vmware) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Vmware.
func (in *Vmware) DeepCopy() *Vmware {
if in == nil {
return nil
}
out := new(Vmware)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Vmware) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VmwareClusterSpec) DeepCopyInto(out *VmwareClusterSpec) {
*out = *in
out.Credentials = in.Credentials
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VmwareClusterSpec.
func (in *VmwareClusterSpec) DeepCopy() *VmwareClusterSpec {
if in == nil {
return nil
}
out := new(VmwareClusterSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VmwareClusterStatus) DeepCopyInto(out *VmwareClusterStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]common.Condition, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VmwareClusterStatus.
func (in *VmwareClusterStatus) DeepCopy() *VmwareClusterStatus {
if in == nil {
return nil
}
out := new(VmwareClusterStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VmwareList) DeepCopyInto(out *VmwareList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Vmware, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VmwareList.
func (in *VmwareList) DeepCopy() *VmwareList {
if in == nil {
return nil
}
out := new(VmwareList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VmwareList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}


@ -0,0 +1,59 @@
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
// +k8s:deepcopy-gen=package
// +groupName=source.harvesterhci.io
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// OpenstackList is a list of Openstack resources
type OpenstackList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Openstack `json:"items"`
}
func NewOpenstack(namespace, name string, obj Openstack) *Openstack {
obj.APIVersion, obj.Kind = SchemeGroupVersion.WithKind("Openstack").ToAPIVersionAndKind()
obj.Name = name
obj.Namespace = namespace
return &obj
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// VmwareList is a list of Vmware resources
type VmwareList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Vmware `json:"items"`
}
func NewVmware(namespace, name string, obj Vmware) *Vmware {
obj.APIVersion, obj.Kind = SchemeGroupVersion.WithKind("Vmware").ToAPIVersionAndKind()
obj.Name = name
obj.Namespace = namespace
return &obj
}


@ -0,0 +1,63 @@
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
// +k8s:deepcopy-gen=package
// +groupName=source.harvesterhci.io
package v1beta1
import (
source "github.com/harvester/vm-import-controller/pkg/apis/source.harvesterhci.io"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
OpenstackResourceName = "openstacks"
VmwareResourceName = "vmwares"
)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: source.GroupName, Version: "v1beta1"}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Openstack{},
&OpenstackList{},
&Vmware{},
&VmwareList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}


@ -0,0 +1,24 @@
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
package source
const (
// Package-wide consts from generator "zz_generated_register".
GroupName = "source.harvesterhci.io"
)


@ -0,0 +1,17 @@
package main
import (
"os"
"github.com/rancher/wrangler/pkg/cleanup"
"github.com/sirupsen/logrus"
)
func main() {
if err := cleanup.Cleanup("./pkg/apis"); err != nil {
logrus.Fatal(err)
}
if err := os.RemoveAll("./pkg/generated"); err != nil {
logrus.Fatal(err)
}
}

33
pkg/codegen/main.go Normal file

@ -0,0 +1,33 @@
package main
import (
"os"
controllergen "github.com/rancher/wrangler/pkg/controller-gen"
"github.com/rancher/wrangler/pkg/controller-gen/args"
// Ensure gvk gets loaded in wrangler/pkg/gvk cache
_ "github.com/rancher/wrangler/pkg/generated/controllers/apiextensions.k8s.io/v1"
)
func main() {
os.Unsetenv("GOPATH")
controllergen.Run(args.Options{
OutputPackage: "github.com/harvester/vm-import-controller/pkg/generated",
Boilerplate: "scripts/boilerplate.go.txt",
Groups: map[string]args.Group{
"source.harvesterhci.io": {
Types: []interface{}{
"./pkg/apis/source.harvesterhci.io/v1beta1",
},
GenerateTypes: true,
},
"importjob.harvesterhci.io": {
Types: []interface{}{
"./pkg/apis/importjob.harvesterhci.io/v1beta1",
},
GenerateTypes: true,
},
},
})
}


@ -0,0 +1,88 @@
package controllers
import (
"context"
"time"
harvester "github.com/harvester/harvester/pkg/generated/controllers/harvesterhci.io"
"github.com/harvester/harvester/pkg/generated/controllers/kubevirt.io"
ic "github.com/harvester/vm-import-controller/pkg/controllers/importjob"
sc "github.com/harvester/vm-import-controller/pkg/controllers/source"
"github.com/harvester/vm-import-controller/pkg/crd"
"github.com/harvester/vm-import-controller/pkg/generated/controllers/importjob.harvesterhci.io"
"github.com/harvester/vm-import-controller/pkg/generated/controllers/source.harvesterhci.io"
"github.com/rancher/lasso/pkg/cache"
"github.com/rancher/lasso/pkg/client"
"github.com/rancher/lasso/pkg/controller"
"github.com/rancher/wrangler/pkg/generated/controllers/core"
"github.com/rancher/wrangler/pkg/start"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/workqueue"
)
func Start(ctx context.Context, restConfig *rest.Config) error {
if err := crd.Create(ctx, restConfig); err != nil {
return err
}
if err := Register(ctx, restConfig); err != nil {
return err
}
<-ctx.Done()
return nil
}
func Register(ctx context.Context, restConfig *rest.Config) error {
rateLimit := workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 5*time.Minute)
workqueue.DefaultControllerRateLimiter()
clientFactory, err := client.NewSharedClientFactory(restConfig, nil)
if err != nil {
return err
}
cacheFactory := cache.NewSharedCachedFactory(clientFactory, nil)
scf := controller.NewSharedControllerFactory(cacheFactory, &controller.SharedControllerFactoryOptions{
DefaultRateLimiter: rateLimit,
DefaultWorkers: 5,
})
if err != nil {
return err
}
sourceFactory, err := source.NewFactoryFromConfigWithOptions(restConfig, &source.FactoryOptions{
SharedControllerFactory: scf,
})
if err != nil {
return err
}
coreFactory, err := core.NewFactoryFromConfigWithOptions(restConfig, &core.FactoryOptions{
SharedControllerFactory: scf,
})
if err != nil {
return err
}
harvesterFactory, err := harvester.NewFactoryFromConfigWithOptions(restConfig, &harvester.FactoryOptions{
SharedControllerFactory: scf,
})
if err != nil {
return err
}
importJobFactory, err := importjob.NewFactoryFromConfigWithOptions(restConfig, &importjob.FactoryOptions{
SharedControllerFactory: scf,
})
if err != nil {
return err
}
kubevirtFactory, err := kubevirt.NewFactoryFromConfigWithOptions(restConfig, &kubevirt.FactoryOptions{
SharedControllerFactory: scf,
})
if err != nil {
return err
}
sc.RegisterVmareController(ctx, sourceFactory.Source().V1beta1().Vmware(), coreFactory.Core().V1().Secret())
ic.RegisterVMImportController(ctx, sourceFactory.Source().V1beta1().Vmware(), sourceFactory.Source().V1beta1().Openstack(),
coreFactory.Core().V1().Secret(), importJobFactory.Importjob().V1beta1().VirtualMachine(),
harvesterFactory.Harvesterhci().V1beta1().VirtualMachineImage(), kubevirtFactory.Kubevirt().V1().VirtualMachine(),
coreFactory.Core().V1().PersistentVolumeClaim())
return start.All(ctx, 1, sourceFactory)
}


@ -0,0 +1,578 @@
package importjob
import (
"context"
"fmt"
"os"
"path/filepath"
"reflect"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/resource"
kubevirt "kubevirt.io/api/core/v1"
harvesterv1beta1 "github.com/harvester/harvester/pkg/apis/harvesterhci.io/v1beta1"
harvester "github.com/harvester/harvester/pkg/generated/controllers/harvesterhci.io/v1beta1"
kubevirtv1 "github.com/harvester/harvester/pkg/generated/controllers/kubevirt.io/v1"
"github.com/harvester/vm-import-controller/pkg/apis/common"
importjob "github.com/harvester/vm-import-controller/pkg/apis/importjob.harvesterhci.io/v1beta1"
source "github.com/harvester/vm-import-controller/pkg/apis/source.harvesterhci.io/v1beta1"
importJobController "github.com/harvester/vm-import-controller/pkg/generated/controllers/importjob.harvesterhci.io/v1beta1"
sourceController "github.com/harvester/vm-import-controller/pkg/generated/controllers/source.harvesterhci.io/v1beta1"
"github.com/harvester/vm-import-controller/pkg/server"
"github.com/harvester/vm-import-controller/pkg/util"
coreControllers "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/pkg/relatedresource"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
type virtualMachineHandler struct {
ctx context.Context
vmware sourceController.VmwareController
openstack sourceController.OpenstackController
secret coreControllers.SecretController
importVM importJobController.VirtualMachineController
vmi harvester.VirtualMachineImageController
kubevirt kubevirtv1.VirtualMachineController
pvc coreControllers.PersistentVolumeClaimController
}
func RegisterVMImportController(ctx context.Context, vmware sourceController.VmwareController, openstack sourceController.OpenstackController,
secret coreControllers.SecretController, importVM importJobController.VirtualMachineController, vmi harvester.VirtualMachineImageController, kubevirt kubevirtv1.VirtualMachineController, pvc coreControllers.PersistentVolumeClaimController) {
vmHandler := &virtualMachineHandler{
ctx: ctx,
vmware: vmware,
openstack: openstack,
secret: secret,
importVM: importVM,
vmi: vmi,
kubevirt: kubevirt,
pvc: pvc,
}
relatedresource.Watch(ctx, "virtualmachineimage-change", vmHandler.ReconcileVMI, importVM, vmi)
importVM.OnChange(ctx, "virtualmachine-import-job-change", vmHandler.OnVirtualMachineChange)
}
func (h *virtualMachineHandler) OnVirtualMachineChange(key string, vm *importjob.VirtualMachine) (*importjob.VirtualMachine, error) {
if vm == nil || vm.DeletionTimestamp != nil {
return vm, nil
}
switch vm.Status.Status {
case "": // run preflight checks and make vm ready for import
err := h.preFlightChecks(vm)
if err != nil {
return vm, err
}
vm.Status.Status = importjob.SourceReady
return h.importVM.UpdateStatus(vm)
case importjob.SourceReady: //vm source is valid and ready. trigger source specific import
err := h.triggerExport(vm)
if err != nil {
return vm, err
}
if util.ConditionExists(vm.Status.ImportConditions, importjob.VirtualMachineExported, v1.ConditionTrue) {
vm.Status.Status = importjob.DisksExported
}
return h.importVM.UpdateStatus(vm)
case importjob.DisksExported: // prepare and add routes for disks to be used for VirtualMachineImage CRD
orgStatus := vm.Status.DeepCopy()
err := h.createVirtualMachineImages(vm)
if err != nil {
// check if any disks have been updated. We need to save this info to eventually reconcile the VMI creation
var newVM *importjob.VirtualMachine
var newErr error
if !reflect.DeepEqual(orgStatus.DiskImportStatus, vm.Status.DiskImportStatus) {
newVM, newErr = h.importVM.UpdateStatus(vm)
}
if newErr != nil {
logrus.Errorf("error updating status for vm status %s: %v", vm.Name, newErr)
}
return newVM, err
}
ok := true
for _, d := range vm.Status.DiskImportStatus {
ok = util.ConditionExists(d.DiskConditions, importjob.VirtualMachineImageSubmitted, v1.ConditionTrue) && ok
}
if ok {
vm.Status.Status = importjob.DiskImagesSubmitted
}
return h.importVM.UpdateStatus(vm)
case importjob.DiskImagesSubmitted:
// check and update disk image status based on VirtualMachineImage watches
err := h.reconcileVMIStatus(vm)
if err != nil {
return vm, err
}
ok := true
failed := false
var failedCount, passedCount int
for _, d := range vm.Status.DiskImportStatus {
ok = util.ConditionExists(d.DiskConditions, importjob.VirtualMachineImageReady, v1.ConditionTrue) && ok
if ok {
passedCount++
}
failed = util.ConditionExists(d.DiskConditions, importjob.VirtualMachineImageFailed, v1.ConditionTrue) || failed
if failed {
failedCount++
}
}
if len(vm.Status.DiskImportStatus) != failedCount+passedCount {
// if the lengths don't match, then we have disks with missing status. Let's ignore failures for now, and handle
// disk failures once watches have been triggered for all disks
return vm, nil
}
if ok {
vm.Status.Status = importjob.DiskImagesReady
}
if failed {
vm.Status.Status = importjob.DiskImagesFailed
}
return h.importVM.UpdateStatus(vm)
case importjob.DiskImagesFailed:
// re-export VM and trigger re-import again
err := h.cleanupAndResubmit(vm)
if err != nil {
return vm, err
}
vm.Status.Status = importjob.SourceReady
return h.importVM.UpdateStatus(vm)
case importjob.DiskImagesReady:
// create VM to use the VirtualMachineObject
err := h.createVirtualMachine(vm)
if err != nil {
return vm, err
}
vm.Status.Status = importjob.VirtualMachineCreated
return h.importVM.UpdateStatus(vm)
case importjob.VirtualMachineCreated:
// wait for VM to be running using a watch on VM's
ok, err := h.checkVirtualMachine(vm)
if err != nil {
return vm, err
}
if ok {
vm.Status.Status = importjob.VirtualMachineRunning
h.importVM.UpdateStatus(vm)
}
// by default we will poll again after 5 mins
h.importVM.EnqueueAfter(vm.Namespace, vm.Name, 5*time.Minute)
case importjob.VirtualMachineRunning:
logrus.Infof("vm %s in namespace %v imported successfully", vm.Name, vm.Namespace)
return vm, h.tidyUpObjects(vm)
}
return vm, nil
}
// preFlightChecks is used to validate that the associated source and VM source references are valid
func (h *virtualMachineHandler) preFlightChecks(vm *importjob.VirtualMachine) error {
if vm.Spec.SourceCluster.APIVersion != "source.harvesterhci.io/v1beta1" {
return fmt.Errorf("expected source cluster apiversion to be source.harvesterhci.io/v1beta1 but got %s", vm.Spec.SourceCluster.APIVersion)
}
var ss source.SourceInterface
var err error
switch strings.ToLower(vm.Spec.SourceCluster.Kind) {
case "vmware", "openstack":
ss, err = h.generateSource(vm)
if err != nil {
return fmt.Errorf("error generating source in preflight checks :%v", err)
}
default:
return fmt.Errorf("unsupported source kind. Currently supported values are vmware/openstack but got %s", strings.ToLower(vm.Spec.SourceCluster.Kind))
}
if ss.ClusterStatus() != source.ClusterReady {
return fmt.Errorf("source not yet ready. current status is %s", ss.ClusterStatus())
}
return nil
}
func (h *virtualMachineHandler) triggerExport(vm *importjob.VirtualMachine) error {
vmo, err := h.generateVMO(vm)
if err != nil {
return fmt.Errorf("error generating VMO in trigger export: %v", err)
}
// power off machine
if !util.ConditionExists(vm.Status.ImportConditions, importjob.VirtualMachinePoweringOff, v1.ConditionTrue) {
err = vmo.PowerOffVirtualMachine(vm)
if err != nil {
return fmt.Errorf("error in poweroff call: %v", err)
}
conds := []common.Condition{
{
Type: importjob.VirtualMachinePoweringOff,
Status: v1.ConditionTrue,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
},
}
vm.Status.ImportConditions = util.MergeConditions(vm.Status.ImportConditions, conds)
return nil
}
if !util.ConditionExists(vm.Status.ImportConditions, importjob.VirtualMachinePoweredOff, v1.ConditionTrue) &&
util.ConditionExists(vm.Status.ImportConditions, importjob.VirtualMachinePoweringOff, v1.ConditionTrue) {
// check if VM is powered off
ok, err := vmo.IsPoweredOff(vm)
if err != nil {
return fmt.Errorf("error during check for vm power: %v", err)
}
if ok {
conds := []common.Condition{
{
Type: importjob.VirtualMachinePoweredOff,
Status: v1.ConditionTrue,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
},
}
vm.Status.ImportConditions = util.MergeConditions(vm.Status.ImportConditions, conds)
return nil
}
// default behaviour
return fmt.Errorf("waiting for vm %s to be powered off", fmt.Sprintf("%s/%s", vm.Namespace, vm.Name))
}
if util.ConditionExists(vm.Status.ImportConditions, importjob.VirtualMachinePoweredOff, v1.ConditionTrue) &&
util.ConditionExists(vm.Status.ImportConditions, importjob.VirtualMachinePoweringOff, v1.ConditionTrue) &&
!util.ConditionExists(vm.Status.ImportConditions, importjob.VirtualMachineExported, v1.ConditionTrue) {
err := vmo.ExportVirtualMachine(vm)
if err != nil {
return fmt.Errorf("error exporting virtual machine: %v", err)
}
conds := []common.Condition{
{
Type: importjob.VirtualMachineExported,
Status: v1.ConditionTrue,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
},
}
vm.Status.ImportConditions = util.MergeConditions(vm.Status.ImportConditions, conds)
return nil
}
return nil
}
// generateVMO is a wrapper to generate a VirtualMachineOperations client
func (h *virtualMachineHandler) generateVMO(vm *importjob.VirtualMachine) (source.VirtualMachineOperations, error) {
source, err := h.generateSource(vm)
if err != nil {
return nil, fmt.Errorf("error generating source interface: %v", err)
}
secretRef := source.SecretReference()
secret, err := h.secret.Get(secretRef.Namespace, secretRef.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("error fetching secret :%v", err)
}
// generate VirtualMachineOperations Interface.
// this will be used for source specific operations
return source.GenerateClient(h.ctx, secret)
}
func (h *virtualMachineHandler) generateSource(vm *importjob.VirtualMachine) (source.SourceInterface, error) {
var s source.SourceInterface
var err error
if strings.ToLower(vm.Spec.SourceCluster.Kind) == "vmware" {
s, err = h.vmware.Get(vm.Spec.SourceCluster.Namespace, vm.Spec.SourceCluster.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
}
if strings.ToLower(vm.Spec.SourceCluster.Kind) == "openstack" {
s, err = h.openstack.Get(vm.Spec.SourceCluster.Namespace, vm.Spec.SourceCluster.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
}
return s, nil
}
func (h *virtualMachineHandler) createVirtualMachineImages(vm *importjob.VirtualMachine) error {
// check and create VirtualMachineImage objects
status := vm.Status.DeepCopy()
for i, d := range status.DiskImportStatus {
if !util.ConditionExists(d.DiskConditions, importjob.VirtualMachineImageSubmitted, v1.ConditionTrue) {
vmi := &harvesterv1beta1.VirtualMachineImage{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "image-",
Namespace: vm.Namespace,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: vm.APIVersion,
Kind: vm.Kind,
UID: vm.UID,
Name: vm.Name,
},
},
},
Spec: harvesterv1beta1.VirtualMachineImageSpec{
DisplayName: fmt.Sprintf("vm-import-%s-%s", vm.Name, d.Name),
URL: fmt.Sprintf("http://%s:%d/%s", server.Address(), server.DefaultPort(), d.Name),
SourceType: "download",
},
}
vmiObj, err := h.vmi.Create(vmi)
if err != nil {
return fmt.Errorf("error creating vmi: %v", err)
}
d.VirtualMachineImage = vmiObj.Name
vm.Status.DiskImportStatus[i] = d
cond := []common.Condition{
{
Type: importjob.VirtualMachineImageSubmitted,
Status: v1.ConditionTrue,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
},
}
vm.Status.DiskImportStatus[i].DiskConditions = util.MergeConditions(vm.Status.DiskImportStatus[i].DiskConditions, cond)
}
}
return nil
}
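// reconcileVMIStatus inspects the VirtualMachineImage backing each disk and marks the disk
// condition ready when the image import succeeds, or failed when the image reports ImportFailed.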
func (h *virtualMachineHandler) reconcileVMIStatus(vm *importjob.VirtualMachine) error {
for i, d := range vm.Status.DiskImportStatus {
if !util.ConditionExists(d.DiskConditions, importjob.VirtualMachineImageReady, v1.ConditionTrue) {
vmi, err := h.vmi.Get(vm.Namespace, d.VirtualMachineImage, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("error quering vmi in reconcileVMIStatus: %v", err)
}
for _, v := range vmi.Status.Conditions {
if v.Type == harvesterv1beta1.ImageImported && v.Status == v1.ConditionTrue {
cond := []common.Condition{
{
Type: importjob.VirtualMachineImageReady,
Status: v1.ConditionTrue,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
},
}
d.DiskConditions = util.MergeConditions(d.DiskConditions, cond)
vm.Status.DiskImportStatus[i] = d
}
// handle failed imports if any
if v.Type == harvesterv1beta1.ImageImported && v.Status == v1.ConditionFalse && v.Reason == "ImportFailed" {
cond := []common.Condition{
{
Type: importjob.VirtualMachineImageFailed,
Status: v1.ConditionTrue,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
},
}
d.DiskConditions = util.MergeConditions(d.DiskConditions, cond)
vm.Status.DiskImportStatus[i] = d
}
}
}
}
return nil
}
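// createVirtualMachine builds the kubevirt VirtualMachine definition via the source client,
// creates PVCs from the imported images, attaches them as virtio disks and submits the VM
// to the cluster.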
func (h *virtualMachineHandler) createVirtualMachine(vm *importjob.VirtualMachine) error {
vmo, err := h.generateVMO(vm)
if err != nil {
return fmt.Errorf("error generating VMO in createVirtualMachine :%v", err)
}
runVM, err := vmo.GenerateVirtualMachine(vm)
if err != nil {
return fmt.Errorf("error generating Kubevirt VM: %v", err)
}
// create PVC claims from VMI's to create the Kubevirt VM
err = h.findAndCreatePVC(vm)
if err != nil {
return err
}
// patch VM object with PVC info
var vmVols []kubevirt.Volume
var disks []kubevirt.Disk
for i, v := range vm.Status.DiskImportStatus {
pvcName := strings.ToLower(strings.Split(v.Name, ".img")[0])
vmVols = append(vmVols, kubevirt.Volume{
Name: fmt.Sprintf("disk-%d", i),
VolumeSource: kubevirt.VolumeSource{
PersistentVolumeClaim: &kubevirt.PersistentVolumeClaimVolumeSource{
PersistentVolumeClaimVolumeSource: v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName,
},
},
},
})
// boot order can't be 0, so disk ordering starts from 1
diskOrder := i + 1
disks = append(disks, kubevirt.Disk{
Name: fmt.Sprintf("disk-%d", i),
BootOrder: &[]uint{uint(diskOrder)}[0],
DiskDevice: kubevirt.DiskDevice{
Disk: &kubevirt.DiskTarget{
Bus: "virtio",
},
},
})
}
runVM.Spec.Template.Spec.Volumes = vmVols
runVM.Spec.Template.Spec.Domain.Devices.Disks = disks
runVMObj, err := h.kubevirt.Create(runVM)
if err != nil {
return fmt.Errorf("error creating kubevirt VM in createVirtualMachine :%v", err)
}
vm.Status.NewVirtualMachine = runVMObj.Name
return nil
}
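// checkVirtualMachine returns true once the newly created kubevirt VirtualMachine reports Ready.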
func (h *virtualMachineHandler) checkVirtualMachine(vm *importjob.VirtualMachine) (bool, error) {
vmObj, err := h.kubevirt.Get(vm.Namespace, vm.Status.NewVirtualMachine, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error querying kubevirt vm in checkVirtualMachine :%v", err)
}
return vmObj.Status.Ready, nil
}
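// ReconcileVMI maps a VirtualMachineImage change back to the owning import job VirtualMachine
// so the import object gets re-enqueued.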
func (h *virtualMachineHandler) ReconcileVMI(_ string, _ string, obj runtime.Object) ([]relatedresource.Key, error) {
if vmiObj, ok := obj.(*harvesterv1beta1.VirtualMachineImage); ok {
owners := vmiObj.GetOwnerReferences()
if vmiObj.DeletionTimestamp == nil {
for _, v := range owners {
if strings.ToLower(v.Kind) == "virtualmachine" {
return []relatedresource.Key{
{
Namespace: vmiObj.Namespace,
Name: v.Name,
},
}, nil
}
}
}
}
return nil, nil
}
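// cleanupAndResubmit deletes VirtualMachineImages for disks whose import failed and clears the
// failed condition so the disk image can be submitted again.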
func (h *virtualMachineHandler) cleanupAndResubmit(vm *importjob.VirtualMachine) error {
// need to wait for all VMI's to be complete or failed before we cleanup failed objects
for i, d := range vm.Status.DiskImportStatus {
if util.ConditionExists(d.DiskConditions, importjob.VirtualMachineImageFailed, v1.ConditionTrue) {
err := h.vmi.Delete(vm.Namespace, d.VirtualMachineImage, &metav1.DeleteOptions{})
if err != nil {
return fmt.Errorf("error deleting failed virtualmachineimage: %v", err)
}
conds := util.RemoveCondition(d.DiskConditions, importjob.VirtualMachineImageFailed, v1.ConditionTrue)
d.DiskConditions = conds
vm.Status.DiskImportStatus[i] = d
}
}
return nil
}
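// findAndCreatePVC creates a block-mode PVC for each imported disk if one does not already
// exist, sizing it from the backing VirtualMachineImage and reusing its storage class.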
func (h *virtualMachineHandler) findAndCreatePVC(vm *importjob.VirtualMachine) error {
for _, v := range vm.Status.DiskImportStatus {
vmiObj, err := h.vmi.Get(vm.Namespace, v.VirtualMachineImage, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("error quering vmi in findAndCreatePVC :%v", err)
}
// check if PVC has already been created
var createPVC bool
pvcName := strings.ToLower(strings.Split(v.Name, ".img")[0])
_, err = h.pvc.Get(vm.Namespace, pvcName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
createPVC = true
} else {
return fmt.Errorf("error looking up existing PVC in findAndCreatePVC :%v", err)
}
}
if createPVC {
pvcObj := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: pvcName,
Namespace: vm.Namespace,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteMany,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse(fmt.Sprintf("%d", vmiObj.Status.Size)),
},
},
StorageClassName: &vmiObj.Status.StorageClassName,
VolumeMode: &[]v1.PersistentVolumeMode{v1.PersistentVolumeBlock}[0],
},
}
_, err = h.pvc.Create(pvcObj)
if err != nil {
return err
}
}
}
return nil
}
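// tidyUpObjects removes the import job ownerReference from the created VirtualMachineImages
// and deletes the temporary raw image files served during the import.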
func (h *virtualMachineHandler) tidyUpObjects(vm *importjob.VirtualMachine) error {
for _, v := range vm.Status.DiskImportStatus {
vmiObj, err := h.vmi.Get(vm.Namespace, v.VirtualMachineImage, metav1.GetOptions{})
if err != nil {
return err
}
var newRef []metav1.OwnerReference
for _, o := range vmiObj.GetOwnerReferences() {
if o.Kind == vm.Kind && o.APIVersion == vm.APIVersion && o.UID == vm.UID && o.Name == vm.Name {
continue
}
newRef = append(newRef, o)
}
vmiObj.ObjectMeta.OwnerReferences = newRef
_, err = h.vmi.Update(vmiObj)
if err != nil {
return fmt.Errorf("error removing ownerReference for vmi %s :%v", vmiObj.Name, err)
}
// remove processed img files
os.Remove(filepath.Join(server.TempDir(), v.Name))
}
return nil
}

View File

@ -0,0 +1,93 @@
package source
import (
"context"
"fmt"
"time"
"github.com/harvester/vm-import-controller/pkg/apis/common"
"github.com/sirupsen/logrus"
source "github.com/harvester/vm-import-controller/pkg/apis/source.harvesterhci.io/v1beta1"
sourceController "github.com/harvester/vm-import-controller/pkg/generated/controllers/source.harvesterhci.io/v1beta1"
"github.com/harvester/vm-import-controller/pkg/source/vmware"
"github.com/harvester/vm-import-controller/pkg/util"
corecontrollers "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type vmwareHandler struct {
ctx context.Context
vmware sourceController.VmwareController
secret corecontrollers.SecretController
}
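// RegisterVmareController registers the vmware source handler against the Vmware controller
// so source objects are reconciled on change.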
func RegisterVmareController(ctx context.Context, vc sourceController.VmwareController, secret corecontrollers.SecretController) {
vHandler := &vmwareHandler{
ctx: ctx,
vmware: vc,
secret: secret,
}
vc.OnChange(ctx, "vmware-source-change", vHandler.OnSourceChange)
}
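// OnSourceChange reconciles a Vmware source: it builds a client from the referenced credentials
// secret, verifies the configured datacenter and updates the cluster ready/error conditions.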
func (h *vmwareHandler) OnSourceChange(key string, v *source.Vmware) (*source.Vmware, error) {
if v == nil || v.DeletionTimestamp != nil {
return v, nil
}
logrus.Infof("reoncilling vmware source %s", key)
if v.Status.Status != source.ClusterReady {
secretObj, err := h.secret.Get(v.Spec.Credentials.Namespace, v.Spec.Credentials.Name, metav1.GetOptions{})
if err != nil {
return v, fmt.Errorf("error looking up secret for vmware source: %s", err)
}
client, err := vmware.NewClient(h.ctx, v.Spec.EndpointAddress, v.Spec.Datacenter, secretObj)
if err != nil {
return v, fmt.Errorf("error generating vmware client for vmware source: %s: %v", v.Name, err)
}
err = client.Verify()
if err != nil {
// unable to find specific datacenter
conds := []common.Condition{
{
Type: source.ClusterErrorCondition,
Status: v1.ConditionTrue,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
}, {
Type: source.ClusterReadyCondition,
Status: v1.ConditionFalse,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
},
}
v.Status.Conditions = util.MergeConditions(v.Status.Conditions, conds)
v.Status.Status = source.ClusterNotReady
return h.vmware.UpdateStatus(v)
}
conds := []common.Condition{
{
Type: source.ClusterReadyCondition,
Status: v1.ConditionTrue,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
}, {
Type: source.ClusterErrorCondition,
Status: v1.ConditionFalse,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
},
}
v.Status.Conditions = util.MergeConditions(v.Status.Conditions, conds)
v.Status.Status = source.ClusterReady
return h.vmware.UpdateStatus(v)
}
return v, nil
}

53
pkg/crd/crd.go Normal file
View File

@ -0,0 +1,53 @@
package crd
import (
"context"
importjob "github.com/harvester/vm-import-controller/pkg/apis/importjob.harvesterhci.io/v1beta1"
source "github.com/harvester/vm-import-controller/pkg/apis/source.harvesterhci.io/v1beta1"
"github.com/rancher/wrangler/pkg/crd"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/rest"
)
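// List returns the CRD definitions managed by the vm-import-controller.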
func List() []crd.CRD {
return []crd.CRD{
newCRD("source.harvesterhci.io", &source.Vmware{}, func(c crd.CRD) crd.CRD {
return c.
WithColumn("Status", ".status.status")
}),
newCRD("source.harvesterhci.io", &source.Openstack{}, func(c crd.CRD) crd.CRD {
return c.
WithColumn("Status", ".status.status")
}),
newCRD("importjob.harvesterhci.io", &importjob.VirtualMachine{}, func(c crd.CRD) crd.CRD {
return c.
WithColumn("Status", ".status.importStatus")
}),
}
}
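// Create registers the CRDs returned by List with the cluster and waits for them to be established.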
func Create(ctx context.Context, cfg *rest.Config) error {
factory, err := crd.NewFactoryFromClient(cfg)
if err != nil {
return err
}
return factory.BatchCreateCRDs(ctx, List()...).BatchWait()
}
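// newCRD builds a namespaced v1beta1 CRD for the given group and schema object, with the status
// subresource enabled, applying any customizations.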
func newCRD(group string, obj interface{}, customize func(crd.CRD) crd.CRD) crd.CRD {
crd := crd.CRD{
GVK: schema.GroupVersionKind{
Group: group,
Version: "v1beta1",
},
Status: true,
NonNamespace: false,
SchemaObject: obj,
}
if customize != nil {
crd = customize(crd)
}
return crd
}

View File

@ -0,0 +1,67 @@
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
package importjob
import (
"github.com/rancher/wrangler/pkg/generic"
"k8s.io/client-go/rest"
)
type Factory struct {
*generic.Factory
}
func NewFactoryFromConfigOrDie(config *rest.Config) *Factory {
f, err := NewFactoryFromConfig(config)
if err != nil {
panic(err)
}
return f
}
func NewFactoryFromConfig(config *rest.Config) (*Factory, error) {
return NewFactoryFromConfigWithOptions(config, nil)
}
func NewFactoryFromConfigWithNamespace(config *rest.Config, namespace string) (*Factory, error) {
return NewFactoryFromConfigWithOptions(config, &FactoryOptions{
Namespace: namespace,
})
}
type FactoryOptions = generic.FactoryOptions
func NewFactoryFromConfigWithOptions(config *rest.Config, opts *FactoryOptions) (*Factory, error) {
f, err := generic.NewFactoryFromConfigWithOptions(config, opts)
return &Factory{
Factory: f,
}, err
}
func NewFactoryFromConfigWithOptionsOrDie(config *rest.Config, opts *FactoryOptions) *Factory {
f, err := NewFactoryFromConfigWithOptions(config, opts)
if err != nil {
panic(err)
}
return f
}
func (c *Factory) Importjob() Interface {
return New(c.ControllerFactory())
}

View File

@ -0,0 +1,43 @@
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
package importjob
import (
v1beta1 "github.com/harvester/vm-import-controller/pkg/generated/controllers/importjob.harvesterhci.io/v1beta1"
"github.com/rancher/lasso/pkg/controller"
)
type Interface interface {
V1beta1() v1beta1.Interface
}
type group struct {
controllerFactory controller.SharedControllerFactory
}
// New returns a new Interface.
func New(controllerFactory controller.SharedControllerFactory) Interface {
return &group{
controllerFactory: controllerFactory,
}
}
func (g *group) V1beta1() v1beta1.Interface {
return v1beta1.New(g.controllerFactory)
}

View File

@ -0,0 +1,48 @@
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
package v1beta1
import (
v1beta1 "github.com/harvester/vm-import-controller/pkg/apis/importjob.harvesterhci.io/v1beta1"
"github.com/rancher/lasso/pkg/controller"
"github.com/rancher/wrangler/pkg/schemes"
"k8s.io/apimachinery/pkg/runtime/schema"
)
func init() {
schemes.Register(v1beta1.AddToScheme)
}
type Interface interface {
VirtualMachine() VirtualMachineController
}
func New(controllerFactory controller.SharedControllerFactory) Interface {
return &version{
controllerFactory: controllerFactory,
}
}
type version struct {
controllerFactory controller.SharedControllerFactory
}
func (c *version) VirtualMachine() VirtualMachineController {
return NewVirtualMachineController(schema.GroupVersionKind{Group: "importjob.harvesterhci.io", Version: "v1beta1", Kind: "VirtualMachine"}, "virtualmachines", true, c.controllerFactory)
}

View File

@ -0,0 +1,376 @@
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
package v1beta1
import (
"context"
"time"
v1beta1 "github.com/harvester/vm-import-controller/pkg/apis/importjob.harvesterhci.io/v1beta1"
"github.com/rancher/lasso/pkg/client"
"github.com/rancher/lasso/pkg/controller"
"github.com/rancher/wrangler/pkg/apply"
"github.com/rancher/wrangler/pkg/condition"
"github.com/rancher/wrangler/pkg/generic"
"github.com/rancher/wrangler/pkg/kv"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
)
type VirtualMachineHandler func(string, *v1beta1.VirtualMachine) (*v1beta1.VirtualMachine, error)
type VirtualMachineController interface {
generic.ControllerMeta
VirtualMachineClient
OnChange(ctx context.Context, name string, sync VirtualMachineHandler)
OnRemove(ctx context.Context, name string, sync VirtualMachineHandler)
Enqueue(namespace, name string)
EnqueueAfter(namespace, name string, duration time.Duration)
Cache() VirtualMachineCache
}
type VirtualMachineClient interface {
Create(*v1beta1.VirtualMachine) (*v1beta1.VirtualMachine, error)
Update(*v1beta1.VirtualMachine) (*v1beta1.VirtualMachine, error)
UpdateStatus(*v1beta1.VirtualMachine) (*v1beta1.VirtualMachine, error)
Delete(namespace, name string, options *metav1.DeleteOptions) error
Get(namespace, name string, options metav1.GetOptions) (*v1beta1.VirtualMachine, error)
List(namespace string, opts metav1.ListOptions) (*v1beta1.VirtualMachineList, error)
Watch(namespace string, opts metav1.ListOptions) (watch.Interface, error)
Patch(namespace, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VirtualMachine, err error)
}
type VirtualMachineCache interface {
Get(namespace, name string) (*v1beta1.VirtualMachine, error)
List(namespace string, selector labels.Selector) ([]*v1beta1.VirtualMachine, error)
AddIndexer(indexName string, indexer VirtualMachineIndexer)
GetByIndex(indexName, key string) ([]*v1beta1.VirtualMachine, error)
}
type VirtualMachineIndexer func(obj *v1beta1.VirtualMachine) ([]string, error)
type virtualMachineController struct {
controller controller.SharedController
client *client.Client
gvk schema.GroupVersionKind
groupResource schema.GroupResource
}
func NewVirtualMachineController(gvk schema.GroupVersionKind, resource string, namespaced bool, controller controller.SharedControllerFactory) VirtualMachineController {
c := controller.ForResourceKind(gvk.GroupVersion().WithResource(resource), gvk.Kind, namespaced)
return &virtualMachineController{
controller: c,
client: c.Client(),
gvk: gvk,
groupResource: schema.GroupResource{
Group: gvk.Group,
Resource: resource,
},
}
}
func FromVirtualMachineHandlerToHandler(sync VirtualMachineHandler) generic.Handler {
return func(key string, obj runtime.Object) (ret runtime.Object, err error) {
var v *v1beta1.VirtualMachine
if obj == nil {
v, err = sync(key, nil)
} else {
v, err = sync(key, obj.(*v1beta1.VirtualMachine))
}
if v == nil {
return nil, err
}
return v, err
}
}
func (c *virtualMachineController) Updater() generic.Updater {
return func(obj runtime.Object) (runtime.Object, error) {
newObj, err := c.Update(obj.(*v1beta1.VirtualMachine))
if newObj == nil {
return nil, err
}
return newObj, err
}
}
func UpdateVirtualMachineDeepCopyOnChange(client VirtualMachineClient, obj *v1beta1.VirtualMachine, handler func(obj *v1beta1.VirtualMachine) (*v1beta1.VirtualMachine, error)) (*v1beta1.VirtualMachine, error) {
if obj == nil {
return obj, nil
}
copyObj := obj.DeepCopy()
newObj, err := handler(copyObj)
if newObj != nil {
copyObj = newObj
}
if obj.ResourceVersion == copyObj.ResourceVersion && !equality.Semantic.DeepEqual(obj, copyObj) {
return client.Update(copyObj)
}
return copyObj, err
}
func (c *virtualMachineController) AddGenericHandler(ctx context.Context, name string, handler generic.Handler) {
c.controller.RegisterHandler(ctx, name, controller.SharedControllerHandlerFunc(handler))
}
func (c *virtualMachineController) AddGenericRemoveHandler(ctx context.Context, name string, handler generic.Handler) {
c.AddGenericHandler(ctx, name, generic.NewRemoveHandler(name, c.Updater(), handler))
}
func (c *virtualMachineController) OnChange(ctx context.Context, name string, sync VirtualMachineHandler) {
c.AddGenericHandler(ctx, name, FromVirtualMachineHandlerToHandler(sync))
}
func (c *virtualMachineController) OnRemove(ctx context.Context, name string, sync VirtualMachineHandler) {
c.AddGenericHandler(ctx, name, generic.NewRemoveHandler(name, c.Updater(), FromVirtualMachineHandlerToHandler(sync)))
}
func (c *virtualMachineController) Enqueue(namespace, name string) {
c.controller.Enqueue(namespace, name)
}
func (c *virtualMachineController) EnqueueAfter(namespace, name string, duration time.Duration) {
c.controller.EnqueueAfter(namespace, name, duration)
}
func (c *virtualMachineController) Informer() cache.SharedIndexInformer {
return c.controller.Informer()
}
func (c *virtualMachineController) GroupVersionKind() schema.GroupVersionKind {
return c.gvk
}
func (c *virtualMachineController) Cache() VirtualMachineCache {
return &virtualMachineCache{
indexer: c.Informer().GetIndexer(),
resource: c.groupResource,
}
}
func (c *virtualMachineController) Create(obj *v1beta1.VirtualMachine) (*v1beta1.VirtualMachine, error) {
result := &v1beta1.VirtualMachine{}
return result, c.client.Create(context.TODO(), obj.Namespace, obj, result, metav1.CreateOptions{})
}
func (c *virtualMachineController) Update(obj *v1beta1.VirtualMachine) (*v1beta1.VirtualMachine, error) {
result := &v1beta1.VirtualMachine{}
return result, c.client.Update(context.TODO(), obj.Namespace, obj, result, metav1.UpdateOptions{})
}
func (c *virtualMachineController) UpdateStatus(obj *v1beta1.VirtualMachine) (*v1beta1.VirtualMachine, error) {
result := &v1beta1.VirtualMachine{}
return result, c.client.UpdateStatus(context.TODO(), obj.Namespace, obj, result, metav1.UpdateOptions{})
}
func (c *virtualMachineController) Delete(namespace, name string, options *metav1.DeleteOptions) error {
if options == nil {
options = &metav1.DeleteOptions{}
}
return c.client.Delete(context.TODO(), namespace, name, *options)
}
func (c *virtualMachineController) Get(namespace, name string, options metav1.GetOptions) (*v1beta1.VirtualMachine, error) {
result := &v1beta1.VirtualMachine{}
return result, c.client.Get(context.TODO(), namespace, name, result, options)
}
func (c *virtualMachineController) List(namespace string, opts metav1.ListOptions) (*v1beta1.VirtualMachineList, error) {
result := &v1beta1.VirtualMachineList{}
return result, c.client.List(context.TODO(), namespace, result, opts)
}
func (c *virtualMachineController) Watch(namespace string, opts metav1.ListOptions) (watch.Interface, error) {
return c.client.Watch(context.TODO(), namespace, opts)
}
func (c *virtualMachineController) Patch(namespace, name string, pt types.PatchType, data []byte, subresources ...string) (*v1beta1.VirtualMachine, error) {
result := &v1beta1.VirtualMachine{}
return result, c.client.Patch(context.TODO(), namespace, name, pt, data, result, metav1.PatchOptions{}, subresources...)
}
type virtualMachineCache struct {
indexer cache.Indexer
resource schema.GroupResource
}
func (c *virtualMachineCache) Get(namespace, name string) (*v1beta1.VirtualMachine, error) {
obj, exists, err := c.indexer.GetByKey(namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(c.resource, name)
}
return obj.(*v1beta1.VirtualMachine), nil
}
func (c *virtualMachineCache) List(namespace string, selector labels.Selector) (ret []*v1beta1.VirtualMachine, err error) {
err = cache.ListAllByNamespace(c.indexer, namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1beta1.VirtualMachine))
})
return ret, err
}
func (c *virtualMachineCache) AddIndexer(indexName string, indexer VirtualMachineIndexer) {
utilruntime.Must(c.indexer.AddIndexers(map[string]cache.IndexFunc{
indexName: func(obj interface{}) (strings []string, e error) {
return indexer(obj.(*v1beta1.VirtualMachine))
},
}))
}
func (c *virtualMachineCache) GetByIndex(indexName, key string) (result []*v1beta1.VirtualMachine, err error) {
objs, err := c.indexer.ByIndex(indexName, key)
if err != nil {
return nil, err
}
result = make([]*v1beta1.VirtualMachine, 0, len(objs))
for _, obj := range objs {
result = append(result, obj.(*v1beta1.VirtualMachine))
}
return result, nil
}
type VirtualMachineStatusHandler func(obj *v1beta1.VirtualMachine, status v1beta1.VirtualMachineImportStatus) (v1beta1.VirtualMachineImportStatus, error)
type VirtualMachineGeneratingHandler func(obj *v1beta1.VirtualMachine, status v1beta1.VirtualMachineImportStatus) ([]runtime.Object, v1beta1.VirtualMachineImportStatus, error)
func RegisterVirtualMachineStatusHandler(ctx context.Context, controller VirtualMachineController, condition condition.Cond, name string, handler VirtualMachineStatusHandler) {
statusHandler := &virtualMachineStatusHandler{
client: controller,
condition: condition,
handler: handler,
}
controller.AddGenericHandler(ctx, name, FromVirtualMachineHandlerToHandler(statusHandler.sync))
}
func RegisterVirtualMachineGeneratingHandler(ctx context.Context, controller VirtualMachineController, apply apply.Apply,
condition condition.Cond, name string, handler VirtualMachineGeneratingHandler, opts *generic.GeneratingHandlerOptions) {
statusHandler := &virtualMachineGeneratingHandler{
VirtualMachineGeneratingHandler: handler,
apply: apply,
name: name,
gvk: controller.GroupVersionKind(),
}
if opts != nil {
statusHandler.opts = *opts
}
controller.OnChange(ctx, name, statusHandler.Remove)
RegisterVirtualMachineStatusHandler(ctx, controller, condition, name, statusHandler.Handle)
}
type virtualMachineStatusHandler struct {
client VirtualMachineClient
condition condition.Cond
handler VirtualMachineStatusHandler
}
func (a *virtualMachineStatusHandler) sync(key string, obj *v1beta1.VirtualMachine) (*v1beta1.VirtualMachine, error) {
if obj == nil {
return obj, nil
}
origStatus := obj.Status.DeepCopy()
obj = obj.DeepCopy()
newStatus, err := a.handler(obj, obj.Status)
if err != nil {
// Revert to old status on error
newStatus = *origStatus.DeepCopy()
}
if a.condition != "" {
if errors.IsConflict(err) {
a.condition.SetError(&newStatus, "", nil)
} else {
a.condition.SetError(&newStatus, "", err)
}
}
if !equality.Semantic.DeepEqual(origStatus, &newStatus) {
if a.condition != "" {
// Since status has changed, update the lastUpdatedTime
a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339))
}
var newErr error
obj.Status = newStatus
newObj, newErr := a.client.UpdateStatus(obj)
if err == nil {
err = newErr
}
if newErr == nil {
obj = newObj
}
}
return obj, err
}
type virtualMachineGeneratingHandler struct {
VirtualMachineGeneratingHandler
apply apply.Apply
opts generic.GeneratingHandlerOptions
gvk schema.GroupVersionKind
name string
}
func (a *virtualMachineGeneratingHandler) Remove(key string, obj *v1beta1.VirtualMachine) (*v1beta1.VirtualMachine, error) {
if obj != nil {
return obj, nil
}
obj = &v1beta1.VirtualMachine{}
obj.Namespace, obj.Name = kv.RSplit(key, "/")
obj.SetGroupVersionKind(a.gvk)
return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts).
WithOwner(obj).
WithSetID(a.name).
ApplyObjects()
}
func (a *virtualMachineGeneratingHandler) Handle(obj *v1beta1.VirtualMachine, status v1beta1.VirtualMachineImportStatus) (v1beta1.VirtualMachineImportStatus, error) {
if !obj.DeletionTimestamp.IsZero() {
return status, nil
}
objs, newStatus, err := a.VirtualMachineGeneratingHandler(obj, status)
if err != nil {
return newStatus, err
}
return newStatus, generic.ConfigureApplyForObject(a.apply, obj, &a.opts).
WithOwner(obj).
WithSetID(a.name).
ApplyObjects(objs...)
}

View File

@ -0,0 +1,67 @@
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
package source
import (
"github.com/rancher/wrangler/pkg/generic"
"k8s.io/client-go/rest"
)
type Factory struct {
*generic.Factory
}
func NewFactoryFromConfigOrDie(config *rest.Config) *Factory {
f, err := NewFactoryFromConfig(config)
if err != nil {
panic(err)
}
return f
}
func NewFactoryFromConfig(config *rest.Config) (*Factory, error) {
return NewFactoryFromConfigWithOptions(config, nil)
}
func NewFactoryFromConfigWithNamespace(config *rest.Config, namespace string) (*Factory, error) {
return NewFactoryFromConfigWithOptions(config, &FactoryOptions{
Namespace: namespace,
})
}
type FactoryOptions = generic.FactoryOptions
func NewFactoryFromConfigWithOptions(config *rest.Config, opts *FactoryOptions) (*Factory, error) {
f, err := generic.NewFactoryFromConfigWithOptions(config, opts)
return &Factory{
Factory: f,
}, err
}
func NewFactoryFromConfigWithOptionsOrDie(config *rest.Config, opts *FactoryOptions) *Factory {
f, err := NewFactoryFromConfigWithOptions(config, opts)
if err != nil {
panic(err)
}
return f
}
func (c *Factory) Source() Interface {
return New(c.ControllerFactory())
}

View File

@ -0,0 +1,43 @@
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
package source
import (
v1beta1 "github.com/harvester/vm-import-controller/pkg/generated/controllers/source.harvesterhci.io/v1beta1"
"github.com/rancher/lasso/pkg/controller"
)
type Interface interface {
V1beta1() v1beta1.Interface
}
type group struct {
controllerFactory controller.SharedControllerFactory
}
// New returns a new Interface.
func New(controllerFactory controller.SharedControllerFactory) Interface {
return &group{
controllerFactory: controllerFactory,
}
}
func (g *group) V1beta1() v1beta1.Interface {
return v1beta1.New(g.controllerFactory)
}

View File

@ -0,0 +1,52 @@
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
package v1beta1
import (
v1beta1 "github.com/harvester/vm-import-controller/pkg/apis/source.harvesterhci.io/v1beta1"
"github.com/rancher/lasso/pkg/controller"
"github.com/rancher/wrangler/pkg/schemes"
"k8s.io/apimachinery/pkg/runtime/schema"
)
func init() {
schemes.Register(v1beta1.AddToScheme)
}
type Interface interface {
Openstack() OpenstackController
Vmware() VmwareController
}
func New(controllerFactory controller.SharedControllerFactory) Interface {
return &version{
controllerFactory: controllerFactory,
}
}
type version struct {
controllerFactory controller.SharedControllerFactory
}
func (c *version) Openstack() OpenstackController {
return NewOpenstackController(schema.GroupVersionKind{Group: "source.harvesterhci.io", Version: "v1beta1", Kind: "Openstack"}, "openstacks", true, c.controllerFactory)
}
func (c *version) Vmware() VmwareController {
return NewVmwareController(schema.GroupVersionKind{Group: "source.harvesterhci.io", Version: "v1beta1", Kind: "Vmware"}, "vmwares", true, c.controllerFactory)
}

View File

@ -0,0 +1,376 @@
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
package v1beta1
import (
"context"
"time"
v1beta1 "github.com/harvester/vm-import-controller/pkg/apis/source.harvesterhci.io/v1beta1"
"github.com/rancher/lasso/pkg/client"
"github.com/rancher/lasso/pkg/controller"
"github.com/rancher/wrangler/pkg/apply"
"github.com/rancher/wrangler/pkg/condition"
"github.com/rancher/wrangler/pkg/generic"
"github.com/rancher/wrangler/pkg/kv"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
)
type OpenstackHandler func(string, *v1beta1.Openstack) (*v1beta1.Openstack, error)
type OpenstackController interface {
generic.ControllerMeta
OpenstackClient
OnChange(ctx context.Context, name string, sync OpenstackHandler)
OnRemove(ctx context.Context, name string, sync OpenstackHandler)
Enqueue(namespace, name string)
EnqueueAfter(namespace, name string, duration time.Duration)
Cache() OpenstackCache
}
type OpenstackClient interface {
Create(*v1beta1.Openstack) (*v1beta1.Openstack, error)
Update(*v1beta1.Openstack) (*v1beta1.Openstack, error)
UpdateStatus(*v1beta1.Openstack) (*v1beta1.Openstack, error)
Delete(namespace, name string, options *metav1.DeleteOptions) error
Get(namespace, name string, options metav1.GetOptions) (*v1beta1.Openstack, error)
List(namespace string, opts metav1.ListOptions) (*v1beta1.OpenstackList, error)
Watch(namespace string, opts metav1.ListOptions) (watch.Interface, error)
Patch(namespace, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Openstack, err error)
}
type OpenstackCache interface {
Get(namespace, name string) (*v1beta1.Openstack, error)
List(namespace string, selector labels.Selector) ([]*v1beta1.Openstack, error)
AddIndexer(indexName string, indexer OpenstackIndexer)
GetByIndex(indexName, key string) ([]*v1beta1.Openstack, error)
}
type OpenstackIndexer func(obj *v1beta1.Openstack) ([]string, error)
type openstackController struct {
controller controller.SharedController
client *client.Client
gvk schema.GroupVersionKind
groupResource schema.GroupResource
}
func NewOpenstackController(gvk schema.GroupVersionKind, resource string, namespaced bool, controller controller.SharedControllerFactory) OpenstackController {
c := controller.ForResourceKind(gvk.GroupVersion().WithResource(resource), gvk.Kind, namespaced)
return &openstackController{
controller: c,
client: c.Client(),
gvk: gvk,
groupResource: schema.GroupResource{
Group: gvk.Group,
Resource: resource,
},
}
}
func FromOpenstackHandlerToHandler(sync OpenstackHandler) generic.Handler {
return func(key string, obj runtime.Object) (ret runtime.Object, err error) {
var v *v1beta1.Openstack
if obj == nil {
v, err = sync(key, nil)
} else {
v, err = sync(key, obj.(*v1beta1.Openstack))
}
if v == nil {
return nil, err
}
return v, err
}
}
func (c *openstackController) Updater() generic.Updater {
return func(obj runtime.Object) (runtime.Object, error) {
newObj, err := c.Update(obj.(*v1beta1.Openstack))
if newObj == nil {
return nil, err
}
return newObj, err
}
}
func UpdateOpenstackDeepCopyOnChange(client OpenstackClient, obj *v1beta1.Openstack, handler func(obj *v1beta1.Openstack) (*v1beta1.Openstack, error)) (*v1beta1.Openstack, error) {
if obj == nil {
return obj, nil
}
copyObj := obj.DeepCopy()
newObj, err := handler(copyObj)
if newObj != nil {
copyObj = newObj
}
if obj.ResourceVersion == copyObj.ResourceVersion && !equality.Semantic.DeepEqual(obj, copyObj) {
return client.Update(copyObj)
}
return copyObj, err
}
func (c *openstackController) AddGenericHandler(ctx context.Context, name string, handler generic.Handler) {
c.controller.RegisterHandler(ctx, name, controller.SharedControllerHandlerFunc(handler))
}
func (c *openstackController) AddGenericRemoveHandler(ctx context.Context, name string, handler generic.Handler) {
c.AddGenericHandler(ctx, name, generic.NewRemoveHandler(name, c.Updater(), handler))
}
func (c *openstackController) OnChange(ctx context.Context, name string, sync OpenstackHandler) {
c.AddGenericHandler(ctx, name, FromOpenstackHandlerToHandler(sync))
}
func (c *openstackController) OnRemove(ctx context.Context, name string, sync OpenstackHandler) {
c.AddGenericHandler(ctx, name, generic.NewRemoveHandler(name, c.Updater(), FromOpenstackHandlerToHandler(sync)))
}
func (c *openstackController) Enqueue(namespace, name string) {
c.controller.Enqueue(namespace, name)
}
func (c *openstackController) EnqueueAfter(namespace, name string, duration time.Duration) {
c.controller.EnqueueAfter(namespace, name, duration)
}
func (c *openstackController) Informer() cache.SharedIndexInformer {
return c.controller.Informer()
}
func (c *openstackController) GroupVersionKind() schema.GroupVersionKind {
return c.gvk
}
func (c *openstackController) Cache() OpenstackCache {
return &openstackCache{
indexer: c.Informer().GetIndexer(),
resource: c.groupResource,
}
}
func (c *openstackController) Create(obj *v1beta1.Openstack) (*v1beta1.Openstack, error) {
result := &v1beta1.Openstack{}
return result, c.client.Create(context.TODO(), obj.Namespace, obj, result, metav1.CreateOptions{})
}
func (c *openstackController) Update(obj *v1beta1.Openstack) (*v1beta1.Openstack, error) {
result := &v1beta1.Openstack{}
return result, c.client.Update(context.TODO(), obj.Namespace, obj, result, metav1.UpdateOptions{})
}
func (c *openstackController) UpdateStatus(obj *v1beta1.Openstack) (*v1beta1.Openstack, error) {
result := &v1beta1.Openstack{}
return result, c.client.UpdateStatus(context.TODO(), obj.Namespace, obj, result, metav1.UpdateOptions{})
}
func (c *openstackController) Delete(namespace, name string, options *metav1.DeleteOptions) error {
if options == nil {
options = &metav1.DeleteOptions{}
}
return c.client.Delete(context.TODO(), namespace, name, *options)
}
func (c *openstackController) Get(namespace, name string, options metav1.GetOptions) (*v1beta1.Openstack, error) {
result := &v1beta1.Openstack{}
return result, c.client.Get(context.TODO(), namespace, name, result, options)
}
func (c *openstackController) List(namespace string, opts metav1.ListOptions) (*v1beta1.OpenstackList, error) {
result := &v1beta1.OpenstackList{}
return result, c.client.List(context.TODO(), namespace, result, opts)
}
func (c *openstackController) Watch(namespace string, opts metav1.ListOptions) (watch.Interface, error) {
return c.client.Watch(context.TODO(), namespace, opts)
}
func (c *openstackController) Patch(namespace, name string, pt types.PatchType, data []byte, subresources ...string) (*v1beta1.Openstack, error) {
result := &v1beta1.Openstack{}
return result, c.client.Patch(context.TODO(), namespace, name, pt, data, result, metav1.PatchOptions{}, subresources...)
}
type openstackCache struct {
indexer cache.Indexer
resource schema.GroupResource
}
func (c *openstackCache) Get(namespace, name string) (*v1beta1.Openstack, error) {
obj, exists, err := c.indexer.GetByKey(namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(c.resource, name)
}
return obj.(*v1beta1.Openstack), nil
}
func (c *openstackCache) List(namespace string, selector labels.Selector) (ret []*v1beta1.Openstack, err error) {
err = cache.ListAllByNamespace(c.indexer, namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1beta1.Openstack))
})
return ret, err
}
func (c *openstackCache) AddIndexer(indexName string, indexer OpenstackIndexer) {
utilruntime.Must(c.indexer.AddIndexers(map[string]cache.IndexFunc{
indexName: func(obj interface{}) (strings []string, e error) {
return indexer(obj.(*v1beta1.Openstack))
},
}))
}
func (c *openstackCache) GetByIndex(indexName, key string) (result []*v1beta1.Openstack, err error) {
objs, err := c.indexer.ByIndex(indexName, key)
if err != nil {
return nil, err
}
result = make([]*v1beta1.Openstack, 0, len(objs))
for _, obj := range objs {
result = append(result, obj.(*v1beta1.Openstack))
}
return result, nil
}
type OpenstackStatusHandler func(obj *v1beta1.Openstack, status v1beta1.OpenStackStatus) (v1beta1.OpenStackStatus, error)
type OpenstackGeneratingHandler func(obj *v1beta1.Openstack, status v1beta1.OpenStackStatus) ([]runtime.Object, v1beta1.OpenStackStatus, error)
func RegisterOpenstackStatusHandler(ctx context.Context, controller OpenstackController, condition condition.Cond, name string, handler OpenstackStatusHandler) {
statusHandler := &openstackStatusHandler{
client: controller,
condition: condition,
handler: handler,
}
controller.AddGenericHandler(ctx, name, FromOpenstackHandlerToHandler(statusHandler.sync))
}
func RegisterOpenstackGeneratingHandler(ctx context.Context, controller OpenstackController, apply apply.Apply,
condition condition.Cond, name string, handler OpenstackGeneratingHandler, opts *generic.GeneratingHandlerOptions) {
statusHandler := &openstackGeneratingHandler{
OpenstackGeneratingHandler: handler,
apply: apply,
name: name,
gvk: controller.GroupVersionKind(),
}
if opts != nil {
statusHandler.opts = *opts
}
controller.OnChange(ctx, name, statusHandler.Remove)
RegisterOpenstackStatusHandler(ctx, controller, condition, name, statusHandler.Handle)
}
type openstackStatusHandler struct {
client OpenstackClient
condition condition.Cond
handler OpenstackStatusHandler
}
func (a *openstackStatusHandler) sync(key string, obj *v1beta1.Openstack) (*v1beta1.Openstack, error) {
if obj == nil {
return obj, nil
}
origStatus := obj.Status.DeepCopy()
obj = obj.DeepCopy()
newStatus, err := a.handler(obj, obj.Status)
if err != nil {
// Revert to old status on error
newStatus = *origStatus.DeepCopy()
}
if a.condition != "" {
if errors.IsConflict(err) {
a.condition.SetError(&newStatus, "", nil)
} else {
a.condition.SetError(&newStatus, "", err)
}
}
if !equality.Semantic.DeepEqual(origStatus, &newStatus) {
if a.condition != "" {
// Since status has changed, update the lastUpdatedTime
a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339))
}
var newErr error
obj.Status = newStatus
newObj, newErr := a.client.UpdateStatus(obj)
if err == nil {
err = newErr
}
if newErr == nil {
obj = newObj
}
}
return obj, err
}
type openstackGeneratingHandler struct {
OpenstackGeneratingHandler
apply apply.Apply
opts generic.GeneratingHandlerOptions
gvk schema.GroupVersionKind
name string
}
func (a *openstackGeneratingHandler) Remove(key string, obj *v1beta1.Openstack) (*v1beta1.Openstack, error) {
if obj != nil {
return obj, nil
}
obj = &v1beta1.Openstack{}
obj.Namespace, obj.Name = kv.RSplit(key, "/")
obj.SetGroupVersionKind(a.gvk)
return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts).
WithOwner(obj).
WithSetID(a.name).
ApplyObjects()
}
func (a *openstackGeneratingHandler) Handle(obj *v1beta1.Openstack, status v1beta1.OpenStackStatus) (v1beta1.OpenStackStatus, error) {
if !obj.DeletionTimestamp.IsZero() {
return status, nil
}
objs, newStatus, err := a.OpenstackGeneratingHandler(obj, status)
if err != nil {
return newStatus, err
}
return newStatus, generic.ConfigureApplyForObject(a.apply, obj, &a.opts).
WithOwner(obj).
WithSetID(a.name).
ApplyObjects(objs...)
}

View File

@ -0,0 +1,376 @@
/*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
package v1beta1
import (
"context"
"time"
v1beta1 "github.com/harvester/vm-import-controller/pkg/apis/source.harvesterhci.io/v1beta1"
"github.com/rancher/lasso/pkg/client"
"github.com/rancher/lasso/pkg/controller"
"github.com/rancher/wrangler/pkg/apply"
"github.com/rancher/wrangler/pkg/condition"
"github.com/rancher/wrangler/pkg/generic"
"github.com/rancher/wrangler/pkg/kv"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
)
type VmwareHandler func(string, *v1beta1.Vmware) (*v1beta1.Vmware, error)
type VmwareController interface {
generic.ControllerMeta
VmwareClient
OnChange(ctx context.Context, name string, sync VmwareHandler)
OnRemove(ctx context.Context, name string, sync VmwareHandler)
Enqueue(namespace, name string)
EnqueueAfter(namespace, name string, duration time.Duration)
Cache() VmwareCache
}
type VmwareClient interface {
Create(*v1beta1.Vmware) (*v1beta1.Vmware, error)
Update(*v1beta1.Vmware) (*v1beta1.Vmware, error)
UpdateStatus(*v1beta1.Vmware) (*v1beta1.Vmware, error)
Delete(namespace, name string, options *metav1.DeleteOptions) error
Get(namespace, name string, options metav1.GetOptions) (*v1beta1.Vmware, error)
List(namespace string, opts metav1.ListOptions) (*v1beta1.VmwareList, error)
Watch(namespace string, opts metav1.ListOptions) (watch.Interface, error)
Patch(namespace, name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Vmware, err error)
}
type VmwareCache interface {
Get(namespace, name string) (*v1beta1.Vmware, error)
List(namespace string, selector labels.Selector) ([]*v1beta1.Vmware, error)
AddIndexer(indexName string, indexer VmwareIndexer)
GetByIndex(indexName, key string) ([]*v1beta1.Vmware, error)
}
type VmwareIndexer func(obj *v1beta1.Vmware) ([]string, error)
type vmwareController struct {
controller controller.SharedController
client *client.Client
gvk schema.GroupVersionKind
groupResource schema.GroupResource
}
func NewVmwareController(gvk schema.GroupVersionKind, resource string, namespaced bool, controller controller.SharedControllerFactory) VmwareController {
c := controller.ForResourceKind(gvk.GroupVersion().WithResource(resource), gvk.Kind, namespaced)
return &vmwareController{
controller: c,
client: c.Client(),
gvk: gvk,
groupResource: schema.GroupResource{
Group: gvk.Group,
Resource: resource,
},
}
}
func FromVmwareHandlerToHandler(sync VmwareHandler) generic.Handler {
return func(key string, obj runtime.Object) (ret runtime.Object, err error) {
var v *v1beta1.Vmware
if obj == nil {
v, err = sync(key, nil)
} else {
v, err = sync(key, obj.(*v1beta1.Vmware))
}
if v == nil {
return nil, err
}
return v, err
}
}
func (c *vmwareController) Updater() generic.Updater {
return func(obj runtime.Object) (runtime.Object, error) {
newObj, err := c.Update(obj.(*v1beta1.Vmware))
if newObj == nil {
return nil, err
}
return newObj, err
}
}
func UpdateVmwareDeepCopyOnChange(client VmwareClient, obj *v1beta1.Vmware, handler func(obj *v1beta1.Vmware) (*v1beta1.Vmware, error)) (*v1beta1.Vmware, error) {
if obj == nil {
return obj, nil
}
copyObj := obj.DeepCopy()
newObj, err := handler(copyObj)
if newObj != nil {
copyObj = newObj
}
if obj.ResourceVersion == copyObj.ResourceVersion && !equality.Semantic.DeepEqual(obj, copyObj) {
return client.Update(copyObj)
}
return copyObj, err
}
func (c *vmwareController) AddGenericHandler(ctx context.Context, name string, handler generic.Handler) {
c.controller.RegisterHandler(ctx, name, controller.SharedControllerHandlerFunc(handler))
}
func (c *vmwareController) AddGenericRemoveHandler(ctx context.Context, name string, handler generic.Handler) {
c.AddGenericHandler(ctx, name, generic.NewRemoveHandler(name, c.Updater(), handler))
}
func (c *vmwareController) OnChange(ctx context.Context, name string, sync VmwareHandler) {
c.AddGenericHandler(ctx, name, FromVmwareHandlerToHandler(sync))
}
func (c *vmwareController) OnRemove(ctx context.Context, name string, sync VmwareHandler) {
c.AddGenericHandler(ctx, name, generic.NewRemoveHandler(name, c.Updater(), FromVmwareHandlerToHandler(sync)))
}
func (c *vmwareController) Enqueue(namespace, name string) {
c.controller.Enqueue(namespace, name)
}
func (c *vmwareController) EnqueueAfter(namespace, name string, duration time.Duration) {
c.controller.EnqueueAfter(namespace, name, duration)
}
func (c *vmwareController) Informer() cache.SharedIndexInformer {
return c.controller.Informer()
}
func (c *vmwareController) GroupVersionKind() schema.GroupVersionKind {
return c.gvk
}
func (c *vmwareController) Cache() VmwareCache {
return &vmwareCache{
indexer: c.Informer().GetIndexer(),
resource: c.groupResource,
}
}
func (c *vmwareController) Create(obj *v1beta1.Vmware) (*v1beta1.Vmware, error) {
result := &v1beta1.Vmware{}
return result, c.client.Create(context.TODO(), obj.Namespace, obj, result, metav1.CreateOptions{})
}
func (c *vmwareController) Update(obj *v1beta1.Vmware) (*v1beta1.Vmware, error) {
result := &v1beta1.Vmware{}
return result, c.client.Update(context.TODO(), obj.Namespace, obj, result, metav1.UpdateOptions{})
}
func (c *vmwareController) UpdateStatus(obj *v1beta1.Vmware) (*v1beta1.Vmware, error) {
result := &v1beta1.Vmware{}
return result, c.client.UpdateStatus(context.TODO(), obj.Namespace, obj, result, metav1.UpdateOptions{})
}
func (c *vmwareController) Delete(namespace, name string, options *metav1.DeleteOptions) error {
if options == nil {
options = &metav1.DeleteOptions{}
}
return c.client.Delete(context.TODO(), namespace, name, *options)
}
func (c *vmwareController) Get(namespace, name string, options metav1.GetOptions) (*v1beta1.Vmware, error) {
result := &v1beta1.Vmware{}
return result, c.client.Get(context.TODO(), namespace, name, result, options)
}
func (c *vmwareController) List(namespace string, opts metav1.ListOptions) (*v1beta1.VmwareList, error) {
result := &v1beta1.VmwareList{}
return result, c.client.List(context.TODO(), namespace, result, opts)
}
func (c *vmwareController) Watch(namespace string, opts metav1.ListOptions) (watch.Interface, error) {
return c.client.Watch(context.TODO(), namespace, opts)
}
func (c *vmwareController) Patch(namespace, name string, pt types.PatchType, data []byte, subresources ...string) (*v1beta1.Vmware, error) {
result := &v1beta1.Vmware{}
return result, c.client.Patch(context.TODO(), namespace, name, pt, data, result, metav1.PatchOptions{}, subresources...)
}
type vmwareCache struct {
indexer cache.Indexer
resource schema.GroupResource
}
func (c *vmwareCache) Get(namespace, name string) (*v1beta1.Vmware, error) {
obj, exists, err := c.indexer.GetByKey(namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(c.resource, name)
}
return obj.(*v1beta1.Vmware), nil
}
func (c *vmwareCache) List(namespace string, selector labels.Selector) (ret []*v1beta1.Vmware, err error) {
err = cache.ListAllByNamespace(c.indexer, namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1beta1.Vmware))
})
return ret, err
}
func (c *vmwareCache) AddIndexer(indexName string, indexer VmwareIndexer) {
utilruntime.Must(c.indexer.AddIndexers(map[string]cache.IndexFunc{
indexName: func(obj interface{}) (strings []string, e error) {
return indexer(obj.(*v1beta1.Vmware))
},
}))
}
func (c *vmwareCache) GetByIndex(indexName, key string) (result []*v1beta1.Vmware, err error) {
objs, err := c.indexer.ByIndex(indexName, key)
if err != nil {
return nil, err
}
result = make([]*v1beta1.Vmware, 0, len(objs))
for _, obj := range objs {
result = append(result, obj.(*v1beta1.Vmware))
}
return result, nil
}
type VmwareStatusHandler func(obj *v1beta1.Vmware, status v1beta1.VmwareClusterStatus) (v1beta1.VmwareClusterStatus, error)
type VmwareGeneratingHandler func(obj *v1beta1.Vmware, status v1beta1.VmwareClusterStatus) ([]runtime.Object, v1beta1.VmwareClusterStatus, error)
func RegisterVmwareStatusHandler(ctx context.Context, controller VmwareController, condition condition.Cond, name string, handler VmwareStatusHandler) {
statusHandler := &vmwareStatusHandler{
client: controller,
condition: condition,
handler: handler,
}
controller.AddGenericHandler(ctx, name, FromVmwareHandlerToHandler(statusHandler.sync))
}
func RegisterVmwareGeneratingHandler(ctx context.Context, controller VmwareController, apply apply.Apply,
condition condition.Cond, name string, handler VmwareGeneratingHandler, opts *generic.GeneratingHandlerOptions) {
statusHandler := &vmwareGeneratingHandler{
VmwareGeneratingHandler: handler,
apply: apply,
name: name,
gvk: controller.GroupVersionKind(),
}
if opts != nil {
statusHandler.opts = *opts
}
controller.OnChange(ctx, name, statusHandler.Remove)
RegisterVmwareStatusHandler(ctx, controller, condition, name, statusHandler.Handle)
}
type vmwareStatusHandler struct {
client VmwareClient
condition condition.Cond
handler VmwareStatusHandler
}
func (a *vmwareStatusHandler) sync(key string, obj *v1beta1.Vmware) (*v1beta1.Vmware, error) {
if obj == nil {
return obj, nil
}
origStatus := obj.Status.DeepCopy()
obj = obj.DeepCopy()
newStatus, err := a.handler(obj, obj.Status)
if err != nil {
// Revert to old status on error
newStatus = *origStatus.DeepCopy()
}
if a.condition != "" {
if errors.IsConflict(err) {
a.condition.SetError(&newStatus, "", nil)
} else {
a.condition.SetError(&newStatus, "", err)
}
}
if !equality.Semantic.DeepEqual(origStatus, &newStatus) {
if a.condition != "" {
// Since status has changed, update the lastUpdatedTime
a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339))
}
var newErr error
obj.Status = newStatus
newObj, newErr := a.client.UpdateStatus(obj)
if err == nil {
err = newErr
}
if newErr == nil {
obj = newObj
}
}
return obj, err
}
type vmwareGeneratingHandler struct {
VmwareGeneratingHandler
apply apply.Apply
opts generic.GeneratingHandlerOptions
gvk schema.GroupVersionKind
name string
}
func (a *vmwareGeneratingHandler) Remove(key string, obj *v1beta1.Vmware) (*v1beta1.Vmware, error) {
if obj != nil {
return obj, nil
}
obj = &v1beta1.Vmware{}
obj.Namespace, obj.Name = kv.RSplit(key, "/")
obj.SetGroupVersionKind(a.gvk)
return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts).
WithOwner(obj).
WithSetID(a.name).
ApplyObjects()
}
func (a *vmwareGeneratingHandler) Handle(obj *v1beta1.Vmware, status v1beta1.VmwareClusterStatus) (v1beta1.VmwareClusterStatus, error) {
if !obj.DeletionTimestamp.IsZero() {
return status, nil
}
objs, newStatus, err := a.VmwareGeneratingHandler(obj, status)
if err != nil {
return newStatus, err
}
return newStatus, generic.ConfigureApplyForObject(a.apply, obj, &a.opts).
WithOwner(obj).
WithSetID(a.name).
ApplyObjects(objs...)
}

26
pkg/qemu/gemu_test.go Normal file
View File

@ -0,0 +1,26 @@
package qemu
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
)
func Test_ConvertVMDKToRaw(t *testing.T) {
assert := require.New(t)
tmpDir, err := ioutil.TempDir("/tmp", "disk-test")
assert.NoError(err, "expected no error during creation of tmpDir")
defer os.RemoveAll(tmpDir)
tmpVMDK := filepath.Join(tmpDir, "vmdktest.vmdk")
err = createVMDK(tmpVMDK, "512M")
assert.NoError(err, "expected no error during tmp vmdk creation")
destRaw := filepath.Join(tmpDir, "vmdktest.img")
err = ConvertVMDKtoRAW(tmpVMDK, destRaw)
assert.NoError(err, "expected no error during disk conversion")
f, err := os.Stat(destRaw)
assert.NoError(err, "expected no error during check for raw file")
assert.NotNil(f, "expect file to be not nil")
}

19
pkg/qemu/qemu.go Normal file
View File

@ -0,0 +1,19 @@
package qemu
import (
"os/exec"
)
const defaultCommand = "qemu-img"
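// ConvertVMDKtoRAW shells out to qemu-img to convert a VMDK disk image into a raw image at the target path.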
func ConvertVMDKtoRAW(source, target string) error {
args := []string{"convert", "-f", "vmdk", "-O", "raw", source, target}
cmd := exec.Command(defaultCommand, args...)
return cmd.Run()
}
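// createVMDK generates an empty VMDK of the given size via qemu-img; used by tests to produce a sample disk.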
func createVMDK(path string, size string) error {
args := []string{"create", "-f", "vmdk", path, size}
cmd := exec.Command(defaultCommand, args...)
return cmd.Run()
}

66
pkg/server/server.go Normal file
View File

@ -0,0 +1,66 @@
package server
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"os"
"golang.org/x/sync/errgroup"
)
const defaultPort = 8080
var tmpDir string
func NewServer(ctx context.Context) error {
var err error
tmpDir, err = createTmpDir()
if err != nil {
return err
}
return newServer(ctx, tmpDir)
}
func newServer(ctx context.Context, path string) error {
defer os.RemoveAll(path)
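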
srv := http.Server{
Addr: fmt.Sprintf(":%d", defaultPort),
Handler: http.FileServer(http.Dir(path)),
}
eg, _ := errgroup.WithContext(ctx)
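// serve files from the tmp dir until the context is cancelled, then shut the server down gracefully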
eg.Go(func() error {
return srv.ListenAndServe()
})
eg.Go(func() error {
<-ctx.Done()
return srv.Shutdown(ctx)
})
return eg.Wait()
}
func createTmpDir() (string, error) {
return ioutil.TempDir("/tmp", "vm-import-controller-")
}
func DefaultPort() int {
return defaultPort
}
func TempDir() string {
return tmpDir
}
// Address returns the address used to build the vm-import download URL. For local testing, set the env variable
// SVC_ADDRESS to point to a local endpoint
func Address() string {
address := "vm-import-controller.harvester-system.svc"
if val := os.Getenv("SVC_ADDRESS"); val != "" {
address = val
}
return address
}
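
To make the wiring concrete, the sketch below shows how a converted disk dropped into server.TempDir() is addressed over this file server, mirroring the URL convention quoted in the vmware client comments further down. The imageURL helper and the disk name are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/harvester/vm-import-controller/pkg/server"
)

// imageURL is an illustrative helper: a raw disk written to server.TempDir()
// under the given name is served by the embedded file server at this address.
func imageURL(name string) string {
	return fmt.Sprintf("http://%s:%d/%s", server.Address(), server.DefaultPort(), name)
}

func main() {
	// e.g. a converted disk placed in server.TempDir() as "alpine-export-test-default-disk-0.img"
	fmt.Println(imageURL("alpine-export-test-default-disk-0.img"))
}
```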

37
pkg/server/server_test.go Normal file
View File

@ -0,0 +1,37 @@
package server
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func Test_NewServer(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
assert := require.New(t)
var err error
tmpDir, err = createTmpDir()
assert.NoError(err, "expected no error during creation of tmp dir")
go func() {
err = newServer(ctx, tmpDir)
assert.Contains(err.Error(), "context canceled", "error occurred during shutdown") // the only expected error is context canceled
}()
time.Sleep(1 * time.Second)
f, err := ioutil.TempFile(TempDir(), "sample")
assert.NoError(err, "expect no error during creation of tmp file")
_, relative := filepath.Split(f.Name())
resp, err := http.Get(fmt.Sprintf("http://localhost:%d/%s", defaultPort, relative))
assert.NoError(err, "expect no error during http call")
assert.Equal(resp.StatusCode, 200, "expected http response code to be 200")
cancel()
time.Sleep(5 * time.Second)
_, err = os.Stat(f.Name())
assert.True(os.IsNotExist(err), "expected no file to be found")
}

View File

@ -0,0 +1,33 @@
package openstack
import (
"context"
importjob "github.com/harvester/vm-import-controller/pkg/apis/importjob.harvesterhci.io/v1beta1"
corev1 "k8s.io/api/core/v1"
kubevirt "kubevirt.io/api/core/v1"
)
type Client struct {
}
func NewClient(ctx context.Context, endpoint string, dc string, secret *corev1.Secret) (*Client, error) {
return nil, nil
}
func (c *Client) ExportVirtualMachine(vm *importjob.VirtualMachine) error {
return nil
}
func (c *Client) PowerOffVirtualMachine(vm *importjob.VirtualMachine) error {
return nil
}
func (c *Client) IsPoweredOff(vm *importjob.VirtualMachine) (bool, error) {
return false, nil
}
func (c *Client) GenerateVirtualMachine(vm *importjob.VirtualMachine) (*kubevirt.VirtualMachine, error) {
return nil, nil
}

391
pkg/source/vmware/client.go Normal file
View File

@ -0,0 +1,391 @@
package vmware
import (
"context"
"fmt"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"strings"
"k8s.io/apimachinery/pkg/api/resource"
importjob "github.com/harvester/vm-import-controller/pkg/apis/importjob.harvesterhci.io/v1beta1"
"github.com/harvester/vm-import-controller/pkg/qemu"
"github.com/harvester/vm-import-controller/pkg/server"
"github.com/sirupsen/logrus"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/session"
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubevirt "kubevirt.io/api/core/v1"
)
type Client struct {
ctx context.Context
*govmomi.Client
tmpCerts string
dc string
}
func NewClient(ctx context.Context, endpoint string, dc string, secret *corev1.Secret) (*Client, error) {
var insecure bool
username, ok := secret.Data["username"]
if !ok {
return nil, fmt.Errorf("no key username found in secret %s", secret.Name)
}
password, ok := secret.Data["password"]
if !ok {
return nil, fmt.Errorf("no key password found in the secret %s", secret.Name)
}
caCert, ok := secret.Data["caCert"]
if !ok {
insecure = true
}
endpointUrl, err := url.Parse(endpoint)
if err != nil {
return nil, fmt.Errorf("error parsing endpoint url: %v", err)
}
sc := soap.NewClient(endpointUrl, insecure)
vmwareClient := &Client{}
if !insecure {
tmpFile, err := ioutil.TempFile("/tmp", "vmware-ca-")
if err != nil {
return nil, fmt.Errorf("error creating tmp file for vmware ca certs: %v", err)
}
_, err = tmpFile.Write(caCert)
if err != nil {
return nil, fmt.Errorf("error writing ca cert to tmp file %s: %v", tmpFile.Name(), err)
}
sc.SetRootCAs(tmpFile.Name())
vmwareClient.tmpCerts = tmpFile.Name()
}
vc, err := vim25.NewClient(ctx, sc)
if err != nil {
return nil, fmt.Errorf("error creating vim client: %v", err)
}
c := &govmomi.Client{
Client: vc,
SessionManager: session.NewManager(vc),
}
err = c.Login(ctx, url.UserPassword(string(username), string(password)))
if err != nil {
return nil, fmt.Errorf("error during login :%v", err)
}
vmwareClient.ctx = ctx
vmwareClient.Client = c
vmwareClient.dc = dc
return vmwareClient, nil
}
func (c *Client) Close() error {
c.Client.CloseIdleConnections()
err := c.Client.Logout(c.ctx)
if err != nil {
return err
}
return os.Remove(c.tmpCerts)
}
// Verify is a verification check for the source provider to ensure that the config is valid;
// it is used to set the Ready condition on the source provider.
// For the vmware client we verify that the specified datacenter (dc) exists
func (c *Client) Verify() error {
f := find.NewFinder(c.Client.Client, true)
dc := c.dc
if !strings.HasPrefix(c.dc, "/") {
dc = fmt.Sprintf("/%s", c.dc)
}
dcObj, err := f.Datacenter(c.ctx, dc)
if err != nil {
return err
}
logrus.Infof("found dc: %v", dcObj)
return nil
}
func (c *Client) ExportVirtualMachine(vm *importjob.VirtualMachine) error {
tmpPath, err := ioutil.TempDir("/tmp", fmt.Sprintf("%s-%s-", vm.Name, vm.Namespace))
if err != nil {
return fmt.Errorf("error creating tmp dir for vmexport: %v", err)
}
vmObj, err := c.findVM(vm.Spec.Folder, vm.Spec.VirtualMachineName)
if err != nil {
return fmt.Errorf("error finding vm in ExportVirtualMacine: %v", err)
}
lease, err := vmObj.Export(c.ctx)
if err != nil {
return fmt.Errorf("error generate export lease in ExportVirtualMachine: %v", err)
}
info, err := lease.Wait(c.ctx, nil)
if err != nil {
return err
}
u := lease.StartUpdater(c.ctx, info)
defer u.Done()
for _, i := range info.Items {
// ignore iso and nvram disks
if strings.HasSuffix(i.Path, ".vmdk") {
if !strings.HasPrefix(i.Path, vm.Spec.VirtualMachineName) {
i.Path = vm.Name + "-" + vm.Namespace + "-" + i.Path
}
exportPath := filepath.Join(tmpPath, i.Path)
err = lease.DownloadFile(c.ctx, exportPath, i, soap.DefaultDownload)
if err != nil {
return err
}
vm.Status.DiskImportStatus = append(vm.Status.DiskImportStatus, importjob.DiskInfo{
Name: i.Path,
DiskSize: i.Size,
})
}
}
// disk info will contain the names of the disks, including the format suffix ".vmdk";
// once the disks are converted this needs to be updated to ".img"
// spec for how download_url is generated
// Spec: harvesterv1beta1.VirtualMachineImageSpec{
// DisplayName: fmt.Sprintf("vm-import-%s-%s", vm.Name, d.Name),
// URL: fmt.Sprintf("http://%s:%d/%s.img", server.Address(), server.DefaultPort(), d.Name),
// },
// qemu conversion to raw image file
// converted disks need to be placed in server.TempDir(), from where they will be served
for i, d := range vm.Status.DiskImportStatus {
sourceFile := filepath.Join(tmpPath, d.Name)
rawDiskName := strings.Split(d.Name, ".vmdk")[0] + ".img"
destFile := filepath.Join(server.TempDir(), rawDiskName)
err := qemu.ConvertVMDKtoRAW(sourceFile, destFile)
if err != nil {
return err
}
// update fields to reflect final location of raw image file
vm.Status.DiskImportStatus[i].DiskLocalPath = server.TempDir()
vm.Status.DiskImportStatus[i].Name = rawDiskName
}
return os.RemoveAll(tmpPath)
}
func (c *Client) PowerOffVirtualMachine(vm *importjob.VirtualMachine) error {
vmObj, err := c.findVM(vm.Spec.Folder, vm.Spec.VirtualMachineName)
if err != nil {
return fmt.Errorf("error finding vm in PowerOffVirtualMachine: %v", err)
}
_, err = vmObj.PowerOff(c.ctx)
return err
}
func (c *Client) IsPoweredOff(vm *importjob.VirtualMachine) (bool, error) {
vmObj, err := c.findVM(vm.Spec.Folder, vm.Spec.VirtualMachineName)
if err != nil {
return false, fmt.Errorf("error find VM in IsPoweredOff :%v", err)
}
state, err := vmObj.PowerState(c.ctx)
if err != nil {
return false, fmt.Errorf("error looking up powerstate: %v", err)
}
if state == types.VirtualMachinePowerStatePoweredOff {
return true, nil
}
return false, nil
}
func (c *Client) GenerateVirtualMachine(vm *importjob.VirtualMachine) (*kubevirt.VirtualMachine, error) {
vmObj, err := c.findVM(vm.Spec.Folder, vm.Spec.VirtualMachineName)
if err != nil {
return nil, fmt.Errorf("error quering vm in GenerateVirtualMachine: %v", err)
}
newVM := &kubevirt.VirtualMachine{
ObjectMeta: metav1.ObjectMeta{
Name: vm.Spec.VirtualMachineName,
Namespace: vm.Namespace,
},
}
var o mo.VirtualMachine
err = vmObj.Properties(c.ctx, vmObj.Reference(), []string{}, &o)
if err != nil {
return nil, err
}
// Need CPU, Socket, Memory, VirtualNIC information to perform the mapping
networkInfo := identifyNetworkCards(o.Config.Hardware.Device)
vmSpec := kubevirt.VirtualMachineSpec{
RunStrategy: &[]kubevirt.VirtualMachineRunStrategy{kubevirt.RunStrategyRerunOnFailure}[0],
Template: &kubevirt.VirtualMachineInstanceTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"harvesterhci.io/vmName": vm.Spec.VirtualMachineName,
},
},
Spec: kubevirt.VirtualMachineInstanceSpec{
Domain: kubevirt.DomainSpec{
CPU: &kubevirt.CPU{
Cores: uint32(o.Config.Hardware.NumCPU),
Sockets: uint32(o.Config.Hardware.NumCoresPerSocket),
Threads: 1,
},
Memory: &kubevirt.Memory{
Guest: &[]resource.Quantity{resource.MustParse(fmt.Sprintf("%dM", o.Config.Hardware.MemoryMB))}[0],
},
Resources: kubevirt.ResourceRequirements{
Limits: corev1.ResourceList{
corev1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dM", o.Config.Hardware.MemoryMB)),
corev1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", o.Config.Hardware.NumCPU)),
},
},
},
},
},
}
var networkConfig []kubevirt.Network
mappedNetwork := mapNetworkCards(networkInfo, vm.Spec.Mapping)
for i, v := range mappedNetwork {
networkConfig = append(networkConfig, kubevirt.Network{
NetworkSource: kubevirt.NetworkSource{
Multus: &kubevirt.MultusNetwork{
NetworkName: v.MappedNetwork,
},
},
Name: fmt.Sprintf("migrated-%d", i),
})
}
var interfaces []kubevirt.Interface
for i, v := range mappedNetwork {
interfaces = append(interfaces, kubevirt.Interface{
Name: fmt.Sprintf("migrated-%d", i),
MacAddress: v.MAC,
Model: "virtio",
InterfaceBindingMethod: kubevirt.InterfaceBindingMethod{
Bridge: &kubevirt.InterfaceBridge{},
},
})
}
// if there is no mapped network, attach to the pod network; this is essential for the VM to boot up
if len(networkConfig) == 0 {
networkConfig = append(networkConfig, kubevirt.Network{
Name: "pod-network",
NetworkSource: kubevirt.NetworkSource{
Pod: &kubevirt.PodNetwork{},
},
})
interfaces = append(interfaces, kubevirt.Interface{
Name: "pod-network",
Model: "virtio",
InterfaceBindingMethod: kubevirt.InterfaceBindingMethod{
Masquerade: &kubevirt.InterfaceMasquerade{},
},
})
}
vmSpec.Template.Spec.Networks = networkConfig
vmSpec.Template.Spec.Domain.Devices.Interfaces = interfaces
newVM.Spec = vmSpec
// disk attachment needs the core controller to be queried for storage classes, so disks are attached by the importjob controller
return newVM, nil
}
func (c *Client) findVM(path, name string) (*object.VirtualMachine, error) {
f := find.NewFinder(c.Client.Client, true)
dc := c.dc
if !strings.HasPrefix(c.dc, "/") {
dc = fmt.Sprintf("/%s", c.dc)
}
vmPath := filepath.Join(dc, "/vm", path, name)
return f.VirtualMachine(c.ctx, vmPath)
}
type networkInfo struct {
NetworkName string
MAC string
MappedNetwork string
}
func identifyNetworkCards(devices []types.BaseVirtualDevice) []networkInfo {
var resp []networkInfo
for _, d := range devices {
switch obj := d.(type) {
case *types.VirtualVmxnet:
resp = append(resp, networkInfo{
NetworkName: obj.DeviceInfo.GetDescription().Summary,
MAC: obj.MacAddress,
})
case *types.VirtualE1000e:
resp = append(resp, networkInfo{
NetworkName: obj.DeviceInfo.GetDescription().Summary,
MAC: obj.MacAddress,
})
case *types.VirtualE1000:
resp = append(resp, networkInfo{
NetworkName: obj.DeviceInfo.GetDescription().Summary,
MAC: obj.MacAddress,
})
case *types.VirtualVmxnet3:
resp = append(resp, networkInfo{
NetworkName: obj.DeviceInfo.GetDescription().Summary,
MAC: obj.MacAddress,
})
case *types.VirtualVmxnet2:
resp = append(resp, networkInfo{
NetworkName: obj.DeviceInfo.GetDescription().Summary,
MAC: obj.MacAddress,
})
}
}
return resp
}
func mapNetworkCards(networkCards []networkInfo, mapping []importjob.NetworkMapping) []networkInfo {
var retNetwork []networkInfo
for _, nc := range networkCards {
for _, m := range mapping {
if m.SourceNetwork == nc.NetworkName {
nc.MappedNetwork = m.DestinationNetwork
retNetwork = append(retNetwork, nc)
}
}
}
return retNetwork
}
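
To make the mapping semantics concrete: only source NICs whose network name matches a sourceNetwork entry are returned, each annotated with its destination network, and unmatched cards are dropped (GenerateVirtualMachine then falls back to the pod network when nothing matches). A small sketch, assumed to live in this vmware package since mapNetworkCards is unexported; the card and mapping values are made up:

```go
package vmware

import (
	importjob "github.com/harvester/vm-import-controller/pkg/apis/importjob.harvesterhci.io/v1beta1"
)

// exampleMapping is illustrative only: the first card matches the mapping and is
// returned with MappedNetwork set to "default/vlan1"; the second card is dropped.
func exampleMapping() []networkInfo {
	cards := []networkInfo{
		{NetworkName: "dvSwitch 1", MAC: "00:50:56:aa:bb:cc"},
		{NetworkName: "dvSwitch 2", MAC: "00:50:56:dd:ee:ff"},
	}
	mapping := []importjob.NetworkMapping{
		{SourceNetwork: "dvSwitch 1", DestinationNetwork: "default/vlan1"},
	}
	return mapNetworkCards(cards, mapping)
}
```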

View File

@ -0,0 +1,297 @@
package vmware
import (
"context"
"fmt"
"log"
"os"
"testing"
"time"
importjob "github.com/harvester/vm-import-controller/pkg/apis/importjob.harvesterhci.io/v1beta1"
"github.com/harvester/vm-import-controller/pkg/server"
"github.com/ory/dockertest/v3"
"github.com/stretchr/testify/require"
"github.com/vmware/govmomi/vim25/mo"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var vcsimPort string
// setup mock vmware endpoint
func TestMain(t *testing.M) {
pool, err := dockertest.NewPool("")
if err != nil {
log.Fatalf("error connecting to dockerd: %v", err)
}
runOpts := &dockertest.RunOptions{
Name: "vcsim",
Repository: "vmware/vcsim",
Tag: "v0.29.0",
}
vcsimMock, err := pool.RunWithOptions(runOpts)
if err != nil {
log.Fatalf("error creating vcsim container: %v", err)
}
vcsimPort = vcsimMock.GetPort("8989/tcp")
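// give the vcsim container time to come up before the tests dial it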
time.Sleep(30 * time.Second)
go func() {
server.NewServer(context.TODO())
}()
code := t.Run()
if err := pool.Purge(vcsimMock); err != nil {
log.Fatalf("error purging vcsimMock container: %v", err)
}
os.Exit(code)
}
func Test_NewClient(t *testing.T) {
ctx := context.TODO()
endpoint := fmt.Sprintf("https://localhost:%s/sdk", vcsimPort)
dc := "DC0"
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
},
Data: map[string][]byte{
"username": []byte("user"),
"password": []byte("pass"),
},
}
c, err := NewClient(ctx, endpoint, dc, secret)
assert := require.New(t)
assert.NoError(err, "expected no error during creation of client")
err = c.Verify()
assert.NoError(err, "expected no error during verification of client")
}
func Test_PowerOffVirtualMachine(t *testing.T) {
ctx := context.TODO()
endpoint := fmt.Sprintf("https://localhost:%s/sdk", vcsimPort)
dc := "DC0"
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
},
Data: map[string][]byte{
"username": []byte("user"),
"password": []byte("pass"),
},
}
c, err := NewClient(ctx, endpoint, dc, secret)
assert := require.New(t)
assert.NoError(err, "expected no error during creation of client")
err = c.Verify()
assert.NoError(err, "expected no error during verification of client")
vm := &importjob.VirtualMachine{
ObjectMeta: metav1.ObjectMeta{
Name: "demo",
Namespace: "default",
},
Spec: importjob.VirtualMachineImportSpec{
SourceCluster: corev1.ObjectReference{},
VirtualMachineName: "DC0_H0_VM0",
},
}
err = c.PowerOffVirtualMachine(vm)
assert.NoError(err, "expected no error during VM power off")
}
func Test_IsPoweredOff(t *testing.T) {
ctx := context.TODO()
endpoint := fmt.Sprintf("https://localhost:%s/sdk", vcsimPort)
dc := "DC0"
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
},
Data: map[string][]byte{
"username": []byte("user"),
"password": []byte("pass"),
},
}
c, err := NewClient(ctx, endpoint, dc, secret)
assert := require.New(t)
assert.NoError(err, "expected no error during creation of client")
err = c.Verify()
assert.NoError(err, "expected no error during verification of client")
vm := &importjob.VirtualMachine{
ObjectMeta: metav1.ObjectMeta{
Name: "demo",
Namespace: "default",
},
Spec: importjob.VirtualMachineImportSpec{
SourceCluster: corev1.ObjectReference{},
VirtualMachineName: "DC0_H0_VM0",
},
}
ok, err := c.IsPoweredOff(vm)
assert.NoError(err, "expected no error during check for power status")
assert.True(ok, "expected machine to be powered off")
}
// Test_ExportVirtualMachine needs to reference a real vCenter, as vcsim doesn't support the OVF export functionality
func Test_ExportVirtualMachine(t *testing.T) {
ctx := context.TODO()
assert := require.New(t)
govc_url := os.Getenv("GOVC_URL")
assert.NotEmpty(govc_url, "expected govc_url to be set")
govc_datacenter := os.Getenv("GOVC_DATACENTER")
assert.NotEmpty(govc_datacenter, "expected govc_datacenter to be set")
data := make(map[string]string)
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
},
}
govc_username := os.Getenv("GOVC_USERNAME")
assert.NotEmpty(govc_username, "expected govc_username to be set")
data["username"] = govc_username
govc_password := os.Getenv("GOVC_PASSWORD")
assert.NotEmpty(govc_password, "expected govc_password to be set")
data["password"] = govc_password
secret.StringData = data
vm_name := os.Getenv("VM_NAME")
assert.NotEmpty(vm_name, "expected vm_name to be set")
c, err := NewClient(ctx, govc_url, govc_datacenter, secret)
assert.NoError(err, "expected no error during creation of client")
err = c.Verify()
assert.NoError(err, "expected no error during verification of client")
vm := &importjob.VirtualMachine{
ObjectMeta: metav1.ObjectMeta{
Name: "demo",
Namespace: "default",
},
Spec: importjob.VirtualMachineImportSpec{
SourceCluster: corev1.ObjectReference{},
VirtualMachineName: vm_name,
},
}
err = c.ExportVirtualMachine(vm)
assert.NoError(err, "expected no error during vm export")
t.Log(vm.Status)
}
// Test_GenerateVirtualMachine
func Test_GenerateVirtualMachine(t *testing.T) {
ctx := context.TODO()
endpoint := fmt.Sprintf("https://localhost:%s/sdk", vcsimPort)
dc := "DC0"
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
},
Data: map[string][]byte{
"username": []byte("user"),
"password": []byte("pass"),
},
}
c, err := NewClient(ctx, endpoint, dc, secret)
assert := require.New(t)
assert.NoError(err, "expected no error during creation of client")
err = c.Verify()
assert.NoError(err, "expected no error during verification of client")
vm := &importjob.VirtualMachine{
ObjectMeta: metav1.ObjectMeta{
Name: "demo",
Namespace: "default",
},
Spec: importjob.VirtualMachineImportSpec{
SourceCluster: corev1.ObjectReference{},
VirtualMachineName: "DC0_H0_VM0",
Mapping: []importjob.NetworkMapping{
{
SourceNetwork: "DVSwitch: fea97929-4b2d-5972-b146-930c6d0b4014",
DestinationNetwork: "default/vlan",
},
},
},
}
newVM, err := c.GenerateVirtualMachine(vm)
assert.NoError(err, "expected no error during vm export")
assert.Len(newVM.Spec.Template.Spec.Networks, 1, "should have found the default pod network")
assert.Len(newVM.Spec.Template.Spec.Domain.Devices.Interfaces, 1, "should have found a network map")
assert.Equal(newVM.Spec.Template.Spec.Domain.Memory.Guest.String(), "32M", "expected VM to have 32M memory")
assert.NotEmpty(newVM.Spec.Template.Spec.Domain.Resources.Limits, "expect to find resource requests to be present")
}
func Test_identifyNetworkCards(t *testing.T) {
ctx := context.TODO()
endpoint := fmt.Sprintf("https://localhost:%s/sdk", vcsimPort)
dc := "DC0"
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
},
Data: map[string][]byte{
"username": []byte("user"),
"password": []byte("pass"),
},
}
c, err := NewClient(ctx, endpoint, dc, secret)
assert := require.New(t)
assert.NoError(err, "expected no error during creation of client")
err = c.Verify()
assert.NoError(err, "expected no error during verification of client")
vmObj, err := c.findVM("", "DC0_H0_VM0")
assert.NoError(err, "expected no error during vm lookup")
var o mo.VirtualMachine
err = vmObj.Properties(c.ctx, vmObj.Reference(), []string{}, &o)
assert.NoError(err, "expected no error looking up vmObj properties")
networkInfo := identifyNetworkCards(o.Config.Hardware.Device)
assert.Len(networkInfo, 1, "expected to find only 1 item in the networkInfo")
networkMapping := []importjob.NetworkMapping{
{
SourceNetwork: "dummyNetwork",
DestinationNetwork: "harvester1",
},
{
SourceNetwork: "DVSwitch: fea97929-4b2d-5972-b146-930c6d0b4014",
DestinationNetwork: "pod-network",
},
}
mappedInfo := mapNetworkCards(networkInfo, networkMapping)
assert.Len(mappedInfo, 1, "expected to find only 1 item in the mapped networkinfo")
noNetworkMapping := []importjob.NetworkMapping{}
noMappedInfo := mapNetworkCards(networkInfo, noNetworkMapping)
assert.Len(noMappedInfo, 0, "expected to find no item in the mapped networkinfo")
}

55
pkg/util/conditions.go Normal file
View File

@ -0,0 +1,55 @@
package util
import (
"github.com/harvester/vm-import-controller/pkg/apis/common"
"github.com/rancher/wrangler/pkg/condition"
v1 "k8s.io/api/core/v1"
)
func ConditionExists(conditions []common.Condition, c condition.Cond, condType v1.ConditionStatus) bool {
for _, v := range conditions {
if v.Type == c && v.Status == condType {
return true
}
}
return false
}
func AddOrUpdateCondition(conditions []common.Condition, newCond common.Condition) []common.Condition {
var found bool
// update the existing condition in place; indexing into the slice avoids
// mutating a throwaway copy of the loop variable
for i := range conditions {
if conditions[i].Type == newCond.Type {
found = true
conditions[i].Status = newCond.Status
conditions[i].LastTransitionTime = newCond.LastTransitionTime
conditions[i].LastUpdateTime = newCond.LastUpdateTime
conditions[i].Message = newCond.Message
conditions[i].Reason = newCond.Reason
}
}
if !found {
conditions = append(conditions, newCond)
}
return conditions
}
func MergeConditions(srcConditions []common.Condition, newCond []common.Condition) []common.Condition {
for _, v := range newCond {
srcConditions = AddOrUpdateCondition(srcConditions, v)
}
return srcConditions
}
func RemoveCondition(conditions []common.Condition, c condition.Cond, condType v1.ConditionStatus) []common.Condition {
var retConditions []common.Condition
for _, v := range conditions {
if v.Type != c || v.Status != condType {
retConditions = append(retConditions, v)
}
}
return retConditions
}

125
pkg/util/conditions_test.go Normal file
View File

@ -0,0 +1,125 @@
package util
import (
"testing"
"time"
"github.com/harvester/vm-import-controller/pkg/apis/common"
importjob "github.com/harvester/vm-import-controller/pkg/apis/importjob.harvesterhci.io/v1beta1"
source "github.com/harvester/vm-import-controller/pkg/apis/source.harvesterhci.io/v1beta1"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func Test_ConditionExists(t *testing.T) {
conditions := []common.Condition{
{
Type: source.ClusterReadyCondition,
Status: corev1.ConditionTrue,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
},
{
Type: source.ClusterErrorCondition,
Status: corev1.ConditionFalse,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
},
}
assert := require.New(t)
assert.True(ConditionExists(conditions, source.ClusterReadyCondition, corev1.ConditionTrue))
assert.True(ConditionExists(conditions, source.ClusterErrorCondition, corev1.ConditionFalse))
}
func Test_AddOrUpdateCondition(t *testing.T) {
conditions := []common.Condition{
{
Type: source.ClusterReadyCondition,
Status: corev1.ConditionTrue,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
},
{
Type: source.ClusterErrorCondition,
Status: corev1.ConditionFalse,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
},
}
extraCondition := common.Condition{
Type: importjob.VirtualMachinePoweringOff,
Status: corev1.ConditionTrue,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
}
newCond := AddOrUpdateCondition(conditions, extraCondition)
assert := require.New(t)
assert.True(ConditionExists(newCond, importjob.VirtualMachinePoweringOff, corev1.ConditionTrue))
assert.True(ConditionExists(conditions, source.ClusterErrorCondition, corev1.ConditionFalse))
assert.True(ConditionExists(conditions, source.ClusterReadyCondition, corev1.ConditionTrue))
}
func Test_MergeConditions(t *testing.T) {
conditions := []common.Condition{
{
Type: source.ClusterReadyCondition,
Status: corev1.ConditionTrue,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
},
{
Type: source.ClusterErrorCondition,
Status: corev1.ConditionFalse,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
},
}
extraConditions := []common.Condition{
{
Type: importjob.VirtualMachineExported,
Status: corev1.ConditionTrue,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
},
{
Type: importjob.VirtualMachineImageReady,
Status: corev1.ConditionTrue,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
},
}
newConds := MergeConditions(conditions, extraConditions)
assert := require.New(t)
assert.Len(newConds, 4, "expected to find 4 conditions in the merged conditions")
}
func Test_RemoveCondition(t *testing.T) {
conditions := []common.Condition{
{
Type: source.ClusterReadyCondition,
Status: corev1.ConditionTrue,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
},
{
Type: source.ClusterErrorCondition,
Status: corev1.ConditionFalse,
LastUpdateTime: metav1.Now().Format(time.RFC3339),
LastTransitionTime: metav1.Now().Format(time.RFC3339),
},
}
noRemoveCond := RemoveCondition(conditions, source.ClusterErrorCondition, corev1.ConditionTrue)
assert := require.New(t)
assert.True(ConditionExists(noRemoveCond, source.ClusterErrorCondition, corev1.ConditionFalse))
removeCond := RemoveCondition(conditions, source.ClusterErrorCondition, corev1.ConditionFalse)
assert.False(ConditionExists(removeCond, source.ClusterErrorCondition, corev1.ConditionFalse))
}

View File

@ -0,0 +1,15 @@
/*
Copyright YEAR Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

View File

@ -0,0 +1,244 @@
package integration
import (
"fmt"
source "github.com/harvester/vm-import-controller/pkg/apis/source.harvesterhci.io/v1beta1"
"github.com/harvester/vm-import-controller/pkg/util"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
var _ = Describe("verify vmware is ready", func() {
var creds *corev1.Secret
var vcsim *source.Vmware
BeforeEach(func() {
creds = &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "vcsim-creds",
Namespace: "default",
},
StringData: map[string]string{
"username": "user",
"password": "pass",
},
}
vcsim = &source.Vmware{
ObjectMeta: metav1.ObjectMeta{
Name: "local",
Namespace: "default",
},
Spec: source.VmwareClusterSpec{
EndpointAddress: "",
Datacenter: "DC0",
Credentials: corev1.SecretReference{
Name: creds.Name,
Namespace: creds.Namespace,
},
},
}
err := k8sClient.Create(ctx, creds)
Expect(err).ToNot(HaveOccurred())
vcsim.Spec.EndpointAddress = fmt.Sprintf("https://localhost:%s/sdk", vcsimPort)
err = k8sClient.Create(ctx, vcsim)
Expect(err).ToNot(HaveOccurred())
})
It("check vmware source is ready", func() {
// check status of source object
Eventually(func() error {
vcsimObj := &source.Vmware{}
err := k8sClient.Get(ctx, types.NamespacedName{Name: vcsim.Name,
Namespace: vcsim.Namespace}, vcsimObj)
if err != nil {
return err
}
if vcsimObj.Status.Status == source.ClusterReady {
return nil
}
return fmt.Errorf("source currently in state: %v, expected to be %s", vcsimObj.Status.Status, source.ClusterReady)
}, "30s", "5s").ShouldNot(HaveOccurred())
// check conditions on source object
Eventually(func() error {
vcsimObj := &source.Vmware{}
err := k8sClient.Get(ctx, types.NamespacedName{Name: vcsim.Name,
Namespace: vcsim.Namespace}, vcsimObj)
if err != nil {
return err
}
logrus.Info(vcsimObj.Status.Conditions)
if util.ConditionExists(vcsimObj.Status.Conditions, source.ClusterReadyCondition, corev1.ConditionTrue) &&
util.ConditionExists(vcsimObj.Status.Conditions, source.ClusterErrorCondition, corev1.ConditionFalse) {
return nil
}
return fmt.Errorf("expected source to have condition %s as %v", source.ClusterReadyCondition, corev1.ConditionTrue)
}, "30s", "5s").ShouldNot(HaveOccurred())
})
AfterEach(func() {
err := k8sClient.Delete(ctx, creds)
Expect(err).ToNot(HaveOccurred())
err = k8sClient.Delete(ctx, vcsim)
Expect(err).ToNot(HaveOccurred())
})
})
var _ = Describe("verify vmware is errored", func() {
var creds *corev1.Secret
var vcsim *source.Vmware
BeforeEach(func() {
creds = &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "vcsim-creds",
Namespace: "default",
},
StringData: map[string]string{
"username": "user",
"password": "pass",
},
}
vcsim = &source.Vmware{
ObjectMeta: metav1.ObjectMeta{
Name: "local",
Namespace: "default",
},
Spec: source.VmwareClusterSpec{
EndpointAddress: "https://localhost/sdk",
Datacenter: "DC0",
Credentials: corev1.SecretReference{
Name: creds.Name,
Namespace: creds.Namespace,
},
},
}
err := k8sClient.Create(ctx, creds)
Expect(err).ToNot(HaveOccurred())
err = k8sClient.Create(ctx, vcsim)
Expect(err).ToNot(HaveOccurred())
})
It("check vmware source is ready", func() {
// check status of source object
Eventually(func() error {
vcsimObj := &source.Vmware{}
err := k8sClient.Get(ctx, types.NamespacedName{Name: vcsim.Name,
Namespace: vcsim.Namespace}, vcsimObj)
if err != nil {
return err
}
if vcsimObj.Status.Status == "" {
return nil
}
return fmt.Errorf("source currently in state: %v, expected to be %s", vcsimObj.Status.Status, "")
}, "30s", "5s").ShouldNot(HaveOccurred())
})
AfterEach(func() {
err := k8sClient.Delete(ctx, creds)
Expect(err).ToNot(HaveOccurred())
err = k8sClient.Delete(ctx, vcsim)
Expect(err).ToNot(HaveOccurred())
})
})
var _ = Describe("verify vmware has invalid DC", func() {
var creds *corev1.Secret
var vcsim *source.Vmware
BeforeEach(func() {
creds = &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "vcsim-creds",
Namespace: "default",
},
StringData: map[string]string{
"username": "user",
"password": "pass",
},
}
vcsim = &source.Vmware{
ObjectMeta: metav1.ObjectMeta{
Name: "local",
Namespace: "default",
},
Spec: source.VmwareClusterSpec{
EndpointAddress: "",
Datacenter: "DC2",
Credentials: corev1.SecretReference{
Name: creds.Name,
Namespace: creds.Namespace,
},
},
}
err := k8sClient.Create(ctx, creds)
Expect(err).ToNot(HaveOccurred())
vcsim.Spec.EndpointAddress = fmt.Sprintf("https://localhost:%s/sdk", vcsimPort)
err = k8sClient.Create(ctx, vcsim)
Expect(err).ToNot(HaveOccurred())
})
It("check vmware source is ready", func() {
// check status of source object
Eventually(func() error {
vcsimObj := &source.Vmware{}
err := k8sClient.Get(ctx, types.NamespacedName{Name: vcsim.Name,
Namespace: vcsim.Namespace}, vcsimObj)
if err != nil {
return err
}
if vcsimObj.Status.Status == source.ClusterNotReady {
return nil
}
return fmt.Errorf("source currently in state: %v, expected to be %s", vcsimObj.Status.Status, source.ClusterNotReady)
}, "30s", "5s").ShouldNot(HaveOccurred())
// check conditions on source object
Eventually(func() error {
vcsimObj := &source.Vmware{}
err := k8sClient.Get(ctx, types.NamespacedName{Name: vcsim.Name,
Namespace: vcsim.Namespace}, vcsimObj)
if err != nil {
return err
}
logrus.Info(vcsimObj.Status.Conditions)
if util.ConditionExists(vcsimObj.Status.Conditions, source.ClusterReadyCondition, corev1.ConditionFalse) &&
util.ConditionExists(vcsimObj.Status.Conditions, source.ClusterErrorCondition, corev1.ConditionTrue) {
return nil
}
return fmt.Errorf("expected source to have condition %s as %v", source.ClusterErrorCondition, corev1.ConditionTrue)
}, "30s", "5s").ShouldNot(HaveOccurred())
})
AfterEach(func() {
err := k8sClient.Delete(ctx, creds)
Expect(err).ToNot(HaveOccurred())
err = k8sClient.Delete(ctx, vcsim)
Expect(err).ToNot(HaveOccurred())
})
})

View File

@ -0,0 +1,135 @@
package integration
import (
"context"
"os"
"testing"
"time"
importjob "github.com/harvester/vm-import-controller/pkg/apis/importjob.harvesterhci.io/v1beta1"
"github.com/harvester/vm-import-controller/pkg/server"
"github.com/harvester/vm-import-controller/tests/setup"
harvesterv1beta1 "github.com/harvester/harvester/pkg/apis/harvesterhci.io/v1beta1"
source "github.com/harvester/vm-import-controller/pkg/apis/source.harvesterhci.io/v1beta1"
"github.com/harvester/vm-import-controller/pkg/controllers"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/ory/dockertest/v3"
"golang.org/x/sync/errgroup"
corev1 "k8s.io/api/core/v1"
apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/runtime"
kubevirtv1 "kubevirt.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
log "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
)
var (
ctx, egctx context.Context
k8sClient client.Client
testEnv *envtest.Environment
cancel context.CancelFunc
scheme = runtime.NewScheme()
eg *errgroup.Group
pool *dockertest.Pool
vcsimPort string
vcsimMock *dockertest.Resource
useExisting bool
)
func TestIntegration(t *testing.T) {
defer GinkgoRecover()
RegisterFailHandler(Fail)
RunSpecs(t, "Integration Suite")
}
var _ = BeforeSuite(func() {
log.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
ctx, cancel = context.WithCancel(context.TODO())
existing, ok := os.LookupEnv("USE_EXISTING_CLUSTER")
if ok && existing == "true" {
useExisting = true
}
By("bootstrapping test environment")
testEnv = &envtest.Environment{}
if !useExisting {
crds, err := setup.GenerateKubeVirtCRD()
Expect(err).ToNot(HaveOccurred())
testEnv.CRDInstallOptions = envtest.CRDInstallOptions{
CRDs: crds,
}
}
cfg, err := testEnv.Start()
Expect(err).NotTo(HaveOccurred())
Expect(cfg).NotTo(BeNil())
err = source.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = apiextensions.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = corev1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = harvesterv1beta1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = kubevirtv1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = importjob.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme})
Expect(err).NotTo(HaveOccurred())
err = setup.InstallCRD(ctx, cfg)
Expect(err).NotTo(HaveOccurred())
eg, egctx = errgroup.WithContext(ctx)
eg.Go(func() error {
return controllers.Start(egctx, cfg)
})
eg.Go(func() error {
return server.NewServer(egctx)
})
eg.Go(func() error {
return eg.Wait()
})
pool, err = dockertest.NewPool("")
Expect(err).NotTo(HaveOccurred())
runOpts := &dockertest.RunOptions{
Name: "vcsim",
Repository: "vmware/vcsim",
Tag: "v0.29.0",
}
vcsimMock, err = pool.RunWithOptions(runOpts)
Expect(err).NotTo(HaveOccurred())
vcsimPort = vcsimMock.GetPort("8989/tcp")
time.Sleep(30 * time.Second)
})
var _ = AfterSuite(func() {
By("tearing down the test environment")
err := pool.Purge(vcsimMock)
Expect(err).NotTo(HaveOccurred())
egctx.Done()
cancel()
testEnv.Stop()
})

View File

@ -0,0 +1,185 @@
package integration
import (
"fmt"
"strings"
harvesterv1beta1 "github.com/harvester/harvester/pkg/apis/harvesterhci.io/v1beta1"
importjob "github.com/harvester/vm-import-controller/pkg/apis/importjob.harvesterhci.io/v1beta1"
source "github.com/harvester/vm-import-controller/pkg/apis/source.harvesterhci.io/v1beta1"
"github.com/harvester/vm-import-controller/pkg/util"
"github.com/harvester/vm-import-controller/tests/setup"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
kubevirt "kubevirt.io/api/core/v1"
)
var _ = Describe("test vmware export/import integration", func() {
BeforeEach(func() {
if !useExisting {
return
}
err := setup.SetupVMware(ctx, k8sClient)
Expect(err).ToNot(HaveOccurred())
})
It("reconcile object status", func() {
if !useExisting {
Skip("skipping vmware integration tests as not using an existing environment")
}
By("checking if vmware source is ready", func() {
Eventually(func() error {
v := &source.Vmware{}
err := k8sClient.Get(ctx, setup.VmwareSourceNamespacedName, v)
if err != nil {
return err
}
if v.Status.Status != source.ClusterReady {
return fmt.Errorf("waiting for cluster source to be ready. current condition is %s", v.Status.Status)
}
return nil
}, "30s", "10s").ShouldNot(HaveOccurred())
})
By("vm importjob has the correct conditions", func() {
Eventually(func() error {
v := &importjob.VirtualMachine{}
err := k8sClient.Get(ctx, setup.VmwareVMNamespacedName, v)
if err != nil {
return err
}
if !util.ConditionExists(v.Status.ImportConditions, importjob.VirtualMachinePoweringOff, v1.ConditionTrue) {
return fmt.Errorf("expected virtualmachinepoweringoff condition to be present")
}
if !util.ConditionExists(v.Status.ImportConditions, importjob.VirtualMachinePoweredOff, v1.ConditionTrue) {
return fmt.Errorf("expected virtualmachinepoweredoff condition to be present")
}
if !util.ConditionExists(v.Status.ImportConditions, importjob.VirtualMachineExported, v1.ConditionTrue) {
return fmt.Errorf("expected virtualmachineexported condition to be present")
}
return nil
}, "300s", "10s").ShouldNot(HaveOccurred())
})
By("checking status of virtualmachineimage objects", func() {
Eventually(func() error {
v := &importjob.VirtualMachine{}
err := k8sClient.Get(ctx, setup.VmwareVMNamespacedName, v)
if err != nil {
return err
}
if len(v.Status.DiskImportStatus) == 0 {
return fmt.Errorf("waiting for DiskImportStatus to be populated")
}
for _, d := range v.Status.DiskImportStatus {
if d.VirtualMachineImage == "" {
return fmt.Errorf("waiting for VMI to be populated")
}
vmi := &harvesterv1beta1.VirtualMachineImage{}
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: setup.VmwareVMNamespacedName.Namespace,
Name: d.VirtualMachineImage}, vmi)
if err != nil {
return err
}
if vmi.Status.Progress != 100 {
return fmt.Errorf("vmi %s not yet ready", vmi.Name)
}
}
return nil
}, "1800s", "60s").ShouldNot(HaveOccurred())
})
By("checking that PVC claim has been created", func() {
Eventually(func() error {
v := &importjob.VirtualMachine{}
err := k8sClient.Get(ctx, setup.VmwareVMNamespacedName, v)
if err != nil {
return err
}
if len(v.Status.DiskImportStatus) == 0 {
return fmt.Errorf("diskimportstatus should have image details available")
}
for _, d := range v.Status.DiskImportStatus {
if d.VirtualMachineImage == "" {
return fmt.Errorf("waiting for VMI to be populated")
}
pvc := &v1.PersistentVolumeClaim{}
pvcName := strings.ToLower(strings.Split(d.Name, ".img")[0])
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: setup.VmwareVMNamespacedName.Namespace,
Name: pvcName}, pvc)
if err != nil {
return err
}
if pvc.Status.Phase != v1.ClaimBound {
return fmt.Errorf("waiting for pvc claim to be in state bound")
}
}
return nil
}, "30s", "5s").ShouldNot(HaveOccurred())
})
By("checking that the virtualmachine has been created", func() {
Eventually(func() error {
v := &importjob.VirtualMachine{}
err := k8sClient.Get(ctx, setup.VmwareVMNamespacedName, v)
if err != nil {
return err
}
vm := &kubevirt.VirtualMachine{}
err = k8sClient.Get(ctx, types.NamespacedName{
Namespace: setup.VmwareVMNamespacedName.Namespace,
Name: v.Spec.VirtualMachineName,
}, vm)
return err
}, "300s", "10s").ShouldNot(HaveOccurred())
})
By("checking that the virtualmachineimage ownership has been removed", func() {
Eventually(func() error {
v := &importjob.VirtualMachine{}
err := k8sClient.Get(ctx, setup.VmwareVMNamespacedName, v)
if err != nil {
return err
}
for _, d := range v.Status.DiskImportStatus {
vmi := &harvesterv1beta1.VirtualMachineImage{}
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: setup.VmwareVMNamespacedName.Namespace,
Name: d.VirtualMachineImage}, vmi)
if err != nil {
return err
}
if len(vmi.OwnerReferences) != 0 {
return fmt.Errorf("waiting for ownerRef to be cleared")
}
}
return nil
}, "300s", "10s").ShouldNot(HaveOccurred())
})
})
AfterEach(func() {
if !useExisting {
return
}
err := setup.CleanupVmware(ctx, k8sClient)
Expect(err).ToNot(HaveOccurred())
})
})

View File

@ -0,0 +1,68 @@
package setup
import (
"context"
harvesterv1 "github.com/harvester/harvester/pkg/apis/harvesterhci.io/v1beta1"
"github.com/harvester/harvester/pkg/util/crd"
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/client-go/rest"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/components"
)
// InstallCRD will install the core Harvester CRDs
// copied from harvester/harvester/pkg/data/crd.go
func InstallCRD(ctx context.Context, cfg *rest.Config) error {
factory, err := crd.NewFactoryFromClient(ctx, cfg)
if err != nil {
return err
}
return factory.
BatchCreateCRDsIfNotExisted(
crd.NonNamespacedFromGV(harvesterv1.SchemeGroupVersion, "Setting", harvesterv1.Setting{}),
).
BatchCreateCRDsIfNotExisted(
crd.FromGV(harvesterv1.SchemeGroupVersion, "KeyPair", harvesterv1.KeyPair{}),
crd.FromGV(harvesterv1.SchemeGroupVersion, "Upgrade", harvesterv1.Upgrade{}),
crd.FromGV(harvesterv1.SchemeGroupVersion, "Version", harvesterv1.Version{}),
crd.FromGV(harvesterv1.SchemeGroupVersion, "VirtualMachineImage", harvesterv1.VirtualMachineImage{}),
crd.FromGV(harvesterv1.SchemeGroupVersion, "VirtualMachineTemplate", harvesterv1.VirtualMachineTemplate{}),
crd.FromGV(harvesterv1.SchemeGroupVersion, "VirtualMachineTemplateVersion", harvesterv1.VirtualMachineTemplateVersion{}),
crd.FromGV(harvesterv1.SchemeGroupVersion, "VirtualMachineBackup", harvesterv1.VirtualMachineBackup{}),
crd.FromGV(harvesterv1.SchemeGroupVersion, "VirtualMachineRestore", harvesterv1.VirtualMachineRestore{}),
crd.FromGV(harvesterv1.SchemeGroupVersion, "Preference", harvesterv1.Preference{}),
crd.FromGV(harvesterv1.SchemeGroupVersion, "SupportBundle", harvesterv1.SupportBundle{}),
).
BatchWait()
}
type kubeVirtCRDGenerator func() (*extv1.CustomResourceDefinition, error)
// GenerateKubeVirtCRD will generate the KubeVirt CRDs
func GenerateKubeVirtCRD() ([]*extv1.CustomResourceDefinition, error) {
v := []kubeVirtCRDGenerator{
components.NewVirtualMachineCrd,
components.NewVirtualMachineInstanceCrd,
components.NewPresetCrd,
components.NewReplicaSetCrd,
components.NewVirtualMachineInstanceMigrationCrd,
components.NewVirtualMachinePoolCrd,
components.NewVirtualMachineSnapshotCrd,
components.NewVirtualMachineSnapshotContentCrd,
components.NewVirtualMachineRestoreCrd,
components.NewVirtualMachineFlavorCrd,
components.NewVirtualMachineClusterFlavorCrd,
components.NewMigrationPolicyCrd,
}
var result []*extv1.CustomResourceDefinition
for _, m := range v {
crdList, err := m()
if err != nil {
return nil, err
}
result = append(result, crdList)
}
return result, nil
}

185
tests/setup/setup_vmare.go Normal file
View File

@ -0,0 +1,185 @@
package setup
import (
"context"
"fmt"
"os"
importjob "github.com/harvester/vm-import-controller/pkg/apis/importjob.harvesterhci.io/v1beta1"
source "github.com/harvester/vm-import-controller/pkg/apis/source.harvesterhci.io/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
secret = "vmware-integration"
sourceCluster = "vmware-integration"
virtualmachine = "vm-export-test"
defaultNamespace = "default"
defaultKind = "vmware"
defaultAPIVersion = "source.harvesterhci.io/v1beta1"
)
var (
VmwareSourceNamespacedName, VmwareVMNamespacedName types.NamespacedName
)
type applyObject func(context.Context, client.Client) error
// SetupVMware will try to set up a vmware source based on GOVC environment variables.
// It checks the following environment variables to build the source and importjob objects:
// GOVC_URL: Identify vsphere endpoint
// GOVC_DATACENTER: Identify vsphere datacenter
// GOVC_USERNAME: Username for source secret
// GOVC_PASSWORD: Password for source secret
// SVC_ADDRESS: local machine address, used to generate the URL that Harvester downloads the exported images from
// VM_NAME: name of VM to be exported
// VM_FOLDER: folder where VM pointed to by VM_NAME is located
func SetupVMware(ctx context.Context, k8sClient client.Client) error {
VmwareSourceNamespacedName = types.NamespacedName{
Name: sourceCluster,
Namespace: defaultNamespace,
}
VmwareVMNamespacedName = types.NamespacedName{
Name: virtualmachine,
Namespace: defaultNamespace,
}
fnList := []applyObject{
setupSecret,
setupSource,
setupVMExport,
}
for _, v := range fnList {
if err := v(ctx, k8sClient); err != nil {
return err
}
}
return nil
}
func setupSecret(ctx context.Context, k8sClient client.Client) error {
username, ok := os.LookupEnv("GOVC_USERNAME")
if !ok {
return fmt.Errorf("env variable GOVC_USERNAME not set")
}
password, ok := os.LookupEnv("GOVC_PASSWORD")
if !ok {
return fmt.Errorf("env variable GOVC_PASSWORD not set")
}
s := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secret,
Namespace: defaultNamespace,
},
StringData: map[string]string{
"username": username,
"password": password,
},
}
return k8sClient.Create(ctx, s)
}
func setupSource(ctx context.Context, k8sClient client.Client) error {
endpoint, ok := os.LookupEnv("GOVC_URL")
if !ok {
return fmt.Errorf("env variable GOVC_URL not set")
}
dc, ok := os.LookupEnv("GOVC_DATACENTER")
if !ok {
return fmt.Errorf("env variable GOVC_DATACENTER not set")
}
s := &source.Vmware{
ObjectMeta: metav1.ObjectMeta{
Name: sourceCluster,
Namespace: defaultNamespace,
},
Spec: source.VmwareClusterSpec{
EndpointAddress: endpoint,
Datacenter: dc,
Credentials: corev1.SecretReference{
Name: secret,
Namespace: defaultNamespace,
},
},
}
return k8sClient.Create(ctx, s)
}
func setupVMExport(ctx context.Context, k8sClient client.Client) error {
vm, ok := os.LookupEnv("VM_NAME")
if !ok {
return fmt.Errorf("env variable VM_NAME not specified")
}
_, ok = os.LookupEnv("SVC_ADDRESS")
if !ok {
return fmt.Errorf("env variable SVC_ADDRESS not specified")
}
folder, _ := os.LookupEnv("VM_FOLDER")
j := &importjob.VirtualMachine{
ObjectMeta: metav1.ObjectMeta{
Name: virtualmachine,
Namespace: defaultNamespace,
},
Spec: importjob.VirtualMachineImportSpec{
SourceCluster: corev1.ObjectReference{
Name: sourceCluster,
Namespace: defaultNamespace,
Kind: defaultKind,
APIVersion: defaultAPIVersion,
},
VirtualMachineName: vm,
Folder: folder,
},
}
return k8sClient.Create(ctx, j)
}
func CleanupVmware(ctx context.Context, k8sClient client.Client) error {
s := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secret,
Namespace: defaultNamespace,
},
}
err := k8sClient.Delete(ctx, s)
if err != nil {
return err
}
vmware := &source.Vmware{
ObjectMeta: metav1.ObjectMeta{
Name: sourceCluster,
Namespace: defaultNamespace,
},
}
err = k8sClient.Delete(ctx, vmware)
if err != nil {
return err
}
i := &importjob.VirtualMachine{
ObjectMeta: metav1.ObjectMeta{
Name: virtualmachine,
Namespace: defaultNamespace,
},
}
return k8sClient.Delete(ctx, i)
}