mirror of
https://github.com/harvester/vm-import-controller.git
synced 2025-06-03 01:44:51 +00:00
Openstack reconciliation (#1)
* openstack source reconciliation * tmp staging for openstack migration * openstack import working and integration tests * updated readme
This commit is contained in:
parent
112008bb81
commit
fddce7d565
96
README.md
96
README.md
@ -51,6 +51,45 @@ NAME STATUS
|
||||
vcsim clusterReady
|
||||
```
|
||||
|
||||
For openstack based source clusters a sample definition is as follows:
|
||||
|
||||
```yaml
|
||||
apiVersion: source.harvesterhci.io/v1beta1
|
||||
kind: Openstack
|
||||
metadata:
|
||||
name: devstack
|
||||
namespace: default
|
||||
spec:
|
||||
endpoint: "https://devstack/identity"
|
||||
region: "RegionOne"
|
||||
credentials:
|
||||
name: devstack-credentials
|
||||
namespace: default
|
||||
```
|
||||
|
||||
The secret contains the credentials for the openstack keystone endpoint:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: devstack-credentials
|
||||
namespace: default
|
||||
stringData:
|
||||
"username": "user"
|
||||
"password": "password"
|
||||
"project_name": "admin"
|
||||
"domain_name": "default"
|
||||
```
|
||||
|
||||
The Openstack source reconcile process attempts to list the VMs in the project, and marks the source as ready
|
||||
|
||||
```shell
|
||||
$ kubectl get openstack.source
|
||||
NAME STATUS
|
||||
devstack clusterReady
|
||||
```
|
||||
|
||||
### ImportJob
|
||||
The ImportJob crd provides a way for users to define the source VM and mapping to the actual source cluster to perform the VM export-import from.
|
||||
|
||||
@ -88,7 +127,60 @@ Once the virtual machine has been imported successfully the object will reflect
|
||||
|
||||
```shell
|
||||
$ kubectl get virtualmachine.importjob
|
||||
NAME STATUS
|
||||
alpine-export-test virtualMachineRunning
|
||||
NAME STATUS
|
||||
alpine-export-test virtualMachineRunning
|
||||
openstack-cirros-test virtualMachineRunning
|
||||
|
||||
```
|
||||
|
||||
Similarly, users can define a VirtualMachine importjob for Openstack source as well:
|
||||
|
||||
```yaml
|
||||
apiVersion: importjob.harvesterhci.io/v1beta1
|
||||
kind: VirtualMachine
|
||||
metadata:
|
||||
name: openstack-demo
|
||||
namespace: default
|
||||
spec:
|
||||
virtualMachineName: "openstack-demo" #Name or UUID for instance
|
||||
networkMapping:
|
||||
- sourceNetwork: "shared"
|
||||
destinationNetwork: "default/vlan1"
|
||||
- sourceNetwork: "public"
|
||||
destinationNetwork: "default/vlan2"
|
||||
sourceCluster:
|
||||
name: devstack
|
||||
namespace: default
|
||||
kind: Openstack
|
||||
apiVersion: source.harvesterhci.io/v1beta1
|
||||
```
|
||||
|
||||
*NOTE:* Openstack allows users to have multiple instances with the same name. In such a scenario the users are advised to use the Instance ID. The reconcile logic tries to perform a lookup from name to ID when a name is used.
|
||||
|
||||
|
||||
## Testing
|
||||
Currently basic integration tests are available under `tests/integration`
|
||||
|
||||
However a lot of these tests need access to a working Harvester, Openstack and Vmware cluster.
|
||||
|
||||
The integration tests can be setup by using the following environment variables to point the tests to a working environment to perform the actual vm migration tests
|
||||
|
||||
```shell
|
||||
export GOVC_PASSWORD="vsphere-password"
|
||||
export GOVC_USERNAME="vsphere-username"
|
||||
export GOVC_URL="https://vcenter/sdk"
|
||||
export GOVC_DATACENTER="vsphere-datacenter"
|
||||
#The controller exposes the converted disks via a http endpoint and leverages the download capability of longhorn backing images
|
||||
# the SVC address needs to be the address of node where integration tests are running and should be reachable from harvester cluster
|
||||
export SVC_ADDRESS="address for node"
|
||||
export VM_NAME="vmware-export-test-vm-name"
|
||||
export USE_EXISTING_CLUSTER=true
|
||||
export OS_AUTH_URL="openstack/identity" #Keystone endpoint
|
||||
export OS_PROJECT_NAME="openstack-project-name"
|
||||
export OS_USER_DOMAIN_NAME="openstack-user-domain"
|
||||
export OS_USERNAME="openstack-username"
|
||||
export OS_PASSWORD="openstack-password"
|
||||
export OS_VM_NAME="openstack-export-test-vm-name"
|
||||
export OS_REGION_NAME="openstack-region"
|
||||
export KUBECONFIG="kubeconfig-for-harvester-cluster"
|
||||
```
|
1
go.mod
1
go.mod
@ -3,6 +3,7 @@ module github.com/harvester/vm-import-controller
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/gophercloud/gophercloud v0.7.0
|
||||
github.com/harvester/harvester v1.0.2
|
||||
github.com/onsi/ginkgo/v2 v2.1.4
|
||||
github.com/onsi/gomega v1.19.0
|
||||
|
1
go.sum
1
go.sum
@ -946,6 +946,7 @@ github.com/gophercloud/gophercloud v0.0.0-20190212181753-892256c46858/go.mod h1:
|
||||
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
|
||||
github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
|
||||
github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM=
|
||||
github.com/gophercloud/gophercloud v0.7.0 h1:vhmQQEM2SbnGCg2/3EzQnQZ3V7+UCGy9s8exQCprNYg=
|
||||
github.com/gophercloud/gophercloud v0.7.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss=
|
||||
github.com/gophercloud/utils v0.0.0-20191129022341-463e26ffa30d/go.mod h1:SZ9FTKibIotDtCrxAU/evccoyu1yhKST6hgBvwTB5Eg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
|
@ -22,7 +22,7 @@ type Openstack struct {
|
||||
|
||||
type OpenstackSpec struct {
|
||||
EndpointAddress string `json:"endpoint"`
|
||||
Project string `json:"dc"`
|
||||
Region string `json:"region"`
|
||||
Credentials corev1.SecretReference `json:"credentials"`
|
||||
}
|
||||
|
||||
@ -37,7 +37,7 @@ func (o *Openstack) ClusterStatus() ClusterStatus {
|
||||
}
|
||||
|
||||
func (o *Openstack) GenerateClient(ctx context.Context, secret *corev1.Secret) (VirtualMachineOperations, error) {
|
||||
return openstack.NewClient(ctx, o.Spec.EndpointAddress, o.Spec.Project, secret)
|
||||
return openstack.NewClient(ctx, o.Spec.EndpointAddress, o.Spec.Region, secret)
|
||||
}
|
||||
|
||||
func (o *Openstack) SecretReference() corev1.SecretReference {
|
||||
|
@ -78,6 +78,7 @@ func Register(ctx context.Context, restConfig *rest.Config) error {
|
||||
SharedControllerFactory: scf,
|
||||
})
|
||||
sc.RegisterVmareController(ctx, sourceFactory.Source().V1beta1().Vmware(), coreFactory.Core().V1().Secret())
|
||||
sc.RegisterOpenstackController(ctx, sourceFactory.Source().V1beta1().Openstack(), coreFactory.Core().V1().Secret())
|
||||
|
||||
ic.RegisterVMImportController(ctx, sourceFactory.Source().V1beta1().Vmware(), sourceFactory.Source().V1beta1().Openstack(),
|
||||
coreFactory.Core().V1().Secret(), importJobFactory.Importjob().V1beta1().VirtualMachine(),
|
||||
|
93
pkg/controllers/source/openstack.go
Normal file
93
pkg/controllers/source/openstack.go
Normal file
@ -0,0 +1,93 @@
|
||||
package source
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/harvester/vm-import-controller/pkg/apis/common"
|
||||
source "github.com/harvester/vm-import-controller/pkg/apis/source.harvesterhci.io/v1beta1"
|
||||
sourceController "github.com/harvester/vm-import-controller/pkg/generated/controllers/source.harvesterhci.io/v1beta1"
|
||||
"github.com/harvester/vm-import-controller/pkg/source/openstack"
|
||||
"github.com/harvester/vm-import-controller/pkg/util"
|
||||
corecontrollers "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"time"
|
||||
)
|
||||
|
||||
// openstackHandler reconciles Openstack source objects using the generated
// Openstack controller and the core Secret controller for credential lookups.
type openstackHandler struct {
	ctx    context.Context
	os     sourceController.OpenstackController
	secret corecontrollers.SecretController
}

// RegisterOpenstackController wires up the openstack source handler so that
// every change to an Openstack source object triggers reconciliation.
func RegisterOpenstackController(ctx context.Context, os sourceController.OpenstackController, secret corecontrollers.SecretController) {
	oHandler := &openstackHandler{
		ctx:    ctx,
		os:     os,
		secret: secret,
	}

	os.OnChange(ctx, "openstack-source-change", oHandler.OnSourceChange)
}
|
||||
|
||||
func (h *openstackHandler) OnSourceChange(key string, o *source.Openstack) (*source.Openstack, error) {
|
||||
if o == nil || o.DeletionTimestamp != nil {
|
||||
return o, nil
|
||||
}
|
||||
|
||||
logrus.Infof("reconcilling openstack soure :%s", key)
|
||||
if o.Status.Status != source.ClusterReady {
|
||||
// process source logic
|
||||
secretObj, err := h.secret.Get(o.Spec.Credentials.Namespace, o.Spec.Credentials.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return o, fmt.Errorf("error looking up secret for openstacksource: %v", err)
|
||||
}
|
||||
|
||||
client, err := openstack.NewClient(h.ctx, o.Spec.EndpointAddress, o.Spec.Region, secretObj)
|
||||
if err != nil {
|
||||
return o, fmt.Errorf("error generating openstack client for openstack source: %s: %v", o.Name, err)
|
||||
}
|
||||
|
||||
err = client.Verify()
|
||||
if err != nil {
|
||||
conds := []common.Condition{
|
||||
{
|
||||
Type: source.ClusterErrorCondition,
|
||||
Status: v1.ConditionTrue,
|
||||
LastUpdateTime: metav1.Now().Format(time.RFC3339),
|
||||
LastTransitionTime: metav1.Now().Format(time.RFC3339),
|
||||
}, {
|
||||
Type: source.ClusterReadyCondition,
|
||||
Status: v1.ConditionFalse,
|
||||
LastUpdateTime: metav1.Now().Format(time.RFC3339),
|
||||
LastTransitionTime: metav1.Now().Format(time.RFC3339),
|
||||
},
|
||||
}
|
||||
|
||||
o.Status.Conditions = util.MergeConditions(o.Status.Conditions, conds)
|
||||
o.Status.Status = source.ClusterNotReady
|
||||
return h.os.UpdateStatus(o)
|
||||
}
|
||||
|
||||
conds := []common.Condition{
|
||||
{
|
||||
Type: source.ClusterReadyCondition,
|
||||
Status: v1.ConditionTrue,
|
||||
LastUpdateTime: metav1.Now().Format(time.RFC3339),
|
||||
LastTransitionTime: metav1.Now().Format(time.RFC3339),
|
||||
}, {
|
||||
Type: source.ClusterErrorCondition,
|
||||
Status: v1.ConditionFalse,
|
||||
LastUpdateTime: metav1.Now().Format(time.RFC3339),
|
||||
LastTransitionTime: metav1.Now().Format(time.RFC3339),
|
||||
},
|
||||
}
|
||||
|
||||
o.Status.Conditions = util.MergeConditions(o.Status.Conditions, conds)
|
||||
o.Status.Status = source.ClusterReady
|
||||
return h.os.UpdateStatus(o)
|
||||
|
||||
}
|
||||
return o, nil
|
||||
}
|
@ -42,7 +42,7 @@ func (h *vmwareHandler) OnSourceChange(key string, v *source.Vmware) (*source.Vm
|
||||
if v.Status.Status != source.ClusterReady {
|
||||
secretObj, err := h.secret.Get(v.Spec.Credentials.Namespace, v.Spec.Credentials.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return v, fmt.Errorf("error looking up secret for vmware source: %s", err)
|
||||
return v, fmt.Errorf("error looking up secret for vmware source: %v", err)
|
||||
}
|
||||
client, err := vmware.NewClient(h.ctx, v.Spec.EndpointAddress, v.Spec.Datacenter, secretObj)
|
||||
if err != nil {
|
||||
|
@ -12,6 +12,12 @@ func ConvertVMDKtoRAW(source, target string) error {
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
// ConvertQCOW2toRAW converts the qcow2 file at source into a raw disk image
// written to target, by invoking the qemu-img binary (defaultCommand).
func ConvertQCOW2toRAW(source, target string) error {
	args := []string{"convert", "-f", "qcow2", "-O", "raw", source, target}
	cmd := exec.Command(defaultCommand, args...)
	// NOTE(review): stdout/stderr of qemu-img are discarded; callers only
	// see the exit status
	return cmd.Run()
}
|
||||
|
||||
func createVMDK(path string, size string) error {
|
||||
args := []string{"create", "-f", "vmdk", path, size}
|
||||
cmd := exec.Command(defaultCommand, args...)
|
||||
|
@ -2,32 +2,538 @@ package openstack
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions"
|
||||
"github.com/gophercloud/gophercloud/openstack/compute/v2/flavors"
|
||||
"github.com/gophercloud/gophercloud/openstack/imageservice/v2/images"
|
||||
"github.com/harvester/vm-import-controller/pkg/qemu"
|
||||
"github.com/harvester/vm-import-controller/pkg/server"
|
||||
"io/ioutil"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/gophercloud/gophercloud"
|
||||
"github.com/gophercloud/gophercloud/openstack"
|
||||
"github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes"
|
||||
"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots"
|
||||
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop"
|
||||
"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
|
||||
"github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata"
|
||||
importjob "github.com/harvester/vm-import-controller/pkg/apis/importjob.harvesterhci.io/v1beta1"
|
||||
"github.com/sirupsen/logrus"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubevirt "kubevirt.io/api/core/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
NotUniqueName = "notUniqueName"
|
||||
NotServerFound = "noServerFound"
|
||||
defaultInterval = 10 * time.Second
|
||||
defaultCount = 30
|
||||
)
|
||||
|
||||
type Client struct {
|
||||
ctx context.Context
|
||||
pClient *gophercloud.ProviderClient
|
||||
opts gophercloud.EndpointOpts
|
||||
storageClient *gophercloud.ServiceClient
|
||||
computeClient *gophercloud.ServiceClient
|
||||
imageClient *gophercloud.ServiceClient
|
||||
}
|
||||
|
||||
func NewClient(ctx context.Context, endpoint string, dc string, secret *corev1.Secret) (*Client, error) {
|
||||
return nil, nil
|
||||
// NewClient will generate a GopherCloud client
|
||||
func NewClient(ctx context.Context, endpoint string, region string, secret *corev1.Secret) (*Client, error) {
|
||||
username, ok := secret.Data["username"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no username provided in secret %s", secret.Name)
|
||||
}
|
||||
|
||||
password, ok := secret.Data["password"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no password provided in secret %s", secret.Name)
|
||||
}
|
||||
|
||||
projectName, ok := secret.Data["project_name"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no project_name provided in secret %s", secret.Name)
|
||||
}
|
||||
|
||||
domainName, ok := secret.Data["domain_name"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no domain_name provided in secret %s", secret.Name)
|
||||
}
|
||||
|
||||
authOpts := gophercloud.AuthOptions{
|
||||
IdentityEndpoint: endpoint,
|
||||
Username: string(username),
|
||||
Password: string(password),
|
||||
TenantName: string(projectName),
|
||||
DomainName: string(domainName),
|
||||
}
|
||||
|
||||
endPointOpts := gophercloud.EndpointOpts{
|
||||
Region: region,
|
||||
}
|
||||
client, err := openstack.AuthenticatedClient(authOpts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error authenticated client: %v", err)
|
||||
}
|
||||
|
||||
storageClient, err := openstack.NewBlockStorageV3(client, endPointOpts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error generating storage client: %v", err)
|
||||
}
|
||||
|
||||
computeClient, err := openstack.NewComputeV2(client, endPointOpts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error generating compute client: %v", err)
|
||||
}
|
||||
|
||||
imageClient, err := openstack.NewImageServiceV2(client, endPointOpts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error generating image client: %v", err)
|
||||
}
|
||||
return &Client{
|
||||
ctx: ctx,
|
||||
pClient: client,
|
||||
opts: endPointOpts,
|
||||
storageClient: storageClient,
|
||||
computeClient: computeClient,
|
||||
imageClient: imageClient,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *Client) ExportVirtualMachine(vm *importjob.VirtualMachine) error {
|
||||
func (c *Client) Verify() error {
|
||||
computeClient, err := openstack.NewComputeV2(c.pClient, c.opts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error generating compute client during verify phase :%v", err)
|
||||
}
|
||||
|
||||
pg := servers.List(computeClient, servers.ListOpts{})
|
||||
allPg, err := pg.AllPages()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error generating all pages :%v", err)
|
||||
}
|
||||
|
||||
ok, err := allPg.IsEmpty()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error checking if pages were empty: %v", err)
|
||||
}
|
||||
|
||||
if ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
allServers, err := servers.ExtractServers(allPg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error extracting servers :%v", err)
|
||||
}
|
||||
|
||||
logrus.Infof("found %d servers", len(allServers))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) ExportVirtualMachine(vm *importjob.VirtualMachine) error {
|
||||
vmObj, err := c.findVM(vm.Spec.VirtualMachineName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tmpDir, err := ioutil.TempDir("/tmp", "openstack-image-")
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating tmp image directory: %v", err)
|
||||
}
|
||||
|
||||
for i, v := range vmObj.AttachedVolumes {
|
||||
// create snapshot for volume
|
||||
snapInfo, err := snapshots.Create(c.storageClient, snapshots.CreateOpts{
|
||||
Name: fmt.Sprintf("import-controller-%v-%d", vm.Spec.VirtualMachineName, i),
|
||||
VolumeID: v.ID,
|
||||
Force: true,
|
||||
}).Extract()
|
||||
|
||||
// snapshot creation is async, so call returns a 202 error when successful.
|
||||
// this is ignored
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := 0; i < defaultCount; i++ {
|
||||
snapObj, err := snapshots.Get(c.storageClient, snapInfo.ID).Extract()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if snapObj.Status == "available" {
|
||||
break
|
||||
}
|
||||
time.Sleep(defaultInterval)
|
||||
}
|
||||
|
||||
volObj, err := volumes.Create(c.storageClient, volumes.CreateOpts{
|
||||
SnapshotID: snapInfo.ID,
|
||||
Size: snapInfo.Size,
|
||||
}).Extract()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Info(volObj)
|
||||
|
||||
for i := 0; i < defaultCount; i++ {
|
||||
tmpVolObj, err := volumes.Get(c.storageClient, volObj.ID).Extract()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if tmpVolObj.Status == "available" {
|
||||
break
|
||||
}
|
||||
time.Sleep(defaultInterval)
|
||||
}
|
||||
|
||||
logrus.Info("attempting to create new image from volume")
|
||||
|
||||
volImage, err := volumeactions.UploadImage(c.storageClient, volObj.ID, volumeactions.UploadImageOpts{
|
||||
ImageName: fmt.Sprintf("import-controller-%s-%d", vm.Spec.VirtualMachineName, i),
|
||||
DiskFormat: "qcow2",
|
||||
}).Extract()
|
||||
|
||||
// wait for image to be ready
|
||||
for i := 0; i < defaultCount; i++ {
|
||||
imgObj, err := images.Get(c.imageClient, volImage.ImageID).Extract()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error checking status of volume image: %v", err)
|
||||
}
|
||||
if imgObj.Status == "active" {
|
||||
break
|
||||
}
|
||||
time.Sleep(defaultInterval)
|
||||
}
|
||||
|
||||
contents, err := imagedata.Download(c.imageClient, volImage.ImageID).Extract()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
imageContents, err := ioutil.ReadAll(contents)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
qcowFileName := filepath.Join(tmpDir, fmt.Sprintf("%s-%d", vm.Spec.VirtualMachineName, i))
|
||||
imgFile, err := os.Create(qcowFileName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating disk file: %v", err)
|
||||
}
|
||||
|
||||
_, err = imgFile.Write(imageContents)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
imgFile.Close()
|
||||
|
||||
// downloaded image is qcow2. Convert to raw file
|
||||
rawFileName := filepath.Join(server.TempDir(), fmt.Sprintf("%s-%d.img", vmObj.Name, i))
|
||||
err = qemu.ConvertQCOW2toRAW(qcowFileName, rawFileName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error converting qcow2 to raw file: %v", err)
|
||||
}
|
||||
|
||||
if err := volumes.Delete(c.storageClient, volObj.ID, volumes.DeleteOpts{}).ExtractErr(); err != nil {
|
||||
return fmt.Errorf("error deleting volume %s: %v", volObj.ID, err)
|
||||
}
|
||||
|
||||
if err := snapshots.Delete(c.storageClient, snapInfo.ID).ExtractErr(); err != nil {
|
||||
return fmt.Errorf("error deleting snapshot %s: %v", snapInfo.ID, err)
|
||||
}
|
||||
|
||||
if err := images.Delete(c.imageClient, volImage.ImageID).ExtractErr(); err != nil {
|
||||
return fmt.Errorf("error deleting image %s: %v", volImage.ImageID, err)
|
||||
}
|
||||
|
||||
vm.Status.DiskImportStatus = append(vm.Status.DiskImportStatus, importjob.DiskInfo{
|
||||
Name: fmt.Sprintf("%s-%d.img", vmObj.Name, i),
|
||||
DiskSize: int64(volObj.Size),
|
||||
DiskLocalPath: server.TempDir(),
|
||||
})
|
||||
}
|
||||
return os.RemoveAll(tmpDir)
|
||||
}
|
||||
|
||||
func (c *Client) PowerOffVirtualMachine(vm *importjob.VirtualMachine) error {
|
||||
computeClient, err := openstack.NewComputeV2(c.pClient, c.opts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error generating compute client during poweroffvirtualmachine: %v", err)
|
||||
}
|
||||
uuid, err := c.checkOrGetUUID(vm.Spec.VirtualMachineName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ok, err := c.IsPoweredOff(vm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
return startstop.Stop(computeClient, uuid).ExtractErr()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) IsPoweredOff(vm *importjob.VirtualMachine) (bool, error) {
|
||||
|
||||
s, err := c.findVM(vm.Spec.VirtualMachineName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if s.Status == "SHUTOFF" {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (c *Client) GenerateVirtualMachine(vm *importjob.VirtualMachine) (*kubevirt.VirtualMachine, error) {
|
||||
vmObj, err := c.findVM(vm.Spec.VirtualMachineName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error finding vm in generatevirtualmachine: %v", err)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
flavorObj, err := flavors.Get(c.computeClient, vmObj.Flavor["id"].(string)).Extract()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error looking up flavor: %v", err)
|
||||
}
|
||||
|
||||
var networks []networkInfo
|
||||
for network, values := range vmObj.Addresses {
|
||||
valArr, ok := values.([]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error asserting interface []interface")
|
||||
}
|
||||
for _, v := range valArr {
|
||||
valMap, ok := v.(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error asserting network array element into map[string]string")
|
||||
}
|
||||
networks = append(networks, networkInfo{
|
||||
NetworkName: network,
|
||||
MAC: valMap["OS-EXT-IPS-MAC:mac_addr"].(string),
|
||||
})
|
||||
}
|
||||
}
|
||||
newVM := &kubevirt.VirtualMachine{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: vm.Spec.VirtualMachineName,
|
||||
Namespace: vm.Namespace,
|
||||
},
|
||||
}
|
||||
|
||||
vmSpec := kubevirt.VirtualMachineSpec{
|
||||
RunStrategy: &[]kubevirt.VirtualMachineRunStrategy{kubevirt.RunStrategyRerunOnFailure}[0],
|
||||
Template: &kubevirt.VirtualMachineInstanceTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"harvesterhci.io/vmName": vm.Spec.VirtualMachineName,
|
||||
},
|
||||
},
|
||||
Spec: kubevirt.VirtualMachineInstanceSpec{
|
||||
Domain: kubevirt.DomainSpec{
|
||||
CPU: &kubevirt.CPU{
|
||||
Cores: uint32(flavorObj.VCPUs),
|
||||
Sockets: uint32(1),
|
||||
Threads: 1,
|
||||
},
|
||||
Memory: &kubevirt.Memory{
|
||||
Guest: &[]resource.Quantity{resource.MustParse(fmt.Sprintf("%dM", flavorObj.RAM))}[0],
|
||||
},
|
||||
Resources: kubevirt.ResourceRequirements{
|
||||
Limits: corev1.ResourceList{
|
||||
corev1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dM", flavorObj.RAM)),
|
||||
corev1.ResourceCPU: resource.MustParse(fmt.Sprintf("%d", flavorObj.VCPUs)),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var networkConfig []kubevirt.Network
|
||||
mappedNetwork := mapNetworkCards(networks, vm.Spec.Mapping)
|
||||
for i, v := range mappedNetwork {
|
||||
networkConfig = append(networkConfig, kubevirt.Network{
|
||||
NetworkSource: kubevirt.NetworkSource{
|
||||
Multus: &kubevirt.MultusNetwork{
|
||||
NetworkName: v.MappedNetwork,
|
||||
},
|
||||
},
|
||||
Name: fmt.Sprintf("migrated-%d", i),
|
||||
})
|
||||
}
|
||||
|
||||
var interfaces []kubevirt.Interface
|
||||
for i, v := range mappedNetwork {
|
||||
interfaces = append(interfaces, kubevirt.Interface{
|
||||
Name: fmt.Sprintf("migrated-%d", i),
|
||||
MacAddress: v.MAC,
|
||||
Model: "virtio",
|
||||
InterfaceBindingMethod: kubevirt.InterfaceBindingMethod{
|
||||
Bridge: &kubevirt.InterfaceBridge{},
|
||||
},
|
||||
})
|
||||
}
|
||||
// if there is no network, attach to Pod network. Essential for VM to be booted up
|
||||
if len(networkConfig) == 0 {
|
||||
networkConfig = append(networkConfig, kubevirt.Network{
|
||||
Name: "pod-network",
|
||||
NetworkSource: kubevirt.NetworkSource{
|
||||
Pod: &kubevirt.PodNetwork{},
|
||||
},
|
||||
})
|
||||
interfaces = append(interfaces, kubevirt.Interface{
|
||||
Name: "pod-network",
|
||||
Model: "virtio",
|
||||
InterfaceBindingMethod: kubevirt.InterfaceBindingMethod{
|
||||
Masquerade: &kubevirt.InterfaceMasquerade{},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
vmSpec.Template.Spec.Networks = networkConfig
|
||||
vmSpec.Template.Spec.Domain.Devices.Interfaces = interfaces
|
||||
newVM.Spec = vmSpec
|
||||
// disk attachment needs query by core controller for storage classes, so will be added by the importjob controller
|
||||
return newVM, nil
|
||||
}
|
||||
|
||||
// SetupOpenStackSecretFromEnv is a helper function to ease with testing
|
||||
func SetupOpenstackSecretFromEnv(name string) (*corev1.Secret, error) {
|
||||
s := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: "default",
|
||||
},
|
||||
}
|
||||
|
||||
username, ok := os.LookupEnv("OS_USERNAME")
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no env variable OS_USERNAME specified")
|
||||
}
|
||||
|
||||
password, ok := os.LookupEnv("OS_PASSWORD")
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no env variable OS_PASSWORD specified")
|
||||
}
|
||||
|
||||
tenant, ok := os.LookupEnv("OS_PROJECT_NAME")
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no env variable OS_PROJECT_NAME specified")
|
||||
}
|
||||
|
||||
domain, ok := os.LookupEnv("OS_USER_DOMAIN_NAME")
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no env variable OS_DOMAIN_NAME specified")
|
||||
}
|
||||
|
||||
// generate common secret
|
||||
data := map[string][]byte{
|
||||
"username": []byte(username),
|
||||
"password": []byte(password),
|
||||
"project_name": []byte(tenant),
|
||||
"domain_name": []byte(domain),
|
||||
}
|
||||
s.Data = data
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// SetupOpenstackSourceFromEnv is a helper function to ease with testing
|
||||
func SetupOpenstackSourceFromEnv() (string, string, error) {
|
||||
var endpoint, region string
|
||||
var ok bool
|
||||
endpoint, ok = os.LookupEnv("OS_AUTH_URL")
|
||||
if !ok {
|
||||
return endpoint, region, fmt.Errorf("no env variable OS_AUTH_URL specified")
|
||||
}
|
||||
|
||||
region, ok = os.LookupEnv("OS_REGION_NAME")
|
||||
if !ok {
|
||||
return endpoint, region, fmt.Errorf("no env variable OS_AUTH_URL specified")
|
||||
}
|
||||
|
||||
return endpoint, region, nil
|
||||
}
|
||||
|
||||
// checkOrGetUUID will check if input is a valid uuid. If not, it assume that the given input
|
||||
// is a servername and will try and find a uuid for this server.
|
||||
// openstack allows multiple server names to have the same name, in which case an error will be returned
|
||||
func (c *Client) checkOrGetUUID(input string) (string, error) {
|
||||
parsedUuid, err := uuid.Parse(input)
|
||||
if err == nil {
|
||||
return parsedUuid.String(), nil
|
||||
}
|
||||
|
||||
// assume this is a name and find server based on name
|
||||
/*computeClient, err := openstack.NewComputeV2(c.pClient, c.opts)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error generating compute client during checkorGetUUID: %v", err)
|
||||
}*/
|
||||
|
||||
pg := servers.List(c.computeClient, servers.ListOpts{Name: input})
|
||||
allPg, err := pg.AllPages()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error generating all pages in checkorgetuuid :%v", err)
|
||||
}
|
||||
|
||||
ok, err := allPg.IsEmpty()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error checking if pages were empty in checkorgetuuid: %v", err)
|
||||
}
|
||||
|
||||
if ok {
|
||||
return "", fmt.Errorf(NotServerFound)
|
||||
}
|
||||
|
||||
allServers, err := servers.ExtractServers(allPg)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error extracting servers in checkorgetuuid:%v", err)
|
||||
}
|
||||
|
||||
if len(allServers) > 1 {
|
||||
return "", fmt.Errorf(NotUniqueName)
|
||||
}
|
||||
|
||||
return allServers[0].ID, nil
|
||||
}
|
||||
|
||||
func (c *Client) findVM(name string) (*servers.Server, error) {
|
||||
parsedUuid, err := c.checkOrGetUUID(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return servers.Get(c.computeClient, parsedUuid).Extract()
|
||||
|
||||
}
|
||||
|
||||
type networkInfo struct {
|
||||
NetworkName string
|
||||
MAC string
|
||||
MappedNetwork string
|
||||
}
|
||||
|
||||
func mapNetworkCards(networkCards []networkInfo, mapping []importjob.NetworkMapping) []networkInfo {
|
||||
var retNetwork []networkInfo
|
||||
for _, nc := range networkCards {
|
||||
for _, m := range mapping {
|
||||
if m.SourceNetwork == nc.NetworkName {
|
||||
nc.MappedNetwork = m.DestinationNetwork
|
||||
retNetwork = append(retNetwork, nc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return retNetwork
|
||||
}
|
||||
|
115
pkg/source/openstack/client_test.go
Normal file
115
pkg/source/openstack/client_test.go
Normal file
@ -0,0 +1,115 @@
|
||||
package openstack
|
||||
|
||||
import (
|
||||
"context"
|
||||
importjob "github.com/harvester/vm-import-controller/pkg/apis/importjob.harvesterhci.io/v1beta1"
|
||||
"github.com/harvester/vm-import-controller/pkg/server"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/require"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
c *Client
|
||||
testVM string
|
||||
)
|
||||
|
||||
func TestMain(t *testing.M) {
|
||||
var err error
|
||||
s, err := SetupOpenstackSecretFromEnv("devstack")
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
endpoint, region, err := SetupOpenstackSourceFromEnv()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
c, err = NewClient(context.TODO(), endpoint, region, s)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
server.NewServer(context.TODO())
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
code := t.Run()
|
||||
os.Exit(code)
|
||||
}
|
||||
func Test_NewClient(t *testing.T) {
|
||||
assert := require.New(t)
|
||||
err := c.Verify()
|
||||
assert.NoError(err, "expect no error during verify of client")
|
||||
}
|
||||
|
||||
func Test_checkOrGetUUID(t *testing.T) {
|
||||
assert := require.New(t)
|
||||
vmName, ok := os.LookupEnv("OS_VM_NAME")
|
||||
assert.True(ok, "expected env variable VM_NAME to be set")
|
||||
_, err := c.checkOrGetUUID(vmName)
|
||||
assert.NoError(err, "expected no error during checkOrGetUUID")
|
||||
}
|
||||
|
||||
func Test_IsPoweredOff(t *testing.T) {
|
||||
assert := require.New(t)
|
||||
vmName, ok := os.LookupEnv("OS_VM_NAME")
|
||||
assert.True(ok, "expected env variable VM_NAME to be set")
|
||||
vm := &importjob.VirtualMachine{
|
||||
Spec: importjob.VirtualMachineImportSpec{
|
||||
VirtualMachineName: vmName,
|
||||
},
|
||||
}
|
||||
_, err := c.IsPoweredOff(vm)
|
||||
assert.NoError(err, "expected no error during check of power status")
|
||||
}
|
||||
|
||||
func Test_PowerOffVirtualMachine(t *testing.T) {
|
||||
assert := require.New(t)
|
||||
vmName, ok := os.LookupEnv("OS_VM_NAME")
|
||||
assert.True(ok, "expected env variable VM_NAME to be set")
|
||||
vm := &importjob.VirtualMachine{
|
||||
Spec: importjob.VirtualMachineImportSpec{
|
||||
VirtualMachineName: vmName,
|
||||
},
|
||||
}
|
||||
err := c.PowerOffVirtualMachine(vm)
|
||||
assert.NoError(err, "expected no error during check of power status")
|
||||
}
|
||||
|
||||
func Test_ExportVirtualMachine(t *testing.T) {
|
||||
assert := require.New(t)
|
||||
vmName, ok := os.LookupEnv("OS_VM_NAME")
|
||||
assert.True(ok, "expected env variable VM_NAME to be set")
|
||||
vm := &importjob.VirtualMachine{
|
||||
Spec: importjob.VirtualMachineImportSpec{
|
||||
VirtualMachineName: vmName,
|
||||
},
|
||||
}
|
||||
err := c.ExportVirtualMachine(vm)
|
||||
assert.NoError(err, "expected no error during exportvirtualmachines")
|
||||
assert.NotEmpty(vm.Status.DiskImportStatus, "expected diskimportstatus to be populated")
|
||||
t.Log(vm.Status.DiskImportStatus)
|
||||
}
|
||||
|
||||
func Test_GenerateVirtualMachine(t *testing.T) {
|
||||
assert := require.New(t)
|
||||
vmName := os.Getenv("OS_VM_NAME")
|
||||
assert.NotEmpty(vmName, "expected env variable VM_NAME to be set")
|
||||
vm := &importjob.VirtualMachine{
|
||||
Spec: importjob.VirtualMachineImportSpec{
|
||||
VirtualMachineName: vmName,
|
||||
},
|
||||
}
|
||||
newVM, err := c.GenerateVirtualMachine(vm)
|
||||
assert.NoError(err, "expected no error during GenerateVirtualMachine")
|
||||
assert.NotEmpty(newVM.Spec.Template.Spec.Domain.CPU, "expected CPU's to not be empty")
|
||||
assert.NotEmpty(newVM.Spec.Template.Spec.Domain.Resources.Limits.Memory(), "expected memory limit to not be empty")
|
||||
assert.NotEmpty(newVM.Spec.Template.Spec.Networks, "expected to find atleast 1 network as pod network should have been applied")
|
||||
assert.NotEmpty(newVM.Spec.Template.Spec.Domain.Devices.Interfaces, "expected to find atleast 1 interface for pod-network")
|
||||
}
|
152
tests/integration/openstack_test.go
Normal file
152
tests/integration/openstack_test.go
Normal file
@ -0,0 +1,152 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
harvesterv1beta1 "github.com/harvester/harvester/pkg/apis/harvesterhci.io/v1beta1"
|
||||
importjob "github.com/harvester/vm-import-controller/pkg/apis/importjob.harvesterhci.io/v1beta1"
|
||||
source "github.com/harvester/vm-import-controller/pkg/apis/source.harvesterhci.io/v1beta1"
|
||||
"github.com/harvester/vm-import-controller/pkg/util"
|
||||
"github.com/harvester/vm-import-controller/tests/setup"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kubevirt "kubevirt.io/api/core/v1"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Integration spec exercising the openstack export/import pipeline end to end:
// source registration, importjob condition progression, disk image / PVC
// creation, kubevirt VirtualMachine creation and owner-reference cleanup.
// Only runs against an existing environment (gated by useExisting).
var _ = Describe("test openstack export/import integration", func() {
	BeforeEach(func() {
		// setup creates the secret, source and importjob objects from the
		// OS_* environment variables
		if !useExisting {
			return
		}
		err := setup.SetupOpenstack(ctx, k8sClient)
		Expect(err).ToNot(HaveOccurred())
	})

	It("reconcile openstack importjob object status", func() {
		if !useExisting {
			Skip("skipping openstack integration tests as not using an existing environment")
		}

		By("checking if openstack source is ready", func() {
			Eventually(func() error {
				o := &source.Openstack{}
				err := k8sClient.Get(ctx, setup.OpenstackSourceNamespacedName, o)
				if err != nil {
					return err
				}

				// the source controller marks the cluster ready once it can
				// list VMs in the configured project
				if o.Status.Status != source.ClusterReady {
					return fmt.Errorf("waiting for cluster source to be ready. current status is %s", o.Status.Status)
				}
				return nil
			}, "30s", "10s").ShouldNot(HaveOccurred())
		})

		By("vm importjob has the correct conditions", func() {
			Eventually(func() error {
				v := &importjob.VirtualMachine{}
				err := k8sClient.Get(ctx, setup.OpenstackVMNamespacedName, v)
				if err != nil {
					return err
				}
				// import progress is recorded as conditions, in order:
				// powering-off -> powered-off -> exported
				if !util.ConditionExists(v.Status.ImportConditions, importjob.VirtualMachinePoweringOff, v1.ConditionTrue) {
					return fmt.Errorf("expected virtualmachinepoweringoff condition to be present")
				}

				if !util.ConditionExists(v.Status.ImportConditions, importjob.VirtualMachinePoweredOff, v1.ConditionTrue) {
					return fmt.Errorf("expected virtualmachinepoweredoff condition to be present")
				}

				if !util.ConditionExists(v.Status.ImportConditions, importjob.VirtualMachineExported, v1.ConditionTrue) {
					return fmt.Errorf("expected virtualmachineexported condition to be present")
				}

				return nil
			}, "300s", "10s").ShouldNot(HaveOccurred())
		})

		By("checking that PVC claim has been created", func() {
			Eventually(func() error {
				v := &importjob.VirtualMachine{}
				err := k8sClient.Get(ctx, setup.OpenstackVMNamespacedName, v)
				if err != nil {
					return err
				}
				if len(v.Status.DiskImportStatus) == 0 {
					return fmt.Errorf("diskimportstatus should have image details available")
				}
				for _, d := range v.Status.DiskImportStatus {
					if d.VirtualMachineImage == "" {
						return fmt.Errorf("waiting for VMI to be populated")
					}
					pvc := &v1.PersistentVolumeClaim{}
					// PVC name is derived from the disk name: ".img" suffix
					// stripped, then lower-cased
					pvcName := strings.ToLower(strings.Split(d.Name, ".img")[0])
					err := k8sClient.Get(ctx, types.NamespacedName{Namespace: setup.OpenstackVMNamespacedName.Namespace,
						Name: pvcName}, pvc)
					if err != nil {
						return err
					}

					if pvc.Status.Phase != v1.ClaimBound {
						return fmt.Errorf("waiting for pvc claim to be in state bound")
					}
				}

				return nil
			}, "120s", "10s").ShouldNot(HaveOccurred())
		})

		By("checking that the virtualmachine has been created", func() {
			Eventually(func() error {
				v := &importjob.VirtualMachine{}
				err := k8sClient.Get(ctx, setup.OpenstackVMNamespacedName, v)
				if err != nil {
					return err
				}

				// the kubevirt VirtualMachine is looked up by the source VM
				// name carried in the importjob spec
				vm := &kubevirt.VirtualMachine{}
				err = k8sClient.Get(ctx, types.NamespacedName{
					Namespace: setup.OpenstackVMNamespacedName.Namespace,
					Name:      v.Spec.VirtualMachineName,
				}, vm)

				return err
			}, "300s", "10s").ShouldNot(HaveOccurred())
		})

		By("checking that the virtualmachineimage ownership has been removed", func() {
			Eventually(func() error {
				v := &importjob.VirtualMachine{}
				err := k8sClient.Get(ctx, setup.OpenstackVMNamespacedName, v)
				if err != nil {
					return err
				}

				for _, d := range v.Status.DiskImportStatus {
					vmi := &harvesterv1beta1.VirtualMachineImage{}
					err := k8sClient.Get(ctx, types.NamespacedName{Namespace: setup.OpenstackVMNamespacedName.Namespace,
						Name: d.VirtualMachineImage}, vmi)
					if err != nil {
						return err
					}

					// once the import completes the ownerRef is expected to be
					// cleared so the image outlives the importjob object
					if len(vmi.OwnerReferences) != 0 {
						return fmt.Errorf("waiting for ownerRef to be cleared")
					}
				}

				return nil
			}, "300s", "10s").ShouldNot(HaveOccurred())
		})
	})

	AfterEach(func() {
		// tear down the secret, source and importjob created in BeforeEach
		if !useExisting {
			return
		}
		err := setup.CleanupOpenstack(ctx, k8sClient)
		Expect(err).ToNot(HaveOccurred())
	})
})
|
@ -3,6 +3,8 @@ package integration
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/harvester/vm-import-controller/pkg/source/openstack"
|
||||
|
||||
source "github.com/harvester/vm-import-controller/pkg/apis/source.harvesterhci.io/v1beta1"
|
||||
"github.com/harvester/vm-import-controller/pkg/util"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
@ -76,8 +78,6 @@ var _ = Describe("verify vmware is ready", func() {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Info(vcsimObj.Status.Conditions)
|
||||
if util.ConditionExists(vcsimObj.Status.Conditions, source.ClusterReadyCondition, corev1.ConditionTrue) &&
|
||||
util.ConditionExists(vcsimObj.Status.Conditions, source.ClusterErrorCondition, corev1.ConditionFalse) {
|
||||
return nil
|
||||
@ -242,3 +242,73 @@ var _ = Describe("verify vmware has invalid DC", func() {
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
// Spec verifying that an Openstack source object, built from the OS_*
// environment variables, is reconciled to the ready state with the expected
// status and conditions.
var _ = Describe("verify openstack is ready", func() {
	var creds *corev1.Secret
	var o *source.Openstack

	const secretName = "devstack"
	BeforeEach(func() {
		// build the credentials secret and the source object from the
		// environment, then create both in the cluster
		var err error
		creds, err = openstack.SetupOpenstackSecretFromEnv(secretName)
		Expect(err).ToNot(HaveOccurred())
		endpoint, region, err := openstack.SetupOpenstackSourceFromEnv()
		Expect(err).ToNot(HaveOccurred())
		err = k8sClient.Create(ctx, creds)
		Expect(err).ToNot(HaveOccurred())

		o = &source.Openstack{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "devstack",
				Namespace: "default",
			},
			Spec: source.OpenstackSpec{
				EndpointAddress: endpoint,
				Region:          region,
				Credentials: corev1.SecretReference{
					Name:      secretName,
					Namespace: "default",
				},
			},
		}
		err = k8sClient.Create(ctx, o)
		Expect(err).ToNot(HaveOccurred())
	})

	It("check openstack source is ready", func() {
		// wait for the top-level status field to report the cluster ready
		Eventually(func() error {
			oObj := &source.Openstack{}
			err := k8sClient.Get(ctx, types.NamespacedName{Name: o.Name, Namespace: o.Namespace}, oObj)
			if err != nil {
				return err
			}
			if oObj.Status.Status == source.ClusterReady {
				return nil
			}
			return fmt.Errorf("source currently in state: %v, expected to be %s", oObj.Status.Status, source.ClusterReady)
		}, "30s", "5s").ShouldNot(HaveOccurred())

		// check conditions on source object
		Eventually(func() error {
			oObj := &source.Openstack{}
			err := k8sClient.Get(ctx, types.NamespacedName{Name: o.Name,
				Namespace: o.Namespace}, oObj)
			if err != nil {
				return err
			}
			// ready must be True AND error must be False for a healthy source
			if util.ConditionExists(oObj.Status.Conditions, source.ClusterReadyCondition, corev1.ConditionTrue) &&
				util.ConditionExists(oObj.Status.Conditions, source.ClusterErrorCondition, corev1.ConditionFalse) {
				return nil
			}

			return fmt.Errorf("expected source to have condition %s as %v", source.ClusterReadyCondition, corev1.ConditionTrue)
		}, "30s", "5s").ShouldNot(HaveOccurred())
	})
	AfterEach(func() {
		// remove the secret and source created for this spec
		err := k8sClient.Delete(ctx, creds)
		Expect(err).ToNot(HaveOccurred())
		err = k8sClient.Delete(ctx, o)
		Expect(err).ToNot(HaveOccurred())

	})
})
|
||||
|
158
tests/setup/setup_openstack.go
Normal file
158
tests/setup/setup_openstack.go
Normal file
@ -0,0 +1,158 @@
|
||||
package setup
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
importjob "github.com/harvester/vm-import-controller/pkg/apis/importjob.harvesterhci.io/v1beta1"
|
||||
source "github.com/harvester/vm-import-controller/pkg/apis/source.harvesterhci.io/v1beta1"
|
||||
"github.com/harvester/vm-import-controller/pkg/source/openstack"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"os"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
// Names of the kubernetes objects created for the openstack integration tests.
const (
	openstackSecret         = "openstack-integration" // credentials secret name
	openstackSourceCluster  = "openstack-integration" // Openstack source object name
	openstackKind           = "openstack"             // kind referenced by the importjob SourceCluster
	openstackVirtualMachine = "openstack-vm-export"   // importjob object name
)
|
||||
|
||||
var (
	// OpenstackSourceNamespacedName and OpenstackVMNamespacedName identify the
	// source and importjob objects created by SetupOpenstack, for use by the
	// integration specs.
	OpenstackSourceNamespacedName, OpenstackVMNamespacedName types.NamespacedName
)
|
||||
|
||||
// SetupOpenstack will try and set up an openstack source based on the OS_* environment variables.
// (Fixed doc: previous comment wrongly referred to a vmware source and GOVC variables.)
// It will check the following environment variables to build source and importjob CRD's:
// OS_AUTH_URL: Identify keystone endpoint
// OS_PROJECT_NAME: Project name where test instance is located
// OS_USERNAME: Username for source secret
// OS_PASSWORD: Password for source secret
// OS_USER_DOMAIN_NAME: domain name for user auth
// OS_VM_NAME: name of VM to be exported
// OS_REGION_NAME: Openstack instance region to be used for testing
// SVC_ADDRESS: Exposes the local host as SVC url when creating VirtualDiskImage endpoints to download images from
func SetupOpenstack(ctx context.Context, k8sClient client.Client) error {
	// publish the namespaced names so the integration specs can look up the
	// objects created below
	OpenstackSourceNamespacedName = types.NamespacedName{
		Name:      openstackSourceCluster,
		Namespace: defaultNamespace,
	}

	OpenstackVMNamespacedName = types.NamespacedName{
		Name:      openstackVirtualMachine,
		Namespace: defaultNamespace,
	}
	// create the secret, source and importjob objects in dependency order
	fnList := []applyObject{
		setupOpenstackSecret,
		setupOpenstackSource,
		setupOpenstackVMExport,
	}

	for _, v := range fnList {
		if err := v(ctx, k8sClient); err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
func setupOpenstackSecret(ctx context.Context, k8sClient client.Client) error {
|
||||
s, err := openstack.SetupOpenstackSecretFromEnv(openstackSecret)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return k8sClient.Create(ctx, s)
|
||||
}
|
||||
|
||||
func setupOpenstackSource(ctx context.Context, k8sClient client.Client) error {
|
||||
|
||||
endpoint, region, err := openstack.SetupOpenstackSourceFromEnv()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s := &source.Openstack{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: openstackSourceCluster,
|
||||
Namespace: defaultNamespace,
|
||||
},
|
||||
Spec: source.OpenstackSpec{
|
||||
EndpointAddress: endpoint,
|
||||
Region: region,
|
||||
Credentials: corev1.SecretReference{
|
||||
Name: openstackSecret,
|
||||
Namespace: defaultNamespace,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return k8sClient.Create(ctx, s)
|
||||
}
|
||||
|
||||
func setupOpenstackVMExport(ctx context.Context, k8sClient client.Client) error {
|
||||
vm, ok := os.LookupEnv("OS_VM_NAME")
|
||||
if !ok {
|
||||
return fmt.Errorf("env variable VM_NAME not specified")
|
||||
}
|
||||
|
||||
_, ok = os.LookupEnv("SVC_ADDRESS")
|
||||
if !ok {
|
||||
return fmt.Errorf("env variable SVC_ADDRESS not specified")
|
||||
}
|
||||
|
||||
j := &importjob.VirtualMachine{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: openstackVirtualMachine,
|
||||
Namespace: defaultNamespace,
|
||||
},
|
||||
Spec: importjob.VirtualMachineImportSpec{
|
||||
SourceCluster: corev1.ObjectReference{
|
||||
Name: openstackSourceCluster,
|
||||
Namespace: defaultNamespace,
|
||||
Kind: openstackKind,
|
||||
APIVersion: defaultAPIVersion,
|
||||
},
|
||||
VirtualMachineName: vm,
|
||||
},
|
||||
}
|
||||
|
||||
return k8sClient.Create(ctx, j)
|
||||
}
|
||||
|
||||
func CleanupOpenstack(ctx context.Context, k8sClient client.Client) error {
|
||||
s := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: openstackSecret,
|
||||
Namespace: defaultNamespace,
|
||||
},
|
||||
}
|
||||
err := k8sClient.Delete(ctx, s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vmware := &source.Openstack{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: openstackSourceCluster,
|
||||
Namespace: defaultNamespace,
|
||||
},
|
||||
}
|
||||
|
||||
err = k8sClient.Delete(ctx, vmware)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
i := &importjob.VirtualMachine{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: OpenstackVMNamespacedName.Name,
|
||||
Namespace: defaultNamespace,
|
||||
},
|
||||
}
|
||||
|
||||
return k8sClient.Delete(ctx, i)
|
||||
}
|
@ -49,9 +49,9 @@ func SetupVMware(ctx context.Context, k8sClient client.Client) error {
|
||||
}
|
||||
|
||||
fnList := []applyObject{
|
||||
setupSecret,
|
||||
setupSource,
|
||||
setupVMExport,
|
||||
setupVmwareSecret,
|
||||
setupVmwareSource,
|
||||
setupVmwareVMExport,
|
||||
}
|
||||
|
||||
for _, v := range fnList {
|
||||
@ -63,7 +63,7 @@ func SetupVMware(ctx context.Context, k8sClient client.Client) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func setupSecret(ctx context.Context, k8sClient client.Client) error {
|
||||
func setupVmwareSecret(ctx context.Context, k8sClient client.Client) error {
|
||||
username, ok := os.LookupEnv("GOVC_USERNAME")
|
||||
if !ok {
|
||||
return fmt.Errorf("env variable GOVC_USERNAME not set")
|
||||
@ -87,7 +87,7 @@ func setupSecret(ctx context.Context, k8sClient client.Client) error {
|
||||
return k8sClient.Create(ctx, s)
|
||||
}
|
||||
|
||||
func setupSource(ctx context.Context, k8sClient client.Client) error {
|
||||
func setupVmwareSource(ctx context.Context, k8sClient client.Client) error {
|
||||
endpoint, ok := os.LookupEnv("GOVC_URL")
|
||||
if !ok {
|
||||
return fmt.Errorf("env variable GOVC_URL not set")
|
||||
@ -117,7 +117,7 @@ func setupSource(ctx context.Context, k8sClient client.Client) error {
|
||||
|
||||
}
|
||||
|
||||
func setupVMExport(ctx context.Context, k8sClient client.Client) error {
|
||||
func setupVmwareVMExport(ctx context.Context, k8sClient client.Client) error {
|
||||
vm, ok := os.LookupEnv("VM_NAME")
|
||||
if !ok {
|
||||
return fmt.Errorf("env variable VM_NAME not specified")
|
||||
|
Loading…
Reference in New Issue
Block a user