From 87ff97ca581d07fb863391ceeb8866129259375e Mon Sep 17 00:00:00 2001 From: Pavel Boldyrev <627562+bpg@users.noreply.github.com> Date: Sat, 3 Feb 2024 16:57:46 -0500 Subject: [PATCH] more disk management refactoring Signed-off-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com> --- proxmox/nodes/vms/vms.go | 89 +++++--- proxmox/nodes/vms/vms_types.go | 29 ++- proxmox/nodes/vms/vms_types_test.go | 96 ++++++++ proxmoxtf/resource/vm/disk.go | 330 +++++++++++----------------- proxmoxtf/resource/vm/vm.go | 194 ++++++++-------- 5 files changed, 396 insertions(+), 342 deletions(-) diff --git a/proxmox/nodes/vms/vms.go b/proxmox/nodes/vms/vms.go index 38f86977..aeb1e6c9 100644 --- a/proxmox/nodes/vms/vms.go +++ b/proxmox/nodes/vms/vms.go @@ -262,44 +262,75 @@ func (c *Client) RebootVMAsync(ctx context.Context, d *RebootRequestBody) (*stri return resBody.Data, nil } +// // ResizeVMDisk resizes a virtual machine disk. +// func (c *Client) ResizeVMDisk(ctx context.Context, d *ResizeDiskRequestBody) error { +// var err error + +// tflog.Debug(ctx, "resize disk", map[string]interface{}{ +// "disk": d.Disk, +// "size": d.Size, +// }) + +// for i := 0; i < 5; i++ { +// err = c.DoRequest( +// ctx, +// http.MethodPut, +// c.ExpandPath("resize"), +// d, +// nil, +// ) +// if err == nil { +// return nil +// } + +// tflog.Debug(ctx, "resize disk failed", map[string]interface{}{ +// "retry": i, +// }) +// time.Sleep(5 * time.Second) + +// if ctx.Err() != nil { +// return fmt.Errorf("error resizing VM disk: %w", ctx.Err()) +// } +// } + +// if err != nil { +// return fmt.Errorf("error resizing VM disk: %w", err) +// } + +// return nil +// } + // ResizeVMDisk resizes a virtual machine disk. 
-func (c *Client) ResizeVMDisk(ctx context.Context, d *ResizeDiskRequestBody) error {
-	var err error
-
-	tflog.Debug(ctx, "resize disk", map[string]interface{}{
-		"disk": d.Disk,
-		"size": d.Size,
-	})
-
-	for i := 0; i < 5; i++ {
-		err = c.DoRequest(
-			ctx,
-			http.MethodPut,
-			c.ExpandPath("resize"),
-			d,
-			nil,
-		)
-		if err == nil {
-			return nil
-		}
-
-		tflog.Debug(ctx, "resize disk failed", map[string]interface{}{
-			"retry": i,
-		})
-		time.Sleep(5 * time.Second)
-
-		if ctx.Err() != nil {
-			return fmt.Errorf("error resizing VM disk: %w", ctx.Err())
-		}
+func (c *Client) ResizeVMDisk(ctx context.Context, d *ResizeDiskRequestBody, timeout int) error {
+	taskID, err := c.ResizeVMDiskAsync(ctx, d)
+	if err != nil {
+		return err
 	}
 
+	err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
 	if err != nil {
-		return fmt.Errorf("error resizing VM disk: %w", err)
+		return fmt.Errorf("error waiting for VM disk resize: %w", err)
 	}
 
 	return nil
 }
 
+// ResizeVMDiskAsync resizes a virtual machine disk asynchronously.
+func (c *Client) ResizeVMDiskAsync(ctx context.Context, d *ResizeDiskRequestBody) (*string, error) {
+	resBody := &MoveDiskResponseBody{}
+
+	err := c.DoRequest(ctx, http.MethodPut, c.ExpandPath("resize"), d, resBody)
+	if err != nil {
+		return nil, fmt.Errorf("error resizing VM disk: %w", err)
+	}
+
+	if resBody.Data == nil {
+		return nil, api.ErrNoDataObjectInResponse
+	}
+
+	return resBody.Data, nil
+}
+
 // ShutdownVM shuts down a virtual machine.
func (c *Client) ShutdownVM(ctx context.Context, d *ShutdownRequestBody, timeout int) error { taskID, err := c.ShutdownVMAsync(ctx, d) diff --git a/proxmox/nodes/vms/vms_types.go b/proxmox/nodes/vms/vms_types.go index 0cb7148d..1de4d9b2 100644 --- a/proxmox/nodes/vms/vms_types.go +++ b/proxmox/nodes/vms/vms_types.go @@ -14,6 +14,7 @@ import ( "path/filepath" "strconv" "strings" + "unicode" "github.com/bpg/terraform-provider-proxmox/proxmox/types" ) @@ -242,8 +243,32 @@ func (d CustomStorageDevice) IsCloudInitDrive(vmID int) bool { strings.Contains(d.FileVolume, fmt.Sprintf("vm-%d-cloudinit", vmID)) } -// CustomStorageDevices handles QEMU SATA device parameters. -type CustomStorageDevices map[string]CustomStorageDevice +// StorageInterface returns the storage interface of the CustomStorageDevice, e.g. "virtio" or "scsi" for "virtio0" or "scsi2". +func (d CustomStorageDevice) StorageInterface() string { + for i, r := range *d.Interface { + if unicode.IsDigit(r) { + return (*d.Interface)[:i] + } + } + + panic(fmt.Sprintf("cannot determine storage interface for disk interface '%s'", *d.Interface)) +} + +// CustomStorageDevices handles map of QEMU storage device per disk interface. +type CustomStorageDevices map[string]*CustomStorageDevice + +// ByStorageInterface returns a map of CustomStorageDevices filtered by the given storage interface. +func (d CustomStorageDevices) ByStorageInterface(storageInterface string) CustomStorageDevices { + result := make(CustomStorageDevices) + + for k, v := range d { + if v.StorageInterface() == storageInterface { + result[k] = v + } + } + + return result +} // CustomTPMState handles QEMU TPM state parameters. 
 type CustomTPMState struct {
diff --git a/proxmox/nodes/vms/vms_types_test.go b/proxmox/nodes/vms/vms_types_test.go
index 18cc7bc5..a716026e 100644
--- a/proxmox/nodes/vms/vms_types_test.go
+++ b/proxmox/nodes/vms/vms_types_test.go
@@ -120,6 +120,102 @@ func TestCustomStorageDevice_IsCloudInitDrive(t *testing.T) {
 	}
 }
 
+func TestCustomStorageDevice_StorageInterface(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		name   string
+		device CustomStorageDevice
+		want   string
+	}{
+		{
+			name: "virtio0",
+			device: CustomStorageDevice{
+				Interface: types.StrPtr("virtio0"),
+			},
+			want: "virtio",
+		}, {
+			name: "scsi13",
+			device: CustomStorageDevice{
+				Interface: types.StrPtr("scsi13"),
+			},
+			want: "scsi",
+		},
+	}
+
+	for _, tt := range tests {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
+			got := tt.device.StorageInterface()
+			assert.Equal(t, tt.want, got)
+		})
+	}
+}
+
+func TestCustomStorageDevices_ByStorageInterface(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		name    string
+		iface   string
+		devices CustomStorageDevices
+		want    CustomStorageDevices
+	}{
+		{
+			name:    "empty",
+			iface:   "virtio",
+			devices: CustomStorageDevices{},
+			want:    CustomStorageDevices{},
+		},
+		{
+			name:  "not in the list",
+			iface: "sata",
+			devices: CustomStorageDevices{
+				"virtio0": &CustomStorageDevice{
+					Interface: types.StrPtr("virtio0"),
+				},
+				"scsi13": &CustomStorageDevice{
+					Interface: types.StrPtr("scsi13"),
+				},
+			},
+			want: CustomStorageDevices{},
+		},
+		{
+			name:  "multiple in the list",
+			iface: "virtio",
+			devices: CustomStorageDevices{
+				"virtio0": &CustomStorageDevice{
+					Interface: types.StrPtr("virtio0"),
+				},
+				"scsi13": &CustomStorageDevice{
+					Interface: types.StrPtr("scsi13"),
+				},
+				"virtio1": &CustomStorageDevice{
+					Interface: types.StrPtr("virtio1"),
+				},
+			},
+			want: CustomStorageDevices{
+				"virtio0": &CustomStorageDevice{
+					Interface: types.StrPtr("virtio0"),
+				},
+				"virtio1": &CustomStorageDevice{
+					Interface: types.StrPtr("virtio1"),
+				},
+			},
+		},
} + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + got := tt.devices.ByStorageInterface(tt.iface) + assert.Equal(t, tt.want, got) + }) + } +} + func TestCustomPCIDevice_UnmarshalJSON(t *testing.T) { t.Parallel() diff --git a/proxmoxtf/resource/vm/disk.go b/proxmoxtf/resource/vm/disk.go index f4121ad8..28478ed5 100644 --- a/proxmoxtf/resource/vm/disk.go +++ b/proxmoxtf/resource/vm/disk.go @@ -2,7 +2,6 @@ package vm import ( "context" - "errors" "fmt" "strconv" "strings" @@ -17,7 +16,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "golang.org/x/exp/maps" ) const ( @@ -203,143 +201,100 @@ func createDisks( ctx context.Context, vmConfig *vms.GetResponseData, d *schema.ResourceData, vmAPI *vms.Client, ) (map[string]*vms.CustomStorageDevice, error) { // this is what VM has at the moment: map of interface name (virtio1) -> disk object - currentStorageDevices := populateFileIDs(mapStorageDevices(vmConfig), d) + currentDisks := populateFileIDs(mapStorageDevices(vmConfig), d) - // map of interface type (virtio|sata|scsi|...) 
-> map of interface name (virtio1) -> disk object - planStorageDevices, e := getStorageDevicesFromResource(d) + // map of interface name (virtio1) -> disk object + planDisks, e := getStorageDevicesFromResource(d) if e != nil { return nil, e } - // create disks that are not present in the current configuration - for prefix, disks := range planStorageDevices { - for diskInterface, disk := range disks { - if currentStorageDevices[diskInterface] == nil { - diskUpdateBody := &vms.UpdateRequestBody{} + for diskInterface, planDisk := range planDisks { + currentDisk := currentDisks[diskInterface] + if currentDisk == nil { + // create disks that are not present in the current configuration + err := createDisk(ctx, planDisk, vmAPI) + if err != nil { + return nil, err + } + } else { + // disk is present, i.e. when cloned a template, but we need to check if it needs to be resized + if planDisk.Size.InGigabytes() < currentDisk.Size.InGigabytes() { + return nil, fmt.Errorf("disk resize fails requests size (%dG) is lower than current size (%s)", + planDisk.Size.InGigabytes(), + *currentDisk.Size, + ) + } - switch prefix { - case "virtio": - if diskUpdateBody.VirtualIODevices == nil { - diskUpdateBody.VirtualIODevices = vms.CustomStorageDevices{} - } - diskUpdateBody.VirtualIODevices[diskInterface] = disk - case "sata": - if diskUpdateBody.SATADevices == nil { - diskUpdateBody.SATADevices = vms.CustomStorageDevices{} - } - diskUpdateBody.SATADevices[diskInterface] = disk - case "scsi": - if diskUpdateBody.SCSIDevices == nil { - diskUpdateBody.SCSIDevices = vms.CustomStorageDevices{} - } - diskUpdateBody.SCSIDevices[diskInterface] = disk + moveDisk := false + if *planDisk.ID != "" { + fileIDParts := strings.Split(currentDisk.FileVolume, ":") + moveDisk = *planDisk.ID != fileIDParts[0] + } + + if moveDisk { + moveDiskTimeout := d.Get(mkTimeoutMoveDisk).(int) + deleteOriginalDisk := types.CustomBool(true) + + diskMoveBody := &vms.MoveDiskRequestBody{ + DeleteOriginalDisk: 
&deleteOriginalDisk, + Disk: diskInterface, + TargetStorage: *planDisk.ID, } - e = vmAPI.UpdateVM(ctx, diskUpdateBody) - if e != nil { - return nil, e + err := vmAPI.MoveVMDisk(ctx, diskMoveBody, moveDiskTimeout) + if err != nil { + return nil, err + } + } + + if planDisk.Size.InGigabytes() > currentDisk.Size.InGigabytes() { + moveDiskTimeout := d.Get(mkTimeoutMoveDisk).(int) + + diskResizeBody := &vms.ResizeDiskRequestBody{ + Disk: diskInterface, + Size: *types.DiskSizeFromGigabytes(planDisk.Size.InGigabytes()), + } + + err := vmAPI.ResizeVMDisk(ctx, diskResizeBody, moveDiskTimeout) + if err != nil { + return nil, err } } } } + return currentDisks, nil +} - - - disk := d.Get(mkDisk).([]interface{}) - for i := range disk { - diskBlock := disk[i].(map[string]interface{}) - - diskInterface := diskBlock[mkDiskInterface].(string) - dataStoreID := diskBlock[mkDiskDatastoreID].(string) - diskSize := int64(diskBlock[mkDiskSize].(int)) - - prefix := diskDigitPrefix(diskInterface) - - currentDiskInfo := currentStorageDevices[diskInterface] - configuredDiskInfo := planStorageDevices[prefix][diskInterface] - - if currentDiskInfo == nil { - diskUpdateBody := &vms.UpdateRequestBody{} - - switch prefix { - case "virtio": - if diskUpdateBody.VirtualIODevices == nil { - diskUpdateBody.VirtualIODevices = vms.CustomStorageDevices{} - } - - diskUpdateBody.VirtualIODevices[diskInterface] = configuredDiskInfo - case "sata": - if diskUpdateBody.SATADevices == nil { - diskUpdateBody.SATADevices = vms.CustomStorageDevices{} - } - - diskUpdateBody.SATADevices[diskInterface] = configuredDiskInfo - case "scsi": - if diskUpdateBody.SCSIDevices == nil { - diskUpdateBody.SCSIDevices = vms.CustomStorageDevices{} - } - - diskUpdateBody.SCSIDevices[diskInterface] = configuredDiskInfo - } - - e = vmAPI.UpdateVM(ctx, diskUpdateBody) - if e != nil { - return nil, e - } - - continue +func createDisk(ctx context.Context, disk *vms.CustomStorageDevice, vmAPI *vms.Client) error { + addToDevices := 
func(ds vms.CustomStorageDevices, disk *vms.CustomStorageDevice) vms.CustomStorageDevices { + if ds == nil { + ds = vms.CustomStorageDevices{} } - if diskSize < currentDiskInfo.Size.InGigabytes() { - return nil, fmt.Errorf("disk resize fails requests size (%dG) is lower than current size (%s)", - diskSize, - *currentDiskInfo.Size, - ) - } + ds[*disk.Interface] = disk - deleteOriginalDisk := types.CustomBool(true) - - diskMoveBody := &vms.MoveDiskRequestBody{ - DeleteOriginalDisk: &deleteOriginalDisk, - Disk: diskInterface, - TargetStorage: dataStoreID, - } - - diskResizeBody := &vms.ResizeDiskRequestBody{ - Disk: diskInterface, - Size: *types.DiskSizeFromGigabytes(diskSize), - } - - moveDisk := false - - if dataStoreID != "" { - moveDisk = true - - if currentStorageDevices[diskInterface] != nil { - fileIDParts := strings.Split(currentStorageDevices[diskInterface].FileVolume, ":") - moveDisk = dataStoreID != fileIDParts[0] - } - } - - if moveDisk { - moveDiskTimeout := d.Get(mkTimeoutMoveDisk).(int) - - e = vmAPI.MoveVMDisk(ctx, diskMoveBody, moveDiskTimeout) - if e != nil { - return nil, e - } - } - - if diskSize > currentDiskInfo.Size.InGigabytes() { - e = vmAPI.ResizeVMDisk(ctx, diskResizeBody) - if e != nil { - return nil, e - } - } + return ds } - return currentStorageDevices, nil + diskUpdateBody := &vms.UpdateRequestBody{} + + switch disk.StorageInterface() { + case "virtio": + diskUpdateBody.VirtualIODevices = addToDevices(diskUpdateBody.VirtualIODevices, disk) + case "sata": + diskUpdateBody.SATADevices = addToDevices(diskUpdateBody.SATADevices, disk) + case "scsi": + diskUpdateBody.SCSIDevices = addToDevices(diskUpdateBody.SCSIDevices, disk) + } + + err := vmAPI.UpdateVM(ctx, diskUpdateBody) + if err != nil { + return err + } + + return nil } func vmCreateCustomDisks(ctx context.Context, d *schema.ResourceData, m interface{}) error { @@ -503,15 +458,12 @@ func vmCreateCustomDisks(ctx context.Context, d *schema.ResourceData, m interfac return nil } -func 
getStorageDevicesFromResource(d *schema.ResourceData) (map[string]map[string]vms.CustomStorageDevice, error) { +func getStorageDevicesFromResource(d *schema.ResourceData) (vms.CustomStorageDevices, error) { return getDiskDeviceObjects1(d, d.Get(mkDisk).([]interface{})) } -func getDiskDeviceObjects1( - d *schema.ResourceData, - disks []interface{}, -) (map[string]map[string]vms.CustomStorageDevice, error) { - diskDeviceObjects := map[string]map[string]vms.CustomStorageDevice{} +func getDiskDeviceObjects1(d *schema.ResourceData, disks []interface{}) (vms.CustomStorageDevices, error) { + diskDeviceObjects := vms.CustomStorageDevices{} resource := VM() for _, diskEntry := range disks { @@ -601,23 +553,16 @@ func getDiskDeviceObjects1( } } - baseDiskInterface := diskDigitPrefix(diskInterface) + storageInterface := diskDevice.StorageInterface() - if baseDiskInterface != "virtio" && baseDiskInterface != "scsi" && - baseDiskInterface != "sata" { - errorMsg := fmt.Sprintf( - "Defined disk interface not supported. Interface was %s, but only virtio, sata and scsi are supported", + if storageInterface != "virtio" && storageInterface != "scsi" && storageInterface != "sata" { + return diskDeviceObjects, fmt.Errorf( + "Defined disk interface not supported. 
Interface was '%s', but only 'virtio', 'sata' and 'scsi' are supported", diskInterface, ) - - return diskDeviceObjects, errors.New(errorMsg) } - if _, present := diskDeviceObjects[baseDiskInterface]; !present { - diskDeviceObjects[baseDiskInterface] = map[string]vms.CustomStorageDevice{} - } - - diskDeviceObjects[baseDiskInterface][diskInterface] = diskDevice + diskDeviceObjects[diskInterface] = &diskDevice } return diskDeviceObjects, nil @@ -756,63 +701,53 @@ func updateDisk(d *schema.ResourceData, vmConfig *vms.GetResponseData, updateBod return nil } - diskDeviceObjects, err := getStorageDevicesFromResource(d) + currentDisks := populateFileIDs(mapStorageDevices(vmConfig), d) + + planDisks, err := getStorageDevicesFromResource(d) if err != nil { return err } - diskDeviceInfo := populateFileIDs(mapStorageDevices(vmConfig), d) - - for prefix, diskMap := range diskDeviceObjects { - if diskMap == nil { - continue + addToDevices := func(ds vms.CustomStorageDevices, disk *vms.CustomStorageDevice) vms.CustomStorageDevices { + if ds == nil { + ds = vms.CustomStorageDevices{} } - for key, value := range diskMap { - if diskDeviceInfo[key] == nil { - // TODO: create a new disk here - return fmt.Errorf("missing %s device %s", prefix, key) - } - - tmp := *diskDeviceInfo[key] - tmp.BurstableReadSpeedMbps = value.BurstableReadSpeedMbps - tmp.BurstableWriteSpeedMbps = value.BurstableWriteSpeedMbps - tmp.MaxReadSpeedMbps = value.MaxReadSpeedMbps - tmp.MaxWriteSpeedMbps = value.MaxWriteSpeedMbps - tmp.Cache = value.Cache - - switch prefix { - case "virtio": - { - if updateBody.VirtualIODevices == nil { - updateBody.VirtualIODevices = vms.CustomStorageDevices{} - } - - updateBody.VirtualIODevices[key] = tmp - } - case "sata": - { - if updateBody.SATADevices == nil { - updateBody.SATADevices = vms.CustomStorageDevices{} - } - - updateBody.SATADevices[key] = tmp - } - case "scsi": - { - if updateBody.SCSIDevices == nil { - updateBody.SCSIDevices = vms.CustomStorageDevices{} - } - - 
updateBody.SCSIDevices[key] = tmp - } - case "ide": - { - // Investigate whether to support IDE mapping. - } - default: - return fmt.Errorf("device prefix %s not supported", prefix) + ds[*disk.Interface] = disk + + return ds + } + + for diskInterface, disk := range planDisks { + if currentDisks[diskInterface] == nil { + // TODO: create a new disk here + return fmt.Errorf("missing device %s", diskInterface) + } + + // copy the current disk and update the fields + tmp := *currentDisks[diskInterface] + tmp.BurstableReadSpeedMbps = disk.BurstableReadSpeedMbps + tmp.BurstableWriteSpeedMbps = disk.BurstableWriteSpeedMbps + tmp.MaxReadSpeedMbps = disk.MaxReadSpeedMbps + tmp.MaxWriteSpeedMbps = disk.MaxWriteSpeedMbps + tmp.Cache = disk.Cache + tmp.Discard = disk.Discard + tmp.IOThread = disk.IOThread + tmp.SSD = disk.SSD + + switch disk.StorageInterface() { + case "virtio": + updateBody.VirtualIODevices = addToDevices(updateBody.VirtualIODevices, &tmp) + case "sata": + updateBody.SATADevices = addToDevices(updateBody.SATADevices, &tmp) + case "scsi": + updateBody.SCSIDevices = addToDevices(updateBody.SCSIDevices, &tmp) + case "ide": + { + // Investigate whether to support IDE mapping. } + default: + return fmt.Errorf("device storage interface %s not supported", disk.StorageInterface()) } } @@ -942,22 +877,3 @@ func getDiskDatastores(vm *vms.GetResponseData, d *schema.ResourceData) []string return datastores } - -type customStorageDeviceMap struct { - // map of interface type (virtio|sata|scsi|...) 
-> map of interface name (virtio1) -> disk object - devices map[string]map[string]vms.CustomStorageDevice -} - -func (c *customStorageDeviceMap) byInterfaceType(interfaceType string) []vms.CustomStorageDevice { - return maps.Values[map[string]vms.CustomStorageDevice](c.devices[interfaceType]) -} - -func (c *customStorageDeviceMap) byInterfaceName(interfaceName string) (*vms.CustomStorageDevice, bool) { - for _, devices := range c.devices { - if device, ok := devices[interfaceName]; ok { - return &device, true - } - } - - return nil, false -} diff --git a/proxmoxtf/resource/vm/vm.go b/proxmoxtf/resource/vm/vm.go index c523e6ec..e59277a1 100644 --- a/proxmoxtf/resource/vm/vm.go +++ b/proxmoxtf/resource/vm/vm.go @@ -15,7 +15,6 @@ import ( "strconv" "strings" "time" - "unicode" "github.com/google/go-cmp/cmp" "github.com/google/uuid" @@ -357,16 +356,16 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d if len(cdrom) > 0 || len(initialization) > 0 { ideDevices = vms.CustomStorageDevices{ - "ide0": vms.CustomStorageDevice{ + "ide0": &vms.CustomStorageDevice{ Enabled: false, }, - "ide1": vms.CustomStorageDevice{ + "ide1": &vms.CustomStorageDevice{ Enabled: false, }, - "ide2": vms.CustomStorageDevice{ + "ide2": &vms.CustomStorageDevice{ Enabled: false, }, - "ide3": vms.CustomStorageDevice{ + "ide3": &vms.CustomStorageDevice{ Enabled: false, }, } @@ -385,7 +384,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d cdromMedia := "cdrom" - ideDevices[cdromInterface] = vms.CustomStorageDevice{ + ideDevices[cdromInterface] = &vms.CustomStorageDevice{ Enabled: cdromEnabled, FileVolume: cdromFileID, Media: &cdromMedia, @@ -458,7 +457,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d cdromCloudInitFileID := fmt.Sprintf("%s:cloudinit", initializationDatastoreID) cdromCloudInitMedia := "cdrom" - ideDevices[initializationInterface] = vms.CustomStorageDevice{ + 
ideDevices[initializationInterface] = &vms.CustomStorageDevice{ Enabled: cdromCloudInitEnabled, FileVolume: cdromCloudInitFileID, Media: &cdromCloudInitMedia, @@ -784,11 +783,6 @@ func vmCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{}) description := d.Get(mkDescription).(string) - diskDeviceObjects, err := getStorageDevicesFromResource(d) - if err != nil { - return diag.FromErr(err) - } - var efiDisk *vms.CustomEFIDisk efiDiskBlock := d.Get(mkEFIDisk).([]interface{}) @@ -831,11 +825,6 @@ func vmCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{}) } } - virtioDeviceObjects := diskDeviceObjects["virtio"] - scsiDeviceObjects := diskDeviceObjects["scsi"] - // ideDeviceObjects := getOrderedDiskDeviceList(diskDeviceObjects, "ide") - sataDeviceObjects := diskDeviceObjects["sata"] - initializationConfig := vmGetCloudInitConfig(d) if initializationConfig != nil { @@ -935,19 +924,25 @@ func vmCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{}) bootOrderConverted = []string{cdromInterface} } + planDisks, err := getStorageDevicesFromResource(d) + if err != nil { + return diag.FromErr(err) + } + bootOrder := d.Get(mkBootOrder).([]interface{}) - //nolint:nestif if len(bootOrder) == 0 { - if sataDeviceObjects != nil { - bootOrderConverted = append(bootOrderConverted, "sata0") - } + for _, disk := range planDisks { + if *disk.Interface == "sata0" { + bootOrderConverted = append(bootOrderConverted, "sata0") + } - if scsiDeviceObjects != nil { - bootOrderConverted = append(bootOrderConverted, "scsi0") - } + if *disk.Interface == "scsi0" { + bootOrderConverted = append(bootOrderConverted, "scsi0") + } - if virtioDeviceObjects != nil { - bootOrderConverted = append(bootOrderConverted, "virtio0") + if *disk.Interface == "virtio0" { + bootOrderConverted = append(bootOrderConverted, "virtio0") + } } if networkDeviceObjects != nil { @@ -967,12 +962,12 @@ func vmCreateCustom(ctx context.Context, d *schema.ResourceData, m 
interface{}) ideDevice2Media := "cdrom" ideDevices := vms.CustomStorageDevices{ - cdromCloudInitInterface: vms.CustomStorageDevice{ + cdromCloudInitInterface: &vms.CustomStorageDevice{ Enabled: cdromCloudInitEnabled, FileVolume: cdromCloudInitFileID, Media: &ideDevice2Media, }, - cdromInterface: vms.CustomStorageDevice{ + cdromInterface: &vms.CustomStorageDevice{ Enabled: cdromEnabled, FileVolume: cdromFileID, Media: &ideDevice2Media, @@ -1028,15 +1023,18 @@ func vmCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{}) VMID: &vmID, } - if sataDeviceObjects != nil { + sataDeviceObjects := planDisks.ByStorageInterface("sata") + if len(sataDeviceObjects) > 0 { createBody.SATADevices = sataDeviceObjects } - if scsiDeviceObjects != nil { + scsiDeviceObjects := planDisks.ByStorageInterface("scsi") + if len(scsiDeviceObjects) > 0 { createBody.SCSIDevices = scsiDeviceObjects } - if virtioDeviceObjects != nil { + virtioDeviceObjects := planDisks.ByStorageInterface("virtio") + if len(virtioDeviceObjects) > 0 { createBody.VirtualIODevices = virtioDeviceObjects } @@ -3266,16 +3264,16 @@ func vmUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D updateBody := &vms.UpdateRequestBody{ IDEDevices: vms.CustomStorageDevices{ - "ide0": vms.CustomStorageDevice{ + "ide0": &vms.CustomStorageDevice{ Enabled: false, }, - "ide1": vms.CustomStorageDevice{ + "ide1": &vms.CustomStorageDevice{ Enabled: false, }, - "ide2": vms.CustomStorageDevice{ + "ide2": &vms.CustomStorageDevice{ Enabled: false, }, - "ide3": vms.CustomStorageDevice{ + "ide3": &vms.CustomStorageDevice{ Enabled: false, }, }, @@ -3462,7 +3460,7 @@ func vmUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D cdromMedia := "cdrom" - updateBody.IDEDevices[cdromInterface] = vms.CustomStorageDevice{ + updateBody.IDEDevices[cdromInterface] = &vms.CustomStorageDevice{ Enabled: cdromEnabled, FileVolume: cdromFileID, Media: &cdromMedia, @@ -3609,7 +3607,7 @@ func vmUpdate(ctx 
context.Context, d *schema.ResourceData, m interface{}) diag.D fileVolume = ideDevice.FileVolume } - updateBody.IDEDevices[initializationInterface] = vms.CustomStorageDevice{ + updateBody.IDEDevices[initializationInterface] = &vms.CustomStorageDevice{ Enabled: true, FileVolume: fileVolume, Media: &cdromMedia, @@ -3854,13 +3852,11 @@ func vmUpdateDiskLocationAndSize( } if oldEfiDisk != nil { - baseDiskInterface := diskDigitPrefix(*oldEfiDisk.Interface) - diskOldEntries[baseDiskInterface][*oldEfiDisk.Interface] = *oldEfiDisk + diskOldEntries[*oldEfiDisk.Interface] = oldEfiDisk } if newEfiDisk != nil { - baseDiskInterface := diskDigitPrefix(*newEfiDisk.Interface) - diskNewEntries[baseDiskInterface][*newEfiDisk.Interface] = *newEfiDisk + diskNewEntries[*newEfiDisk.Interface] = newEfiDisk } if oldEfiDisk != nil && newEfiDisk != nil && oldEfiDisk.Size != newEfiDisk.Size { @@ -3878,13 +3874,11 @@ func vmUpdateDiskLocationAndSize( newTPMState := vmGetTPMStateAsStorageDevice(d, diskNew.([]interface{})) if oldTPMState != nil { - baseDiskInterface := diskDigitPrefix(*oldTPMState.Interface) - diskOldEntries[baseDiskInterface][*oldTPMState.Interface] = *oldTPMState + diskOldEntries[*oldTPMState.Interface] = oldTPMState } if newTPMState != nil { - baseDiskInterface := diskDigitPrefix(*newTPMState.Interface) - diskNewEntries[baseDiskInterface][*newTPMState.Interface] = *newTPMState + diskNewEntries[*newTPMState.Interface] = newTPMState } if oldTPMState != nil && newTPMState != nil && oldTPMState.Size != newTPMState.Size { @@ -3894,64 +3888,64 @@ func vmUpdateDiskLocationAndSize( } } + // TODO: move to disks.go + var diskMoveBodies []*vms.MoveDiskRequestBody var diskResizeBodies []*vms.ResizeDiskRequestBody shutdownForDisksRequired := false - for prefix, diskMap := range diskOldEntries { - for oldKey, oldDisk := range diskMap { - if _, present := diskNewEntries[prefix][oldKey]; !present { + for oldKey, oldDisk := range diskOldEntries { + if _, present := diskNewEntries[oldKey]; 
!present { + return diag.Errorf( + "deletion of disks not supported. Please delete disk by hand. Old Interface was %s", + *oldDisk.Interface, + ) + } + + if *oldDisk.ID != *diskNewEntries[oldKey].ID { + if oldDisk.IsOwnedBy(vmID) { + deleteOriginalDisk := types.CustomBool(true) + + diskMoveBodies = append( + diskMoveBodies, + &vms.MoveDiskRequestBody{ + DeleteOriginalDisk: &deleteOriginalDisk, + Disk: *oldDisk.Interface, + TargetStorage: *diskNewEntries[oldKey].ID, + }, + ) + + // Cannot be done while VM is running. + shutdownForDisksRequired = true + } else { return diag.Errorf( - "deletion of disks not supported. Please delete disk by hand. Old Interface was %s", - *oldDisk.Interface, + "Cannot move %s:%s to datastore %s in VM %d configuration, it is not owned by this VM!", + *oldDisk.ID, + *oldDisk.PathInDatastore(), + *diskNewEntries[oldKey].ID, + vmID, ) } + } - if *oldDisk.ID != *diskNewEntries[prefix][oldKey].ID { - if oldDisk.IsOwnedBy(vmID) { - deleteOriginalDisk := types.CustomBool(true) - - diskMoveBodies = append( - diskMoveBodies, - &vms.MoveDiskRequestBody{ - DeleteOriginalDisk: &deleteOriginalDisk, - Disk: *oldDisk.Interface, - TargetStorage: *diskNewEntries[prefix][oldKey].ID, - }, - ) - - // Cannot be done while VM is running. 
- shutdownForDisksRequired = true - } else { - return diag.Errorf( - "Cannot move %s:%s to datastore %s in VM %d configuration, it is not owned by this VM!", - *oldDisk.ID, - *oldDisk.PathInDatastore(), - *diskNewEntries[prefix][oldKey].ID, - vmID, - ) - } - } - - if *oldDisk.Size < *diskNewEntries[prefix][oldKey].Size { - if oldDisk.IsOwnedBy(vmID) { - diskResizeBodies = append( - diskResizeBodies, - &vms.ResizeDiskRequestBody{ - Disk: *oldDisk.Interface, - Size: *diskNewEntries[prefix][oldKey].Size, - }, - ) - } else { - return diag.Errorf( - "Cannot resize %s:%s in VM %d configuration, it is not owned by this VM!", - *oldDisk.ID, - *oldDisk.PathInDatastore(), - vmID, - ) - } + if *oldDisk.Size < *diskNewEntries[oldKey].Size { + if oldDisk.IsOwnedBy(vmID) { + diskResizeBodies = append( + diskResizeBodies, + &vms.ResizeDiskRequestBody{ + Disk: *oldDisk.Interface, + Size: *diskNewEntries[oldKey].Size, + }, + ) + } else { + return diag.Errorf( + "Cannot resize %s:%s in VM %d configuration, it is not owned by this VM!", + *oldDisk.ID, + *oldDisk.PathInDatastore(), + vmID, + ) } } } @@ -3972,7 +3966,9 @@ func vmUpdateDiskLocationAndSize( } for _, reqBody := range diskResizeBodies { - err = vmAPI.ResizeVMDisk(ctx, reqBody) + moveDiskTimeout := d.Get(mkTimeoutMoveDisk).(int) + + err = vmAPI.ResizeVMDisk(ctx, reqBody, moveDiskTimeout) if err != nil { return diag.FromErr(err) } @@ -4075,16 +4071,6 @@ func vmDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D return nil } -func diskDigitPrefix(s string) string { - for i, r := range s { - if unicode.IsDigit(r) { - return s[:i] - } - } - - return s -} - func getPCIInfo(resp *vms.GetResponseData, _ *schema.ResourceData) map[string]*vms.CustomPCIDevice { pciDevices := map[string]*vms.CustomPCIDevice{}