0
0
mirror of https://github.com/bpg/terraform-provider-proxmox.git synced 2025-08-22 11:28:33 +00:00

current state

Signed-off-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>
This commit is contained in:
Pavel Boldyrev 2024-02-18 08:15:25 -05:00
parent 7d2554db7d
commit bd1e937cb0
No known key found for this signature in database
GPG Key ID: 02A24794ADAC7455
9 changed files with 377 additions and 247 deletions

View File

@ -104,13 +104,14 @@ func TestAccResourceVMDisks(t *testing.T) {
}`, }`,
Check: resource.ComposeTestCheckFunc( Check: resource.ComposeTestCheckFunc(
testResourceAttributes("proxmox_virtual_environment_vm.test_disk1", map[string]string{ testResourceAttributes("proxmox_virtual_environment_vm.test_disk1", map[string]string{
"disk.0.cache": "none", // those are empty by default, but we can't check for that
// "disk.0.cache": "",
// "disk.0.discard": "",
// "disk.0.file_id": "",
"disk.0.datastore_id": "local-lvm", "disk.0.datastore_id": "local-lvm",
"disk.0.discard": "ignore",
"disk.0.file_format": "raw", "disk.0.file_format": "raw",
// "disk.0.file_id": "", // is empty by default, but we can't check for that
"disk.0.interface": "virtio0", "disk.0.interface": "virtio0",
"disk.0.iothread": "false", "disk.0.iothread": "true",
"disk.0.path_in_datastore": `vm-\d+-disk-\d+`, "disk.0.path_in_datastore": `vm-\d+-disk-\d+`,
"disk.0.size": "8", "disk.0.size": "8",
"disk.0.ssd": "false", "disk.0.ssd": "false",
@ -144,7 +145,6 @@ func TestAccResourceVMDisks(t *testing.T) {
"disk.0.datastore_id": "local-lvm", "disk.0.datastore_id": "local-lvm",
"disk.0.discard": "on", "disk.0.discard": "on",
"disk.0.file_format": "raw", "disk.0.file_format": "raw",
// "disk.0.file_id": "", // is empty by default, but we can't check for that
"disk.0.interface": "virtio0", "disk.0.interface": "virtio0",
"disk.0.iothread": "true", "disk.0.iothread": "true",
"disk.0.path_in_datastore": `vm-\d+-disk-\d+`, "disk.0.path_in_datastore": `vm-\d+-disk-\d+`,
@ -153,7 +153,7 @@ func TestAccResourceVMDisks(t *testing.T) {
}), }),
), ),
}}}, }}},
{"clone default disk", []resource.TestStep{ {"clone default disk without overrides", []resource.TestStep{
{ {
Config: ` Config: `
resource "proxmox_virtual_environment_vm" "test_disk3_template" { resource "proxmox_virtual_environment_vm" "test_disk3_template" {
@ -188,6 +188,58 @@ func TestAccResourceVMDisks(t *testing.T) {
RefreshState: true, RefreshState: true,
}, },
}}, }},
{"clone disk with new size", []resource.TestStep{
{
Config: `
resource "proxmox_virtual_environment_vm" "test_disk3_template" {
node_name = "pve"
started = false
name = "test-disk3-template"
template = "true"
disk {
file_format = "raw"
datastore_id = "local-lvm"
interface = "scsi0"
size = 8
discard = "on"
iothread = true
}
}
resource "proxmox_virtual_environment_vm" "test_disk3" {
node_name = "pve"
started = false
name = "test-disk3"
clone {
vm_id = proxmox_virtual_environment_vm.test_disk3_template.id
}
disk {
interface = "scsi0"
size = 10
ssd = true
}
}
`,
Check: resource.ComposeTestCheckFunc(
testResourceAttributes("proxmox_virtual_environment_vm.test_disk3", map[string]string{
"disk.0.datastore_id": "local-lvm",
"disk.0.discard": "on",
"disk.0.file_format": "raw",
"disk.0.interface": "virtio0",
"disk.0.iothread": "true",
"disk.0.path_in_datastore": `vm-\d+-disk-\d+`,
"disk.0.size": "10",
"disk.0.ssd": "true",
}),
),
},
//{
// RefreshState: true,
// Destroy: false,
//},
}},
//{"default disk parameters", resource.TestStep{}}, //{"default disk parameters", resource.TestStep{}},
//{"default disk parameters", resource.TestStep{}}, //{"default disk parameters", resource.TestStep{}},
} }
@ -213,10 +265,10 @@ func testResourceAttributes(res string, attrs map[string]string) resource.TestCh
if err := resource.TestCheckResourceAttrWith(res, k, func(got string) error { if err := resource.TestCheckResourceAttrWith(res, k, func(got string) error {
match, err := regexp.Match(v, []byte(got)) //nolint:mirror match, err := regexp.Match(v, []byte(got)) //nolint:mirror
if err != nil { if err != nil {
return fmt.Errorf("error matching %s: %w", v, err) return fmt.Errorf("error matching '%s': %w", v, err)
} }
if !match { if !match {
return fmt.Errorf("expected %s to match %s", got, v) return fmt.Errorf("expected '%s' to match '%s'", got, v)
} }
return nil return nil
})(s); err != nil { })(s); err != nil {

View File

@ -0,0 +1,239 @@
package vms
import (
"fmt"
"net/url"
"strings"
"unicode"
"github.com/bpg/terraform-provider-proxmox/proxmox/types"
)
// CustomStorageDevice handles QEMU SATA device parameters.
// NOTE(review): despite the "SATA" wording, this struct is used for all disk
// buses handled by this package (StorageInterface below returns e.g. "virtio"
// or "scsi") — consider rewording the comment upstream.
type CustomStorageDevice struct {
	AIO                     *string           `json:"aio,omitempty" url:"aio,omitempty"`
	BackupEnabled           *types.CustomBool `json:"backup,omitempty" url:"backup,omitempty,int"`
	BurstableReadSpeedMbps  *int              `json:"mbps_rd_max,omitempty" url:"mbps_rd_max,omitempty"`
	Cache                   *string           `json:"cache,omitempty" url:"cache,omitempty"`
	BurstableWriteSpeedMbps *int              `json:"mbps_wr_max,omitempty" url:"mbps_wr_max,omitempty"`
	Discard                 *string           `json:"discard,omitempty" url:"discard,omitempty"`
	Enabled                 bool              `json:"-" url:"-"` // provider-side flag; never serialized
	FileVolume              string            `json:"file" url:"file"`
	Format                  *string           `json:"format,omitempty" url:"format,omitempty"`
	IOThread                *types.CustomBool `json:"iothread,omitempty" url:"iothread,omitempty,int"`
	SSD                     *types.CustomBool `json:"ssd,omitempty" url:"ssd,omitempty,int"`
	MaxReadSpeedMbps        *int              `json:"mbps_rd,omitempty" url:"mbps_rd,omitempty"`
	MaxWriteSpeedMbps       *int              `json:"mbps_wr,omitempty" url:"mbps_wr,omitempty"`
	Media                   *string           `json:"media,omitempty" url:"media,omitempty"`
	Size                    *types.DiskSize   `json:"size,omitempty" url:"size,omitempty"`

	// Provider-internal metadata, excluded from both JSON and URL encoding
	// (see the "-" tags).
	Interface   *string `json:"-" url:"-"`
	DatastoreID *string `json:"-" url:"-"`
	FileID      *string `json:"-" url:"-"`
}
// PathInDatastore returns the path part of FileVolume, or nil if the volume is
// not yet allocated.
func (d CustomStorageDevice) PathInDatastore() *string {
	probablyDatastoreID, pathInDatastore, hasDatastoreID := strings.Cut(d.FileVolume, ":")
	if !hasDatastoreID {
		// when no ':' separator is found, 'Cut' places the whole string into
		// 'probablyDatastoreID'; we want it in 'pathInDatastore' (it is an
		// absolute filesystem path)
		pathInDatastore = probablyDatastoreID

		return &pathInDatastore
	}

	// A not-yet-allocated volume is in the "STORAGE_ID:SIZE_IN_GiB" format,
	// i.e. the part after ':' is a plain number. Remove the digits: if nothing
	// remains, there is no real path yet.
	// BUG FIX: the mapping previously kept digits and dropped everything else,
	// contradicting the variable name below — "local-lvm:8" was misreported as
	// the allocated path "8", and digit-free paths were misreported as nil.
	pathInDatastoreWithoutDigits := strings.Map(
		func(c rune) rune {
			if c >= '0' && c <= '9' {
				return -1 // drop digits
			}

			return c
		},
		pathInDatastore)

	if pathInDatastoreWithoutDigits == "" {
		// FileVolume is not yet allocated
		return nil
	}

	return &pathInDatastore
}
// IsOwnedBy returns true if the CustomStorageDevice is owned by the given VM.
// A volume that has not been allocated yet is owned by no VM.
func (d CustomStorageDevice) IsOwnedBy(vmID int) bool {
	path := d.PathInDatastore()
	if path == nil {
		// Unallocated volume — treat it as owned by no VM.
		// NOTE: if needed, add IsOwnedByOtherThan(vmID) instead of changing
		// this return value.
		return false
	}

	// Two layouts identify ownership by VM ID:
	//   ZFS:       "local-zfs:vm-123-disk-0"
	//   directory: "local:123/vm-123-disk-0"
	ownedPrefixes := []string{
		fmt.Sprintf("vm-%d-", vmID),
		fmt.Sprintf("%d/vm-%d-", vmID, vmID),
	}

	for _, prefix := range ownedPrefixes {
		if strings.HasPrefix(*path, prefix) {
			return true
		}
	}

	return false
}
// IsCloudInitDrive returns true if the CustomStorageDevice is a cloud-init
// drive of the given VM: a cdrom whose volume references "vm-<ID>-cloudinit".
func (d CustomStorageDevice) IsCloudInitDrive(vmID int) bool {
	if d.Media == nil || *d.Media != "cdrom" {
		return false
	}

	return strings.Contains(d.FileVolume, fmt.Sprintf("vm-%d-cloudinit", vmID))
}
// StorageInterface returns the storage interface of the CustomStorageDevice,
// e.g. "virtio" or "scsi" for "virtio0" or "scsi2". It returns an empty string
// when the interface name contains no digit.
func (d CustomStorageDevice) StorageInterface() string {
	iface := *d.Interface

	if i := strings.IndexFunc(iface, unicode.IsDigit); i >= 0 {
		return iface[:i]
	}

	return ""
}
// EncodeOptions converts a CustomStorageDevice's common options to a URL value,
// as a comma-separated "key=value" list. Only options that are set are emitted;
// the order matches the original field handling.
func (d CustomStorageDevice) EncodeOptions() string {
	var opts []string

	// boolean flags encode as "name=1" / "name=0" when set
	addBool := func(name string, b *types.CustomBool) {
		if b == nil {
			return
		}

		val := "0"
		if *b {
			val = "1"
		}

		opts = append(opts, name+"="+val)
	}

	// string options encode only when set AND non-empty
	addNonEmpty := func(name string, s *string) {
		if s != nil && *s != "" {
			opts = append(opts, name+"="+*s)
		}
	}

	// integer options encode whenever set
	addInt := func(name string, i *int) {
		if i != nil {
			opts = append(opts, fmt.Sprintf("%s=%d", name, *i))
		}
	}

	// NB: AIO (unlike discard/cache) is emitted even when empty, matching the
	// previous behavior.
	if d.AIO != nil {
		opts = append(opts, "aio="+*d.AIO)
	}

	addBool("backup", d.BackupEnabled)
	addBool("iothread", d.IOThread)
	addBool("ssd", d.SSD)
	addNonEmpty("discard", d.Discard)
	addNonEmpty("cache", d.Cache)
	addInt("mbps_rd_max", d.BurstableReadSpeedMbps)
	addInt("mbps_wr_max", d.BurstableWriteSpeedMbps)
	addInt("mbps_rd", d.MaxReadSpeedMbps)
	addInt("mbps_wr", d.MaxWriteSpeedMbps)

	return strings.Join(opts, ",")
}
// EncodeValues converts a CustomStorageDevice struct to a URL value: a single
// comma-separated entry added under the given key.
func (d CustomStorageDevice) EncodeValues(key string, v *url.Values) error {
	parts := make([]string, 0, 5)
	parts = append(parts, "file="+d.FileVolume)

	if d.Format != nil {
		parts = append(parts, "format="+*d.Format)
	}

	if d.Media != nil {
		parts = append(parts, "media="+*d.Media)
	}

	if d.Size != nil {
		parts = append(parts, fmt.Sprintf("size=%s", *d.Size))
	}

	// The common options are appended unconditionally, matching the previous
	// behavior (a trailing comma appears when there are none).
	parts = append(parts, d.EncodeOptions())

	v.Add(key, strings.Join(parts, ","))

	return nil
}
// Copy returns a deep copy of the CustomStorageDevice.
func (d CustomStorageDevice) Copy() *CustomStorageDevice {
	// Start from a shallow value copy (covers Enabled and FileVolume), then
	// replace every pointer field with a freshly allocated copy so the clone
	// shares no memory with the original.
	c := d
	c.AIO = types.CopyString(d.AIO)
	c.BackupEnabled = d.BackupEnabled.Copy()
	c.BurstableReadSpeedMbps = types.CopyInt(d.BurstableReadSpeedMbps)
	c.Cache = types.CopyString(d.Cache)
	c.BurstableWriteSpeedMbps = types.CopyInt(d.BurstableWriteSpeedMbps)
	c.Discard = types.CopyString(d.Discard)
	c.Format = types.CopyString(d.Format)
	c.IOThread = d.IOThread.Copy()
	c.SSD = d.SSD.Copy()
	c.MaxReadSpeedMbps = types.CopyInt(d.MaxReadSpeedMbps)
	c.MaxWriteSpeedMbps = types.CopyInt(d.MaxWriteSpeedMbps)
	c.Media = types.CopyString(d.Media)
	c.Size = d.Size.Copy()
	c.Interface = types.CopyString(d.Interface)
	c.DatastoreID = types.CopyString(d.DatastoreID)
	c.FileID = types.CopyString(d.FileID)

	return &c
}
// CustomStorageDevices handles a map of QEMU storage devices keyed by disk
// interface name (e.g. "virtio0", "scsi2").
type CustomStorageDevices map[string]*CustomStorageDevice
// ByStorageInterface returns a map of CustomStorageDevices containing only the
// entries whose interface matches the given storage interface.
func (d CustomStorageDevices) ByStorageInterface(storageInterface string) CustomStorageDevices {
	filtered := CustomStorageDevices{}

	for iface, dev := range d {
		if dev.StorageInterface() == storageInterface {
			filtered[iface] = dev
		}
	}

	return filtered
}
// EncodeValues converts a CustomStorageDevices map to multiple URL values,
// one per enabled device; the map key is used as the URL value key.
func (d CustomStorageDevices) EncodeValues(_ string, v *url.Values) error {
	for iface, dev := range d {
		if !dev.Enabled {
			continue
		}

		if err := dev.EncodeValues(iface, v); err != nil {
			return fmt.Errorf("error encoding storage device %s: %w", iface, err)
		}
	}

	return nil
}

View File

@ -14,7 +14,6 @@ import (
"path/filepath" "path/filepath"
"strconv" "strconv"
"strings" "strings"
"unicode"
"github.com/bpg/terraform-provider-proxmox/proxmox/types" "github.com/bpg/terraform-provider-proxmox/proxmox/types"
) )
@ -165,112 +164,6 @@ type CustomStartupOrder struct {
Up *int `json:"up,omitempty" url:"up,omitempty"` Up *int `json:"up,omitempty" url:"up,omitempty"`
} }
// CustomStorageDevice handles QEMU SATA device parameters.
type CustomStorageDevice struct {
AIO *string `json:"aio,omitempty" url:"aio,omitempty"`
BackupEnabled *types.CustomBool `json:"backup,omitempty" url:"backup,omitempty,int"`
BurstableReadSpeedMbps *int `json:"mbps_rd_max,omitempty" url:"mbps_rd_max,omitempty"`
Cache *string `json:"cache,omitempty" url:"cache,omitempty"`
BurstableWriteSpeedMbps *int `json:"mbps_wr_max,omitempty" url:"mbps_wr_max,omitempty"`
Discard *string `json:"discard,omitempty" url:"discard,omitempty"`
Enabled bool `json:"-" url:"-"`
FileVolume string `json:"file" url:"file"`
Format *string `json:"format,omitempty" url:"format,omitempty"`
IOThread *types.CustomBool `json:"iothread,omitempty" url:"iothread,omitempty,int"`
SSD *types.CustomBool `json:"ssd,omitempty" url:"ssd,omitempty,int"`
MaxReadSpeedMbps *int `json:"mbps_rd,omitempty" url:"mbps_rd,omitempty"`
MaxWriteSpeedMbps *int `json:"mbps_wr,omitempty" url:"mbps_wr,omitempty"`
Media *string `json:"media,omitempty" url:"media,omitempty"`
Size *types.DiskSize `json:"size,omitempty" url:"size,omitempty"`
Interface *string
ID *string
FileID *string
}
// PathInDatastore returns path part of FileVolume or nil if it is not yet allocated.
func (d CustomStorageDevice) PathInDatastore() *string {
probablyDatastoreID, pathInDatastore, hasDatastoreID := strings.Cut(d.FileVolume, ":")
if !hasDatastoreID {
// when no ':' separator is found, 'Cut' places the whole string to 'probablyDatastoreID',
// we want it in 'pathInDatastore' (as it is absolute filesystem path)
pathInDatastore = probablyDatastoreID
return &pathInDatastore
}
pathInDatastoreWithoutDigits := strings.Map(
func(c rune) rune {
if c < '0' || c > '9' {
return -1
}
return c
},
pathInDatastore)
if pathInDatastoreWithoutDigits == "" {
// FileVolume is not yet allocated, it is in the "STORAGE_ID:SIZE_IN_GiB" format
return nil
}
return &pathInDatastore
}
// IsOwnedBy returns true, if CustomStorageDevice is owned by given VM. Not yet allocated volumes are not owned by any VM.
func (d CustomStorageDevice) IsOwnedBy(vmID int) bool {
pathInDatastore := d.PathInDatastore()
if pathInDatastore == nil {
// not yet allocated volume, consider disk not owned by any VM
// NOTE: if needed, create IsOwnedByOtherThan(vmId) instead of changing this return value.
return false
}
// ZFS uses "local-zfs:vm-123-disk-0"
if strings.HasPrefix(*pathInDatastore, fmt.Sprintf("vm-%d-", vmID)) {
return true
}
// directory uses "local:123/vm-123-disk-0"
if strings.HasPrefix(*pathInDatastore, fmt.Sprintf("%d/vm-%d-", vmID, vmID)) {
return true
}
return false
}
// IsCloudInitDrive returns true, if CustomStorageDevice is a cloud-init drive.
func (d CustomStorageDevice) IsCloudInitDrive(vmID int) bool {
return d.Media != nil && *d.Media == "cdrom" &&
strings.Contains(d.FileVolume, fmt.Sprintf("vm-%d-cloudinit", vmID))
}
// StorageInterface returns the storage interface of the CustomStorageDevice, e.g. "virtio" or "scsi" for "virtio0" or "scsi2".
func (d CustomStorageDevice) StorageInterface() string {
for i, r := range *d.Interface {
if unicode.IsDigit(r) {
return (*d.Interface)[:i]
}
}
// panic(fmt.Sprintf("cannot determine storage interface for disk interface '%s'", *d.Interface))
return ""
}
// CustomStorageDevices handles map of QEMU storage device per disk interface.
type CustomStorageDevices map[string]*CustomStorageDevice
// ByStorageInterface returns a map of CustomStorageDevices filtered by the given storage interface.
func (d CustomStorageDevices) ByStorageInterface(storageInterface string) CustomStorageDevices {
result := make(CustomStorageDevices)
for k, v := range d {
if v.StorageInterface() == storageInterface {
result[k] = v
}
}
return result
}
// CustomTPMState handles QEMU TPM state parameters. // CustomTPMState handles QEMU TPM state parameters.
type CustomTPMState struct { type CustomTPMState struct {
FileVolume string `json:"file" url:"file"` FileVolume string `json:"file" url:"file"`
@ -1246,103 +1139,6 @@ func (r CustomStartupOrder) EncodeValues(key string, v *url.Values) error {
return nil return nil
} }
// EncodeOptions converts a CustomStorageDevice's common options a URL vlaue.
func (d CustomStorageDevice) EncodeOptions() string {
values := []string{}
if d.AIO != nil {
values = append(values, fmt.Sprintf("aio=%s", *d.AIO))
}
if d.BackupEnabled != nil {
if *d.BackupEnabled {
values = append(values, "backup=1")
} else {
values = append(values, "backup=0")
}
}
if d.IOThread != nil {
if *d.IOThread {
values = append(values, "iothread=1")
} else {
values = append(values, "iothread=0")
}
}
if d.SSD != nil {
if *d.SSD {
values = append(values, "ssd=1")
} else {
values = append(values, "ssd=0")
}
}
if d.Discard != nil && *d.Discard != "" {
values = append(values, fmt.Sprintf("discard=%s", *d.Discard))
}
if d.Cache != nil && *d.Cache != "" {
values = append(values, fmt.Sprintf("cache=%s", *d.Cache))
}
if d.BurstableReadSpeedMbps != nil {
values = append(values, fmt.Sprintf("mbps_rd_max=%d", *d.BurstableReadSpeedMbps))
}
if d.BurstableWriteSpeedMbps != nil {
values = append(values, fmt.Sprintf("mbps_wr_max=%d", *d.BurstableWriteSpeedMbps))
}
if d.MaxReadSpeedMbps != nil {
values = append(values, fmt.Sprintf("mbps_rd=%d", *d.MaxReadSpeedMbps))
}
if d.MaxWriteSpeedMbps != nil {
values = append(values, fmt.Sprintf("mbps_wr=%d", *d.MaxWriteSpeedMbps))
}
return strings.Join(values, ",")
}
// EncodeValues converts a CustomStorageDevice struct to a URL vlaue.
func (d CustomStorageDevice) EncodeValues(key string, v *url.Values) error {
values := []string{
fmt.Sprintf("file=%s", d.FileVolume),
}
if d.Format != nil {
values = append(values, fmt.Sprintf("format=%s", *d.Format))
}
if d.Media != nil {
values = append(values, fmt.Sprintf("media=%s", *d.Media))
}
if d.Size != nil {
values = append(values, fmt.Sprintf("size=%s", *d.Size))
}
values = append(values, d.EncodeOptions())
v.Add(key, strings.Join(values, ","))
return nil
}
// EncodeValues converts a CustomStorageDevices array to multiple URL values.
func (d CustomStorageDevices) EncodeValues(_ string, v *url.Values) error {
for s, d := range d {
if d.Enabled {
if err := d.EncodeValues(s, v); err != nil {
return fmt.Errorf("error encoding storage device %s: %w", s, err)
}
}
}
return nil
}
// EncodeValues converts a CustomTPMState struct to a URL vlaue. // EncodeValues converts a CustomTPMState struct to a URL vlaue.
func (r CustomTPMState) EncodeValues(key string, v *url.Values) error { func (r CustomTPMState) EncodeValues(key string, v *url.Values) error {
values := []string{ values := []string{

View File

@ -79,6 +79,15 @@ func (r *CustomBool) FromValue(tfValue types.Bool) {
*r = CustomBool(tfValue.ValueBool()) *r = CustomBool(tfValue.ValueBool())
} }
// Copy returns a pointer to a copy of the boolean, or nil for a nil receiver.
func (r *CustomBool) Copy() *CustomBool {
	if r == nil {
		return nil
	}

	v := *r

	return &v
}
// MarshalJSON converts a boolean to a JSON value. // MarshalJSON converts a boolean to a JSON value.
func (r *CustomCommaSeparatedList) MarshalJSON() ([]byte, error) { func (r *CustomCommaSeparatedList) MarshalJSON() ([]byte, error) {
s := strings.Join(*r, ",") s := strings.Join(*r, ",")

View File

@ -67,6 +67,14 @@ func (r *DiskSize) UnmarshalJSON(b []byte) error {
return nil return nil
} }
// Copy returns a pointer to a copy of the disk size, or nil for a nil receiver.
func (r *DiskSize) Copy() *DiskSize {
	if r == nil {
		return nil
	}

	// BUG FIX: the previous 'return &(*r)' is equivalent to 'return r' in Go
	// (&*p == p), so it returned the receiver itself rather than a copy.
	// Copy the value into a new variable first.
	c := *r

	return &c
}
// ParseDiskSize parses a disk size string into a number of bytes. // ParseDiskSize parses a disk size string into a number of bytes.
func ParseDiskSize(size string) (DiskSize, error) { func ParseDiskSize(size string) (DiskSize, error) {
matches := sizeRegex.FindStringSubmatch(size) matches := sizeRegex.FindStringSubmatch(size)

View File

@ -11,8 +11,30 @@ func StrPtr(s string) *string {
return &s return &s
} }
// IntPtr returns a pointer to an int holding the given value.
func IntPtr(i int) *int {
	v := i

	return &v
}
// BoolPtr returns a pointer to a bool. // BoolPtr returns a pointer to a bool.
func BoolPtr(s bool) *CustomBool { func BoolPtr(s bool) *CustomBool {
customBool := CustomBool(s) customBool := CustomBool(s)
return &customBool return &customBool
} }
// CopyString copies the content of a string pointer into a freshly allocated
// string pointer; a nil input yields nil.
func CopyString(s *string) *string {
	if s == nil {
		return nil
	}

	c := *s

	return &c
}
// CopyInt copies the content of an int pointer; a nil input yields nil.
func CopyInt(i *int) *int {
	if i == nil {
		return nil
	}

	return IntPtr(*i)
}

View File

@ -205,6 +205,7 @@ func createDisks(
ctx context.Context, vmConfig *vms.GetResponseData, d *schema.ResourceData, vmAPI *vms.Client, ctx context.Context, vmConfig *vms.GetResponseData, d *schema.ResourceData, vmAPI *vms.Client,
) (vms.CustomStorageDevices, error) { ) (vms.CustomStorageDevices, error) {
// this is what VM has at the moment: map of interface name (virtio1) -> disk object // this is what VM has at the moment: map of interface name (virtio1) -> disk object
// the disks have already been cloned, they have all original properties from the template
currentDisks := populateFileIDs(mapStorageDevices(vmConfig), d) currentDisks := populateFileIDs(mapStorageDevices(vmConfig), d)
// map of interface name (virtio1) -> disk object // map of interface name (virtio1) -> disk object
@ -277,16 +278,16 @@ func moveDiskIfRequired(
) error { ) error {
needToMove := false needToMove := false
if *planDisk.ID != "" { if *planDisk.DatastoreID != "" {
fileIDParts := strings.Split(currentDisk.FileVolume, ":") fileIDParts := strings.Split(currentDisk.FileVolume, ":")
needToMove = *planDisk.ID != fileIDParts[0] needToMove = *planDisk.DatastoreID != fileIDParts[0]
} }
if needToMove { if needToMove {
diskMoveBody := &vms.MoveDiskRequestBody{ diskMoveBody := &vms.MoveDiskRequestBody{
DeleteOriginalDisk: types.CustomBool(true).Pointer(), DeleteOriginalDisk: types.CustomBool(true).Pointer(),
Disk: *planDisk.Interface, Disk: *planDisk.Interface,
TargetStorage: *planDisk.ID, TargetStorage: *planDisk.DatastoreID,
} }
err := vmAPI.MoveVMDisk(ctx, diskMoveBody, timeoutSec) err := vmAPI.MoveVMDisk(ctx, diskMoveBody, timeoutSec)
@ -375,7 +376,7 @@ func vmImportCustomDisks(ctx context.Context, d *schema.ResourceData, m interfac
ssh.TrySudo, ssh.TrySudo,
fmt.Sprintf(`file_id="%s"`, *d.FileID), fmt.Sprintf(`file_id="%s"`, *d.FileID),
fmt.Sprintf(`file_format="%s"`, *d.Format), fmt.Sprintf(`file_format="%s"`, *d.Format),
fmt.Sprintf(`datastore_id_target="%s"`, *d.ID), fmt.Sprintf(`datastore_id_target="%s"`, *d.DatastoreID),
fmt.Sprintf(`disk_options="%s"`, diskOptions), fmt.Sprintf(`disk_options="%s"`, diskOptions),
fmt.Sprintf(`disk_size="%d"`, d.Size.InGigabytes()), fmt.Sprintf(`disk_size="%d"`, d.Size.InGigabytes()),
fmt.Sprintf(`disk_interface="%s"`, *d.Interface), fmt.Sprintf(`disk_interface="%s"`, *d.Interface),
@ -476,10 +477,11 @@ func getDiskDeviceObjects1(d *schema.ResourceData, disks []interface{}) (vms.Cus
diskDevice.FileVolume = pathInDatastore diskDevice.FileVolume = pathInDatastore
} }
} else { } else {
// a new disk, not yet allocated
diskDevice.FileVolume = fmt.Sprintf("%s:%d", datastoreID, size) diskDevice.FileVolume = fmt.Sprintf("%s:%d", datastoreID, size)
} }
diskDevice.ID = &datastoreID diskDevice.DatastoreID = &datastoreID
diskDevice.Interface = &diskInterface diskDevice.Interface = &diskInterface
diskDevice.Format = &fileFormat diskDevice.Format = &fileFormat
diskDevice.FileID = &fileID diskDevice.FileID = &fileID

View File

@ -3,13 +3,16 @@ package vm
import ( import (
"testing" "testing"
"github.com/bpg/terraform-provider-proxmox/proxmox/nodes/vms"
"github.com/bpg/terraform-provider-proxmox/proxmox/types"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/bpg/terraform-provider-proxmox/proxmox/nodes/vms"
"github.com/bpg/terraform-provider-proxmox/proxmox/types"
) )
func TestMapStorageDevices(t *testing.T) { func TestMapStorageDevices(t *testing.T) {
t.Parallel()
devices := &vms.GetResponseData{ devices := &vms.GetResponseData{
VirtualIODevice0: &vms.CustomStorageDevice{ VirtualIODevice0: &vms.CustomStorageDevice{
Interface: types.StrPtr("virtio0"), Interface: types.StrPtr("virtio0"),
@ -37,6 +40,8 @@ func TestMapStorageDevices(t *testing.T) {
} }
func TestPopulateFileID(t *testing.T) { func TestPopulateFileID(t *testing.T) {
t.Parallel()
devicesMap := map[string]*vms.CustomStorageDevice{ devicesMap := map[string]*vms.CustomStorageDevice{
"virtio0": {}, "virtio0": {},
"virtio1": {}, "virtio1": {},

View File

@ -182,6 +182,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
} }
vmID = *vmIDNew vmID = *vmIDNew
err = d.Set(mkVMID, vmID) err = d.Set(mkVMID, vmID)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
@ -304,6 +305,8 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
return diag.FromErr(e) return diag.FromErr(e)
} }
//// UPDATE AFTER CLONE, can we just call update?
// Now that the virtual machine has been cloned, we need to perform some modifications. // Now that the virtual machine has been cloned, we need to perform some modifications.
acpi := types.CustomBool(d.Get(mkACPI).(bool)) acpi := types.CustomBool(d.Get(mkACPI).(bool))
audioDevices := vmGetAudioDeviceList(d) audioDevices := vmGetAudioDeviceList(d)
@ -1382,7 +1385,7 @@ func vmGetEfiDiskAsStorageDevice(d *schema.ResourceData, disk []interface{}) (*v
FileVolume: efiDisk.FileVolume, FileVolume: efiDisk.FileVolume,
Format: efiDisk.Format, Format: efiDisk.Format,
Interface: &diskInterface, Interface: &diskInterface,
ID: &id, DatastoreID: &id,
} }
if efiDisk.Type != nil { if efiDisk.Type != nil {
@ -1439,7 +1442,7 @@ func vmGetTPMStateAsStorageDevice(d *schema.ResourceData, disk []interface{}) *v
Enabled: true, Enabled: true,
FileVolume: tpmState.FileVolume, FileVolume: tpmState.FileVolume,
Interface: &diskInterface, Interface: &diskInterface,
ID: &id, DatastoreID: &id,
} }
} }
@ -3819,18 +3822,12 @@ func vmUpdateDiskLocationAndSize(
if d.HasChange(mkDisk) { if d.HasChange(mkDisk) {
diskOld, diskNew := d.GetChange(mkDisk) diskOld, diskNew := d.GetChange(mkDisk)
diskOldEntries, err := getDiskDeviceObjects1( diskOldEntries, err := getDiskDeviceObjects1(d, diskOld.([]interface{}))
d,
diskOld.([]interface{}),
)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
diskNewEntries, err := getDiskDeviceObjects1( diskNewEntries, err := getDiskDeviceObjects1(d, diskNew.([]interface{}))
d,
diskNew.([]interface{}),
)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -3902,7 +3899,7 @@ func vmUpdateDiskLocationAndSize(
) )
} }
if *oldDisk.ID != *diskNewEntries[oldKey].ID { if *oldDisk.DatastoreID != *diskNewEntries[oldKey].DatastoreID {
if oldDisk.IsOwnedBy(vmID) { if oldDisk.IsOwnedBy(vmID) {
deleteOriginalDisk := types.CustomBool(true) deleteOriginalDisk := types.CustomBool(true)
@ -3911,7 +3908,7 @@ func vmUpdateDiskLocationAndSize(
&vms.MoveDiskRequestBody{ &vms.MoveDiskRequestBody{
DeleteOriginalDisk: &deleteOriginalDisk, DeleteOriginalDisk: &deleteOriginalDisk,
Disk: *oldDisk.Interface, Disk: *oldDisk.Interface,
TargetStorage: *diskNewEntries[oldKey].ID, TargetStorage: *diskNewEntries[oldKey].DatastoreID,
}, },
) )
@ -3920,9 +3917,9 @@ func vmUpdateDiskLocationAndSize(
} else { } else {
return diag.Errorf( return diag.Errorf(
"Cannot move %s:%s to datastore %s in VM %d configuration, it is not owned by this VM!", "Cannot move %s:%s to datastore %s in VM %d configuration, it is not owned by this VM!",
*oldDisk.ID, *oldDisk.DatastoreID,
*oldDisk.PathInDatastore(), *oldDisk.PathInDatastore(),
*diskNewEntries[oldKey].ID, *diskNewEntries[oldKey].DatastoreID,
vmID, vmID,
) )
} }
@ -3940,7 +3937,7 @@ func vmUpdateDiskLocationAndSize(
} else { } else {
return diag.Errorf( return diag.Errorf(
"Cannot resize %s:%s in VM %d configuration, it is not owned by this VM!", "Cannot resize %s:%s in VM %d configuration, it is not owned by this VM!",
*oldDisk.ID, *oldDisk.DatastoreID,
*oldDisk.PathInDatastore(), *oldDisk.PathInDatastore(),
vmID, vmID,
) )