Mirror of https://github.com/bpg/terraform-provider-proxmox.git

refactor import custom disk, add tests

Signed-off-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>
Author: Pavel Boldyrev
Date:   2024-02-04 01:03:20 -05:00
Parent: 87ff97ca58
Commit: 3616b4b79e
GPG Key ID: 02A24794ADAC7455 (no known key found for this signature in database)
12 changed files with 287 additions and 314 deletions

.vscode/launch.json

@@ -8,7 +8,7 @@
       "mode": "test",
       "program": "${workspaceFolder}/fwprovider/tests",
       "envFile": "${workspaceFolder}/testacc.env",
-      "args": ["-test.v", "-test.timeout", "30s"]
+      "args": ["-debug", "-test.v", "-test.timeout", "30s"]
     },
     {


@@ -27,5 +27,5 @@
     "--fast"
   ],
   "go.lintOnSave": "workspace",
-  "go.testEnvFile": "${workspaceFolder}/test.env",
+  "go.testEnvFile": "${workspaceFolder}/testacc.env",
 }


@@ -63,103 +63,3 @@ local-hostname: myhost.internal
     file_name = "meta-config.yaml"
   }
 }
-
-#===============================================================================
-# Ubuntu Cloud Image
-#===============================================================================
-
-resource "proxmox_virtual_environment_file" "ubuntu_cloud_image" {
-  content_type = "iso"
-  datastore_id = element(data.proxmox_virtual_environment_datastores.example.datastore_ids, index(data.proxmox_virtual_environment_datastores.example.datastore_ids, "local"))
-  node_name    = data.proxmox_virtual_environment_datastores.example.node_name
-
-  source_file {
-    path = "https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img"
-  }
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_content_type" {
-  value = proxmox_virtual_environment_file.ubuntu_cloud_image.content_type
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_datastore_id" {
-  value = proxmox_virtual_environment_file.ubuntu_cloud_image.datastore_id
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_file_modification_date" {
-  value = proxmox_virtual_environment_file.ubuntu_cloud_image.file_modification_date
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_file_name" {
-  value = proxmox_virtual_environment_file.ubuntu_cloud_image.file_name
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_file_size" {
-  value = proxmox_virtual_environment_file.ubuntu_cloud_image.file_size
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_file_tag" {
-  value = proxmox_virtual_environment_file.ubuntu_cloud_image.file_tag
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_id" {
-  value = proxmox_virtual_environment_file.ubuntu_cloud_image.id
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_node_name" {
-  value = proxmox_virtual_environment_file.ubuntu_cloud_image.node_name
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_source_file" {
-  value = proxmox_virtual_environment_file.ubuntu_cloud_image.source_file
-}
-
-#===============================================================================
-# Ubuntu Container Template
-#===============================================================================
-
-resource "proxmox_virtual_environment_file" "ubuntu_container_template" {
-  content_type = "vztmpl"
-  datastore_id = element(data.proxmox_virtual_environment_datastores.example.datastore_ids, index(data.proxmox_virtual_environment_datastores.example.datastore_ids, "local"))
-  node_name    = data.proxmox_virtual_environment_datastores.example.node_name
-
-  source_file {
-    path = "http://download.proxmox.com/images/system/ubuntu-18.04-standard_18.04.1-1_amd64.tar.gz"
-  }
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_container_template_content_type" {
-  value = proxmox_virtual_environment_file.ubuntu_container_template.content_type
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_container_template_datastore_id" {
-  value = proxmox_virtual_environment_file.ubuntu_container_template.datastore_id
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_container_template_file_modification_date" {
-  value = proxmox_virtual_environment_file.ubuntu_container_template.file_modification_date
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_container_template_file_name" {
-  value = proxmox_virtual_environment_file.ubuntu_container_template.file_name
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_container_template_file_size" {
-  value = proxmox_virtual_environment_file.ubuntu_container_template.file_size
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_container_template_file_tag" {
-  value = proxmox_virtual_environment_file.ubuntu_container_template.file_tag
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_container_template_id" {
-  value = proxmox_virtual_environment_file.ubuntu_container_template.id
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_container_template_node_name" {
-  value = proxmox_virtual_environment_file.ubuntu_container_template.node_name
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_container_template_source_file" {
-  value = proxmox_virtual_environment_file.ubuntu_container_template.source_file
-}


@@ -41,7 +41,7 @@ resource "proxmox_virtual_environment_vm" "example_template" {
 #  disk {
 #    datastore_id = local.datastore_id
-#    file_id      = proxmox_virtual_environment_file.ubuntu_cloud_image.id
+#    file_id      = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
 #    interface    = "virtio0"
 #    iothread     = true
 #  }


@@ -0,0 +1,101 @@
+/*
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/.
+ */
+
+package tests
+
+import (
+    "context"
+    "fmt"
+    "testing"
+
+    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+    "github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
+    "github.com/stretchr/testify/require"
+)
+
+const (
+    accTestVMName      = "proxmox_virtual_environment_vm.test_vm"
+    accTestVMCloneName = "proxmox_virtual_environment_vm.test_vm_clone"
+)
+
+func TestAccResourceVM(t *testing.T) {
+    t.Parallel()
+
+    accProviders := testAccMuxProviders(context.Background(), t)
+
+    resource.Test(t, resource.TestCase{
+        ProtoV6ProviderFactories: accProviders,
+        Steps: []resource.TestStep{
+            {
+                Config: testAccResourceVMCreateConfig(false),
+                Check:  testAccResourceVMCreateCheck(t),
+            },
+            {
+                Config: testAccResourceVMCreateConfig(true) + testAccResourceVMCreateCloneConfig(),
+                Check:  testAccResourceVMCreateCloneCheck(t),
+            },
+        },
+    })
+}
+
+func testAccResourceVMCreateConfig(isTemplate bool) string {
+    return fmt.Sprintf(`
+resource "proxmox_virtual_environment_vm" "test_vm" {
+  node_name = "%s"
+  vm_id     = 2100
+  template  = %t
+  started   = false
+
+  disk {
+    file_format  = "raw"
+    datastore_id = "local-lvm"
+    interface    = "virtio0"
+    size         = 8
+  }
+}
+`, accTestNodeName, isTemplate)
+}
+
+func testAccResourceVMCreateCheck(t *testing.T) resource.TestCheckFunc {
+    t.Helper()
+
+    return resource.ComposeTestCheckFunc(
+        func(*terraform.State) error {
+            err := getNodesClient().VM(2100).WaitForVMStatus(context.Background(), "stopped", 10, 1)
+            require.NoError(t, err, "vm did not start")
+
+            return nil
+        },
+    )
+}
+
+func testAccResourceVMCreateCloneConfig() string {
+    return fmt.Sprintf(`
+resource "proxmox_virtual_environment_vm" "test_vm_clone" {
+  depends_on = [proxmox_virtual_environment_vm.test_vm]
+
+  node_name = "%s"
+  vm_id     = 2101
+  started   = false
+
+  clone {
+    vm_id = 2100
+  }
+}
+`, accTestNodeName)
+}
+
+func testAccResourceVMCreateCloneCheck(t *testing.T) resource.TestCheckFunc {
+    t.Helper()
+
+    return resource.ComposeTestCheckFunc(
+        func(*terraform.State) error {
+            err := getNodesClient().VM(2101).WaitForVMStatus(context.Background(), "stopped", 20, 1)
+            require.NoError(t, err, "vm did not start")
+
+            return nil
+        },
+    )
+}
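The checks above only poll the Proxmox API for the VM status. A minimal sketch of an additional state-based check, assuming it lives in the same tests package as the new file; the helper name and the disk.0.* attribute paths below are illustrative and not part of this commit:

// testAccResourceVMDiskCheck is a hypothetical extra check (not in the commit)
// that reads the planned disk attributes straight from Terraform state.
func testAccResourceVMDiskCheck() resource.TestCheckFunc {
    return resource.ComposeTestCheckFunc(
        resource.TestCheckResourceAttr(accTestVMName, "disk.0.interface", "virtio0"),
        resource.TestCheckResourceAttr(accTestVMName, "disk.0.size", "8"),
    )
}

Such a check could be appended to either test step's Check via resource.ComposeTestCheckFunc.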


@@ -625,8 +625,8 @@ func (c *Client) WaitForVMConfigUnlock(ctx context.Context, timeout int, delay i
 	return fmt.Errorf("timeout while waiting for VM \"%d\" configuration to become unlocked", c.VMID)
 }
 
-// WaitForVMState waits for a virtual machine to reach a specific state.
-func (c *Client) WaitForVMState(ctx context.Context, state string, timeout int, delay int) error {
+// WaitForVMStatus waits for a virtual machine to reach a specific status.
+func (c *Client) WaitForVMStatus(ctx context.Context, state string, timeout int, delay int) error {
 	state = strings.ToLower(state)
 	timeDelay := int64(delay)


@@ -251,7 +251,8 @@ func (d CustomStorageDevice) StorageInterface() string {
 		}
 	}
 
-	panic(fmt.Sprintf("cannot determine storage interface for disk interface '%s'", *d.Interface))
+	// panic(fmt.Sprintf("cannot determine storage interface for disk interface '%s'", *d.Interface))
+	return ""
 }
 
 // CustomStorageDevices handles map of QEMU storage device per disk interface.
@@ -1245,11 +1246,9 @@ func (r CustomStartupOrder) EncodeValues(key string, v *url.Values) error {
 	return nil
 }
 
-// EncodeValues converts a CustomStorageDevice struct to a URL vlaue.
-func (d CustomStorageDevice) EncodeValues(key string, v *url.Values) error {
-	values := []string{
-		fmt.Sprintf("file=%s", d.FileVolume),
-	}
+// EncodeOptions converts a CustomStorageDevice's common options a URL vlaue.
+func (d CustomStorageDevice) EncodeOptions() string {
+	values := []string{}
 
 	if d.AIO != nil {
 		values = append(values, fmt.Sprintf("aio=%s", *d.AIO))
@@ -1263,34 +1262,6 @@
 		}
 	}
 
-	if d.BurstableReadSpeedMbps != nil {
-		values = append(values, fmt.Sprintf("mbps_rd_max=%d", *d.BurstableReadSpeedMbps))
-	}
-
-	if d.BurstableWriteSpeedMbps != nil {
-		values = append(values, fmt.Sprintf("mbps_wr_max=%d", *d.BurstableWriteSpeedMbps))
-	}
-
-	if d.Format != nil {
-		values = append(values, fmt.Sprintf("format=%s", *d.Format))
-	}
-
-	if d.MaxReadSpeedMbps != nil {
-		values = append(values, fmt.Sprintf("mbps_rd=%d", *d.MaxReadSpeedMbps))
-	}
-
-	if d.MaxWriteSpeedMbps != nil {
-		values = append(values, fmt.Sprintf("mbps_wr=%d", *d.MaxWriteSpeedMbps))
-	}
-
-	if d.Media != nil {
-		values = append(values, fmt.Sprintf("media=%s", *d.Media))
-	}
-
-	if d.Size != nil {
-		values = append(values, fmt.Sprintf("size=%s", *d.Size))
-	}
-
 	if d.IOThread != nil {
 		if *d.IOThread {
 			values = append(values, "iothread=1")
@@ -1315,6 +1286,45 @@
 		values = append(values, fmt.Sprintf("cache=%s", *d.Cache))
 	}
 
+	if d.BurstableReadSpeedMbps != nil {
+		values = append(values, fmt.Sprintf("mbps_rd_max=%d", *d.BurstableReadSpeedMbps))
+	}
+
+	if d.BurstableWriteSpeedMbps != nil {
+		values = append(values, fmt.Sprintf("mbps_wr_max=%d", *d.BurstableWriteSpeedMbps))
+	}
+
+	if d.MaxReadSpeedMbps != nil {
+		values = append(values, fmt.Sprintf("mbps_rd=%d", *d.MaxReadSpeedMbps))
+	}
+
+	if d.MaxWriteSpeedMbps != nil {
+		values = append(values, fmt.Sprintf("mbps_wr=%d", *d.MaxWriteSpeedMbps))
+	}
+
+	return strings.Join(values, ",")
+}
+
+// EncodeValues converts a CustomStorageDevice struct to a URL vlaue.
+func (d CustomStorageDevice) EncodeValues(key string, v *url.Values) error {
+	values := []string{
+		fmt.Sprintf("file=%s", d.FileVolume),
+	}
+
+	if d.Format != nil {
+		values = append(values, fmt.Sprintf("format=%s", *d.Format))
+	}
+
+	if d.Media != nil {
+		values = append(values, fmt.Sprintf("media=%s", *d.Media))
+	}
+
+	if d.Size != nil {
+		values = append(values, fmt.Sprintf("size=%s", *d.Size))
+	}
+
+	values = append(values, d.EncodeOptions())
+
 	v.Add(key, strings.Join(values, ","))
 
 	return nil
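The refactor above splits the per-disk tuning options out of EncodeValues so the same encoding can be reused by the import path later in this commit. A rough usage sketch, assuming the package import paths github.com/bpg/terraform-provider-proxmox/proxmox/nodes/vms and .../proxmox/types and using only fields and methods that appear in this diff:

package main

import (
    "fmt"
    "net/url"

    "github.com/bpg/terraform-provider-proxmox/proxmox/nodes/vms"
    "github.com/bpg/terraform-provider-proxmox/proxmox/types"
)

func main() {
    disk := vms.CustomStorageDevice{
        FileVolume: "local-lvm:vm-2100-disk-0",
        IOThread:   types.CustomBool(true).Pointer(),
    }

    // Tuning options only, e.g. "iothread=1"; discard, cache and the mbps_*
    // limits would be appended the same way if they were set.
    fmt.Println(disk.EncodeOptions())

    // Full API encoding: "file=..." plus format/media/size, with the same
    // options appended via EncodeOptions.
    v := &url.Values{}
    _ = disk.EncodeValues("virtio0", v)
    fmt.Println(v.Get("virtio0"))
}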


@@ -172,10 +172,10 @@ func TestCustomStorageDevices_ByStorageInterface(t *testing.T) {
 			name:  "not in the list",
 			iface: "sata",
 			devices: CustomStorageDevices{
-				"virtio0": CustomStorageDevice{
+				"virtio0": &CustomStorageDevice{
 					Interface: types.StrPtr("virtio0"),
 				},
-				"scsi13": CustomStorageDevice{
+				"scsi13": &CustomStorageDevice{
 					Interface: types.StrPtr("scsi13"),
 				},
 			},
@@ -185,21 +185,21 @@
 			name:  "not in the list",
 			iface: "virtio",
 			devices: CustomStorageDevices{
-				"virtio0": CustomStorageDevice{
+				"virtio0": &CustomStorageDevice{
 					Interface: types.StrPtr("virtio0"),
 				},
-				"scsi13": CustomStorageDevice{
+				"scsi13": &CustomStorageDevice{
 					Interface: types.StrPtr("scsi13"),
 				},
-				"virtio1": CustomStorageDevice{
+				"virtio1": &CustomStorageDevice{
 					Interface: types.StrPtr("virtio1"),
 				},
 			},
 			want: CustomStorageDevices{
-				"virtio0": CustomStorageDevice{
+				"virtio0": &CustomStorageDevice{
 					Interface: types.StrPtr("virtio0"),
 				},
-				"virtio1": CustomStorageDevice{
+				"virtio1": &CustomStorageDevice{
 					Interface: types.StrPtr("virtio1"),
 				},
 			},


@@ -197,9 +197,10 @@ func diskSchema() *schema.Schema {
 	}
 }
 
+// called from vmCreateClone
 func createDisks(
 	ctx context.Context, vmConfig *vms.GetResponseData, d *schema.ResourceData, vmAPI *vms.Client,
-) (map[string]*vms.CustomStorageDevice, error) {
+) (vms.CustomStorageDevices, error) {
 	// this is what VM has at the moment: map of interface name (virtio1) -> disk object
 	currentDisks := populateFileIDs(mapStorageDevices(vmConfig), d)
@@ -209,62 +210,90 @@ func createDisks(
 		return nil, e
 	}
 
-	for diskInterface, planDisk := range planDisks {
-		currentDisk := currentDisks[diskInterface]
+	for iface, planDisk := range planDisks {
+		currentDisk := currentDisks[iface]
 
-		// create disks that are not present in the current configuration
 		if currentDisk == nil {
+			// create disks that are not present in the current configuration
 			err := createDisk(ctx, planDisk, vmAPI)
 			if err != nil {
 				return nil, err
 			}
-		} else {
-			// disk is present, i.e. when cloned a template, but we need to check if it needs to be resized
-			if planDisk.Size.InGigabytes() < currentDisk.Size.InGigabytes() {
-				return nil, fmt.Errorf("disk resize fails requests size (%dG) is lower than current size (%s)",
-					planDisk.Size.InGigabytes(),
-					*currentDisk.Size,
-				)
-			}
-
-			moveDisk := false
-
-			if *planDisk.ID != "" {
-				fileIDParts := strings.Split(currentDisk.FileVolume, ":")
-				moveDisk = *planDisk.ID != fileIDParts[0]
-			}
-
-			if moveDisk {
-				moveDiskTimeout := d.Get(mkTimeoutMoveDisk).(int)
-				deleteOriginalDisk := types.CustomBool(true)
-
-				diskMoveBody := &vms.MoveDiskRequestBody{
-					DeleteOriginalDisk: &deleteOriginalDisk,
-					Disk:               diskInterface,
-					TargetStorage:      *planDisk.ID,
-				}
-
-				err := vmAPI.MoveVMDisk(ctx, diskMoveBody, moveDiskTimeout)
-				if err != nil {
-					return nil, err
-				}
-			}
-
-			if planDisk.Size.InGigabytes() > currentDisk.Size.InGigabytes() {
-				moveDiskTimeout := d.Get(mkTimeoutMoveDisk).(int)
-
-				diskResizeBody := &vms.ResizeDiskRequestBody{
-					Disk: diskInterface,
-					Size: *types.DiskSizeFromGigabytes(planDisk.Size.InGigabytes()),
-				}
-
-				err := vmAPI.ResizeVMDisk(ctx, diskResizeBody, moveDiskTimeout)
-				if err != nil {
-					return nil, err
-				}
-			}
-		}
-	}
-
-	return currentDisks, nil
+
+			continue
+		}
+
+		// disk is present, i.e. when cloning a template, but we need to check if it needs to be moved or resized
+		timeoutSec := d.Get(mkTimeoutMoveDisk).(int)
+
+		err := resizeDiskIfRequired(ctx, currentDisk, planDisk, vmAPI, timeoutSec)
+		if err != nil {
+			return nil, err
+		}
+
+		err = moveDiskIfRequired(ctx, currentDisk, planDisk, vmAPI, timeoutSec)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return currentDisks, nil
+}
+
+func resizeDiskIfRequired(
+	ctx context.Context,
+	currentDisk *vms.CustomStorageDevice, planDisk *vms.CustomStorageDevice,
+	vmAPI *vms.Client, timeoutSec int,
+) error {
+	if planDisk.Size.InGigabytes() < currentDisk.Size.InGigabytes() {
+		return fmt.Errorf("the planned disk size (%dG) is lower than the current size (%s)",
+			planDisk.Size.InGigabytes(),
+			*currentDisk.Size,
+		)
+	}
+
+	if planDisk.Size.InGigabytes() > currentDisk.Size.InGigabytes() {
+		diskResizeBody := &vms.ResizeDiskRequestBody{
+			Disk: *planDisk.Interface,
+			Size: *planDisk.Size,
+		}
+
+		err := vmAPI.ResizeVMDisk(ctx, diskResizeBody, timeoutSec)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func moveDiskIfRequired(
+	ctx context.Context,
+	currentDisk *vms.CustomStorageDevice, planDisk *vms.CustomStorageDevice,
+	vmAPI *vms.Client, timeoutSec int,
+) error {
+	needToMove := false
+	if *planDisk.ID != "" {
+		fileIDParts := strings.Split(currentDisk.FileVolume, ":")
+		needToMove = *planDisk.ID != fileIDParts[0]
+	}
+
+	if needToMove {
+		diskMoveBody := &vms.MoveDiskRequestBody{
+			DeleteOriginalDisk: types.CustomBool(true).Pointer(),
+			Disk:               *planDisk.Interface,
+			TargetStorage:      *planDisk.ID,
+		}
+
+		err := vmAPI.MoveVMDisk(ctx, diskMoveBody, timeoutSec)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
 }
 
 func createDisk(ctx context.Context, disk *vms.CustomStorageDevice, vmAPI *vms.Client) error {
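The extracted resizeDiskIfRequired keeps the original semantics: shrinking is rejected, growing issues a resize through the API, and an equal size is a no-op. A minimal illustration of just that guard, using a standalone helper rather than the provider's types:

package main

import "fmt"

// classifyResize is illustrative only, not from the commit: it names the three
// outcomes resizeDiskIfRequired distinguishes for a planned vs. current size in GB.
func classifyResize(planGB, currentGB int64) string {
    switch {
    case planGB < currentGB:
        return "error: shrinking is not supported"
    case planGB > currentGB:
        return "resize via vmAPI.ResizeVMDisk"
    default:
        return "no change"
    }
}

func main() {
    fmt.Println(classifyResize(8, 10)) // error path
    fmt.Println(classifyResize(10, 8)) // triggers a resize
    fmt.Println(classifyResize(8, 8))  // no-op
}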
@@ -297,112 +326,44 @@ func createDisk(ctx context.Context, disk *vms.CustomStorageDevice, vmAPI *vms.C
 	return nil
 }
 
-func vmCreateCustomDisks(ctx context.Context, d *schema.ResourceData, m interface{}) error {
+func vmImportCustomDisks(ctx context.Context, d *schema.ResourceData, m interface{}) error {
 	vmID, err := strconv.Atoi(d.Id())
 	if err != nil {
 		return err
 	}
 
-	// Determine the ID of the next disk.
-	disk := d.Get(mkDisk).([]interface{})
+	planDisks, err := getStorageDevicesFromResource(d)
+	if err != nil {
+		return err
+	}
 
 	diskCount := 0
 
-	for _, d := range disk {
-		block := d.(map[string]interface{})
-		fileID, _ := block[mkDiskFileID].(string)
-
-		if fileID == "" {
+	for _, d := range planDisks {
+		if *d.FileID == "" {
 			diskCount++
 		}
 	}
 
-	// Retrieve some information about the disk schema.
-	resourceSchema := VM().Schema
-	diskSchemaElem := resourceSchema[mkDisk].Elem
-	diskSchemaResource := diskSchemaElem.(*schema.Resource)
-	diskSpeedResource := diskSchemaResource.Schema[mkDiskSpeed]
-
 	// Generate the commands required to import the specified disks.
 	commands := []string{}
 	importedDiskCount := 0
 
-	for _, d := range disk {
-		block := d.(map[string]interface{})
-		fileID, _ := block[mkDiskFileID].(string)
-
-		if fileID == "" {
+	for _, d := range planDisks {
+		if *d.FileID == "" {
 			continue
 		}
 
-		datastoreID, _ := block[mkDiskDatastoreID].(string)
-		fileFormat, _ := block[mkDiskFileFormat].(string)
-		size, _ := block[mkDiskSize].(int)
-		speed := block[mkDiskSpeed].([]interface{})
-		diskInterface, _ := block[mkDiskInterface].(string)
-		ioThread := types.CustomBool(block[mkDiskIOThread].(bool))
-		ssd := types.CustomBool(block[mkDiskSSD].(bool))
-		discard, _ := block[mkDiskDiscard].(string)
-		cache, _ := block[mkDiskCache].(string)
-
-		if fileFormat == "" {
-			fileFormat = dvDiskFileFormat
-		}
-
-		if len(speed) == 0 {
-			diskSpeedDefault, err := diskSpeedResource.DefaultValue()
-			if err != nil {
-				return err
-			}
-
-			speed = diskSpeedDefault.([]interface{})
-		}
-
-		speedBlock := speed[0].(map[string]interface{})
-		speedLimitRead := speedBlock[mkDiskSpeedRead].(int)
-		speedLimitReadBurstable := speedBlock[mkDiskSpeedReadBurstable].(int)
-		speedLimitWrite := speedBlock[mkDiskSpeedWrite].(int)
-		speedLimitWriteBurstable := speedBlock[mkDiskSpeedWriteBurstable].(int)
-
-		diskOptions := ""
-
-		if ioThread {
-			diskOptions += ",iothread=1"
-		}
-
-		if ssd {
-			diskOptions += ",ssd=1"
-		}
-
-		if discard != "" {
-			diskOptions += fmt.Sprintf(",discard=%s", discard)
-		}
-
-		if cache != "" {
-			diskOptions += fmt.Sprintf(",cache=%s", cache)
-		}
-
-		if speedLimitRead > 0 {
-			diskOptions += fmt.Sprintf(",mbps_rd=%d", speedLimitRead)
-		}
-
-		if speedLimitReadBurstable > 0 {
-			diskOptions += fmt.Sprintf(",mbps_rd_max=%d", speedLimitReadBurstable)
-		}
-
-		if speedLimitWrite > 0 {
-			diskOptions += fmt.Sprintf(",mbps_wr=%d", speedLimitWrite)
-		}
-
-		if speedLimitWriteBurstable > 0 {
-			diskOptions += fmt.Sprintf(",mbps_wr_max=%d", speedLimitWriteBurstable)
-		}
+		diskOptions := d.EncodeOptions()
+		if diskOptions != "" {
+			diskOptions = "," + diskOptions
+		}
 
 		filePathTmp := fmt.Sprintf(
 			"/tmp/vm-%d-disk-%d.%s",
 			vmID,
 			diskCount+importedDiskCount,
-			fileFormat,
+			*d.Format,
 		)
 
 		//nolint:lll
@@ -410,12 +371,12 @@ func vmCreateCustomDisks(ctx context.Context, d *schema.ResourceData, m interfac
 			commands,
 			`set -e`,
 			`try_sudo(){ if [ $(sudo -n echo tfpve 2>&1 | grep "tfpve" | wc -l) -gt 0 ]; then sudo $1; else $1; fi }`,
-			fmt.Sprintf(`file_id="%s"`, fileID),
-			fmt.Sprintf(`file_format="%s"`, fileFormat),
-			fmt.Sprintf(`datastore_id_target="%s"`, datastoreID),
+			fmt.Sprintf(`file_id="%s"`, *d.FileID),
+			fmt.Sprintf(`file_format="%s"`, *d.Format),
+			fmt.Sprintf(`datastore_id_target="%s"`, *d.ID),
 			fmt.Sprintf(`disk_options="%s"`, diskOptions),
-			fmt.Sprintf(`disk_size="%d"`, size),
-			fmt.Sprintf(`disk_interface="%s"`, diskInterface),
+			fmt.Sprintf(`disk_size="%d"`, d.Size.InGigabytes()),
+			fmt.Sprintf(`disk_interface="%s"`, *d.Interface),
 			fmt.Sprintf(`file_path_tmp="%s"`, filePathTmp),
 			fmt.Sprintf(`vm_id="%d"`, vmID),
 			`source_image=$(try_sudo "pvesm path $file_id")`,
@@ -557,7 +518,7 @@ func getDiskDeviceObjects1(d *schema.ResourceData, disks []interface{}) (vms.Cus
 		if storageInterface != "virtio" && storageInterface != "scsi" && storageInterface != "sata" {
 			return diskDeviceObjects, fmt.Errorf(
-				"Defined disk interface not supported. Interface was '%s', but only 'virtio', 'sata' and 'scsi' are supported",
+				"The disk interface '%s' is not supported, should be one of 'virtioN', 'sataN', or 'scsiN'",
 				diskInterface,
 			)
 		}
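The reworded error spells out the expected naming scheme: a bus prefix followed by an index. An illustrative check of that scheme, not provider code, assuming only the three buses named in the message:

package main

import (
    "fmt"
    "regexp"
)

// diskInterfaceRe expresses the naming the error message describes,
// e.g. "virtio0", "sata1" or "scsi13".
var diskInterfaceRe = regexp.MustCompile(`^(virtio|sata|scsi)\d+$`)

func main() {
    for _, iface := range []string{"virtio0", "scsi13", "ide2"} {
        fmt.Println(iface, diskInterfaceRe.MatchString(iface))
    }
}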
@@ -701,7 +662,7 @@ func updateDisk(d *schema.ResourceData, vmConfig *vms.GetResponseData, updateBod
 		return nil
 	}
 
-	currentDisks := populateFileIDs(mapStorageDevices(vmConfig), d)
+	// currentDisks := populateFileIDs(mapStorageDevices(vmConfig), d)
 
 	planDisks, err := getStorageDevicesFromResource(d)
 	if err != nil {
@@ -718,22 +679,25 @@
 		return ds
 	}
 
-	for diskInterface, disk := range planDisks {
-		if currentDisks[diskInterface] == nil {
-			// TODO: create a new disk here
-			return fmt.Errorf("missing device %s", diskInterface)
-		}
+	for _, disk := range planDisks {
+		// for diskInterface, disk := range planDisks {
+		// if currentDisks[diskInterface] == nil {
+		// // TODO: create a new disk here
+		// return fmt.Errorf("missing device %s", diskInterface)
+		// }
+
+		tmp := *disk
 
 		// copy the current disk and update the fields
-		tmp := *currentDisks[diskInterface]
-		tmp.BurstableReadSpeedMbps = disk.BurstableReadSpeedMbps
-		tmp.BurstableWriteSpeedMbps = disk.BurstableWriteSpeedMbps
-		tmp.MaxReadSpeedMbps = disk.MaxReadSpeedMbps
-		tmp.MaxWriteSpeedMbps = disk.MaxWriteSpeedMbps
-		tmp.Cache = disk.Cache
-		tmp.Discard = disk.Discard
-		tmp.IOThread = disk.IOThread
-		tmp.SSD = disk.SSD
+		// tmp := *currentDisks[diskInterface]
+		// tmp.BurstableReadSpeedMbps = disk.BurstableReadSpeedMbps
+		// tmp.BurstableWriteSpeedMbps = disk.BurstableWriteSpeedMbps
+		// tmp.MaxReadSpeedMbps = disk.MaxReadSpeedMbps
+		// tmp.MaxWriteSpeedMbps = disk.MaxWriteSpeedMbps
+		// tmp.Cache = disk.Cache
+		// tmp.Discard = disk.Discard
+		// tmp.IOThread = disk.IOThread
+		// tmp.SSD = disk.SSD
 
 		switch disk.StorageInterface() {
 		case "virtio":
@@ -820,7 +784,7 @@ func mapStorageDevices(resp *vms.GetResponseData) map[string]*vms.CustomStorageD
 	}
 }
 
 // mapStorageDevices maps the current VM storage devices by their interface names.
-func populateFileIDs(devices map[string]*vms.CustomStorageDevice, d *schema.ResourceData) map[string]*vms.CustomStorageDevice {
+func populateFileIDs(devices vms.CustomStorageDevices, d *schema.ResourceData) vms.CustomStorageDevices {
 	planDisk := d.Get(mkDisk)
 	planDiskList := planDisk.([]interface{})


@@ -57,7 +57,7 @@ func TestPopulateFileID(t *testing.T) {
 	err := d.Set("disk", disk)
 	require.NoError(t, err)
 
-	expected := map[string]*vms.CustomStorageDevice{
+	expected := vms.CustomStorageDevices{
 		"virtio0": {
 			FileID: types.StrPtr("local:100/vm-100-disk-1.qcow2"),
 		},


@@ -981,6 +981,7 @@ func VM() *schema.Resource {
 				Type:        schema.TypeList,
 				Description: "The MAC addresses for the network interfaces",
 				Computed:    true,
+				Optional:    true,
 				Elem:        &schema.Schema{Type: schema.TypeString},
 			},
 			mkMemory: {


@@ -115,7 +115,7 @@ func vmStart(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData) dia
 		})
 	}
 
-	return append(diags, diag.FromErr(vmAPI.WaitForVMState(ctx, "running", startVMTimeout, 1))...)
+	return append(diags, diag.FromErr(vmAPI.WaitForVMStatus(ctx, "running", startVMTimeout, 1))...)
 }
 
 // Shutdown the VM, then wait for it to actually shut down (it may not be shut down immediately if
@@ -134,7 +134,7 @@ func vmShutdown(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData)
 		return diag.FromErr(e)
 	}
 
-	return diag.FromErr(vmAPI.WaitForVMState(ctx, "stopped", shutdownTimeout, 1))
+	return diag.FromErr(vmAPI.WaitForVMStatus(ctx, "stopped", shutdownTimeout, 1))
 }
 
 // Forcefully stop the VM, then wait for it to actually stop.
@@ -148,7 +148,7 @@ func vmStop(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData) diag
 		return diag.FromErr(e)
 	}
 
-	return diag.FromErr(vmAPI.WaitForVMState(ctx, "stopped", stopTimeout, 1))
+	return diag.FromErr(vmAPI.WaitForVMStatus(ctx, "stopped", stopTimeout, 1))
 }
 
 func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@@ -1091,7 +1091,7 @@ func vmCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{})
 	d.SetId(strconv.Itoa(vmID))
 
-	err = vmCreateCustomDisks(ctx, d, m)
+	err = vmImportCustomDisks(ctx, d, m)
 	if err != nil {
 		return diag.FromErr(err)
 	}
@@ -2625,28 +2625,25 @@ func vmReadCustom(
 		networkDeviceList[ni] = networkDevice
 	}
 
-	if len(clone) > 0 {
-		if len(currentNetworkDeviceList) > 0 {
-			err := d.Set(
-				mkMACAddresses,
-				macAddresses[0:len(currentNetworkDeviceList)],
-			)
-			diags = append(diags, diag.FromErr(err)...)
+	if len(currentNetworkDeviceList) == 0 {
+		err := d.Set(mkMACAddresses, []interface{}{})
+		diags = append(diags, diag.FromErr(err)...)
 
-			err = d.Set(
-				mkNetworkDevice,
-				networkDeviceList[:networkDeviceLast+1],
-			)
-			diags = append(diags, diag.FromErr(err)...)
-		}
+		err = d.Set(mkNetworkDevice, []interface{}{})
+		diags = append(diags, diag.FromErr(err)...)
 	} else {
 		err := d.Set(mkMACAddresses, macAddresses[0:len(currentNetworkDeviceList)])
 		diags = append(diags, diag.FromErr(err)...)
 
-		if len(currentNetworkDeviceList) > 0 || networkDeviceLast > -1 {
-			err := d.Set(mkNetworkDevice, networkDeviceList[:networkDeviceLast+1])
-			diags = append(diags, diag.FromErr(err)...)
+		if len(clone) > 0 {
+			err = d.Set(mkNetworkDevice, networkDeviceList[:networkDeviceLast+1])
+			diags = append(diags, diag.FromErr(err)...)
+		} else {
+			if len(currentNetworkDeviceList) > 0 || networkDeviceLast > -1 {
+				err := d.Set(mkNetworkDevice, networkDeviceList[:networkDeviceLast+1])
+				diags = append(diags, diag.FromErr(err)...)
+			}
 		}
 	}
 
 	// Compare the operating system configuration to the one stored in the state.
 	operatingSystem := map[string]interface{}{}
@@ -4061,7 +4058,7 @@ func vmDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
 	}
 
 	// Wait for the state to become unavailable as that clearly indicates the destruction of the VM.
-	err = vmAPI.WaitForVMState(ctx, "", 60, 2)
+	err = vmAPI.WaitForVMStatus(ctx, "", 60, 2)
 	if err == nil {
 		return diag.Errorf("failed to delete VM \"%d\"", vmID)
 	}