mirror of https://github.com/bpg/terraform-provider-proxmox.git (synced 2025-06-30 02:31:10 +00:00)

refactor import custom disk, add tests

Signed-off-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>

parent 87ff97ca58, commit 3616b4b79e
.vscode/launch.json (vendored): 2 lines changed
@@ -8,7 +8,7 @@
       "mode": "test",
       "program": "${workspaceFolder}/fwprovider/tests",
       "envFile": "${workspaceFolder}/testacc.env",
-      "args": ["-test.v", "-test.timeout", "30s"]
+      "args": ["-debug", "-test.v", "-test.timeout", "30s"]
     },
     {
.vscode/settings.json (vendored): 2 lines changed
@@ -27,5 +27,5 @@
     "--fast"
   ],
   "go.lintOnSave": "workspace",
-  "go.testEnvFile": "${workspaceFolder}/test.env",
+  "go.testEnvFile": "${workspaceFolder}/testacc.env",
 }
@@ -63,103 +63,3 @@ local-hostname: myhost.internal
     file_name = "meta-config.yaml"
   }
 }
-
-#===============================================================================
-# Ubuntu Cloud Image
-#===============================================================================
-
-resource "proxmox_virtual_environment_file" "ubuntu_cloud_image" {
-  content_type = "iso"
-  datastore_id = element(data.proxmox_virtual_environment_datastores.example.datastore_ids, index(data.proxmox_virtual_environment_datastores.example.datastore_ids, "local"))
-  node_name    = data.proxmox_virtual_environment_datastores.example.node_name
-
-  source_file {
-    path = "https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img"
-  }
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_content_type" {
-  value = proxmox_virtual_environment_file.ubuntu_cloud_image.content_type
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_datastore_id" {
-  value = proxmox_virtual_environment_file.ubuntu_cloud_image.datastore_id
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_file_modification_date" {
-  value = proxmox_virtual_environment_file.ubuntu_cloud_image.file_modification_date
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_file_name" {
-  value = proxmox_virtual_environment_file.ubuntu_cloud_image.file_name
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_file_size" {
-  value = proxmox_virtual_environment_file.ubuntu_cloud_image.file_size
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_file_tag" {
-  value = proxmox_virtual_environment_file.ubuntu_cloud_image.file_tag
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_id" {
-  value = proxmox_virtual_environment_file.ubuntu_cloud_image.id
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_node_name" {
-  value = proxmox_virtual_environment_file.ubuntu_cloud_image.node_name
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_source_file" {
-  value = proxmox_virtual_environment_file.ubuntu_cloud_image.source_file
-}
-
-#===============================================================================
-# Ubuntu Container Template
-#===============================================================================
-
-resource "proxmox_virtual_environment_file" "ubuntu_container_template" {
-  content_type = "vztmpl"
-  datastore_id = element(data.proxmox_virtual_environment_datastores.example.datastore_ids, index(data.proxmox_virtual_environment_datastores.example.datastore_ids, "local"))
-  node_name    = data.proxmox_virtual_environment_datastores.example.node_name
-
-  source_file {
-    path = "http://download.proxmox.com/images/system/ubuntu-18.04-standard_18.04.1-1_amd64.tar.gz"
-  }
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_container_template_content_type" {
-  value = proxmox_virtual_environment_file.ubuntu_container_template.content_type
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_container_template_datastore_id" {
-  value = proxmox_virtual_environment_file.ubuntu_container_template.datastore_id
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_container_template_file_modification_date" {
-  value = proxmox_virtual_environment_file.ubuntu_container_template.file_modification_date
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_container_template_file_name" {
-  value = proxmox_virtual_environment_file.ubuntu_container_template.file_name
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_container_template_file_size" {
-  value = proxmox_virtual_environment_file.ubuntu_container_template.file_size
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_container_template_file_tag" {
-  value = proxmox_virtual_environment_file.ubuntu_container_template.file_tag
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_container_template_id" {
-  value = proxmox_virtual_environment_file.ubuntu_container_template.id
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_container_template_node_name" {
-  value = proxmox_virtual_environment_file.ubuntu_container_template.node_name
-}
-
-output "resource_proxmox_virtual_environment_file_ubuntu_container_template_source_file" {
-  value = proxmox_virtual_environment_file.ubuntu_container_template.source_file
-}
@@ -41,7 +41,7 @@ resource "proxmox_virtual_environment_vm" "example_template" {
 
 #  disk {
 #    datastore_id = local.datastore_id
-#    file_id      = proxmox_virtual_environment_file.ubuntu_cloud_image.id
+#    file_id      = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
 #    interface    = "virtio0"
 #    iothread     = true
 #  }
fwprovider/tests/resource_vm_test.go (new file): 101 lines
@@ -0,0 +1,101 @@
+/*
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/.
+ */
+
+package tests
+
+import (
+    "context"
+    "fmt"
+    "testing"
+
+    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+    "github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
+    "github.com/stretchr/testify/require"
+)
+
+const (
+    accTestVMName      = "proxmox_virtual_environment_vm.test_vm"
+    accTestVMCloneName = "proxmox_virtual_environment_vm.test_vm_clone"
+)
+
+func TestAccResourceVM(t *testing.T) {
+    t.Parallel()
+
+    accProviders := testAccMuxProviders(context.Background(), t)
+
+    resource.Test(t, resource.TestCase{
+        ProtoV6ProviderFactories: accProviders,
+        Steps: []resource.TestStep{
+            {
+                Config: testAccResourceVMCreateConfig(false),
+                Check:  testAccResourceVMCreateCheck(t),
+            },
+            {
+                Config: testAccResourceVMCreateConfig(true) + testAccResourceVMCreateCloneConfig(),
+                Check:  testAccResourceVMCreateCloneCheck(t),
+            },
+        },
+    })
+}
+
+func testAccResourceVMCreateConfig(isTemplate bool) string {
+    return fmt.Sprintf(`
+resource "proxmox_virtual_environment_vm" "test_vm" {
+    node_name = "%s"
+    vm_id = 2100
+    template = %t
+    started = false
+
+    disk {
+        file_format= "raw"
+        datastore_id = "local-lvm"
+        interface = "virtio0"
+        size = 8
+    }
+
+}
+`, accTestNodeName, isTemplate)
+}
+
+func testAccResourceVMCreateCheck(t *testing.T) resource.TestCheckFunc {
+    t.Helper()
+
+    return resource.ComposeTestCheckFunc(
+        func(*terraform.State) error {
+            err := getNodesClient().VM(2100).WaitForVMStatus(context.Background(), "stopped", 10, 1)
+            require.NoError(t, err, "vm did not start")
+
+            return nil
+        },
+    )
+}
+
+func testAccResourceVMCreateCloneConfig() string {
+    return fmt.Sprintf(`
+resource "proxmox_virtual_environment_vm" "test_vm_clone" {
+    depends_on = [proxmox_virtual_environment_vm.test_vm]
+
+    node_name = "%s"
+    vm_id = 2101
+    started = false
+
+    clone {
+        vm_id = 2100
+    }
+}
+`, accTestNodeName)
+}
+
+func testAccResourceVMCreateCloneCheck(t *testing.T) resource.TestCheckFunc {
+    t.Helper()
+
+    return resource.ComposeTestCheckFunc(
+        func(*terraform.State) error {
+            err := getNodesClient().VM(2101).WaitForVMStatus(context.Background(), "stopped", 20, 1)
+            require.NoError(t, err, "vm did not start")
+
+            return nil
+        },
+    )
+}
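
The harness helpers referenced above (testAccMuxProviders, getNodesClient, accTestNodeName) live elsewhere in the fwprovider/tests package and are not part of this diff. The two resource-address constants are declared for attribute assertions; a hypothetical check built from them with the plugin-sdk helpers, shown here purely as an illustration and not part of the commit, could look like:

package tests

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"

// exampleChecks is a hypothetical TestCheckFunc using the declared
// resource-address constants; the committed checks poll the VM status instead.
func exampleChecks() resource.TestCheckFunc {
    return resource.ComposeTestCheckFunc(
        resource.TestCheckResourceAttr(accTestVMName, "vm_id", "2100"),
        resource.TestCheckResourceAttr(accTestVMCloneName, "vm_id", "2101"),
    )
}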
@@ -625,8 +625,8 @@ func (c *Client) WaitForVMConfigUnlock(ctx context.Context, timeout int, delay i
     return fmt.Errorf("timeout while waiting for VM \"%d\" configuration to become unlocked", c.VMID)
 }
 
-// WaitForVMState waits for a virtual machine to reach a specific state.
-func (c *Client) WaitForVMState(ctx context.Context, state string, timeout int, delay int) error {
+// WaitForVMStatus waits for a virtual machine to reach a specific status.
+func (c *Client) WaitForVMStatus(ctx context.Context, state string, timeout int, delay int) error {
     state = strings.ToLower(state)
 
     timeDelay := int64(delay)
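
The hunk shows only the rename and the start of the body. As a rough sketch of the polling pattern behind such a helper (assumptions: a getStatus callback standing in for the client's status call, and second-based timeout/delay matching the signature above):

package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

// waitFor polls getStatus every delay seconds until it returns the wanted
// status or the timeout (in seconds) elapses.
func waitFor(ctx context.Context, want string, timeout, delay int, getStatus func(context.Context) (string, error)) error {
    deadline := time.Now().Add(time.Duration(timeout) * time.Second)
    for time.Now().Before(deadline) {
        status, err := getStatus(ctx)
        if err != nil {
            return err
        }
        if status == want {
            return nil
        }
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(time.Duration(delay) * time.Second):
        }
    }
    return errors.New("timeout while waiting for VM status")
}

func main() {
    calls := 0
    err := waitFor(context.Background(), "stopped", 5, 1, func(context.Context) (string, error) {
        calls++
        if calls > 2 {
            return "stopped", nil
        }
        return "running", nil
    })
    fmt.Println(err) // <nil> once the fake status flips to "stopped"
}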
@@ -251,7 +251,8 @@ func (d CustomStorageDevice) StorageInterface() string {
         }
     }
 
-    panic(fmt.Sprintf("cannot determine storage interface for disk interface '%s'", *d.Interface))
+    // panic(fmt.Sprintf("cannot determine storage interface for disk interface '%s'", *d.Interface))
+    return ""
 }
 
 // CustomStorageDevices handles map of QEMU storage device per disk interface.
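
For context, StorageInterface maps a disk interface name such as "virtio0" to its storage bus. A minimal sketch of that mapping, assuming the bus is derived from the interface-name prefix (the bus list here is an assumption; the real method inspects *d.Interface):

package main

import (
    "fmt"
    "strings"
)

func storageInterface(diskInterface string) string {
    for _, bus := range []string{"virtio", "scsi", "sata", "ide"} {
        if strings.HasPrefix(diskInterface, bus) {
            return bus
        }
    }
    // After this commit the real method returns "" instead of panicking,
    // leaving unknown interfaces to the caller to handle.
    return ""
}

func main() {
    fmt.Println(storageInterface("virtio0")) // virtio
    fmt.Println(storageInterface("scsi13"))  // scsi
    fmt.Println(storageInterface("foo1"))    // (empty)
}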
@@ -1245,11 +1246,9 @@ func (r CustomStartupOrder) EncodeValues(key string, v *url.Values) error {
     return nil
 }
 
-// EncodeValues converts a CustomStorageDevice struct to a URL vlaue.
-func (d CustomStorageDevice) EncodeValues(key string, v *url.Values) error {
-    values := []string{
-        fmt.Sprintf("file=%s", d.FileVolume),
-    }
+// EncodeOptions converts a CustomStorageDevice's common options a URL vlaue.
+func (d CustomStorageDevice) EncodeOptions() string {
+    values := []string{}
 
     if d.AIO != nil {
         values = append(values, fmt.Sprintf("aio=%s", *d.AIO))
@@ -1263,34 +1262,6 @@ func (d CustomStorageDevice) EncodeValues(key string, v *url.Values) error {
         }
     }
 
-    if d.BurstableReadSpeedMbps != nil {
-        values = append(values, fmt.Sprintf("mbps_rd_max=%d", *d.BurstableReadSpeedMbps))
-    }
-
-    if d.BurstableWriteSpeedMbps != nil {
-        values = append(values, fmt.Sprintf("mbps_wr_max=%d", *d.BurstableWriteSpeedMbps))
-    }
-
-    if d.Format != nil {
-        values = append(values, fmt.Sprintf("format=%s", *d.Format))
-    }
-
-    if d.MaxReadSpeedMbps != nil {
-        values = append(values, fmt.Sprintf("mbps_rd=%d", *d.MaxReadSpeedMbps))
-    }
-
-    if d.MaxWriteSpeedMbps != nil {
-        values = append(values, fmt.Sprintf("mbps_wr=%d", *d.MaxWriteSpeedMbps))
-    }
-
-    if d.Media != nil {
-        values = append(values, fmt.Sprintf("media=%s", *d.Media))
-    }
-
-    if d.Size != nil {
-        values = append(values, fmt.Sprintf("size=%s", *d.Size))
-    }
-
     if d.IOThread != nil {
         if *d.IOThread {
             values = append(values, "iothread=1")
@@ -1315,6 +1286,45 @@ func (d CustomStorageDevice) EncodeValues(key string, v *url.Values) error {
         values = append(values, fmt.Sprintf("cache=%s", *d.Cache))
     }
 
+    if d.BurstableReadSpeedMbps != nil {
+        values = append(values, fmt.Sprintf("mbps_rd_max=%d", *d.BurstableReadSpeedMbps))
+    }
+
+    if d.BurstableWriteSpeedMbps != nil {
+        values = append(values, fmt.Sprintf("mbps_wr_max=%d", *d.BurstableWriteSpeedMbps))
+    }
+
+    if d.MaxReadSpeedMbps != nil {
+        values = append(values, fmt.Sprintf("mbps_rd=%d", *d.MaxReadSpeedMbps))
+    }
+
+    if d.MaxWriteSpeedMbps != nil {
+        values = append(values, fmt.Sprintf("mbps_wr=%d", *d.MaxWriteSpeedMbps))
+    }
+
+    return strings.Join(values, ",")
+}
+
+// EncodeValues converts a CustomStorageDevice struct to a URL vlaue.
+func (d CustomStorageDevice) EncodeValues(key string, v *url.Values) error {
+    values := []string{
+        fmt.Sprintf("file=%s", d.FileVolume),
+    }
+
+    if d.Format != nil {
+        values = append(values, fmt.Sprintf("format=%s", *d.Format))
+    }
+
+    if d.Media != nil {
+        values = append(values, fmt.Sprintf("media=%s", *d.Media))
+    }
+
+    if d.Size != nil {
+        values = append(values, fmt.Sprintf("size=%s", *d.Size))
+    }
+
+    values = append(values, d.EncodeOptions())
+
+    v.Add(key, strings.Join(values, ","))
+
+    return nil
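
The net effect of the split: EncodeValues keeps the identifying fields (file, format, media, size) and appends whatever EncodeOptions produces, so the option handling can be reused by the disk-import path below. A self-contained approximation of the resulting wire format, with illustrative values (the exact field set and ordering are assumptions based on the hunks above):

package main

import (
    "fmt"
    "net/url"
    "strings"
)

func main() {
    // Tunables roughly as EncodeOptions would emit them (illustrative values).
    options := strings.Join([]string{"iothread=1", "mbps_rd=100", "mbps_rd_max=200"}, ",")

    // Identifying fields handled by EncodeValues, with the options appended.
    fields := []string{"file=local-lvm:vm-2100-disk-0", "format=raw", "size=8G", options}

    v := url.Values{}
    v.Add("virtio0", strings.Join(fields, ","))

    fmt.Println(v.Encode())
}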
@@ -172,10 +172,10 @@ func TestCustomStorageDevices_ByStorageInterface(t *testing.T) {
             name:  "not in the list",
             iface: "sata",
             devices: CustomStorageDevices{
-                "virtio0": CustomStorageDevice{
+                "virtio0": &CustomStorageDevice{
                     Interface: types.StrPtr("virtio0"),
                 },
-                "scsi13": CustomStorageDevice{
+                "scsi13": &CustomStorageDevice{
                     Interface: types.StrPtr("scsi13"),
                 },
             },
@@ -185,21 +185,21 @@
             name:  "not in the list",
             iface: "virtio",
             devices: CustomStorageDevices{
-                "virtio0": CustomStorageDevice{
+                "virtio0": &CustomStorageDevice{
                     Interface: types.StrPtr("virtio0"),
                 },
-                "scsi13": CustomStorageDevice{
+                "scsi13": &CustomStorageDevice{
                     Interface: types.StrPtr("scsi13"),
                 },
-                "virtio1": CustomStorageDevice{
+                "virtio1": &CustomStorageDevice{
                     Interface: types.StrPtr("virtio1"),
                 },
             },
             want: CustomStorageDevices{
-                "virtio0": CustomStorageDevice{
+                "virtio0": &CustomStorageDevice{
                     Interface: types.StrPtr("virtio0"),
                 },
-                "virtio1": CustomStorageDevice{
+                "virtio1": &CustomStorageDevice{
                     Interface: types.StrPtr("virtio1"),
                 },
             },
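
The test updates follow the type change: CustomStorageDevices now maps interface names to *CustomStorageDevice rather than to values. A toy illustration of why the pointer form matters for in-place updates:

package main

import "fmt"

type device struct{ size int }

func main() {
    // Map of values: reading an entry yields a copy.
    byValue := map[string]device{"virtio0": {size: 8}}
    d := byValue["virtio0"]
    d.size = 16
    fmt.Println(byValue["virtio0"].size) // still 8

    // Map of pointers: entries can be updated in place.
    byPointer := map[string]*device{"virtio0": {size: 8}}
    byPointer["virtio0"].size = 16
    fmt.Println(byPointer["virtio0"].size) // 16
}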
@@ -197,9 +197,10 @@ func diskSchema() *schema.Schema {
     }
 }
 
+// called from vmCreateClone
 func createDisks(
     ctx context.Context, vmConfig *vms.GetResponseData, d *schema.ResourceData, vmAPI *vms.Client,
-) (map[string]*vms.CustomStorageDevice, error) {
+) (vms.CustomStorageDevices, error) {
+    // this is what VM has at the moment: map of interface name (virtio1) -> disk object
     currentDisks := populateFileIDs(mapStorageDevices(vmConfig), d)
 
@@ -209,64 +210,92 @@ func createDisks(
         return nil, e
     }
 
-    for diskInterface, planDisk := range planDisks {
-        currentDisk := currentDisks[diskInterface]
+    for iface, planDisk := range planDisks {
+        currentDisk := currentDisks[iface]
 
-        // create disks that are not present in the current configuration
         if currentDisk == nil {
+            // create disks that are not present in the current configuration
             err := createDisk(ctx, planDisk, vmAPI)
             if err != nil {
                 return nil, err
             }
-        } else {
-            // disk is present, i.e. when cloned a template, but we need to check if it needs to be resized
-            if planDisk.Size.InGigabytes() < currentDisk.Size.InGigabytes() {
-                return nil, fmt.Errorf("disk resize fails requests size (%dG) is lower than current size (%s)",
-                    planDisk.Size.InGigabytes(),
-                    *currentDisk.Size,
-                )
-            }
 
-            moveDisk := false
-            if *planDisk.ID != "" {
-                fileIDParts := strings.Split(currentDisk.FileVolume, ":")
-                moveDisk = *planDisk.ID != fileIDParts[0]
-            }
+            continue
+        }
 
-            if moveDisk {
-                moveDiskTimeout := d.Get(mkTimeoutMoveDisk).(int)
-                deleteOriginalDisk := types.CustomBool(true)
+        // disk is present, i.e. when cloning a template, but we need to check if it needs to be moved or resized
 
-                diskMoveBody := &vms.MoveDiskRequestBody{
-                    DeleteOriginalDisk: &deleteOriginalDisk,
-                    Disk:               diskInterface,
-                    TargetStorage:      *planDisk.ID,
-                }
+        timeoutSec := d.Get(mkTimeoutMoveDisk).(int)
 
-                err := vmAPI.MoveVMDisk(ctx, diskMoveBody, moveDiskTimeout)
-                if err != nil {
-                    return nil, err
-                }
-            }
+        err := resizeDiskIfRequired(ctx, currentDisk, planDisk, vmAPI, timeoutSec)
+        if err != nil {
+            return nil, err
+        }
 
-            if planDisk.Size.InGigabytes() > currentDisk.Size.InGigabytes() {
-                moveDiskTimeout := d.Get(mkTimeoutMoveDisk).(int)
-
-                diskResizeBody := &vms.ResizeDiskRequestBody{
-                    Disk: diskInterface,
-                    Size: *types.DiskSizeFromGigabytes(planDisk.Size.InGigabytes()),
-                }
-
-                err := vmAPI.ResizeVMDisk(ctx, diskResizeBody, moveDiskTimeout)
-                if err != nil {
-                    return nil, err
-                }
-            }
-        }
+        err = moveDiskIfRequired(ctx, currentDisk, planDisk, vmAPI, timeoutSec)
+        if err != nil {
+            return nil, err
+        }
     }
 
     return currentDisks, nil
 }
 
+func resizeDiskIfRequired(
+    ctx context.Context,
+    currentDisk *vms.CustomStorageDevice, planDisk *vms.CustomStorageDevice,
+    vmAPI *vms.Client, timeoutSec int,
+) error {
+    if planDisk.Size.InGigabytes() < currentDisk.Size.InGigabytes() {
+        return fmt.Errorf("the planned disk size (%dG) is lower than the current size (%s)",
+            planDisk.Size.InGigabytes(),
+            *currentDisk.Size,
+        )
+    }
+
+    if planDisk.Size.InGigabytes() > currentDisk.Size.InGigabytes() {
+        diskResizeBody := &vms.ResizeDiskRequestBody{
+            Disk: *planDisk.Interface,
+            Size: *planDisk.Size,
+        }
+
+        err := vmAPI.ResizeVMDisk(ctx, diskResizeBody, timeoutSec)
+        if err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
+
+func moveDiskIfRequired(
+    ctx context.Context,
+    currentDisk *vms.CustomStorageDevice, planDisk *vms.CustomStorageDevice,
+    vmAPI *vms.Client, timeoutSec int,
+) error {
+    needToMove := false
+
+    if *planDisk.ID != "" {
+        fileIDParts := strings.Split(currentDisk.FileVolume, ":")
+        needToMove = *planDisk.ID != fileIDParts[0]
+    }
+
+    if needToMove {
+        diskMoveBody := &vms.MoveDiskRequestBody{
+            DeleteOriginalDisk: types.CustomBool(true).Pointer(),
+            Disk:               *planDisk.Interface,
+            TargetStorage:      *planDisk.ID,
+        }
+
+        err := vmAPI.MoveVMDisk(ctx, diskMoveBody, timeoutSec)
+        if err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
+
 func createDisk(ctx context.Context, disk *vms.CustomStorageDevice, vmAPI *vms.Client) error {
     addToDevices := func(ds vms.CustomStorageDevices, disk *vms.CustomStorageDevice) vms.CustomStorageDevices {
         if ds == nil {
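
The move check factored into moveDiskIfRequired compares the datastore planned in the configuration against the datastore prefix of the disk's current volume ID. A sketch of that comparison (the "<datastore>:<volume>" format matches the strings.Split above; the sample values are illustrative):

package main

import (
    "fmt"
    "strings"
)

// needToMove reports whether the planned datastore differs from the datastore
// encoded in the current volume ID ("<datastore>:<volume>").
func needToMove(currentFileVolume, plannedDatastoreID string) bool {
    if plannedDatastoreID == "" {
        return false // no datastore pinned in the plan, nothing to move
    }
    return plannedDatastoreID != strings.Split(currentFileVolume, ":")[0]
}

func main() {
    fmt.Println(needToMove("local-lvm:vm-2100-disk-0", "local-lvm")) // false
    fmt.Println(needToMove("local-lvm:vm-2100-disk-0", "local"))     // true
}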
@@ -297,112 +326,44 @@ func createDisk(ctx context.Context, disk *vms.CustomStorageDevice, vmAPI *vms.C
     return nil
 }
 
-func vmCreateCustomDisks(ctx context.Context, d *schema.ResourceData, m interface{}) error {
+func vmImportCustomDisks(ctx context.Context, d *schema.ResourceData, m interface{}) error {
     vmID, err := strconv.Atoi(d.Id())
     if err != nil {
         return err
     }
 
-    // Determine the ID of the next disk.
-    disk := d.Get(mkDisk).([]interface{})
+    planDisks, err := getStorageDevicesFromResource(d)
+    if err != nil {
+        return err
+    }
 
     diskCount := 0
 
-    for _, d := range disk {
-        block := d.(map[string]interface{})
-        fileID, _ := block[mkDiskFileID].(string)
-
-        if fileID == "" {
+    for _, d := range planDisks {
+        if *d.FileID == "" {
             diskCount++
         }
     }
 
-    // Retrieve some information about the disk schema.
-    resourceSchema := VM().Schema
-    diskSchemaElem := resourceSchema[mkDisk].Elem
-    diskSchemaResource := diskSchemaElem.(*schema.Resource)
-    diskSpeedResource := diskSchemaResource.Schema[mkDiskSpeed]
-
     // Generate the commands required to import the specified disks.
     commands := []string{}
     importedDiskCount := 0
 
-    for _, d := range disk {
-        block := d.(map[string]interface{})
-
-        fileID, _ := block[mkDiskFileID].(string)
-
-        if fileID == "" {
+    for _, d := range planDisks {
+        if *d.FileID == "" {
             continue
         }
 
-        datastoreID, _ := block[mkDiskDatastoreID].(string)
-        fileFormat, _ := block[mkDiskFileFormat].(string)
-        size, _ := block[mkDiskSize].(int)
-        speed := block[mkDiskSpeed].([]interface{})
-        diskInterface, _ := block[mkDiskInterface].(string)
-        ioThread := types.CustomBool(block[mkDiskIOThread].(bool))
-        ssd := types.CustomBool(block[mkDiskSSD].(bool))
-        discard, _ := block[mkDiskDiscard].(string)
-        cache, _ := block[mkDiskCache].(string)
-
-        if fileFormat == "" {
-            fileFormat = dvDiskFileFormat
-        }
-
-        if len(speed) == 0 {
-            diskSpeedDefault, err := diskSpeedResource.DefaultValue()
-            if err != nil {
-                return err
-            }
-
-            speed = diskSpeedDefault.([]interface{})
-        }
-
-        speedBlock := speed[0].(map[string]interface{})
-        speedLimitRead := speedBlock[mkDiskSpeedRead].(int)
-        speedLimitReadBurstable := speedBlock[mkDiskSpeedReadBurstable].(int)
-        speedLimitWrite := speedBlock[mkDiskSpeedWrite].(int)
-        speedLimitWriteBurstable := speedBlock[mkDiskSpeedWriteBurstable].(int)
-
-        diskOptions := ""
-
-        if ioThread {
-            diskOptions += ",iothread=1"
-        }
-
-        if ssd {
-            diskOptions += ",ssd=1"
-        }
-
-        if discard != "" {
-            diskOptions += fmt.Sprintf(",discard=%s", discard)
-        }
-
-        if cache != "" {
-            diskOptions += fmt.Sprintf(",cache=%s", cache)
-        }
-
-        if speedLimitRead > 0 {
-            diskOptions += fmt.Sprintf(",mbps_rd=%d", speedLimitRead)
-        }
-
-        if speedLimitReadBurstable > 0 {
-            diskOptions += fmt.Sprintf(",mbps_rd_max=%d", speedLimitReadBurstable)
-        }
-
-        if speedLimitWrite > 0 {
-            diskOptions += fmt.Sprintf(",mbps_wr=%d", speedLimitWrite)
-        }
-
-        if speedLimitWriteBurstable > 0 {
-            diskOptions += fmt.Sprintf(",mbps_wr_max=%d", speedLimitWriteBurstable)
+        diskOptions := d.EncodeOptions()
+        if diskOptions != "" {
+            diskOptions = "," + diskOptions
         }
 
         filePathTmp := fmt.Sprintf(
             "/tmp/vm-%d-disk-%d.%s",
             vmID,
             diskCount+importedDiskCount,
-            fileFormat,
+            *d.Format,
         )
 
         //nolint:lll
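
Two details worth noting in the rewritten import path: EncodeOptions returns an unprefixed option string, so a comma is prepended only when it is non-empty, and the temporary image path is derived from the VM ID, a running disk index, and the format. A sketch of both, with illustrative values:

package main

import "fmt"

func main() {
    // Prepend a comma only when there are options to append.
    diskOptions := "iothread=1,ssd=1" // illustrative EncodeOptions output
    if diskOptions != "" {
        diskOptions = "," + diskOptions
    }
    fmt.Println("file=local-lvm:vm-2100-disk-0" + diskOptions)

    // Temporary image path: /tmp/vm-<vmID>-disk-<index>.<format>
    fmt.Printf("/tmp/vm-%d-disk-%d.%s\n", 2100, 0, "raw")
}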
@@ -410,12 +371,12 @@ func vmCreateCustomDisks(ctx context.Context, d *schema.ResourceData, m interfac
             commands,
             `set -e`,
             `try_sudo(){ if [ $(sudo -n echo tfpve 2>&1 | grep "tfpve" | wc -l) -gt 0 ]; then sudo $1; else $1; fi }`,
-            fmt.Sprintf(`file_id="%s"`, fileID),
-            fmt.Sprintf(`file_format="%s"`, fileFormat),
-            fmt.Sprintf(`datastore_id_target="%s"`, datastoreID),
+            fmt.Sprintf(`file_id="%s"`, *d.FileID),
+            fmt.Sprintf(`file_format="%s"`, *d.Format),
+            fmt.Sprintf(`datastore_id_target="%s"`, *d.ID),
             fmt.Sprintf(`disk_options="%s"`, diskOptions),
-            fmt.Sprintf(`disk_size="%d"`, size),
-            fmt.Sprintf(`disk_interface="%s"`, diskInterface),
+            fmt.Sprintf(`disk_size="%d"`, d.Size.InGigabytes()),
+            fmt.Sprintf(`disk_interface="%s"`, *d.Interface),
             fmt.Sprintf(`file_path_tmp="%s"`, filePathTmp),
             fmt.Sprintf(`vm_id="%d"`, vmID),
             `source_image=$(try_sudo "pvesm path $file_id")`,
@@ -557,7 +518,7 @@ func getDiskDeviceObjects1(d *schema.ResourceData, disks []interface{}) (vms.Cus
 
     if storageInterface != "virtio" && storageInterface != "scsi" && storageInterface != "sata" {
         return diskDeviceObjects, fmt.Errorf(
-            "Defined disk interface not supported. Interface was '%s', but only 'virtio', 'sata' and 'scsi' are supported",
+            "The disk interface '%s' is not supported, should be one of 'virtioN', 'sataN', or 'scsiN'",
             diskInterface,
         )
     }
@@ -701,7 +662,7 @@ func updateDisk(d *schema.ResourceData, vmConfig *vms.GetResponseData, updateBod
         return nil
     }
 
-    currentDisks := populateFileIDs(mapStorageDevices(vmConfig), d)
+    // currentDisks := populateFileIDs(mapStorageDevices(vmConfig), d)
 
     planDisks, err := getStorageDevicesFromResource(d)
     if err != nil {
@@ -718,22 +679,25 @@ func updateDisk(d *schema.ResourceData, vmConfig *vms.GetResponseData, updateBod
         return ds
     }
 
-    for diskInterface, disk := range planDisks {
-        if currentDisks[diskInterface] == nil {
-            // TODO: create a new disk here
-            return fmt.Errorf("missing device %s", diskInterface)
-        }
+    for _, disk := range planDisks {
+        // for diskInterface, disk := range planDisks {
+        // 	if currentDisks[diskInterface] == nil {
+        // 		// TODO: create a new disk here
+        // 		return fmt.Errorf("missing device %s", diskInterface)
+        // 	}
 
-        // copy the current disk and update the fields
-        tmp := *currentDisks[diskInterface]
-        tmp.BurstableReadSpeedMbps = disk.BurstableReadSpeedMbps
-        tmp.BurstableWriteSpeedMbps = disk.BurstableWriteSpeedMbps
-        tmp.MaxReadSpeedMbps = disk.MaxReadSpeedMbps
-        tmp.MaxWriteSpeedMbps = disk.MaxWriteSpeedMbps
-        tmp.Cache = disk.Cache
-        tmp.Discard = disk.Discard
-        tmp.IOThread = disk.IOThread
-        tmp.SSD = disk.SSD
+        tmp := *disk
+
+        // tmp := *currentDisks[diskInterface]
+        // tmp.BurstableReadSpeedMbps = disk.BurstableReadSpeedMbps
+        // tmp.BurstableWriteSpeedMbps = disk.BurstableWriteSpeedMbps
+        // tmp.MaxReadSpeedMbps = disk.MaxReadSpeedMbps
+        // tmp.MaxWriteSpeedMbps = disk.MaxWriteSpeedMbps
+        // tmp.Cache = disk.Cache
+        // tmp.Discard = disk.Discard
+        // tmp.IOThread = disk.IOThread
+        // tmp.SSD = disk.SSD
 
         switch disk.StorageInterface() {
         case "virtio":
@@ -820,7 +784,7 @@ func mapStorageDevices(resp *vms.GetResponseData) map[string]*vms.CustomStorageD
 }
 
 // mapStorageDevices maps the current VM storage devices by their interface names.
-func populateFileIDs(devices map[string]*vms.CustomStorageDevice, d *schema.ResourceData) map[string]*vms.CustomStorageDevice {
+func populateFileIDs(devices vms.CustomStorageDevices, d *schema.ResourceData) vms.CustomStorageDevices {
     planDisk := d.Get(mkDisk)
 
     planDiskList := planDisk.([]interface{})
@@ -57,7 +57,7 @@ func TestPopulateFileID(t *testing.T) {
     err := d.Set("disk", disk)
     require.NoError(t, err)
 
-    expected := map[string]*vms.CustomStorageDevice{
+    expected := vms.CustomStorageDevices{
         "virtio0": {
             FileID: types.StrPtr("local:100/vm-100-disk-1.qcow2"),
         },
@@ -981,6 +981,7 @@ func VM() *schema.Resource {
             Type:        schema.TypeList,
             Description: "The MAC addresses for the network interfaces",
             Computed:    true,
+            Optional:    true,
             Elem:        &schema.Schema{Type: schema.TypeString},
         },
         mkMemory: {
@@ -115,7 +115,7 @@ func vmStart(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData) dia
         })
     }
 
-    return append(diags, diag.FromErr(vmAPI.WaitForVMState(ctx, "running", startVMTimeout, 1))...)
+    return append(diags, diag.FromErr(vmAPI.WaitForVMStatus(ctx, "running", startVMTimeout, 1))...)
 }
 
 // Shutdown the VM, then wait for it to actually shut down (it may not be shut down immediately if
@@ -134,7 +134,7 @@ func vmShutdown(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData)
         return diag.FromErr(e)
     }
 
-    return diag.FromErr(vmAPI.WaitForVMState(ctx, "stopped", shutdownTimeout, 1))
+    return diag.FromErr(vmAPI.WaitForVMStatus(ctx, "stopped", shutdownTimeout, 1))
 }
 
 // Forcefully stop the VM, then wait for it to actually stop.
@@ -148,7 +148,7 @@ func vmStop(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData) diag
         return diag.FromErr(e)
     }
 
-    return diag.FromErr(vmAPI.WaitForVMState(ctx, "stopped", stopTimeout, 1))
+    return diag.FromErr(vmAPI.WaitForVMStatus(ctx, "stopped", stopTimeout, 1))
 }
 
 func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@@ -1091,7 +1091,7 @@ func vmCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{})
 
     d.SetId(strconv.Itoa(vmID))
 
-    err = vmCreateCustomDisks(ctx, d, m)
+    err = vmImportCustomDisks(ctx, d, m)
     if err != nil {
         return diag.FromErr(err)
     }
@@ -2625,26 +2625,23 @@ func vmReadCustom(
         networkDeviceList[ni] = networkDevice
     }
 
-    if len(clone) > 0 {
-        if len(currentNetworkDeviceList) > 0 {
-            err := d.Set(
-                mkMACAddresses,
-                macAddresses[0:len(currentNetworkDeviceList)],
-            )
-            diags = append(diags, diag.FromErr(err)...)
-            err = d.Set(
-                mkNetworkDevice,
-                networkDeviceList[:networkDeviceLast+1],
-            )
-            diags = append(diags, diag.FromErr(err)...)
-        }
+    if len(currentNetworkDeviceList) == 0 {
+        err := d.Set(mkMACAddresses, []interface{}{})
+        diags = append(diags, diag.FromErr(err)...)
+        err = d.Set(mkNetworkDevice, []interface{}{})
+        diags = append(diags, diag.FromErr(err)...)
     } else {
+        err := d.Set(mkMACAddresses, macAddresses[0:len(currentNetworkDeviceList)])
+        diags = append(diags, diag.FromErr(err)...)
 
-        if len(currentNetworkDeviceList) > 0 || networkDeviceLast > -1 {
-            err := d.Set(mkNetworkDevice, networkDeviceList[:networkDeviceLast+1])
+        if len(clone) > 0 {
+            err = d.Set(mkNetworkDevice, networkDeviceList[:networkDeviceLast+1])
             diags = append(diags, diag.FromErr(err)...)
+        } else {
+            if len(currentNetworkDeviceList) > 0 || networkDeviceLast > -1 {
+                err := d.Set(mkNetworkDevice, networkDeviceList[:networkDeviceLast+1])
+                diags = append(diags, diag.FromErr(err)...)
+            }
         }
     }
 
@@ -4061,7 +4058,7 @@ func vmDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
     }
 
     // Wait for the state to become unavailable as that clearly indicates the destruction of the VM.
-    err = vmAPI.WaitForVMState(ctx, "", 60, 2)
+    err = vmAPI.WaitForVMStatus(ctx, "", 60, 2)
     if err == nil {
         return diag.Errorf("failed to delete VM \"%d\"", vmID)
     }
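
Note the inverted check in vmDelete: the wait passes an empty status and succeeds only while the status endpoint still answers, so a nil error from the wait means the VM was not actually deleted. A condensed sketch of that logic:

package main

import (
    "errors"
    "fmt"
)

// checkDeleted mirrors the inverted wait in vmDelete: a nil wait error means
// the VM status was still readable, i.e. deletion failed.
func checkDeleted(vmID int, waitErr error) error {
    if waitErr == nil {
        return fmt.Errorf("failed to delete VM \"%d\"", vmID)
    }
    return nil // the status became unavailable, so the VM is gone
}

func main() {
    fmt.Println(checkDeleted(100, nil))                   // deletion failed
    fmt.Println(checkDeleted(100, errors.New("timeout"))) // <nil>
}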