Mirror of https://github.com/bpg/terraform-provider-proxmox.git (synced 2025-06-30 02:31:10 +00:00)

chore(vm): refactoring, add acceptance tests (#1040)

Clean up and refactor the VM code, add acceptance tests around disks, and fix a few minor bugs.

Signed-off-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>

parent 861d609882
commit b648e5bcb0
@ -7,7 +7,9 @@

// Features to add to the dev container. More info: https://containers.dev/features.
"features": {
"ghcr.io/devcontainers/features/terraform:1": {}
"ghcr.io/devcontainers/features/terraform:1": {
"version": "1.7.2"
}
},

// Workaround for https://github.com/orgs/community/discussions/75161
@ -73,6 +73,7 @@ linters:
- ireturn
- maintidx
- nlreturn
- perfsprint
- tagliatelle
- testpackage
- varnamelen
.vscode/launch.json (vendored, 2 changes)
@ -8,7 +8,7 @@
"mode": "test",
"program": "${workspaceFolder}/fwprovider/tests",
"envFile": "${workspaceFolder}/testacc.env",
"args": ["-test.v", "-test.timeout", "30s"]
"args": ["-debug", "-test.v", "-test.timeout", "30s"]
},
{
.vscode/settings.json (vendored, 6 changes)
@ -3,9 +3,13 @@
"cSpell.words": [
"capi",
"CLRF",
"deepcode",
"iface",
"iothread",
"keyctl",
"mbps",
"nolint",
"NUMA",
"proxmoxtf",
"qcow",
"rootfs",
@ -25,5 +29,5 @@
"--fast"
],
"go.lintOnSave": "workspace",
"go.testEnvFile": "${workspaceFolder}/test.env",
"go.testEnvFile": "${workspaceFolder}/testacc.env",
}
@ -63,103 +63,3 @@ local-hostname: myhost.internal
|
||||
file_name = "meta-config.yaml"
|
||||
}
|
||||
}
|
||||
|
||||
#===============================================================================
|
||||
# Ubuntu Cloud Image
|
||||
#===============================================================================
|
||||
|
||||
resource "proxmox_virtual_environment_file" "ubuntu_cloud_image" {
|
||||
content_type = "iso"
|
||||
datastore_id = element(data.proxmox_virtual_environment_datastores.example.datastore_ids, index(data.proxmox_virtual_environment_datastores.example.datastore_ids, "local"))
|
||||
node_name = data.proxmox_virtual_environment_datastores.example.node_name
|
||||
|
||||
source_file {
|
||||
path = "https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img"
|
||||
}
|
||||
}
|
||||
|
||||
output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_content_type" {
|
||||
value = proxmox_virtual_environment_file.ubuntu_cloud_image.content_type
|
||||
}
|
||||
|
||||
output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_datastore_id" {
|
||||
value = proxmox_virtual_environment_file.ubuntu_cloud_image.datastore_id
|
||||
}
|
||||
|
||||
output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_file_modification_date" {
|
||||
value = proxmox_virtual_environment_file.ubuntu_cloud_image.file_modification_date
|
||||
}
|
||||
|
||||
output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_file_name" {
|
||||
value = proxmox_virtual_environment_file.ubuntu_cloud_image.file_name
|
||||
}
|
||||
|
||||
output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_file_size" {
|
||||
value = proxmox_virtual_environment_file.ubuntu_cloud_image.file_size
|
||||
}
|
||||
|
||||
output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_file_tag" {
|
||||
value = proxmox_virtual_environment_file.ubuntu_cloud_image.file_tag
|
||||
}
|
||||
|
||||
output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_id" {
|
||||
value = proxmox_virtual_environment_file.ubuntu_cloud_image.id
|
||||
}
|
||||
|
||||
output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_node_name" {
|
||||
value = proxmox_virtual_environment_file.ubuntu_cloud_image.node_name
|
||||
}
|
||||
|
||||
output "resource_proxmox_virtual_environment_file_ubuntu_cloud_image_source_file" {
|
||||
value = proxmox_virtual_environment_file.ubuntu_cloud_image.source_file
|
||||
}
|
||||
|
||||
#===============================================================================
|
||||
# Ubuntu Container Template
|
||||
#===============================================================================
|
||||
|
||||
resource "proxmox_virtual_environment_file" "ubuntu_container_template" {
|
||||
content_type = "vztmpl"
|
||||
datastore_id = element(data.proxmox_virtual_environment_datastores.example.datastore_ids, index(data.proxmox_virtual_environment_datastores.example.datastore_ids, "local"))
|
||||
node_name = data.proxmox_virtual_environment_datastores.example.node_name
|
||||
|
||||
source_file {
|
||||
path = "http://download.proxmox.com/images/system/ubuntu-18.04-standard_18.04.1-1_amd64.tar.gz"
|
||||
}
|
||||
}
|
||||
|
||||
output "resource_proxmox_virtual_environment_file_ubuntu_container_template_content_type" {
|
||||
value = proxmox_virtual_environment_file.ubuntu_container_template.content_type
|
||||
}
|
||||
|
||||
output "resource_proxmox_virtual_environment_file_ubuntu_container_template_datastore_id" {
|
||||
value = proxmox_virtual_environment_file.ubuntu_container_template.datastore_id
|
||||
}
|
||||
|
||||
output "resource_proxmox_virtual_environment_file_ubuntu_container_template_file_modification_date" {
|
||||
value = proxmox_virtual_environment_file.ubuntu_container_template.file_modification_date
|
||||
}
|
||||
|
||||
output "resource_proxmox_virtual_environment_file_ubuntu_container_template_file_name" {
|
||||
value = proxmox_virtual_environment_file.ubuntu_container_template.file_name
|
||||
}
|
||||
|
||||
output "resource_proxmox_virtual_environment_file_ubuntu_container_template_file_size" {
|
||||
value = proxmox_virtual_environment_file.ubuntu_container_template.file_size
|
||||
}
|
||||
|
||||
output "resource_proxmox_virtual_environment_file_ubuntu_container_template_file_tag" {
|
||||
value = proxmox_virtual_environment_file.ubuntu_container_template.file_tag
|
||||
}
|
||||
|
||||
output "resource_proxmox_virtual_environment_file_ubuntu_container_template_id" {
|
||||
value = proxmox_virtual_environment_file.ubuntu_container_template.id
|
||||
}
|
||||
|
||||
output "resource_proxmox_virtual_environment_file_ubuntu_container_template_node_name" {
|
||||
value = proxmox_virtual_environment_file.ubuntu_container_template.node_name
|
||||
}
|
||||
|
||||
output "resource_proxmox_virtual_environment_file_ubuntu_container_template_source_file" {
|
||||
value = proxmox_virtual_environment_file.ubuntu_container_template.source_file
|
||||
}
|
||||
|
@ -41,7 +41,7 @@ resource "proxmox_virtual_environment_vm" "example_template" {

# disk {
# datastore_id = local.datastore_id
# file_id = proxmox_virtual_environment_file.ubuntu_cloud_image.id
# file_id = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
# interface = "virtio0"
# iothread = true
# }
@ -21,9 +21,8 @@ const (
accTestContainerCloneName = "proxmox_virtual_environment_container.test_container_clone"
)

//nolint:paralleltest
func TestAccResourceContainer(t *testing.T) {
t.Parallel()

accProviders := testAccMuxProviders(context.Background(), t)

resource.Test(t, resource.TestCase{
@ -19,9 +19,8 @@ const (
accTestDownloadQcow2FileName = "proxmox_virtual_environment_download_file.qcow2_image"
)

//nolint:paralleltest
func TestAccResourceDownloadFile(t *testing.T) {
t.Parallel()

accProviders := testAccMuxProviders(context.Background(), t)

resource.Test(t, resource.TestCase{
@ -20,9 +20,8 @@ const (
accTestLinuxBridgeName = "proxmox_virtual_environment_network_linux_bridge.test"
)

//nolint:paralleltest
func TestAccResourceLinuxBridge(t *testing.T) {
t.Parallel()

accProviders := testAccMuxProviders(context.Background(), t)

iface := fmt.Sprintf("vmbr%d", gofakeit.Number(10, 9999))
@ -20,9 +20,8 @@ const (
accTestLinuxVLANName = "proxmox_virtual_environment_network_linux_vlan.test"
)

//nolint:paralleltest
func TestAccResourceLinuxVLAN(t *testing.T) {
t.Parallel()

accProviders := testAccMuxProviders(context.Background(), t)

iface := "ens18"
fwprovider/tests/resource_vm_test.go (new file, 280 lines)
@ -0,0 +1,280 @@
|
||||
/*
|
||||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
*/
|
||||
|
||||
package tests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
|
||||
)
|
||||
|
||||
func TestAccResourceVM(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
step resource.TestStep
|
||||
}{
|
||||
{"multiline description", resource.TestStep{
|
||||
Config: `
|
||||
resource "proxmox_virtual_environment_vm" "test_vm1" {
|
||||
node_name = "pve"
|
||||
started = false
|
||||
|
||||
description = <<-EOT
|
||||
my
|
||||
description
|
||||
value
|
||||
EOT
|
||||
}`,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr("proxmox_virtual_environment_vm.test_vm1", "description", "my\ndescription\nvalue"),
|
||||
),
|
||||
}},
|
||||
{"single line description", resource.TestStep{
|
||||
Config: `
|
||||
resource "proxmox_virtual_environment_vm" "test_vm2" {
|
||||
node_name = "pve"
|
||||
started = false
|
||||
|
||||
description = "my description value"
|
||||
}`,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr("proxmox_virtual_environment_vm.test_vm2", "description", "my description value"),
|
||||
),
|
||||
}},
|
||||
{"no description", resource.TestStep{
|
||||
Config: `
|
||||
resource "proxmox_virtual_environment_vm" "test_vm3" {
|
||||
node_name = "pve"
|
||||
started = false
|
||||
|
||||
description = ""
|
||||
}`,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr("proxmox_virtual_environment_vm.test_vm3", "description", ""),
|
||||
),
|
||||
}},
|
||||
}
|
||||
|
||||
accProviders := testAccMuxProviders(context.Background(), t)
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
ProtoV6ProviderFactories: accProviders,
|
||||
Steps: []resource.TestStep{tt.step},
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAccResourceVMDisks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
steps []resource.TestStep
|
||||
}{
|
||||
{"create disk with default parameters", []resource.TestStep{{
|
||||
Config: `
|
||||
resource "proxmox_virtual_environment_vm" "test_disk1" {
|
||||
node_name = "pve"
|
||||
started = false
|
||||
name = "test-disk1"
|
||||
|
||||
disk {
|
||||
// note: default qcow2 is not supported by lvm (?)
|
||||
file_format = "raw"
|
||||
datastore_id = "local-lvm"
|
||||
interface = "virtio0"
|
||||
size = 8
|
||||
}
|
||||
}`,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testResourceAttributes("proxmox_virtual_environment_vm.test_disk1", map[string]string{
|
||||
// those are empty by default, but we can't check for that
|
||||
// "disk.0.cache": "",
|
||||
// "disk.0.discard": "",
|
||||
// "disk.0.file_id": "",
|
||||
"disk.0.datastore_id": "local-lvm",
|
||||
"disk.0.file_format": "raw",
|
||||
"disk.0.interface": "virtio0",
|
||||
"disk.0.iothread": "false",
|
||||
"disk.0.path_in_datastore": `vm-\d+-disk-\d+`,
|
||||
"disk.0.size": "8",
|
||||
"disk.0.ssd": "false",
|
||||
}),
|
||||
),
|
||||
}}},
|
||||
{"create disk from an image", []resource.TestStep{{
|
||||
Config: `
|
||||
resource "proxmox_virtual_environment_download_file" "test_disk2_image" {
|
||||
content_type = "iso"
|
||||
datastore_id = "local"
|
||||
node_name = "pve"
|
||||
url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
|
||||
}
|
||||
resource "proxmox_virtual_environment_vm" "test_disk2" {
|
||||
node_name = "pve"
|
||||
started = false
|
||||
name = "test-disk2"
|
||||
disk {
|
||||
datastore_id = "local-lvm"
|
||||
file_id = proxmox_virtual_environment_download_file.test_disk2_image.id
|
||||
interface = "virtio0"
|
||||
iothread = true
|
||||
discard = "on"
|
||||
size = 20
|
||||
}
|
||||
}`,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testResourceAttributes("proxmox_virtual_environment_vm.test_disk2", map[string]string{
|
||||
"disk.0.cache": "none",
|
||||
"disk.0.datastore_id": "local-lvm",
|
||||
"disk.0.discard": "on",
|
||||
"disk.0.file_format": "raw",
|
||||
"disk.0.interface": "virtio0",
|
||||
"disk.0.iothread": "true",
|
||||
"disk.0.path_in_datastore": `vm-\d+-disk-\d+`,
|
||||
"disk.0.size": "20",
|
||||
"disk.0.ssd": "false",
|
||||
}),
|
||||
),
|
||||
}}},
|
||||
{"clone default disk without overrides", []resource.TestStep{
|
||||
{
|
||||
Config: `
|
||||
resource "proxmox_virtual_environment_vm" "test_disk3_template" {
|
||||
node_name = "pve"
|
||||
started = false
|
||||
name = "test-disk3-template"
|
||||
template = "true"
|
||||
|
||||
disk {
|
||||
file_format = "raw"
|
||||
datastore_id = "local-lvm"
|
||||
interface = "virtio0"
|
||||
size = 8
|
||||
}
|
||||
}
|
||||
resource "proxmox_virtual_environment_vm" "test_disk3" {
|
||||
node_name = "pve"
|
||||
started = false
|
||||
name = "test-disk3"
|
||||
|
||||
clone {
|
||||
vm_id = proxmox_virtual_environment_vm.test_disk3_template.id
|
||||
}
|
||||
}
|
||||
`,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
// fully cloned disk, does not have any attributes in state
|
||||
resource.TestCheckNoResourceAttr("proxmox_virtual_environment_vm.test_disk3", "disk.0"),
|
||||
),
|
||||
},
|
||||
{
|
||||
RefreshState: true,
|
||||
},
|
||||
}},
|
||||
// this test is failing because of https://github.com/bpg/terraform-provider-proxmox/issues/360
|
||||
// {"clone disk with new size", []resource.TestStep{
|
||||
// {
|
||||
// Config: `
|
||||
// resource "proxmox_virtual_environment_vm" "test_disk3_template" {
|
||||
// node_name = "pve"
|
||||
// started = false
|
||||
// name = "test-disk3-template"
|
||||
// template = "true"
|
||||
//
|
||||
// disk {
|
||||
// file_format = "raw"
|
||||
// datastore_id = "local-lvm"
|
||||
// interface = "scsi0"
|
||||
// size = 8
|
||||
// discard = "on"
|
||||
// iothread = true
|
||||
// }
|
||||
// }
|
||||
// resource "proxmox_virtual_environment_vm" "test_disk3" {
|
||||
// node_name = "pve"
|
||||
// started = false
|
||||
// name = "test-disk3"
|
||||
//
|
||||
// clone {
|
||||
// vm_id = proxmox_virtual_environment_vm.test_disk3_template.id
|
||||
// }
|
||||
//
|
||||
// disk {
|
||||
// interface = "scsi0"
|
||||
// size = 10
|
||||
// //ssd = true
|
||||
// }
|
||||
// }
|
||||
// `,
|
||||
// Check: resource.ComposeTestCheckFunc(
|
||||
// testResourceAttributes("proxmox_virtual_environment_vm.test_disk3", map[string]string{
|
||||
// "disk.0.datastore_id": "local-lvm",
|
||||
// "disk.0.discard": "on",
|
||||
// "disk.0.file_format": "raw",
|
||||
// "disk.0.interface": "scsi0",
|
||||
// "disk.0.iothread": "true",
|
||||
// "disk.0.path_in_datastore": `vm-\d+-disk-\d+`,
|
||||
// "disk.0.size": "10",
|
||||
// "disk.0.ssd": "false",
|
||||
// }),
|
||||
// ),
|
||||
// },
|
||||
//{
|
||||
// RefreshState: true,
|
||||
// Destroy: false,
|
||||
// },
|
||||
// }},
|
||||
}
|
||||
|
||||
accProviders := testAccMuxProviders(context.Background(), t)
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
ProtoV6ProviderFactories: accProviders,
|
||||
Steps: tt.steps,
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testResourceAttributes(res string, attrs map[string]string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
for k, v := range attrs {
|
||||
if err := resource.TestCheckResourceAttrWith(res, k, func(got string) error {
|
||||
match, err := regexp.Match(v, []byte(got)) //nolint:mirror
|
||||
if err != nil {
|
||||
return fmt.Errorf("error matching '%s': %w", v, err)
|
||||
}
|
||||
if !match {
|
||||
return fmt.Errorf("expected '%s' to match '%s'", got, v)
|
||||
}
|
||||
return nil
|
||||
})(s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
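One detail of the helper above worth noting: the expected values in the attribute map are treated as regular expressions via regexp.Match, which is what lets the disk tests match generated volume names. A hypothetical extra check built on the same helper, for illustration only (the function name is made up; the attribute keys mirror those used in the tests above):

// Hypothetical example, not part of the commit: the map values are regular
// expressions, so generated identifiers can be matched with patterns.
func exampleDiskCheck() resource.TestCheckFunc {
	return testResourceAttributes("proxmox_virtual_environment_vm.test_disk1", map[string]string{
		"disk.0.size":              "8",
		"disk.0.path_in_datastore": `vm-\d+-disk-\d+`,
	})
}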
|
@ -86,6 +86,7 @@ func NewConnection(endpoint string, insecure bool, minTLS string) (*Connection,
var transport http.RoundTripper = &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: &tls.Config{
// deepcode ignore InsecureTLSConfig: the min TLS version is configurable
MinVersion: version,
InsecureSkipVerify: insecure, //nolint:gosec
},
@ -464,7 +464,7 @@ func (r *CustomRootFS) EncodeValues(key string, v *url.Values) error {
}

if r.Size != nil {
values = append(values, fmt.Sprintf("size=%s", *r.Size))
values = append(values, fmt.Sprintf("size=%d", *r.Size))
}

if r.MountOptions != nil {
@ -4,6 +4,8 @@
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*/

// file deepcode ignore NoHardcodedCredentials/test: test file

package tasks

import (
proxmox/nodes/vms/customstoragedevice.go (new file, 239 lines)
@ -0,0 +1,239 @@
|
||||
package vms
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/bpg/terraform-provider-proxmox/proxmox/types"
|
||||
)
|
||||
|
||||
// CustomStorageDevice handles QEMU SATA device parameters.
|
||||
type CustomStorageDevice struct {
|
||||
AIO *string `json:"aio,omitempty" url:"aio,omitempty"`
|
||||
BackupEnabled *types.CustomBool `json:"backup,omitempty" url:"backup,omitempty,int"`
|
||||
BurstableReadSpeedMbps *int `json:"mbps_rd_max,omitempty" url:"mbps_rd_max,omitempty"`
|
||||
Cache *string `json:"cache,omitempty" url:"cache,omitempty"`
|
||||
BurstableWriteSpeedMbps *int `json:"mbps_wr_max,omitempty" url:"mbps_wr_max,omitempty"`
|
||||
Discard *string `json:"discard,omitempty" url:"discard,omitempty"`
|
||||
Enabled bool `json:"-" url:"-"`
|
||||
FileVolume string `json:"file" url:"file"`
|
||||
Format *string `json:"format,omitempty" url:"format,omitempty"`
|
||||
IOThread *types.CustomBool `json:"iothread,omitempty" url:"iothread,omitempty,int"`
|
||||
SSD *types.CustomBool `json:"ssd,omitempty" url:"ssd,omitempty,int"`
|
||||
MaxReadSpeedMbps *int `json:"mbps_rd,omitempty" url:"mbps_rd,omitempty"`
|
||||
MaxWriteSpeedMbps *int `json:"mbps_wr,omitempty" url:"mbps_wr,omitempty"`
|
||||
Media *string `json:"media,omitempty" url:"media,omitempty"`
|
||||
Size *types.DiskSize `json:"size,omitempty" url:"size,omitempty"`
|
||||
Interface *string `json:"-" url:"-"`
|
||||
DatastoreID *string `json:"-" url:"-"`
|
||||
FileID *string `json:"-" url:"-"`
|
||||
}
|
||||
|
||||
// PathInDatastore returns path part of FileVolume or nil if it is not yet allocated.
|
||||
func (d CustomStorageDevice) PathInDatastore() *string {
|
||||
probablyDatastoreID, pathInDatastore, hasDatastoreID := strings.Cut(d.FileVolume, ":")
|
||||
if !hasDatastoreID {
|
||||
// when no ':' separator is found, 'Cut' places the whole string to 'probablyDatastoreID',
|
||||
// we want it in 'pathInDatastore' (as it is absolute filesystem path)
|
||||
pathInDatastore = probablyDatastoreID
|
||||
|
||||
return &pathInDatastore
|
||||
}
|
||||
|
||||
pathInDatastoreWithoutDigits := strings.Map(
|
||||
func(c rune) rune {
|
||||
if c < '0' || c > '9' {
|
||||
return -1
|
||||
}
|
||||
return c
|
||||
},
|
||||
pathInDatastore)
|
||||
|
||||
if pathInDatastoreWithoutDigits == "" {
|
||||
// FileVolume is not yet allocated, it is in the "STORAGE_ID:SIZE_IN_GiB" format
|
||||
return nil
|
||||
}
|
||||
|
||||
return &pathInDatastore
|
||||
}
|
||||
|
||||
// IsOwnedBy returns true, if CustomStorageDevice is owned by given VM.
|
||||
// Not yet allocated volumes are not owned by any VM.
|
||||
func (d CustomStorageDevice) IsOwnedBy(vmID int) bool {
|
||||
pathInDatastore := d.PathInDatastore()
|
||||
if pathInDatastore == nil {
|
||||
// not yet allocated volume, consider disk not owned by any VM
|
||||
// NOTE: if needed, create IsOwnedByOtherThan(vmId) instead of changing this return value.
|
||||
return false
|
||||
}
|
||||
|
||||
// ZFS uses "local-zfs:vm-123-disk-0"
|
||||
if strings.HasPrefix(*pathInDatastore, fmt.Sprintf("vm-%d-", vmID)) {
|
||||
return true
|
||||
}
|
||||
|
||||
// directory uses "local:123/vm-123-disk-0"
|
||||
if strings.HasPrefix(*pathInDatastore, fmt.Sprintf("%d/vm-%d-", vmID, vmID)) {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// IsCloudInitDrive returns true, if CustomStorageDevice is a cloud-init drive.
|
||||
func (d CustomStorageDevice) IsCloudInitDrive(vmID int) bool {
|
||||
return d.Media != nil && *d.Media == "cdrom" &&
|
||||
strings.Contains(d.FileVolume, fmt.Sprintf("vm-%d-cloudinit", vmID))
|
||||
}
|
||||
|
||||
// StorageInterface returns the storage interface of the CustomStorageDevice,
|
||||
// e.g. "virtio" or "scsi" for "virtio0" or "scsi2".
|
||||
func (d CustomStorageDevice) StorageInterface() string {
|
||||
for i, r := range *d.Interface {
|
||||
if unicode.IsDigit(r) {
|
||||
return (*d.Interface)[:i]
|
||||
}
|
||||
}
|
||||
|
||||
// panic(fmt.Sprintf("cannot determine storage interface for disk interface '%s'", *d.Interface))
|
||||
return ""
|
||||
}
|
||||
|
||||
// EncodeOptions converts a CustomStorageDevice's common options to a URL value string.
|
||||
func (d CustomStorageDevice) EncodeOptions() string {
|
||||
values := []string{}
|
||||
|
||||
if d.AIO != nil {
|
||||
values = append(values, fmt.Sprintf("aio=%s", *d.AIO))
|
||||
}
|
||||
|
||||
if d.BackupEnabled != nil {
|
||||
if *d.BackupEnabled {
|
||||
values = append(values, "backup=1")
|
||||
} else {
|
||||
values = append(values, "backup=0")
|
||||
}
|
||||
}
|
||||
|
||||
if d.IOThread != nil {
|
||||
if *d.IOThread {
|
||||
values = append(values, "iothread=1")
|
||||
} else {
|
||||
values = append(values, "iothread=0")
|
||||
}
|
||||
}
|
||||
|
||||
if d.SSD != nil {
|
||||
if *d.SSD {
|
||||
values = append(values, "ssd=1")
|
||||
} else {
|
||||
values = append(values, "ssd=0")
|
||||
}
|
||||
}
|
||||
|
||||
if d.Discard != nil && *d.Discard != "" {
|
||||
values = append(values, fmt.Sprintf("discard=%s", *d.Discard))
|
||||
}
|
||||
|
||||
if d.Cache != nil && *d.Cache != "" {
|
||||
values = append(values, fmt.Sprintf("cache=%s", *d.Cache))
|
||||
}
|
||||
|
||||
if d.BurstableReadSpeedMbps != nil {
|
||||
values = append(values, fmt.Sprintf("mbps_rd_max=%d", *d.BurstableReadSpeedMbps))
|
||||
}
|
||||
|
||||
if d.BurstableWriteSpeedMbps != nil {
|
||||
values = append(values, fmt.Sprintf("mbps_wr_max=%d", *d.BurstableWriteSpeedMbps))
|
||||
}
|
||||
|
||||
if d.MaxReadSpeedMbps != nil {
|
||||
values = append(values, fmt.Sprintf("mbps_rd=%d", *d.MaxReadSpeedMbps))
|
||||
}
|
||||
|
||||
if d.MaxWriteSpeedMbps != nil {
|
||||
values = append(values, fmt.Sprintf("mbps_wr=%d", *d.MaxWriteSpeedMbps))
|
||||
}
|
||||
|
||||
return strings.Join(values, ",")
|
||||
}
|
||||
|
||||
// EncodeValues converts a CustomStorageDevice struct to a URL value.
|
||||
func (d CustomStorageDevice) EncodeValues(key string, v *url.Values) error {
|
||||
values := []string{
|
||||
fmt.Sprintf("file=%s", d.FileVolume),
|
||||
}
|
||||
|
||||
if d.Format != nil {
|
||||
values = append(values, fmt.Sprintf("format=%s", *d.Format))
|
||||
}
|
||||
|
||||
if d.Media != nil {
|
||||
values = append(values, fmt.Sprintf("media=%s", *d.Media))
|
||||
}
|
||||
|
||||
if d.Size != nil {
|
||||
values = append(values, fmt.Sprintf("size=%d", *d.Size))
|
||||
}
|
||||
|
||||
values = append(values, d.EncodeOptions())
|
||||
|
||||
v.Add(key, strings.Join(values, ","))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Copy returns a deep copy of the CustomStorageDevice.
|
||||
func (d CustomStorageDevice) Copy() *CustomStorageDevice {
|
||||
return &CustomStorageDevice{
|
||||
AIO: types.CopyString(d.AIO),
|
||||
BackupEnabled: d.BackupEnabled.Copy(),
|
||||
BurstableReadSpeedMbps: types.CopyInt(d.BurstableReadSpeedMbps),
|
||||
Cache: types.CopyString(d.Cache),
|
||||
BurstableWriteSpeedMbps: types.CopyInt(d.BurstableWriteSpeedMbps),
|
||||
Discard: types.CopyString(d.Discard),
|
||||
Enabled: d.Enabled,
|
||||
FileVolume: d.FileVolume,
|
||||
Format: types.CopyString(d.Format),
|
||||
IOThread: d.IOThread.Copy(),
|
||||
SSD: d.SSD.Copy(),
|
||||
MaxReadSpeedMbps: types.CopyInt(d.MaxReadSpeedMbps),
|
||||
MaxWriteSpeedMbps: types.CopyInt(d.MaxWriteSpeedMbps),
|
||||
Media: types.CopyString(d.Media),
|
||||
Size: d.Size.Copy(),
|
||||
Interface: types.CopyString(d.Interface),
|
||||
DatastoreID: types.CopyString(d.DatastoreID),
|
||||
FileID: types.CopyString(d.FileID),
|
||||
}
|
||||
}
|
||||
|
||||
// CustomStorageDevices handles map of QEMU storage device per disk interface.
|
||||
type CustomStorageDevices map[string]*CustomStorageDevice
|
||||
|
||||
// ByStorageInterface returns a map of CustomStorageDevices filtered by the given storage interface.
|
||||
func (d CustomStorageDevices) ByStorageInterface(storageInterface string) CustomStorageDevices {
|
||||
result := make(CustomStorageDevices)
|
||||
|
||||
for k, v := range d {
|
||||
if v.StorageInterface() == storageInterface {
|
||||
result[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// EncodeValues converts a CustomStorageDevices array to multiple URL values.
|
||||
func (d CustomStorageDevices) EncodeValues(_ string, v *url.Values) error {
|
||||
for s, d := range d {
|
||||
if d.Enabled {
|
||||
if err := d.EncodeValues(s, v); err != nil {
|
||||
return fmt.Errorf("error encoding storage device %s: %w", s, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
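To illustrate how the helpers in the new customstoragedevice.go interpret volume strings, here is a minimal standalone sketch. It assumes the import paths implied by the file layout above (proxmox/nodes/vms and proxmox/types); the volume name and VM IDs are made up for the example:

package main

import (
	"fmt"

	"github.com/bpg/terraform-provider-proxmox/proxmox/nodes/vms"
	"github.com/bpg/terraform-provider-proxmox/proxmox/types"
)

func main() {
	// A disk volume that is already allocated for VM 123 on a ZFS-style datastore.
	disk := vms.CustomStorageDevice{
		FileVolume: "local-zfs:vm-123-disk-0",
		Interface:  types.StrPtr("virtio0"),
	}

	fmt.Println(*disk.PathInDatastore()) // vm-123-disk-0
	fmt.Println(disk.IsOwnedBy(123))     // true
	fmt.Println(disk.IsOwnedBy(999))     // false
	fmt.Println(disk.StorageInterface()) // virtio
}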
@ -263,43 +263,36 @@ func (c *Client) RebootVMAsync(ctx context.Context, d *RebootRequestBody) (*stri
|
||||
}
|
||||
|
||||
// ResizeVMDisk resizes a virtual machine disk.
|
||||
func (c *Client) ResizeVMDisk(ctx context.Context, d *ResizeDiskRequestBody) error {
|
||||
var err error
|
||||
|
||||
tflog.Debug(ctx, "resize disk", map[string]interface{}{
|
||||
"disk": d.Disk,
|
||||
"size": d.Size,
|
||||
})
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
err = c.DoRequest(
|
||||
ctx,
|
||||
http.MethodPut,
|
||||
c.ExpandPath("resize"),
|
||||
d,
|
||||
nil,
|
||||
)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
tflog.Debug(ctx, "resize disk failed", map[string]interface{}{
|
||||
"retry": i,
|
||||
})
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
if ctx.Err() != nil {
|
||||
return fmt.Errorf("error resizing VM disk: %w", ctx.Err())
|
||||
}
|
||||
func (c *Client) ResizeVMDisk(ctx context.Context, d *ResizeDiskRequestBody, timeout int) error {
|
||||
taskID, err := c.ResizeVMDiskAsync(ctx, d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error resizing VM disk: %w", err)
|
||||
return fmt.Errorf("error waiting for VM disk resize: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResizeVMDiskAsync resizes a virtual machine disk asynchronously.
|
||||
func (c *Client) ResizeVMDiskAsync(ctx context.Context, d *ResizeDiskRequestBody) (*string, error) {
|
||||
resBody := &MoveDiskResponseBody{}
|
||||
|
||||
err := c.DoRequest(ctx, http.MethodPut, c.ExpandPath("resize"), d, resBody)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error moving VM disk: %w", err)
|
||||
}
|
||||
|
||||
if resBody.Data == nil {
|
||||
return nil, api.ErrNoDataObjectInResponse
|
||||
}
|
||||
|
||||
return resBody.Data, nil
|
||||
}
|
||||
|
||||
// ShutdownVM shuts down a virtual machine.
|
||||
func (c *Client) ShutdownVM(ctx context.Context, d *ShutdownRequestBody, timeout int) error {
|
||||
taskID, err := c.ShutdownVMAsync(ctx, d)
|
||||
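For context, a caller-side sketch of the refactored resize path. This is illustrative only: the helper name and the hard-coded "virtio0" interface are made up, and the request-body fields plus the trailing timeout argument (in seconds) follow the usage shown later in this diff in the VM resource:

// Illustrative sketch, not part of the commit: ResizeVMDisk now submits the
// resize and waits for the resulting Proxmox task instead of retrying the
// HTTP call in a loop.
func resizeDiskExample(ctx context.Context, vmAPI *vms.Client, sizeGB int64, timeoutSec int) error {
	return vmAPI.ResizeVMDisk(ctx, &vms.ResizeDiskRequestBody{
		Disk: "virtio0",
		Size: *types.DiskSizeFromGigabytes(sizeGB),
	}, timeoutSec)
}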
@ -594,8 +587,8 @@ func (c *Client) WaitForVMConfigUnlock(ctx context.Context, timeout int, delay i
return fmt.Errorf("timeout while waiting for VM \"%d\" configuration to become unlocked", c.VMID)
}

// WaitForVMState waits for a virtual machine to reach a specific state.
func (c *Client) WaitForVMState(ctx context.Context, state string, timeout int, delay int) error {
// WaitForVMStatus waits for a virtual machine to reach a specific status.
func (c *Client) WaitForVMStatus(ctx context.Context, state string, timeout int, delay int) error {
state = strings.ToLower(state)

timeDelay := int64(delay)
@ -164,87 +164,6 @@ type CustomStartupOrder struct {
|
||||
Up *int `json:"up,omitempty" url:"up,omitempty"`
|
||||
}
|
||||
|
||||
// CustomStorageDevice handles QEMU SATA device parameters.
|
||||
type CustomStorageDevice struct {
|
||||
AIO *string `json:"aio,omitempty" url:"aio,omitempty"`
|
||||
BackupEnabled *types.CustomBool `json:"backup,omitempty" url:"backup,omitempty,int"`
|
||||
BurstableReadSpeedMbps *int `json:"mbps_rd_max,omitempty" url:"mbps_rd_max,omitempty"`
|
||||
Cache *string `json:"cache,omitempty" url:"cache,omitempty"`
|
||||
BurstableWriteSpeedMbps *int `json:"mbps_wr_max,omitempty" url:"mbps_wr_max,omitempty"`
|
||||
Discard *string `json:"discard,omitempty" url:"discard,omitempty"`
|
||||
Enabled bool `json:"-" url:"-"`
|
||||
FileVolume string `json:"file" url:"file"`
|
||||
Format *string `json:"format,omitempty" url:"format,omitempty"`
|
||||
IOThread *types.CustomBool `json:"iothread,omitempty" url:"iothread,omitempty,int"`
|
||||
SSD *types.CustomBool `json:"ssd,omitempty" url:"ssd,omitempty,int"`
|
||||
MaxReadSpeedMbps *int `json:"mbps_rd,omitempty" url:"mbps_rd,omitempty"`
|
||||
MaxWriteSpeedMbps *int `json:"mbps_wr,omitempty" url:"mbps_wr,omitempty"`
|
||||
Media *string `json:"media,omitempty" url:"media,omitempty"`
|
||||
Size *types.DiskSize `json:"size,omitempty" url:"size,omitempty"`
|
||||
Interface *string
|
||||
ID *string
|
||||
FileID *string
|
||||
}
|
||||
|
||||
// PathInDatastore returns path part of FileVolume or nil if it is not yet allocated.
|
||||
func (d CustomStorageDevice) PathInDatastore() *string {
|
||||
probablyDatastoreID, pathInDatastore, hasDatastoreID := strings.Cut(d.FileVolume, ":")
|
||||
if !hasDatastoreID {
|
||||
// when no ':' separator is found, 'Cut' places the whole string to 'probablyDatastoreID',
|
||||
// we want it in 'pathInDatastore' (as it is absolute filesystem path)
|
||||
pathInDatastore = probablyDatastoreID
|
||||
|
||||
return &pathInDatastore
|
||||
}
|
||||
|
||||
pathInDatastoreWithoutDigits := strings.Map(
|
||||
func(c rune) rune {
|
||||
if c < '0' || c > '9' {
|
||||
return -1
|
||||
}
|
||||
return c
|
||||
},
|
||||
pathInDatastore)
|
||||
|
||||
if pathInDatastoreWithoutDigits == "" {
|
||||
// FileVolume is not yet allocated, it is in the "STORAGE_ID:SIZE_IN_GiB" format
|
||||
return nil
|
||||
}
|
||||
|
||||
return &pathInDatastore
|
||||
}
|
||||
|
||||
// IsOwnedBy returns true, if CustomStorageDevice is owned by given VM. Not yet allocated volumes are not owned by any VM.
|
||||
func (d CustomStorageDevice) IsOwnedBy(vmID int) bool {
|
||||
pathInDatastore := d.PathInDatastore()
|
||||
if pathInDatastore == nil {
|
||||
// not yet allocated volume, consider disk not owned by any VM
|
||||
// NOTE: if needed, create IsOwnedByOtherThan(vmId) instead of changing this return value.
|
||||
return false
|
||||
}
|
||||
|
||||
// ZFS uses "local-zfs:vm-123-disk-0"
|
||||
if strings.HasPrefix(*pathInDatastore, fmt.Sprintf("vm-%d-", vmID)) {
|
||||
return true
|
||||
}
|
||||
|
||||
// directory uses "local:123/vm-123-disk-0"
|
||||
if strings.HasPrefix(*pathInDatastore, fmt.Sprintf("%d/vm-%d-", vmID, vmID)) {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// IsCloudInitDrive returns true, if CustomStorageDevice is a cloud-init drive.
|
||||
func (d CustomStorageDevice) IsCloudInitDrive(vmID int) bool {
|
||||
return d.Media != nil && *d.Media == "cdrom" &&
|
||||
strings.Contains(d.FileVolume, fmt.Sprintf("vm-%d-cloudinit", vmID))
|
||||
}
|
||||
|
||||
// CustomStorageDevices handles QEMU SATA device parameters.
|
||||
type CustomStorageDevices map[string]CustomStorageDevice
|
||||
|
||||
// CustomTPMState handles QEMU TPM state parameters.
|
||||
type CustomTPMState struct {
|
||||
FileVolume string `json:"file" url:"file"`
|
||||
@ -1220,94 +1139,6 @@ func (r CustomStartupOrder) EncodeValues(key string, v *url.Values) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeValues converts a CustomStorageDevice struct to a URL value.
|
||||
func (d CustomStorageDevice) EncodeValues(key string, v *url.Values) error {
|
||||
values := []string{
|
||||
fmt.Sprintf("file=%s", d.FileVolume),
|
||||
}
|
||||
|
||||
if d.AIO != nil {
|
||||
values = append(values, fmt.Sprintf("aio=%s", *d.AIO))
|
||||
}
|
||||
|
||||
if d.BackupEnabled != nil {
|
||||
if *d.BackupEnabled {
|
||||
values = append(values, "backup=1")
|
||||
} else {
|
||||
values = append(values, "backup=0")
|
||||
}
|
||||
}
|
||||
|
||||
if d.BurstableReadSpeedMbps != nil {
|
||||
values = append(values, fmt.Sprintf("mbps_rd_max=%d", *d.BurstableReadSpeedMbps))
|
||||
}
|
||||
|
||||
if d.BurstableWriteSpeedMbps != nil {
|
||||
values = append(values, fmt.Sprintf("mbps_wr_max=%d", *d.BurstableWriteSpeedMbps))
|
||||
}
|
||||
|
||||
if d.Format != nil {
|
||||
values = append(values, fmt.Sprintf("format=%s", *d.Format))
|
||||
}
|
||||
|
||||
if d.MaxReadSpeedMbps != nil {
|
||||
values = append(values, fmt.Sprintf("mbps_rd=%d", *d.MaxReadSpeedMbps))
|
||||
}
|
||||
|
||||
if d.MaxWriteSpeedMbps != nil {
|
||||
values = append(values, fmt.Sprintf("mbps_wr=%d", *d.MaxWriteSpeedMbps))
|
||||
}
|
||||
|
||||
if d.Media != nil {
|
||||
values = append(values, fmt.Sprintf("media=%s", *d.Media))
|
||||
}
|
||||
|
||||
if d.Size != nil {
|
||||
values = append(values, fmt.Sprintf("size=%s", *d.Size))
|
||||
}
|
||||
|
||||
if d.IOThread != nil {
|
||||
if *d.IOThread {
|
||||
values = append(values, "iothread=1")
|
||||
} else {
|
||||
values = append(values, "iothread=0")
|
||||
}
|
||||
}
|
||||
|
||||
if d.SSD != nil {
|
||||
if *d.SSD {
|
||||
values = append(values, "ssd=1")
|
||||
} else {
|
||||
values = append(values, "ssd=0")
|
||||
}
|
||||
}
|
||||
|
||||
if d.Discard != nil && *d.Discard != "" {
|
||||
values = append(values, fmt.Sprintf("discard=%s", *d.Discard))
|
||||
}
|
||||
|
||||
if d.Cache != nil && *d.Cache != "" {
|
||||
values = append(values, fmt.Sprintf("cache=%s", *d.Cache))
|
||||
}
|
||||
|
||||
v.Add(key, strings.Join(values, ","))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeValues converts a CustomStorageDevices array to multiple URL values.
|
||||
func (d CustomStorageDevices) EncodeValues(_ string, v *url.Values) error {
|
||||
for s, d := range d {
|
||||
if d.Enabled {
|
||||
if err := d.EncodeValues(s, v); err != nil {
|
||||
return fmt.Errorf("error encoding storage device %s: %w", s, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeValues converts a CustomTPMState struct to a URL value.
|
||||
func (r CustomTPMState) EncodeValues(key string, v *url.Values) error {
|
||||
values := []string{
|
||||
|
@ -34,7 +34,7 @@ func TestCustomStorageDevice_UnmarshalJSON(t *testing.T) {
|
||||
Enabled: true,
|
||||
FileVolume: "local-lvm:vm-2041-disk-0",
|
||||
IOThread: types.BoolPtr(true),
|
||||
Size: &ds8gig,
|
||||
Size: ds8gig,
|
||||
SSD: types.BoolPtr(true),
|
||||
},
|
||||
},
|
||||
@ -47,7 +47,7 @@ func TestCustomStorageDevice_UnmarshalJSON(t *testing.T) {
|
||||
FileVolume: "nfs:2041/vm-2041-disk-0.raw",
|
||||
Format: types.StrPtr("raw"),
|
||||
IOThread: types.BoolPtr(true),
|
||||
Size: &ds8gig,
|
||||
Size: ds8gig,
|
||||
SSD: types.BoolPtr(true),
|
||||
},
|
||||
},
|
||||
@ -120,6 +120,102 @@ func TestCustomStorageDevice_IsCloudInitDrive(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestCustomStorageDevice_StorageInterface(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
device CustomStorageDevice
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "virtio0",
|
||||
device: CustomStorageDevice{
|
||||
Interface: types.StrPtr("virtio0"),
|
||||
},
|
||||
want: "virtio",
|
||||
}, {
|
||||
name: "scsi13",
|
||||
device: CustomStorageDevice{
|
||||
Interface: types.StrPtr("scsi13"),
|
||||
},
|
||||
want: "scsi",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
got := tt.device.StorageInterface()
|
||||
assert.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCustomStorageDevices_ByStorageInterface(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
iface string
|
||||
devices CustomStorageDevices
|
||||
want CustomStorageDevices
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
iface: "virtio",
|
||||
devices: CustomStorageDevices{},
|
||||
want: CustomStorageDevices{},
|
||||
},
|
||||
{
|
||||
name: "not in the list",
|
||||
iface: "sata",
|
||||
devices: CustomStorageDevices{
|
||||
"virtio0": &CustomStorageDevice{
|
||||
Interface: types.StrPtr("virtio0"),
|
||||
},
|
||||
"scsi13": &CustomStorageDevice{
|
||||
Interface: types.StrPtr("scsi13"),
|
||||
},
|
||||
},
|
||||
want: CustomStorageDevices{},
|
||||
},
|
||||
{
|
||||
name: "not in the list",
|
||||
iface: "virtio",
|
||||
devices: CustomStorageDevices{
|
||||
"virtio0": &CustomStorageDevice{
|
||||
Interface: types.StrPtr("virtio0"),
|
||||
},
|
||||
"scsi13": &CustomStorageDevice{
|
||||
Interface: types.StrPtr("scsi13"),
|
||||
},
|
||||
"virtio1": &CustomStorageDevice{
|
||||
Interface: types.StrPtr("virtio1"),
|
||||
},
|
||||
},
|
||||
want: CustomStorageDevices{
|
||||
"virtio0": &CustomStorageDevice{
|
||||
Interface: types.StrPtr("virtio0"),
|
||||
},
|
||||
"virtio1": &CustomStorageDevice{
|
||||
Interface: types.StrPtr("virtio1"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
got := tt.devices.ByStorageInterface(tt.iface)
|
||||
assert.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCustomPCIDevice_UnmarshalJSON(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
@ -79,6 +79,15 @@ func (r *CustomBool) FromValue(tfValue types.Bool) {
*r = CustomBool(tfValue.ValueBool())
}

// Copy returns a copy of the boolean.
func (r *CustomBool) Copy() *CustomBool {
if r == nil {
return nil
}

return BoolPtr(bool(*r))
}

// MarshalJSON converts a boolean to a JSON value.
func (r *CustomCommaSeparatedList) MarshalJSON() ([]byte, error) {
s := strings.Join(*r, ",")
@ -23,27 +23,37 @@ var sizeRegex = regexp.MustCompile(`(?i)^(\d+(\.\d+)?)(k|kb|kib|m|mb|mib|g|gb|gi
|
||||
type DiskSize int64
|
||||
|
||||
// String returns the string representation of the disk size.
|
||||
func (r DiskSize) String() string {
|
||||
func (r *DiskSize) String() string {
|
||||
return FormatDiskSize(r)
|
||||
}
|
||||
|
||||
// InMegabytes returns the disk size in megabytes.
|
||||
func (r DiskSize) InMegabytes() int64 {
|
||||
return int64(r) / 1024 / 1024
|
||||
func (r *DiskSize) InMegabytes() int64 {
|
||||
if r == nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
return int64(*r) / 1024 / 1024
|
||||
}
|
||||
|
||||
// InGigabytes returns the disk size in gigabytes.
|
||||
func (r DiskSize) InGigabytes() int64 {
|
||||
return int64(r) / 1024 / 1024 / 1024
|
||||
func (r *DiskSize) InGigabytes() int64 {
|
||||
if r == nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
return int64(*r) / 1024 / 1024 / 1024
|
||||
}
|
||||
|
||||
// DiskSizeFromGigabytes creates a DiskSize from gigabytes.
|
||||
func DiskSizeFromGigabytes(size int64) DiskSize {
|
||||
return DiskSize(size * 1024 * 1024 * 1024)
|
||||
func DiskSizeFromGigabytes(size int64) *DiskSize {
|
||||
ds := DiskSize(size * 1024 * 1024 * 1024)
|
||||
|
||||
return &ds
|
||||
}
|
||||
|
||||
// MarshalJSON marshals a disk size into a Proxmox API `<DiskSize>` string.
|
||||
func (r DiskSize) MarshalJSON() ([]byte, error) {
|
||||
func (r *DiskSize) MarshalJSON() ([]byte, error) {
|
||||
bytes, err := json.Marshal(FormatDiskSize(r))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot marshal disk size: %w", err)
|
||||
@ -66,57 +76,68 @@ func (r *DiskSize) UnmarshalJSON(b []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Copy returns a deep copy of the disk size.
|
||||
func (r *DiskSize) Copy() *DiskSize {
|
||||
if r == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
c := *r
|
||||
|
||||
return &c
|
||||
}
|
||||
|
||||
// ParseDiskSize parses a disk size string into a number of bytes.
|
||||
func ParseDiskSize(size string) (DiskSize, error) {
|
||||
matches := sizeRegex.FindStringSubmatch(size)
|
||||
if len(matches) > 0 {
|
||||
fsize, err := strconv.ParseFloat(matches[1], 64)
|
||||
fSize, err := strconv.ParseFloat(matches[1], 64)
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("cannot parse disk size \"%s\": %w", size, err)
|
||||
}
|
||||
|
||||
switch strings.ToLower(matches[3]) {
|
||||
case "k", "kb", "kib":
|
||||
fsize *= 1024
|
||||
fSize *= 1024
|
||||
case "m", "mb", "mib":
|
||||
fsize = fsize * 1024 * 1024
|
||||
fSize = fSize * 1024 * 1024
|
||||
case "g", "gb", "gib":
|
||||
fsize = fsize * 1024 * 1024 * 1024
|
||||
fSize = fSize * 1024 * 1024 * 1024
|
||||
case "t", "tb", "tib":
|
||||
fsize = fsize * 1024 * 1024 * 1024 * 1024
|
||||
fSize = fSize * 1024 * 1024 * 1024 * 1024
|
||||
}
|
||||
|
||||
return DiskSize(math.Ceil(fsize)), nil
|
||||
return DiskSize(math.Ceil(fSize)), nil
|
||||
}
|
||||
|
||||
return -1, fmt.Errorf("cannot parse disk size \"%s\"", size)
|
||||
}
|
||||
|
||||
// FormatDiskSize turns a number of bytes into a disk size string.
|
||||
func FormatDiskSize(size DiskSize) string {
|
||||
if size < 0 {
|
||||
func FormatDiskSize(size *DiskSize) string {
|
||||
if size == nil || *size < 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
if size < 1024 {
|
||||
return fmt.Sprintf("%d", size)
|
||||
if *size < 1024 {
|
||||
return fmt.Sprintf("%d", *size)
|
||||
}
|
||||
|
||||
round := func(f float64) string {
|
||||
return strconv.FormatFloat(math.Ceil(f*100)/100, 'f', -1, 64)
|
||||
}
|
||||
|
||||
if size < 1024*1024 {
|
||||
return round(float64(size)/1024) + "K"
|
||||
if *size < 1024*1024 {
|
||||
return round(float64(*size)/1024) + "K"
|
||||
}
|
||||
|
||||
if size < 1024*1024*1024 {
|
||||
return round(float64(size)/1024/1024) + "M"
|
||||
if *size < 1024*1024*1024 {
|
||||
return round(float64(*size)/1024/1024) + "M"
|
||||
}
|
||||
|
||||
if size < 1024*1024*1024*1024 {
|
||||
return round(float64(size)/1024/1024/1024) + "G"
|
||||
if *size < 1024*1024*1024*1024 {
|
||||
return round(float64(*size)/1024/1024/1024) + "G"
|
||||
}
|
||||
|
||||
return round(float64(size)/1024/1024/1024/1024) + "T"
|
||||
return round(float64(*size)/1024/1024/1024/1024) + "T"
|
||||
}
|
||||
|
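A short sketch of the reworked DiskSize API, exercising only the functions shown in the hunk above (DiskSizeFromGigabytes now returns a pointer, and the accessors use nil-safe pointer receivers); the values are arbitrary:

package main

import (
	"fmt"

	"github.com/bpg/terraform-provider-proxmox/proxmox/types"
)

func main() {
	// DiskSizeFromGigabytes returns *DiskSize, matching the pointer receivers
	// used by String, InMegabytes and InGigabytes.
	ds := types.DiskSizeFromGigabytes(8)
	fmt.Println(ds.InGigabytes(), ds.String()) // 8 8G

	parsed, err := types.ParseDiskSize("1536M")
	if err != nil {
		panic(err)
	}

	fmt.Println(parsed.InMegabytes()) // 1536
}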
@ -41,11 +41,14 @@ func TestParseDiskSize(t *testing.T) {
|
||||
tt := test
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
got, err := ParseDiskSize(tt.size)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("parseDiskSize() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
if int64(got) != tt.want {
|
||||
t.Errorf("parseDiskSize() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
@ -72,7 +75,10 @@ func TestFormatDiskSize(t *testing.T) {
|
||||
tt := test
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
if got := FormatDiskSize(DiskSize(tt.size)); got != tt.want {
|
||||
|
||||
size := DiskSize(tt.size)
|
||||
|
||||
if got := FormatDiskSize(&size); got != tt.want {
|
||||
t.Errorf("formatDiskSize() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
@ -99,9 +105,12 @@ func TestToFromGigabytes(t *testing.T) {
|
||||
tt := test
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ds := DiskSizeFromGigabytes(tt.size)
|
||||
|
||||
gb := ds.InGigabytes()
|
||||
assert.Equal(t, tt.size, gb)
|
||||
|
||||
if got := ds.String(); got != tt.want {
|
||||
t.Errorf("DiskSize.String() = %v, want %v", got, tt.want)
|
||||
}
|
||||
|
@ -11,8 +11,31 @@ func StrPtr(s string) *string {
return &s
}

// IntPtr returns a pointer to an int.
func IntPtr(i int) *int {
return &i
}

// BoolPtr returns a pointer to a bool.
func BoolPtr(s bool) *CustomBool {
customBool := CustomBool(s)
return &customBool
}

// CopyString copies content of a string pointer.
func CopyString(s *string) *string {
if s == nil {
return nil
}

return StrPtr(*s)
}

// CopyInt copies content of an int pointer.
func CopyInt(i *int) *int {
if i == nil {
return nil
}

return IntPtr(*i)
}
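A tiny usage sketch of the new copy helpers (the values are hypothetical): CopyString and CopyInt return nil for nil input and otherwise allocate a fresh pointer, so the copy can be mutated independently, while BoolPtr wraps a bool into *CustomBool.

package main

import (
	"fmt"

	"github.com/bpg/terraform-provider-proxmox/proxmox/types"
)

func main() {
	orig := types.StrPtr("local-lvm")

	// CopyString allocates a new pointer, so mutating the copy leaves the
	// original value untouched.
	copied := types.CopyString(orig)
	*copied = "local-zfs"
	fmt.Println(*orig, *copied) // local-lvm local-zfs

	// Both copy helpers are nil-safe.
	fmt.Println(types.CopyInt(nil) == nil) // true

	// BoolPtr returns *types.CustomBool rather than *bool.
	ssd := types.BoolPtr(true)
	fmt.Println(bool(*ssd)) // true
}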
@ -22,6 +22,7 @@ import (
|
||||
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
|
||||
"github.com/bpg/terraform-provider-proxmox/proxmoxtf/resource/validator"
|
||||
"github.com/bpg/terraform-provider-proxmox/proxmoxtf/structure"
|
||||
"github.com/bpg/terraform-provider-proxmox/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -63,6 +64,7 @@ const (
|
||||
dvResourceVirtualEnvironmentContainerNetworkInterfaceMACAddress = ""
|
||||
dvResourceVirtualEnvironmentContainerNetworkInterfaceRateLimit = 0
|
||||
dvResourceVirtualEnvironmentContainerNetworkInterfaceVLANID = 0
|
||||
dvResourceVirtualEnvironmentContainerNetworkInterfaceMTU = 0
|
||||
dvResourceVirtualEnvironmentContainerOperatingSystemType = "unmanaged"
|
||||
dvResourceVirtualEnvironmentContainerPoolID = ""
|
||||
dvResourceVirtualEnvironmentContainerStarted = true
|
||||
@ -710,11 +712,11 @@ func Container() *schema.Resource {
|
||||
Optional: true,
|
||||
Default: dvResourceVirtualEnvironmentContainerNetworkInterfaceVLANID,
|
||||
},
|
||||
mkResourceVirtualEnvironmentVMNetworkDeviceMTU: {
|
||||
mkResourceVirtualEnvironmentContainerNetworkInterfaceMTU: {
|
||||
Type: schema.TypeInt,
|
||||
Description: "Maximum transmission unit (MTU)",
|
||||
Optional: true,
|
||||
Default: dvResourceVirtualEnvironmentVMNetworkDeviceMTU,
|
||||
Default: dvResourceVirtualEnvironmentContainerNetworkInterfaceMTU,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -1010,7 +1012,7 @@ func containerCreateClone(ctx context.Context, d *schema.ResourceData, m interfa
|
||||
deprecatedServer := initializationDNSBlock[mkResourceVirtualEnvironmentContainerInitializationDNSServer].(string)
|
||||
|
||||
if len(servers) > 0 {
|
||||
nameserver := strings.Join(ConvertToStringSlice(servers), " ")
|
||||
nameserver := strings.Join(utils.ConvertToStringSlice(servers), " ")
|
||||
|
||||
updateBody.DNSServer = &nameserver
|
||||
} else {
|
||||
@ -1137,7 +1139,7 @@ func containerCreateClone(ctx context.Context, d *schema.ResourceData, m interfa
|
||||
name := networkInterfaceMap[mkResourceVirtualEnvironmentContainerNetworkInterfaceName].(string)
|
||||
rateLimit := networkInterfaceMap[mkResourceVirtualEnvironmentContainerNetworkInterfaceRateLimit].(float64)
|
||||
vlanID := networkInterfaceMap[mkResourceVirtualEnvironmentContainerNetworkInterfaceVLANID].(int)
|
||||
mtu, _ := networkInterfaceMap[mkResourceVirtualEnvironmentVMNetworkDeviceMTU].(int)
|
||||
mtu, _ := networkInterfaceMap[mkResourceVirtualEnvironmentContainerNetworkInterfaceMTU].(int)
|
||||
|
||||
if bridge != "" {
|
||||
networkInterfaceObject.Bridge = &bridge
|
||||
@ -1332,7 +1334,7 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
|
||||
deprecatedServer := initializationDNSBlock[mkResourceVirtualEnvironmentContainerInitializationDNSServer].(string)
|
||||
|
||||
if len(servers) > 0 {
|
||||
nameserver := strings.Join(ConvertToStringSlice(servers), " ")
|
||||
nameserver := strings.Join(utils.ConvertToStringSlice(servers), " ")
|
||||
|
||||
initializationDNSServer = nameserver
|
||||
} else {
|
||||
@ -2545,7 +2547,7 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{})
|
||||
resource := Container()
|
||||
|
||||
// Retrieve the clone argument as the update logic varies for clones.
|
||||
clone := d.Get(mkResourceVirtualEnvironmentVMClone).([]interface{})
|
||||
clone := d.Get(mkResourceVirtualEnvironmentContainerClone).([]interface{})
|
||||
|
||||
// Prepare the new primitive values.
|
||||
description := d.Get(mkResourceVirtualEnvironmentContainerDescription).(string)
|
||||
@ -2638,7 +2640,7 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{})
|
||||
deprecatedServer := initializationDNSBlock[mkResourceVirtualEnvironmentContainerInitializationDNSServer].(string)
|
||||
|
||||
if len(servers) > 0 {
|
||||
initializationDNSServer = strings.Join(ConvertToStringSlice(servers), " ")
|
||||
initializationDNSServer = strings.Join(utils.ConvertToStringSlice(servers), " ")
|
||||
} else {
|
||||
initializationDNSServer = deprecatedServer
|
||||
}
|
||||
|
@ -225,7 +225,7 @@ func File() *schema.Resource {
|
||||
DeleteContext: fileDelete,
|
||||
UpdateContext: fileUpdate,
|
||||
Importer: &schema.ResourceImporter{
|
||||
StateContext: func(ctx context.Context, d *schema.ResourceData, i interface{}) ([]*schema.ResourceData, error) {
|
||||
StateContext: func(_ context.Context, d *schema.ResourceData, _ interface{}) ([]*schema.ResourceData, error) {
|
||||
node, volID, err := fileParseImportID(d.Id())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -899,8 +899,8 @@ func readURL(
|
||||
fileModificationDate := ""
|
||||
fileSize := res.ContentLength
|
||||
fileTag := ""
|
||||
|
||||
httpLastModified := res.Header.Get("Last-Modified")
|
||||
|
||||
if httpLastModified != "" {
|
||||
var timeParsed time.Time
|
||||
timeParsed, err = time.Parse(time.RFC1123, httpLastModified)
|
||||
@ -916,8 +916,10 @@ func readURL(
|
||||
}
|
||||
|
||||
httpTag := res.Header.Get("ETag")
|
||||
|
||||
if httpTag != "" {
|
||||
httpTagParts := strings.Split(httpTag, "\"")
|
||||
|
||||
if len(httpTagParts) > 1 {
|
||||
fileTag = httpTagParts[1]
|
||||
}
|
||||
|
@ -624,8 +624,9 @@ func VM() *schema.Resource {
|
||||
StateFunc: func(i interface{}) string {
|
||||
// PVE always adds a newline to the description, so we have to do the same,
|
||||
// also taking in account the CLRF case (Windows)
|
||||
// Unlike container, VM description does not have trailing "\n"
|
||||
if i.(string) != "" {
|
||||
return strings.ReplaceAll(strings.TrimSpace(i.(string)), "\r\n", "\n") + "\n"
|
||||
return strings.ReplaceAll(strings.TrimSpace(i.(string)), "\r\n", "\n")
|
||||
}
|
||||
|
||||
return ""
|
||||
@ -1187,6 +1188,7 @@ func VM() *schema.Resource {
|
||||
Type: schema.TypeList,
|
||||
Description: "The MAC addresses for the network interfaces",
|
||||
Computed: true,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
mkResourceVirtualEnvironmentVMMemory: {
|
||||
@ -1773,7 +1775,7 @@ func vmStart(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData) dia
|
||||
})
|
||||
}
|
||||
|
||||
return append(diags, diag.FromErr(vmAPI.WaitForVMState(ctx, "running", startVMTimeout, 1))...)
|
||||
return append(diags, diag.FromErr(vmAPI.WaitForVMStatus(ctx, "running", startVMTimeout, 1))...)
|
||||
}
|
||||
|
||||
// Shutdown the VM, then wait for it to actually shut down (it may not be shut down immediately if
|
||||
@ -1792,7 +1794,7 @@ func vmShutdown(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData)
|
||||
return diag.FromErr(e)
|
||||
}
|
||||
|
||||
return diag.FromErr(vmAPI.WaitForVMState(ctx, "stopped", shutdownTimeout, 1))
|
||||
return diag.FromErr(vmAPI.WaitForVMStatus(ctx, "stopped", shutdownTimeout, 1))
|
||||
}
|
||||
|
||||
// Forcefully stop the VM, then wait for it to actually stop.
|
||||
@ -1806,7 +1808,7 @@ func vmStop(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData) diag
|
||||
return diag.FromErr(e)
|
||||
}
|
||||
|
||||
return diag.FromErr(vmAPI.WaitForVMState(ctx, "stopped", stopTimeout, 1))
|
||||
return diag.FromErr(vmAPI.WaitForVMStatus(ctx, "stopped", stopTimeout, 1))
|
||||
}
|
||||
|
||||
func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
@ -2027,16 +2029,16 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
||||
|
||||
if len(cdrom) > 0 || len(initialization) > 0 {
|
||||
ideDevices = vms.CustomStorageDevices{
|
||||
"ide0": vms.CustomStorageDevice{
|
||||
"ide0": &vms.CustomStorageDevice{
|
||||
Enabled: false,
|
||||
},
|
||||
"ide1": vms.CustomStorageDevice{
|
||||
"ide1": &vms.CustomStorageDevice{
|
||||
Enabled: false,
|
||||
},
|
||||
"ide2": vms.CustomStorageDevice{
|
||||
"ide2": &vms.CustomStorageDevice{
|
||||
Enabled: false,
|
||||
},
|
||||
"ide3": vms.CustomStorageDevice{
|
||||
"ide3": &vms.CustomStorageDevice{
|
||||
Enabled: false,
|
||||
},
|
||||
}
|
||||
@ -2055,7 +2057,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
||||
|
||||
cdromMedia := "cdrom"
|
||||
|
||||
ideDevices[cdromInterface] = vms.CustomStorageDevice{
|
||||
ideDevices[cdromInterface] = &vms.CustomStorageDevice{
|
||||
Enabled: cdromEnabled,
|
||||
FileVolume: cdromFileID,
|
||||
Media: &cdromMedia,
|
||||
@ -2128,7 +2130,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
||||
|
||||
cdromCloudInitFileID := fmt.Sprintf("%s:cloudinit", initializationDatastoreID)
|
||||
cdromCloudInitMedia := "cdrom"
|
||||
ideDevices[initializationInterface] = vms.CustomStorageDevice{
|
||||
ideDevices[initializationInterface] = &vms.CustomStorageDevice{
|
||||
Enabled: cdromCloudInitEnabled,
|
||||
FileVolume: cdromCloudInitFileID,
|
||||
Media: &cdromCloudInitMedia,
|
||||
@ -2318,7 +2320,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
||||
|
||||
if diskSize < currentDiskInfo.Size.InGigabytes() {
|
||||
return diag.Errorf(
|
||||
"disk resize fails requests size (%dG) is lower than current size (%s)",
|
||||
"disk resize fails requests size (%dG) is lower than current size (%d)",
|
||||
diskSize,
|
||||
*currentDiskInfo.Size,
|
||||
)
|
||||
@ -2334,7 +2336,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
||||
|
||||
diskResizeBody := &vms.ResizeDiskRequestBody{
|
||||
Disk: diskInterface,
|
||||
Size: types.DiskSizeFromGigabytes(diskSize),
|
||||
Size: *types.DiskSizeFromGigabytes(diskSize),
|
||||
}
|
||||
|
||||
moveDisk := false
|
||||
@ -2348,17 +2350,17 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
||||
}
|
||||
}
|
||||
|
||||
if moveDisk {
|
||||
moveDiskTimeout := d.Get(mkResourceVirtualEnvironmentVMTimeoutMoveDisk).(int)
|
||||
timeout := d.Get(mkResourceVirtualEnvironmentVMTimeoutMoveDisk).(int)
|
||||
|
||||
e = vmAPI.MoveVMDisk(ctx, diskMoveBody, moveDiskTimeout)
|
||||
if moveDisk {
|
||||
e = vmAPI.MoveVMDisk(ctx, diskMoveBody, timeout)
|
||||
if e != nil {
|
||||
return diag.FromErr(e)
|
||||
}
|
||||
}
|
||||
|
||||
if diskSize > currentDiskInfo.Size.InGigabytes() {
|
||||
e = vmAPI.ResizeVMDisk(ctx, diskResizeBody)
|
||||
e = vmAPI.ResizeVMDisk(ctx, diskResizeBody, timeout)
|
||||
if e != nil {
|
||||
return diag.FromErr(e)
|
||||
}
|
||||
@ -2741,12 +2743,12 @@ func vmCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{})
ideDevice2Media := "cdrom"
ideDevices := vms.CustomStorageDevices{
cdromCloudInitInterface: vms.CustomStorageDevice{
cdromCloudInitInterface: &vms.CustomStorageDevice{
Enabled: cdromCloudInitEnabled,
FileVolume: cdromCloudInitFileID,
Media: &ideDevice2Media,
},
cdromInterface: vms.CustomStorageDevice{
cdromInterface: &vms.CustomStorageDevice{
Enabled: cdromEnabled,
FileVolume: cdromFileID,
Media: &ideDevice2Media,

@ -3272,7 +3274,7 @@ func vmGetCPUArchitectureValidator() schema.SchemaValidateDiagFunc {
func vmGetDiskDeviceObjects(
d *schema.ResourceData,
disks []interface{},
) (map[string]map[string]vms.CustomStorageDevice, error) {
) (map[string]map[string]*vms.CustomStorageDevice, error) {
var diskDevice []interface{}

if disks != nil {

@ -3281,11 +3283,11 @@ func vmGetDiskDeviceObjects(
diskDevice = d.Get(mkResourceVirtualEnvironmentVMDisk).([]interface{})
}

diskDeviceObjects := map[string]map[string]vms.CustomStorageDevice{}
diskDeviceObjects := map[string]map[string]*vms.CustomStorageDevice{}
resource := VM()

for _, diskEntry := range diskDevice {
diskDevice := vms.CustomStorageDevice{
diskDevice := &vms.CustomStorageDevice{
Enabled: true,
}

@ -3335,12 +3337,12 @@ func vmGetDiskDeviceObjects(
diskDevice.FileVolume = fmt.Sprintf("%s:%d", datastoreID, size)
}

diskDevice.ID = &datastoreID
diskDevice.DatastoreID = &datastoreID
diskDevice.Interface = &diskInterface
diskDevice.Format = &fileFormat
diskDevice.FileID = &fileID
diskSize := types.DiskSizeFromGigabytes(int64(size))
diskDevice.Size = &diskSize
diskDevice.Size = diskSize
diskDevice.IOThread = &ioThread
diskDevice.Discard = &discard
diskDevice.Cache = &cache

@ -3384,7 +3386,7 @@ func vmGetDiskDeviceObjects(
}

if _, present := diskDeviceObjects[baseDiskInterface]; !present {
diskDeviceObjects[baseDiskInterface] = map[string]vms.CustomStorageDevice{}
diskDeviceObjects[baseDiskInterface] = map[string]*vms.CustomStorageDevice{}
}

diskDeviceObjects[baseDiskInterface][diskInterface] = diskDevice
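`vmGetDiskDeviceObjects` now returns `map[string]map[string]*vms.CustomStorageDevice`: devices grouped by the digit-less interface prefix and keyed by the full interface name, with pointer values. A small illustration of that shape, using a simplified device type and a crude prefix helper (both are assumptions, not the provider's code):

```go
package main

import "fmt"

// device is a simplified stand-in for vms.CustomStorageDevice.
type device struct {
	Interface   string
	DatastoreID string
	SizeGB      int64
}

func main() {
	// Grouped by prefix ("scsi", "virtio", ...), keyed by the full interface name,
	// mirroring the map[string]map[string]*device return shape.
	diskDeviceObjects := map[string]map[string]*device{}

	add := func(iface, datastore string, sizeGB int64) {
		prefix := iface[:len(iface)-1] // crude diskDigitPrefix stand-in for single-digit suffixes
		if _, ok := diskDeviceObjects[prefix]; !ok {
			diskDeviceObjects[prefix] = map[string]*device{}
		}
		diskDeviceObjects[prefix][iface] = &device{Interface: iface, DatastoreID: datastore, SizeGB: sizeGB}
	}

	add("scsi0", "local-lvm", 8)
	add("virtio1", "local-lvm", 20)

	// Pointer values let later passes tweak a device without re-inserting it.
	diskDeviceObjects["scsi"]["scsi0"].SizeGB = 16

	fmt.Println(diskDeviceObjects["scsi"]["scsi0"])
}
```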
@ -3435,11 +3437,11 @@ func vmGetEfiDiskAsStorageDevice(d *schema.ResourceData, disk []interface{}) (*v
diskInterface := fmt.Sprint(baseDiskInterface, id)

storageDevice = &vms.CustomStorageDevice{
Enabled: true,
FileVolume: efiDisk.FileVolume,
Format: efiDisk.Format,
Interface: &diskInterface,
ID: &id,
Enabled: true,
FileVolume: efiDisk.FileVolume,
Format: efiDisk.Format,
Interface: &diskInterface,
DatastoreID: &id,
}

if efiDisk.Type != nil {

@ -3493,10 +3495,10 @@ func vmGetTPMStateAsStorageDevice(d *schema.ResourceData, disk []interface{}) *v
diskInterface := fmt.Sprint(baseDiskInterface, id)

storageDevice = &vms.CustomStorageDevice{
Enabled: true,
FileVolume: tpmState.FileVolume,
Interface: &diskInterface,
ID: &id,
Enabled: true,
FileVolume: tpmState.FileVolume,
Interface: &diskInterface,
DatastoreID: &id,
}
}
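In the EFI disk and TPM state helpers above, the value that used to be assigned to `ID` is now assigned to `DatastoreID`, which better matches what it actually holds. A hedged before/after sketch with a simplified struct (any field not shown in the diff is an assumption):

```go
package main

import "fmt"

// customStorageDevice is a simplified stand-in after the rename.
type customStorageDevice struct {
	Enabled     bool
	FileVolume  string
	Interface   *string
	DatastoreID *string // was ID in the old code
}

func main() {
	id := "local-lvm"
	iface := "efidisk0"

	storageDevice := customStorageDevice{
		Enabled:     true,
		FileVolume:  "local-lvm:vm-100-disk-1",
		Interface:   &iface,
		DatastoreID: &id, // previously: ID: &id
	}

	fmt.Println(*storageDevice.DatastoreID)
}
```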
@ -4209,9 +4211,9 @@ func vmReadCustom(
if datastoreID != "" {
// disk format may not be returned by config API if it is default for the storage, and that may be different
// from the default qcow2, so we need to read it from the storage API to make sure we have the correct value
volume, err := api.Node(nodeName).Storage(datastoreID).GetDatastoreFile(ctx, dd.FileVolume)
if err != nil {
diags = append(diags, diag.FromErr(err)...)
volume, e := api.Node(nodeName).Storage(datastoreID).GetDatastoreFile(ctx, dd.FileVolume)
if e != nil {
diags = append(diags, diag.FromErr(e)...)
continue
}

@ -4292,7 +4294,7 @@ func vmReadCustom(
if len(clone) == 0 || len(currentDiskList) > 0 {
orderedDiskList := orderedListFromMap(diskMap)
err := d.Set(mkResourceVirtualEnvironmentVMDisk, orderedDiskList)
err = d.Set(mkResourceVirtualEnvironmentVMDisk, orderedDiskList)
diags = append(diags, diag.FromErr(err)...)
}

@ -4309,9 +4311,9 @@ func vmReadCustom(
} else {
// disk format may not be returned by config API if it is default for the storage, and that may be different
// from the default qcow2, so we need to read it from the storage API to make sure we have the correct value
volume, err := api.Node(nodeName).Storage(fileIDParts[0]).GetDatastoreFile(ctx, vmConfig.EFIDisk.FileVolume)
if err != nil {
diags = append(diags, diag.FromErr(err)...)
volume, e := api.Node(nodeName).Storage(fileIDParts[0]).GetDatastoreFile(ctx, vmConfig.EFIDisk.FileVolume)
if e != nil {
diags = append(diags, diag.FromErr(e)...)
} else {
efiDisk[mkResourceVirtualEnvironmentVMEFIDiskFileFormat] = volume.FileFormat
}

@ -4333,7 +4335,7 @@ func vmReadCustom(
if len(clone) > 0 {
if len(currentEfiDisk) > 0 {
err := d.Set(mkResourceVirtualEnvironmentVMEFIDisk, []interface{}{efiDisk})
err = d.Set(mkResourceVirtualEnvironmentVMEFIDisk, []interface{}{efiDisk})
diags = append(diags, diag.FromErr(err)...)
}
} else if len(currentEfiDisk) > 0 ||

@ -4341,7 +4343,7 @@ func vmReadCustom(
efiDisk[mkResourceVirtualEnvironmentVMEFIDiskType] != dvResourceVirtualEnvironmentVMEFIDiskType ||
efiDisk[mkResourceVirtualEnvironmentVMEFIDiskPreEnrolledKeys] != dvResourceVirtualEnvironmentVMEFIDiskPreEnrolledKeys || //nolint:lll
efiDisk[mkResourceVirtualEnvironmentVMEFIDiskFileFormat] != dvResourceVirtualEnvironmentVMEFIDiskFileFormat {
err := d.Set(mkResourceVirtualEnvironmentVMEFIDisk, []interface{}{efiDisk})
err = d.Set(mkResourceVirtualEnvironmentVMEFIDisk, []interface{}{efiDisk})
diags = append(diags, diag.FromErr(err)...)
}
}

@ -4358,13 +4360,13 @@ func vmReadCustom(
if len(clone) > 0 {
if len(currentTPMState) > 0 {
err := d.Set(mkResourceVirtualEnvironmentVMTPMState, []interface{}{tpmState})
err = d.Set(mkResourceVirtualEnvironmentVMTPMState, []interface{}{tpmState})
diags = append(diags, diag.FromErr(err)...)
}
} else if len(currentTPMState) > 0 ||
tpmState[mkResourceVirtualEnvironmentVMTPMStateDatastoreID] != dvResourceVirtualEnvironmentVMTPMStateDatastoreID ||
tpmState[mkResourceVirtualEnvironmentVMTPMStateVersion] != dvResourceVirtualEnvironmentVMTPMStateVersion {
err := d.Set(mkResourceVirtualEnvironmentVMTPMState, []interface{}{tpmState})
err = d.Set(mkResourceVirtualEnvironmentVMTPMState, []interface{}{tpmState})
diags = append(diags, diag.FromErr(err)...)
}
}

@ -4428,7 +4430,7 @@ func vmReadCustom(
if len(clone) == 0 || len(currentPCIList) > 0 {
orderedPCIList := orderedListFromMap(pciMap)
err := d.Set(mkResourceVirtualEnvironmentVMHostPCI, orderedPCIList)
err = d.Set(mkResourceVirtualEnvironmentVMHostPCI, orderedPCIList)
diags = append(diags, diag.FromErr(err)...)
}

@ -4467,7 +4469,7 @@ func vmReadCustom(
if len(clone) == 0 || len(currentUSBList) > 0 {
// todo: reordering of devices by PVE may cause an issue here
orderedUSBList := orderedListFromMap(usbMap)
err := d.Set(mkResourceVirtualEnvironmentVMHostUSB, orderedUSBList)
err = d.Set(mkResourceVirtualEnvironmentVMHostUSB, orderedUSBList)
diags = append(diags, diag.FromErr(err)...)
}

@ -4696,21 +4698,18 @@ func vmReadCustom(
if len(clone) > 0 {
if len(currentInitialization) > 0 {
if len(initialization) > 0 {
err := d.Set(
mkResourceVirtualEnvironmentVMInitialization,
[]interface{}{initialization},
)
err = d.Set(mkResourceVirtualEnvironmentVMInitialization, []interface{}{initialization})
diags = append(diags, diag.FromErr(err)...)
} else {
err := d.Set(mkResourceVirtualEnvironmentVMInitialization, []interface{}{})
err = d.Set(mkResourceVirtualEnvironmentVMInitialization, []interface{}{})
diags = append(diags, diag.FromErr(err)...)
}
}
} else if len(initialization) > 0 {
err := d.Set(mkResourceVirtualEnvironmentVMInitialization, []interface{}{initialization})
err = d.Set(mkResourceVirtualEnvironmentVMInitialization, []interface{}{initialization})
diags = append(diags, diag.FromErr(err)...)
} else {
err := d.Set(mkResourceVirtualEnvironmentVMInitialization, []interface{}{})
err = d.Set(mkResourceVirtualEnvironmentVMInitialization, []interface{}{})
diags = append(diags, diag.FromErr(err)...)
}

@ -4859,27 +4858,12 @@ func vmReadCustom(
networkDeviceList[ni] = networkDevice
}

if len(clone) > 0 {
if len(currentNetworkDeviceList) > 0 {
err := d.Set(
mkResourceVirtualEnvironmentVMMACAddresses,
macAddresses[0:len(currentNetworkDeviceList)],
)
diags = append(diags, diag.FromErr(err)...)
err = d.Set(
mkResourceVirtualEnvironmentVMNetworkDevice,
networkDeviceList[:networkDeviceLast+1],
)
diags = append(diags, diag.FromErr(err)...)
}
} else {
err := d.Set(mkResourceVirtualEnvironmentVMMACAddresses, macAddresses[0:len(currentNetworkDeviceList)])
diags = append(diags, diag.FromErr(err)...)
err = d.Set(mkResourceVirtualEnvironmentVMMACAddresses, macAddresses[0:len(currentNetworkDeviceList)])
diags = append(diags, diag.FromErr(err)...)

if len(currentNetworkDeviceList) > 0 || networkDeviceLast > -1 {
err := d.Set(mkResourceVirtualEnvironmentVMNetworkDevice, networkDeviceList[:networkDeviceLast+1])
diags = append(diags, diag.FromErr(err)...)
}
if len(currentNetworkDeviceList) > 0 || networkDeviceLast > -1 {
err := d.Set(mkResourceVirtualEnvironmentVMNetworkDevice, networkDeviceList[:networkDeviceLast+1])
diags = append(diags, diag.FromErr(err)...)
}

// Compare the operating system configuration to the one stored in the state.

@ -5230,8 +5214,6 @@ func vmReadNetworkValues(
}
}

err = d.Set(mkResourceVirtualEnvironmentVMMACAddresses, macAddresses)
diags = append(diags, diag.FromErr(err)...)
}
}
@ -5482,16 +5464,16 @@ func vmUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
updateBody := &vms.UpdateRequestBody{
IDEDevices: vms.CustomStorageDevices{
"ide0": vms.CustomStorageDevice{
"ide0": &vms.CustomStorageDevice{
Enabled: false,
},
"ide1": vms.CustomStorageDevice{
"ide1": &vms.CustomStorageDevice{
Enabled: false,
},
"ide2": vms.CustomStorageDevice{
"ide2": &vms.CustomStorageDevice{
Enabled: false,
},
"ide3": vms.CustomStorageDevice{
"ide3": &vms.CustomStorageDevice{
Enabled: false,
},
},

@ -5678,7 +5660,7 @@ func vmUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
cdromMedia := "cdrom"

updateBody.IDEDevices[cdromInterface] = vms.CustomStorageDevice{
updateBody.IDEDevices[cdromInterface] = &vms.CustomStorageDevice{
Enabled: cdromEnabled,
FileVolume: cdromFileID,
Media: &cdromMedia,

@ -5764,7 +5746,7 @@ func vmUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
return diag.Errorf("missing %s device %s", prefix, key)
}

tmp := *diskDeviceInfo[key]
tmp := diskDeviceInfo[key]
tmp.BurstableReadSpeedMbps = value.BurstableReadSpeedMbps
tmp.BurstableWriteSpeedMbps = value.BurstableWriteSpeedMbps
tmp.MaxReadSpeedMbps = value.MaxReadSpeedMbps

@ -5880,7 +5862,7 @@ func vmUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
fileVolume = ideDevice.FileVolume
}

updateBody.IDEDevices[initializationInterface] = vms.CustomStorageDevice{
updateBody.IDEDevices[initializationInterface] = &vms.CustomStorageDevice{
Enabled: true,
FileVolume: fileVolume,
Media: &cdromMedia,

@ -6124,12 +6106,12 @@ func vmUpdateDiskLocationAndSize(
if oldEfiDisk != nil {
baseDiskInterface := diskDigitPrefix(*oldEfiDisk.Interface)
diskOldEntries[baseDiskInterface][*oldEfiDisk.Interface] = *oldEfiDisk
diskOldEntries[baseDiskInterface][*oldEfiDisk.Interface] = oldEfiDisk
}

if newEfiDisk != nil {
baseDiskInterface := diskDigitPrefix(*newEfiDisk.Interface)
diskNewEntries[baseDiskInterface][*newEfiDisk.Interface] = *newEfiDisk
diskNewEntries[baseDiskInterface][*newEfiDisk.Interface] = newEfiDisk
}

if oldEfiDisk != nil && newEfiDisk != nil && oldEfiDisk.Size != newEfiDisk.Size {

@ -6148,12 +6130,12 @@ func vmUpdateDiskLocationAndSize(
if oldTPMState != nil {
baseDiskInterface := diskDigitPrefix(*oldTPMState.Interface)
diskOldEntries[baseDiskInterface][*oldTPMState.Interface] = *oldTPMState
diskOldEntries[baseDiskInterface][*oldTPMState.Interface] = oldTPMState
}

if newTPMState != nil {
baseDiskInterface := diskDigitPrefix(*newTPMState.Interface)
diskNewEntries[baseDiskInterface][*newTPMState.Interface] = *newTPMState
diskNewEntries[baseDiskInterface][*newTPMState.Interface] = newTPMState
}

if oldTPMState != nil && newTPMState != nil && oldTPMState.Size != newTPMState.Size {

@ -6178,7 +6160,7 @@ func vmUpdateDiskLocationAndSize(
)
}

if *oldDisk.ID != *diskNewEntries[prefix][oldKey].ID {
if *oldDisk.DatastoreID != *diskNewEntries[prefix][oldKey].DatastoreID {
if oldDisk.IsOwnedBy(vmID) {
deleteOriginalDisk := types.CustomBool(true)

@ -6187,7 +6169,7 @@ func vmUpdateDiskLocationAndSize(
&vms.MoveDiskRequestBody{
DeleteOriginalDisk: &deleteOriginalDisk,
Disk: *oldDisk.Interface,
TargetStorage: *diskNewEntries[prefix][oldKey].ID,
TargetStorage: *diskNewEntries[prefix][oldKey].DatastoreID,
},
)

@ -6196,9 +6178,9 @@ func vmUpdateDiskLocationAndSize(
} else {
return diag.Errorf(
"Cannot move %s:%s to datastore %s in VM %d configuration, it is not owned by this VM!",
*oldDisk.ID,
*oldDisk.DatastoreID,
*oldDisk.PathInDatastore(),
*diskNewEntries[prefix][oldKey].ID,
*diskNewEntries[prefix][oldKey].DatastoreID,
vmID,
)
}

@ -6216,7 +6198,7 @@ func vmUpdateDiskLocationAndSize(
} else {
return diag.Errorf(
"Cannot resize %s:%s in VM %d configuration, it is not owned by this VM!",
*oldDisk.ID,
*oldDisk.DatastoreID,
*oldDisk.PathInDatastore(),
vmID,
)

@ -6231,16 +6213,17 @@ func vmUpdateDiskLocationAndSize(
}
}

timeout := d.Get(mkResourceVirtualEnvironmentVMTimeoutMoveDisk).(int)

for _, reqBody := range diskMoveBodies {
moveDiskTimeout := d.Get(mkResourceVirtualEnvironmentVMTimeoutMoveDisk).(int)
err = vmAPI.MoveVMDisk(ctx, reqBody, moveDiskTimeout)
err = vmAPI.MoveVMDisk(ctx, reqBody, timeout)
if err != nil {
return diag.FromErr(err)
}
}

for _, reqBody := range diskResizeBodies {
err = vmAPI.ResizeVMDisk(ctx, reqBody)
err = vmAPI.ResizeVMDisk(ctx, reqBody, timeout)
if err != nil {
return diag.FromErr(err)
}
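The last hunk hoists the `timeout` lookup out of the disk-move loop, reuses it for the resize loop instead of re-reading the schema value on every iteration, and passes it to `ResizeVMDisk` as well. A minimal sketch of that pattern, with hypothetical helper names standing in for the provider's API calls:

```go
package main

import (
	"context"
	"fmt"
)

type requestBody struct{ Disk string }

// moveDisk and resizeDisk are hypothetical stand-ins for MoveVMDisk / ResizeVMDisk.
func moveDisk(ctx context.Context, b *requestBody, timeoutSec int) error   { return nil }
func resizeDisk(ctx context.Context, b *requestBody, timeoutSec int) error { return nil }

func main() {
	ctx := context.Background()
	diskMoveBodies := []*requestBody{{Disk: "scsi0"}}
	diskResizeBodies := []*requestBody{{Disk: "scsi0"}}

	// Read the timeout once, before both loops, rather than inside the move loop.
	timeout := 1800

	for _, reqBody := range diskMoveBodies {
		if err := moveDisk(ctx, reqBody, timeout); err != nil {
			fmt.Println(err)
			return
		}
	}

	for _, reqBody := range diskResizeBodies {
		if err := resizeDisk(ctx, reqBody, timeout); err != nil {
			fmt.Println(err)
			return
		}
	}
}
```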
@ -6329,7 +6312,7 @@ func vmDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
}

// Wait for the state to become unavailable as that clearly indicates the destruction of the VM.
err = vmAPI.WaitForVMState(ctx, "", 60, 2)
err = vmAPI.WaitForVMStatus(ctx, "", 60, 2)
if err == nil {
return diag.Errorf("failed to delete VM \"%d\"", vmID)
}
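Throughout the file the wait helper is now called `WaitForVMStatus` rather than `WaitForVMState`, while the argument list (desired status, timeout in seconds, polling interval) looks unchanged. A hedged sketch of what such a polling helper could look like; this is not the provider's actual implementation:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitForVMStatus polls getStatus until it reports the wanted status, giving up
// after timeoutSec seconds; delaySec is the polling interval. The (status,
// timeout, delay) argument order mirrors the calls shown in this diff.
func waitForVMStatus(ctx context.Context, getStatus func() (string, error), want string, timeoutSec, delaySec int) error {
	deadline := time.Now().Add(time.Duration(timeoutSec) * time.Second)
	for time.Now().Before(deadline) {
		status, err := getStatus()
		if err != nil {
			return err
		}
		if status == want {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Duration(delaySec) * time.Second):
		}
	}
	return errors.New("timed out waiting for VM status " + want)
}

func main() {
	calls := 0
	getStatus := func() (string, error) {
		calls++
		if calls > 2 {
			return "stopped", nil
		}
		return "running", nil
	}
	fmt.Println(waitForVMStatus(context.Background(), getStatus, "stopped", 30, 1))
}
```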
11
utils/strings.go
Normal file

@ -0,0 +1,11 @@
package utils

// ConvertToStringSlice helps convert interface slice to string slice.
func ConvertToStringSlice(interfaceSlice []interface{}) []string {
resultSlice := []string{}
for _, val := range interfaceSlice {
resultSlice = append(resultSlice, val.(string))
}

return resultSlice
}
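The new `utils.ConvertToStringSlice` helper turns a `[]interface{}` (the shape Terraform's `schema.ResourceData` returns for list attributes) into a `[]string`. A short, self-contained usage example; the input literal is illustrative only:

```go
package main

import "fmt"

// ConvertToStringSlice helps convert interface slice to string slice.
// (Copied from utils/strings.go above so this example runs on its own.)
func ConvertToStringSlice(interfaceSlice []interface{}) []string {
	resultSlice := []string{}
	for _, val := range interfaceSlice {
		resultSlice = append(resultSlice, val.(string))
	}

	return resultSlice
}

func main() {
	raw := []interface{}{"node1", "node2", "node3"} // e.g. d.Get("nodes").([]interface{})
	fmt.Println(ConvertToStringSlice(raw))          // [node1 node2 node3]
}
```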