0
0
mirror of https://github.com/bpg/terraform-provider-proxmox.git synced 2025-08-22 19:38:35 +00:00

feat(vm): add support for disk aio, backup, iops* attributes (#1124)

* feat(vm): add support for disk `aio`, `backup`, `iops*` attributes

Signed-off-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>
This commit is contained in:
Pavel Boldyrev 2024-03-14 00:24:17 -04:00 committed by GitHub
parent 732f0188d7
commit 014b59e04f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 511 additions and 182 deletions

View File

@ -63,6 +63,8 @@
"tfvars",
"tpmstate",
"unmanaged",
"unthrottled",
"uring",
"usbdisk",
"vcpus",
"virtio",

View File

@ -233,6 +233,12 @@ output "ubuntu_vm_public_key" {
- `units` - (Optional) The CPU units (defaults to `1024`).
- `description` - (Optional) The description.
- `disk` - (Optional) A disk (multiple blocks supported).
- `aio` - (Optional) The disk AIO mode (defaults to `io_uring`).
- `io_uring` - Use io_uring.
- `native` - Use native AIO. Should be used only with unbuffered, O_DIRECT, raw block storage;
with `native`, the disk `cache` must be set to `none`. Raw block storage types include iSCSI, CEPH/RBD, and NVMe.
- `threads` - Use thread-based AIO.
- `backup` - (Optional) Whether the drive should be included when making backups (defaults to `true`).
- `cache` - (Optional) The cache type (defaults to `none`).
- `none` - No cache.
- `directsync` - Write to the host cache and wait for completion.
@ -263,8 +269,13 @@ output "ubuntu_vm_public_key" {
the second, etc.
- `iothread` - (Optional) Whether to use iothreads for this disk (defaults
to `false`).
- `replicate` - (Optional) Whether the drive should be considered for replication jobs (defaults to `true`).
- `size` - (Optional) The disk size in gigabytes (defaults to `8`).
- `speed` - (Optional) The speed limits.
- `iops_read` - (Optional) The maximum read I/O in operations per second.
- `iops_read_burstable` - (Optional) The maximum unthrottled read I/O pool in operations per second.
- `iops_write` - (Optional) The maximum write I/O in operations per second.
- `iops_write_burstable` - (Optional) The maximum unthrottled write I/O pool in operations per second.
- `read` - (Optional) The maximum read speed in megabytes per second.
- `read_burstable` - (Optional) The maximum burstable read speed in
megabytes per second.

View File

@ -181,7 +181,8 @@ func TestAccResourceVMDisks(t *testing.T) {
name string
steps []resource.TestStep
}{
{"create disk with default parameters", []resource.TestStep{{
{"create disk with default parameters, then update it", []resource.TestStep{
{
Config: providerConfig + `
resource "proxmox_virtual_environment_vm" "test_disk1" {
node_name = "pve"
@ -198,6 +199,8 @@ func TestAccResourceVMDisks(t *testing.T) {
}`,
Check: resource.ComposeTestCheckFunc(
testResourceAttributes("proxmox_virtual_environment_vm.test_disk1", map[string]string{
"disk.0.aio": "io_uring",
"disk.0.backup": "true",
"disk.0.cache": "none",
"disk.0.discard": "ignore",
"disk.0.file_id": "",
@ -206,11 +209,58 @@ func TestAccResourceVMDisks(t *testing.T) {
"disk.0.interface": "virtio0",
"disk.0.iothread": "false",
"disk.0.path_in_datastore": `vm-\d+-disk-\d+`,
"disk.0.replicate": "true",
"disk.0.size": "8",
"disk.0.ssd": "false",
}),
),
}}},
},
{
Config: providerConfig + `
resource "proxmox_virtual_environment_vm" "test_disk1" {
node_name = "pve"
started = false
name = "test-disk1"
disk {
// note: default qcow2 is not supported by lvm (?)
file_format = "raw"
datastore_id = "local-lvm"
interface = "virtio0"
size = 8
replicate = false
aio = "native"
speed {
iops_read = 100
iops_read_burstable = 1000
iops_write = 400
iops_write_burstable = 800
}
}
}`,
Check: resource.ComposeTestCheckFunc(
testResourceAttributes("proxmox_virtual_environment_vm.test_disk1", map[string]string{
"disk.0.aio": "native",
"disk.0.backup": "true",
"disk.0.cache": "none",
"disk.0.discard": "ignore",
"disk.0.file_id": "",
"disk.0.datastore_id": "local-lvm",
"disk.0.file_format": "raw",
"disk.0.interface": "virtio0",
"disk.0.iothread": "false",
"disk.0.path_in_datastore": `vm-\d+-disk-\d+`,
"disk.0.replicate": "false",
"disk.0.size": "8",
"disk.0.ssd": "false",
"disk.0.speed.0.iops_read": "100",
"disk.0.speed.0.iops_read_burstable": "1000",
"disk.0.speed.0.iops_write": "400",
"disk.0.speed.0.iops_write_burstable": "800",
}),
),
},
}},
{"create disk from an image", []resource.TestStep{{
Config: providerConfig + `
resource "proxmox_virtual_environment_download_file" "test_disk2_image" {
@ -282,59 +332,62 @@ func TestAccResourceVMDisks(t *testing.T) {
RefreshState: true,
},
}},
// this test is failing because of https://github.com/bpg/terraform-provider-proxmox/issues/360
// {"clone disk with new size", []resource.TestStep{
// {
// Config: providerConfig + `
// resource "proxmox_virtual_environment_vm" "test_disk3_template" {
// node_name = "pve"
// started = false
// name = "test-disk3-template"
// template = "true"
//
// disk {
// file_format = "raw"
// datastore_id = "local-lvm"
// interface = "scsi0"
// size = 8
// discard = "on"
// iothread = true
// }
// }
// resource "proxmox_virtual_environment_vm" "test_disk3" {
// node_name = "pve"
// started = false
// name = "test-disk3"
//
// clone {
// vm_id = proxmox_virtual_environment_vm.test_disk3_template.id
// }
//
// disk {
// interface = "scsi0"
{"clone disk with overrides", []resource.TestStep{
{
SkipFunc: func() (bool, error) {
// this test is failing because of https://github.com/bpg/terraform-provider-proxmox/issues/873
return true, nil
},
Config: providerConfig + `
resource "proxmox_virtual_environment_vm" "test_disk3_template" {
node_name = "pve"
started = false
name = "test-disk3-template"
template = "true"
disk {
file_format = "raw"
datastore_id = "local-lvm"
interface = "scsi0"
size = 8
discard = "on"
iothread = true
ssd = true
}
}
resource "proxmox_virtual_environment_vm" "test_disk3" {
node_name = "pve"
started = false
name = "test-disk3"
clone {
vm_id = proxmox_virtual_environment_vm.test_disk3_template.id
}
disk {
interface = "scsi0"
//size = 10
// //ssd = true
// }
// }
// `,
// Check: resource.ComposeTestCheckFunc(
// testResourceAttributes("proxmox_virtual_environment_vm.test_disk3", map[string]string{
// "disk.0.datastore_id": "local-lvm",
// "disk.0.discard": "on",
// "disk.0.file_format": "raw",
// "disk.0.interface": "scsi0",
// "disk.0.iothread": "true",
// "disk.0.path_in_datastore": `vm-\d+-disk-\d+`,
// "disk.0.size": "10",
// "disk.0.ssd": "false",
// }),
// ),
// },
//{
// RefreshState: true,
// Destroy: false,
// },
// }},
}
}
`,
Check: resource.ComposeTestCheckFunc(
testResourceAttributes("proxmox_virtual_environment_vm.test_disk3", map[string]string{
"disk.0.datastore_id": "local-lvm",
"disk.0.discard": "on",
"disk.0.file_format": "raw",
"disk.0.interface": "scsi0",
"disk.0.iothread": "true",
"disk.0.path_in_datastore": `vm-\d+-disk-\d+`,
"disk.0.size": "8",
"disk.0.ssd": "true",
}),
),
},
{
RefreshState: true,
Destroy: false,
},
}},
}
accProviders := testAccMuxProviders(context.Background(), t)

View File

@ -12,23 +12,28 @@ import (
// CustomStorageDevice handles QEMU SATA device parameters.
type CustomStorageDevice struct {
AIO *string `json:"aio,omitempty" url:"aio,omitempty"`
BackupEnabled *types.CustomBool `json:"backup,omitempty" url:"backup,omitempty,int"`
Backup *types.CustomBool `json:"backup,omitempty" url:"backup,omitempty,int"`
BurstableReadSpeedMbps *int `json:"mbps_rd_max,omitempty" url:"mbps_rd_max,omitempty"`
Cache *string `json:"cache,omitempty" url:"cache,omitempty"`
BurstableWriteSpeedMbps *int `json:"mbps_wr_max,omitempty" url:"mbps_wr_max,omitempty"`
Cache *string `json:"cache,omitempty" url:"cache,omitempty"`
Discard *string `json:"discard,omitempty" url:"discard,omitempty"`
Enabled bool `json:"-" url:"-"`
FileVolume string `json:"file" url:"file"`
Format *string `json:"format,omitempty" url:"format,omitempty"`
IopsRead *int `json:"iops_rd,omitempty" url:"iops_rd,omitempty"`
IopsWrite *int `json:"iops_wr,omitempty" url:"iops_wr,omitempty"`
IOThread *types.CustomBool `json:"iothread,omitempty" url:"iothread,omitempty,int"`
SSD *types.CustomBool `json:"ssd,omitempty" url:"ssd,omitempty,int"`
MaxIopsRead *int `json:"iops_rd_max,omitempty" url:"iops_rd_max,omitempty"`
MaxIopsWrite *int `json:"iops_wr_max,omitempty" url:"iops_wr_max,omitempty"`
MaxReadSpeedMbps *int `json:"mbps_rd,omitempty" url:"mbps_rd,omitempty"`
MaxWriteSpeedMbps *int `json:"mbps_wr,omitempty" url:"mbps_wr,omitempty"`
Media *string `json:"media,omitempty" url:"media,omitempty"`
Replicate *types.CustomBool `json:"replicate,omitempty" url:"replicate,omitempty,int"`
Size *types.DiskSize `json:"size,omitempty" url:"size,omitempty"`
Interface *string `json:"-" url:"-"`
SSD *types.CustomBool `json:"ssd,omitempty" url:"ssd,omitempty,int"`
DatastoreID *string `json:"-" url:"-"`
Enabled bool `json:"-" url:"-"`
FileID *string `json:"-" url:"-"`
Interface *string `json:"-" url:"-"`
}
// PathInDatastore returns path part of FileVolume or nil if it is not yet allocated.
@ -110,14 +115,30 @@ func (d CustomStorageDevice) EncodeOptions() string {
values = append(values, fmt.Sprintf("aio=%s", *d.AIO))
}
if d.BackupEnabled != nil {
if *d.BackupEnabled {
if d.Backup != nil {
if *d.Backup {
values = append(values, "backup=1")
} else {
values = append(values, "backup=0")
}
}
if d.IopsRead != nil {
values = append(values, fmt.Sprintf("iops_rd=%d", *d.IopsRead))
}
if d.IopsWrite != nil {
values = append(values, fmt.Sprintf("iops_wr=%d", *d.IopsWrite))
}
if d.MaxIopsRead != nil {
values = append(values, fmt.Sprintf("iops_rd_max=%d", *d.MaxIopsRead))
}
if d.MaxIopsWrite != nil {
values = append(values, fmt.Sprintf("iops_wr_max=%d", *d.MaxIopsWrite))
}
if d.IOThread != nil {
if *d.IOThread {
values = append(values, "iothread=1")
@ -158,6 +179,14 @@ func (d CustomStorageDevice) EncodeOptions() string {
values = append(values, fmt.Sprintf("mbps_wr=%d", *d.MaxWriteSpeedMbps))
}
if d.Replicate != nil {
if *d.Replicate {
values = append(values, "replicate=1")
} else {
values = append(values, "replicate=0")
}
}
return strings.Join(values, ",")
}
@ -190,23 +219,28 @@ func (d CustomStorageDevice) EncodeValues(key string, v *url.Values) error {
func (d CustomStorageDevice) Copy() *CustomStorageDevice {
return &CustomStorageDevice{
AIO: types.CopyString(d.AIO),
BackupEnabled: d.BackupEnabled.Copy(),
Backup: d.Backup.Copy(),
BurstableReadSpeedMbps: types.CopyInt(d.BurstableReadSpeedMbps),
Cache: types.CopyString(d.Cache),
BurstableWriteSpeedMbps: types.CopyInt(d.BurstableWriteSpeedMbps),
Cache: types.CopyString(d.Cache),
DatastoreID: types.CopyString(d.DatastoreID),
Discard: types.CopyString(d.Discard),
Enabled: d.Enabled,
FileID: types.CopyString(d.FileID),
FileVolume: d.FileVolume,
Format: types.CopyString(d.Format),
Interface: types.CopyString(d.Interface),
IopsRead: types.CopyInt(d.IopsRead),
IopsWrite: types.CopyInt(d.IopsWrite),
IOThread: d.IOThread.Copy(),
SSD: d.SSD.Copy(),
MaxIopsRead: types.CopyInt(d.MaxIopsRead),
MaxIopsWrite: types.CopyInt(d.MaxIopsWrite),
MaxReadSpeedMbps: types.CopyInt(d.MaxReadSpeedMbps),
MaxWriteSpeedMbps: types.CopyInt(d.MaxWriteSpeedMbps),
Media: types.CopyString(d.Media),
Replicate: d.Replicate.Copy(),
Size: d.Size.Copy(),
Interface: types.CopyString(d.Interface),
DatastoreID: types.CopyString(d.DatastoreID),
FileID: types.CopyString(d.FileID),
SSD: d.SSD.Copy(),
}
}

View File

@ -1827,11 +1827,56 @@ func (d *CustomStorageDevice) UnmarshalJSON(b []byte) error {
case "backup":
bv := types.CustomBool(v[1] == "1")
d.BackupEnabled = &bv
d.Backup = &bv
case "cache":
d.Cache = &v[1]
case "discard":
d.Discard = &v[1]
case "file":
d.FileVolume = v[1]
case "format":
d.Format = &v[1]
case "iops_rd":
iv, err := strconv.Atoi(v[1])
if err != nil {
return fmt.Errorf("failed to convert iops_rd to int: %w", err)
}
d.IopsRead = &iv
case "iops_rd_max":
iv, err := strconv.Atoi(v[1])
if err != nil {
return fmt.Errorf("failed to convert iops_rd_max to int: %w", err)
}
d.MaxIopsRead = &iv
case "iops_wr":
iv, err := strconv.Atoi(v[1])
if err != nil {
return fmt.Errorf("failed to convert iops_wr to int: %w", err)
}
d.IopsWrite = &iv
case "iops_wr_max":
iv, err := strconv.Atoi(v[1])
if err != nil {
return fmt.Errorf("failed to convert iops_wr_max to int: %w", err)
}
d.MaxIopsWrite = &iv
case "iothread":
bv := types.CustomBool(v[1] == "1")
d.IOThread = &bv
case "mbps_rd":
iv, err := strconv.Atoi(v[1])
if err != nil {
@ -1839,6 +1884,7 @@ func (d *CustomStorageDevice) UnmarshalJSON(b []byte) error {
}
d.MaxReadSpeedMbps = &iv
case "mbps_rd_max":
iv, err := strconv.Atoi(v[1])
if err != nil {
@ -1846,6 +1892,7 @@ func (d *CustomStorageDevice) UnmarshalJSON(b []byte) error {
}
d.BurstableReadSpeedMbps = &iv
case "mbps_wr":
iv, err := strconv.Atoi(v[1])
if err != nil {
@ -1853,6 +1900,7 @@ func (d *CustomStorageDevice) UnmarshalJSON(b []byte) error {
}
d.MaxWriteSpeedMbps = &iv
case "mbps_wr_max":
iv, err := strconv.Atoi(v[1])
if err != nil {
@ -1860,9 +1908,14 @@ func (d *CustomStorageDevice) UnmarshalJSON(b []byte) error {
}
d.BurstableWriteSpeedMbps = &iv
case "media":
d.Media = &v[1]
case "replicate":
bv := types.CustomBool(v[1] == "1")
d.Replicate = &bv
case "size":
d.Size = new(types.DiskSize)
@ -1871,22 +1924,9 @@ func (d *CustomStorageDevice) UnmarshalJSON(b []byte) error {
return fmt.Errorf("failed to unmarshal disk size: %w", err)
}
case "format":
d.Format = &v[1]
case "iothread":
bv := types.CustomBool(v[1] == "1")
d.IOThread = &bv
case "ssd":
bv := types.CustomBool(v[1] == "1")
d.SSD = &bv
case "discard":
d.Discard = &v[1]
case "cache":
d.Cache = &v[1]
}
}
}

View File

@ -246,14 +246,17 @@ func GetDiskDeviceObjects(
pathInDatastore = untyped.(string)
}
aio := block[mkDiskAIO].(string)
backup := types.CustomBool(block[mkDiskBackup].(bool))
cache := block[mkDiskCache].(string)
discard := block[mkDiskDiscard].(string)
diskInterface, _ := block[mkDiskInterface].(string)
fileFormat, _ := block[mkDiskFileFormat].(string)
fileID, _ := block[mkDiskFileID].(string)
size, _ := block[mkDiskSize].(int)
diskInterface, _ := block[mkDiskInterface].(string)
ioThread := types.CustomBool(block[mkDiskIOThread].(bool))
replicate := types.CustomBool(block[mkDiskReplicate].(bool))
size, _ := block[mkDiskSize].(int)
ssd := types.CustomBool(block[mkDiskSSD].(bool))
discard := block[mkDiskDiscard].(string)
cache := block[mkDiskCache].(string)
speedBlock, err := structure.GetSchemaBlock(
resource,
@ -285,26 +288,51 @@ func GetDiskDeviceObjects(
diskDevice.FileVolume = fmt.Sprintf("%s:%d", datastoreID, size)
}
diskDevice.DatastoreID = &datastoreID
diskDevice.Interface = &diskInterface
diskDevice.Format = &fileFormat
diskDevice.FileID = &fileID
diskSize := types.DiskSizeFromGigabytes(int64(size))
diskDevice.Size = diskSize
diskDevice.IOThread = &ioThread
diskDevice.Discard = &discard
diskDevice.AIO = &aio
diskDevice.Backup = &backup
diskDevice.Cache = &cache
diskDevice.DatastoreID = &datastoreID
diskDevice.Discard = &discard
diskDevice.FileID = &fileID
diskDevice.Format = &fileFormat
diskDevice.Interface = &diskInterface
diskDevice.Replicate = &replicate
diskDevice.Size = types.DiskSizeFromGigabytes(int64(size))
if !strings.HasPrefix(diskInterface, "virtio") {
diskDevice.SSD = &ssd
}
if !strings.HasPrefix(diskInterface, "sata") {
diskDevice.IOThread = &ioThread
}
if len(speedBlock) > 0 {
iopsRead := speedBlock[mkDiskIopsRead].(int)
iopsReadBurstable := speedBlock[mkDiskIopsReadBurstable].(int)
iopsWrite := speedBlock[mkDiskIopsWrite].(int)
iopsWriteBurstable := speedBlock[mkDiskIopsWriteBurstable].(int)
speedLimitRead := speedBlock[mkDiskSpeedRead].(int)
speedLimitReadBurstable := speedBlock[mkDiskSpeedReadBurstable].(int)
speedLimitWrite := speedBlock[mkDiskSpeedWrite].(int)
speedLimitWriteBurstable := speedBlock[mkDiskSpeedWriteBurstable].(int)
if iopsRead > 0 {
diskDevice.IopsRead = &iopsRead
}
if iopsReadBurstable > 0 {
diskDevice.MaxIopsRead = &iopsReadBurstable
}
if iopsWrite > 0 {
diskDevice.IopsWrite = &iopsWrite
}
if iopsWriteBurstable > 0 {
diskDevice.MaxIopsWrite = &iopsWriteBurstable
}
if speedLimitRead > 0 {
diskDevice.MaxReadSpeedMbps = &speedLimitRead
}
@ -391,15 +419,18 @@ func CreateCustomDisks(
continue
}
aio, _ := block[mkDiskAIO].(string)
backup := types.CustomBool(block[mkDiskBackup].(bool))
cache, _ := block[mkDiskCache].(string)
datastoreID, _ := block[mkDiskDatastoreID].(string)
discard, _ := block[mkDiskDiscard].(string)
diskInterface, _ := block[mkDiskInterface].(string)
fileFormat, _ := block[mkDiskFileFormat].(string)
ioThread := types.CustomBool(block[mkDiskIOThread].(bool))
replicate := types.CustomBool(block[mkDiskReplicate].(bool))
size, _ := block[mkDiskSize].(int)
speed := block[mkDiskSpeed].([]interface{})
diskInterface, _ := block[mkDiskInterface].(string)
ioThread := types.CustomBool(block[mkDiskIOThread].(bool))
ssd := types.CustomBool(block[mkDiskSSD].(bool))
discard, _ := block[mkDiskDiscard].(string)
cache, _ := block[mkDiskCache].(string)
if fileFormat == "" {
fileFormat = dvDiskFileFormat
@ -415,6 +446,10 @@ func CreateCustomDisks(
}
speedBlock := speed[0].(map[string]interface{})
iopsRead := speedBlock[mkDiskIopsRead].(int)
iopsReadBurstable := speedBlock[mkDiskIopsReadBurstable].(int)
iopsWrite := speedBlock[mkDiskIopsWrite].(int)
iopsWriteBurstable := speedBlock[mkDiskIopsWriteBurstable].(int)
speedLimitRead := speedBlock[mkDiskSpeedRead].(int)
speedLimitReadBurstable := speedBlock[mkDiskSpeedReadBurstable].(int)
speedLimitWrite := speedBlock[mkDiskSpeedWrite].(int)
@ -422,10 +457,26 @@ func CreateCustomDisks(
diskOptions := ""
if aio != "" {
diskOptions += fmt.Sprintf(",aio=%s", aio)
}
if backup {
diskOptions += ",backup=1"
} else {
diskOptions += ",backup=0"
}
if ioThread {
diskOptions += ",iothread=1"
}
if replicate {
diskOptions += ",replicate=1"
} else {
diskOptions += ",replicate=0"
}
if ssd {
diskOptions += ",ssd=1"
}
@ -438,6 +489,22 @@ func CreateCustomDisks(
diskOptions += fmt.Sprintf(",cache=%s", cache)
}
if iopsRead > 0 {
diskOptions += fmt.Sprintf(",iops_rd=%d", iopsRead)
}
if iopsReadBurstable > 0 {
diskOptions += fmt.Sprintf(",iops_rd_max=%d", iopsReadBurstable)
}
if iopsWrite > 0 {
diskOptions += fmt.Sprintf(",iops_wr=%d", iopsWrite)
}
if iopsWriteBurstable > 0 {
diskOptions += fmt.Sprintf(",iops_wr_max=%d", iopsWriteBurstable)
}
if speedLimitRead > 0 {
diskOptions += fmt.Sprintf(",mbps_rd=%d", speedLimitRead)
}
@ -573,12 +640,82 @@ func Read(
disk[mkDiskInterface] = di
disk[mkDiskSize] = dd.Size.InGigabytes()
if dd.BurstableReadSpeedMbps != nil ||
if dd.AIO != nil {
disk[mkDiskAIO] = *dd.AIO
} else {
disk[mkDiskAIO] = dvDiskAIO
}
if dd.Backup != nil {
disk[mkDiskBackup] = *dd.Backup
} else {
disk[mkDiskBackup] = true
}
if dd.IOThread != nil {
disk[mkDiskIOThread] = *dd.IOThread
} else {
disk[mkDiskIOThread] = false
}
if dd.Replicate != nil {
disk[mkDiskReplicate] = *dd.Replicate
} else {
disk[mkDiskReplicate] = true
}
if dd.SSD != nil {
disk[mkDiskSSD] = *dd.SSD
} else {
disk[mkDiskSSD] = false
}
if dd.Discard != nil {
disk[mkDiskDiscard] = *dd.Discard
} else {
disk[mkDiskDiscard] = dvDiskDiscard
}
if dd.Cache != nil {
disk[mkDiskCache] = *dd.Cache
} else {
disk[mkDiskCache] = dvDiskCache
}
if dd.IopsRead != nil ||
dd.MaxIopsRead != nil ||
dd.IopsWrite != nil ||
dd.MaxIopsWrite != nil ||
dd.BurstableReadSpeedMbps != nil ||
dd.BurstableWriteSpeedMbps != nil ||
dd.MaxReadSpeedMbps != nil ||
dd.MaxWriteSpeedMbps != nil {
speed := map[string]interface{}{}
if dd.IopsRead != nil {
speed[mkDiskIopsRead] = *dd.IopsRead
} else {
speed[mkDiskIopsRead] = 0
}
if dd.MaxIopsRead != nil {
speed[mkDiskIopsReadBurstable] = *dd.MaxIopsRead
} else {
speed[mkDiskIopsReadBurstable] = 0
}
if dd.IopsWrite != nil {
speed[mkDiskIopsWrite] = *dd.IopsWrite
} else {
speed[mkDiskIopsWrite] = 0
}
if dd.MaxIopsWrite != nil {
speed[mkDiskIopsWriteBurstable] = *dd.MaxIopsWrite
} else {
speed[mkDiskIopsWriteBurstable] = 0
}
if dd.MaxReadSpeedMbps != nil {
speed[mkDiskSpeedRead] = *dd.MaxReadSpeedMbps
} else {
@ -608,30 +745,6 @@ func Read(
disk[mkDiskSpeed] = []interface{}{}
}
if dd.IOThread != nil {
disk[mkDiskIOThread] = *dd.IOThread
} else {
disk[mkDiskIOThread] = false
}
if dd.SSD != nil {
disk[mkDiskSSD] = *dd.SSD
} else {
disk[mkDiskSSD] = false
}
if dd.Discard != nil {
disk[mkDiskDiscard] = *dd.Discard
} else {
disk[mkDiskDiscard] = dvDiskDiscard
}
if dd.Cache != nil {
disk[mkDiskCache] = *dd.Cache
} else {
disk[mkDiskCache] = dvDiskCache
}
diskMap[di] = disk
}
@ -650,7 +763,9 @@ func Update(
planDisks map[string]vms.CustomStorageDevices,
allDiskInfo vms.CustomStorageDevices,
updateBody *vms.UpdateRequestBody,
) error {
) (bool, error) {
rebootRequired := false
if d.HasChange(MkDisk) {
for prefix, diskMap := range planDisks {
if diskMap == nil {
@ -659,15 +774,30 @@ func Update(
for key, value := range diskMap {
if allDiskInfo[key] == nil {
return fmt.Errorf("missing %s device %s", prefix, key)
return false, fmt.Errorf("missing %s device %s", prefix, key)
}
tmp := allDiskInfo[key]
if tmp.AIO != value.AIO {
rebootRequired = true
tmp.AIO = value.AIO
}
tmp.Backup = value.Backup
tmp.BurstableReadSpeedMbps = value.BurstableReadSpeedMbps
tmp.BurstableWriteSpeedMbps = value.BurstableWriteSpeedMbps
tmp.Cache = value.Cache
tmp.Discard = value.Discard
tmp.IOThread = value.IOThread
tmp.IopsRead = value.IopsRead
tmp.IopsWrite = value.IopsWrite
tmp.MaxIopsRead = value.MaxIopsRead
tmp.MaxIopsWrite = value.MaxIopsWrite
tmp.MaxReadSpeedMbps = value.MaxReadSpeedMbps
tmp.MaxWriteSpeedMbps = value.MaxWriteSpeedMbps
tmp.Cache = value.Cache
tmp.Replicate = value.Replicate
tmp.SSD = value.SSD
switch prefix {
case "virtio":
@ -700,11 +830,11 @@ func Update(
// Investigate whether to support IDE mapping.
}
default:
return fmt.Errorf("device prefix %s not supported", prefix)
return false, fmt.Errorf("device prefix %s not supported", prefix)
}
}
}
}
return nil
return rebootRequired, nil
}

View File

@ -11,34 +11,35 @@ const (
dvDiskInterface = "scsi0"
dvDiskDatastoreID = "local-lvm"
dvDiskFileFormat = "qcow2"
dvDiskFileID = ""
dvDiskSize = 8
dvDiskIOThread = false
dvDiskSSD = false
dvDiskAIO = "io_uring"
dvDiskDiscard = "ignore"
dvDiskCache = "none"
dvDiskSpeedRead = 0
dvDiskSpeedReadBurstable = 0
dvDiskSpeedWrite = 0
dvDiskSpeedWriteBurstable = 0
// MkDisk is the name of the disk resource.
MkDisk = "disk"
mkDiskInterface = "interface"
mkDiskAIO = "aio"
mkDiskBackup = "backup"
mkDiskCache = "cache"
mkDiskDatastoreID = "datastore_id"
mkDiskPathInDatastore = "path_in_datastore"
mkDiskDiscard = "discard"
mkDiskFileFormat = "file_format"
mkDiskFileID = "file_id"
mkDiskSize = "size"
mkDiskInterface = "interface"
mkDiskIopsRead = "iops_read"
mkDiskIopsReadBurstable = "iops_read_burstable"
mkDiskIopsWrite = "iops_write"
mkDiskIopsWriteBurstable = "iops_write_burstable"
mkDiskIOThread = "iothread"
mkDiskSSD = "ssd"
mkDiskDiscard = "discard"
mkDiskCache = "cache"
mkDiskPathInDatastore = "path_in_datastore"
mkDiskReplicate = "replicate"
mkDiskSize = "size"
mkDiskSpeed = "speed"
mkDiskSpeedRead = "read"
mkDiskSpeedReadBurstable = "read_burstable"
mkDiskSpeedWrite = "write"
mkDiskSpeedWriteBurstable = "write_burstable"
mkDiskSSD = "ssd"
// MkTimeoutMoveDisk is the name of the timeout_move_disk attribute.
MkTimeoutMoveDisk = "timeout_move_disk"
@ -55,15 +56,18 @@ func Schema() map[string]*schema.Schema {
DefaultFunc: func() (interface{}, error) {
return []interface{}{
map[string]interface{}{
mkDiskDatastoreID: dvDiskDatastoreID,
mkDiskPathInDatastore: nil,
mkDiskFileID: dvDiskFileID,
mkDiskInterface: dvDiskInterface,
mkDiskSize: dvDiskSize,
mkDiskIOThread: dvDiskIOThread,
mkDiskSSD: dvDiskSSD,
mkDiskDiscard: dvDiskDiscard,
mkDiskAIO: dvDiskAIO,
mkDiskBackup: true,
mkDiskCache: dvDiskCache,
mkDiskDatastoreID: dvDiskDatastoreID,
mkDiskDiscard: dvDiskDiscard,
mkDiskFileID: "",
mkDiskInterface: dvDiskInterface,
mkDiskIOThread: false,
mkDiskPathInDatastore: nil,
mkDiskReplicate: true,
mkDiskSize: dvDiskSize,
mkDiskSSD: false,
},
}, nil
},
@ -95,12 +99,31 @@ func Schema() map[string]*schema.Schema {
Computed: true,
ValidateDiagFunc: validators.FileFormat(),
},
mkDiskAIO: {
Type: schema.TypeString,
Description: "The disk AIO mode",
Optional: true,
Default: dvDiskAIO,
ValidateDiagFunc: validation.ToDiagFunc(
validation.StringInSlice([]string{
"io_uring",
"native",
"threads",
}, false),
),
},
mkDiskBackup: {
Type: schema.TypeBool,
Description: "Whether the drive should be included when making backups",
Optional: true,
Default: true,
},
mkDiskFileID: {
Type: schema.TypeString,
Description: "The file id for a disk image",
Optional: true,
ForceNew: true,
Default: dvDiskFileID,
Default: "",
ValidateDiagFunc: validators.FileID(),
},
mkDiskSize: {
@ -114,13 +137,19 @@ func Schema() map[string]*schema.Schema {
Type: schema.TypeBool,
Description: "Whether to use iothreads for this disk drive",
Optional: true,
Default: dvDiskIOThread,
Default: false,
},
mkDiskReplicate: {
Type: schema.TypeBool,
Description: "Whether the drive should be considered for replication jobs",
Optional: true,
Default: true,
},
mkDiskSSD: {
Type: schema.TypeBool,
Description: "Whether to use ssd for this disk drive",
Optional: true,
Default: dvDiskSSD,
Default: false,
},
mkDiskDiscard: {
Type: schema.TypeString,
@ -150,38 +179,66 @@ func Schema() map[string]*schema.Schema {
DefaultFunc: func() (interface{}, error) {
return []interface{}{
map[string]interface{}{
mkDiskSpeedRead: dvDiskSpeedRead,
mkDiskSpeedReadBurstable: dvDiskSpeedReadBurstable,
mkDiskSpeedWrite: dvDiskSpeedWrite,
mkDiskSpeedWriteBurstable: dvDiskSpeedWriteBurstable,
mkDiskIopsRead: 0,
mkDiskIopsWrite: 0,
mkDiskIopsReadBurstable: 0,
mkDiskIopsWriteBurstable: 0,
mkDiskSpeedRead: 0,
mkDiskSpeedReadBurstable: 0,
mkDiskSpeedWrite: 0,
mkDiskSpeedWriteBurstable: 0,
},
}, nil
},
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
mkDiskIopsRead: {
Type: schema.TypeInt,
Description: "The maximum read I/O in operations per second",
Optional: true,
Default: 0,
},
mkDiskIopsWrite: {
Type: schema.TypeInt,
Description: "The maximum write I/O in operations per second",
Optional: true,
Default: 0,
},
mkDiskIopsReadBurstable: {
Type: schema.TypeInt,
Description: "The maximum unthrottled read I/O pool in operations per second",
Optional: true,
Default: 0,
},
mkDiskIopsWriteBurstable: {
Type: schema.TypeInt,
Description: "The maximum unthrottled write I/O pool in operations per second",
Optional: true,
Default: 0,
},
mkDiskSpeedRead: {
Type: schema.TypeInt,
Description: "The maximum read speed in megabytes per second",
Optional: true,
Default: dvDiskSpeedRead,
Default: 0,
},
mkDiskSpeedReadBurstable: {
Type: schema.TypeInt,
Description: "The maximum burstable read speed in megabytes per second",
Optional: true,
Default: dvDiskSpeedReadBurstable,
Default: 0,
},
mkDiskSpeedWrite: {
Type: schema.TypeInt,
Description: "The maximum write speed in megabytes per second",
Optional: true,
Default: dvDiskSpeedWrite,
Default: 0,
},
mkDiskSpeedWriteBurstable: {
Type: schema.TypeInt,
Description: "The maximum burstable write speed in megabytes per second",
Optional: true,
Default: dvDiskSpeedWriteBurstable,
Default: 0,
},
},
},

View File

@ -5072,11 +5072,13 @@ func vmUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
return diag.FromErr(err)
}
err = disk.Update(d, planDisks, allDiskInfo, updateBody)
rr, err := disk.Update(d, planDisks, allDiskInfo, updateBody)
if err != nil {
return diag.FromErr(err)
}
rebootRequired = rebootRequired || rr
// Prepare the new efi disk configuration.
if d.HasChange(mkEFIDisk) {
efiDisk := vmGetEfiDisk(d, nil)