From 634ad690fefa719df7be1c7f8962cd4a5bead79a Mon Sep 17 00:00:00 2001
From: Pavel Boldyrev <627562+bpg@users.noreply.github.com>
Date: Sun, 17 Aug 2025 18:58:40 -0400
Subject: [PATCH] fix(vm): regression: disk re-ordering on re-apply (#2114)

Signed-off-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>
---
 fwprovider/test/resource_vm_disks_test.go | 196 ++++++++++++++++++++++
 proxmoxtf/resource/vm/disk/disk.go        |   5 +-
 proxmoxtf/resource/vm/disk/disk_test.go   | 190 +++++++++++++++++++++
 utils/maps.go                             |   4 +-
 utils/maps_test.go                        |   2 +-
 5 files changed, 391 insertions(+), 6 deletions(-)
 create mode 100644 proxmoxtf/resource/vm/disk/disk_test.go

diff --git a/fwprovider/test/resource_vm_disks_test.go b/fwprovider/test/resource_vm_disks_test.go
index 825c8ea9..0714a2a3 100644
--- a/fwprovider/test/resource_vm_disks_test.go
+++ b/fwprovider/test/resource_vm_disks_test.go
@@ -246,6 +246,202 @@ func TestAccResourceVMDisks(t *testing.T) {
 				RefreshState: true,
 			},
 		}},
+		{"disk ordering consistency", []resource.TestStep{
+			{
+				Config: te.RenderConfig(`
+					resource "proxmox_virtual_environment_vm" "test_disk_ordering" {
+						node_name = "{{.NodeName}}"
+						started   = false
+						name      = "test-disk-ordering"
+
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "virtio2"
+							size         = 8
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "scsi1"
+							size         = 15
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "sata0"
+							size         = 12
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "scsi0"
+							size         = 10
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "virtio0"
+							size         = 20
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "scsi3"
+							size         = 5
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "virtio1"
+							size         = 18
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "scsi2"
+							size         = 25
+						}
+					}`),
+				Check: ResourceAttributes("proxmox_virtual_environment_vm.test_disk_ordering", map[string]string{
+					"disk.0.interface": "virtio2",
+					"disk.0.size":      "8",
+					"disk.1.interface": "scsi1",
+					"disk.1.size":      "15",
+					"disk.2.interface": "sata0",
+					"disk.2.size":      "12",
+					"disk.3.interface": "scsi0",
+					"disk.3.size":      "10",
+					"disk.4.interface": "virtio0",
+					"disk.4.size":      "20",
+					"disk.5.interface": "scsi3",
+					"disk.5.size":      "5",
+					"disk.6.interface": "virtio1",
+					"disk.6.size":      "18",
+					"disk.7.interface": "scsi2",
+					"disk.7.size":      "25",
+				}),
+			},
+			{
+				Config: te.RenderConfig(`
+					resource "proxmox_virtual_environment_vm" "test_disk_ordering" {
+						node_name = "{{.NodeName}}"
+						started   = false
+						name      = "test-disk-ordering"
+
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "virtio2"
+							size         = 8
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "scsi1"
+							size         = 15
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "sata0"
+							size         = 12
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "scsi0"
+							size         = 10
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "virtio0"
+							size         = 20
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "scsi3"
+							size         = 5
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "virtio1"
+							size         = 18
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "scsi2"
+							size         = 25
+						}
+					}`),
+				ConfigPlanChecks: resource.ConfigPlanChecks{
+					PreApply: []plancheck.PlanCheck{
+						plancheck.ExpectEmptyPlan(),
+					},
+				},
+				Check: ResourceAttributes("proxmox_virtual_environment_vm.test_disk_ordering", map[string]string{
+					"disk.0.interface": "virtio2",
+					"disk.0.size":      "8",
+					"disk.1.interface": "scsi1",
+					"disk.1.size":      "15",
+					"disk.2.interface": "sata0",
+					"disk.2.size":      "12",
+					"disk.3.interface": "scsi0",
+					"disk.3.size":      "10",
+					"disk.4.interface": "virtio0",
+					"disk.4.size":      "20",
+					"disk.5.interface": "scsi3",
+					"disk.5.size":      "5",
+					"disk.6.interface": "virtio1",
+					"disk.6.size":      "18",
+					"disk.7.interface": "scsi2",
+					"disk.7.size":      "25",
+				}),
+			},
+			{
+				// Third apply to ensure consistency across multiple applies
+				Config: te.RenderConfig(`
+					resource "proxmox_virtual_environment_vm" "test_disk_ordering" {
+						node_name = "{{.NodeName}}"
+						started   = false
+						name      = "test-disk-ordering"
+
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "virtio2"
+							size         = 8
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "scsi1"
+							size         = 15
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "sata0"
+							size         = 12
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "scsi0"
+							size         = 10
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "virtio0"
+							size         = 20
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "scsi3"
+							size         = 5
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "virtio1"
+							size         = 18
+						}
+						disk {
+							datastore_id = "local-lvm"
+							interface    = "scsi2"
+							size         = 25
+						}
+					}`),
+				ConfigPlanChecks: resource.ConfigPlanChecks{
+					PreApply: []plancheck.PlanCheck{
+						plancheck.ExpectEmptyPlan(),
+					},
+				},
+			},
+		}},
 		{"adding disks", []resource.TestStep{
 			{
 				Config: te.RenderConfig(`
diff --git a/proxmoxtf/resource/vm/disk/disk.go b/proxmoxtf/resource/vm/disk/disk.go
index 903e7fb8..3e6ee446 100644
--- a/proxmoxtf/resource/vm/disk/disk.go
+++ b/proxmoxtf/resource/vm/disk/disk.go
@@ -10,7 +10,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"maps"
 	"regexp"
 	"slices"
 	"strings"
@@ -560,8 +559,8 @@ func Read(
 			}
 		}
 
-		diskList = utils.OrderedListFromMapByKeyValues(diskMap,
-			slices.AppendSeq(make([]string, 0, len(currentDiskMap)), maps.Keys(currentDiskMap)))
+		disks := utils.ListResourcesAttributeValue(currentDiskList, mkDiskInterface)
+		diskList = utils.OrderedListFromMapByKeyValues(diskMap, disks)
 	} else {
 		diskList = utils.OrderedListFromMap(diskMap)
 	}
diff --git a/proxmoxtf/resource/vm/disk/disk_test.go b/proxmoxtf/resource/vm/disk/disk_test.go
new file mode 100644
index 00000000..94801356
--- /dev/null
+++ b/proxmoxtf/resource/vm/disk/disk_test.go
@@ -0,0 +1,190 @@
+/*
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/.
+ */
+
+package disk
+
+import (
+	"context"
+	"reflect"
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/stretchr/testify/require"
+
+	"github.com/bpg/terraform-provider-proxmox/proxmox"
+	"github.com/bpg/terraform-provider-proxmox/proxmox/nodes/vms"
+	"github.com/bpg/terraform-provider-proxmox/proxmox/types"
+)
+
+// TestDiskOrderingDeterministic tests that disk ordering is deterministic
+// and preserves the order from currentDiskList. This test addresses the issue where
+// disk interfaces would randomly reorder on each terraform apply due to
+// Go's non-deterministic map iteration.
+func TestDiskOrderingDeterministic(t *testing.T) {
+	t.Parallel()
+
+	// Create test schema
+	diskSchema := Schema()
+
+	// Create resource data with multiple disks in a specific order that could be affected by map iteration
+	currentDiskList := []interface{}{
+		map[string]interface{}{
+			mkDiskInterface:   "scsi1", // Intentionally put scsi1 first
+			mkDiskDatastoreID: "local",
+			mkDiskSize:        150,
+			mkDiskSpeed:       []interface{}{},
+		},
+		map[string]interface{}{
+			mkDiskInterface:   "scsi0", // Then scsi0 second
+			mkDiskDatastoreID: "local",
+			mkDiskSize:        50,
+			mkDiskSpeed:       []interface{}{},
+		},
+	}
+
+	// Mock VM disk data from API that matches the currentDiskList
+	// Set Format to avoid API calls in the Read function
+	qcow2Format := "qcow2"
+	diskDeviceObjects := vms.CustomStorageDevices{
+		"scsi0": &vms.CustomStorageDevice{
+			FileVolume: "local:50",
+			Size:       types.DiskSizeFromGigabytes(50),
+			Format:     &qcow2Format,
+		},
+		"scsi1": &vms.CustomStorageDevice{
+			FileVolume: "local:150",
+			Size:       types.DiskSizeFromGigabytes(150),
+			Format:     &qcow2Format,
+		},
+	}
+
+	// Run the ordering logic multiple times to ensure deterministic results
+	const iterations = 10
+
+	results := make([][]interface{}, 0, iterations)
+
+	for range iterations {
+		// Create a new resource data for each iteration
+		resourceData := schema.TestResourceDataRaw(t, diskSchema, map[string]interface{}{
+			MkDisk: currentDiskList,
+		})
+
+		// Call the Read function which contains our fixed ordering logic
+		ctx := context.Background()
+		vmID := 100 // Test VM ID
+
+		var client proxmox.Client = nil
+
+		diags := Read(ctx, resourceData, diskDeviceObjects, vmID, client, "test-node", false)
+		require.Empty(t, diags, "Read should not return any diagnostics")
+
+		// Get the resulting disk list
+		diskList := resourceData.Get(MkDisk).([]interface{})
+		results = append(results, diskList)
+	}
+
+	// Verify all results are identical (deterministic ordering)
+	expectedResult := results[0]
+	for i, result := range results {
+		require.True(t, reflect.DeepEqual(expectedResult, result),
+			"Disk ordering should be deterministic across multiple calls. Iteration %d differs from first result", i)
+	}
+
+	// Verify the specific ordering - should preserve currentDiskList order: scsi1, then scsi0
+	require.Len(t, expectedResult, 2, "Should have 2 disks")
+
+	disk0 := expectedResult[0].(map[string]interface{})
+	disk1 := expectedResult[1].(map[string]interface{})
+
+	require.Equal(t, "scsi1", disk0[mkDiskInterface], "First disk should be scsi1 (as in currentDiskList)")
+	require.Equal(t, 150, disk0[mkDiskSize], "First disk should have size 150")
+
+	require.Equal(t, "scsi0", disk1[mkDiskInterface], "Second disk should be scsi0 (as in currentDiskList)")
+	require.Equal(t, 50, disk1[mkDiskSize], "Second disk should have size 50")
+}
+
+// TestDiskOrderingVariousInterfaces tests deterministic ordering with various disk interfaces,
+// ensuring the order from currentDiskList is preserved.
+func TestDiskOrderingVariousInterfaces(t *testing.T) {
+	t.Parallel()
+
+	diskSchema := Schema()
+
+	// Test with various interface types in random order
+	currentDiskList := []interface{}{
+		map[string]interface{}{
+			mkDiskInterface:   "virtio2",
+			mkDiskDatastoreID: "local",
+			mkDiskSize:        30,
+			mkDiskSpeed:       []interface{}{},
+		},
+		map[string]interface{}{
+			mkDiskInterface:   "scsi0",
+			mkDiskDatastoreID: "local",
+			mkDiskSize:        10,
+			mkDiskSpeed:       []interface{}{},
+		},
+		map[string]interface{}{
+			mkDiskInterface:   "sata1",
+			mkDiskDatastoreID: "local",
+			mkDiskSize:        20,
+			mkDiskSpeed:       []interface{}{},
+		},
+		map[string]interface{}{
+			mkDiskInterface:   "virtio0",
+			mkDiskDatastoreID: "local",
+			mkDiskSize:        40,
+			mkDiskSpeed:       []interface{}{},
+		},
+	}
+
+	qcow2Format := "qcow2"
+	diskDeviceObjects := vms.CustomStorageDevices{
+		"scsi0":   &vms.CustomStorageDevice{FileVolume: "local:10", Size: types.DiskSizeFromGigabytes(10), Format: &qcow2Format},
+		"sata1":   &vms.CustomStorageDevice{FileVolume: "local:20", Size: types.DiskSizeFromGigabytes(20), Format: &qcow2Format},
+		"virtio2": &vms.CustomStorageDevice{FileVolume: "local:30", Size: types.DiskSizeFromGigabytes(30), Format: &qcow2Format},
+		"virtio0": &vms.CustomStorageDevice{FileVolume: "local:40", Size: types.DiskSizeFromGigabytes(40), Format: &qcow2Format},
+	}
+
+	// Test multiple iterations
+	const iterations = 5
+
+	results := make([][]interface{}, 0, iterations)
+
+	for range iterations {
+		resourceData := schema.TestResourceDataRaw(t, diskSchema, map[string]interface{}{
+			MkDisk: currentDiskList,
+		})
+
+		ctx := context.Background()
+		vmID := 100
+
+		var client proxmox.Client = nil
+
+		diags := Read(ctx, resourceData, diskDeviceObjects, vmID, client, "test-node", false)
+		require.Empty(t, diags)
+
+		diskList := resourceData.Get(MkDisk).([]interface{})
+		results = append(results, diskList)
+	}
+
+	// Verify deterministic ordering
+	expectedResult := results[0]
+	for i, result := range results {
+		require.True(t, reflect.DeepEqual(expectedResult, result),
+			"Disk ordering should be deterministic for various interfaces. Iteration %d differs", i)
+	}
+
+	// Verify ordering preserves currentDiskList order: virtio2, scsi0, sata1, virtio0
+	require.Len(t, expectedResult, 4)
+
+	expectedOrder := []string{"virtio2", "scsi0", "sata1", "virtio0"}
+	for i, expectedInterface := range expectedOrder {
+		disk := expectedResult[i].(map[string]interface{})
+		require.Equal(t, expectedInterface, disk[mkDiskInterface],
+			"Disk at position %d should be %s (as in currentDiskList)", i, expectedInterface)
+	}
+}
diff --git a/utils/maps.go b/utils/maps.go
index 0ddccd90..6b71ec2f 100644
--- a/utils/maps.go
+++ b/utils/maps.go
@@ -26,7 +26,7 @@ func OrderedListFromMap(inputMap map[string]interface{}) []interface{} {
 		i++
 	}
 
-	slices.SortFunc(keyList, compareWithPrefix)
+	slices.SortFunc(keyList, CompareWithPrefix)
 
 	return OrderedListFromMapByKeyValues(inputMap, keyList)
 }
@@ -36,7 +36,7 @@
 // - If numbers are equal, falls back to string comparison (preserving digit formatting).
 // - If numeric parsing fails, falls back to string comparison.
 // - If prefixes differ, compares the whole values as strings.
-func compareWithPrefix(a, b string) int {
+func CompareWithPrefix(a, b string) int {
 	prefix := commonPrefix(a, b)
 
 	if prefix != "" {
diff --git a/utils/maps_test.go b/utils/maps_test.go
index 6bf33161..9f7b4ae1 100644
--- a/utils/maps_test.go
+++ b/utils/maps_test.go
@@ -114,7 +114,7 @@ func TestCompareWithPrefix(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			t.Parallel()
 
-			assert.Equalf(t, tt.want, compareWithPrefix(tt.args.a, tt.args.b), "compareWithPrefix(%v, %v)", tt.args.a, tt.args.b)
+			assert.Equalf(t, tt.want, CompareWithPrefix(tt.args.a, tt.args.b), "CompareWithPrefix(%v, %v)", tt.args.a, tt.args.b)
 		})
 	}
 }
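
The patch renames compareWithPrefix to the exported CompareWithPrefix but only its doc comment (not its body) appears in the hunks above. The standalone sketch below illustrates the ordering behaviour that doc comment describes — compare numeric suffixes when two keys share a prefix, otherwise fall back to plain string comparison. It is an illustration only: naturalCompare and the sample keys are assumptions for demonstration, not code from the provider.

// Illustrative sketch (not part of the patch): why interface keys such as
// "scsi2" and "scsi10" need a prefix-aware comparison rather than plain
// lexicographic sorting.
package main

import (
	"cmp"
	"fmt"
	"slices"
	"strconv"
	"strings"
)

// naturalCompare is a simplified stand-in for the comparator described above:
// if both values share the same non-numeric prefix, compare their numeric
// suffixes; otherwise compare the whole strings.
func naturalCompare(a, b string) int {
	pa := strings.TrimRight(a, "0123456789")
	pb := strings.TrimRight(b, "0123456789")

	if pa == pb {
		na, errA := strconv.Atoi(a[len(pa):])
		nb, errB := strconv.Atoi(b[len(pb):])

		if errA == nil && errB == nil && na != nb {
			return cmp.Compare(na, nb)
		}
	}

	return cmp.Compare(a, b)
}

func main() {
	keys := []string{"scsi10", "scsi2", "scsi0", "virtio1"}

	lexical := slices.Clone(keys)
	slices.Sort(lexical)
	fmt.Println(lexical) // [scsi0 scsi10 scsi2 virtio1] - lexicographic order

	natural := slices.Clone(keys)
	slices.SortFunc(natural, naturalCompare)
	fmt.Println(natural) // [scsi0 scsi2 scsi10 virtio1] - natural order
}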