
fix(vm,lxc,file): improve timeouts handling (#1222)

Signed-off-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>
Pavel Boldyrev 2024-04-19 12:38:16 -04:00 committed by GitHub
parent 7e4710d3f0
commit 0c9c2066fd
27 changed files with 629 additions and 465 deletions
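
At its core, the change replaces hand-rolled polling loops (elapsed-time bookkeeping plus `time.Sleep`) with caller-scoped deadlines: each operation derives a `context.WithTimeout` from its resource-level timeout attribute, and the waiting itself is expressed with `retry.Do` from `github.com/avast/retry-go/v4`. A minimal, self-contained sketch of the pattern, where `pollStatus` is a hypothetical stand-in for calls such as `GetContainerStatus` or `GetVMStatus`:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/avast/retry-go/v4"
)

// pollStatus is a hypothetical stand-in for status calls such as
// GetContainerStatus or GetVMStatus.
func pollStatus(_ context.Context) (string, error) {
	return "starting", nil // pretend the resource never reaches "running"
}

// waitForStatus bounds the whole wait with the caller's context and
// expresses the poll loop as a retry.Do with unlimited attempts.
func waitForStatus(ctx context.Context, want string) error {
	unexpected := errors.New("unexpected status")

	err := retry.Do(
		func() error {
			status, err := pollStatus(ctx)
			if err != nil {
				return err
			}
			if status != want {
				return unexpected
			}
			return nil
		},
		retry.Context(ctx), // stop as soon as ctx is done
		retry.RetryIf(func(err error) bool { return errors.Is(err, unexpected) }),
		retry.Attempts(0), // retry until the context deadline
		retry.Delay(1*time.Second),
		retry.LastErrorOnly(true),
	)
	if errors.Is(err, context.DeadlineExceeded) {
		return fmt.Errorf("timeout while waiting for status %q", want)
	}

	return err
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	fmt.Println(waitForStatus(ctx, "running")) // times out after ~3s
}
```

With `retry.Attempts(0)` there is no attempt cap, so the context deadline is the only thing that stops the loop, and `retry.Context` makes `retry.Do` return `context.DeadlineExceeded`, which the call sites below translate into a friendly timeout message.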


@ -215,7 +215,9 @@ output "ubuntu_container_public_key" {
meta-argument to ignore changes to this attribute.
- `template` - (Optional) Whether to create a template (defaults to `false`).
- `timeout_create` - (Optional) Timeout for creating a container in seconds (defaults to 1800).
- `timeout_start` - (Optional) Timeout for starting a container in seconds (defaults to 300).
- `timeout_clone` - (Optional) Timeout for cloning a container in seconds (defaults to 1800).
- `timeout_delete` - (Optional) Timeout for deleting a container in seconds (defaults to 60).
- `timeout_update` - (Optional) Timeout for updating a container in seconds (defaults to 1800).
- `unprivileged` - (Optional) Whether the container runs as unprivileged on
the host (defaults to `false`).
- `vm_id` - (Optional) The container identifier


@ -515,8 +515,6 @@ output "ubuntu_vm_public_key" {
1800).
- `timeout_create` - (Optional) Timeout for creating a VM in seconds (defaults to
1800).
- `timeout_move_disk` - (Optional) Timeout for moving the disk of a VM in
seconds (defaults to 1800).
- `timeout_migrate` - (Optional) Timeout for migrating the VM (defaults to
1800).
- `timeout_reboot` - (Optional) Timeout for rebooting a VM in seconds (defaults


@ -8,10 +8,12 @@ package fwprovider
import (
"context"
"errors"
"fmt"
"regexp"
"strconv"
"strings"
"time"
"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
"github.com/hashicorp/terraform-plugin-framework/path"
@ -26,6 +28,7 @@ import (
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/bpg/terraform-provider-proxmox/fwprovider/structure"
"github.com/bpg/terraform-provider-proxmox/proxmox/api"
"github.com/bpg/terraform-provider-proxmox/proxmox"
"github.com/bpg/terraform-provider-proxmox/proxmox/nodes"
@ -361,6 +364,11 @@ func (r *downloadFileResource) Create(
return
}
timeout := time.Duration(plan.UploadTimeout.ValueInt64()) * time.Second
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
fileMetadata, err := r.getURLMetadata(
ctx,
&plan,
@ -394,27 +402,20 @@ func (r *downloadFileResource) Create(
}
storageClient := nodesClient.Storage(plan.Storage.ValueString())
err = storageClient.DownloadFileByURL(
ctx,
&downloadFileReq,
plan.UploadTimeout.ValueInt64(),
)
err = storageClient.DownloadFileByURL(ctx, &downloadFileReq)
if isErrFileAlreadyExists(err) && plan.OverwriteUnmanaged.ValueBool() {
fileID := plan.Content.ValueString() + "/" + plan.FileName.ValueString()
err = storageClient.DeleteDatastoreFile(ctx, fileID)
if err != nil {
if err != nil && !errors.Is(err, api.ErrResourceDoesNotExist) {
resp.Diagnostics.AddError("Error deleting file from datastore",
fmt.Sprintf("Could not delete file '%s', unexpected error: %s", fileID, err.Error()),
)
}
err = storageClient.DownloadFileByURL(
ctx,
&downloadFileReq,
plan.UploadTimeout.ValueInt64(),
)
err = storageClient.DownloadFileByURL(ctx, &downloadFileReq)
}
if err != nil {
@ -610,7 +611,7 @@ func (r *downloadFileResource) Delete(
ctx,
state.ID.ValueString(),
)
if err != nil {
if err != nil && !errors.Is(err, api.ErrResourceDoesNotExist) {
if strings.Contains(err.Error(), "unable to parse") {
resp.Diagnostics.AddWarning(
"Datastore file does not exists",


@ -13,6 +13,8 @@ import (
)
func TestAccDatasourceNode(t *testing.T) {
t.Parallel()
te := initTestEnvironment(t)
tests := []struct {


@ -11,6 +11,7 @@ import (
"fmt"
"math/rand"
"testing"
"time"
"github.com/hashicorp/terraform-plugin-testing/helper/resource"
"github.com/hashicorp/terraform-plugin-testing/terraform"
@ -104,7 +105,10 @@ func testAccResourceContainerCreateCheck(te *testEnvironment) resource.TestCheck
return resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(accTestContainerName, "description", "my\ndescription\nvalue\n"),
func(*terraform.State) error {
err := te.nodeClient().Container(accTestContainerID).WaitForContainerStatus(context.Background(), "running", 10, 1)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
err := te.nodeClient().Container(accTestContainerID).WaitForContainerStatus(ctx, "running")
require.NoError(te.t, err, "container did not start")
return nil
@ -137,7 +141,10 @@ func testAccResourceContainerCreateCloneCheck(te *testEnvironment) resource.Test
return resource.ComposeTestCheckFunc(
func(*terraform.State) error {
err := te.nodeClient().Container(accCloneContainerID).WaitForContainerStatus(context.Background(), "running", 10, 1)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
err := te.nodeClient().Container(accCloneContainerID).WaitForContainerStatus(ctx, "running")
require.NoError(te.t, err, "container did not start")
return nil


@ -9,6 +9,7 @@ package tests
import (
"context"
"testing"
"time"
"github.com/hashicorp/terraform-plugin-testing/helper/resource"
"github.com/stretchr/testify/require"
@ -126,17 +127,23 @@ func TestAccResourceDownloadFile(t *testing.T) {
}},
{"override unmanaged file", []resource.TestStep{{
PreConfig: func() {
err := te.nodeStorageClient().DownloadFileByURL(context.Background(), &storage.DownloadURLPostRequestBody{
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
_ = te.nodeStorageClient().DeleteDatastoreFile(ctx, "iso/fake_file.iso") //nolint: errcheck
err := te.nodeStorageClient().DownloadFileByURL(ctx, &storage.DownloadURLPostRequestBody{
Content: types.StrPtr("iso"),
FileName: types.StrPtr("fake_file.iso"),
Node: types.StrPtr(te.nodeName),
Storage: types.StrPtr(te.datastoreID),
URL: types.StrPtr(fakeFileISO),
}, 600)
})
require.NoError(t, err)
t.Cleanup(func() {
err := te.nodeStorageClient().DeleteDatastoreFile(context.Background(), "iso/fake_file.iso")
require.NoError(t, err)
e := te.nodeStorageClient().DeleteDatastoreFile(context.Background(), "iso/fake_file.iso")
require.NoError(t, e)
})
},
Config: te.renderConfig(`


@ -311,6 +311,11 @@ func (c *client) HTTP() *http.Client {
// validateResponseCode ensures that a response is valid.
func validateResponseCode(res *http.Response) error {
if res.StatusCode < 200 || res.StatusCode >= 300 {
if res.StatusCode == http.StatusNotFound ||
(res.StatusCode == http.StatusInternalServerError && strings.Contains(res.Status, "does not exist")) {
return ErrResourceDoesNotExist
}
msg := strings.TrimPrefix(res.Status, fmt.Sprintf("%d ", res.StatusCode))
errRes := &ErrorResponseBody{}

proxmox/api/client_test.go (new file)

@ -0,0 +1,105 @@
package api
import (
"context"
"errors"
"net/http"
"reflect"
"strconv"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
// RoundTripFunc is an http.RoundTripper implemented as a plain function.
type RoundTripFunc func(req *http.Request) *http.Response
// RoundTrip implements http.RoundTripper by calling f.
func (f RoundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
return f(req), nil
}
// newTestClient returns an *http.Client with its Transport replaced to avoid making real network calls.
func newTestClient(fn RoundTripFunc) *http.Client {
return &http.Client{
Transport: fn,
}
}
type dummyAuthenticator struct{}
func (dummyAuthenticator) IsRoot() bool {
return false
}
func (dummyAuthenticator) IsRootTicket() bool {
return false
}
func (dummyAuthenticator) AuthenticateRequest(_ context.Context, _ *http.Request) error {
return nil
}
func TestClientDoRequest(t *testing.T) {
t.Parallel()
tests := []struct {
name string
status string
wantErr error
}{
{name: "no error", status: "200 OK", wantErr: nil},
{name: "not exists - 404 status", status: "404 missing", wantErr: ErrResourceDoesNotExist},
{name: "not exists - 500 status", status: "500 This thing does not exist", wantErr: ErrResourceDoesNotExist},
{name: "500 status", status: "500 Internal Server Error", wantErr: &HTTPError{
Code: 500,
Message: "Internal Server Error",
}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
c := client{
conn: &Connection{
endpoint: "http://localhost",
httpClient: newTestClient(func(_ *http.Request) *http.Response {
sc, err := strconv.Atoi(strings.Fields(tt.status)[0])
require.NoError(t, err)
return &http.Response{
Status: tt.status,
StatusCode: sc,
Body: nil,
}
}),
},
auth: dummyAuthenticator{},
}
err := c.DoRequest(context.Background(), "POST", "any", nil, nil)
fail := false
switch {
case err == nil && tt.wantErr == nil:
return
case err != nil && tt.wantErr == nil:
fallthrough
case err == nil && tt.wantErr != nil:
fail = true
default:
var he, we *HTTPError
if errors.As(err, &he) && errors.As(tt.wantErr, &we) {
fail = !reflect.DeepEqual(he, we)
} else {
fail = !errors.Is(err, tt.wantErr)
}
}
if fail {
t.Errorf("DoRequest() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}


@ -6,7 +6,9 @@
package api
import "fmt"
import (
"fmt"
)
// Error is a sentinel error type for API errors.
type Error string
@ -18,12 +20,15 @@ func (err Error) Error() string {
// ErrNoDataObjectInResponse is returned when the server does not include a data object in the response.
const ErrNoDataObjectInResponse Error = "the server did not include a data object in the response"
// ErrResourceDoesNotExist is returned when the requested resource does not exist.
const ErrResourceDoesNotExist Error = "the requested resource does not exist"
// HTTPError is a generic error type for HTTP errors.
type HTTPError struct {
Code int
Message string
}
func (err *HTTPError) Error() string {
func (err HTTPError) Error() string {
return fmt.Sprintf("received an HTTP %d response - Reason: %s", err.Code, err.Message)
}
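
Because `Error` is a plain string type and `ErrResourceDoesNotExist` a constant of it, the sentinel survives `fmt.Errorf("...: %w", err)` wrapping and can be matched with `errors.Is`, replacing the `strings.Contains(err.Error(), "HTTP 404")` checks removed throughout this commit. A small self-contained sketch (the type and constant are re-declared here for illustration):

```go
package main

import (
	"errors"
	"fmt"
)

// Error mirrors the sentinel error type from proxmox/api.
type Error string

func (err Error) Error() string { return string(err) }

// ErrResourceDoesNotExist mirrors the sentinel added by this commit.
const ErrResourceDoesNotExist Error = "the requested resource does not exist"

func main() {
	// A wrapped error, as returned by helpers such as DeleteDatastoreFile.
	err := fmt.Errorf("error deleting file %s: %w", "iso/fake_file.iso", ErrResourceDoesNotExist)

	// Callers branch on the sentinel instead of matching "HTTP 404" text.
	if errors.Is(err, ErrResourceDoesNotExist) {
		fmt.Println("resource already gone; treating delete as a no-op")
	}
}
```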


@ -8,11 +8,14 @@ package containers
import (
"context"
"errors"
"fmt"
"net/http"
"strings"
"time"
"github.com/avast/retry-go/v4"
"github.com/bpg/terraform-provider-proxmox/proxmox/api"
)
@ -27,13 +30,13 @@ func (c *Client) CloneContainer(ctx context.Context, d *CloneRequestBody) error
}
// CreateContainer creates a container.
func (c *Client) CreateContainer(ctx context.Context, d *CreateRequestBody, timeout int) error {
func (c *Client) CreateContainer(ctx context.Context, d *CreateRequestBody) error {
taskID, err := c.CreateContainerAsync(ctx, d)
if err != nil {
return err
}
err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
err = c.Tasks().WaitForTask(ctx, *taskID)
if err != nil {
return fmt.Errorf("error waiting for container created: %w", err)
}
@ -120,7 +123,7 @@ func (c *Client) ShutdownContainer(ctx context.Context, d *ShutdownRequestBody)
}
// StartContainer starts a container if it is not already running.
func (c *Client) StartContainer(ctx context.Context, timeout int) error {
func (c *Client) StartContainer(ctx context.Context) error {
status, err := c.GetContainerStatus(ctx)
if err != nil {
return fmt.Errorf("error retrieving container status: %w", err)
@ -135,13 +138,13 @@ func (c *Client) StartContainer(ctx context.Context, timeout int) error {
return fmt.Errorf("error starting container: %w", err)
}
err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
err = c.Tasks().WaitForTask(ctx, *taskID)
if err != nil {
return fmt.Errorf("error waiting for container start: %w", err)
}
// the timeout here should probably be configurable
err = c.WaitForContainerStatus(ctx, "running", timeout*2, 5)
err = c.WaitForContainerStatus(ctx, "running")
if err != nil {
return fmt.Errorf("error waiting for container start: %w", err)
}
@ -186,74 +189,77 @@ func (c *Client) UpdateContainer(ctx context.Context, d *UpdateRequestBody) erro
}
// WaitForContainerStatus waits for a container to reach a specific state.
func (c *Client) WaitForContainerStatus(ctx context.Context, status string, timeout int, delay int) error {
func (c *Client) WaitForContainerStatus(ctx context.Context, status string) error {
status = strings.ToLower(status)
timeDelay := int64(delay)
timeMax := float64(timeout)
timeStart := time.Now()
timeElapsed := timeStart.Sub(timeStart)
unexpectedStatus := fmt.Errorf("unexpected status %q", status)
for timeElapsed.Seconds() < timeMax {
if int64(timeElapsed.Seconds())%timeDelay == 0 {
err := retry.Do(
func() error {
data, err := c.GetContainerStatus(ctx)
if err != nil {
return fmt.Errorf("error retrieving container status: %w", err)
return err
}
if data.Status == status {
return nil
if data.Status != status {
return unexpectedStatus
}
time.Sleep(1 * time.Second)
}
time.Sleep(200 * time.Millisecond)
timeElapsed = time.Since(timeStart)
if ctx.Err() != nil {
return fmt.Errorf("context error: %w", ctx.Err())
}
}
return fmt.Errorf(
"timeout while waiting for container \"%d\" to enter the status \"%s\"",
c.VMID,
status,
return nil
},
retry.Context(ctx),
retry.RetryIf(func(err error) bool {
return errors.Is(err, unexpectedStatus)
}),
retry.Attempts(0), // retry until context deadline
retry.Delay(1*time.Second),
retry.LastErrorOnly(true),
)
}
// WaitForContainerLock waits for a container lock to be released.
func (c *Client) WaitForContainerLock(ctx context.Context, timeout int, delay int, ignoreErrorResponse bool) error {
timeDelay := int64(delay)
timeMax := float64(timeout)
timeStart := time.Now()
timeElapsed := timeStart.Sub(timeStart)
for timeElapsed.Seconds() < timeMax {
if int64(timeElapsed.Seconds())%timeDelay == 0 {
data, err := c.GetContainerStatus(ctx)
if err != nil {
if !ignoreErrorResponse {
return fmt.Errorf("error retrieving container status: %w", err)
}
} else if data.Lock == nil || *data.Lock == "" {
return nil
}
time.Sleep(1 * time.Second)
}
time.Sleep(200 * time.Millisecond)
timeElapsed = time.Since(timeStart)
if ctx.Err() != nil {
return fmt.Errorf("context error: %w", ctx.Err())
}
if errors.Is(err, context.DeadlineExceeded) {
return fmt.Errorf("timeout while waiting for container %d to enter the status %q", c.VMID, status)
}
return fmt.Errorf("timeout while waiting for container \"%d\" to become unlocked", c.VMID)
if err != nil {
return fmt.Errorf("error waiting for container %d to enter the status %q: %w", c.VMID, status, err)
}
return nil
}
// WaitForContainerConfigUnlock waits for a container lock to be released.
func (c *Client) WaitForContainerConfigUnlock(ctx context.Context, ignoreErrorResponse bool) error {
stillLocked := errors.New("still locked")
err := retry.Do(
func() error {
data, err := c.GetContainerStatus(ctx)
if err != nil {
return err
}
if data.Lock != nil && *data.Lock != "" {
return stillLocked
}
return nil
},
retry.Context(ctx),
retry.RetryIf(func(err error) bool {
return errors.Is(err, stillLocked) || ignoreErrorResponse
}),
retry.Attempts(0), // retry until context deadline
retry.Delay(1*time.Second),
retry.LastErrorOnly(true),
)
if errors.Is(err, context.DeadlineExceeded) {
return fmt.Errorf("timeout while waiting for container %d configuration to become unlocked", c.VMID)
}
if err != nil && !ignoreErrorResponse {
return fmt.Errorf("error waiting for container %d configuration to become unlocked: %w", c.VMID, err)
}
return nil
}


@ -22,7 +22,7 @@ import (
)
const (
networkReloadTimeoutSec = 5
networkReloadTimeout = 10 * time.Second
)
// reloadLock is used to prevent concurrent network reloads.
@ -66,6 +66,9 @@ func (c *Client) CreateNetworkInterface(ctx context.Context, d *NetworkInterface
// ReloadNetworkConfiguration reloads the network configuration for a specific node.
func (c *Client) ReloadNetworkConfiguration(ctx context.Context) error {
ctx, cancel := context.WithTimeout(ctx, networkReloadTimeout)
defer cancel()
reloadLock.Lock()
defer reloadLock.Unlock()
@ -82,8 +85,9 @@ func (c *Client) ReloadNetworkConfiguration(ctx context.Context) error {
return api.ErrNoDataObjectInResponse
}
return c.Tasks().WaitForTask(ctx, *resBody.Data, networkReloadTimeoutSec, 1)
return c.Tasks().WaitForTask(ctx, *resBody.Data)
},
retry.Context(ctx),
retry.Delay(1*time.Second),
retry.Attempts(3),
retry.RetryIf(func(err error) bool {


@ -8,11 +8,14 @@ package storage
import (
"context"
"errors"
"fmt"
"net/http"
"net/url"
"sort"
"github.com/avast/retry-go/v4"
"github.com/bpg/terraform-provider-proxmox/proxmox/api"
)
@ -21,17 +24,17 @@ func (c *Client) DeleteDatastoreFile(
ctx context.Context,
volumeID string,
) error {
err := c.DoRequest(
ctx,
http.MethodDelete,
c.ExpandPath(
fmt.Sprintf(
"content/%s",
url.PathEscape(volumeID),
),
),
nil,
nil,
path := c.ExpandPath(fmt.Sprintf("content/%s", url.PathEscape(volumeID)))
err := retry.Do(
func() error {
return c.DoRequest(ctx, http.MethodDelete, path, nil, nil)
},
retry.Context(ctx),
retry.RetryIf(func(err error) bool {
return !errors.Is(err, api.ErrResourceDoesNotExist)
}),
retry.LastErrorOnly(true),
)
if err != nil {
return fmt.Errorf("error deleting file %s from datastore %s: %w", volumeID, c.StorageName, err)


@ -18,7 +18,6 @@ import (
func (c *Client) DownloadFileByURL(
ctx context.Context,
d *DownloadURLPostRequestBody,
uploadTimeout int64,
) error {
resBody := &DownloadURLResponseBody{}
@ -31,7 +30,7 @@ func (c *Client) DownloadFileByURL(
return api.ErrNoDataObjectInResponse
}
taskErr := c.Tasks().WaitForTask(ctx, *resBody.TaskID, int(uploadTimeout), 5)
taskErr := c.Tasks().WaitForTask(ctx, *resBody.TaskID)
if taskErr != nil {
err = fmt.Errorf(
"error download file to datastore %s: failed waiting for url download: %w",


@ -17,7 +17,6 @@ import (
func (c *Client) APIUpload(
ctx context.Context,
d *api.FileUploadRequest,
uploadTimeout int,
tempDir string,
) (*DatastoreUploadResponseBody, error) {
tflog.Debug(ctx, "uploading file to datastore using PVE API", map[string]interface{}{
@ -149,7 +148,7 @@ func (c *Client) APIUpload(
return nil, fmt.Errorf("error uploading file to datastore %s: no uploadID", c.StorageName)
}
err = c.Tasks().WaitForTask(ctx, *resBody.UploadID, uploadTimeout, 5)
err = c.Tasks().WaitForTask(ctx, *resBody.UploadID)
if err != nil {
return nil, fmt.Errorf("error uploading file to datastore %s: failed waiting for upload - %w", c.StorageName, err)
}


@ -13,6 +13,8 @@ import (
"net/http"
"time"
"github.com/avast/retry-go/v4"
"github.com/bpg/terraform-provider-proxmox/proxmox/api"
)
@ -25,13 +27,7 @@ func (c *Client) GetTaskStatus(ctx context.Context, upid string) (*GetTaskStatus
return nil, fmt.Errorf("error building path for task status: %w", err)
}
err = c.DoRequest(
ctx,
http.MethodGet,
path,
nil,
resBody,
)
err = c.DoRequest(ctx, http.MethodGet, path, nil, resBody)
if err != nil {
return nil, fmt.Errorf("error retrieving task status: %w", err)
}
@ -55,13 +51,7 @@ func (c *Client) GetTaskLog(ctx context.Context, upid string) ([]string, error)
return lines, fmt.Errorf("error building path for task status: %w", err)
}
err = c.DoRequest(
ctx,
http.MethodGet,
path,
nil,
resBody,
)
err = c.DoRequest(ctx, http.MethodGet, path, nil, resBody)
if err != nil {
return lines, fmt.Errorf("error retrieving task status: %w", err)
}
@ -84,14 +74,12 @@ func (c *Client) DeleteTask(ctx context.Context, upid string) error {
return fmt.Errorf("error creating task path: %w", err)
}
err = c.DoRequest(
ctx,
http.MethodDelete,
path,
nil,
nil,
)
err = c.DoRequest(ctx, http.MethodDelete, path, nil, nil)
if err != nil {
if errors.Is(err, api.ErrResourceDoesNotExist) {
return nil
}
return fmt.Errorf("error deleting task: %w", err)
}
@ -99,62 +87,53 @@ func (c *Client) DeleteTask(ctx context.Context, upid string) error {
}
// WaitForTask waits for a specific task to complete.
func (c *Client) WaitForTask(ctx context.Context, upid string, timeoutSec, delaySec int) error {
timeDelay := int64(delaySec)
timeMax := float64(timeoutSec)
timeStart := time.Now()
timeElapsed := timeStart.Sub(timeStart)
func (c *Client) WaitForTask(ctx context.Context, upid string) error {
errStillRunning := errors.New("still running")
isCriticalError := func(err error) bool {
var target *api.HTTPError
if errors.As(err, &target) {
if target.Code != http.StatusBadRequest {
// this is a special case to account for eventual consistency
// when creating a task -- the task may not be available via status API
// immediately after creation
return true
}
}
return err != nil
}
for timeElapsed.Seconds() < timeMax {
if int64(timeElapsed.Seconds())%timeDelay == 0 {
status, err := retry.DoWithData(
func() (*GetTaskStatusResponseData, error) {
status, err := c.GetTaskStatus(ctx, upid)
if isCriticalError(err) {
return err
if err != nil {
return nil, err
}
if status.Status != "running" {
if status.ExitCode != "OK" {
return fmt.Errorf(
"task \"%s\" failed to complete with exit code: %s",
upid,
status.ExitCode,
)
if status.Status == "running" {
return nil, errStillRunning
}
return status, err
},
retry.Context(ctx),
retry.RetryIf(func(err error) bool {
var target *api.HTTPError
if errors.As(err, &target) {
if target.Code == http.StatusBadRequest {
// this is a special case to account for eventual consistency
// when creating a task -- the task may not be available via status API
// immediately after creation
return true
}
return nil
}
time.Sleep(1 * time.Second)
}
return errors.Is(err, errStillRunning)
}),
retry.LastErrorOnly(true),
retry.Attempts(0), // retry until context deadline
retry.DelayType(retry.FixedDelay),
retry.Delay(time.Second),
)
time.Sleep(200 * time.Millisecond)
timeElapsed = time.Since(timeStart)
if ctx.Err() != nil {
return fmt.Errorf(
"context error while waiting for task \"%s\" to complete: %w",
upid, ctx.Err(),
)
}
if errors.Is(err, context.DeadlineExceeded) {
return fmt.Errorf("timeout while waiting for task %q to complete", upid)
}
return fmt.Errorf(
"timeout while waiting for task \"%s\" to complete",
upid,
)
if err != nil {
return fmt.Errorf("error while waiting for task %q to complete: %w", upid, err)
}
if status.ExitCode != "OK" {
return fmt.Errorf("task %q failed to complete with exit code: %s", upid, status.ExitCode)
}
return nil
}
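
Since `WaitForTask` no longer takes `timeoutSec`/`delaySec` arguments, the caller's context is the only bound on the wait. A minimal sketch of the new calling convention, with `waitForTask` as a hypothetical stand-in for `c.Tasks().WaitForTask`:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// waitForTask is a hypothetical stand-in for tasks.Client.WaitForTask:
// it takes no timeout parameters and blocks until the task completes
// or the context is done.
func waitForTask(ctx context.Context, upid string) error {
	select {
	case <-time.After(10 * time.Second): // pretend the task takes 10 seconds
		return nil
	case <-ctx.Done():
		return fmt.Errorf("timeout while waiting for task %q to complete", upid)
	}
}

func main() {
	// The deadline is owned by the caller, e.g. derived from the
	// resource's timeout_create attribute.
	createTimeoutSec := 2
	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(createTimeoutSec)*time.Second)
	defer cancel()

	if err := waitForTask(ctx, "UPID:node1:000A1B2C:task"); err != nil {
		fmt.Println(err) // prints the timeout error after ~2s
	}
}
```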


@ -8,6 +8,7 @@ package vms
import (
"context"
"errors"
"fmt"
"net"
"net/http"
@ -23,7 +24,7 @@ import (
)
// CloneVM clones a virtual machine.
func (c *Client) CloneVM(ctx context.Context, retries int, d *CloneRequestBody, timeout int) error {
func (c *Client) CloneVM(ctx context.Context, retries int, d *CloneRequestBody) error {
var err error
resBody := &MoveDiskResponseBody{}
@ -43,7 +44,7 @@ func (c *Client) CloneVM(ctx context.Context, retries int, d *CloneRequestBody,
return api.ErrNoDataObjectInResponse
}
return c.Tasks().WaitForTask(ctx, *resBody.Data, timeout, 5)
return c.Tasks().WaitForTask(ctx, *resBody.Data)
}, retry.Attempts(uint(retries)), retry.Delay(10*time.Second))
if err != nil {
return fmt.Errorf("error waiting for VM clone: %w", err)
@ -53,13 +54,13 @@ func (c *Client) CloneVM(ctx context.Context, retries int, d *CloneRequestBody,
}
// CreateVM creates a virtual machine.
func (c *Client) CreateVM(ctx context.Context, d *CreateRequestBody, timeout int) error {
func (c *Client) CreateVM(ctx context.Context, d *CreateRequestBody) error {
taskID, err := c.CreateVMAsync(ctx, d)
if err != nil {
return err
}
err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 1)
err = c.Tasks().WaitForTask(ctx, *taskID)
if err != nil {
return fmt.Errorf("error waiting for VM creation: %w", err)
}
@ -67,7 +68,7 @@ func (c *Client) CreateVM(ctx context.Context, d *CreateRequestBody, timeout int
return nil
}
// CreateVMAsync creates a virtual machine asynchronously.
// CreateVMAsync creates a virtual machine asynchronously. Returns the ID of the started task.
func (c *Client) CreateVMAsync(ctx context.Context, d *CreateRequestBody) (*string, error) {
resBody := &CreateResponseBody{}
@ -76,30 +77,57 @@ func (c *Client) CreateVMAsync(ctx context.Context, d *CreateRequestBody) (*stri
return nil, fmt.Errorf("error creating VM: %w", err)
}
if resBody.Data == nil {
if resBody.TaskID == nil {
return nil, api.ErrNoDataObjectInResponse
}
return resBody.Data, nil
return resBody.TaskID, nil
}
// DeleteVM deletes a virtual machine.
func (c *Client) DeleteVM(ctx context.Context) error {
err := c.DoRequest(ctx, http.MethodDelete, c.ExpandPath("?destroy-unreferenced-disks=1&purge=1"), nil, nil)
taskID, err := c.DeleteVMAsync(ctx)
if err != nil {
return fmt.Errorf("error deleting VM: %w", err)
return err
}
err = c.Tasks().WaitForTask(ctx, *taskID)
if err != nil {
return fmt.Errorf("error waiting for VM deletion: %w", err)
}
return nil
}
// DeleteVMAsync deletes a virtual machine asynchronously. Returns the ID of the started task.
func (c *Client) DeleteVMAsync(ctx context.Context) (*string, error) {
// PVE may return a 500 error "got no worker upid - start worker failed", so we retry a few times.
resBody := &DeleteResponseBody{}
err := retry.Do(
func() error {
return c.DoRequest(ctx, http.MethodDelete, c.ExpandPath("?destroy-unreferenced-disks=1&purge=1"), nil, resBody)
},
retry.Context(ctx),
retry.RetryIf(func(err error) bool {
return !errors.Is(err, api.ErrResourceDoesNotExist)
}),
retry.LastErrorOnly(true),
)
if err != nil {
return nil, fmt.Errorf("error deleting VM: %w", err)
}
return resBody.TaskID, nil
}
// GetVM retrieves a virtual machine.
func (c *Client) GetVM(ctx context.Context) (*GetResponseData, error) {
resBody := &GetResponseBody{}
err := c.DoRequest(ctx, http.MethodGet, c.ExpandPath("config"), nil, resBody)
if err != nil {
return nil, fmt.Errorf("error retrieving VM: %w", err)
return nil, fmt.Errorf("error retrieving VM %d: %w", c.VMID, err)
}
if resBody.Data == nil {
@ -142,13 +170,13 @@ func (c *Client) GetVMStatus(ctx context.Context) (*GetStatusResponseData, error
}
// MigrateVM migrates a virtual machine.
func (c *Client) MigrateVM(ctx context.Context, d *MigrateRequestBody, timeout int) error {
func (c *Client) MigrateVM(ctx context.Context, d *MigrateRequestBody) error {
taskID, err := c.MigrateVMAsync(ctx, d)
if err != nil {
return err
}
err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
err = c.Tasks().WaitForTask(ctx, *taskID)
if err != nil {
return fmt.Errorf("error waiting for VM migration: %w", err)
}
@ -173,7 +201,7 @@ func (c *Client) MigrateVMAsync(ctx context.Context, d *MigrateRequestBody) (*st
}
// MoveVMDisk moves a virtual machine disk.
func (c *Client) MoveVMDisk(ctx context.Context, d *MoveDiskRequestBody, timeout int) error {
func (c *Client) MoveVMDisk(ctx context.Context, d *MoveDiskRequestBody) error {
taskID, err := c.MoveVMDiskAsync(ctx, d)
if err != nil {
if strings.Contains(err.Error(), "you can't move to the same storage with same format") {
@ -184,7 +212,7 @@ func (c *Client) MoveVMDisk(ctx context.Context, d *MoveDiskRequestBody, timeout
return err
}
err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
err = c.Tasks().WaitForTask(ctx, *taskID)
if err != nil {
return fmt.Errorf("error waiting for VM disk move: %w", err)
}
@ -225,13 +253,13 @@ func (c *Client) ListVMs(ctx context.Context) ([]*ListResponseData, error) {
}
// RebootVM reboots a virtual machine.
func (c *Client) RebootVM(ctx context.Context, d *RebootRequestBody, timeout int) error {
func (c *Client) RebootVM(ctx context.Context, d *RebootRequestBody) error {
taskID, err := c.RebootVMAsync(ctx, d)
if err != nil {
return err
}
err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
err = c.Tasks().WaitForTask(ctx, *taskID)
if err != nil {
return fmt.Errorf("error waiting for VM reboot: %w", err)
}
@ -256,15 +284,17 @@ func (c *Client) RebootVMAsync(ctx context.Context, d *RebootRequestBody) (*stri
}
// ResizeVMDisk resizes a virtual machine disk.
func (c *Client) ResizeVMDisk(ctx context.Context, d *ResizeDiskRequestBody, timeout int) error {
err := retry.Do(func() error {
taskID, err := c.ResizeVMDiskAsync(ctx, d)
if err != nil {
return err
}
func (c *Client) ResizeVMDisk(ctx context.Context, d *ResizeDiskRequestBody) error {
err := retry.Do(
func() error {
taskID, err := c.ResizeVMDiskAsync(ctx, d)
if err != nil {
return err
}
return c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
},
return c.Tasks().WaitForTask(ctx, *taskID)
},
retry.Context(ctx),
retry.Attempts(3),
retry.Delay(1*time.Second),
retry.LastErrorOnly(false),
@ -296,13 +326,13 @@ func (c *Client) ResizeVMDiskAsync(ctx context.Context, d *ResizeDiskRequestBody
}
// ShutdownVM shuts down a virtual machine.
func (c *Client) ShutdownVM(ctx context.Context, d *ShutdownRequestBody, timeout int) error {
func (c *Client) ShutdownVM(ctx context.Context, d *ShutdownRequestBody) error {
taskID, err := c.ShutdownVMAsync(ctx, d)
if err != nil {
return err
}
err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
err = c.Tasks().WaitForTask(ctx, *taskID)
if err != nil {
return fmt.Errorf("error waiting for VM shutdown: %w", err)
}
@ -328,13 +358,13 @@ func (c *Client) ShutdownVMAsync(ctx context.Context, d *ShutdownRequestBody) (*
// StartVM starts a virtual machine.
// Returns the task log if the VM had warnings at startup, or fails to start.
func (c *Client) StartVM(ctx context.Context, timeout int) ([]string, error) {
taskID, err := c.StartVMAsync(ctx, timeout)
func (c *Client) StartVM(ctx context.Context, timeoutSec int) ([]string, error) {
taskID, err := c.StartVMAsync(ctx, timeoutSec)
if err != nil {
return nil, err
}
err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
err = c.Tasks().WaitForTask(ctx, *taskID)
if err != nil {
log, e := c.Tasks().GetTaskLog(ctx, *taskID)
if e != nil {
@ -357,9 +387,9 @@ func (c *Client) StartVM(ctx context.Context, timeout int) ([]string, error) {
}
// StartVMAsync starts a virtual machine asynchronously.
func (c *Client) StartVMAsync(ctx context.Context, timeout int) (*string, error) {
func (c *Client) StartVMAsync(ctx context.Context, timeoutSec int) (*string, error) {
reqBody := &StartRequestBody{
TimeoutSeconds: &timeout,
TimeoutSeconds: &timeoutSec,
}
resBody := &StartResponseBody{}
@ -376,13 +406,13 @@ func (c *Client) StartVMAsync(ctx context.Context, timeout int) (*string, error)
}
// StopVM stops a virtual machine.
func (c *Client) StopVM(ctx context.Context, timeout int) error {
func (c *Client) StopVM(ctx context.Context) error {
taskID, err := c.StopVMAsync(ctx)
if err != nil {
return err
}
err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
err = c.Tasks().WaitForTask(ctx, *taskID)
if err != nil {
return fmt.Errorf("error waiting for VM stop: %w", err)
}
@ -437,7 +467,7 @@ func (c *Client) WaitForNetworkInterfacesFromVMAgent(
ctx context.Context,
timeout int, // time in seconds to wait until giving up
delay int, // the delay in seconds between requests to the agent
waitForIP bool, // whether or not to block until an IP is found, or just block until the interfaces are published
waitForIP bool, // whether to block until an IP is found, or just block until the interfaces are published
) (*GetQEMUNetworkInterfacesResponseData, error) {
delaySeconds := int64(delay)
timeMaxSeconds := float64(timeout)
@ -525,103 +555,78 @@ func (c *Client) WaitForNetworkInterfacesFromVMAgent(
)
}
// WaitForNoNetworkInterfacesFromVMAgent waits for a virtual machine's QEMU agent to unpublish the network interfaces.
func (c *Client) WaitForNoNetworkInterfacesFromVMAgent(ctx context.Context, timeout int, delay int) error {
timeDelay := int64(delay)
timeMax := float64(timeout)
timeStart := time.Now()
timeElapsed := timeStart.Sub(timeStart)
for timeElapsed.Seconds() < timeMax {
if int64(timeElapsed.Seconds())%timeDelay == 0 {
_, err := c.GetVMNetworkInterfacesFromAgent(ctx)
if err == nil {
return nil
}
time.Sleep(1 * time.Second)
}
time.Sleep(200 * time.Millisecond)
timeElapsed = time.Since(timeStart)
if ctx.Err() != nil {
return fmt.Errorf("error waiting for VM network interfaces: %w", ctx.Err())
}
}
return fmt.Errorf(
"timeout while waiting for the QEMU agent on VM \"%d\" to unpublish the network interfaces",
c.VMID,
)
}
// WaitForVMConfigUnlock waits for a virtual machine configuration to become unlocked.
func (c *Client) WaitForVMConfigUnlock(ctx context.Context, timeout int, delay int, ignoreErrorResponse bool) error {
timeDelay := int64(delay)
timeMax := float64(timeout)
timeStart := time.Now()
timeElapsed := timeStart.Sub(timeStart)
func (c *Client) WaitForVMConfigUnlock(ctx context.Context, ignoreErrorResponse bool) error {
stillLocked := errors.New("still locked")
for timeElapsed.Seconds() < timeMax {
if int64(timeElapsed.Seconds())%timeDelay == 0 {
data, err := c.GetVMStatus(ctx)
if err != nil {
if !ignoreErrorResponse {
return err
}
} else if data.Lock == nil || *data.Lock == "" {
return nil
}
time.Sleep(1 * time.Second)
}
time.Sleep(200 * time.Millisecond)
timeElapsed = time.Since(timeStart)
if ctx.Err() != nil {
return fmt.Errorf("error waiting for VM configuration to become unlocked: %w", ctx.Err())
}
}
return fmt.Errorf("timeout while waiting for VM \"%d\" configuration to become unlocked", c.VMID)
}
// WaitForVMStatus waits for a virtual machine to reach a specific status.
func (c *Client) WaitForVMStatus(ctx context.Context, state string, timeout int, delay int) error {
state = strings.ToLower(state)
timeDelay := int64(delay)
timeMax := float64(timeout)
timeStart := time.Now()
timeElapsed := timeStart.Sub(timeStart)
for timeElapsed.Seconds() < timeMax {
if int64(timeElapsed.Seconds())%timeDelay == 0 {
err := retry.Do(
func() error {
data, err := c.GetVMStatus(ctx)
if err != nil {
return err
}
if data.Status == state {
return nil
if data.Lock != nil && *data.Lock != "" {
return stillLocked
}
time.Sleep(1 * time.Second)
}
return nil
},
retry.Context(ctx),
retry.RetryIf(func(err error) bool {
return errors.Is(err, stillLocked) || ignoreErrorResponse
}),
retry.Attempts(0), // retry until context deadline
retry.Delay(1*time.Second),
retry.LastErrorOnly(true),
)
time.Sleep(200 * time.Millisecond)
timeElapsed = time.Since(timeStart)
if ctx.Err() != nil {
return fmt.Errorf("error waiting for VM state: %w", ctx.Err())
}
if errors.Is(err, context.DeadlineExceeded) {
return fmt.Errorf("timeout while waiting for VM %d configuration to become unlocked", c.VMID)
}
return fmt.Errorf("timeout while waiting for VM \"%d\" to enter the state \"%s\"", c.VMID, state)
if err != nil && !ignoreErrorResponse {
return fmt.Errorf("error waiting for VM %d configuration to become unlocked: %w", c.VMID, err)
}
return nil
}
// WaitForVMStatus waits for a virtual machine to reach a specific status.
func (c *Client) WaitForVMStatus(ctx context.Context, status string) error {
status = strings.ToLower(status)
unexpectedStatus := fmt.Errorf("unexpected status %q", status)
err := retry.Do(
func() error {
data, err := c.GetVMStatus(ctx)
if err != nil {
return err
}
if data.Status != status {
return unexpectedStatus
}
return nil
},
retry.Context(ctx),
retry.RetryIf(func(err error) bool {
return errors.Is(err, unexpectedStatus)
}),
retry.Attempts(0), // retry until context deadline
retry.Delay(1*time.Second),
retry.LastErrorOnly(true),
)
if errors.Is(err, context.DeadlineExceeded) {
return fmt.Errorf("timeout while waiting for VM %d to enter the status %q", c.VMID, status)
}
if err != nil {
return fmt.Errorf("error waiting for VM %d to enter the status %q: %w", c.VMID, status, err)
}
return nil
}


@ -296,7 +296,12 @@ type CreateRequestBody struct {
// CreateResponseBody contains the body from a create response.
type CreateResponseBody struct {
Data *string `json:"data,omitempty"`
TaskID *string `json:"data,omitempty"`
}
// DeleteResponseBody contains the body from a delete response.
type DeleteResponseBody struct {
TaskID *string `json:"data,omitempty"`
}
// GetQEMUNetworkInterfacesResponseBody contains the body from a QEMU get network interfaces response.


@ -8,6 +8,7 @@ package datasource
import (
"context"
"errors"
"sort"
"strconv"
"strings"
@ -15,6 +16,7 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/bpg/terraform-provider-proxmox/proxmox/api"
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
)
@ -61,7 +63,7 @@ func vmRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Dia
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
@ -69,10 +71,9 @@ func vmRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Dia
nodeName := d.Get(mkDataSourceVirtualEnvironmentVMNodeName).(string)
vmID := d.Get(mkDataSourceVirtualEnvironmentVMVMID).(int)
vmStatus, err := api.Node(nodeName).VM(vmID).GetVMStatus(ctx)
vmStatus, err := client.Node(nodeName).VM(vmID).GetVMStatus(ctx)
if err != nil {
if strings.Contains(err.Error(), "HTTP 404") ||
(strings.Contains(err.Error(), "HTTP 500") && strings.Contains(err.Error(), "does not exist")) {
if errors.Is(err, api.ErrResourceDoesNotExist) {
d.SetId("")
return nil


@ -8,15 +8,18 @@ package resource
import (
"context"
"errors"
"fmt"
"sort"
"strconv"
"strings"
"time"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"github.com/bpg/terraform-provider-proxmox/proxmox/api"
"github.com/bpg/terraform-provider-proxmox/proxmox/nodes/containers"
"github.com/bpg/terraform-provider-proxmox/proxmox/types"
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
@ -76,7 +79,9 @@ const (
dvStartOnBoot = true
dvTemplate = false
dvTimeoutCreate = 1800
dvTimeoutStart = 300
dvTimeoutClone = 1800
dvTimeoutUpdate = 1800
dvTimeoutDelete = 60
dvUnprivileged = false
dvVMID = -1
@ -157,7 +162,9 @@ const (
mkTags = "tags"
mkTemplate = "template"
mkTimeoutCreate = "timeout_create"
mkTimeoutStart = "timeout_start"
mkTimeoutClone = "timeout_clone"
mkTimeoutUpdate = "timeout_update"
mkTimeoutDelete = "timeout_delete"
mkUnprivileged = "unprivileged"
mkVMID = "vm_id"
)
@ -844,11 +851,31 @@ func Container() *schema.Resource {
Optional: true,
Default: dvTimeoutCreate,
},
mkTimeoutStart: {
mkTimeoutClone: {
Type: schema.TypeInt,
Description: "Clone container timeout",
Optional: true,
Default: dvTimeoutClone,
},
mkTimeoutUpdate: {
Type: schema.TypeInt,
Description: "Update container timeout",
Optional: true,
Default: dvTimeoutUpdate,
},
mkTimeoutDelete: {
Type: schema.TypeInt,
Description: "Delete container timeout",
Optional: true,
Default: dvTimeoutDelete,
},
"timeout_start": {
Type: schema.TypeInt,
Description: "Start container timeout",
Optional: true,
Default: dvTimeoutStart,
Default: 300,
Deprecated: "This field is deprecated and will be removed in a future release. " +
"An overall operation timeout (`timeout_create` / `timeout_clone`) is used instead.",
},
mkUnprivileged: {
Type: schema.TypeBool,
@ -900,6 +927,11 @@ func containerCreate(ctx context.Context, d *schema.ResourceData, m interface{})
}
func containerCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
cloneTimeoutSec := d.Get(mkTimeoutClone).(int)
ctx, cancel := context.WithTimeout(ctx, time.Duration(cloneTimeoutSec)*time.Second)
defer cancel()
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
@ -977,7 +1009,7 @@ func containerCreateClone(ctx context.Context, d *schema.ResourceData, m interfa
containerAPI := api.Node(nodeName).Container(vmID)
// Wait for the container to be created and its configuration lock to be released.
err = containerAPI.WaitForContainerLock(ctx, 600, 5, true)
err = containerAPI.WaitForContainerConfigUnlock(ctx, true)
if err != nil {
return diag.FromErr(err)
}
@ -1264,7 +1296,7 @@ func containerCreateClone(ctx context.Context, d *schema.ResourceData, m interfa
}
// Wait for the container's lock to be released.
err = containerAPI.WaitForContainerLock(ctx, 600, 5, true)
err = containerAPI.WaitForContainerConfigUnlock(ctx, true)
if err != nil {
return diag.FromErr(err)
}
@ -1273,6 +1305,11 @@ func containerCreateClone(ctx context.Context, d *schema.ResourceData, m interfa
}
func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
createTimeoutSec := d.Get(mkTimeoutCreate).(int)
ctx, cancel := context.WithTimeout(ctx, time.Duration(createTimeoutSec)*time.Second)
defer cancel()
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
@ -1625,7 +1662,6 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
template := types.CustomBool(d.Get(mkTemplate).(bool))
unprivileged := types.CustomBool(d.Get(mkUnprivileged).(bool))
vmID := d.Get(mkVMID).(int)
createTimeout := d.Get(mkTimeoutCreate).(int)
if vmID == -1 {
vmIDNew, e := api.Cluster().GetVMID(ctx)
@ -1698,7 +1734,7 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
createBody.Tags = &tagsString
}
err = api.Node(nodeName).Container(0).CreateContainer(ctx, &createBody, createTimeout)
err = api.Node(nodeName).Container(0).CreateContainer(ctx, &createBody)
if err != nil {
return diag.FromErr(err)
}
@ -1706,7 +1742,7 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
d.SetId(strconv.Itoa(vmID))
// Wait for the container's lock to be released.
err = api.Node(nodeName).Container(vmID).WaitForContainerLock(ctx, 600, 5, true)
err = api.Node(nodeName).Container(vmID).WaitForContainerConfigUnlock(ctx, true)
if err != nil {
return diag.FromErr(err)
}
@ -1738,10 +1774,8 @@ func containerCreateStart(ctx context.Context, d *schema.ResourceData, m interfa
containerAPI := api.Node(nodeName).Container(vmID)
startTimeout := d.Get(mkTimeoutStart).(int)
// Start the container and wait for it to reach a running state before continuing.
err = containerAPI.StartContainer(ctx, startTimeout)
err = containerAPI.StartContainer(ctx)
if err != nil {
return diag.FromErr(err)
}
@ -1919,7 +1953,7 @@ func containerRead(ctx context.Context, d *schema.ResourceData, m interface{}) d
config := m.(proxmoxtf.ProviderConfiguration)
api, e := config.GetClient()
client, e := config.GetClient()
if e != nil {
return diag.FromErr(e)
}
@ -1931,13 +1965,12 @@ func containerRead(ctx context.Context, d *schema.ResourceData, m interface{}) d
return diag.FromErr(e)
}
containerAPI := api.Node(nodeName).Container(vmID)
containerAPI := client.Node(nodeName).Container(vmID)
// Retrieve the entire configuration in order to compare it to the state.
containerConfig, e := containerAPI.GetContainer(ctx)
if e != nil {
if strings.Contains(e.Error(), "HTTP 404") ||
(strings.Contains(e.Error(), "HTTP 500") && strings.Contains(e.Error(), "does not exist")) {
if errors.Is(e, api.ErrResourceDoesNotExist) {
d.SetId("")
return nil
@ -2540,6 +2573,11 @@ func containerRead(ctx context.Context, d *schema.ResourceData, m interface{}) d
}
func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
updateTimeoutSec := d.Get(mkTimeoutUpdate).(int)
ctx, cancel := context.WithTimeout(ctx, time.Duration(updateTimeoutSec)*time.Second)
defer cancel()
config := m.(proxmoxtf.ProviderConfiguration)
api, e := config.GetClient()
@ -2941,7 +2979,7 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{})
if d.HasChange(mkStarted) && !bool(template) {
if started {
e = containerAPI.StartContainer(ctx, 60)
e = containerAPI.StartContainer(ctx)
if e != nil {
return diag.FromErr(e)
}
@ -2957,7 +2995,7 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{})
return diag.FromErr(e)
}
e = containerAPI.WaitForContainerStatus(ctx, "stopped", 300, 5)
e = containerAPI.WaitForContainerStatus(ctx, "stopped")
if e != nil {
return diag.FromErr(e)
}
@ -2985,9 +3023,14 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{})
}
func containerDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
deleteTimeoutSec := d.Get(mkTimeoutDelete).(int)
ctx, cancel := context.WithTimeout(ctx, time.Duration(deleteTimeoutSec)*time.Second)
defer cancel()
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
@ -2999,7 +3042,7 @@ func containerDelete(ctx context.Context, d *schema.ResourceData, m interface{})
return diag.FromErr(err)
}
containerAPI := api.Node(nodeName).Container(vmID)
containerAPI := client.Node(nodeName).Container(vmID)
// Shut down the container before deleting it.
status, err := containerAPI.GetContainerStatus(ctx)
@ -3009,20 +3052,19 @@ func containerDelete(ctx context.Context, d *schema.ResourceData, m interface{})
if status.Status != "stopped" {
forceStop := types.CustomBool(true)
shutdownTimeout := 300
err = containerAPI.ShutdownContainer(
ctx,
&containers.ShutdownRequestBody{
ForceStop: &forceStop,
Timeout: &shutdownTimeout,
Timeout: &deleteTimeoutSec,
},
)
if err != nil {
return diag.FromErr(err)
}
err = containerAPI.WaitForContainerStatus(ctx, "stopped", 30, 5)
err = containerAPI.WaitForContainerStatus(ctx, "stopped")
if err != nil {
return diag.FromErr(err)
}
@ -3030,7 +3072,7 @@ func containerDelete(ctx context.Context, d *schema.ResourceData, m interface{})
err = containerAPI.DeleteContainer(ctx)
if err != nil {
if strings.Contains(err.Error(), "HTTP 404") {
if errors.Is(err, api.ErrResourceDoesNotExist) {
d.SetId("")
return nil
@ -3040,7 +3082,7 @@ func containerDelete(ctx context.Context, d *schema.ResourceData, m interface{})
}
// Wait for the state to become unavailable as that clearly indicates the destruction of the container.
err = containerAPI.WaitForContainerStatus(ctx, "", 60, 2)
err = containerAPI.WaitForContainerStatus(ctx, "")
if err == nil {
return diag.Errorf("failed to delete container \"%d\"", vmID)
}


@ -11,6 +11,7 @@ import (
"context"
"crypto/sha256"
"crypto/tls"
"errors"
"fmt"
"io"
"net/http"
@ -307,6 +308,11 @@ func fileParseImportID(id string) (string, fileVolumeID, error) {
}
func fileCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
uploadTimeout := d.Get(mkResourceVirtualEnvironmentFileTimeoutUpload).(int)
ctx, cancel := context.WithTimeout(ctx, time.Duration(uploadTimeout)*time.Second)
defer cancel()
var diags diag.Diagnostics
contentType, dg := fileGetContentType(d)
@ -537,9 +543,9 @@ func fileCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
switch *contentType {
case "iso", "vztmpl":
uploadTimeout := d.Get(mkResourceVirtualEnvironmentFileTimeoutUpload).(int)
_, err = capi.Node(nodeName).Storage(datastoreID).APIUpload(ctx, request, uploadTimeout, config.TempDir())
_, err = capi.Node(nodeName).Storage(datastoreID).APIUpload(
ctx, request, config.TempDir(),
)
if err != nil {
diags = append(diags, diag.FromErr(err)...)
return diags
@ -905,13 +911,7 @@ func fileDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag
nodeName := d.Get(mkResourceVirtualEnvironmentFileNodeName).(string)
err = capi.Node(nodeName).Storage(datastoreID).DeleteDatastoreFile(ctx, d.Id())
if err != nil {
if strings.Contains(err.Error(), "HTTP 404") {
d.SetId("")
return nil
}
if err != nil && !errors.Is(err, api.ErrResourceDoesNotExist) {
return diag.FromErr(err)
}


@ -8,12 +8,13 @@ package resource
import (
"context"
"strings"
"errors"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/bpg/terraform-provider-proxmox/proxmox/access"
"github.com/bpg/terraform-provider-proxmox/proxmox/api"
"github.com/bpg/terraform-provider-proxmox/proxmox/types"
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
)
@ -143,15 +144,15 @@ func groupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.
var diags diag.Diagnostics
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
groupID := d.Id()
group, err := api.Access().GetGroup(ctx, groupID)
group, err := client.Access().GetGroup(ctx, groupID)
if err != nil {
if strings.Contains(err.Error(), "HTTP 404") {
if errors.Is(err, api.ErrResourceDoesNotExist) {
d.SetId("")
return nil
@ -159,7 +160,7 @@ func groupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.
return diag.FromErr(err)
}
acl, err := api.Access().GetACL(ctx)
acl, err := client.Access().GetACL(ctx)
if err != nil {
return diag.FromErr(err)
}
@ -271,7 +272,7 @@ func groupUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) dia
func groupDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
@ -294,20 +295,15 @@ func groupDelete(ctx context.Context, d *schema.ResourceData, m interface{}) dia
Roles: []string{aclEntry[mkResourceVirtualEnvironmentGroupACLRoleID].(string)},
}
err = api.Access().UpdateACL(ctx, aclBody)
err = client.Access().UpdateACL(ctx, aclBody)
if err != nil {
return diag.FromErr(err)
}
}
err = api.Access().DeleteGroup(ctx, groupID)
err = client.Access().DeleteGroup(ctx, groupID)
if err != nil {
if strings.Contains(err.Error(), "HTTP 404") {
d.SetId("")
return nil
}
if err != nil && !errors.Is(err, api.ErrResourceDoesNotExist) {
return diag.FromErr(err)
}


@ -8,12 +8,13 @@ package resource
import (
"context"
"errors"
"fmt"
"strings"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/bpg/terraform-provider-proxmox/proxmox/api"
"github.com/bpg/terraform-provider-proxmox/proxmox/pools"
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
)
@ -129,15 +130,15 @@ func poolRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
var diags diag.Diagnostics
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
poolID := d.Id()
pool, err := api.Pool().GetPool(ctx, poolID)
pool, err := client.Pool().GetPool(ctx, poolID)
if err != nil {
if strings.Contains(err.Error(), "HTTP 404") {
if errors.Is(err, api.ErrResourceDoesNotExist) {
d.SetId("")
return nil
}
@ -206,21 +207,15 @@ func poolUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
func poolDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
poolID := d.Id()
err = api.Pool().DeletePool(ctx, poolID)
if err != nil {
if strings.Contains(err.Error(), "HTTP 404") {
d.SetId("")
return nil
}
err = client.Pool().DeletePool(ctx, poolID)
if err != nil && !errors.Is(err, api.ErrResourceDoesNotExist) {
return diag.FromErr(err)
}


@ -8,12 +8,13 @@ package resource
import (
"context"
"strings"
"errors"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/bpg/terraform-provider-proxmox/proxmox/access"
"github.com/bpg/terraform-provider-proxmox/proxmox/api"
"github.com/bpg/terraform-provider-proxmox/proxmox/types"
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
)
@ -82,15 +83,15 @@ func roleCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
func roleRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
roleID := d.Id()
role, err := api.Access().GetRole(ctx, roleID)
role, err := client.Access().GetRole(ctx, roleID)
if err != nil {
if strings.Contains(err.Error(), "HTTP 404") {
if errors.Is(err, api.ErrResourceDoesNotExist) {
d.SetId("")
return nil
@ -139,20 +140,15 @@ func roleUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
func roleDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
roleID := d.Id()
err = api.Access().DeleteRole(ctx, roleID)
if err != nil {
if strings.Contains(err.Error(), "HTTP 404") {
d.SetId("")
return nil
}
err = client.Access().DeleteRole(ctx, roleID)
if err != nil && !errors.Is(err, api.ErrResourceDoesNotExist) {
return diag.FromErr(err)
}


@ -8,7 +8,7 @@ package resource
import (
"context"
"strings"
"errors"
"time"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
@ -16,6 +16,7 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"github.com/bpg/terraform-provider-proxmox/proxmox/access"
"github.com/bpg/terraform-provider-proxmox/proxmox/api"
"github.com/bpg/terraform-provider-proxmox/proxmox/types"
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
)
@ -231,15 +232,15 @@ func userCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
func userRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
userID := d.Id()
user, err := api.Access().GetUser(ctx, userID)
user, err := client.Access().GetUser(ctx, userID)
if err != nil {
if strings.Contains(err.Error(), "HTTP 404") {
if errors.Is(err, api.ErrResourceDoesNotExist) {
d.SetId("")
return nil
@ -247,7 +248,7 @@ func userRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
return diag.FromErr(err)
}
acl, err := api.Access().GetACL(ctx)
acl, err := client.Access().GetACL(ctx)
if err != nil {
return diag.FromErr(err)
}
@ -453,7 +454,7 @@ func userUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
func userDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
@ -476,20 +477,14 @@ func userDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag
Users: []string{userID},
}
err = api.Access().UpdateACL(ctx, aclBody)
err = client.Access().UpdateACL(ctx, aclBody)
if err != nil {
return diag.FromErr(err)
}
}
err = api.Access().DeleteUser(ctx, userID)
if err != nil {
if strings.Contains(err.Error(), "HTTP 404") {
d.SetId("")
return nil
}
err = client.Access().DeleteUser(ctx, userID)
if err != nil && !errors.Is(err, api.ErrResourceDoesNotExist) {
return diag.FromErr(err)
}


@ -178,17 +178,15 @@ func CreateClone(
}
}
timeout := d.Get(MkTimeoutMoveDisk).(int)
if moveDisk {
err := vmAPI.MoveVMDisk(ctx, diskMoveBody, timeout)
err := vmAPI.MoveVMDisk(ctx, diskMoveBody)
if err != nil {
return fmt.Errorf("disk move fails: %w", err)
}
}
if diskSize > currentDiskInfo.Size.InGigabytes() {
err := vmAPI.ResizeVMDisk(ctx, diskResizeBody, timeout)
err := vmAPI.ResizeVMDisk(ctx, diskResizeBody)
if err != nil {
return fmt.Errorf("disk resize fails: %w", err)
}


@ -41,9 +41,6 @@ const (
mkDiskSpeedWrite = "write"
mkDiskSpeedWriteBurstable = "write_burstable"
mkDiskSSD = "ssd"
// MkTimeoutMoveDisk is the name of the timeout_move_disk attribute.
MkTimeoutMoveDisk = "timeout_move_disk"
)
// Schema returns the schema for the disk resource.


@ -31,6 +31,7 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"github.com/bpg/terraform-provider-proxmox/proxmox/api"
"github.com/bpg/terraform-provider-proxmox/proxmox/cluster"
"github.com/bpg/terraform-provider-proxmox/proxmox/nodes/vms"
"github.com/bpg/terraform-provider-proxmox/proxmox/pools"
@ -120,7 +121,6 @@ const (
dvTemplate = false
dvTimeoutClone = 1800
dvTimeoutCreate = 1800
dvTimeoutMoveDisk = 1800
dvTimeoutMigrate = 1800
dvTimeoutReboot = 1800
dvTimeoutShutdownVM = 1800
@ -263,7 +263,7 @@ const (
mkTemplate = "template"
mkTimeoutClone = "timeout_clone"
mkTimeoutCreate = "timeout_create"
mkTimeoutMigrate = "timeout_migrate"
mkTimeoutMigrate = "timeout_migrate" // this is essentially an "timeout_update", needs to be refactored
mkTimeoutReboot = "timeout_reboot"
mkTimeoutShutdownVM = "timeout_shutdown_vm"
mkTimeoutStartVM = "timeout_start_vm"
@ -1344,11 +1344,13 @@ func VM() *schema.Resource {
Optional: true,
Default: dvTimeoutCreate,
},
disk.MkTimeoutMoveDisk: {
"timeout_move_disk": {
Type: schema.TypeInt,
Description: "MoveDisk timeout",
Optional: true,
Default: dvTimeoutMoveDisk,
Default: 1800,
Deprecated: "This field is deprecated and will be removed in a future release. " +
"An overall operation timeout (timeout_create / timeout_clone / timeout_migrate) is used instead.",
},
mkTimeoutMigrate: {
Type: schema.TypeInt,
@ -1653,15 +1655,18 @@ func deleteIdeDrives(ctx context.Context, vmAPI *vms.Client, itf1 string, itf2 s
// Start the VM, then wait for it to actually start; it may not be started immediately if running in HA mode.
func vmStart(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData) diag.Diagnostics {
var diags diag.Diagnostics
tflog.Debug(ctx, "Starting VM")
startVMTimeout := d.Get(mkTimeoutStartVM).(int)
startTimeoutSec := d.Get(mkTimeoutStartVM).(int)
log, e := vmAPI.StartVM(ctx, startVMTimeout)
ctx, cancel := context.WithTimeout(ctx, time.Duration(startTimeoutSec)*time.Second)
defer cancel()
var diags diag.Diagnostics
log, e := vmAPI.StartVM(ctx, startTimeoutSec)
if e != nil {
return append(diags, diag.FromErr(e)...)
return diag.FromErr(e)
}
if len(log) > 0 {
@ -1672,7 +1677,7 @@ func vmStart(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData) dia
})
}
return append(diags, diag.FromErr(vmAPI.WaitForVMStatus(ctx, "running", startVMTimeout, 1))...)
return append(diags, diag.FromErr(vmAPI.WaitForVMStatus(ctx, "running"))...)
}
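WaitForVMStatus loses its explicit timeout and delay arguments throughout this change; the loop is presumably rebuilt to poll until the operation-scoped context expires. A hedged sketch of such a loop (the real signature lives in the vms client; this is an assumption-level illustration):

package vms

import (
	"context"
	"fmt"
	"time"
)

// waitForStatus polls getStatus until the VM reports want, or until the
// caller's operation-scoped context runs out of budget.
func waitForStatus(ctx context.Context, want string, getStatus func(context.Context) (string, error)) error {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return fmt.Errorf("waiting for status %q: %w", want, ctx.Err())
		case <-ticker.C:
			got, err := getStatus(ctx)
			if err != nil {
				return err
			}
			if got == want {
				return nil
			}
		}
	}
}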
// Shutdown the VM, then wait for it to actually shut down (it may not be shut down immediately if
@ -1681,17 +1686,20 @@ func vmShutdown(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData)
tflog.Debug(ctx, "Shutting down VM")
forceStop := types.CustomBool(true)
shutdownTimeout := d.Get(mkTimeoutShutdownVM).(int)
shutdownTimeoutSec := d.Get(mkTimeoutShutdownVM).(int)
ctx, cancel := context.WithTimeout(ctx, time.Duration(shutdownTimeoutSec)*time.Second)
defer cancel()
e := vmAPI.ShutdownVM(ctx, &vms.ShutdownRequestBody{
ForceStop: &forceStop,
Timeout: &shutdownTimeout,
}, shutdownTimeout+30)
Timeout: &shutdownTimeoutSec,
})
if e != nil {
return diag.FromErr(e)
}
return diag.FromErr(vmAPI.WaitForVMStatus(ctx, "stopped", shutdownTimeout, 1))
return diag.FromErr(vmAPI.WaitForVMStatus(ctx, "stopped"))
}
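Note that shutdownTimeoutSec now plays two roles: it still travels in the request body, where Proxmox uses it as the guest's grace period before ForceStop applies, and the same value caps the whole client-side interaction via context.WithTimeout, replacing the old ad-hoc shutdownTimeout+30 margin. The call from the hunk above, annotated under that reading:

// client-side budget: the value bounds the entire HTTP call chain
ctx, cancel := context.WithTimeout(ctx, time.Duration(shutdownTimeoutSec)*time.Second)
defer cancel()

e := vmAPI.ShutdownVM(ctx, &vms.ShutdownRequestBody{
	ForceStop: &forceStop,          // stop hard once the grace period lapses
	Timeout:   &shutdownTimeoutSec, // guest-side grace period, interpreted by the server
})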
// Forcefully stop the VM, then wait for it to actually stop.
@ -1700,18 +1708,26 @@ func vmStop(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData) diag
stopTimeout := d.Get(mkTimeoutStopVM).(int)
e := vmAPI.StopVM(ctx, stopTimeout+30)
ctx, cancel := context.WithTimeout(ctx, time.Duration(stopTimeout)*time.Second)
defer cancel()
e := vmAPI.StopVM(ctx)
if e != nil {
return diag.FromErr(e)
}
return diag.FromErr(vmAPI.WaitForVMStatus(ctx, "stopped", stopTimeout, 1))
return diag.FromErr(vmAPI.WaitForVMStatus(ctx, "stopped"))
}
func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
cloneTimeoutSec := d.Get(mkTimeoutClone).(int)
ctx, cancel := context.WithTimeout(ctx, time.Duration(cloneTimeoutSec)*time.Second)
defer cancel()
config := m.(proxmoxtf.ProviderConfiguration)
api, e := config.GetClient()
client, e := config.GetClient()
if e != nil {
return diag.FromErr(e)
}
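The same pattern opens each CRUD entry point: the context is re-assigned in place, so everything downstream of these lines inherits the operation deadline without a timeout argument being threaded through. A tiny runnable illustration of that inheritance:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx := context.Background()

	// shadowing ctx, as vmCreateClone does above, makes every later call in
	// the function observe the same deadline
	ctx, cancel := context.WithTimeout(ctx, 1800*time.Second)
	defer cancel()

	deadline, ok := ctx.Deadline()
	fmt.Println(ok, time.Until(deadline).Round(time.Second)) // true 30m0s
}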
@ -1733,7 +1749,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
vmID := vmIDUntyped.(int)
if !hasVMID {
vmIDNew, err := api.Cluster().GetVMID(ctx)
vmIDNew, err := client.Cluster().GetVMID(ctx)
if err != nil {
return diag.FromErr(err)
}
@ -1769,11 +1785,9 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
cloneBody.PoolID = &poolID
}
cloneTimeout := d.Get(mkTimeoutClone).(int)
if cloneNodeName != "" && cloneNodeName != nodeName {
// Check if any used datastores of the source VM are not shared
vmConfig, err := api.Node(cloneNodeName).VM(cloneVMID).GetVM(ctx)
vmConfig, err := client.Node(cloneNodeName).VM(cloneVMID).GetVM(ctx)
if err != nil {
return diag.FromErr(err)
}
@ -1783,7 +1797,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
onlySharedDatastores := true
for _, datastore := range datastores {
datastoreStatus, err2 := api.Node(cloneNodeName).Storage(datastore).GetDatastoreStatus(ctx)
datastoreStatus, err2 := client.Node(cloneNodeName).Storage(datastore).GetDatastoreStatus(ctx)
if err2 != nil {
return diag.FromErr(err2)
}
@ -1800,12 +1814,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
// on a different node is currently not supported by proxmox.
cloneBody.TargetNodeName = &nodeName
err = api.Node(cloneNodeName).VM(cloneVMID).CloneVM(
ctx,
cloneRetries,
cloneBody,
cloneTimeout,
)
err = client.Node(cloneNodeName).VM(cloneVMID).CloneVM(ctx, cloneRetries, cloneBody)
if err != nil {
return diag.FromErr(err)
}
@ -1816,14 +1825,14 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
// https://forum.proxmox.com/threads/500-cant-clone-to-non-shared-storage-local.49078/#post-229727
// Temporarily clone to local node
err = api.Node(cloneNodeName).VM(cloneVMID).CloneVM(ctx, cloneRetries, cloneBody, cloneTimeout)
err = client.Node(cloneNodeName).VM(cloneVMID).CloneVM(ctx, cloneRetries, cloneBody)
if err != nil {
return diag.FromErr(err)
}
// Wait for the virtual machine to be created and its configuration lock to be released before migrating.
err = api.Node(cloneNodeName).VM(vmID).WaitForVMConfigUnlock(ctx, 600, 5, true)
err = client.Node(cloneNodeName).VM(vmID).WaitForVMConfigUnlock(ctx, true)
if err != nil {
return diag.FromErr(err)
}
@ -1839,13 +1848,13 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
migrateBody.TargetStorage = &cloneDatastoreID
}
err = api.Node(cloneNodeName).VM(vmID).MigrateVM(ctx, migrateBody, cloneTimeout)
err = client.Node(cloneNodeName).VM(vmID).MigrateVM(ctx, migrateBody)
if err != nil {
return diag.FromErr(err)
}
}
} else {
e = api.Node(nodeName).VM(cloneVMID).CloneVM(ctx, cloneRetries, cloneBody, cloneTimeout)
e = client.Node(nodeName).VM(cloneVMID).CloneVM(ctx, cloneRetries, cloneBody)
}
if e != nil {
@ -1854,10 +1863,10 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
d.SetId(strconv.Itoa(vmID))
vmAPI := api.Node(nodeName).VM(vmID)
vmAPI := client.Node(nodeName).VM(vmID)
// Wait for the virtual machine to be created and its configuration lock to be released.
e = vmAPI.WaitForVMConfigUnlock(ctx, 600, 5, true)
e = vmAPI.WaitForVMConfigUnlock(ctx, true)
if e != nil {
return diag.FromErr(e)
}
@ -1985,7 +1994,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
}
// Only the root account is allowed to change the CPU architecture, which makes this check necessary.
if api.API().IsRootTicket() ||
if client.API().IsRootTicket() ||
cpuArchitecture != dvCPUArchitecture {
updateBody.CPUArchitecture = &cpuArchitecture
}
@ -2184,10 +2193,8 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
vmConfig, e = vmAPI.GetVM(ctx)
if e != nil {
if strings.Contains(e.Error(), "HTTP 404") ||
(strings.Contains(e.Error(), "HTTP 500") && strings.Contains(e.Error(), "does not exist")) {
if errors.Is(e, api.ErrResourceDoesNotExist) {
d.SetId("")
return nil
}
@ -2259,9 +2266,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
}
if moveDisk {
moveDiskTimeout := d.Get(disk.MkTimeoutMoveDisk).(int)
e = vmAPI.MoveVMDisk(ctx, diskMoveBody, moveDiskTimeout)
e = vmAPI.MoveVMDisk(ctx, diskMoveBody)
if e != nil {
return diag.FromErr(e)
}
@ -2312,9 +2317,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
}
if moveDisk {
moveDiskTimeout := d.Get(disk.MkTimeoutMoveDisk).(int)
e = vmAPI.MoveVMDisk(ctx, diskMoveBody, moveDiskTimeout)
e = vmAPI.MoveVMDisk(ctx, diskMoveBody)
if e != nil {
return diag.FromErr(e)
}
@ -2325,6 +2328,11 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
}
func vmCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
createTimeoutSec := d.Get(mkTimeoutCreate).(int)
ctx, cancel := context.WithTimeout(ctx, time.Duration(createTimeoutSec)*time.Second)
defer cancel()
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
@ -2732,9 +2740,7 @@ func vmCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{})
createBody.HookScript = &hookScript
}
createTimeout := d.Get(mkTimeoutClone).(int)
err = api.Node(nodeName).VM(0).CreateVM(ctx, createBody, createTimeout)
err = api.Node(nodeName).VM(0).CreateVM(ctx, createBody)
if err != nil {
return diag.FromErr(err)
}
@ -2780,14 +2786,13 @@ func vmCreateStart(ctx context.Context, d *schema.ResourceData, m interface{}) d
}
if reboot {
rebootTimeout := d.Get(mkTimeoutReboot).(int)
rebootTimeoutSec := d.Get(mkTimeoutReboot).(int)
err := vmAPI.RebootVM(
ctx,
&vms.RebootRequestBody{
Timeout: &rebootTimeout,
Timeout: &rebootTimeoutSec,
},
rebootTimeout+30,
)
if err != nil {
return diag.FromErr(err)
@ -3349,7 +3354,7 @@ func vmGetVGADeviceObject(d *schema.ResourceData) (*vms.CustomVGADevice, error)
func vmRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
@ -3359,7 +3364,7 @@ func vmRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Dia
return diag.FromErr(err)
}
vmNodeName, err := api.Cluster().GetVMNodeName(ctx, vmID)
vmNodeName, err := client.Cluster().GetVMNodeName(ctx, vmID)
if err != nil {
if errors.Is(err, cluster.ErrVMDoesNotExist) {
d.SetId("")
@ -3379,15 +3384,13 @@ func vmRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Dia
nodeName := d.Get(mkNodeName).(string)
vmAPI := api.Node(nodeName).VM(vmID)
vmAPI := client.Node(nodeName).VM(vmID)
// Retrieve the entire configuration in order to compare it to the state.
vmConfig, err := vmAPI.GetVM(ctx)
if err != nil {
if strings.Contains(err.Error(), "HTTP 404") ||
(strings.Contains(err.Error(), "HTTP 500") && strings.Contains(err.Error(), "does not exist")) {
if errors.Is(err, api.ErrResourceDoesNotExist) {
d.SetId("")
return nil
}
@ -4730,11 +4733,15 @@ func vmUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
// If the node name has changed we need to migrate the VM to the new node before we do anything else.
if d.HasChange(mkNodeName) {
migrateTimeoutSec := d.Get(mkTimeoutMigrate).(int)
ctx, cancel := context.WithTimeout(ctx, time.Duration(migrateTimeoutSec)*time.Second)
defer cancel()
oldNodeNameValue, _ := d.GetChange(mkNodeName)
oldNodeName := oldNodeNameValue.(string)
vmAPI := api.Node(oldNodeName).VM(vmID)
migrateTimeout := d.Get(mkTimeoutMigrate).(int)
trueValue := types.CustomBool(true)
migrateBody := &vms.MigrateRequestBody{
TargetNode: nodeName,
@ -4742,7 +4749,7 @@ func vmUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
OnlineMigration: &trueValue,
}
err := vmAPI.MigrateVM(ctx, migrateBody, migrateTimeout)
err := vmAPI.MigrateVM(ctx, migrateBody)
if err != nil {
return diag.FromErr(err)
}
@ -5516,17 +5523,15 @@ func vmUpdateDiskLocationAndSize(
}
}
timeout := d.Get(disk.MkTimeoutMoveDisk).(int)
for _, reqBody := range diskMoveBodies {
err = vmAPI.MoveVMDisk(ctx, reqBody, timeout)
err = vmAPI.MoveVMDisk(ctx, reqBody)
if err != nil {
return diag.FromErr(err)
}
}
for _, reqBody := range diskResizeBodies {
err = vmAPI.ResizeVMDisk(ctx, reqBody, timeout)
err = vmAPI.ResizeVMDisk(ctx, reqBody)
if err != nil {
return diag.FromErr(err)
}
@ -5550,14 +5555,13 @@ func vmUpdateDiskLocationAndSize(
}
if vmStatus.Status != "stopped" {
rebootTimeout := d.Get(mkTimeoutReboot).(int)
rebootTimeoutSec := d.Get(mkTimeoutReboot).(int)
err := vmAPI.RebootVM(
ctx,
&vms.RebootRequestBody{
Timeout: &rebootTimeout,
Timeout: &rebootTimeoutSec,
},
rebootTimeout+30,
)
if err != nil {
return diag.FromErr(err)
@ -5569,9 +5573,19 @@ func vmUpdateDiskLocationAndSize(
}
func vmDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
timeout := d.Get(mkTimeoutStopVM).(int)
shutdownTimeout := d.Get(mkTimeoutShutdownVM).(int)
if shutdownTimeout > timeout {
timeout = shutdownTimeout
}
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeout)*time.Second)
defer cancel()
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
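vmDelete sizes its context from whichever of the stop and shutdown budgets is larger, since deletion may go through either path before removal. The selection as a standalone sketch:

package main

import (
	"fmt"
	"time"
)

// deleteBudget mirrors the lines above: take the larger of the two
// timeouts so neither path can outlive the operation context.
func deleteBudget(stopSec, shutdownSec int) time.Duration {
	timeout := stopSec
	if shutdownSec > timeout {
		timeout = shutdownSec
	}
	return time.Duration(timeout) * time.Second
}

func main() {
	fmt.Println(deleteBudget(300, 1800)) // 30m0s
}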
@ -5583,7 +5597,7 @@ func vmDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
return diag.FromErr(err)
}
vmAPI := api.Node(nodeName).VM(vmID)
vmAPI := client.Node(nodeName).VM(vmID)
// Stop or shut down the virtual machine before deleting it.
status, err := vmAPI.GetVMStatus(ctx)
@ -5608,10 +5622,8 @@ func vmDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
err = vmAPI.DeleteVM(ctx)
if err != nil {
if strings.Contains(err.Error(), "HTTP 404") ||
(strings.Contains(err.Error(), "HTTP 500") && strings.Contains(err.Error(), "does not exist")) {
if errors.Is(err, api.ErrResourceDoesNotExist) {
d.SetId("")
return nil
}
@ -5619,7 +5631,7 @@ func vmDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
}
// Wait for the state to become unavailable as that clearly indicates the destruction of the VM.
err = vmAPI.WaitForVMStatus(ctx, "", 60, 2)
err = vmAPI.WaitForVMStatus(ctx, "")
if err == nil {
return diag.Errorf("failed to delete VM \"%d\"", vmID)
}
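The final wait inverts the usual contract: with an empty status string, a nil error means the VM still answers, so the delete is reported as failed; an error is the success signal once the VM is gone. A sketch of that inversion (treating any error as confirmation is an assumption based on the hunk above):

package main

import (
	"errors"
	"fmt"
)

var errResourceDoesNotExist = errors.New("the requested resource does not exist")

// confirmDeleted flips the wait contract used elsewhere in this change: an
// error from the status poll is what confirms a destroyed VM.
func confirmDeleted(waitErr error, vmID int) error {
	if waitErr == nil {
		return fmt.Errorf("failed to delete VM %d: it still reports a status", vmID)
	}
	return nil // gone (404 sentinel) or deadline: either way the wait ended in error
}

func main() {
	fmt.Println(confirmDeleted(nil, 100))                     // delete failed
	fmt.Println(confirmDeleted(errResourceDoesNotExist, 100)) // <nil>
}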