Mirror of https://github.com/bpg/terraform-provider-proxmox.git
Synced 2025-08-25 04:45:46 +00:00

Commit b15d783658: Merge branch 'main' into datasource-containers
@@ -1,6 +1,6 @@
-FROM golang:1.24.6@sha256:2c89c41fb9efc3807029b59af69645867cfe978d2b877d475be0d72f6c6ce6f6
+FROM golang:1.25.0@sha256:9e56f0d0f043a68bb8c47c819e47dc29f6e8f5129b8885bed9d43f058f7f3ed6

-ARG GOLANGCI_LINT_VERSION=2.3.1 # renovate: depName=golangci/golangci-lint datasource=github-releases
+ARG GOLANGCI_LINT_VERSION=2.4.0 # renovate: depName=golangci/golangci-lint datasource=github-releases

 RUN apt update && apt upgrade -y && \
     apt-get install --no-install-recommends -y ca-certificates curl gnupg lsb-release jq zsh neovim gh && \
.github/workflows/code-quality.yml (vendored, 4 changed lines)
@@ -16,12 +16,12 @@ jobs:
       pull-requests: write
       checks: write
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
         with:
           ref: ${{ github.event.pull_request.head.sha }} # to check out the actual pull request commit, not the merge commit
           fetch-depth: 0 # a full history is required for pull request analysis
       - name: 'Qodana Scan'
-        uses: JetBrains/qodana-action@e14351bdf4707c4cecc25a86a9190745b7b40de8 # v2025.1.1
+        uses: JetBrains/qodana-action@27de2a744479d1d731934eeaf79287575ebc5dd3 # v2025.2.1
         with:
           post-pr-comment: false
         env:
.github/workflows/golangci-lint.yml (vendored, 4 changed lines)
@@ -16,7 +16,7 @@ jobs:
     runs-on: ubuntu-24.04
     steps:
       - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
         with:
          fetch-depth: 0

@@ -42,6 +42,6 @@ jobs:
         if: ${{ steps.filter.outputs.go == 'true' || steps.filter.outputs.linter == 'true'}}
         uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8
         with:
-          version: v2.3.1 # renovate: depName=golangci/golangci-lint datasource=github-releases
+          version: v2.4.0 # renovate: depName=golangci/golangci-lint datasource=github-releases
           skip-cache: true
           args: -v --timeout=10m
.github/workflows/link-check.yml (vendored, 6 changed lines)
@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-24.04
     steps:
       - name: Generate Short Lived OAuth App Token
-        uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
+        uses: actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b # v2.1.1
         id: app-token
         with:
           app-id: "${{ secrets.BOT_APP_ID }}"

@@ -19,10 +19,10 @@ jobs:
           repositories: "${{ github.event.repository.name }}"

       - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

       - name: Link Checker
-        uses: lycheeverse/lychee-action@82202e5e9c2f4ef1a55a3d02563e1cb6041e5332 # v2.4.1
+        uses: lycheeverse/lychee-action@5c4ee84814c983aa7164eaee476f014e53ff3963 # v2.5.0
         id: lychee
         env:
           GITHUB_TOKEN: "${{ steps.app-token.outputs.token }}"
.github/workflows/publish.yml (vendored, 4 changed lines)
@@ -23,7 +23,7 @@ jobs:
     runs-on: ubuntu-24.04
     steps:
       - name: Generate Short Lived OAuth App Token
-        uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
+        uses: actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b # v2.1.1
         id: app-token
         with:
           app-id: "${{ secrets.BOT_APP_ID }}"

@@ -32,7 +32,7 @@ jobs:
           repositories: "${{ github.event.repository.name }}"

       - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
         with:
           fetch-depth: 0
.github/workflows/release-please.yml (vendored, 2 changed lines)
@@ -13,7 +13,7 @@ jobs:
       contents: write
     steps:
       - name: Generate Short Lived OAuth App Token
-        uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
+        uses: actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b # v2.1.1
         id: app-token
         with:
           app-id: "${{ secrets.BOT_APP_ID }}"
.github/workflows/stale.yaml (vendored, 2 changed lines)
@@ -12,7 +12,7 @@ jobs:
       pull-requests: write
     steps:
       - name: Generate Short Lived OAuth App Token
-        uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
+        uses: actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b # v2.1.1
         id: app-token
         with:
           app-id: "${{ secrets.BOT_APP_ID }}"
.github/workflows/test.yml (vendored, 4 changed lines)
@@ -13,7 +13,7 @@ jobs:
     timeout-minutes: 5
     steps:
       - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
         with:
           fetch-depth: 1

@@ -51,7 +51,7 @@ jobs:
         run: echo "$GITHUB_CONTEXT"

       - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
         with:
           fetch-depth: 1
.github/workflows/testacc.yml (vendored, 2 changed lines)
@@ -35,7 +35,7 @@ jobs:
         run: echo "$GITHUB_CONTEXT"

       - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
         with:
           fetch-depth: 1
           ref: ${{ github.event.inputs.ref || github.ref}}
Makefile (2 changed lines)
@@ -3,7 +3,7 @@ TARGETS=darwin linux windows
 TERRAFORM_PLUGIN_EXTENSION=
 VERSION=0.81.0# x-release-please-version

-GOLANGCI_LINT_VERSION=2.3.1# renovate: depName=golangci/golangci-lint datasource=github-releases
+GOLANGCI_LINT_VERSION=2.4.0# renovate: depName=golangci/golangci-lint datasource=github-releases

 # check if opentofu is installed and use it if it is,
 # otherwise use terraform
@@ -3,21 +3,21 @@ layout: page
 page_title: "Clone a VM"
 subcategory: Guides
 description: |-
-  This guide explains how to create a VM template and then clone it to another VM.
+  This guide explains how to create a VM template and clone it to a new VM.
 ---

 # Clone a VM

 ## Create a VM template

-VM templates in Proxmox provide an efficient way to create multiple identical VMs. Templates act as a base image that can be cloned to create new VMs, ensuring consistency and reducing the time needed to provision new instances. When a VM is created as a template, it is read-only and can't be started, but can be cloned multiple times to create new VMs.
+VM templates in Proxmox provide an efficient way to create multiple identical VMs. Templates act as a base image that can be cloned to create new VMs, ensuring consistency and reducing the time needed to provision new instances. When a VM is created as a template, it is read-only and cannot be started, but can be cloned multiple times to create new VMs.

-You can create a template directly in Proxmox by setting the `template` attribute to `true` when creating the VM resource:
+You can create a template with Terraform by setting the `template` attribute to `true` when creating the VM resource:

 ```terraform
 resource "proxmox_virtual_environment_vm" "ubuntu_template" {
   name      = "ubuntu-template"
-  node_name = "pve"
+  node_name = var.virtual_environment_node_name

   template = true
   started  = false

@@ -35,12 +35,12 @@ resource "proxmox_virtual_environment_vm" "ubuntu_template" {
   }

   efi_disk {
-    datastore_id = "local"
+    datastore_id = var.datastore_id
     type         = "4m"
   }

   disk {
-    datastore_id = "local-lvm"
+    datastore_id = var.datastore_id
     file_id      = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
     interface    = "virtio0"
     iothread     = true

@@ -67,18 +67,18 @@ resource "proxmox_virtual_environment_vm" "ubuntu_template" {
 resource "proxmox_virtual_environment_download_file" "ubuntu_cloud_image" {
   content_type = "iso"
   datastore_id = "local"
-  node_name    = "pve"
+  node_name    = var.virtual_environment_node_name

   url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
 }
 ```

-Once you have a template, you can clone it to create new VMs. The cloned VMs will inherit all the configuration from the template but can be customized further as needed.
+Once you have a template, you can clone it to create new VMs. The cloned VMs will inherit all configuration from the template but can be customized further as needed.

 ```terraform
 resource "proxmox_virtual_environment_vm" "ubuntu_clone" {
   name      = "ubuntu-clone"
-  node_name = "pve"
+  node_name = var.virtual_environment_node_name

   clone {
     vm_id = proxmox_virtual_environment_vm.ubuntu_template.id

@@ -113,3 +113,5 @@ output "vm_ipv4_address" {
   value = proxmox_virtual_environment_vm.ubuntu_clone.ipv4_addresses[1][0]
 }
 ```
+
+Full example is available in the [examples/guides/clone-vm](https://github.com/bpg/terraform-provider-proxmox/tree/main/examples/guides/clone-vm) directory.
@@ -134,6 +134,7 @@ output "ubuntu_container_public_key" {
 - `size` - (Optional) The size of the root filesystem in gigabytes (defaults
   to `4`). When set to 0 a directory or zfs/btrfs subvolume will be created.
   Requires `datastore_id` to be set.
+- `mount_options` (Optional) List of extra mount options.
 - `initialization` - (Optional) The initialization configuration.
   - `dns` - (Optional) The DNS configuration.
     - `domain` - (Optional) The DNS search domain.
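For orientation, here is a minimal sketch of how the `mount_options` attribute documented above is meant to be used on the container's root-filesystem `disk` block. The attribute name and the `["discard"]` value come from this commit's schema and acceptance test; the resource name, node name, and datastore are illustrative assumptions, and this block is not part of the commit itself.

```terraform
resource "proxmox_virtual_environment_container" "example" {
  node_name = "pve" # illustrative node name

  disk {
    datastore_id  = "local-lvm"
    size          = 4
    # extra rootfs mount options; the provider sends them to Proxmox joined with ";" as "mountoptions=..."
    mount_options = ["discard"]
  }

  operating_system {
    # illustrative template; any LXC template already present on the node works
    template_file_id = "local:vztmpl/ubuntu-24.10-standard_24.10-1_amd64.tar.zst"
    type             = "ubuntu"
  }
}
```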
@@ -1,6 +1,6 @@
 resource "proxmox_virtual_environment_vm" "ubuntu_clone" {
   name      = "ubuntu-clone"
-  node_name = "pve"
+  node_name = var.virtual_environment_node_name

   clone {
     vm_id = proxmox_virtual_environment_vm.ubuntu_template.id
@@ -5,7 +5,7 @@ data "local_file" "ssh_public_key" {
 resource "proxmox_virtual_environment_file" "user_data_cloud_config" {
   content_type = "snippets"
   datastore_id = "local"
-  node_name    = "pve"
+  node_name    = var.virtual_environment_node_name

   source_raw {
     data = <<-EOF
|
|||||||
resource "proxmox_virtual_environment_vm" "ubuntu_template" {
|
resource "proxmox_virtual_environment_vm" "ubuntu_template" {
|
||||||
name = "ubuntu-template"
|
name = "ubuntu-template"
|
||||||
node_name = "pve"
|
node_name = var.virtual_environment_node_name
|
||||||
|
|
||||||
template = true
|
template = true
|
||||||
started = false
|
started = false
|
||||||
@ -18,12 +18,12 @@ resource "proxmox_virtual_environment_vm" "ubuntu_template" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
efi_disk {
|
efi_disk {
|
||||||
datastore_id = "local"
|
datastore_id = var.datastore_id
|
||||||
type = "4m"
|
type = "4m"
|
||||||
}
|
}
|
||||||
|
|
||||||
disk {
|
disk {
|
||||||
datastore_id = "local-lvm"
|
datastore_id = var.datastore_id
|
||||||
file_id = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
|
file_id = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
|
||||||
interface = "virtio0"
|
interface = "virtio0"
|
||||||
iothread = true
|
iothread = true
|
||||||
@ -50,7 +50,7 @@ resource "proxmox_virtual_environment_vm" "ubuntu_template" {
|
|||||||
resource "proxmox_virtual_environment_download_file" "ubuntu_cloud_image" {
|
resource "proxmox_virtual_environment_download_file" "ubuntu_cloud_image" {
|
||||||
content_type = "iso"
|
content_type = "iso"
|
||||||
datastore_id = "local"
|
datastore_id = "local"
|
||||||
node_name = "pve"
|
node_name = var.virtual_environment_node_name
|
||||||
|
|
||||||
url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
|
url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
|
||||||
}
|
}
|
||||||
|
@@ -8,3 +8,15 @@ variable "virtual_environment_token" {
   description = "The token for the Proxmox Virtual Environment API"
   sensitive   = true
 }
+
+variable "virtual_environment_node_name" {
+  type        = string
+  description = "The node name for the Proxmox Virtual Environment API"
+  default     = "pve"
+}
+
+variable "datastore_id" {
+  type        = string
+  description = "Datastore for VM disks"
+  default     = "local-lvm"
+}
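Because both new variables carry defaults, the guide's example can be pointed at a different node or datastore without editing any resource blocks. A minimal sketch of a `terraform.tfvars` override follows; the values are illustrative and not part of the commit:

```terraform
# terraform.tfvars: illustrative overrides for the variables added above
virtual_environment_node_name = "pve-02"
datastore_id                  = "fast-nvme"
```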
@@ -48,7 +48,7 @@ func TestAccResourceContainer(t *testing.T) {
     FileName: ptr.Ptr(imageFileName),
     Node:     ptr.Ptr(te.NodeName),
     Storage:  ptr.Ptr(te.DatastoreID),
-    URL:      ptr.Ptr(fmt.Sprintf("%s/images/system/ubuntu-23.04-standard_23.04-1_amd64.tar.zst", te.ContainerImagesServer)),
+    URL:      ptr.Ptr(fmt.Sprintf("%s/images/system/ubuntu-24.10-standard_24.10-1_amd64.tar.zst", te.ContainerImagesServer)),
   })
   require.NoError(t, err)

@@ -110,9 +110,10 @@ func TestAccResourceContainer(t *testing.T) {
           "device_passthrough.0.mode": "0660",
           "initialization.0.dns.#":    "0",
         }),
-        ResourceAttributesSet(accTestContainerName, []string{
-          "ipv4.vmbr0",
-        }),
+        // TODO: depends on DHCP, which may not work in some environments
+        // ResourceAttributesSet(accTestContainerName, []string{
+        //   "ipv4.vmbr0",
+        // }),
         func(*terraform.State) error {
           ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
           defer cancel()

@@ -142,6 +143,7 @@ func TestAccResourceContainer(t *testing.T) {
         disk {
           datastore_id = "local-lvm"
           size         = 4
+          mount_options = ["discard"]
         }
         mount_point {
           volume = "local-lvm"

@@ -178,6 +180,56 @@ func TestAccResourceContainer(t *testing.T) {
           "description":            "my\ndescription\nvalue\n",
           "device_passthrough.#":   "1",
           "initialization.0.dns.#": "0",
+          "disk.0.mount_options.#": "1",
+        }),
+      ),
+    },
+    {
+      // remove disk options
+      Config: te.RenderConfig(`
+      resource "proxmox_virtual_environment_container" "test_container" {
+        node_name = "{{.NodeName}}"
+        vm_id     = {{.TestContainerID}}
+        timeout_delete = 10
+        unprivileged = true
+        disk {
+          datastore_id = "local-lvm"
+          size         = 4
+          mount_options = []
+        }
+        mount_point {
+          volume = "local-lvm"
+          size   = "4G"
+          path   = "mnt/local"
+        }
+        device_passthrough {
+          path = "/dev/zero"
+        }
+        description = <<-EOT
+          my
+          description
+          value
+        EOT
+        initialization {
+          hostname = "test"
+          ip_config {
+            ipv4 {
+              address = "172.16.10.10/15"
+              gateway = "172.16.0.1"
+            }
+          }
+        }
+        network_interface {
+          name = "vmbr0"
+        }
+        operating_system {
+          template_file_id = "local:vztmpl/{{.ImageFileName}}"
+          type             = "ubuntu"
+        }
+      }`, WithRootUser()),
+      Check: resource.ComposeTestCheckFunc(
+        ResourceAttributes(accTestContainerName, map[string]string{
+          "disk.0.mount_options.#": "0",
         }),
       ),
     },
@@ -560,7 +560,7 @@ func (r *CustomRootFS) EncodeValues(key string, v *url.Values) error {

   if r.MountOptions != nil {
     if len(*r.MountOptions) > 0 {
-      values = append(values, fmt.Sprintf("mount=%s", strings.Join(*r.MountOptions, ";")))
+      values = append(values, fmt.Sprintf("mountoptions=%s", strings.Join(*r.MountOptions, ";")))
     }
   }

|
|||||||
r.Volume = v[0]
|
r.Volume = v[0]
|
||||||
} else if len(v) == 2 {
|
} else if len(v) == 2 {
|
||||||
switch v[0] {
|
switch v[0] {
|
||||||
|
case "volume":
|
||||||
|
r.Volume = v[1]
|
||||||
case "acl":
|
case "acl":
|
||||||
bv := types.CustomBool(v[1] == "1")
|
bv := types.CustomBool(v[1] == "1")
|
||||||
r.ACL = &bv
|
r.ACL = &bv
|
||||||
@ -916,7 +918,7 @@ func (r *CustomRootFS) UnmarshalJSON(b []byte) error {
|
|||||||
case "size":
|
case "size":
|
||||||
r.Size = new(types.DiskSize)
|
r.Size = new(types.DiskSize)
|
||||||
|
|
||||||
err := r.Size.UnmarshalJSON([]byte(v[1]))
|
err = r.Size.UnmarshalJSON([]byte(v[1]))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to unmarshal disk size: %w", err)
|
return fmt.Errorf("failed to unmarshal disk size: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -52,7 +52,10 @@ const (
|
|||||||
dvCPUUnits = 1024
|
dvCPUUnits = 1024
|
||||||
dvDescription = ""
|
dvDescription = ""
|
||||||
dvDevicePassthroughMode = "0660"
|
dvDevicePassthroughMode = "0660"
|
||||||
|
dvDiskACL = false
|
||||||
dvDiskDatastoreID = "local"
|
dvDiskDatastoreID = "local"
|
||||||
|
dvDiskQuota = false
|
||||||
|
dvDiskReplicate = false
|
||||||
dvDiskSize = 4
|
dvDiskSize = 4
|
||||||
dvFeaturesNesting = false
|
dvFeaturesNesting = false
|
||||||
dvFeaturesKeyControl = false
|
dvFeaturesKeyControl = false
|
||||||
@ -107,7 +110,11 @@ const (
|
|||||||
mkCPUUnits = "units"
|
mkCPUUnits = "units"
|
||||||
mkDescription = "description"
|
mkDescription = "description"
|
||||||
mkDisk = "disk"
|
mkDisk = "disk"
|
||||||
|
mkDiskACL = "acl"
|
||||||
mkDiskDatastoreID = "datastore_id"
|
mkDiskDatastoreID = "datastore_id"
|
||||||
|
mkDiskMountOptions = "mount_options"
|
||||||
|
mkDiskQuota = "quota"
|
||||||
|
mkDiskReplicate = "replicate"
|
||||||
mkDiskSize = "size"
|
mkDiskSize = "size"
|
||||||
mkFeatures = "features"
|
mkFeatures = "features"
|
||||||
mkFeaturesNesting = "nesting"
|
mkFeaturesNesting = "nesting"
|
||||||
@ -329,13 +336,20 @@ func Container() *schema.Resource {
|
|||||||
DefaultFunc: func() (interface{}, error) {
|
DefaultFunc: func() (interface{}, error) {
|
||||||
return []interface{}{
|
return []interface{}{
|
||||||
map[string]interface{}{
|
map[string]interface{}{
|
||||||
mkDiskDatastoreID: dvDiskDatastoreID,
|
mkDiskDatastoreID: dvDiskDatastoreID,
|
||||||
mkDiskSize: dvDiskSize,
|
mkDiskSize: dvDiskSize,
|
||||||
|
mkDiskMountOptions: nil,
|
||||||
},
|
},
|
||||||
}, nil
|
}, nil
|
||||||
},
|
},
|
||||||
Elem: &schema.Resource{
|
Elem: &schema.Resource{
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
|
mkDiskACL: {
|
||||||
|
Type: schema.TypeBool,
|
||||||
|
Description: "Explicitly enable or disable ACL support",
|
||||||
|
Optional: true,
|
||||||
|
Default: dvDiskACL,
|
||||||
|
},
|
||||||
mkDiskDatastoreID: {
|
mkDiskDatastoreID: {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Description: "The datastore id",
|
Description: "The datastore id",
|
||||||
@ -343,6 +357,18 @@ func Container() *schema.Resource {
|
|||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Default: dvDiskDatastoreID,
|
Default: dvDiskDatastoreID,
|
||||||
},
|
},
|
||||||
|
mkDiskQuota: {
|
||||||
|
Type: schema.TypeBool,
|
||||||
|
Description: "Enable user quotas for the container rootfs",
|
||||||
|
Optional: true,
|
||||||
|
Default: dvDiskQuota,
|
||||||
|
},
|
||||||
|
mkDiskReplicate: {
|
||||||
|
Type: schema.TypeBool,
|
||||||
|
Description: "Will include this volume to a storage replica job",
|
||||||
|
Optional: true,
|
||||||
|
Default: dvDiskReplicate,
|
||||||
|
},
|
||||||
mkDiskSize: {
|
mkDiskSize: {
|
||||||
Type: schema.TypeInt,
|
Type: schema.TypeInt,
|
||||||
Description: "The rootfs size in gigabytes",
|
Description: "The rootfs size in gigabytes",
|
||||||
@ -351,6 +377,17 @@ func Container() *schema.Resource {
|
|||||||
Default: dvDiskSize,
|
Default: dvDiskSize,
|
||||||
ValidateDiagFunc: validation.ToDiagFunc(validation.IntAtLeast(0)),
|
ValidateDiagFunc: validation.ToDiagFunc(validation.IntAtLeast(0)),
|
||||||
},
|
},
|
||||||
|
mkDiskMountOptions: {
|
||||||
|
Type: schema.TypeList,
|
||||||
|
Description: "Extra mount options",
|
||||||
|
Optional: true,
|
||||||
|
Elem: &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
ValidateFunc: validation.StringIsNotEmpty,
|
||||||
|
},
|
||||||
|
DiffSuppressFunc: structure.SuppressIfListsAreEqualIgnoringOrder,
|
||||||
|
DiffSuppressOnRefresh: true,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
MaxItems: 1,
|
MaxItems: 1,
|
||||||
@ -1458,6 +1495,23 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
|
|||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
vmIDUntyped, hasVMID := d.GetOk(mkVMID)
|
||||||
|
vmID := vmIDUntyped.(int)
|
||||||
|
|
||||||
|
if !hasVMID {
|
||||||
|
vmIDNew, err := config.GetIDGenerator().NextID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return diag.FromErr(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
vmID = vmIDNew
|
||||||
|
|
||||||
|
err = d.Set(mkVMID, vmID)
|
||||||
|
if err != nil {
|
||||||
|
return diag.FromErr(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
nodeName := d.Get(mkNodeName).(string)
|
nodeName := d.Get(mkNodeName).(string)
|
||||||
container := Container()
|
container := Container()
|
||||||
|
|
||||||
@ -1709,12 +1763,21 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
|
|||||||
|
|
||||||
var rootFS *containers.CustomRootFS
|
var rootFS *containers.CustomRootFS
|
||||||
|
|
||||||
|
diskMountOptions := []string{}
|
||||||
|
|
||||||
|
if diskBlock[mkDiskMountOptions] != nil {
|
||||||
|
for _, opt := range diskBlock[mkDiskMountOptions].([]any) {
|
||||||
|
diskMountOptions = append(diskMountOptions, opt.(string))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
diskSize := diskBlock[mkDiskSize].(int)
|
diskSize := diskBlock[mkDiskSize].(int)
|
||||||
if diskDatastoreID != "" && (diskSize != dvDiskSize || len(mountPoints) > 0) {
|
if diskDatastoreID != "" && (diskSize != dvDiskSize || len(mountPoints) > 0) {
|
||||||
// This is a special case where the rootfs size is set to a non-default value at creation time.
|
// This is a special case where the rootfs size is set to a non-default value at creation time.
|
||||||
// see https://pve.proxmox.com/pve-docs/chapter-pct.html#_storage_backed_mount_points
|
// see https://pve.proxmox.com/pve-docs/chapter-pct.html#_storage_backed_mount_points
|
||||||
rootFS = &containers.CustomRootFS{
|
rootFS = &containers.CustomRootFS{
|
||||||
Volume: fmt.Sprintf("%s:%d", diskDatastoreID, diskSize),
|
Volume: fmt.Sprintf("%s:%d", diskDatastoreID, diskSize),
|
||||||
|
MountOptions: &diskMountOptions,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1831,22 +1894,6 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
   tags := d.Get(mkTags).([]interface{})
   template := types.CustomBool(d.Get(mkTemplate).(bool))
   unprivileged := types.CustomBool(d.Get(mkUnprivileged).(bool))
-  vmIDUntyped, hasVMID := d.GetOk(mkVMID)
-  vmID := vmIDUntyped.(int)
-
-  if !hasVMID {
-    vmIDNew, err := config.GetIDGenerator().NextID(ctx)
-    if err != nil {
-      return diag.FromErr(err)
-    }
-
-    vmID = vmIDNew
-
-    err = d.Set(mkVMID, vmID)
-    if err != nil {
-      return diag.FromErr(err)
-    }
-  }

   // Attempt to create the container using the retrieved values.
   createBody := containers.CreateRequestBody{
|||||||
|
|
||||||
if containerConfig.RootFS != nil {
|
if containerConfig.RootFS != nil {
|
||||||
volumeParts := strings.Split(containerConfig.RootFS.Volume, ":")
|
volumeParts := strings.Split(containerConfig.RootFS.Volume, ":")
|
||||||
|
disk[mkDiskACL] = containerConfig.RootFS.ACL
|
||||||
|
disk[mkDiskReplicate] = containerConfig.RootFS.Replicate
|
||||||
|
disk[mkDiskQuota] = containerConfig.RootFS.Quota
|
||||||
disk[mkDiskDatastoreID] = volumeParts[0]
|
disk[mkDiskDatastoreID] = volumeParts[0]
|
||||||
|
|
||||||
disk[mkDiskSize] = containerConfig.RootFS.Size.InGigabytes()
|
disk[mkDiskSize] = containerConfig.RootFS.Size.InGigabytes()
|
||||||
|
if containerConfig.RootFS.MountOptions != nil {
|
||||||
|
disk[mkDiskMountOptions] = *containerConfig.RootFS.MountOptions
|
||||||
|
} else {
|
||||||
|
disk[mkDiskMountOptions] = []string{}
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
// Default value of "storage" is "local" according to the API documentation.
|
// Default value of "storage" is "local" according to the API documentation.
|
||||||
disk[mkDiskDatastoreID] = "local"
|
disk[mkDiskDatastoreID] = "local"
|
||||||
disk[mkDiskSize] = dvDiskSize
|
disk[mkDiskSize] = dvDiskSize
|
||||||
|
disk[mkDiskMountOptions] = []string{}
|
||||||
}
|
}
|
||||||
|
|
||||||
currentDisk := d.Get(mkDisk).([]interface{})
|
currentDisk := d.Get(mkDisk).([]interface{})
|
||||||
@ -2275,7 +2332,10 @@ func containerRead(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
|||||||
}
|
}
|
||||||
} else if len(currentDisk) > 0 ||
|
} else if len(currentDisk) > 0 ||
|
||||||
disk[mkDiskDatastoreID] != dvDiskDatastoreID ||
|
disk[mkDiskDatastoreID] != dvDiskDatastoreID ||
|
||||||
disk[mkDiskSize] != dvDiskSize {
|
disk[mkDiskACL] != dvDiskACL ||
|
||||||
|
disk[mkDiskReplicate] != dvDiskReplicate ||
|
||||||
|
disk[mkDiskQuota] != dvDiskQuota ||
|
||||||
|
len(disk[mkDiskMountOptions].([]string)) > 0 {
|
||||||
err := d.Set(mkDisk, []interface{}{disk})
|
err := d.Set(mkDisk, []interface{}{disk})
|
||||||
diags = append(diags, diag.FromErr(err)...)
|
diags = append(diags, diag.FromErr(err)...)
|
||||||
}
|
}
|
||||||
@ -2917,6 +2977,50 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{})
|
|||||||
updateBody.CPUUnits = &cpuUnits
|
updateBody.CPUUnits = &cpuUnits
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if d.HasChange(mkDisk) {
|
||||||
|
diskBlock, err := structure.GetSchemaBlock(
|
||||||
|
container,
|
||||||
|
d,
|
||||||
|
[]string{mkDisk},
|
||||||
|
0,
|
||||||
|
true,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return diag.FromErr(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
rootFS := &containers.CustomRootFS{}
|
||||||
|
// Disk ID for the rootfs is always 0
|
||||||
|
diskID := 0
|
||||||
|
vmID := d.Get(mkVMID).(int)
|
||||||
|
rootFS.Volume = diskBlock[mkDiskDatastoreID].(string)
|
||||||
|
rootFS.Volume = getContainerDiskVolume(rootFS.Volume, vmID, diskID)
|
||||||
|
|
||||||
|
acl := types.CustomBool(diskBlock[mkDiskACL].(bool))
|
||||||
|
mountOptions := diskBlock[mkDiskMountOptions].([]interface{})
|
||||||
|
quota := types.CustomBool(diskBlock[mkDiskQuota].(bool))
|
||||||
|
replicate := types.CustomBool(diskBlock[mkDiskReplicate].(bool))
|
||||||
|
size := types.DiskSizeFromGigabytes(int64(diskBlock[mkDiskSize].(int)))
|
||||||
|
|
||||||
|
rootFS.ACL = &acl
|
||||||
|
rootFS.Quota = "a
|
||||||
|
rootFS.Replicate = &replicate
|
||||||
|
rootFS.Size = size
|
||||||
|
|
||||||
|
mountOptionsStrings := make([]string, 0, len(mountOptions))
|
||||||
|
|
||||||
|
for _, option := range mountOptions {
|
||||||
|
mountOptionsStrings = append(mountOptionsStrings, option.(string))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Always set, including empty, to allow clearing mount options
|
||||||
|
rootFS.MountOptions = &mountOptionsStrings
|
||||||
|
|
||||||
|
updateBody.RootFS = rootFS
|
||||||
|
|
||||||
|
rebootRequired = true
|
||||||
|
}
|
||||||
|
|
||||||
if d.HasChange(mkFeatures) {
|
if d.HasChange(mkFeatures) {
|
||||||
features, err := containerGetFeatures(container, d)
|
features, err := containerGetFeatures(container, d)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -3424,3 +3528,7 @@ func parseImportIDWithNodeName(id string) (string, string, error) {
|
|||||||
|
|
||||||
return nodeName, id, nil
|
return nodeName, id, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getContainerDiskVolume(rawVolume string, vmID int, diskID int) string {
|
||||||
|
return fmt.Sprintf("%s:vm-%d-disk-%d", rawVolume, vmID, diskID)
|
||||||
|
}
|
||||||
|
@@ -3,19 +3,21 @@ layout: page
 page_title: "Clone a VM"
 subcategory: Guides
 description: |-
-  This guide explains how to create a VM template and then clone it to another VM.
+  This guide explains how to create a VM template and clone it to a new VM.
 ---

 # Clone a VM

 ## Create a VM template

-VM templates in Proxmox provide an efficient way to create multiple identical VMs. Templates act as a base image that can be cloned to create new VMs, ensuring consistency and reducing the time needed to provision new instances. When a VM is created as a template, it is read-only and can't be started, but can be cloned multiple times to create new VMs.
+VM templates in Proxmox provide an efficient way to create multiple identical VMs. Templates act as a base image that can be cloned to create new VMs, ensuring consistency and reducing the time needed to provision new instances. When a VM is created as a template, it is read-only and cannot be started, but can be cloned multiple times to create new VMs.

-You can create a template directly in Proxmox by setting the `template` attribute to `true` when creating the VM resource:
+You can create a template with Terraform by setting the `template` attribute to `true` when creating the VM resource:

 {{ codefile "terraform" "examples/guides/clone-vm/template.tf" }}

-Once you have a template, you can clone it to create new VMs. The cloned VMs will inherit all the configuration from the template but can be customized further as needed.
+Once you have a template, you can clone it to create new VMs. The cloned VMs will inherit all configuration from the template but can be customized further as needed.

 {{ codefile "terraform" "examples/guides/clone-vm/clone.tf" }}
+
+Full example is available in the [examples/guides/clone-vm](https://github.com/bpg/terraform-provider-proxmox/tree/main/examples/guides/clone-vm) directory.