# VM Definitions
# Define all VMs to deploy in the locals.vms map below
# Omit fields to use defaults from variables.tf

locals {
  # Define VMs here
  # Each VM can override defaults by specifying values
  # Omit "ip" field for DHCP, include it for static IP
  vms = {
    # Example DHCP VM (uncomment to deploy):
    # "example-dhcp-vm" = {
    #   cpu_cores = 2
    #   memory    = 2048
    #   disk_size = "20G"
    # }

    # Example Static IP VM (uncomment to deploy):
    # "example-static-vm" = {
    #   ip        = "10.69.13.50/24"
    #   cpu_cores = 4
    #   memory    = 4096
    #   disk_size = "50G"
    # }

    # Example Test VM with custom git branch (for testing pipeline changes):
    # "test-vm" = {
    #   ip           = "10.69.13.100/24"
    #   flake_branch = "test-pipeline" # Bootstrap from this branch instead of master
    # }

    # Example Minimal VM using all defaults (uncomment to deploy):
    # "minimal-vm" = {}

    # "bootstrap-verify-test" = {}

    "testvm01" = {
      ip           = "10.69.13.101/24"
      cpu_cores    = 2
      memory       = 2048
      disk_size    = "20G"
      flake_branch = "pipeline-testing-improvements"
    }

    "vault01" = {
      ip        = "10.69.13.19/24"
      cpu_cores = 2
      memory    = 2048
      disk_size = "20G"
    }
  }

  # Compute VM configurations with defaults applied
  vm_configs = {
    for name, vm in local.vms : name => {
      target_node    = lookup(vm, "target_node", var.default_target_node)
      template_name  = lookup(vm, "template_name", var.default_template_name)
      cpu_cores      = lookup(vm, "cpu_cores", var.default_cpu_cores)
      memory         = lookup(vm, "memory", var.default_memory)
      disk_size      = lookup(vm, "disk_size", var.default_disk_size)
      storage        = lookup(vm, "storage", var.default_storage)
      bridge         = lookup(vm, "bridge", var.default_bridge)
      vlan_tag       = lookup(vm, "vlan_tag", var.default_vlan_tag)
      ssh_public_key = lookup(vm, "ssh_public_key", var.default_ssh_public_key)
      nameservers    = lookup(vm, "nameservers", var.default_nameservers)
      search_domain  = lookup(vm, "search_domain", var.default_search_domain)

      # Network configuration - detect DHCP vs static
      ip      = lookup(vm, "ip", null)
      gateway = lookup(vm, "gateway", var.default_gateway)

      # Branch configuration for bootstrap (optional, uses master if not set)
      flake_branch = lookup(vm, "flake_branch", null)
    }
  }
}

# Deploy all VMs using for_each
resource "proxmox_vm_qemu" "vm" {
  for_each = local.vm_configs

  name        = each.key
  target_node = each.value.target_node

  # Clone from template
  clone      = each.value.template_name
  full_clone = true

  # Boot configuration
  boot   = "order=virtio0"
  scsihw = "virtio-scsi-single"

  # VM settings
  cpu {
    cores = each.value.cpu_cores
  }
  memory = each.value.memory

  # Network
  network {
    id     = 0
    model  = "virtio"
    bridge = each.value.bridge
    tag    = each.value.vlan_tag
  }

  # Disk settings
  disks {
    virtio {
      virtio0 {
        disk {
          size    = each.value.disk_size
          storage = each.value.storage
        }
      }
    }
    ide {
      ide2 {
        # Reference the custom cloud-init disk created in cloud-init.tf
        cdrom {
          iso = proxmox_cloud_init_disk.ci[each.key].id
        }
      }
    }
  }

  # TPM device
  tpm_state {
    storage = each.value.storage
  }

  # Start on boot
  start_at_node_boot = true

  # Agent
  agent = 1

  # Skip IPv6 since we don't use it
  skip_ipv6 = true

  # RNG device for better entropy
  rng {
    source = "/dev/urandom"
    period = 1000
  }
}
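
# The lookups in vm_configs fall back to var.default_* values declared in
# variables.tf (not part of this file). A minimal commented sketch of what a
# few of those declarations might look like; the types and example defaults
# below are assumptions for illustration, not the actual contents of
# variables.tf:
#
# variable "default_target_node" {
#   type    = string
#   default = "pve01" # assumed Proxmox node name
# }
#
# variable "default_cpu_cores" {
#   type    = number
#   default = 2
# }
#
# variable "default_memory" {
#   type    = number
#   default = 2048
# }
#
# variable "default_gateway" {
#   type    = string
#   default = "10.69.13.1" # assumed gateway for the 10.69.13.0/24 network
# }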