ansible: restructure with dynamic inventory from flake
- Move playbooks/ to ansible/playbooks/ - Add dynamic inventory script that extracts hosts from flake - Groups by tier (tier_test, tier_prod) and role (role_dns, etc.) - Reads homelab.host.* options for metadata - Add static inventory for non-flake hosts (Proxmox) - Add ansible.cfg with inventory path and SSH optimizations - Add group_vars/all.yml for common variables - Add restart-service.yml playbook for restarting systemd services - Update provision-approle.yml with single-host safeguard - Add ANSIBLE_CONFIG to devshell for automatic inventory discovery - Add ansible = "false" label to template2 to exclude from inventory - Update CLAUDE.md to reference ansible/README.md for details Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
146
ansible/playbooks/build-and-deploy-template.yml
Normal file
146
ansible/playbooks/build-and-deploy-template.yml
Normal file
@@ -0,0 +1,146 @@
|
||||
---
# Build a NixOS Proxmox template image from the flake, upload it to a Proxmox
# node, restore it as a template VM, and sync the resulting template name into
# the Terraform variables file.

- name: Build and deploy NixOS Proxmox template
  hosts: localhost
  gather_facts: false

  vars:
    template_name: "template2"
    # Flake attribute to build (used in the nixos-rebuild command below).
    nixos_config: "template2"
    proxmox_node: "pve1.home.2rjus.net"  # Change to your Proxmox node name
    proxmox_host: "pve1.home.2rjus.net"  # Change to your Proxmox host
    template_vmid: 9000  # Template VM ID
    storage: "local-zfs"

  tasks:
    - name: Build NixOS image
      ansible.builtin.command:
        # Use the nixos_config var instead of hard-coding the flake attribute.
        cmd: "nixos-rebuild build-image --image-variant proxmox --flake .#{{ nixos_config }}"
        chdir: "{{ playbook_dir }}/../.."
      register: build_result
      changed_when: true

    - name: Find built image file
      ansible.builtin.find:
        paths: "{{ playbook_dir }}/../../result"
        patterns: "*.vma.zst"
        recurse: true
      register: image_files

    - name: Fail if no image found
      ansible.builtin.fail:
        # Message matches the *.vma.zst pattern searched above (was "QCOW2").
        msg: "No VMA image (*.vma.zst) found in build output"
      when: image_files.matched == 0

    - name: Set image path
      ansible.builtin.set_fact:
        image_path: "{{ image_files.files[0].path }}"

    - name: Extract image filename
      ansible.builtin.set_fact:
        image_filename: "{{ image_path | basename }}"

    - name: Display image info
      ansible.builtin.debug:
        msg: "Built image: {{ image_path }} ({{ image_filename }})"

- name: Deploy template to Proxmox
  hosts: proxmox
  gather_facts: false

  vars:
    template_name: "template2"
    template_vmid: 9000
    storage: "local-zfs"

  tasks:
    # Facts set on localhost in the previous play are read via hostvars.
    - name: Get image path and filename from localhost
      ansible.builtin.set_fact:
        image_path: "{{ hostvars['localhost']['image_path'] }}"
        image_filename: "{{ hostvars['localhost']['image_filename'] }}"

    - name: Set destination path
      ansible.builtin.set_fact:
        image_dest: "/var/lib/vz/dump/{{ image_filename }}"

    - name: Copy image to Proxmox
      ansible.builtin.copy:
        src: "{{ image_path }}"
        dest: "{{ image_dest }}"
        mode: "0644"

    - name: Check if template VM already exists
      ansible.builtin.command:
        cmd: "qm status {{ template_vmid }}"
      register: vm_status
      # qm exits non-zero when the VMID does not exist; that is expected.
      failed_when: false
      changed_when: false

    - name: Destroy existing template VM if it exists
      ansible.builtin.command:
        cmd: "qm destroy {{ template_vmid }} --purge"
      when: vm_status.rc == 0
      changed_when: true

    - name: Import image
      ansible.builtin.command:
        # Pass --storage so the restore lands on the declared storage;
        # previously the storage var was only used in the success message.
        cmd: "qmrestore {{ image_dest }} {{ template_vmid }} --storage {{ storage }}"
      changed_when: true

    - name: Convert VM to template
      ansible.builtin.command:
        cmd: "qm template {{ template_vmid }}"
      changed_when: true

    - name: Clean up uploaded image
      ansible.builtin.file:
        path: "{{ image_dest }}"
        state: absent

    - name: Display success message
      ansible.builtin.debug:
        msg: "Template VM {{ template_vmid }} created successfully on {{ storage }}"

- name: Update Terraform template name
  hosts: localhost
  gather_facts: false

  vars:
    terraform_dir: "{{ playbook_dir }}/../../terraform"

  tasks:
    - name: Get image filename from earlier play
      ansible.builtin.set_fact:
        image_filename: "{{ hostvars['localhost']['image_filename'] }}"

    # vzdump-qemu-<name>.vma.zst -> <name>
    - name: Extract template name from image filename
      ansible.builtin.set_fact:
        new_template_name: "{{ image_filename | regex_replace('\\.vma\\.zst$', '') | regex_replace('^vzdump-qemu-', '') }}"

    - name: Read current Terraform variables file
      ansible.builtin.slurp:
        src: "{{ terraform_dir }}/variables.tf"
      register: variables_tf_content

    - name: Extract current template name from variables.tf
      ansible.builtin.set_fact:
        current_template_name: "{{ (variables_tf_content.content | b64decode) | regex_search('variable \"default_template_name\"[^}]+default\\s*=\\s*\"([^\"]+)\"', '\\1') | first }}"

    - name: Check if template name has changed
      ansible.builtin.set_fact:
        template_name_changed: "{{ current_template_name != new_template_name }}"

    - name: Display template name status
      ansible.builtin.debug:
        msg: "Template name: {{ current_template_name }} -> {{ new_template_name }} ({{ 'changed' if template_name_changed else 'unchanged' }})"

    - name: Update default_template_name in variables.tf
      ansible.builtin.replace:
        path: "{{ terraform_dir }}/variables.tf"
        regexp: '(variable "default_template_name"[^}]+default\s*=\s*)"[^"]+"'
        replace: '\1"{{ new_template_name }}"'
      when: template_name_changed

    - name: Display update result
      ansible.builtin.debug:
        msg: "Updated terraform/variables.tf with new template name: {{ new_template_name }}"
      when: template_name_changed
|
||||
98
ansible/playbooks/provision-approle.yml
Normal file
98
ansible/playbooks/provision-approle.yml
Normal file
@@ -0,0 +1,98 @@
|
||||
---
# Provision OpenBao AppRole credentials to a host
#
# Usage: ansible-playbook ansible/playbooks/provision-approle.yml -l <hostname>
# Requires: BAO_ADDR and BAO_TOKEN environment variables set
#
# IMPORTANT: This playbook must target exactly one host to prevent
# accidentally regenerating credentials for multiple hosts.

- name: Validate single host target
  hosts: all
  gather_facts: false

  tasks:
    - name: Fail if targeting multiple hosts
      ansible.builtin.fail:
        msg: |
          This playbook must target exactly one host.
          Use: ansible-playbook provision-approle.yml -l <hostname>

          Targeting multiple hosts would regenerate credentials for all of them,
          potentially breaking existing services.
      when: ansible_play_hosts | length != 1
      run_once: true

- name: Fetch AppRole credentials from OpenBao
  hosts: localhost
  connection: local
  gather_facts: false

  vars:
    # NOTE(review): groups['all'] reflects the full inventory and is not
    # filtered by "-l <hostname>"; with more than one inventory host this may
    # pick the wrong target. Consider deriving the target from the limited
    # host list (ansible_play_hosts of the first play or the ansible_limit
    # magic variable) — TODO confirm against the dynamic inventory.
    target_host: "{{ groups['all'] | first }}"
    target_hostname: "{{ hostvars[target_host]['short_hostname'] | default(target_host.split('.')[0]) }}"

  tasks:
    - name: Display target host
      ansible.builtin.debug:
        msg: "Provisioning AppRole credentials for: {{ target_hostname }}"

    # vault_addr is expected to come from group_vars — TODO confirm.
    - name: Get role-id for host
      ansible.builtin.command:
        cmd: "bao read -field=role_id auth/approle/role/{{ target_hostname }}/role-id"
      environment:
        BAO_ADDR: "{{ vault_addr }}"
        BAO_SKIP_VERIFY: "1"
      register: role_id_result
      changed_when: false

    - name: Generate secret-id for host
      ansible.builtin.command:
        cmd: "bao write -field=secret_id -f auth/approle/role/{{ target_hostname }}/secret-id"
      environment:
        BAO_ADDR: "{{ vault_addr }}"
        BAO_SKIP_VERIFY: "1"
      register: secret_id_result
      changed_when: true
      # Prevent the secret-id from appearing in verbose/log output.
      no_log: true

    - name: Store credentials for next play
      ansible.builtin.set_fact:
        vault_role_id: "{{ role_id_result.stdout }}"
        vault_secret_id: "{{ secret_id_result.stdout }}"
      no_log: true

- name: Deploy AppRole credentials to host
  hosts: all
  gather_facts: false

  vars:
    vault_role_id: "{{ hostvars['localhost']['vault_role_id'] }}"
    vault_secret_id: "{{ hostvars['localhost']['vault_secret_id'] }}"

  tasks:
    - name: Create AppRole directory
      ansible.builtin.file:
        path: /var/lib/vault/approle
        state: directory
        mode: "0700"
        owner: root
        group: root

    - name: Write role-id
      ansible.builtin.copy:
        content: "{{ vault_role_id }}"
        dest: /var/lib/vault/approle/role-id
        mode: "0600"
        owner: root
        group: root

    - name: Write secret-id
      ansible.builtin.copy:
        content: "{{ vault_secret_id }}"
        dest: /var/lib/vault/approle/secret-id
        mode: "0600"
        owner: root
        group: root
      # Keep the secret out of task output/logs.
      no_log: true

    - name: Display success
      ansible.builtin.debug:
        msg: "AppRole credentials provisioned to {{ inventory_hostname }}"
|
||||
40
ansible/playbooks/restart-service.yml
Normal file
40
ansible/playbooks/restart-service.yml
Normal file
@@ -0,0 +1,40 @@
|
||||
---
# Restart a systemd service on target hosts
#
# Usage examples:
#   # Restart unbound on all DNS servers
#   ansible-playbook restart-service.yml -l role_dns -e service=unbound
#
#   # Restart nginx on a specific host
#   ansible-playbook restart-service.yml -l http-proxy.home.2rjus.net -e service=nginx
#
#   # Restart promtail on all prod hosts
#   ansible-playbook restart-service.yml -l tier_prod -e service=promtail

- name: Restart systemd service
  hosts: all
  gather_facts: false

  tasks:
    - name: Validate service name provided
      ansible.builtin.fail:
        msg: |
          The 'service' variable is required.
          Usage: ansible-playbook restart-service.yml -l <target> -e service=<name>

          Examples:
          -e service=nginx
          -e service=unbound
          -e service=promtail
      when: service is not defined
      run_once: true

    # systemd_service is the current module name; consistent with
    # run-upgrade.yml. Unused 'register' removed.
    - name: Restart {{ service }}
      ansible.builtin.systemd_service:
        name: "{{ service }}"
        state: restarted

    - name: Display result
      ansible.builtin.debug:
        msg: "Service {{ service }} restarted on {{ inventory_hostname }}"
|
||||
9
ansible/playbooks/run-upgrade.yml
Normal file
9
ansible/playbooks/run-upgrade.yml
Normal file
@@ -0,0 +1,9 @@
|
||||
---
# Start the nixos-upgrade systemd unit on every host (fire-and-forget;
# 'started' does not wait for the upgrade to finish).
- name: Trigger nixos-upgrade job on all hosts
  hosts: all
  remote_user: root
  # No facts are used; skip gathering for speed (consistent with the other
  # playbooks in this directory).
  gather_facts: false

  tasks:
    # Named task (the original was unnamed — ansible-lint name[missing]).
    - name: Start nixos-upgrade.service
      ansible.builtin.systemd_service:
        name: nixos-upgrade.service
        state: started
|
||||
Reference in New Issue
Block a user