Compare commits
407 Commits
7336231878
...
host-vault
| Author | SHA1 | Date | |
|---|---|---|---|
|
4afb37d730
|
|||
|
a2c798bc30
|
|||
|
6d64e53586
|
|||
|
e0ad445341
|
|||
| d194c147d6 | |||
|
9908286062
|
|||
|
cec496dda7
|
|||
|
fca50562c3
|
|||
|
1f1829dc2f
|
|||
|
21a32e0521
|
|||
|
7fe0aa0f54
|
|||
|
83de9a3ffb
|
|||
| 30addc5116 | |||
|
2aeed8f231
|
|||
| c3180c1b2c | |||
|
6f7aee3444
|
|||
| af17387c7d | |||
|
408554b477
|
|||
|
b20ad9c275
|
|||
| 076e22c338 | |||
|
7aa5137039
|
|||
| b3132fbe70 | |||
|
ce6d2b1d33
|
|||
|
3a464bc323
|
|||
|
7f72a72043
|
|||
|
f779f49c20
|
|||
| 7964d5a171 | |||
| 04422a26b8 | |||
| 676fe5b3de | |||
| b4eab5b534 | |||
| e9ab19d06b | |||
| df100c9849 | |||
| 1e89c1b1d7 | |||
| 5c5286c3a9 | |||
| d0f1688fc1 | |||
|
0bd37cdf48
|
|||
|
57d208e08b
|
|||
| 5fa3277641 | |||
| 8dee351278 | |||
| cb69a249b9 | |||
| 9272f64f4e | |||
| 0200727e26 | |||
| 0a472c9f2e | |||
| 9be728bf60 | |||
| 98b0344909 | |||
| 375fd75402 | |||
| 3b02e9c8b5 | |||
| 0ba8c7300e | |||
| 8655a3efa3 | |||
| b7f5acdd6e | |||
| e59eab155b | |||
| 7451bd5fc4 | |||
| 3d14020600 | |||
| 3c96d33ede | |||
| e01436e7a5 | |||
| 29cfa21ab4 | |||
| 4a8f448e3b | |||
| 16e7eeda17 | |||
| b7185f52a1 | |||
| a28e604f12 | |||
| c080c2620a | |||
| 7c55bb9a48 | |||
| d4d73e76cd | |||
| dac989f418 | |||
| d0eab72e40 | |||
|
04f89fbda2
|
|||
|
bb9de5b4ca
|
|||
|
8eefe38d5e
|
|||
|
78efc4f592
|
|||
|
48df2bf199
|
|||
|
25b786915c
|
|||
|
f2963a150b
|
|||
| a50f3d0ddd | |||
| 60bf5b52a7 | |||
| 95e32646c2 | |||
| 658e871fc0 | |||
| 98949d5ddb | |||
| a86eae9ad6 | |||
| 814d45955b | |||
| c723084467 | |||
| ba36c82bf5 | |||
| c86d9d5774 | |||
| 3f5fef1a6d | |||
| 58c9307f00 | |||
| 78e7f5064c | |||
| 9c53cfdfa1 | |||
| 8120e38e23 | |||
| 69b5588e7a | |||
| 2e2d7f9b5c | |||
| ddcb107382 | |||
| e3ad470ece | |||
| e63aab7746 | |||
| bea083f03c | |||
| 72099a3682 | |||
| 9799d44584 | |||
| 3e7c648bf7 | |||
| bfcb1cbc6d | |||
| c0c963ef0f | |||
| e8a9227cbc | |||
| 18624cfc44 | |||
| 9eb6492c70 | |||
| 1f91092ba6 | |||
| 917a3d7c70 | |||
| 0f45e6d37b | |||
| 361f7c2319 | |||
| 0eb6e9664f | |||
| 83221f1f58 | |||
| 4b528e751c | |||
| 3e0ad4d1c0 | |||
| fe680b258f | |||
| 85b701f0ae | |||
| f85be0d691 | |||
| 8b5c7d4919 | |||
| 1f602ca2e2 | |||
| 1af8e69e4b | |||
| 3b017ccd2d | |||
| acba89b864 | |||
| 3a6ee2894b | |||
| 16e4b9c908 | |||
| 0f820eb0f6 | |||
| 186ce8525e | |||
| ff9281fcbf | |||
| a0b9a3d900 | |||
| aca303c367 | |||
| 033f103374 | |||
| 7317e98a39 | |||
| 8bc0546b70 | |||
| 98099d0a2e | |||
| 7556a93700 | |||
| 78c2cff25b | |||
| afb1ba5427 | |||
| d18f1bb0c4 | |||
| 05a5fb9d52 | |||
| 0b039e5ffe | |||
| 36b8df91fc | |||
| 3be5f21a07 | |||
| 1b44b9291a | |||
| 7c5c4c172d | |||
| b11267647b | |||
| beefa4a122 | |||
|
3219b8da4b
|
|||
|
e5d799ef68
|
|||
|
2fc4623e8d
|
|||
|
bd162f3743
|
|||
|
b86de01de8
|
|||
| ca69c434bd | |||
| 37cbc0f0a6 | |||
| 5239772ecb | |||
|
09bd63169d
|
|||
|
ef3d34d27f
|
|||
|
ecd1dd83ab
|
|||
|
2cdc2fc896
|
|||
|
7b2c3d1efa
|
|||
|
ad3f4e8094
|
|||
|
fa4e47a873
|
|||
|
f49711b1b3
|
|||
|
a0e94430b4
|
|||
| cdf2f17e0b | |||
| e4b004e2a5 | |||
| 20b345a693 | |||
| bb3cda7c48 | |||
| 29eb7fb4b2 | |||
| 2a9ceacaf2 | |||
| 92071795a6 | |||
| 0a97f4e709 | |||
| 76f319291a | |||
| 00ac943f9a | |||
| d29fa1f5b0 | |||
| 5e7b4b3fa6 | |||
| a4961847fa | |||
|
bcf01a0c11
|
|||
| 44766feb95 | |||
| 7ea44648a2 | |||
| a4659182c9 | |||
| a389044060 | |||
| e95f031237 | |||
| 1a0a15cbb9 | |||
| 01e35b0bbc | |||
| ad821eabe1 | |||
| d482a49bf0 | |||
| 0f2da6933e | |||
| ae8c318ecb | |||
| b79e95c194 | |||
| 102e370e57 | |||
| c3f8fef2c8 | |||
| a727d128a1 | |||
| 17e6fb567c | |||
|
ccd9bbf4da
|
|||
| 84c65ebd82 | |||
| a1d61f65e8 | |||
| 4d4ce21e4f | |||
| a69342bde1 | |||
| 063d0acc71 | |||
| feae389078 | |||
| bd2b6ee3ed | |||
| 78f03614d5 | |||
| 5fb773b517 | |||
| b90304ee0e | |||
| 51f5ad3575 | |||
| 650f841cbf | |||
| 64913ca1a1 | |||
| eaeb477fbf | |||
| 8930e1f15c | |||
| 5b13cf7e20 | |||
| 79a9822f79 | |||
| 0217c5455b | |||
| 1714d4c5fa | |||
| 81c3e0c0b6 | |||
| 1ed2fd6245 | |||
| a767aadca6 | |||
| 1845a56394 | |||
|
5187d033cb
|
|||
|
87a05c73a7
|
|||
| 3be54bfee5 | |||
| 337eb6b0ab | |||
|
adf70999b9
|
|||
|
acb9e59775
|
|||
|
fa4782e43f
|
|||
|
9236d6aef7
|
|||
|
7f84780956
|
|||
|
41aac24d52
|
|||
|
3e943862ef
|
|||
|
4754fea0c2
|
|||
|
2747556674
|
|||
|
de8bcda3c1
|
|||
| a78a32f080 | |||
|
14aa3a9340
|
|||
|
797f915939
|
|||
| 1f6689aeb6 | |||
|
3785b8047a
|
|||
|
fb1a36a846
|
|||
|
87c98581c2
|
|||
|
2538f57312
|
|||
|
a790331d0f
|
|||
|
3588fa670e
|
|||
|
dd255955ca
|
|||
|
77d1782f36
|
|||
|
5b06a95222
|
|||
|
b9102b5a44
|
|||
| 2f9205d2c6 | |||
| 495cf2a294 | |||
|
5ce8f46394
|
|||
|
feff1d06eb
|
|||
|
b75df7578f
|
|||
|
4d88644417
|
|||
|
d4137f79aa
|
|||
|
486320b0ec
|
|||
|
30b6f86eee
|
|||
|
6fc4d42d16
|
|||
|
2e6679b134
|
|||
|
ebcdefd0ca
|
|||
|
c32e288273
|
|||
|
2380c13465
|
|||
|
4432f4c1d0
|
|||
|
2dae23560d
|
|||
|
1988b36f03
|
|||
|
2a46da3761
|
|||
|
4927e37c90
|
|||
|
6fda081dc8
|
|||
| 8a0ec5b3f0 | |||
|
4e870cda44
|
|||
|
78621fa006
|
|||
|
38c2fbca2c
|
|||
|
3f07119ca1
|
|||
|
5e9aff0590
|
|||
|
6e6d5098c5
|
|||
| 504d119f3e | |||
| d6a04d81ac | |||
|
e609fed855
|
|||
|
aa2cbcda60
|
|||
|
8d47d0a70c
|
|||
|
e56d7d4b84
|
|||
| 2c9f706eb5 | |||
|
78efb084ec
|
|||
|
16042b08c0
|
|||
|
8e0b97c9e0
|
|||
|
fe2e87658a
|
|||
| 071bf948a6 | |||
|
c07d96bbab
|
|||
|
bd58d07001
|
|||
|
3797526000
|
|||
|
afa3cc3a57
|
|||
| f607be5afb | |||
|
6243ac3754
|
|||
|
c1cd25e865
|
|||
|
3c52b81d99
|
|||
|
6b85e87506
|
|||
|
f15c318558
|
|||
| f9a0a74435 | |||
|
08a0ddaf30
|
|||
| 4cd4212df0 | |||
| 38b9b0540c | |||
| 691b1b7835 | |||
| d73fd90707 | |||
| 65e28bdbe8 | |||
| 075933f31c | |||
| a4ff52d673 | |||
| 9df28f509c | |||
| 5ffb966ee6 | |||
| fc922fff9b | |||
| 66f97fc976 | |||
| 9bebaa8a1c | |||
| a39d6c93b9 | |||
| eda6fa0f4e | |||
| 99fb976089 | |||
| 200352f4bb | |||
| b9ad16c315 | |||
| c448e773b0 | |||
| 759e987127 | |||
| 3be9bfeb51 | |||
| ea064b8888 | |||
| 97e38a006e | |||
| 265e675b20 | |||
| 097f078204 | |||
|
518e3a3ded
|
|||
| 81f4165aeb | |||
| 65eeebea61 | |||
| 04cababa0d | |||
|
6759653491
|
|||
|
ac476cce26
|
|||
|
cba1821f3b
|
|||
| 5370d01c3d | |||
| be767730cb | |||
|
3842dcec78
|
|||
| 156c5ab897 | |||
| 3639fed6d4 | |||
| 6930789f3f | |||
| 262b7ebade | |||
| 4f49e4ef8b | |||
|
78c36c5384
|
|||
|
e279e7d940
|
|||
| 073a1c94dd | |||
| 3104971995 | |||
|
bb614db298
|
|||
| 41444c3429 | |||
|
41d5df4d1a
|
|||
|
529d5ae0d9
|
|||
| 1732d9adae | |||
| e8e26c2c80 | |||
| 8e8fb22b5b | |||
|
c15b4f4c5e
|
|||
| fbc96bdc0f | |||
|
dd86298253
|
|||
|
7c44bf9656
|
|||
|
844449b899
|
|||
| 91e618f495 | |||
|
78fc0c8c66
|
|||
|
99a2b414b8
|
|||
|
d63d891231
|
|||
|
298f2372ca
|
|||
|
0dbdee65c5
|
|||
|
3fde3bf757
|
|||
| 990a0d4a1c | |||
|
b468e9d533
|
|||
|
874e30fb28
|
|||
|
db9bf38ab6
|
|||
|
15e5ccb0ec
|
|||
| 8486c4e88f | |||
| a9852e4d60 | |||
| cdbf9008bb | |||
| 4ea0f67db5 | |||
| 1c8e9d2ff1 | |||
| 7084eb6185 | |||
|
c02b3534b0
|
|||
|
3f05a965e2
|
|||
|
83928028c4
|
|||
|
070d7384de
|
|||
|
07c422498e
|
|||
| 67a20e505f | |||
|
0df45877e1
|
|||
|
5b64f40412
|
|||
|
b8d058d23e
|
|||
|
a5448c5fc1
|
|||
|
f1ca20a387
|
|||
|
f0bc29ac5e
|
|||
|
539ff4eeac
|
|||
|
fa1635323e
|
|||
|
3b500a25a7
|
|||
|
abb4cf58ea
|
|||
|
fb3d810089
|
|||
|
02e3d42b47
|
|||
| 69e05f102b | |||
| d8fc32f235 | |||
| 7a0fb9cf31 | |||
|
c43e2aa063
|
|||
|
4d2fbff6d0
|
|||
|
f29edfe34a
|
|||
|
002f934c70
|
|||
|
fbcb81291b
|
|||
|
44d4dc6cdf
|
|||
|
5866a2be8f
|
|||
|
60b2a24271
|
|||
| e431ec8e64 | |||
|
3787fb37be
|
|||
|
e717102a93
|
|||
| df6480be4e | |||
| 6edd75a8c4 | |||
| 5e57423e00 | |||
| dd6c78473c | |||
| c0ebec86c8 | |||
| 52e1e3b7c6 | |||
|
e366a05204
|
|||
|
399c853f0c
|
|||
|
4af1bded61
|
|||
|
eb0e2a0877
|
|||
|
529949de59
|
|||
|
319739b4de
|
11
.gitignore
vendored
11
.gitignore
vendored
@@ -1 +1,12 @@
|
|||||||
.direnv/
|
.direnv/
|
||||||
|
result
|
||||||
|
|
||||||
|
# Terraform/OpenTofu
|
||||||
|
terraform/.terraform/
|
||||||
|
terraform/.terraform.lock.hcl
|
||||||
|
terraform/*.tfstate
|
||||||
|
terraform/*.tfstate.*
|
||||||
|
terraform/terraform.tfvars
|
||||||
|
terraform/*.auto.tfvars
|
||||||
|
terraform/crash.log
|
||||||
|
terraform/crash.*.log
|
||||||
|
|||||||
16
.sops.yaml
16
.sops.yaml
@@ -11,8 +11,10 @@ keys:
|
|||||||
- &server_ca age1288993th0ge00reg4zqueyvmkrsvk829cs068eekjqfdprsrkeqql7mljk
|
- &server_ca age1288993th0ge00reg4zqueyvmkrsvk829cs068eekjqfdprsrkeqql7mljk
|
||||||
- &server_monitoring01 age1vpns76ykll8jgdlu3h05cur4ew2t3k7u03kxdg8y6ypfhsfhq9fqyurjey
|
- &server_monitoring01 age1vpns76ykll8jgdlu3h05cur4ew2t3k7u03kxdg8y6ypfhsfhq9fqyurjey
|
||||||
- &server_jelly01 age1hchvlf3apn8g8jq2743pw53sd6v6ay6xu6lqk0qufrjeccan9vzsc7hdfq
|
- &server_jelly01 age1hchvlf3apn8g8jq2743pw53sd6v6ay6xu6lqk0qufrjeccan9vzsc7hdfq
|
||||||
- &server_nix-cache01 age1a0477laj9sdh79wdas5v7hzk6au8fach74njg8epfw2rdht90qjsakkwd6
|
- &server_nix-cache01 age1w029fksjv0edrff9p7s03tgk3axecdkppqymfpwfn2nu2gsqqefqc37sxq
|
||||||
- &server_pgdb1 age1ha34qeksr4jeaecevqvv2afqem67eja2mvawlmrqsudch0e7fe7qtpsekv
|
- &server_pgdb1 age1ha34qeksr4jeaecevqvv2afqem67eja2mvawlmrqsudch0e7fe7qtpsekv
|
||||||
|
- &server_nats1 age1cxt8kwqzx35yuldazcc49q88qvgy9ajkz30xu0h37uw3ts97jagqgmn2ga
|
||||||
|
- &server_auth01 age16prza00sqzuhwwcyakj6z4hvwkruwkqpmmrsn94a5ucgpkelncdq2ldctk
|
||||||
creation_rules:
|
creation_rules:
|
||||||
- path_regex: secrets/[^/]+\.(yaml|json|env|ini)
|
- path_regex: secrets/[^/]+\.(yaml|json|env|ini)
|
||||||
key_groups:
|
key_groups:
|
||||||
@@ -31,6 +33,8 @@ creation_rules:
|
|||||||
- *server_jelly01
|
- *server_jelly01
|
||||||
- *server_nix-cache01
|
- *server_nix-cache01
|
||||||
- *server_pgdb1
|
- *server_pgdb1
|
||||||
|
- *server_nats1
|
||||||
|
- *server_auth01
|
||||||
- path_regex: secrets/ns3/[^/]+\.(yaml|json|env|ini)
|
- path_regex: secrets/ns3/[^/]+\.(yaml|json|env|ini)
|
||||||
key_groups:
|
key_groups:
|
||||||
- age:
|
- age:
|
||||||
@@ -56,3 +60,13 @@ creation_rules:
|
|||||||
- age:
|
- age:
|
||||||
- *admin_torjus
|
- *admin_torjus
|
||||||
- *server_nix-cache01
|
- *server_nix-cache01
|
||||||
|
- path_regex: secrets/http-proxy/.+
|
||||||
|
key_groups:
|
||||||
|
- age:
|
||||||
|
- *admin_torjus
|
||||||
|
- *server_http-proxy
|
||||||
|
- path_regex: secrets/auth01/[^/]+\.(yaml|json|env|ini|)
|
||||||
|
key_groups:
|
||||||
|
- age:
|
||||||
|
- *admin_torjus
|
||||||
|
- *server_auth01
|
||||||
|
|||||||
246
CLAUDE.md
Normal file
246
CLAUDE.md
Normal file
@@ -0,0 +1,246 @@
|
|||||||
|
# CLAUDE.md
|
||||||
|
|
||||||
|
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||||
|
|
||||||
|
## Repository Overview
|
||||||
|
|
||||||
|
This is a Nix Flake-based NixOS configuration repository for managing a homelab infrastructure consisting of 16 server configurations. The repository uses a modular architecture with shared system configurations, reusable service modules, and per-host customization.
|
||||||
|
|
||||||
|
## Common Commands
|
||||||
|
|
||||||
|
### Building Configurations
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# List all available configurations
|
||||||
|
nix flake show
|
||||||
|
|
||||||
|
# Build a specific host configuration locally (without deploying)
|
||||||
|
nixos-rebuild build --flake .#<hostname>
|
||||||
|
|
||||||
|
# Build and check a configuration
|
||||||
|
nix build .#nixosConfigurations.<hostname>.config.system.build.toplevel
|
||||||
|
```
|
||||||
|
|
||||||
|
### Deployment
|
||||||
|
|
||||||
|
Do not automatically deploy changes. Deployments are usually done by updating the master branch, and then triggering the auto update on the specific host.
|
||||||
|
|
||||||
|
### Flake Management
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check flake for errors
|
||||||
|
nix flake check
|
||||||
|
```
|
||||||
|
Do not run `nix flake update`. This should only be done manually by the user.
|
||||||
|
|
||||||
|
### Development Environment
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Enter development shell (provides ansible, python3)
|
||||||
|
nix develop
|
||||||
|
```
|
||||||
|
|
||||||
|
### Secrets Management
|
||||||
|
|
||||||
|
Secrets are handled by sops. Do not edit any `.sops.yaml` or any file within `secrets/`. Ask the user to modify if necessary.
|
||||||
|
|
||||||
|
### Git Commit Messages
|
||||||
|
|
||||||
|
Commit messages should follow the format: `topic: short description`
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
- `flake: add opentofu to devshell`
|
||||||
|
- `template2: add proxmox image configuration`
|
||||||
|
- `terraform: add VM deployment configuration`
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
### Directory Structure
|
||||||
|
|
||||||
|
- `/flake.nix` - Central flake defining all 16 NixOS configurations
|
||||||
|
- `/hosts/<hostname>/` - Per-host configurations
|
||||||
|
- `default.nix` - Entry point, imports configuration.nix and services
|
||||||
|
- `configuration.nix` - Host-specific settings (networking, hardware, users)
|
||||||
|
- `/system/` - Shared system-level configurations applied to ALL hosts
|
||||||
|
- Core modules: nix.nix, sshd.nix, sops.nix, acme.nix, autoupgrade.nix
|
||||||
|
- Monitoring: node-exporter and promtail on every host
|
||||||
|
- `/services/` - Reusable service modules, selectively imported by hosts
|
||||||
|
- `home-assistant/` - Home automation stack
|
||||||
|
- `monitoring/` - Observability stack (Prometheus, Grafana, Loki, Tempo)
|
||||||
|
- `ns/` - DNS services (authoritative, resolver)
|
||||||
|
- `http-proxy/`, `ca/`, `postgres/`, `nats/`, `jellyfin/`, etc.
|
||||||
|
- `/secrets/` - SOPS-encrypted secrets with age encryption
|
||||||
|
- `/common/` - Shared configurations (e.g., VM guest agent)
|
||||||
|
- `/playbooks/` - Ansible playbooks for fleet management
|
||||||
|
- `/.sops.yaml` - SOPS configuration with age keys for all servers
|
||||||
|
|
||||||
|
### Configuration Inheritance
|
||||||
|
|
||||||
|
Each host follows this import pattern:
|
||||||
|
```
|
||||||
|
hosts/<hostname>/default.nix
|
||||||
|
└─> configuration.nix (host-specific)
|
||||||
|
├─> ../../system (ALL shared system configs - applied to every host)
|
||||||
|
├─> ../../services/<service> (selective service imports)
|
||||||
|
└─> ../../common/vm (if VM)
|
||||||
|
```
|
||||||
|
|
||||||
|
All hosts automatically get:
|
||||||
|
- Nix binary cache (nix-cache.home.2rjus.net)
|
||||||
|
- SSH with root login enabled
|
||||||
|
- SOPS secrets management with auto-generated age keys
|
||||||
|
- Internal ACME CA integration (ca.home.2rjus.net)
|
||||||
|
- Daily auto-upgrades with auto-reboot
|
||||||
|
- Prometheus node-exporter + Promtail (logs to monitoring01)
|
||||||
|
- Custom root CA trust
|
||||||
|
|
||||||
|
### Active Hosts
|
||||||
|
|
||||||
|
Production servers managed by `rebuild-all.sh`:
|
||||||
|
- `ns1`, `ns2` - Primary/secondary DNS servers (10.69.13.5/6)
|
||||||
|
- `ca` - Internal Certificate Authority
|
||||||
|
- `ha1` - Home Assistant + Zigbee2MQTT + Mosquitto
|
||||||
|
- `http-proxy` - Reverse proxy
|
||||||
|
- `monitoring01` - Full observability stack (Prometheus, Grafana, Loki, Tempo, Pyroscope)
|
||||||
|
- `jelly01` - Jellyfin media server
|
||||||
|
- `nix-cache01` - Binary cache server
|
||||||
|
- `pgdb1` - PostgreSQL database
|
||||||
|
- `nats1` - NATS messaging server
|
||||||
|
- `auth01` - Authentication service
|
||||||
|
|
||||||
|
Template/test hosts:
|
||||||
|
- `template1` - Base template for cloning new hosts
|
||||||
|
- `nixos-test1` - Test environment
|
||||||
|
|
||||||
|
### Flake Inputs
|
||||||
|
|
||||||
|
- `nixpkgs` - NixOS 25.11 stable (primary)
|
||||||
|
- `nixpkgs-unstable` - Unstable channel (available via overlay as `pkgs.unstable.<package>`)
|
||||||
|
- `sops-nix` - Secrets management
|
||||||
|
- Custom packages from git.t-juice.club:
|
||||||
|
- `backup-helper` - Backup automation module
|
||||||
|
- `alerttonotify` - Alert routing
|
||||||
|
- `labmon` - Lab monitoring
|
||||||
|
|
||||||
|
### Network Architecture
|
||||||
|
|
||||||
|
- Domain: `home.2rjus.net`
|
||||||
|
- Infrastructure subnet: `10.69.13.x`
|
||||||
|
- DNS: ns1/ns2 provide authoritative DNS with primary-secondary setup
|
||||||
|
- Internal CA for ACME certificates (no Let's Encrypt)
|
||||||
|
- Centralized monitoring at monitoring01
|
||||||
|
- Static networking via systemd-networkd
|
||||||
|
|
||||||
|
### Secrets Management
|
||||||
|
|
||||||
|
- Uses SOPS with age encryption
|
||||||
|
- Each server has unique age key in `.sops.yaml`
|
||||||
|
- Keys auto-generated at `/var/lib/sops-nix/key.txt` on first boot
|
||||||
|
- Shared secrets: `/secrets/secrets.yaml`
|
||||||
|
- Per-host secrets: `/secrets/<hostname>/`
|
||||||
|
- All production servers can decrypt shared secrets; host-specific secrets require specific host keys
|
||||||
|
|
||||||
|
### Auto-Upgrade System
|
||||||
|
|
||||||
|
All hosts pull updates daily from:
|
||||||
|
```
|
||||||
|
git+https://git.t-juice.club/torjus/nixos-servers.git
|
||||||
|
```
|
||||||
|
|
||||||
|
Configured in `/system/autoupgrade.nix`:
|
||||||
|
- Random delay to avoid simultaneous upgrades
|
||||||
|
- Auto-reboot after successful upgrade
|
||||||
|
- Systemd service: `nixos-upgrade.service`
|
||||||
|
|
||||||
|
### Proxmox VM Provisioning with OpenTofu
|
||||||
|
|
||||||
|
The repository includes automated workflows for building Proxmox VM templates and deploying VMs using OpenTofu (Terraform).
|
||||||
|
|
||||||
|
#### Building and Deploying Templates
|
||||||
|
|
||||||
|
Template VMs are built from `hosts/template2` and deployed to Proxmox using Ansible:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build NixOS image and deploy to Proxmox as template
|
||||||
|
nix develop -c ansible-playbook -i playbooks/inventory.ini playbooks/build-and-deploy-template.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
This playbook:
|
||||||
|
1. Builds the Proxmox image using `nixos-rebuild build-image --image-variant proxmox`
|
||||||
|
2. Uploads the `.vma.zst` image to Proxmox at `/var/lib/vz/dump`
|
||||||
|
3. Restores it as VM ID 9000
|
||||||
|
4. Converts it to a template
|
||||||
|
|
||||||
|
Template configuration (`hosts/template2`):
|
||||||
|
- Minimal base system with essential packages (age, vim, wget, git)
|
||||||
|
- Cloud-init configured for NoCloud datasource (no EC2 metadata timeout)
|
||||||
|
- DHCP networking on ens18
|
||||||
|
- SSH key-based root login
|
||||||
|
- `prepare-host.sh` script for cleaning machine-id, SSH keys, and regenerating age keys
|
||||||
|
|
||||||
|
#### Deploying VMs with OpenTofu
|
||||||
|
|
||||||
|
VMs are deployed from templates using OpenTofu in the `/terraform` directory:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd terraform
|
||||||
|
tofu init # First time only
|
||||||
|
tofu apply # Deploy VMs
|
||||||
|
```
|
||||||
|
|
||||||
|
Configuration files:
|
||||||
|
- `main.tf` - Proxmox provider configuration
|
||||||
|
- `variables.tf` - Provider variables (API credentials)
|
||||||
|
- `vm.tf` - VM resource definitions
|
||||||
|
- `terraform.tfvars` - Actual credentials (gitignored)
|
||||||
|
|
||||||
|
Example VM deployment includes:
|
||||||
|
- Clone from template VM
|
||||||
|
- Cloud-init configuration (SSH keys, network, DNS)
|
||||||
|
- Custom CPU/memory/disk sizing
|
||||||
|
- VLAN tagging
|
||||||
|
- QEMU guest agent
|
||||||
|
|
||||||
|
OpenTofu outputs the VM's IP address after deployment for easy SSH access.
|
||||||
|
|
||||||
|
### Adding a New Host
|
||||||
|
|
||||||
|
1. Create `/hosts/<hostname>/` directory
|
||||||
|
2. Copy structure from `template1` or similar host
|
||||||
|
3. Add host entry to `flake.nix` nixosConfigurations
|
||||||
|
4. Add the hostname to the DNS zone files. Merge to master. Run auto-upgrade on the DNS servers.
|
||||||
|
5. User clones template host
|
||||||
|
6. User runs `prepare-host.sh` on the new host; this deletes files that should be regenerated (such as SSH host keys, machine-id, etc.). It also creates a new age key and prints the public key.
|
||||||
|
7. This key is then added to `.sops.yaml`
|
||||||
|
8. Create `/secrets/<hostname>/` if needed
|
||||||
|
9. Configure networking (static IP, DNS servers)
|
||||||
|
10. Commit changes, and merge to master.
|
||||||
|
11. Deploy by running `nixos-rebuild boot --flake URL#<hostname>` on the host.
|
||||||
|
|
||||||
|
### Important Patterns
|
||||||
|
|
||||||
|
**Overlay usage**: Access unstable packages via `pkgs.unstable.<package>` (defined in flake.nix overlay-unstable)
|
||||||
|
|
||||||
|
**Service composition**: Services in `/services/` are designed to be imported by multiple hosts. Keep them modular and reusable.
|
||||||
|
|
||||||
|
**Hardware configuration reuse**: Multiple hosts share `/hosts/template/hardware-configuration.nix` for VM instances.
|
||||||
|
|
||||||
|
**State version**: All hosts use stateVersion `"23.11"` - do not change this on existing hosts.
|
||||||
|
|
||||||
|
**Firewall**: Disabled on most hosts (trusted network). Enable selectively in host configuration if needed.
|
||||||
|
|
||||||
|
### Monitoring Stack
|
||||||
|
|
||||||
|
All hosts ship metrics and logs to `monitoring01`:
|
||||||
|
- **Metrics**: Prometheus scrapes node-exporter from all hosts
|
||||||
|
- **Logs**: Promtail ships logs to Loki on monitoring01
|
||||||
|
- **Access**: Grafana at monitoring01 for visualization
|
||||||
|
- **Tracing**: Tempo for distributed tracing
|
||||||
|
- **Profiling**: Pyroscope for continuous profiling
|
||||||
|
|
||||||
|
### DNS Architecture
|
||||||
|
|
||||||
|
- `ns1` (10.69.13.5) - Primary authoritative DNS + resolver
|
||||||
|
- `ns2` (10.69.13.6) - Secondary authoritative DNS (AXFR from ns1)
|
||||||
|
- Zone files managed in `/services/ns/`
|
||||||
|
- All hosts point to ns1/ns2 for DNS resolution
|
||||||
549
TODO.md
Normal file
549
TODO.md
Normal file
@@ -0,0 +1,549 @@
|
|||||||
|
# TODO: Automated Host Deployment Pipeline
|
||||||
|
|
||||||
|
## Vision
|
||||||
|
|
||||||
|
Automate the entire process of creating, configuring, and deploying new NixOS hosts on Proxmox from a single command or script.
|
||||||
|
|
||||||
|
**Desired workflow:**
|
||||||
|
```bash
|
||||||
|
./scripts/create-host.sh --hostname myhost --ip 10.69.13.50
|
||||||
|
# Script creates config, deploys VM, bootstraps NixOS, and you're ready to go
|
||||||
|
```
|
||||||
|
|
||||||
|
**Current manual workflow (from CLAUDE.md):**
|
||||||
|
1. Create `/hosts/<hostname>/` directory structure
|
||||||
|
2. Add host to `flake.nix`
|
||||||
|
3. Add DNS entries
|
||||||
|
4. Clone template VM manually
|
||||||
|
5. Run `prepare-host.sh` on new VM
|
||||||
|
6. Add generated age key to `.sops.yaml`
|
||||||
|
7. Configure networking
|
||||||
|
8. Commit and push
|
||||||
|
9. Run `nixos-rebuild boot --flake URL#<hostname>` on host
|
||||||
|
|
||||||
|
## The Plan
|
||||||
|
|
||||||
|
### Phase 1: Parameterized OpenTofu Deployments ✅ COMPLETED
|
||||||
|
|
||||||
|
**Status:** Fully implemented and tested
|
||||||
|
|
||||||
|
**Implementation:**
|
||||||
|
- Locals-based structure using `for_each` pattern for multiple VM deployments
|
||||||
|
- All VM parameters configurable with smart defaults (CPU, memory, disk, IP, storage, etc.)
|
||||||
|
- Automatic DHCP vs static IP detection based on `ip` field presence
|
||||||
|
- Dynamic outputs showing deployed VM IPs and specifications
|
||||||
|
- Successfully tested deploying multiple VMs simultaneously
|
||||||
|
|
||||||
|
**Tasks:**
|
||||||
|
- [x] Create module/template structure in terraform for repeatable VM deployments
|
||||||
|
- [x] Parameterize VM configuration (hostname, CPU, memory, disk, IP)
|
||||||
|
- [x] Support both DHCP and static IP configuration via cloud-init
|
||||||
|
- [x] Test deploying multiple VMs from same template
|
||||||
|
|
||||||
|
**Deliverable:** ✅ Can deploy multiple VMs with custom parameters via OpenTofu in a single `tofu apply`
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- `terraform/vms.tf` - VM definitions using locals map
|
||||||
|
- `terraform/outputs.tf` - Dynamic outputs for all VMs
|
||||||
|
- `terraform/variables.tf` - Configurable defaults
|
||||||
|
- `terraform/README.md` - Complete documentation
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Phase 2: Host Configuration Generator ✅ COMPLETED
|
||||||
|
|
||||||
|
**Status:** ✅ Fully implemented and tested
|
||||||
|
**Completed:** 2025-02-01
|
||||||
|
**Enhanced:** 2025-02-01 (added --force flag)
|
||||||
|
|
||||||
|
**Goal:** Automate creation of host configuration files
|
||||||
|
|
||||||
|
**Implementation:**
|
||||||
|
- Python CLI tool packaged as Nix derivation
|
||||||
|
- Available as `create-host` command in devShell
|
||||||
|
- Rich terminal UI with configuration previews
|
||||||
|
- Comprehensive validation (hostname format/uniqueness, IP subnet/uniqueness)
|
||||||
|
- Jinja2 templates for NixOS configurations
|
||||||
|
- Automatic updates to flake.nix and terraform/vms.tf
|
||||||
|
- `--force` flag for regenerating existing configurations (useful for testing)
|
||||||
|
|
||||||
|
**Tasks:**
|
||||||
|
- [x] Create Python CLI with typer framework
|
||||||
|
- [x] Takes parameters: hostname, IP, CPU cores, memory, disk size
|
||||||
|
- [x] Generates `/hosts/<hostname>/` directory structure
|
||||||
|
- [x] Creates `configuration.nix` with proper hostname and networking
|
||||||
|
- [x] Generates `default.nix` with standard imports
|
||||||
|
- [x] References shared `hardware-configuration.nix` from template
|
||||||
|
- [x] Add host entry to `flake.nix` programmatically
|
||||||
|
- [x] Text-based manipulation (regex insertion)
|
||||||
|
- [x] Inserts new nixosConfiguration entry
|
||||||
|
- [x] Maintains proper formatting
|
||||||
|
- [x] Generate corresponding OpenTofu configuration
|
||||||
|
- [x] Adds VM definition to `terraform/vms.tf`
|
||||||
|
- [x] Uses parameters from CLI input
|
||||||
|
- [x] Supports both static IP and DHCP modes
|
||||||
|
- [x] Package as Nix derivation with templates
|
||||||
|
- [x] Add to flake packages and devShell
|
||||||
|
- [x] Implement dry-run mode
|
||||||
|
- [x] Write comprehensive README
|
||||||
|
|
||||||
|
**Usage:**
|
||||||
|
```bash
|
||||||
|
# In nix develop shell
|
||||||
|
create-host \
|
||||||
|
--hostname test01 \
|
||||||
|
--ip 10.69.13.50/24 \ # optional, omit for DHCP
|
||||||
|
--cpu 4 \ # optional, default 2
|
||||||
|
--memory 4096 \ # optional, default 2048
|
||||||
|
--disk 50G \ # optional, default 20G
|
||||||
|
--dry-run # optional preview mode
|
||||||
|
```
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- `scripts/create-host/` - Complete Python package with Nix derivation
|
||||||
|
- `scripts/create-host/README.md` - Full documentation and examples
|
||||||
|
|
||||||
|
**Deliverable:** ✅ Tool generates all config files for a new host, validated with Nix and Terraform
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Phase 3: Bootstrap Mechanism ✅ COMPLETED
|
||||||
|
|
||||||
|
**Status:** ✅ Fully implemented and tested
|
||||||
|
**Completed:** 2025-02-01
|
||||||
|
**Enhanced:** 2025-02-01 (added branch support for testing)
|
||||||
|
|
||||||
|
**Goal:** Get freshly deployed VM to apply its specific host configuration
|
||||||
|
|
||||||
|
**Implementation:** Systemd oneshot service that runs on first boot after cloud-init
|
||||||
|
|
||||||
|
**Approach taken:** Systemd service (variant of Option A)
|
||||||
|
- Systemd service `nixos-bootstrap.service` runs on first boot
|
||||||
|
- Depends on `cloud-config.service` to ensure hostname is set
|
||||||
|
- Reads hostname from `hostnamectl` (set by cloud-init via Terraform)
|
||||||
|
- Supports custom git branch via `NIXOS_FLAKE_BRANCH` environment variable
|
||||||
|
- Runs `nixos-rebuild boot --flake git+https://git.t-juice.club/torjus/nixos-servers.git?ref=$BRANCH#${hostname}`
|
||||||
|
- Reboots into new configuration on success
|
||||||
|
- Fails gracefully without reboot on errors (network issues, missing config)
|
||||||
|
- Service self-destructs after successful bootstrap (not in new config)
|
||||||
|
|
||||||
|
**Tasks:**
|
||||||
|
- [x] Create bootstrap service module in template2
|
||||||
|
- [x] systemd oneshot service with proper dependencies
|
||||||
|
- [x] Reads hostname from hostnamectl (cloud-init sets it)
|
||||||
|
- [x] Checks network connectivity via HTTPS (curl)
|
||||||
|
- [x] Runs nixos-rebuild boot with flake URL
|
||||||
|
- [x] Reboots on success, fails gracefully on error
|
||||||
|
- [x] Configure cloud-init datasource
|
||||||
|
- [x] Use ConfigDrive datasource (Proxmox provider)
|
||||||
|
- [x] Add cloud-init disk to Terraform VMs (disks.ide.ide2.cloudinit)
|
||||||
|
- [x] Hostname passed via cloud-init user-data from Terraform
|
||||||
|
- [x] Test bootstrap service execution on fresh VM
|
||||||
|
- [x] Handle failure cases (flake doesn't exist, network issues)
|
||||||
|
- [x] Clear error messages in journald
|
||||||
|
- [x] No reboot on failure
|
||||||
|
- [x] System remains accessible for debugging
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- `hosts/template2/bootstrap.nix` - Bootstrap service definition
|
||||||
|
- `hosts/template2/configuration.nix` - Cloud-init ConfigDrive datasource
|
||||||
|
- `terraform/vms.tf` - Cloud-init disk configuration
|
||||||
|
|
||||||
|
**Deliverable:** ✅ VMs automatically bootstrap and reboot into host-specific configuration on first boot
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Phase 4: Secrets Management with HashiCorp Vault
|
||||||
|
|
||||||
|
**Challenge:** Current sops-nix approach has chicken-and-egg problem with age keys
|
||||||
|
|
||||||
|
**Current workflow:**
|
||||||
|
1. VM boots, generates age key at `/var/lib/sops-nix/key.txt`
|
||||||
|
2. User runs `prepare-host.sh` which prints public key
|
||||||
|
3. User manually adds public key to `.sops.yaml`
|
||||||
|
4. User commits, pushes
|
||||||
|
5. VM can now decrypt secrets
|
||||||
|
|
||||||
|
**Selected approach:** Migrate to HashiCorp Vault for centralized secrets management
|
||||||
|
|
||||||
|
**Benefits:**
|
||||||
|
- Industry-standard secrets management (Vault experience transferable to work)
|
||||||
|
- Eliminates manual age key distribution step
|
||||||
|
- Secrets-as-code via OpenTofu (infrastructure-as-code aligned)
|
||||||
|
- Centralized PKI management (replaces step-ca, consolidates TLS + SSH CA)
|
||||||
|
- Automatic secret rotation capabilities
|
||||||
|
- Audit logging for all secret access
|
||||||
|
- AppRole authentication enables automated bootstrap
|
||||||
|
|
||||||
|
**Architecture:**
|
||||||
|
```
|
||||||
|
vault.home.2rjus.net
|
||||||
|
├─ KV Secrets Engine (replaces sops-nix)
|
||||||
|
├─ PKI Engine (replaces step-ca for TLS)
|
||||||
|
├─ SSH CA Engine (replaces step-ca SSH CA)
|
||||||
|
└─ AppRole Auth (per-host authentication)
|
||||||
|
↓
|
||||||
|
New hosts authenticate on first boot
|
||||||
|
Fetch secrets via Vault API
|
||||||
|
No manual key distribution needed
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### Phase 4a: Vault Server Setup
|
||||||
|
|
||||||
|
**Goal:** Deploy and configure Vault server with auto-unseal
|
||||||
|
|
||||||
|
**Tasks:**
|
||||||
|
- [ ] Create `hosts/vault01/` configuration
|
||||||
|
- [ ] Basic NixOS configuration (hostname, networking, etc.)
|
||||||
|
- [ ] Vault service configuration
|
||||||
|
- [ ] Firewall rules (8200 for API, 8201 for cluster)
|
||||||
|
- [ ] Add to flake.nix and terraform
|
||||||
|
- [ ] Implement auto-unseal mechanism
|
||||||
|
- [ ] **Preferred:** TPM-based auto-unseal if hardware supports it
|
||||||
|
- [ ] Use tpm2-tools to seal/unseal Vault keys
|
||||||
|
- [ ] Systemd service to unseal on boot
|
||||||
|
- [ ] **Fallback:** Shamir secret sharing with systemd automation
|
||||||
|
- [ ] Generate 3 keys, threshold 2
|
||||||
|
- [ ] Store 2 keys on disk (encrypted), keep 1 offline
|
||||||
|
- [ ] Systemd service auto-unseals using 2 keys
|
||||||
|
- [ ] Initial Vault setup
|
||||||
|
- [ ] Initialize Vault
|
||||||
|
- [ ] Configure storage backend (integrated raft or file)
|
||||||
|
- [ ] Set up root token management
|
||||||
|
- [ ] Enable audit logging
|
||||||
|
- [ ] Deploy to infrastructure
|
||||||
|
- [ ] Add DNS entry for vault.home.2rjus.net
|
||||||
|
- [ ] Deploy VM via terraform
|
||||||
|
- [ ] Bootstrap and verify Vault is running
|
||||||
|
|
||||||
|
**Deliverable:** Running Vault server that auto-unseals on boot
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### Phase 4b: Vault-as-Code with OpenTofu
|
||||||
|
|
||||||
|
**Goal:** Manage all Vault configuration (secrets structure, policies, roles) as code
|
||||||
|
|
||||||
|
**Tasks:**
|
||||||
|
- [ ] Set up Vault Terraform provider
|
||||||
|
- [ ] Create `terraform/vault/` directory
|
||||||
|
- [ ] Configure Vault provider (address, auth)
|
||||||
|
- [ ] Store Vault token securely (terraform.tfvars, gitignored)
|
||||||
|
- [ ] Enable and configure secrets engines
|
||||||
|
- [ ] Enable KV v2 secrets engine at `secret/`
|
||||||
|
- [ ] Define secret path structure (per-service, per-host)
|
||||||
|
- [ ] Example: `secret/monitoring/grafana`, `secret/postgres/ha1`
|
||||||
|
- [ ] Define policies as code
|
||||||
|
- [ ] Create policies for different service tiers
|
||||||
|
- [ ] Principle of least privilege (hosts only read their secrets)
|
||||||
|
- [ ] Example: monitoring-policy allows read on `secret/monitoring/*`
|
||||||
|
- [ ] Set up AppRole authentication
|
||||||
|
- [ ] Enable AppRole auth backend
|
||||||
|
- [ ] Create role per host type (monitoring, dns, database, etc.)
|
||||||
|
- [ ] Bind policies to roles
|
||||||
|
- [ ] Configure TTL and token policies
|
||||||
|
- [ ] Migrate existing secrets from sops-nix
|
||||||
|
- [ ] Create migration script/playbook
|
||||||
|
- [ ] Decrypt sops secrets and load into Vault KV
|
||||||
|
- [ ] Verify all secrets migrated successfully
|
||||||
|
- [ ] Keep sops as backup during transition
|
||||||
|
- [ ] Implement secrets-as-code patterns
|
||||||
|
- [ ] Secret values in gitignored terraform.tfvars
|
||||||
|
- [ ] Or use random_password for auto-generated secrets
|
||||||
|
- [ ] Secret structure/paths in version-controlled .tf files
|
||||||
|
|
||||||
|
**Example OpenTofu:**
|
||||||
|
```hcl
|
||||||
|
resource "vault_kv_secret_v2" "monitoring_grafana" {
|
||||||
|
mount = "secret"
|
||||||
|
name = "monitoring/grafana"
|
||||||
|
data_json = jsonencode({
|
||||||
|
admin_password = var.grafana_admin_password
|
||||||
|
smtp_password = var.smtp_password
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "vault_policy" "monitoring" {
|
||||||
|
name = "monitoring-policy"
|
||||||
|
policy = <<EOT
|
||||||
|
path "secret/data/monitoring/*" {
|
||||||
|
capabilities = ["read"]
|
||||||
|
}
|
||||||
|
EOT
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "vault_approle_auth_backend_role" "monitoring01" {
|
||||||
|
backend = "approle"
|
||||||
|
role_name = "monitoring01"
|
||||||
|
token_policies = ["monitoring-policy"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Deliverable:** All secrets and policies managed as OpenTofu code in `terraform/vault/`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### Phase 4c: PKI Migration (Replace step-ca)
|
||||||
|
|
||||||
|
**Goal:** Consolidate PKI infrastructure into Vault
|
||||||
|
|
||||||
|
**Tasks:**
|
||||||
|
- [ ] Set up Vault PKI engines
|
||||||
|
- [ ] Create root CA in Vault (`pki/` mount, 10 year TTL)
|
||||||
|
- [ ] Create intermediate CA (`pki_int/` mount, 5 year TTL)
|
||||||
|
- [ ] Sign intermediate with root CA
|
||||||
|
- [ ] Configure CRL and OCSP
|
||||||
|
- [ ] Enable ACME support
|
||||||
|
- [ ] Enable ACME on intermediate CA (Vault 1.14+)
|
||||||
|
- [ ] Create PKI role for homelab domain
|
||||||
|
- [ ] Set certificate TTLs and allowed domains
|
||||||
|
- [ ] Configure SSH CA in Vault
|
||||||
|
- [ ] Enable SSH secrets engine (`ssh/` mount)
|
||||||
|
- [ ] Generate SSH signing keys
|
||||||
|
- [ ] Create roles for host and user certificates
|
||||||
|
- [ ] Configure TTLs and allowed principals
|
||||||
|
- [ ] Migrate hosts from step-ca to Vault
|
||||||
|
- [ ] Update system/acme.nix to use Vault ACME endpoint
|
||||||
|
- [ ] Change server to `https://vault.home.2rjus.net:8200/v1/pki_int/acme/directory`
|
||||||
|
- [ ] Test certificate issuance on one host
|
||||||
|
- [ ] Roll out to all hosts via auto-upgrade
|
||||||
|
- [ ] Migrate SSH CA trust
|
||||||
|
- [ ] Distribute Vault SSH CA public key to all hosts
|
||||||
|
- [ ] Update sshd_config to trust Vault CA
|
||||||
|
- [ ] Test SSH certificate authentication
|
||||||
|
- [ ] Decommission step-ca
|
||||||
|
- [ ] Verify all services migrated
|
||||||
|
- [ ] Stop step-ca service on ca host
|
||||||
|
- [ ] Archive step-ca configuration for backup
|
||||||
|
|
||||||
|
**Deliverable:** All TLS and SSH certificates issued by Vault, step-ca retired
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
#### Phase 4d: Bootstrap Integration
|
||||||
|
|
||||||
|
**Goal:** New hosts automatically authenticate to Vault on first boot, no manual steps
|
||||||
|
|
||||||
|
**Tasks:**
|
||||||
|
- [ ] Update create-host tool
|
||||||
|
- [ ] Generate AppRole role_id + secret_id for new host
|
||||||
|
- [ ] Or create wrapped token for one-time bootstrap
|
||||||
|
- [ ] Add host-specific policy to Vault (via terraform)
|
||||||
|
- [ ] Store bootstrap credentials for cloud-init injection
|
||||||
|
- [ ] Update template2 for Vault authentication
|
||||||
|
- [ ] Create Vault authentication module
|
||||||
|
- [ ] Reads bootstrap credentials from cloud-init
|
||||||
|
- [ ] Authenticates to Vault, retrieves permanent AppRole credentials
|
||||||
|
- [ ] Stores role_id + secret_id locally for services to use
|
||||||
|
- [ ] Create NixOS Vault secrets module
|
||||||
|
- [ ] Replacement for sops.secrets
|
||||||
|
- [ ] Fetches secrets from Vault at nixos-rebuild/activation time
|
||||||
|
- [ ] Or runtime secret fetching for services
|
||||||
|
- [ ] Handle Vault token renewal
|
||||||
|
- [ ] Update bootstrap service
|
||||||
|
- [ ] After authenticating to Vault, fetch any bootstrap secrets
|
||||||
|
- [ ] Run nixos-rebuild with host configuration
|
||||||
|
- [ ] Services automatically fetch their secrets from Vault
|
||||||
|
- [ ] Update terraform cloud-init
|
||||||
|
- [ ] Inject Vault address and bootstrap credentials
|
||||||
|
- [ ] Pass via cloud-init user-data or write_files
|
||||||
|
- [ ] Credentials scoped to single use or short TTL
|
||||||
|
- [ ] Test complete flow
|
||||||
|
- [ ] Run create-host to generate new host config
|
||||||
|
- [ ] Deploy with terraform
|
||||||
|
- [ ] Verify host bootstraps and authenticates to Vault
|
||||||
|
- [ ] Verify services can fetch secrets
|
||||||
|
- [ ] Confirm no manual steps required
|
||||||
|
|
||||||
|
**Bootstrap flow:**
|
||||||
|
```
|
||||||
|
1. terraform apply (deploys VM with cloud-init)
|
||||||
|
2. Cloud-init sets hostname + Vault bootstrap credentials
|
||||||
|
3. nixos-bootstrap.service runs:
|
||||||
|
- Authenticates to Vault with bootstrap credentials
|
||||||
|
- Retrieves permanent AppRole credentials
|
||||||
|
- Stores locally for service use
|
||||||
|
- Runs nixos-rebuild
|
||||||
|
4. Host services fetch secrets from Vault as needed
|
||||||
|
5. Done - no manual intervention
|
||||||
|
```
|
||||||
|
|
||||||
|
**Deliverable:** Fully automated secrets access from first boot, zero manual steps
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Phase 5: DNS Automation
|
||||||
|
|
||||||
|
**Goal:** Automatically generate DNS entries from host configurations
|
||||||
|
|
||||||
|
**Approach:** Leverage Nix to generate zone file entries from flake host configurations
|
||||||
|
|
||||||
|
Since most hosts use static IPs defined in their NixOS configurations, we can extract this information and automatically generate A records. This keeps DNS in sync with the actual host configs.
|
||||||
|
|
||||||
|
**Tasks:**
|
||||||
|
- [ ] Add optional CNAME field to host configurations
|
||||||
|
- [ ] Add `networking.cnames = [ "alias1" "alias2" ]` or similar option
|
||||||
|
- [ ] Document in host configuration template
|
||||||
|
- [ ] Create Nix function to extract DNS records from all hosts
|
||||||
|
- [ ] Parse each host's `networking.hostName` and IP configuration
|
||||||
|
- [ ] Collect any defined CNAMEs
|
||||||
|
- [ ] Generate zone file fragment with A and CNAME records
|
||||||
|
- [ ] Integrate auto-generated records into zone files
|
||||||
|
- [ ] Keep manual entries separate (for non-flake hosts/services)
|
||||||
|
- [ ] Include generated fragment in main zone file
|
||||||
|
- [ ] Add comments showing which records are auto-generated
|
||||||
|
- [ ] Update zone file serial number automatically
|
||||||
|
- [ ] Test zone file validity after generation
|
||||||
|
- [ ] Either:
|
||||||
|
- [ ] Automatically trigger DNS server reload (Ansible)
|
||||||
|
- [ ] Or document manual step: merge to master, run upgrade on ns1/ns2
|
||||||
|
|
||||||
|
**Deliverable:** DNS A records and CNAMEs automatically generated from host configs
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Phase 6: Integration Script
|
||||||
|
|
||||||
|
**Goal:** Single command to create and deploy a new host
|
||||||
|
|
||||||
|
**Tasks:**
|
||||||
|
- [ ] Create `scripts/create-host.sh` master script that orchestrates:
|
||||||
|
1. Prompts for: hostname, IP (or DHCP), CPU, memory, disk
|
||||||
|
2. Validates inputs (IP not in use, hostname unique, etc.)
|
||||||
|
3. Calls host config generator (Phase 2)
|
||||||
|
4. Generates OpenTofu config (Phase 2)
|
||||||
|
5. Handles secrets (Phase 4)
|
||||||
|
6. Updates DNS (Phase 5)
|
||||||
|
7. Commits all changes to git
|
||||||
|
8. Runs `tofu apply` to deploy VM
|
||||||
|
9. Waits for bootstrap to complete (Phase 3)
|
||||||
|
10. Prints success message with IP and SSH command
|
||||||
|
- [ ] Add `--dry-run` flag to preview changes
|
||||||
|
- [ ] Add `--interactive` mode vs `--batch` mode
|
||||||
|
- [ ] Error handling and rollback on failures
|
||||||
|
|
||||||
|
**Deliverable:** `./scripts/create-host.sh --hostname myhost --ip 10.69.13.50` creates a fully working host
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Phase 7: Testing & Documentation
|
||||||
|
|
||||||
|
**Status:** 🚧 In Progress (testing improvements completed)
|
||||||
|
|
||||||
|
**Testing Improvements Implemented (2025-02-01):**
|
||||||
|
|
||||||
|
The pipeline now supports efficient testing without polluting master branch:
|
||||||
|
|
||||||
|
**1. --force Flag for create-host**
|
||||||
|
- Re-run `create-host` to regenerate existing configurations
|
||||||
|
- Updates existing entries in flake.nix and terraform/vms.tf (no duplicates)
|
||||||
|
- Skip uniqueness validation checks
|
||||||
|
- Useful for iterating on configuration templates during testing
|
||||||
|
|
||||||
|
**2. Branch Support for Bootstrap**
|
||||||
|
- Bootstrap service reads `NIXOS_FLAKE_BRANCH` environment variable
|
||||||
|
- Defaults to `master` if not set
|
||||||
|
- Allows testing pipeline changes on feature branches
|
||||||
|
- Cloud-init passes branch via `/etc/environment`
|
||||||
|
|
||||||
|
**3. Cloud-init Disk for Branch Configuration**
|
||||||
|
- Terraform generates custom cloud-init snippets for test VMs
|
||||||
|
- Set `flake_branch` field in VM definition to use non-master branch
|
||||||
|
- Production VMs omit this field and use master (default)
|
||||||
|
- Files automatically uploaded to Proxmox via SSH
|
||||||
|
|
||||||
|
**Testing Workflow:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Create test branch
|
||||||
|
git checkout -b test-pipeline
|
||||||
|
|
||||||
|
# 2. Generate or update host config
|
||||||
|
create-host --hostname testvm01 --ip 10.69.13.100/24
|
||||||
|
|
||||||
|
# 3. Edit terraform/vms.tf to add test VM with branch
|
||||||
|
# vms = {
|
||||||
|
# "testvm01" = {
|
||||||
|
# ip = "10.69.13.100/24"
|
||||||
|
# flake_branch = "test-pipeline" # Bootstrap from this branch
|
||||||
|
# }
|
||||||
|
# }
|
||||||
|
|
||||||
|
# 4. Commit and push test branch
|
||||||
|
git add -A && git commit -m "test: add testvm01"
|
||||||
|
git push origin test-pipeline
|
||||||
|
|
||||||
|
# 5. Deploy VM
|
||||||
|
cd terraform && tofu apply
|
||||||
|
|
||||||
|
# 6. Watch bootstrap (VM fetches from test-pipeline branch)
|
||||||
|
ssh root@10.69.13.100
|
||||||
|
journalctl -fu nixos-bootstrap.service
|
||||||
|
|
||||||
|
# 7. Iterate: modify templates and regenerate with --force
|
||||||
|
cd .. && create-host --hostname testvm01 --ip 10.69.13.100/24 --force
|
||||||
|
git commit -am "test: update config" && git push
|
||||||
|
|
||||||
|
# Redeploy to test fresh bootstrap
|
||||||
|
cd terraform
|
||||||
|
tofu destroy -target=proxmox_vm_qemu.vm[\"testvm01\"] && tofu apply
|
||||||
|
|
||||||
|
# 8. Clean up when done: squash commits, merge to master, remove test VM
|
||||||
|
```
|
||||||
|
|
||||||
|
**Files:**
|
||||||
|
- `scripts/create-host/create_host.py` - Added --force parameter
|
||||||
|
- `scripts/create-host/manipulators.py` - Update vs insert logic
|
||||||
|
- `hosts/template2/bootstrap.nix` - Branch support via environment variable
|
||||||
|
- `terraform/vms.tf` - flake_branch field support
|
||||||
|
- `terraform/cloud-init.tf` - Custom cloud-init disk generation
|
||||||
|
- `terraform/variables.tf` - proxmox_host variable for SSH uploads
|
||||||
|
|
||||||
|
**Remaining Tasks:**
|
||||||
|
- [ ] Test full pipeline end-to-end on feature branch
|
||||||
|
- [ ] Update CLAUDE.md with testing workflow
|
||||||
|
- [ ] Add troubleshooting section
|
||||||
|
- [ ] Create examples for common scenarios (DHCP host, static IP host, etc.)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Open Questions
|
||||||
|
|
||||||
|
1. **Bootstrap method:** Cloud-init runcmd vs Terraform provisioner vs Ansible?
|
||||||
|
2. **Secrets handling:** Pre-generate keys vs post-deployment injection?
|
||||||
|
3. **DNS automation:** Auto-commit or manual merge?
|
||||||
|
4. **Git workflow:** Auto-push changes or leave for user review?
|
||||||
|
5. **Template selection:** Single template2 or multiple templates for different host types?
|
||||||
|
6. **Networking:** Always DHCP initially, or support static IP from start?
|
||||||
|
7. **Error recovery:** What happens if bootstrap fails? Manual intervention or retry?
|
||||||
|
|
||||||
|
## Implementation Order
|
||||||
|
|
||||||
|
Recommended sequence:
|
||||||
|
1. Phase 1: Parameterize OpenTofu (foundation for testing)
|
||||||
|
2. Phase 3: Bootstrap mechanism (core automation)
|
||||||
|
3. Phase 2: Config generator (automate the boilerplate)
|
||||||
|
4. Phase 4: Secrets (solves biggest chicken-and-egg)
|
||||||
|
5. Phase 5: DNS (nice-to-have automation)
|
||||||
|
6. Phase 6: Integration script (ties it all together)
|
||||||
|
7. Phase 7: Testing & docs
|
||||||
|
|
||||||
|
## Success Criteria
|
||||||
|
|
||||||
|
When complete, creating a new host should:
|
||||||
|
- Take < 5 minutes of human time
|
||||||
|
- Require minimal user input (hostname, IP, basic specs)
|
||||||
|
- Result in a fully configured, secret-enabled, DNS-registered host
|
||||||
|
- Be reproducible and documented
|
||||||
|
- Handle common errors gracefully
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- Keep incremental commits at each phase
|
||||||
|
- Test each phase independently before moving to next
|
||||||
|
- Maintain backward compatibility with manual workflow
|
||||||
|
- Document any manual steps that can't be automated
|
||||||
72
flake.lock
generated
72
flake.lock
generated
@@ -1,5 +1,26 @@
|
|||||||
{
|
{
|
||||||
"nodes": {
|
"nodes": {
|
||||||
|
"alerttonotify": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs": [
|
||||||
|
"nixpkgs-unstable"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1739310461,
|
||||||
|
"narHash": "sha256-GscftfATX84Aae9FObrQOe+hr5MsEma2Fc5fdzuu3hA=",
|
||||||
|
"ref": "master",
|
||||||
|
"rev": "53915cec6356be1a2d44ac2cbd0a71b32d679e6f",
|
||||||
|
"revCount": 7,
|
||||||
|
"type": "git",
|
||||||
|
"url": "https://git.t-juice.club/torjus/alerttonotify"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"ref": "master",
|
||||||
|
"type": "git",
|
||||||
|
"url": "https://git.t-juice.club/torjus/alerttonotify"
|
||||||
|
}
|
||||||
|
},
|
||||||
"backup-helper": {
|
"backup-helper": {
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"nixpkgs": [
|
"nixpkgs": [
|
||||||
@@ -7,11 +28,11 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1727998045,
|
"lastModified": 1738015166,
|
||||||
"narHash": "sha256-BOvQHqs50Hk1sevvuJQai83kYuwTN27FTgmTitPsJtw=",
|
"narHash": "sha256-573tR4aXNjILKvYnjZUM5DZZME2H6YTHJkUKs3ZehFU=",
|
||||||
"ref": "master",
|
"ref": "master",
|
||||||
"rev": "162c35769cc06b117b6753eb93460af650b64921",
|
"rev": "f9540cc065692c7ca80735e7b08399459e0ea6d6",
|
||||||
"revCount": 31,
|
"revCount": 35,
|
||||||
"type": "git",
|
"type": "git",
|
||||||
"url": "https://git.t-juice.club/torjus/backup-helper"
|
"url": "https://git.t-juice.club/torjus/backup-helper"
|
||||||
},
|
},
|
||||||
@@ -21,29 +42,50 @@
|
|||||||
"url": "https://git.t-juice.club/torjus/backup-helper"
|
"url": "https://git.t-juice.club/torjus/backup-helper"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"labmon": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs": [
|
||||||
|
"nixpkgs-unstable"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1748983975,
|
||||||
|
"narHash": "sha256-DA5mOqxwLMj/XLb4hvBU1WtE6cuVej7PjUr8N0EZsCE=",
|
||||||
|
"ref": "master",
|
||||||
|
"rev": "040a73e891a70ff06ec7ab31d7167914129dbf7d",
|
||||||
|
"revCount": 17,
|
||||||
|
"type": "git",
|
||||||
|
"url": "https://git.t-juice.club/torjus/labmon"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"ref": "master",
|
||||||
|
"type": "git",
|
||||||
|
"url": "https://git.t-juice.club/torjus/labmon"
|
||||||
|
}
|
||||||
|
},
|
||||||
"nixpkgs": {
|
"nixpkgs": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1737672001,
|
"lastModified": 1769598131,
|
||||||
"narHash": "sha256-YnHJJ19wqmibLQdUeq9xzE6CjrMA568KN/lFPuSVs4I=",
|
"narHash": "sha256-e7VO/kGLgRMbWtpBqdWl0uFg8Y2XWFMdz0uUJvlML8o=",
|
||||||
"owner": "nixos",
|
"owner": "nixos",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"rev": "035f8c0853c2977b24ffc4d0a42c74f00b182cd8",
|
"rev": "fa83fd837f3098e3e678e6cf017b2b36102c7211",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
"owner": "nixos",
|
"owner": "nixos",
|
||||||
"ref": "nixos-24.11",
|
"ref": "nixos-25.11",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"nixpkgs-unstable": {
|
"nixpkgs-unstable": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1737885589,
|
"lastModified": 1769461804,
|
||||||
"narHash": "sha256-Zf0hSrtzaM1DEz8//+Xs51k/wdSajticVrATqDrfQjg=",
|
"narHash": "sha256-msG8SU5WsBUfVVa/9RPLaymvi5bI8edTavbIq3vRlhI=",
|
||||||
"owner": "nixos",
|
"owner": "nixos",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"rev": "852ff1d9e153d8875a83602e03fdef8a63f0ecf8",
|
"rev": "bfc1b8a4574108ceef22f02bafcf6611380c100d",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
@@ -55,7 +97,9 @@
|
|||||||
},
|
},
|
||||||
"root": {
|
"root": {
|
||||||
"inputs": {
|
"inputs": {
|
||||||
|
"alerttonotify": "alerttonotify",
|
||||||
"backup-helper": "backup-helper",
|
"backup-helper": "backup-helper",
|
||||||
|
"labmon": "labmon",
|
||||||
"nixpkgs": "nixpkgs",
|
"nixpkgs": "nixpkgs",
|
||||||
"nixpkgs-unstable": "nixpkgs-unstable",
|
"nixpkgs-unstable": "nixpkgs-unstable",
|
||||||
"sops-nix": "sops-nix"
|
"sops-nix": "sops-nix"
|
||||||
@@ -68,11 +112,11 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1737411508,
|
"lastModified": 1769469829,
|
||||||
"narHash": "sha256-j9IdflJwRtqo9WpM0OfAZml47eBblUHGNQTe62OUqTw=",
|
"narHash": "sha256-wFcr32ZqspCxk4+FvIxIL0AZktRs6DuF8oOsLt59YBU=",
|
||||||
"owner": "Mic92",
|
"owner": "Mic92",
|
||||||
"repo": "sops-nix",
|
"repo": "sops-nix",
|
||||||
"rev": "015d461c16678fc02a2f405eb453abb509d4e1d4",
|
"rev": "c5eebd4eb2e3372fe12a8d70a248a6ee9dd02eff",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
|||||||
187
flake.nix
187
flake.nix
@@ -2,7 +2,7 @@
|
|||||||
description = "Homelab v5 Nixos Server Configurations";
|
description = "Homelab v5 Nixos Server Configurations";
|
||||||
|
|
||||||
inputs = {
|
inputs = {
|
||||||
nixpkgs.url = "github:nixos/nixpkgs?ref=nixos-24.11";
|
nixpkgs.url = "github:nixos/nixpkgs?ref=nixos-25.11";
|
||||||
nixpkgs-unstable.url = "github:nixos/nixpkgs?ref=nixos-unstable";
|
nixpkgs-unstable.url = "github:nixos/nixpkgs?ref=nixos-unstable";
|
||||||
|
|
||||||
sops-nix = {
|
sops-nix = {
|
||||||
@@ -13,6 +13,14 @@
|
|||||||
url = "git+https://git.t-juice.club/torjus/backup-helper?ref=master";
|
url = "git+https://git.t-juice.club/torjus/backup-helper?ref=master";
|
||||||
inputs.nixpkgs.follows = "nixpkgs-unstable";
|
inputs.nixpkgs.follows = "nixpkgs-unstable";
|
||||||
};
|
};
|
||||||
|
alerttonotify = {
|
||||||
|
url = "git+https://git.t-juice.club/torjus/alerttonotify?ref=master";
|
||||||
|
inputs.nixpkgs.follows = "nixpkgs-unstable";
|
||||||
|
};
|
||||||
|
labmon = {
|
||||||
|
url = "git+https://git.t-juice.club/torjus/labmon?ref=master";
|
||||||
|
inputs.nixpkgs.follows = "nixpkgs-unstable";
|
||||||
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
outputs =
|
outputs =
|
||||||
@@ -22,6 +30,8 @@
|
|||||||
nixpkgs-unstable,
|
nixpkgs-unstable,
|
||||||
sops-nix,
|
sops-nix,
|
||||||
backup-helper,
|
backup-helper,
|
||||||
|
alerttonotify,
|
||||||
|
labmon,
|
||||||
...
|
...
|
||||||
}@inputs:
|
}@inputs:
|
||||||
let
|
let
|
||||||
@@ -32,6 +42,19 @@
|
|||||||
config.allowUnfree = true;
|
config.allowUnfree = true;
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
commonOverlays = [
|
||||||
|
overlay-unstable
|
||||||
|
alerttonotify.overlays.default
|
||||||
|
labmon.overlays.default
|
||||||
|
];
|
||||||
|
allSystems = [
|
||||||
|
"x86_64-linux"
|
||||||
|
"aarch64-linux"
|
||||||
|
"x86_64-darwin"
|
||||||
|
"aarch64-darwin"
|
||||||
|
];
|
||||||
|
forAllSystems =
|
||||||
|
f: nixpkgs.lib.genAttrs allSystems (system: f { pkgs = import nixpkgs { inherit system; }; });
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
nixosConfigurations = {
|
nixosConfigurations = {
|
||||||
@@ -44,7 +67,7 @@
|
|||||||
(
|
(
|
||||||
{ config, pkgs, ... }:
|
{ config, pkgs, ... }:
|
||||||
{
|
{
|
||||||
nixpkgs.overlays = [ overlay-unstable ];
|
nixpkgs.overlays = commonOverlays;
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
./hosts/ns1
|
./hosts/ns1
|
||||||
@@ -60,7 +83,7 @@
|
|||||||
(
|
(
|
||||||
{ config, pkgs, ... }:
|
{ config, pkgs, ... }:
|
||||||
{
|
{
|
||||||
nixpkgs.overlays = [ overlay-unstable ];
|
nixpkgs.overlays = commonOverlays;
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
./hosts/ns2
|
./hosts/ns2
|
||||||
@@ -76,7 +99,7 @@
|
|||||||
(
|
(
|
||||||
{ config, pkgs, ... }:
|
{ config, pkgs, ... }:
|
||||||
{
|
{
|
||||||
nixpkgs.overlays = [ overlay-unstable ];
|
nixpkgs.overlays = commonOverlays;
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
./hosts/ns3
|
./hosts/ns3
|
||||||
@@ -92,7 +115,7 @@
|
|||||||
(
|
(
|
||||||
{ config, pkgs, ... }:
|
{ config, pkgs, ... }:
|
||||||
{
|
{
|
||||||
nixpkgs.overlays = [ overlay-unstable ];
|
nixpkgs.overlays = commonOverlays;
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
./hosts/ns4
|
./hosts/ns4
|
||||||
@@ -108,7 +131,7 @@
|
|||||||
(
|
(
|
||||||
{ config, pkgs, ... }:
|
{ config, pkgs, ... }:
|
||||||
{
|
{
|
||||||
nixpkgs.overlays = [ overlay-unstable ];
|
nixpkgs.overlays = commonOverlays;
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
./hosts/nixos-test1
|
./hosts/nixos-test1
|
||||||
@@ -125,7 +148,7 @@
|
|||||||
(
|
(
|
||||||
{ config, pkgs, ... }:
|
{ config, pkgs, ... }:
|
||||||
{
|
{
|
||||||
nixpkgs.overlays = [ overlay-unstable ];
|
nixpkgs.overlays = commonOverlays;
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
./hosts/ha1
|
./hosts/ha1
|
||||||
@@ -133,40 +156,6 @@
|
|||||||
backup-helper.nixosModules.backup-helper
|
backup-helper.nixosModules.backup-helper
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
inc1 = nixpkgs.lib.nixosSystem {
|
|
||||||
inherit system;
|
|
||||||
specialArgs = {
|
|
||||||
inherit inputs self sops-nix;
|
|
||||||
};
|
|
||||||
modules = [
|
|
||||||
(
|
|
||||||
{ config, pkgs, ... }:
|
|
||||||
{
|
|
||||||
nixpkgs.overlays = [ overlay-unstable ];
|
|
||||||
}
|
|
||||||
)
|
|
||||||
./hosts/inc1
|
|
||||||
sops-nix.nixosModules.sops
|
|
||||||
# backup-helper.nixosModules.backup-helper
|
|
||||||
];
|
|
||||||
};
|
|
||||||
inc2 = nixpkgs.lib.nixosSystem {
|
|
||||||
inherit system;
|
|
||||||
specialArgs = {
|
|
||||||
inherit inputs self sops-nix;
|
|
||||||
};
|
|
||||||
modules = [
|
|
||||||
(
|
|
||||||
{ config, pkgs, ... }:
|
|
||||||
{
|
|
||||||
nixpkgs.overlays = [ overlay-unstable ];
|
|
||||||
}
|
|
||||||
)
|
|
||||||
./hosts/inc2
|
|
||||||
sops-nix.nixosModules.sops
|
|
||||||
# backup-helper.nixosModules.backup-helper
|
|
||||||
];
|
|
||||||
};
|
|
||||||
template1 = nixpkgs.lib.nixosSystem {
|
template1 = nixpkgs.lib.nixosSystem {
|
||||||
inherit system;
|
inherit system;
|
||||||
specialArgs = {
|
specialArgs = {
|
||||||
@@ -176,13 +165,29 @@
|
|||||||
(
|
(
|
||||||
{ config, pkgs, ... }:
|
{ config, pkgs, ... }:
|
||||||
{
|
{
|
||||||
nixpkgs.overlays = [ overlay-unstable ];
|
nixpkgs.overlays = commonOverlays;
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
./hosts/template
|
./hosts/template
|
||||||
sops-nix.nixosModules.sops
|
sops-nix.nixosModules.sops
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
|
template2 = nixpkgs.lib.nixosSystem {
|
||||||
|
inherit system;
|
||||||
|
specialArgs = {
|
||||||
|
inherit inputs self sops-nix;
|
||||||
|
};
|
||||||
|
modules = [
|
||||||
|
(
|
||||||
|
{ config, pkgs, ... }:
|
||||||
|
{
|
||||||
|
nixpkgs.overlays = commonOverlays;
|
||||||
|
}
|
||||||
|
)
|
||||||
|
./hosts/template2
|
||||||
|
sops-nix.nixosModules.sops
|
||||||
|
];
|
||||||
|
};
|
||||||
http-proxy = nixpkgs.lib.nixosSystem {
|
http-proxy = nixpkgs.lib.nixosSystem {
|
||||||
inherit system;
|
inherit system;
|
||||||
specialArgs = {
|
specialArgs = {
|
||||||
@@ -192,7 +197,7 @@
|
|||||||
(
|
(
|
||||||
{ config, pkgs, ... }:
|
{ config, pkgs, ... }:
|
||||||
{
|
{
|
||||||
nixpkgs.overlays = [ overlay-unstable ];
|
nixpkgs.overlays = commonOverlays;
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
./hosts/http-proxy
|
./hosts/http-proxy
|
||||||
@@ -208,7 +213,7 @@
|
|||||||
(
|
(
|
||||||
{ config, pkgs, ... }:
|
{ config, pkgs, ... }:
|
||||||
{
|
{
|
||||||
nixpkgs.overlays = [ overlay-unstable ];
|
nixpkgs.overlays = commonOverlays;
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
./hosts/ca
|
./hosts/ca
|
||||||
@@ -224,11 +229,13 @@
|
|||||||
(
|
(
|
||||||
{ config, pkgs, ... }:
|
{ config, pkgs, ... }:
|
||||||
{
|
{
|
||||||
nixpkgs.overlays = [ overlay-unstable ];
|
nixpkgs.overlays = commonOverlays;
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
./hosts/monitoring01
|
./hosts/monitoring01
|
||||||
sops-nix.nixosModules.sops
|
sops-nix.nixosModules.sops
|
||||||
|
backup-helper.nixosModules.backup-helper
|
||||||
|
labmon.nixosModules.labmon
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
jelly01 = nixpkgs.lib.nixosSystem {
|
jelly01 = nixpkgs.lib.nixosSystem {
|
||||||
@@ -240,7 +247,7 @@
|
|||||||
(
|
(
|
||||||
{ config, pkgs, ... }:
|
{ config, pkgs, ... }:
|
||||||
{
|
{
|
||||||
nixpkgs.overlays = [ overlay-unstable ];
|
nixpkgs.overlays = commonOverlays;
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
./hosts/jelly01
|
./hosts/jelly01
|
||||||
@@ -256,7 +263,7 @@
|
|||||||
(
|
(
|
||||||
{ config, pkgs, ... }:
|
{ config, pkgs, ... }:
|
||||||
{
|
{
|
||||||
nixpkgs.overlays = [ overlay-unstable ];
|
nixpkgs.overlays = commonOverlays;
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
./hosts/nix-cache01
|
./hosts/nix-cache01
|
||||||
@@ -272,7 +279,7 @@
|
|||||||
(
|
(
|
||||||
{ config, pkgs, ... }:
|
{ config, pkgs, ... }:
|
||||||
{
|
{
|
||||||
nixpkgs.overlays = [ overlay-unstable ];
|
nixpkgs.overlays = commonOverlays;
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
./hosts/media1
|
./hosts/media1
|
||||||
@@ -288,13 +295,95 @@
|
|||||||
(
|
(
|
||||||
{ config, pkgs, ... }:
|
{ config, pkgs, ... }:
|
||||||
{
|
{
|
||||||
nixpkgs.overlays = [ overlay-unstable ];
|
nixpkgs.overlays = commonOverlays;
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
./hosts/pgdb1
|
./hosts/pgdb1
|
||||||
sops-nix.nixosModules.sops
|
sops-nix.nixosModules.sops
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
|
nats1 = nixpkgs.lib.nixosSystem {
|
||||||
|
inherit system;
|
||||||
|
specialArgs = {
|
||||||
|
inherit inputs self sops-nix;
|
||||||
};
|
};
|
||||||
|
modules = [
|
||||||
|
(
|
||||||
|
{ config, pkgs, ... }:
|
||||||
|
{
|
||||||
|
nixpkgs.overlays = commonOverlays;
|
||||||
|
}
|
||||||
|
)
|
||||||
|
./hosts/nats1
|
||||||
|
sops-nix.nixosModules.sops
|
||||||
|
];
|
||||||
|
};
|
||||||
|
auth01 = nixpkgs.lib.nixosSystem {
|
||||||
|
inherit system;
|
||||||
|
specialArgs = {
|
||||||
|
inherit inputs self sops-nix;
|
||||||
|
};
|
||||||
|
modules = [
|
||||||
|
(
|
||||||
|
{ config, pkgs, ... }:
|
||||||
|
{
|
||||||
|
nixpkgs.overlays = commonOverlays;
|
||||||
|
}
|
||||||
|
)
|
||||||
|
./hosts/auth01
|
||||||
|
sops-nix.nixosModules.sops
|
||||||
|
];
|
||||||
|
};
|
||||||
|
testvm01 = nixpkgs.lib.nixosSystem {
|
||||||
|
inherit system;
|
||||||
|
specialArgs = {
|
||||||
|
inherit inputs self sops-nix;
|
||||||
|
};
|
||||||
|
modules = [
|
||||||
|
(
|
||||||
|
{ config, pkgs, ... }:
|
||||||
|
{
|
||||||
|
nixpkgs.overlays = commonOverlays;
|
||||||
|
}
|
||||||
|
)
|
||||||
|
./hosts/testvm01
|
||||||
|
sops-nix.nixosModules.sops
|
||||||
|
];
|
||||||
|
};
|
||||||
|
vault01 = nixpkgs.lib.nixosSystem {
|
||||||
|
inherit system;
|
||||||
|
specialArgs = {
|
||||||
|
inherit inputs self sops-nix;
|
||||||
|
};
|
||||||
|
modules = [
|
||||||
|
(
|
||||||
|
{ config, pkgs, ... }:
|
||||||
|
{
|
||||||
|
nixpkgs.overlays = commonOverlays;
|
||||||
|
}
|
||||||
|
)
|
||||||
|
./hosts/vault01
|
||||||
|
sops-nix.nixosModules.sops
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
packages = forAllSystems (
|
||||||
|
{ pkgs }:
|
||||||
|
{
|
||||||
|
create-host = pkgs.callPackage ./scripts/create-host { };
|
||||||
|
}
|
||||||
|
);
|
||||||
|
devShells = forAllSystems (
|
||||||
|
{ pkgs }:
|
||||||
|
{
|
||||||
|
default = pkgs.mkShell {
|
||||||
|
packages = with pkgs; [
|
||||||
|
ansible
|
||||||
|
opentofu
|
||||||
|
(pkgs.callPackage ./scripts/create-host { })
|
||||||
|
];
|
||||||
|
};
|
||||||
|
}
|
||||||
|
);
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|||||||
65
hosts/auth01/configuration.nix
Normal file
65
hosts/auth01/configuration.nix
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
{
|
||||||
|
pkgs,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
../template/hardware-configuration.nix
|
||||||
|
|
||||||
|
../../system
|
||||||
|
../../common/vm
|
||||||
|
];
|
||||||
|
|
||||||
|
nixpkgs.config.allowUnfree = true;
|
||||||
|
# Use the systemd-boot EFI boot loader.
|
||||||
|
boot.loader.grub = {
|
||||||
|
enable = true;
|
||||||
|
device = "/dev/sda";
|
||||||
|
configurationLimit = 3;
|
||||||
|
};
|
||||||
|
|
||||||
|
networking.hostName = "auth01";
|
||||||
|
networking.domain = "home.2rjus.net";
|
||||||
|
networking.useNetworkd = true;
|
||||||
|
networking.useDHCP = false;
|
||||||
|
services.resolved.enable = true;
|
||||||
|
networking.nameservers = [
|
||||||
|
"10.69.13.5"
|
||||||
|
"10.69.13.6"
|
||||||
|
];
|
||||||
|
|
||||||
|
systemd.network.enable = true;
|
||||||
|
systemd.network.networks."ens18" = {
|
||||||
|
matchConfig.Name = "ens18";
|
||||||
|
address = [
|
||||||
|
"10.69.13.18/24"
|
||||||
|
];
|
||||||
|
routes = [
|
||||||
|
{ Gateway = "10.69.13.1"; }
|
||||||
|
];
|
||||||
|
linkConfig.RequiredForOnline = "routable";
|
||||||
|
};
|
||||||
|
time.timeZone = "Europe/Oslo";
|
||||||
|
|
||||||
|
nix.settings.experimental-features = [
|
||||||
|
"nix-command"
|
||||||
|
"flakes"
|
||||||
|
];
|
||||||
|
nix.settings.tarball-ttl = 0;
|
||||||
|
environment.systemPackages = with pkgs; [
|
||||||
|
vim
|
||||||
|
wget
|
||||||
|
git
|
||||||
|
];
|
||||||
|
|
||||||
|
services.qemuGuest.enable = true;
|
||||||
|
|
||||||
|
# Open ports in the firewall.
|
||||||
|
# networking.firewall.allowedTCPPorts = [ ... ];
|
||||||
|
# networking.firewall.allowedUDPPorts = [ ... ];
|
||||||
|
# Or disable the firewall altogether.
|
||||||
|
networking.firewall.enable = false;
|
||||||
|
|
||||||
|
system.stateVersion = "23.11"; # Did you read the comment?
|
||||||
|
}
|
||||||
8
hosts/auth01/default.nix
Normal file
8
hosts/auth01/default.nix
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
./configuration.nix
|
||||||
|
../../services/lldap
|
||||||
|
../../services/authelia
|
||||||
|
];
|
||||||
|
}
|
||||||
@@ -3,5 +3,6 @@
|
|||||||
imports = [
|
imports = [
|
||||||
./configuration.nix
|
./configuration.nix
|
||||||
../../services/http-proxy
|
../../services/http-proxy
|
||||||
|
./wireguard.nix
|
||||||
];
|
];
|
||||||
}
|
}
|
||||||
|
|||||||
33
hosts/http-proxy/wireguard.nix
Normal file
33
hosts/http-proxy/wireguard.nix
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
{ config, ... }:
|
||||||
|
{
|
||||||
|
sops.secrets.wireguard_private_key = {
|
||||||
|
sopsFile = ../../secrets/http-proxy/wireguard.yaml;
|
||||||
|
key = "wg_private_key";
|
||||||
|
};
|
||||||
|
networking.wireguard = {
|
||||||
|
enable = true;
|
||||||
|
useNetworkd = true;
|
||||||
|
|
||||||
|
interfaces = {
|
||||||
|
wg0 = {
|
||||||
|
ips = [ "10.69.222.3/24" ];
|
||||||
|
mtu = 1384;
|
||||||
|
listenPort = 51820;
|
||||||
|
privateKeyFile = config.sops.secrets.wireguard_private_key.path;
|
||||||
|
peers = [
|
||||||
|
{
|
||||||
|
name = "docker2.t-juice.club";
|
||||||
|
endpoint = "docker2.t-juice.club:51820";
|
||||||
|
publicKey = "32Rb13wExcy8uI92JTnFdiOfkv0mlQ6f181WA741DHs=";
|
||||||
|
allowedIPs = [ "10.69.222.0/24" ];
|
||||||
|
persistentKeepalive = 25;
|
||||||
|
}
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
# monitoring
|
||||||
|
services.prometheus.exporters.wireguard = {
|
||||||
|
enable = true;
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -1,96 +0,0 @@
|
|||||||
# Edit this configuration file to define what should be installed on
|
|
||||||
# your system. Help is available in the configuration.nix(5) man page, on
|
|
||||||
# https://search.nixos.org/options and in the NixOS manual (`nixos-help`).
|
|
||||||
|
|
||||||
{ config, lib, pkgs, ... }:
|
|
||||||
|
|
||||||
{
|
|
||||||
imports =
|
|
||||||
[
|
|
||||||
# Include the results of the hardware scan.
|
|
||||||
./hardware-configuration.nix
|
|
||||||
../../system
|
|
||||||
../../services/incus
|
|
||||||
];
|
|
||||||
|
|
||||||
# Use the systemd-boot EFI boot loader.
|
|
||||||
boot.loader.systemd-boot.enable = true;
|
|
||||||
boot.loader.efi.canTouchEfiVariables = true;
|
|
||||||
|
|
||||||
boot.kernel.sysctl = {
|
|
||||||
"net.ipv4.ip_forward" = 1;
|
|
||||||
};
|
|
||||||
|
|
||||||
networking.hostName = "inc1";
|
|
||||||
networking.domain = "home.2rjus.net";
|
|
||||||
networking.useNetworkd = true;
|
|
||||||
networking.useDHCP = false;
|
|
||||||
networking.nftables.enable = true;
|
|
||||||
networking.firewall.trustedInterfaces = [ "vlan13" ];
|
|
||||||
|
|
||||||
services.resolved.enable = true;
|
|
||||||
networking.nameservers = [
|
|
||||||
"10.69.13.5"
|
|
||||||
"10.69.13.6"
|
|
||||||
];
|
|
||||||
|
|
||||||
systemd.network.enable = true;
|
|
||||||
# Primary interface
|
|
||||||
systemd.network.networks."enp2s0" = {
|
|
||||||
matchConfig.Name = "enp2s0";
|
|
||||||
address = [
|
|
||||||
"10.69.12.80/24"
|
|
||||||
];
|
|
||||||
networkConfig = {
|
|
||||||
VLAN = [ "enp2s0.13" ];
|
|
||||||
};
|
|
||||||
routes = [
|
|
||||||
{ Gateway = "10.69.12.1"; }
|
|
||||||
];
|
|
||||||
linkConfig.RequiredForOnline = "routable";
|
|
||||||
};
|
|
||||||
|
|
||||||
# VLAN 13 netdev
|
|
||||||
systemd.network.netdevs."enp2s0.13" = {
|
|
||||||
enable = true;
|
|
||||||
netdevConfig = {
|
|
||||||
Kind = "vlan";
|
|
||||||
Name = "enp2s0.13";
|
|
||||||
};
|
|
||||||
vlanConfig = {
|
|
||||||
Id = 13;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
# # Bridge netdev
|
|
||||||
# systemd.network.netdevs."br13" = {
|
|
||||||
# netdevConfig = {
|
|
||||||
# Name = "br13";
|
|
||||||
# Kind = "bridge";
|
|
||||||
# };
|
|
||||||
# };
|
|
||||||
|
|
||||||
# # Bridge network
|
|
||||||
# systemd.network.networks."br13" = {
|
|
||||||
# matchConfig.Name = "enp2s0.13";
|
|
||||||
# networkConfig.Bridge = "br13";
|
|
||||||
# };
|
|
||||||
|
|
||||||
time.timeZone = "Europe/Oslo";
|
|
||||||
|
|
||||||
nix.settings.experimental-features = [ "nix-command" "flakes" ];
|
|
||||||
nix.settings.tarball-ttl = 0;
|
|
||||||
environment.systemPackages = with pkgs; [
|
|
||||||
tcpdump
|
|
||||||
vim
|
|
||||||
wget
|
|
||||||
git
|
|
||||||
];
|
|
||||||
|
|
||||||
# Enable the OpenSSH daemon.
|
|
||||||
# services.openssh.enable = true;
|
|
||||||
# services.openssh.settings.PermitRootLogin = "yes";
|
|
||||||
|
|
||||||
system.stateVersion = "24.05"; # Did you read the comment?
|
|
||||||
}
|
|
||||||
|
|
||||||
@@ -1,41 +0,0 @@
|
|||||||
# Do not modify this file! It was generated by ‘nixos-generate-config’
|
|
||||||
# and may be overwritten by future invocations. Please make changes
|
|
||||||
# to /etc/nixos/configuration.nix instead.
|
|
||||||
{ config, lib, pkgs, modulesPath, ... }:
|
|
||||||
|
|
||||||
{
|
|
||||||
imports =
|
|
||||||
[ (modulesPath + "/installer/scan/not-detected.nix")
|
|
||||||
];
|
|
||||||
|
|
||||||
boot.initrd.availableKernelModules = [ "xhci_pci" "nvme" "ahci" "usbhid" "usb_storage" "sd_mod" "rtsx_usb_sdmmc" ];
|
|
||||||
boot.initrd.kernelModules = [ ];
|
|
||||||
boot.kernelModules = [ "kvm-amd" ];
|
|
||||||
boot.extraModulePackages = [ ];
|
|
||||||
|
|
||||||
fileSystems."/" =
|
|
||||||
{ device = "/dev/disk/by-uuid/faa60038-b3a4-448a-8909-49857818c955";
|
|
||||||
fsType = "xfs";
|
|
||||||
};
|
|
||||||
|
|
||||||
fileSystems."/boot" =
|
|
||||||
{ device = "/dev/disk/by-uuid/7A94-A91C";
|
|
||||||
fsType = "vfat";
|
|
||||||
options = [ "fmask=0077" "dmask=0077" ];
|
|
||||||
};
|
|
||||||
|
|
||||||
swapDevices =
|
|
||||||
[ { device = "/dev/disk/by-uuid/f7a4f85e-0b4b-492d-a611-f50d2b915c2c"; }
|
|
||||||
];
|
|
||||||
|
|
||||||
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
|
|
||||||
# (the default) this is the recommended approach. When using systemd-networkd it's
|
|
||||||
# still possible to use this option, but it's recommended to use it in conjunction
|
|
||||||
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
|
|
||||||
networking.useDHCP = lib.mkDefault true;
|
|
||||||
# networking.interfaces.enp2s0.useDHCP = lib.mkDefault true;
|
|
||||||
# networking.interfaces.wlp3s0.useDHCP = lib.mkDefault true;
|
|
||||||
|
|
||||||
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
|
|
||||||
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
|
|
||||||
}
|
|
||||||
@@ -1,96 +0,0 @@
|
|||||||
# Edit this configuration file to define what should be installed on
|
|
||||||
# your system. Help is available in the configuration.nix(5) man page, on
|
|
||||||
# https://search.nixos.org/options and in the NixOS manual (`nixos-help`).
|
|
||||||
|
|
||||||
{ config, lib, pkgs, ... }:
|
|
||||||
|
|
||||||
{
|
|
||||||
imports =
|
|
||||||
[
|
|
||||||
# Include the results of the hardware scan.
|
|
||||||
./hardware-configuration.nix
|
|
||||||
../../system
|
|
||||||
../../services/incus
|
|
||||||
];
|
|
||||||
|
|
||||||
# Use the systemd-boot EFI boot loader.
|
|
||||||
boot.loader.systemd-boot.enable = true;
|
|
||||||
boot.loader.efi.canTouchEfiVariables = true;
|
|
||||||
|
|
||||||
boot.kernel.sysctl = {
|
|
||||||
"net.ipv4.ip_forward" = 1;
|
|
||||||
};
|
|
||||||
|
|
||||||
networking.hostName = "inc2";
|
|
||||||
networking.domain = "home.2rjus.net";
|
|
||||||
networking.useNetworkd = true;
|
|
||||||
networking.useDHCP = false;
|
|
||||||
networking.nftables.enable = true;
|
|
||||||
networking.firewall.trustedInterfaces = [ "vlan13" ];
|
|
||||||
|
|
||||||
services.resolved.enable = true;
|
|
||||||
networking.nameservers = [
|
|
||||||
"10.69.13.5"
|
|
||||||
"10.69.13.6"
|
|
||||||
];
|
|
||||||
|
|
||||||
systemd.network.enable = true;
|
|
||||||
# Primary interface
|
|
||||||
systemd.network.networks."enp2s0" = {
|
|
||||||
matchConfig.Name = "enp2s0";
|
|
||||||
address = [
|
|
||||||
"10.69.12.81/24"
|
|
||||||
];
|
|
||||||
networkConfig = {
|
|
||||||
VLAN = [ "enp2s0.13" ];
|
|
||||||
};
|
|
||||||
routes = [
|
|
||||||
{ Gateway = "10.69.12.1"; }
|
|
||||||
];
|
|
||||||
linkConfig.RequiredForOnline = "routable";
|
|
||||||
};
|
|
||||||
|
|
||||||
# VLAN 13 netdev
|
|
||||||
systemd.network.netdevs."enp2s0.13" = {
|
|
||||||
enable = true;
|
|
||||||
netdevConfig = {
|
|
||||||
Kind = "vlan";
|
|
||||||
Name = "enp2s0.13";
|
|
||||||
};
|
|
||||||
vlanConfig = {
|
|
||||||
Id = 13;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
# # Bridge netdev
|
|
||||||
# systemd.network.netdevs."br13" = {
|
|
||||||
# netdevConfig = {
|
|
||||||
# Name = "br13";
|
|
||||||
# Kind = "bridge";
|
|
||||||
# };
|
|
||||||
# };
|
|
||||||
|
|
||||||
# # Bridge network
|
|
||||||
# systemd.network.networks."br13" = {
|
|
||||||
# matchConfig.Name = "enp2s0.13";
|
|
||||||
# networkConfig.Bridge = "br13";
|
|
||||||
# };
|
|
||||||
|
|
||||||
time.timeZone = "Europe/Oslo";
|
|
||||||
|
|
||||||
nix.settings.experimental-features = [ "nix-command" "flakes" ];
|
|
||||||
nix.settings.tarball-ttl = 0;
|
|
||||||
environment.systemPackages = with pkgs; [
|
|
||||||
tcpdump
|
|
||||||
vim
|
|
||||||
wget
|
|
||||||
git
|
|
||||||
];
|
|
||||||
|
|
||||||
# Enable the OpenSSH daemon.
|
|
||||||
# services.openssh.enable = true;
|
|
||||||
# services.openssh.settings.PermitRootLogin = "yes";
|
|
||||||
|
|
||||||
system.stateVersion = "24.05"; # Did you read the comment?
|
|
||||||
}
|
|
||||||
|
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
{ config, lib, pkgs, modulesPath, ... }:
|
|
||||||
|
|
||||||
{
|
|
||||||
imports =
|
|
||||||
[
|
|
||||||
(modulesPath + "/installer/scan/not-detected.nix")
|
|
||||||
];
|
|
||||||
|
|
||||||
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usb_storage" "usbhid" "sd_mod" "rtsx_usb_sdmmc" ];
|
|
||||||
boot.initrd.kernelModules = [ ];
|
|
||||||
boot.kernelModules = [ "kvm-amd" ];
|
|
||||||
boot.extraModulePackages = [ ];
|
|
||||||
|
|
||||||
fileSystems."/" =
|
|
||||||
{
|
|
||||||
device = "/dev/disk/by-uuid/3e7c311c-b1a3-4be7-b8bf-e497cba64302";
|
|
||||||
fsType = "btrfs";
|
|
||||||
};
|
|
||||||
|
|
||||||
fileSystems."/boot" =
|
|
||||||
{
|
|
||||||
device = "/dev/disk/by-uuid/F0D7-E5C1";
|
|
||||||
fsType = "vfat";
|
|
||||||
options = [ "fmask=0022" "dmask=0022" ];
|
|
||||||
};
|
|
||||||
|
|
||||||
swapDevices =
|
|
||||||
[{ device = "/dev/disk/by-uuid/1a06a36f-da61-4d36-b94e-b852836c328a"; }];
|
|
||||||
|
|
||||||
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
|
|
||||||
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
|
|
||||||
}
|
|
||||||
|
|
||||||
@@ -61,5 +61,9 @@
|
|||||||
# Or disable the firewall altogether.
|
# Or disable the firewall altogether.
|
||||||
networking.firewall.enable = false;
|
networking.firewall.enable = false;
|
||||||
|
|
||||||
|
zramSwap = {
|
||||||
|
enable = true;
|
||||||
|
};
|
||||||
|
|
||||||
system.stateVersion = "23.11"; # Did you read the comment?
|
system.stateVersion = "23.11"; # Did you read the comment?
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -17,15 +17,10 @@
|
|||||||
loader.systemd-boot = {
|
loader.systemd-boot = {
|
||||||
enable = true;
|
enable = true;
|
||||||
configurationLimit = 5;
|
configurationLimit = 5;
|
||||||
|
memtest86.enable = true;
|
||||||
};
|
};
|
||||||
loader.efi.canTouchEfiVariables = true;
|
loader.efi.canTouchEfiVariables = true;
|
||||||
supportedFilesystems = [ "nfs" ];
|
supportedFilesystems = [ "nfs" ];
|
||||||
kernelPackages = pkgs.linuxPackages_latest;
|
|
||||||
kernelParams = [
|
|
||||||
"quiet"
|
|
||||||
"splash"
|
|
||||||
"rd.systemd.show_status=false"
|
|
||||||
];
|
|
||||||
};
|
};
|
||||||
|
|
||||||
networking.hostName = "media1";
|
networking.hostName = "media1";
|
||||||
@@ -42,10 +37,10 @@
|
|||||||
systemd.network.networks."enp2s0" = {
|
systemd.network.networks."enp2s0" = {
|
||||||
matchConfig.Name = "enp2s0";
|
matchConfig.Name = "enp2s0";
|
||||||
address = [
|
address = [
|
||||||
"10.69.31.49/24"
|
"10.69.12.82/24"
|
||||||
];
|
];
|
||||||
routes = [
|
routes = [
|
||||||
{ Gateway = "10.69.31.1"; }
|
{ Gateway = "10.69.12.1"; }
|
||||||
];
|
];
|
||||||
linkConfig.RequiredForOnline = "routable";
|
linkConfig.RequiredForOnline = "routable";
|
||||||
};
|
};
|
||||||
@@ -55,8 +50,8 @@
|
|||||||
hardware.graphics = {
|
hardware.graphics = {
|
||||||
enable = true;
|
enable = true;
|
||||||
extraPackages = with pkgs; [
|
extraPackages = with pkgs; [
|
||||||
vaapiVdpau
|
|
||||||
libvdpau-va-gl
|
libvdpau-va-gl
|
||||||
|
libva-vdpau-driver
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@@ -1,58 +1,33 @@
|
|||||||
# Do not modify this file! It was generated by ‘nixos-generate-config’
|
{ config, lib, pkgs, modulesPath, ... }:
|
||||||
# and may be overwritten by future invocations. Please make changes
|
|
||||||
# to /etc/nixos/configuration.nix instead.
|
|
||||||
{
|
|
||||||
config,
|
|
||||||
lib,
|
|
||||||
pkgs,
|
|
||||||
modulesPath,
|
|
||||||
...
|
|
||||||
}:
|
|
||||||
|
|
||||||
{
|
{
|
||||||
imports = [
|
imports =
|
||||||
|
[
|
||||||
(modulesPath + "/installer/scan/not-detected.nix")
|
(modulesPath + "/installer/scan/not-detected.nix")
|
||||||
];
|
];
|
||||||
|
|
||||||
boot.initrd.availableKernelModules = [
|
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usb_storage" "usbhid" "sd_mod" "rtsx_usb_sdmmc" ];
|
||||||
"xhci_pci"
|
|
||||||
"nvme"
|
|
||||||
"ahci"
|
|
||||||
"usbhid"
|
|
||||||
"usb_storage"
|
|
||||||
"sd_mod"
|
|
||||||
"rtsx_usb_sdmmc"
|
|
||||||
];
|
|
||||||
boot.initrd.kernelModules = [ ];
|
boot.initrd.kernelModules = [ ];
|
||||||
boot.kernelModules = [ "kvm-amd" ];
|
boot.kernelModules = [ "kvm-amd" ];
|
||||||
boot.extraModulePackages = [ ];
|
boot.extraModulePackages = [ ];
|
||||||
|
|
||||||
fileSystems."/" = {
|
fileSystems."/" =
|
||||||
device = "/dev/disk/by-uuid/faa60038-b3a4-448a-8909-49857818c955";
|
{
|
||||||
fsType = "xfs";
|
device = "/dev/disk/by-uuid/3e7c311c-b1a3-4be7-b8bf-e497cba64302";
|
||||||
|
fsType = "btrfs";
|
||||||
};
|
};
|
||||||
|
|
||||||
fileSystems."/boot" = {
|
fileSystems."/boot" =
|
||||||
device = "/dev/disk/by-uuid/7A94-A91C";
|
{
|
||||||
|
device = "/dev/disk/by-uuid/F0D7-E5C1";
|
||||||
fsType = "vfat";
|
fsType = "vfat";
|
||||||
options = [
|
options = [ "fmask=0022" "dmask=0022" ];
|
||||||
"fmask=0077"
|
|
||||||
"dmask=0077"
|
|
||||||
];
|
|
||||||
};
|
};
|
||||||
|
|
||||||
swapDevices = [
|
swapDevices =
|
||||||
{ device = "/dev/disk/by-uuid/f7a4f85e-0b4b-492d-a611-f50d2b915c2c"; }
|
[{ device = "/dev/disk/by-uuid/1a06a36f-da61-4d36-b94e-b852836c328a"; }];
|
||||||
];
|
|
||||||
|
|
||||||
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
|
|
||||||
# (the default) this is the recommended approach. When using systemd-networkd it's
|
|
||||||
# still possible to use this option, but it's recommended to use it in conjunction
|
|
||||||
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
|
|
||||||
networking.useDHCP = lib.mkDefault true;
|
|
||||||
# networking.interfaces.enp2s0.useDHCP = lib.mkDefault true;
|
|
||||||
# networking.interfaces.wlp3s0.useDHCP = lib.mkDefault true;
|
|
||||||
|
|
||||||
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
|
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
|
||||||
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
|
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -11,12 +11,15 @@ in
|
|||||||
isNormalUser = true;
|
isNormalUser = true;
|
||||||
description = "Kodi Media Center user";
|
description = "Kodi Media Center user";
|
||||||
};
|
};
|
||||||
services.xserver = {
|
#services.xserver = {
|
||||||
enable = true;
|
# enable = true;
|
||||||
};
|
#};
|
||||||
services.cage = {
|
services.cage = {
|
||||||
enable = true;
|
enable = true;
|
||||||
user = "kodi";
|
user = "kodi";
|
||||||
|
environment = {
|
||||||
|
XKB_DEFAULT_LAYOUT = "no";
|
||||||
|
};
|
||||||
program = "${kodipkg}/bin/kodi";
|
program = "${kodipkg}/bin/kodi";
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@@ -51,10 +51,79 @@
|
|||||||
vim
|
vim
|
||||||
wget
|
wget
|
||||||
git
|
git
|
||||||
|
sqlite
|
||||||
];
|
];
|
||||||
|
|
||||||
services.qemuGuest.enable = true;
|
services.qemuGuest.enable = true;
|
||||||
|
|
||||||
|
sops.secrets."backup_helper_secret" = { };
|
||||||
|
backup-helper = {
|
||||||
|
enable = true;
|
||||||
|
password-file = "/run/secrets/backup_helper_secret";
|
||||||
|
backup-dirs = [
|
||||||
|
"/var/lib/grafana/plugins"
|
||||||
|
];
|
||||||
|
backup-commands = [
|
||||||
|
# "grafana.db:${pkgs.sqlite}/bin/sqlite /var/lib/grafana/data/grafana.db .dump"
|
||||||
|
"grafana.db:${pkgs.sqlite}/bin/sqlite3 /var/lib/grafana/data/grafana.db .dump"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
labmon = {
|
||||||
|
enable = true;
|
||||||
|
|
||||||
|
settings = {
|
||||||
|
ListenAddr = ":9969";
|
||||||
|
Profiling = true;
|
||||||
|
StepMonitors = [
|
||||||
|
{
|
||||||
|
Enabled = true;
|
||||||
|
BaseURL = "https://ca.home.2rjus.net";
|
||||||
|
RootID = "3381bda8015a86b9a3cd1851439d1091890a79005e0f1f7c4301fe4bccc29d80";
|
||||||
|
}
|
||||||
|
];
|
||||||
|
|
||||||
|
TLSConnectionMonitors = [
|
||||||
|
{
|
||||||
|
Enabled = true;
|
||||||
|
Address = "ca.home.2rjus.net:443";
|
||||||
|
Verify = true;
|
||||||
|
Duration = "12h";
|
||||||
|
}
|
||||||
|
{
|
||||||
|
Enabled = true;
|
||||||
|
Address = "jelly.home.2rjus.net:443";
|
||||||
|
Verify = true;
|
||||||
|
Duration = "12h";
|
||||||
|
}
|
||||||
|
{
|
||||||
|
Enabled = true;
|
||||||
|
Address = "grafana.home.2rjus.net:443";
|
||||||
|
Verify = true;
|
||||||
|
Duration = "12h";
|
||||||
|
}
|
||||||
|
{
|
||||||
|
Enabled = true;
|
||||||
|
Address = "prometheus.home.2rjus.net:443";
|
||||||
|
Verify = true;
|
||||||
|
Duration = "12h";
|
||||||
|
}
|
||||||
|
{
|
||||||
|
Enabled = true;
|
||||||
|
Address = "alertmanager.home.2rjus.net:443";
|
||||||
|
Verify = true;
|
||||||
|
Duration = "12h";
|
||||||
|
}
|
||||||
|
{
|
||||||
|
Enabled = true;
|
||||||
|
Address = "pyroscope.home.2rjus.net:443";
|
||||||
|
Verify = true;
|
||||||
|
Duration = "12h";
|
||||||
|
}
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
# Open ports in the firewall.
|
# Open ports in the firewall.
|
||||||
# networking.firewall.allowedTCPPorts = [ ... ];
|
# networking.firewall.allowedTCPPorts = [ ... ];
|
||||||
# networking.firewall.allowedUDPPorts = [ ... ];
|
# networking.firewall.allowedUDPPorts = [ ... ];
|
||||||
|
|||||||
63
hosts/nats1/configuration.nix
Normal file
63
hosts/nats1/configuration.nix
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
{
|
||||||
|
pkgs,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
../template/hardware-configuration.nix
|
||||||
|
|
||||||
|
../../system
|
||||||
|
../../common/vm
|
||||||
|
];
|
||||||
|
|
||||||
|
nixpkgs.config.allowUnfree = true;
|
||||||
|
# Use the systemd-boot EFI boot loader.
|
||||||
|
boot.loader.grub = {
|
||||||
|
enable = true;
|
||||||
|
device = "/dev/sda";
|
||||||
|
configurationLimit = 3;
|
||||||
|
};
|
||||||
|
|
||||||
|
networking.hostName = "nats1";
|
||||||
|
networking.domain = "home.2rjus.net";
|
||||||
|
networking.useNetworkd = true;
|
||||||
|
networking.useDHCP = false;
|
||||||
|
services.resolved.enable = true;
|
||||||
|
networking.nameservers = [
|
||||||
|
"10.69.13.5"
|
||||||
|
"10.69.13.6"
|
||||||
|
];
|
||||||
|
|
||||||
|
systemd.network.enable = true;
|
||||||
|
systemd.network.networks."ens18" = {
|
||||||
|
matchConfig.Name = "ens18";
|
||||||
|
address = [
|
||||||
|
"10.69.13.17/24"
|
||||||
|
];
|
||||||
|
routes = [
|
||||||
|
{ Gateway = "10.69.13.1"; }
|
||||||
|
];
|
||||||
|
linkConfig.RequiredForOnline = "routable";
|
||||||
|
};
|
||||||
|
time.timeZone = "Europe/Oslo";
|
||||||
|
|
||||||
|
nix.settings.experimental-features = [
|
||||||
|
"nix-command"
|
||||||
|
"flakes"
|
||||||
|
];
|
||||||
|
nix.settings.tarball-ttl = 0;
|
||||||
|
environment.systemPackages = with pkgs; [
|
||||||
|
vim
|
||||||
|
wget
|
||||||
|
git
|
||||||
|
];
|
||||||
|
|
||||||
|
# Open ports in the firewall.
|
||||||
|
# networking.firewall.allowedTCPPorts = [ ... ];
|
||||||
|
# networking.firewall.allowedUDPPorts = [ ... ];
|
||||||
|
# Or disable the firewall altogether.
|
||||||
|
networking.firewall.enable = false;
|
||||||
|
|
||||||
|
system.stateVersion = "23.11"; # Did you read the comment?
|
||||||
|
}
|
||||||
7
hosts/nats1/default.nix
Normal file
7
hosts/nats1/default.nix
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
./configuration.nix
|
||||||
|
../../services/nats
|
||||||
|
];
|
||||||
|
}
|
||||||
@@ -3,5 +3,7 @@
|
|||||||
imports = [
|
imports = [
|
||||||
./configuration.nix
|
./configuration.nix
|
||||||
../../services/nix-cache
|
../../services/nix-cache
|
||||||
|
../../services/actions-runner
|
||||||
|
./zram.nix
|
||||||
];
|
];
|
||||||
}
|
}
|
||||||
|
|||||||
6
hosts/nix-cache01/zram.nix
Normal file
6
hosts/nix-cache01/zram.nix
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
zramSwap = {
|
||||||
|
enable = true;
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -1,4 +1,10 @@
|
|||||||
{ config, lib, pkgs, modulesPath, ... }:
|
{
|
||||||
|
config,
|
||||||
|
lib,
|
||||||
|
pkgs,
|
||||||
|
modulesPath,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
|
||||||
{
|
{
|
||||||
imports = [
|
imports = [
|
||||||
@@ -13,17 +19,17 @@
|
|||||||
"sr_mod"
|
"sr_mod"
|
||||||
];
|
];
|
||||||
boot.initrd.kernelModules = [ "dm-snapshot" ];
|
boot.initrd.kernelModules = [ "dm-snapshot" ];
|
||||||
boot.kernelModules = [ ];
|
boot.kernelModules = [
|
||||||
|
"ptp_kvm"
|
||||||
|
];
|
||||||
boot.extraModulePackages = [ ];
|
boot.extraModulePackages = [ ];
|
||||||
|
|
||||||
fileSystems."/" =
|
fileSystems."/" = {
|
||||||
{
|
|
||||||
device = "/dev/disk/by-label/root";
|
device = "/dev/disk/by-label/root";
|
||||||
fsType = "xfs";
|
fsType = "xfs";
|
||||||
};
|
};
|
||||||
|
|
||||||
swapDevices =
|
swapDevices = [ { device = "/dev/disk/by-label/swap"; } ];
|
||||||
[{ device = "/dev/disk/by-label/swap"; }];
|
|
||||||
|
|
||||||
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
|
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
|
||||||
# (the default) this is the recommended approach. When using systemd-networkd it's
|
# (the default) this is the recommended approach. When using systemd-networkd it's
|
||||||
@@ -34,4 +40,3 @@
|
|||||||
|
|
||||||
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
|
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
73
hosts/template2/bootstrap.nix
Normal file
73
hosts/template2/bootstrap.nix
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
{ pkgs, config, lib, ... }:
|
||||||
|
let
|
||||||
|
bootstrap-script = pkgs.writeShellApplication {
|
||||||
|
name = "nixos-bootstrap";
|
||||||
|
runtimeInputs = with pkgs; [ systemd curl nixos-rebuild jq git ];
|
||||||
|
text = ''
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
# Read hostname set by cloud-init (from Terraform VM name via user-data)
|
||||||
|
# Cloud-init sets the system hostname from user-data.txt, so we read it from hostnamectl
|
||||||
|
HOSTNAME=$(hostnamectl hostname)
|
||||||
|
echo "DEBUG: Hostname from hostnamectl: '$HOSTNAME'"
|
||||||
|
|
||||||
|
echo "Starting NixOS bootstrap for host: $HOSTNAME"
|
||||||
|
echo "Waiting for network connectivity..."
|
||||||
|
|
||||||
|
# Verify we can reach the git server via HTTPS (doesn't respond to ping)
|
||||||
|
if ! curl -s --connect-timeout 5 --max-time 10 https://git.t-juice.club >/dev/null 2>&1; then
|
||||||
|
echo "ERROR: Cannot reach git.t-juice.club via HTTPS"
|
||||||
|
echo "Check network configuration and DNS settings"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Network connectivity confirmed"
|
||||||
|
echo "Fetching and building NixOS configuration from flake..."
|
||||||
|
|
||||||
|
# Read git branch from environment, default to master
|
||||||
|
BRANCH="''${NIXOS_FLAKE_BRANCH:-master}"
|
||||||
|
echo "Using git branch: $BRANCH"
|
||||||
|
|
||||||
|
# Build and activate the host-specific configuration
|
||||||
|
FLAKE_URL="git+https://git.t-juice.club/torjus/nixos-servers.git?ref=$BRANCH#''${HOSTNAME}"
|
||||||
|
|
||||||
|
if nixos-rebuild boot --flake "$FLAKE_URL"; then
|
||||||
|
echo "Successfully built configuration for $HOSTNAME"
|
||||||
|
echo "Rebooting into new configuration..."
|
||||||
|
sleep 2
|
||||||
|
systemctl reboot
|
||||||
|
else
|
||||||
|
echo "ERROR: nixos-rebuild failed for $HOSTNAME"
|
||||||
|
echo "Check that flake has configuration for this hostname"
|
||||||
|
echo "Manual intervention required - system will not reboot"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
in
|
||||||
|
{
|
||||||
|
systemd.services."nixos-bootstrap" = {
|
||||||
|
description = "Bootstrap NixOS configuration from flake on first boot";
|
||||||
|
|
||||||
|
# Wait for cloud-init to finish setting hostname and network to be online
|
||||||
|
after = [ "cloud-config.service" "network-online.target" ];
|
||||||
|
wants = [ "network-online.target" ];
|
||||||
|
requires = [ "cloud-config.service" ];
|
||||||
|
|
||||||
|
# Run on boot
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "oneshot";
|
||||||
|
RemainAfterExit = true;
|
||||||
|
ExecStart = "${bootstrap-script}/bin/nixos-bootstrap";
|
||||||
|
|
||||||
|
# Read environment variables from /etc/environment (set by cloud-init)
|
||||||
|
EnvironmentFile = "-/etc/environment";
|
||||||
|
|
||||||
|
# Logging to journald
|
||||||
|
StandardOutput = "journal+console";
|
||||||
|
StandardError = "journal+console";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
70
hosts/template2/configuration.nix
Normal file
70
hosts/template2/configuration.nix
Normal file
@@ -0,0 +1,70 @@
|
|||||||
|
{
|
||||||
|
config,
|
||||||
|
lib,
|
||||||
|
pkgs,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
./hardware-configuration.nix
|
||||||
|
../../system/sshd.nix
|
||||||
|
];
|
||||||
|
|
||||||
|
# Root user with no password but SSH key access for bootstrapping
|
||||||
|
users.users.root = {
|
||||||
|
hashedPassword = "";
|
||||||
|
openssh.authorizedKeys.keys = [
|
||||||
|
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAwfb2jpKrBnCw28aevnH8HbE5YbcMXpdaVv2KmueDu6 torjus@gunter"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
# Proxmox image-specific configuration
|
||||||
|
# Configure storage to use local-zfs instead of local-lvm
|
||||||
|
image.modules.proxmox = {
|
||||||
|
proxmox.qemuConf.virtio0 = lib.mkForce "local-zfs:vm-9999-disk-0";
|
||||||
|
proxmox.qemuConf.boot = lib.mkForce "order=virtio0";
|
||||||
|
proxmox.cloudInit.defaultStorage = lib.mkForce "local-zfs";
|
||||||
|
};
|
||||||
|
|
||||||
|
# Configure cloud-init to use ConfigDrive datasource (used by Proxmox)
|
||||||
|
services.cloud-init.settings = {
|
||||||
|
datasource_list = [ "ConfigDrive" "NoCloud" ];
|
||||||
|
};
|
||||||
|
|
||||||
|
boot.loader.grub.enable = true;
|
||||||
|
boot.loader.grub.device = "/dev/vda";
|
||||||
|
networking.hostName = "nixos-template2";
|
||||||
|
networking.domain = "home.2rjus.net";
|
||||||
|
networking.useNetworkd = true;
|
||||||
|
networking.useDHCP = false;
|
||||||
|
services.resolved.enable = true;
|
||||||
|
|
||||||
|
systemd.network.enable = true;
|
||||||
|
systemd.network.networks."ens18" = {
|
||||||
|
matchConfig.Name = "ens18";
|
||||||
|
networkConfig.DHCP = "ipv4";
|
||||||
|
linkConfig.RequiredForOnline = "routable";
|
||||||
|
};
|
||||||
|
time.timeZone = "Europe/Oslo";
|
||||||
|
|
||||||
|
nix.settings.experimental-features = [
|
||||||
|
"nix-command"
|
||||||
|
"flakes"
|
||||||
|
];
|
||||||
|
nix.settings.tarball-ttl = 0;
|
||||||
|
environment.systemPackages = with pkgs; [
|
||||||
|
age
|
||||||
|
vim
|
||||||
|
wget
|
||||||
|
git
|
||||||
|
];
|
||||||
|
|
||||||
|
# Open ports in the firewall.
|
||||||
|
# networking.firewall.allowedTCPPorts = [ ... ];
|
||||||
|
# networking.firewall.allowedUDPPorts = [ ... ];
|
||||||
|
# Or disable the firewall altogether.
|
||||||
|
networking.firewall.enable = false;
|
||||||
|
|
||||||
|
system.stateVersion = "25.11";
|
||||||
|
}
|
||||||
10
hosts/template2/default.nix
Normal file
10
hosts/template2/default.nix
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
./hardware-configuration.nix
|
||||||
|
./configuration.nix
|
||||||
|
./scripts.nix
|
||||||
|
./bootstrap.nix
|
||||||
|
../../system/packages.nix
|
||||||
|
];
|
||||||
|
}
|
||||||
45
hosts/template2/hardware-configuration.nix
Normal file
45
hosts/template2/hardware-configuration.nix
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
{
|
||||||
|
config,
|
||||||
|
lib,
|
||||||
|
pkgs,
|
||||||
|
modulesPath,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
(modulesPath + "/profiles/qemu-guest.nix")
|
||||||
|
];
|
||||||
|
boot.initrd.availableKernelModules = [
|
||||||
|
"ata_piix"
|
||||||
|
"uhci_hcd"
|
||||||
|
"virtio_pci"
|
||||||
|
"virtio_scsi"
|
||||||
|
"sd_mod"
|
||||||
|
"sr_mod"
|
||||||
|
];
|
||||||
|
boot.initrd.kernelModules = [ "dm-snapshot" ];
|
||||||
|
boot.kernelModules = [
|
||||||
|
"ptp_kvm"
|
||||||
|
"virtio_rng" # Provides entropy from host for fast SSH key generation
|
||||||
|
];
|
||||||
|
boot.extraModulePackages = [ ];
|
||||||
|
|
||||||
|
# Filesystem configuration matching Proxmox image builder output
|
||||||
|
fileSystems."/" = lib.mkDefault {
|
||||||
|
device = "/dev/disk/by-label/nixos";
|
||||||
|
fsType = "ext4";
|
||||||
|
options = [ "x-systemd.growfs" ];
|
||||||
|
};
|
||||||
|
|
||||||
|
swapDevices = lib.mkDefault [ ];
|
||||||
|
|
||||||
|
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
|
||||||
|
# (the default) this is the recommended approach. When using systemd-networkd it's
|
||||||
|
# still possible to use this option, but it's recommended to use it in conjunction
|
||||||
|
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
|
||||||
|
networking.useDHCP = lib.mkDefault true;
|
||||||
|
# networking.interfaces.ens18.useDHCP = lib.mkDefault true;
|
||||||
|
|
||||||
|
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
|
||||||
|
}
|
||||||
33
hosts/template2/scripts.nix
Normal file
33
hosts/template2/scripts.nix
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
{ pkgs, ... }:
|
||||||
|
let
|
||||||
|
prepare-host-script = pkgs.writeShellScriptBin "prepare-host.sh"
|
||||||
|
''
|
||||||
|
echo "Removing machine-id"
|
||||||
|
rm -f /etc/machine-id || true
|
||||||
|
|
||||||
|
echo "Removing SSH host keys"
|
||||||
|
rm -f /etc/ssh/ssh_host_* || true
|
||||||
|
|
||||||
|
echo "Restarting SSH"
|
||||||
|
systemctl restart sshd
|
||||||
|
|
||||||
|
echo "Removing temporary files"
|
||||||
|
rm -rf /tmp/* || true
|
||||||
|
|
||||||
|
echo "Removing logs"
|
||||||
|
journalctl --rotate || true
|
||||||
|
journalctl --vacuum-time=1s || true
|
||||||
|
|
||||||
|
echo "Removing cache"
|
||||||
|
rm -rf /var/cache/* || true
|
||||||
|
|
||||||
|
echo "Generate age key"
|
||||||
|
rm -rf /var/lib/sops-nix || true
|
||||||
|
mkdir -p /var/lib/sops-nix
|
||||||
|
${pkgs.age}/bin/age-keygen -o /var/lib/sops-nix/key.txt
|
||||||
|
'';
|
||||||
|
in
|
||||||
|
{
|
||||||
|
environment.systemPackages = [ prepare-host-script ];
|
||||||
|
users.motd = "Prepare host by running 'prepare-host.sh'.";
|
||||||
|
}
|
||||||
61
hosts/testvm01/configuration.nix
Normal file
61
hosts/testvm01/configuration.nix
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
{
|
||||||
|
config,
|
||||||
|
lib,
|
||||||
|
pkgs,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
../template2/hardware-configuration.nix
|
||||||
|
|
||||||
|
../../system
|
||||||
|
../../common/vm
|
||||||
|
];
|
||||||
|
|
||||||
|
nixpkgs.config.allowUnfree = true;
|
||||||
|
boot.loader.grub.enable = true;
|
||||||
|
boot.loader.grub.device = "/dev/vda";
|
||||||
|
|
||||||
|
networking.hostName = "testvm01";
|
||||||
|
networking.domain = "home.2rjus.net";
|
||||||
|
networking.useNetworkd = true;
|
||||||
|
networking.useDHCP = false;
|
||||||
|
services.resolved.enable = false;
|
||||||
|
networking.nameservers = [
|
||||||
|
"10.69.13.5"
|
||||||
|
"10.69.13.6"
|
||||||
|
];
|
||||||
|
|
||||||
|
systemd.network.enable = true;
|
||||||
|
systemd.network.networks."ens18" = {
|
||||||
|
matchConfig.Name = "ens18";
|
||||||
|
address = [
|
||||||
|
"10.69.13.101/24"
|
||||||
|
];
|
||||||
|
routes = [
|
||||||
|
{ Gateway = "10.69.13.1"; }
|
||||||
|
];
|
||||||
|
linkConfig.RequiredForOnline = "routable";
|
||||||
|
};
|
||||||
|
time.timeZone = "Europe/Oslo";
|
||||||
|
|
||||||
|
nix.settings.experimental-features = [
|
||||||
|
"nix-command"
|
||||||
|
"flakes"
|
||||||
|
];
|
||||||
|
nix.settings.tarball-ttl = 0;
|
||||||
|
environment.systemPackages = with pkgs; [
|
||||||
|
vim
|
||||||
|
wget
|
||||||
|
git
|
||||||
|
];
|
||||||
|
|
||||||
|
# Open ports in the firewall.
|
||||||
|
# networking.firewall.allowedTCPPorts = [ ... ];
|
||||||
|
# networking.firewall.allowedUDPPorts = [ ... ];
|
||||||
|
# Or disable the firewall altogether.
|
||||||
|
networking.firewall.enable = false;
|
||||||
|
|
||||||
|
system.stateVersion = "25.11"; # Did you read the comment?
|
||||||
|
}
|
||||||
63
hosts/vault01/configuration.nix
Normal file
63
hosts/vault01/configuration.nix
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
{
|
||||||
|
config,
|
||||||
|
lib,
|
||||||
|
pkgs,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
../template2/hardware-configuration.nix
|
||||||
|
|
||||||
|
../../system
|
||||||
|
../../common/vm
|
||||||
|
../../services/vault
|
||||||
|
];
|
||||||
|
|
||||||
|
nixpkgs.config.allowUnfree = true;
|
||||||
|
boot.loader.grub.enable = true;
|
||||||
|
boot.loader.grub.device = "/dev/vda";
|
||||||
|
|
||||||
|
networking.hostName = "vault01";
|
||||||
|
networking.domain = "home.2rjus.net";
|
||||||
|
networking.useNetworkd = true;
|
||||||
|
networking.useDHCP = false;
|
||||||
|
services.resolved.enable = true;
|
||||||
|
networking.nameservers = [
|
||||||
|
"10.69.13.5"
|
||||||
|
"10.69.13.6"
|
||||||
|
];
|
||||||
|
|
||||||
|
systemd.network.enable = true;
|
||||||
|
systemd.network.networks."ens18" = {
|
||||||
|
matchConfig.Name = "ens18";
|
||||||
|
address = [
|
||||||
|
"10.69.13.19/24"
|
||||||
|
];
|
||||||
|
routes = [
|
||||||
|
{ Gateway = "10.69.13.1"; }
|
||||||
|
];
|
||||||
|
linkConfig.RequiredForOnline = "routable";
|
||||||
|
};
|
||||||
|
time.timeZone = "Europe/Oslo";
|
||||||
|
|
||||||
|
nix.settings.experimental-features = [
|
||||||
|
"nix-command"
|
||||||
|
"flakes"
|
||||||
|
];
|
||||||
|
nix.settings.tarball-ttl = 0;
|
||||||
|
environment.systemPackages = with pkgs; [
|
||||||
|
vim
|
||||||
|
wget
|
||||||
|
git
|
||||||
|
];
|
||||||
|
|
||||||
|
# Open ports in the firewall.
|
||||||
|
# networking.firewall.allowedTCPPorts = [ ... ];
|
||||||
|
# networking.firewall.allowedUDPPorts = [ ... ];
|
||||||
|
# Or disable the firewall altogether.
|
||||||
|
networking.firewall.enable = false;
|
||||||
|
|
||||||
|
system.stateVersion = "25.11"; # Did you read the comment?
|
||||||
|
}
|
||||||
|
|
||||||
5
hosts/vault01/default.nix
Normal file
5
hosts/vault01/default.nix
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
{ ... }: {
|
||||||
|
imports = [
|
||||||
|
./configuration.nix
|
||||||
|
];
|
||||||
|
}
|
||||||
31
inventory
Executable file
31
inventory
Executable file
@@ -0,0 +1,31 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
import json
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
IGNORED_HOSTS = [
|
||||||
|
"inc1",
|
||||||
|
"inc2",
|
||||||
|
"media1",
|
||||||
|
"nixos-test1",
|
||||||
|
"ns3",
|
||||||
|
"ns4",
|
||||||
|
"template1",
|
||||||
|
]
|
||||||
|
|
||||||
|
result = subprocess.run(["nix", "flake", "show", "--json"], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
|
||||||
|
results = json.loads(result.stdout)
|
||||||
|
|
||||||
|
configs = results.get("nixosConfigurations")
|
||||||
|
hosts = [x for x in configs.keys() if x not in IGNORED_HOSTS]
|
||||||
|
|
||||||
|
output = {
|
||||||
|
"all": {
|
||||||
|
"hosts": hosts,
|
||||||
|
"vars": {
|
||||||
|
"ansible_python_interpreter": "/run/current-system/sw/bin/python3"
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
print(json.dumps(output))
|
||||||
101
playbooks/build-and-deploy-template.yml
Normal file
101
playbooks/build-and-deploy-template.yml
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
---
|
||||||
|
- name: Build and deploy NixOS Proxmox template
|
||||||
|
hosts: localhost
|
||||||
|
gather_facts: false
|
||||||
|
|
||||||
|
vars:
|
||||||
|
template_name: "template2"
|
||||||
|
nixos_config: "template2"
|
||||||
|
proxmox_node: "pve1.home.2rjus.net" # Change to your Proxmox node name
|
||||||
|
proxmox_host: "pve1.home.2rjus.net" # Change to your Proxmox host
|
||||||
|
template_vmid: 9000 # Template VM ID
|
||||||
|
storage: "local-zfs"
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: Build NixOS image
|
||||||
|
ansible.builtin.command:
|
||||||
|
cmd: "nixos-rebuild build-image --image-variant proxmox --flake .#template2"
|
||||||
|
chdir: "{{ playbook_dir }}/.."
|
||||||
|
register: build_result
|
||||||
|
changed_when: true
|
||||||
|
|
||||||
|
- name: Find built image file
|
||||||
|
ansible.builtin.find:
|
||||||
|
paths: "{{ playbook_dir}}/../result"
|
||||||
|
patterns: "*.vma.zst"
|
||||||
|
recurse: true
|
||||||
|
register: image_files
|
||||||
|
|
||||||
|
- name: Fail if no image found
|
||||||
|
ansible.builtin.fail:
|
||||||
|
msg: "No QCOW2 image found in build output"
|
||||||
|
when: image_files.matched == 0
|
||||||
|
|
||||||
|
- name: Set image path
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
image_path: "{{ image_files.files[0].path }}"
|
||||||
|
|
||||||
|
- name: Extract image filename
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
image_filename: "{{ image_path | basename }}"
|
||||||
|
|
||||||
|
- name: Display image info
|
||||||
|
ansible.builtin.debug:
|
||||||
|
msg: "Built image: {{ image_path }} ({{ image_filename }})"
|
||||||
|
|
||||||
|
- name: Deploy template to Proxmox
|
||||||
|
hosts: proxmox
|
||||||
|
gather_facts: false
|
||||||
|
|
||||||
|
vars:
|
||||||
|
template_name: "template2"
|
||||||
|
template_vmid: 9000
|
||||||
|
storage: "local-zfs"
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: Get image path and filename from localhost
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
image_path: "{{ hostvars['localhost']['image_path'] }}"
|
||||||
|
image_filename: "{{ hostvars['localhost']['image_filename'] }}"
|
||||||
|
|
||||||
|
- name: Set destination path
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
image_dest: "/var/lib/vz/dump/{{ image_filename }}"
|
||||||
|
|
||||||
|
- name: Copy image to Proxmox
|
||||||
|
ansible.builtin.copy:
|
||||||
|
src: "{{ image_path }}"
|
||||||
|
dest: "{{ image_dest }}"
|
||||||
|
mode: '0644'
|
||||||
|
|
||||||
|
- name: Check if template VM already exists
|
||||||
|
ansible.builtin.command:
|
||||||
|
cmd: "qm status {{ template_vmid }}"
|
||||||
|
register: vm_status
|
||||||
|
failed_when: false
|
||||||
|
changed_when: false
|
||||||
|
|
||||||
|
- name: Destroy existing template VM if it exists
|
||||||
|
ansible.builtin.command:
|
||||||
|
cmd: "qm destroy {{ template_vmid }} --purge"
|
||||||
|
when: vm_status.rc == 0
|
||||||
|
changed_when: true
|
||||||
|
|
||||||
|
- name: Import image
|
||||||
|
ansible.builtin.command:
|
||||||
|
cmd: "qmrestore {{ image_dest }} {{ template_vmid }}"
|
||||||
|
changed_when: true
|
||||||
|
|
||||||
|
- name: Convert VM to template
|
||||||
|
ansible.builtin.command:
|
||||||
|
cmd: "qm template {{ template_vmid }}"
|
||||||
|
changed_when: true
|
||||||
|
|
||||||
|
- name: Clean up uploaded image
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: "{{ image_dest }}"
|
||||||
|
state: absent
|
||||||
|
|
||||||
|
- name: Display success message
|
||||||
|
ansible.builtin.debug:
|
||||||
|
msg: "Template VM {{ template_vmid }} created successfully on {{ storage }}"
|
||||||
5
playbooks/inventory.ini
Normal file
5
playbooks/inventory.ini
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
[proxmox]
|
||||||
|
pve1.home.2rjus.net
|
||||||
|
|
||||||
|
[proxmox:vars]
|
||||||
|
ansible_user=root
|
||||||
9
playbooks/run-upgrade.yml
Normal file
9
playbooks/run-upgrade.yml
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
---
|
||||||
|
- name: Trigger nixos-upgrade job on all hosts
|
||||||
|
hosts: all
|
||||||
|
remote_user: root
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- ansible.builtin.systemd_service:
|
||||||
|
name: nixos-upgrade.service
|
||||||
|
state: started
|
||||||
1
scripts/create-host/MANIFEST.in
Normal file
1
scripts/create-host/MANIFEST.in
Normal file
@@ -0,0 +1 @@
|
|||||||
|
recursive-include templates *.j2
|
||||||
268
scripts/create-host/README.md
Normal file
268
scripts/create-host/README.md
Normal file
@@ -0,0 +1,268 @@
|
|||||||
|
# NixOS Host Configuration Generator
|
||||||
|
|
||||||
|
Automated tool for generating NixOS host configurations, flake.nix entries, and Terraform VM definitions for homelab infrastructure.
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
The tool is available in the Nix development shell:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
nix develop
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Basic Usage
|
||||||
|
|
||||||
|
Create a new host with DHCP networking:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python -m scripts.create_host.create_host create --hostname test01
|
||||||
|
```
|
||||||
|
|
||||||
|
Create a new host with static IP:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python -m scripts.create_host.create_host create \
|
||||||
|
--hostname test01 \
|
||||||
|
--ip 10.69.13.50/24
|
||||||
|
```
|
||||||
|
|
||||||
|
Create a host with custom resources:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python -m scripts.create_host.create_host create \
|
||||||
|
--hostname bighost01 \
|
||||||
|
--ip 10.69.13.51/24 \
|
||||||
|
--cpu 8 \
|
||||||
|
--memory 8192 \
|
||||||
|
--disk 100G
|
||||||
|
```
|
||||||
|
|
||||||
|
### Dry Run Mode
|
||||||
|
|
||||||
|
Preview what would be created without making changes:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python -m scripts.create_host.create_host create \
|
||||||
|
--hostname test01 \
|
||||||
|
--ip 10.69.13.50/24 \
|
||||||
|
--dry-run
|
||||||
|
```
|
||||||
|
|
||||||
|
### Force Mode (Regenerate Existing Configuration)
|
||||||
|
|
||||||
|
Overwrite an existing host configuration (useful for testing):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python -m scripts.create_host.create_host create \
|
||||||
|
--hostname test01 \
|
||||||
|
--ip 10.69.13.50/24 \
|
||||||
|
--force
|
||||||
|
```
|
||||||
|
|
||||||
|
This mode:
|
||||||
|
- Skips hostname and IP uniqueness validation
|
||||||
|
- Overwrites files in `hosts/<hostname>/`
|
||||||
|
- Updates existing entries in `flake.nix` and `terraform/vms.tf` (doesn't duplicate)
|
||||||
|
- Useful for iterating on configuration templates during testing
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
- `--hostname` (required): Hostname for the new host
|
||||||
|
- Must be lowercase alphanumeric with hyphens
|
||||||
|
- Must be unique (not already exist in repository)
|
||||||
|
|
||||||
|
- `--ip` (optional): Static IP address with CIDR notation
|
||||||
|
- Format: `10.69.13.X/24`
|
||||||
|
- Must be in `10.69.13.0/24` subnet
|
||||||
|
- Last octet must be 1-254
|
||||||
|
- Omit this option for DHCP configuration
|
||||||
|
|
||||||
|
- `--cpu` (optional, default: 2): Number of CPU cores
|
||||||
|
- Must be at least 1
|
||||||
|
|
||||||
|
- `--memory` (optional, default: 2048): Memory in MB
|
||||||
|
- Must be at least 512
|
||||||
|
|
||||||
|
- `--disk` (optional, default: "20G"): Disk size
|
||||||
|
- Examples: "20G", "50G", "100G"
|
||||||
|
|
||||||
|
- `--dry-run` (flag): Preview changes without creating files
|
||||||
|
|
||||||
|
- `--force` (flag): Overwrite existing host configuration
|
||||||
|
- Skips uniqueness validation
|
||||||
|
- Updates existing entries instead of creating duplicates
|
||||||
|
|
||||||
|
## What It Does
|
||||||
|
|
||||||
|
The tool performs the following actions:
|
||||||
|
|
||||||
|
1. **Validates** the configuration:
|
||||||
|
- Hostname format (RFC 1123 compliance)
|
||||||
|
- Hostname uniqueness
|
||||||
|
- IP address format and subnet (if provided)
|
||||||
|
- IP address uniqueness (if provided)
|
||||||
|
|
||||||
|
2. **Generates** host configuration files:
|
||||||
|
- `hosts/<hostname>/default.nix` - Import wrapper
|
||||||
|
- `hosts/<hostname>/configuration.nix` - Full host configuration
|
||||||
|
|
||||||
|
3. **Updates** repository files:
|
||||||
|
- `flake.nix` - Adds new nixosConfigurations entry
|
||||||
|
- `terraform/vms.tf` - Adds new VM definition
|
||||||
|
|
||||||
|
4. **Displays** next steps for:
|
||||||
|
- Reviewing changes with git diff
|
||||||
|
- Verifying NixOS configuration
|
||||||
|
- Verifying Terraform configuration
|
||||||
|
- Committing changes
|
||||||
|
- Deploying the VM
|
||||||
|
|
||||||
|
## Generated Configuration
|
||||||
|
|
||||||
|
### Host Features
|
||||||
|
|
||||||
|
All generated hosts include:
|
||||||
|
|
||||||
|
- Full system imports from `../../system`:
|
||||||
|
- Nix binary cache integration
|
||||||
|
- SSH with root login
|
||||||
|
- SOPS secrets management
|
||||||
|
- Internal ACME CA integration
|
||||||
|
- Daily auto-upgrades with auto-reboot
|
||||||
|
- Prometheus node-exporter
|
||||||
|
- Promtail logging to monitoring01
|
||||||
|
|
||||||
|
- VM guest agent from `../../common/vm`
|
||||||
|
- Hardware configuration from `../template/hardware-configuration.nix`
|
||||||
|
|
||||||
|
### Networking
|
||||||
|
|
||||||
|
**Static IP mode** (when `--ip` is provided):
|
||||||
|
```nix
|
||||||
|
systemd.network.networks."ens18" = {
|
||||||
|
matchConfig.Name = "ens18";
|
||||||
|
address = [ "10.69.13.50/24" ];
|
||||||
|
routes = [ { Gateway = "10.69.13.1"; } ];
|
||||||
|
linkConfig.RequiredForOnline = "routable";
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
**DHCP mode** (when `--ip` is omitted):
|
||||||
|
```nix
|
||||||
|
systemd.network.networks."ens18" = {
|
||||||
|
matchConfig.Name = "ens18";
|
||||||
|
networkConfig.DHCP = "ipv4";
|
||||||
|
linkConfig.RequiredForOnline = "routable";
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
### DNS Configuration
|
||||||
|
|
||||||
|
All hosts are configured with:
|
||||||
|
- DNS servers: `10.69.13.5`, `10.69.13.6` (ns1, ns2)
|
||||||
|
- Domain: `home.2rjus.net`
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### Create a test VM with defaults
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python -m scripts.create_host.create_host create --hostname test99
|
||||||
|
```
|
||||||
|
|
||||||
|
This creates a DHCP VM with 2 CPU cores, 2048 MB memory, and 20G disk.
|
||||||
|
|
||||||
|
### Create a database server with static IP
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python -m scripts.create_host.create_host create \
|
||||||
|
--hostname pgdb2 \
|
||||||
|
--ip 10.69.13.52/24 \
|
||||||
|
--cpu 4 \
|
||||||
|
--memory 4096 \
|
||||||
|
--disk 50G
|
||||||
|
```
|
||||||
|
|
||||||
|
### Preview changes before creating
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python -m scripts.create_host.create_host create \
|
||||||
|
--hostname test99 \
|
||||||
|
--ip 10.69.13.99/24 \
|
||||||
|
--dry-run
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
The tool validates input and provides clear error messages for:
|
||||||
|
|
||||||
|
- Invalid hostname format (must be lowercase alphanumeric with hyphens)
|
||||||
|
- Duplicate hostname (already exists in repository)
|
||||||
|
- Invalid IP format (must be X.X.X.X/24)
|
||||||
|
- Wrong subnet (must be 10.69.13.0/24)
|
||||||
|
- Invalid last octet (must be 1-254)
|
||||||
|
- Duplicate IP address (already in use)
|
||||||
|
- Resource constraints (CPU < 1, memory < 512 MB)
|
||||||
|
|
||||||
|
## Integration with Deployment Pipeline
|
||||||
|
|
||||||
|
This tool implements **Phase 2** of the automated deployment pipeline:
|
||||||
|
|
||||||
|
1. **Phase 1**: Template building ✓ (build-and-deploy-template.yml)
|
||||||
|
2. **Phase 2**: Host configuration generation ✓ (this tool)
|
||||||
|
3. **Phase 3**: Bootstrap automation (planned)
|
||||||
|
4. **Phase 4**: Secrets management (planned)
|
||||||
|
5. **Phase 5**: DNS automation (planned)
|
||||||
|
6. **Phase 6**: Full integration (planned)
|
||||||
|
|
||||||
|
## Development
|
||||||
|
|
||||||
|
### Project Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
scripts/create-host/
|
||||||
|
├── create_host.py # Main CLI entry point (typer app)
|
||||||
|
├── __init__.py # Package initialization
|
||||||
|
├── validators.py # Validation logic
|
||||||
|
├── generators.py # File generation using Jinja2
|
||||||
|
├── manipulators.py # Text manipulation for flake.nix and vms.tf
|
||||||
|
├── models.py # Data models (HostConfig)
|
||||||
|
├── templates/
|
||||||
|
│ ├── default.nix.j2 # Template for default.nix
|
||||||
|
│ └── configuration.nix.j2 # Template for configuration.nix
|
||||||
|
└── README.md # This file
|
||||||
|
```
|
||||||
|
|
||||||
|
### Testing
|
||||||
|
|
||||||
|
Run the test cases from the implementation plan:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Test 1: DHCP host with defaults
|
||||||
|
python -m scripts.create_host.create_host create --hostname testdhcp --dry-run
|
||||||
|
|
||||||
|
# Test 2: Static IP host
|
||||||
|
python -m scripts.create_host.create_host create \
|
||||||
|
--hostname test50 --ip 10.69.13.50/24 --dry-run
|
||||||
|
|
||||||
|
# Test 3: Custom resources
|
||||||
|
python -m scripts.create_host.create_host create \
|
||||||
|
--hostname test51 --ip 10.69.13.51/24 \
|
||||||
|
--cpu 8 --memory 8192 --disk 100G --dry-run
|
||||||
|
|
||||||
|
# Test 4: Duplicate hostname (should error)
|
||||||
|
python -m scripts.create_host.create_host create --hostname ns1 --dry-run
|
||||||
|
|
||||||
|
# Test 5: Invalid subnet (should error)
|
||||||
|
python -m scripts.create_host.create_host create \
|
||||||
|
--hostname testbad --ip 192.168.1.50/24 --dry-run
|
||||||
|
|
||||||
|
# Test 6: Invalid hostname (should error)
|
||||||
|
python -m scripts.create_host.create_host create --hostname Test_Host --dry-run
|
||||||
|
```
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
Part of the nixos-servers homelab infrastructure repository.
|
||||||
3
scripts/create-host/__init__.py
Normal file
3
scripts/create-host/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""NixOS host configuration generator for homelab infrastructure."""
|
||||||
|
|
||||||
|
__version__ = "0.1.0"
|
||||||
6
scripts/create-host/__main__.py
Normal file
6
scripts/create-host/__main__.py
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
"""Entry point for running the create-host module."""
|
||||||
|
|
||||||
|
from .create_host import app
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
app()
|
||||||
197
scripts/create-host/create_host.py
Normal file
197
scripts/create-host/create_host.py
Normal file
@@ -0,0 +1,197 @@
|
|||||||
|
"""CLI tool for generating NixOS host configurations."""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
import typer
|
||||||
|
from rich.console import Console
|
||||||
|
from rich.panel import Panel
|
||||||
|
from rich.table import Table
|
||||||
|
|
||||||
|
from generators import generate_host_files
|
||||||
|
from manipulators import update_flake_nix, update_terraform_vms
|
||||||
|
from models import HostConfig
|
||||||
|
from validators import (
|
||||||
|
validate_hostname_format,
|
||||||
|
validate_hostname_unique,
|
||||||
|
validate_ip_subnet,
|
||||||
|
validate_ip_unique,
|
||||||
|
)
|
||||||
|
|
||||||
|
app = typer.Typer(
|
||||||
|
name="create-host",
|
||||||
|
help="Generate NixOS host configurations for homelab infrastructure",
|
||||||
|
add_completion=False,
|
||||||
|
)
|
||||||
|
console = Console()
|
||||||
|
|
||||||
|
|
||||||
|
def get_repo_root() -> Path:
|
||||||
|
"""Get the repository root directory."""
|
||||||
|
# Use current working directory as repo root
|
||||||
|
# The tool should be run from the repository root
|
||||||
|
return Path.cwd()
|
||||||
|
|
||||||
|
|
||||||
|
@app.callback(invoke_without_command=True)
|
||||||
|
def main(
|
||||||
|
ctx: typer.Context,
|
||||||
|
hostname: Optional[str] = typer.Option(None, "--hostname", help="Hostname for the new host"),
|
||||||
|
ip: Optional[str] = typer.Option(
|
||||||
|
None, "--ip", help="Static IP address with CIDR (e.g., 10.69.13.50/24). Omit for DHCP."
|
||||||
|
),
|
||||||
|
cpu: int = typer.Option(2, "--cpu", help="Number of CPU cores"),
|
||||||
|
memory: int = typer.Option(2048, "--memory", help="Memory in MB"),
|
||||||
|
disk: str = typer.Option("20G", "--disk", help="Disk size (e.g., 20G, 50G, 100G)"),
|
||||||
|
dry_run: bool = typer.Option(False, "--dry-run", help="Preview changes without creating files"),
|
||||||
|
force: bool = typer.Option(False, "--force", help="Overwrite existing host configuration"),
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
Create a new NixOS host configuration.
|
||||||
|
|
||||||
|
Generates host configuration files, updates flake.nix, and adds Terraform VM definition.
|
||||||
|
"""
|
||||||
|
# Show help if no hostname provided
|
||||||
|
if hostname is None:
|
||||||
|
console.print("[bold red]Error:[/bold red] --hostname is required\n")
|
||||||
|
ctx.get_help()
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Build configuration
|
||||||
|
config = HostConfig(
|
||||||
|
hostname=hostname,
|
||||||
|
ip=ip,
|
||||||
|
cpu=cpu,
|
||||||
|
memory=memory,
|
||||||
|
disk=disk,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Get repository root
|
||||||
|
repo_root = get_repo_root()
|
||||||
|
|
||||||
|
# Validate configuration
|
||||||
|
console.print("\n[bold blue]Validating configuration...[/bold blue]")
|
||||||
|
|
||||||
|
config.validate()
|
||||||
|
validate_hostname_format(hostname)
|
||||||
|
|
||||||
|
# Skip uniqueness checks in force mode
|
||||||
|
if not force:
|
||||||
|
validate_hostname_unique(hostname, repo_root)
|
||||||
|
if ip:
|
||||||
|
validate_ip_unique(ip, repo_root)
|
||||||
|
else:
|
||||||
|
# Check if we're actually overwriting something
|
||||||
|
host_dir = repo_root / "hosts" / hostname
|
||||||
|
if host_dir.exists():
|
||||||
|
console.print(f"[yellow]⚠[/yellow] Updating existing host configuration for {hostname}")
|
||||||
|
|
||||||
|
if ip:
|
||||||
|
validate_ip_subnet(ip)
|
||||||
|
|
||||||
|
console.print("[green]✓[/green] All validations passed\n")
|
||||||
|
|
||||||
|
# Display configuration summary
|
||||||
|
display_config_summary(config)
|
||||||
|
|
||||||
|
# Dry run mode - exit before making changes
|
||||||
|
if dry_run:
|
||||||
|
console.print("\n[yellow]DRY RUN MODE - No files will be created[/yellow]\n")
|
||||||
|
display_dry_run_summary(config, repo_root)
|
||||||
|
return
|
||||||
|
|
||||||
|
# Generate files
|
||||||
|
console.print("\n[bold blue]Generating host configuration...[/bold blue]")
|
||||||
|
|
||||||
|
generate_host_files(config, repo_root)
|
||||||
|
action = "Updated" if force else "Created"
|
||||||
|
console.print(f"[green]✓[/green] {action} hosts/{hostname}/default.nix")
|
||||||
|
console.print(f"[green]✓[/green] {action} hosts/{hostname}/configuration.nix")
|
||||||
|
|
||||||
|
update_flake_nix(config, repo_root, force=force)
|
||||||
|
console.print("[green]✓[/green] Updated flake.nix")
|
||||||
|
|
||||||
|
update_terraform_vms(config, repo_root, force=force)
|
||||||
|
console.print("[green]✓[/green] Updated terraform/vms.tf")
|
||||||
|
|
||||||
|
# Success message
|
||||||
|
console.print("\n[bold green]✓ Host configuration generated successfully![/bold green]\n")
|
||||||
|
|
||||||
|
# Display next steps
|
||||||
|
display_next_steps(hostname)
|
||||||
|
|
||||||
|
except ValueError as e:
|
||||||
|
console.print(f"\n[bold red]Error:[/bold red] {e}\n", style="red")
|
||||||
|
sys.exit(1)
|
||||||
|
except Exception as e:
|
||||||
|
console.print(f"\n[bold red]Unexpected error:[/bold red] {e}\n", style="red")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
def display_config_summary(config: HostConfig) -> None:
|
||||||
|
"""Display configuration summary table."""
|
||||||
|
table = Table(title="Host Configuration", show_header=False)
|
||||||
|
table.add_column("Property", style="cyan")
|
||||||
|
table.add_column("Value", style="white")
|
||||||
|
|
||||||
|
table.add_row("Hostname", config.hostname)
|
||||||
|
table.add_row("Domain", config.domain)
|
||||||
|
table.add_row("Network Mode", "Static IP" if config.is_static_ip else "DHCP")
|
||||||
|
|
||||||
|
if config.is_static_ip:
|
||||||
|
table.add_row("IP Address", config.ip)
|
||||||
|
table.add_row("Gateway", config.gateway)
|
||||||
|
|
||||||
|
table.add_row("DNS Servers", ", ".join(config.nameservers))
|
||||||
|
table.add_row("CPU Cores", str(config.cpu))
|
||||||
|
table.add_row("Memory", f"{config.memory} MB")
|
||||||
|
table.add_row("Disk Size", config.disk)
|
||||||
|
table.add_row("State Version", config.state_version)
|
||||||
|
|
||||||
|
console.print(table)
|
||||||
|
|
||||||
|
|
||||||
|
def display_dry_run_summary(config: HostConfig, repo_root: Path) -> None:
|
||||||
|
"""Display what would be created in dry run mode."""
|
||||||
|
console.print("[bold]Files that would be created:[/bold]")
|
||||||
|
console.print(f" • {repo_root}/hosts/{config.hostname}/default.nix")
|
||||||
|
console.print(f" • {repo_root}/hosts/{config.hostname}/configuration.nix")
|
||||||
|
|
||||||
|
console.print("\n[bold]Files that would be modified:[/bold]")
|
||||||
|
console.print(f" • {repo_root}/flake.nix (add nixosConfigurations.{config.hostname})")
|
||||||
|
console.print(f" • {repo_root}/terraform/vms.tf (add VM definition)")
|
||||||
|
|
||||||
|
|
||||||
|
def display_next_steps(hostname: str) -> None:
|
||||||
|
"""Display next steps after successful generation."""
|
||||||
|
next_steps = f"""[bold cyan]Next Steps:[/bold cyan]
|
||||||
|
|
||||||
|
1. Review changes:
|
||||||
|
[white]git diff[/white]
|
||||||
|
|
||||||
|
2. Verify NixOS configuration:
|
||||||
|
[white]nix flake check
|
||||||
|
nix build .#nixosConfigurations.{hostname}.config.system.build.toplevel[/white]
|
||||||
|
|
||||||
|
3. Verify Terraform configuration:
|
||||||
|
[white]cd terraform
|
||||||
|
tofu validate
|
||||||
|
tofu plan[/white]
|
||||||
|
|
||||||
|
4. Commit changes:
|
||||||
|
[white]git add hosts/{hostname} flake.nix terraform/vms.tf
|
||||||
|
git commit -m "hosts: add {hostname} configuration"[/white]
|
||||||
|
|
||||||
|
5. Deploy VM (after merging to master):
|
||||||
|
[white]cd terraform
|
||||||
|
tofu apply[/white]
|
||||||
|
|
||||||
|
6. Bootstrap the host (see Phase 3 of deployment pipeline)
|
||||||
|
"""
|
||||||
|
console.print(Panel(next_steps, border_style="cyan"))
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
app()
|
||||||
38
scripts/create-host/default.nix
Normal file
38
scripts/create-host/default.nix
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
{ lib
|
||||||
|
, python3
|
||||||
|
, python3Packages
|
||||||
|
}:
|
||||||
|
|
||||||
|
python3Packages.buildPythonApplication {
|
||||||
|
pname = "create-host";
|
||||||
|
version = "0.1.0";
|
||||||
|
|
||||||
|
src = ./.;
|
||||||
|
|
||||||
|
pyproject = true;
|
||||||
|
|
||||||
|
build-system = with python3Packages; [
|
||||||
|
setuptools
|
||||||
|
];
|
||||||
|
|
||||||
|
propagatedBuildInputs = with python3Packages; [
|
||||||
|
typer
|
||||||
|
jinja2
|
||||||
|
rich
|
||||||
|
];
|
||||||
|
|
||||||
|
# Install templates to share directory
|
||||||
|
postInstall = ''
|
||||||
|
mkdir -p $out/share/create-host
|
||||||
|
cp -r templates $out/share/create-host/
|
||||||
|
'';
|
||||||
|
|
||||||
|
# No tests yet
|
||||||
|
doCheck = false;
|
||||||
|
|
||||||
|
meta = with lib; {
|
||||||
|
description = "NixOS host configuration generator for homelab infrastructure";
|
||||||
|
license = licenses.mit;
|
||||||
|
maintainers = [ ];
|
||||||
|
};
|
||||||
|
}
|
||||||
88
scripts/create-host/generators.py
Normal file
88
scripts/create-host/generators.py
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
"""File generation using Jinja2 templates."""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from jinja2 import Environment, BaseLoader, TemplateNotFound
|
||||||
|
|
||||||
|
from models import HostConfig
|
||||||
|
|
||||||
|
|
||||||
|
class PackageTemplateLoader(BaseLoader):
|
||||||
|
"""Custom Jinja2 loader that works with both dev and installed packages."""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
# Try to find templates in multiple locations
|
||||||
|
self.template_dirs = []
|
||||||
|
|
||||||
|
# Location 1: Development (scripts/create-host/templates)
|
||||||
|
dev_dir = Path(__file__).parent / "templates"
|
||||||
|
if dev_dir.exists():
|
||||||
|
self.template_dirs.append(dev_dir)
|
||||||
|
|
||||||
|
# Location 2: Installed via Nix (../share/create-host/templates from bin dir)
|
||||||
|
# When installed via Nix, __file__ is in lib/python3.X/site-packages/
|
||||||
|
# and templates are in ../../../share/create-host/templates
|
||||||
|
for site_path in sys.path:
|
||||||
|
site_dir = Path(site_path)
|
||||||
|
# Try to find the Nix store path
|
||||||
|
if "site-packages" in str(site_dir):
|
||||||
|
# Go up to the package root (e.g., /nix/store/xxx-create-host-0.1.0)
|
||||||
|
pkg_root = site_dir.parent.parent.parent
|
||||||
|
share_templates = pkg_root / "share" / "create-host" / "templates"
|
||||||
|
if share_templates.exists():
|
||||||
|
self.template_dirs.append(share_templates)
|
||||||
|
|
||||||
|
# Location 3: Fallback - sys.path templates
|
||||||
|
for site_path in sys.path:
|
||||||
|
site_templates = Path(site_path) / "templates"
|
||||||
|
if site_templates.exists():
|
||||||
|
self.template_dirs.append(site_templates)
|
||||||
|
|
||||||
|
def get_source(self, environment, template):
|
||||||
|
for template_dir in self.template_dirs:
|
||||||
|
template_path = template_dir / template
|
||||||
|
if template_path.exists():
|
||||||
|
mtime = template_path.stat().st_mtime
|
||||||
|
source = template_path.read_text()
|
||||||
|
return source, str(template_path), lambda: mtime == template_path.stat().st_mtime
|
||||||
|
|
||||||
|
raise TemplateNotFound(template)
|
||||||
|
|
||||||
|
|
||||||
|
def generate_host_files(config: HostConfig, repo_root: Path) -> None:
|
||||||
|
"""
|
||||||
|
Generate host configuration files from templates.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
config: Host configuration
|
||||||
|
repo_root: Path to repository root
|
||||||
|
"""
|
||||||
|
# Setup Jinja2 environment with custom loader
|
||||||
|
env = Environment(
|
||||||
|
loader=PackageTemplateLoader(),
|
||||||
|
trim_blocks=True,
|
||||||
|
lstrip_blocks=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Create host directory
|
||||||
|
host_dir = repo_root / "hosts" / config.hostname
|
||||||
|
host_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
# Generate default.nix
|
||||||
|
default_template = env.get_template("default.nix.j2")
|
||||||
|
default_content = default_template.render(hostname=config.hostname)
|
||||||
|
(host_dir / "default.nix").write_text(default_content)
|
||||||
|
|
||||||
|
# Generate configuration.nix
|
||||||
|
config_template = env.get_template("configuration.nix.j2")
|
||||||
|
config_content = config_template.render(
|
||||||
|
hostname=config.hostname,
|
||||||
|
domain=config.domain,
|
||||||
|
nameservers=config.nameservers,
|
||||||
|
is_static_ip=config.is_static_ip,
|
||||||
|
ip=config.ip,
|
||||||
|
gateway=config.gateway,
|
||||||
|
state_version=config.state_version,
|
||||||
|
)
|
||||||
|
(host_dir / "configuration.nix").write_text(config_content)
|
||||||
124
scripts/create-host/manipulators.py
Normal file
124
scripts/create-host/manipulators.py
Normal file
@@ -0,0 +1,124 @@
|
|||||||
|
"""Text manipulation for flake.nix and Terraform files."""
|
||||||
|
|
||||||
|
import re
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from models import HostConfig
|
||||||
|
|
||||||
|
|
||||||
|
def update_flake_nix(config: HostConfig, repo_root: Path, force: bool = False) -> None:
|
||||||
|
"""
|
||||||
|
Add or update host entry in flake.nix nixosConfigurations.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
config: Host configuration
|
||||||
|
repo_root: Path to repository root
|
||||||
|
force: If True, replace existing entry; if False, insert new entry
|
||||||
|
"""
|
||||||
|
flake_path = repo_root / "flake.nix"
|
||||||
|
content = flake_path.read_text()
|
||||||
|
|
||||||
|
# Create new entry
|
||||||
|
new_entry = f""" {config.hostname} = nixpkgs.lib.nixosSystem {{
|
||||||
|
inherit system;
|
||||||
|
specialArgs = {{
|
||||||
|
inherit inputs self sops-nix;
|
||||||
|
}};
|
||||||
|
modules = [
|
||||||
|
(
|
||||||
|
{{ config, pkgs, ... }}:
|
||||||
|
{{
|
||||||
|
nixpkgs.overlays = commonOverlays;
|
||||||
|
}}
|
||||||
|
)
|
||||||
|
./hosts/{config.hostname}
|
||||||
|
sops-nix.nixosModules.sops
|
||||||
|
];
|
||||||
|
}};
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Check if hostname already exists
|
||||||
|
hostname_pattern = rf"^ {re.escape(config.hostname)} = nixpkgs\.lib\.nixosSystem"
|
||||||
|
existing_match = re.search(hostname_pattern, content, re.MULTILINE)
|
||||||
|
|
||||||
|
if existing_match and force:
|
||||||
|
# Replace existing entry
|
||||||
|
# Match the entire block from "hostname = " to "};"
|
||||||
|
replace_pattern = rf"^ {re.escape(config.hostname)} = nixpkgs\.lib\.nixosSystem \{{.*?^ \}};\n"
|
||||||
|
new_content, count = re.subn(replace_pattern, new_entry, content, flags=re.MULTILINE | re.DOTALL)
|
||||||
|
|
||||||
|
if count == 0:
|
||||||
|
raise ValueError(f"Could not find existing entry for {config.hostname} in flake.nix")
|
||||||
|
else:
|
||||||
|
# Insert new entry before closing brace of nixosConfigurations
|
||||||
|
# Pattern: " };\n packages = forAllSystems"
|
||||||
|
pattern = r"( \};)\n( packages = forAllSystems)"
|
||||||
|
replacement = rf"{new_entry}\g<1>\n\g<2>"
|
||||||
|
|
||||||
|
new_content, count = re.subn(pattern, replacement, content)
|
||||||
|
|
||||||
|
if count == 0:
|
||||||
|
raise ValueError(
|
||||||
|
"Could not find insertion point in flake.nix. "
|
||||||
|
"Looking for pattern: ' };\\n packages = forAllSystems'"
|
||||||
|
)
|
||||||
|
|
||||||
|
flake_path.write_text(new_content)
|
||||||
|
|
||||||
|
|
||||||
|
def update_terraform_vms(config: HostConfig, repo_root: Path, force: bool = False) -> None:
|
||||||
|
"""
|
||||||
|
Add or update VM entry in terraform/vms.tf locals.vms map.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
config: Host configuration
|
||||||
|
repo_root: Path to repository root
|
||||||
|
force: If True, replace existing entry; if False, insert new entry
|
||||||
|
"""
|
||||||
|
terraform_path = repo_root / "terraform" / "vms.tf"
|
||||||
|
content = terraform_path.read_text()
|
||||||
|
|
||||||
|
# Create new entry based on whether we have static IP or DHCP
|
||||||
|
if config.is_static_ip:
|
||||||
|
new_entry = f''' "{config.hostname}" = {{
|
||||||
|
ip = "{config.ip}"
|
||||||
|
cpu_cores = {config.cpu}
|
||||||
|
memory = {config.memory}
|
||||||
|
disk_size = "{config.disk}"
|
||||||
|
}}
|
||||||
|
'''
|
||||||
|
else:
|
||||||
|
new_entry = f''' "{config.hostname}" = {{
|
||||||
|
cpu_cores = {config.cpu}
|
||||||
|
memory = {config.memory}
|
||||||
|
disk_size = "{config.disk}"
|
||||||
|
}}
|
||||||
|
'''
|
||||||
|
|
||||||
|
# Check if hostname already exists
|
||||||
|
hostname_pattern = rf'^\s+"{re.escape(config.hostname)}" = \{{'
|
||||||
|
existing_match = re.search(hostname_pattern, content, re.MULTILINE)
|
||||||
|
|
||||||
|
if existing_match and force:
|
||||||
|
# Replace existing entry
|
||||||
|
# Match the entire block from "hostname" = { to }
|
||||||
|
replace_pattern = rf'^\s+"{re.escape(config.hostname)}" = \{{.*?^\s+\}}\n'
|
||||||
|
new_content, count = re.subn(replace_pattern, new_entry, content, flags=re.MULTILINE | re.DOTALL)
|
||||||
|
|
||||||
|
if count == 0:
|
||||||
|
raise ValueError(f"Could not find existing entry for {config.hostname} in terraform/vms.tf")
|
||||||
|
else:
|
||||||
|
# Insert new entry before closing brace
|
||||||
|
# Pattern: " }\n\n # Compute VM configurations"
|
||||||
|
pattern = r"( \})\n\n( # Compute VM configurations)"
|
||||||
|
replacement = rf"{new_entry}\g<1>\n\n\g<2>"
|
||||||
|
|
||||||
|
new_content, count = re.subn(pattern, replacement, content)
|
||||||
|
|
||||||
|
if count == 0:
|
||||||
|
raise ValueError(
|
||||||
|
"Could not find insertion point in terraform/vms.tf. "
|
||||||
|
"Looking for pattern: ' }\\n\\n # Compute VM configurations'"
|
||||||
|
)
|
||||||
|
|
||||||
|
terraform_path.write_text(new_content)
|
||||||
54
scripts/create-host/models.py
Normal file
54
scripts/create-host/models.py
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
"""Data models for host configuration."""
|
||||||
|
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class HostConfig:
|
||||||
|
"""Configuration for a new NixOS host."""
|
||||||
|
|
||||||
|
hostname: str
|
||||||
|
ip: Optional[str] = None
|
||||||
|
cpu: int = 2
|
||||||
|
memory: int = 2048
|
||||||
|
disk: str = "20G"
|
||||||
|
|
||||||
|
@property
|
||||||
|
def is_static_ip(self) -> bool:
|
||||||
|
"""Check if host uses static IP configuration."""
|
||||||
|
return self.ip is not None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def gateway(self) -> str:
|
||||||
|
"""Default gateway for the network."""
|
||||||
|
return "10.69.13.1"
|
||||||
|
|
||||||
|
@property
|
||||||
|
def nameservers(self) -> list[str]:
|
||||||
|
"""DNS nameservers for the network."""
|
||||||
|
return ["10.69.13.5", "10.69.13.6"]
|
||||||
|
|
||||||
|
@property
|
||||||
|
def domain(self) -> str:
|
||||||
|
"""Domain name for the network."""
|
||||||
|
return "home.2rjus.net"
|
||||||
|
|
||||||
|
@property
|
||||||
|
def state_version(self) -> str:
|
||||||
|
"""NixOS state version for new hosts."""
|
||||||
|
return "25.11"
|
||||||
|
|
||||||
|
def validate(self) -> None:
|
||||||
|
"""Validate configuration constraints."""
|
||||||
|
if not self.hostname:
|
||||||
|
raise ValueError("Hostname cannot be empty")
|
||||||
|
|
||||||
|
if self.cpu < 1:
|
||||||
|
raise ValueError("CPU cores must be at least 1")
|
||||||
|
|
||||||
|
if self.memory < 512:
|
||||||
|
raise ValueError("Memory must be at least 512 MB")
|
||||||
|
|
||||||
|
if not self.disk:
|
||||||
|
raise ValueError("Disk size cannot be empty")
|
||||||
33
scripts/create-host/setup.py
Normal file
33
scripts/create-host/setup.py
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
from setuptools import setup
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Read templates
|
||||||
|
templates = [str(p.relative_to(".")) for p in Path("templates").glob("*.j2")]
|
||||||
|
|
||||||
|
setup(
|
||||||
|
name="create-host",
|
||||||
|
version="0.1.0",
|
||||||
|
description="NixOS host configuration generator for homelab infrastructure",
|
||||||
|
py_modules=[
|
||||||
|
"create_host",
|
||||||
|
"models",
|
||||||
|
"validators",
|
||||||
|
"generators",
|
||||||
|
"manipulators",
|
||||||
|
],
|
||||||
|
include_package_data=True,
|
||||||
|
data_files=[
|
||||||
|
("templates", templates),
|
||||||
|
],
|
||||||
|
install_requires=[
|
||||||
|
"typer",
|
||||||
|
"jinja2",
|
||||||
|
"rich",
|
||||||
|
],
|
||||||
|
entry_points={
|
||||||
|
"console_scripts": [
|
||||||
|
"create-host=create_host:app",
|
||||||
|
],
|
||||||
|
},
|
||||||
|
python_requires=">=3.9",
|
||||||
|
)
|
||||||
66
scripts/create-host/templates/configuration.nix.j2
Normal file
66
scripts/create-host/templates/configuration.nix.j2
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
{
|
||||||
|
config,
|
||||||
|
lib,
|
||||||
|
pkgs,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
../template2/hardware-configuration.nix
|
||||||
|
|
||||||
|
../../system
|
||||||
|
../../common/vm
|
||||||
|
];
|
||||||
|
|
||||||
|
nixpkgs.config.allowUnfree = true;
|
||||||
|
boot.loader.grub.enable = true;
|
||||||
|
boot.loader.grub.device = "/dev/vda";
|
||||||
|
|
||||||
|
networking.hostName = "{{ hostname }}";
|
||||||
|
networking.domain = "{{ domain }}";
|
||||||
|
networking.useNetworkd = true;
|
||||||
|
networking.useDHCP = false;
|
||||||
|
services.resolved.enable = true;
|
||||||
|
networking.nameservers = [
|
||||||
|
{% for ns in nameservers %}
|
||||||
|
"{{ ns }}"
|
||||||
|
{% endfor %}
|
||||||
|
];
|
||||||
|
|
||||||
|
systemd.network.enable = true;
|
||||||
|
systemd.network.networks."ens18" = {
|
||||||
|
matchConfig.Name = "ens18";
|
||||||
|
{% if is_static_ip %}
|
||||||
|
address = [
|
||||||
|
"{{ ip }}"
|
||||||
|
];
|
||||||
|
routes = [
|
||||||
|
{ Gateway = "{{ gateway }}"; }
|
||||||
|
];
|
||||||
|
{% else %}
|
||||||
|
networkConfig.DHCP = "ipv4";
|
||||||
|
{% endif %}
|
||||||
|
linkConfig.RequiredForOnline = "routable";
|
||||||
|
};
|
||||||
|
time.timeZone = "Europe/Oslo";
|
||||||
|
|
||||||
|
nix.settings.experimental-features = [
|
||||||
|
"nix-command"
|
||||||
|
"flakes"
|
||||||
|
];
|
||||||
|
nix.settings.tarball-ttl = 0;
|
||||||
|
environment.systemPackages = with pkgs; [
|
||||||
|
vim
|
||||||
|
wget
|
||||||
|
git
|
||||||
|
];
|
||||||
|
|
||||||
|
# Open ports in the firewall.
|
||||||
|
# networking.firewall.allowedTCPPorts = [ ... ];
|
||||||
|
# networking.firewall.allowedUDPPorts = [ ... ];
|
||||||
|
# Or disable the firewall altogether.
|
||||||
|
networking.firewall.enable = false;
|
||||||
|
|
||||||
|
system.stateVersion = "{{ state_version }}"; # Did you read the comment?
|
||||||
|
}
|
||||||
159
scripts/create-host/validators.py
Normal file
159
scripts/create-host/validators.py
Normal file
@@ -0,0 +1,159 @@
|
|||||||
|
"""Validation functions for host configuration."""
|
||||||
|
|
||||||
|
import re
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
|
||||||
|
def validate_hostname_format(hostname: str) -> None:
|
||||||
|
"""
|
||||||
|
Validate hostname format according to RFC 1123.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
hostname: Hostname to validate
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If hostname format is invalid
|
||||||
|
"""
|
||||||
|
# RFC 1123: lowercase, alphanumeric, hyphens, max 63 chars
|
||||||
|
pattern = r"^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$"
|
||||||
|
|
||||||
|
if not re.match(pattern, hostname):
|
||||||
|
raise ValueError(
|
||||||
|
f"Invalid hostname '{hostname}'. "
|
||||||
|
"Must be lowercase alphanumeric with hyphens, "
|
||||||
|
"start and end with alphanumeric, max 63 characters."
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def validate_hostname_unique(hostname: str, repo_root: Path) -> None:
|
||||||
|
"""
|
||||||
|
Validate that hostname is unique in the repository.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
hostname: Hostname to check
|
||||||
|
repo_root: Path to repository root
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If hostname already exists
|
||||||
|
"""
|
||||||
|
# Check if host directory exists
|
||||||
|
host_dir = repo_root / "hosts" / hostname
|
||||||
|
if host_dir.exists():
|
||||||
|
raise ValueError(f"Host directory already exists: {host_dir}")
|
||||||
|
|
||||||
|
# Check if hostname exists in flake.nix
|
||||||
|
flake_path = repo_root / "flake.nix"
|
||||||
|
if flake_path.exists():
|
||||||
|
flake_content = flake_path.read_text()
|
||||||
|
# Look for pattern like " hostname = "
|
||||||
|
hostname_pattern = rf'^\s+{re.escape(hostname)}\s*='
|
||||||
|
if re.search(hostname_pattern, flake_content, re.MULTILINE):
|
||||||
|
raise ValueError(f"Hostname '{hostname}' already exists in flake.nix")
|
||||||
|
|
||||||
|
|
||||||
|
def validate_ip_format(ip: str) -> None:
|
||||||
|
"""
|
||||||
|
Validate IP address format with CIDR notation.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
ip: IP address with CIDR (e.g., "10.69.13.50/24")
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If IP format is invalid
|
||||||
|
"""
|
||||||
|
if not ip:
|
||||||
|
return
|
||||||
|
|
||||||
|
# Check CIDR notation
|
||||||
|
if "/" not in ip:
|
||||||
|
raise ValueError(f"IP address must include CIDR notation (e.g., {ip}/24)")
|
||||||
|
|
||||||
|
ip_part, cidr_part = ip.rsplit("/", 1)
|
||||||
|
|
||||||
|
# Validate CIDR is /24
|
||||||
|
if cidr_part != "24":
|
||||||
|
raise ValueError(f"CIDR notation must be /24, got /{cidr_part}")
|
||||||
|
|
||||||
|
# Validate IP format
|
||||||
|
octets = ip_part.split(".")
|
||||||
|
if len(octets) != 4:
|
||||||
|
raise ValueError(f"Invalid IP address format: {ip_part}")
|
||||||
|
|
||||||
|
try:
|
||||||
|
octet_values = [int(octet) for octet in octets]
|
||||||
|
except ValueError:
|
||||||
|
raise ValueError(f"Invalid IP address format: {ip_part}")
|
||||||
|
|
||||||
|
# Check each octet is 0-255
|
||||||
|
for i, value in enumerate(octet_values):
|
||||||
|
if not 0 <= value <= 255:
|
||||||
|
raise ValueError(f"Invalid octet value {value} in IP address")
|
||||||
|
|
||||||
|
# Check last octet is 1-254
|
||||||
|
if not 1 <= octet_values[3] <= 254:
|
||||||
|
raise ValueError(
|
||||||
|
f"Last octet must be 1-254, got {octet_values[3]}"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def validate_ip_subnet(ip: str) -> None:
|
||||||
|
"""
|
||||||
|
Validate that IP address is in the correct subnet (10.69.13.0/24).
|
||||||
|
|
||||||
|
Args:
|
||||||
|
ip: IP address with CIDR (e.g., "10.69.13.50/24")
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If IP is not in correct subnet
|
||||||
|
"""
|
||||||
|
if not ip:
|
||||||
|
return
|
||||||
|
|
||||||
|
validate_ip_format(ip)
|
||||||
|
|
||||||
|
ip_part = ip.split("/")[0]
|
||||||
|
octets = ip_part.split(".")
|
||||||
|
|
||||||
|
# Check subnet is 10.69.13.x
|
||||||
|
if octets[:3] != ["10", "69", "13"]:
|
||||||
|
raise ValueError(
|
||||||
|
f"IP address must be in 10.69.13.0/24 subnet, got {ip_part}"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def validate_ip_unique(ip: Optional[str], repo_root: Path) -> None:
|
||||||
|
"""
|
||||||
|
Validate that IP address is not already in use.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
ip: IP address with CIDR to check (None for DHCP)
|
||||||
|
repo_root: Path to repository root
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If IP is already in use
|
||||||
|
"""
|
||||||
|
if not ip:
|
||||||
|
return # DHCP mode, no uniqueness check needed
|
||||||
|
|
||||||
|
# Extract just the IP part without CIDR for searching
|
||||||
|
ip_part = ip.split("/")[0]
|
||||||
|
|
||||||
|
# Check all hosts/*/configuration.nix files
|
||||||
|
hosts_dir = repo_root / "hosts"
|
||||||
|
if hosts_dir.exists():
|
||||||
|
for config_file in hosts_dir.glob("*/configuration.nix"):
|
||||||
|
content = config_file.read_text()
|
||||||
|
if ip_part in content:
|
||||||
|
raise ValueError(
|
||||||
|
f"IP address {ip_part} already in use in {config_file}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Check terraform/vms.tf
|
||||||
|
terraform_file = repo_root / "terraform" / "vms.tf"
|
||||||
|
if terraform_file.exists():
|
||||||
|
content = terraform_file.read_text()
|
||||||
|
if ip_part in content:
|
||||||
|
raise ValueError(
|
||||||
|
f"IP address {ip_part} already in use in {terraform_file}"
|
||||||
|
)
|
||||||
29
secrets/auth01/secrets.yaml
Normal file
29
secrets/auth01/secrets.yaml
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
authelia_ldap_password: ENC[AES256_GCM,data:x2UDMpqQKoRVSlDSmK5XiC9x4/WWzmjk7cwtFA70waAD7xYQfXEOV+AeX1LlFfj0qHYrhyn//TLsa+tJzb7HPEAfl8vYR4MdkVFOm5vjPWWoF5Ul8ZVn8+B1VJLbiXkexv0/hfXL8NMzEcp/pF4H0Yei7xaKezu9OPtGzKufHws=,iv:88RXaOj8Zy9fGeDLAE0ItY7TKCCzxn6F0+kU5+Zy/XU=,tag:yPdCJ9d139iO6J97thVVgA==,type:str]
|
||||||
|
authelia_jwt_secret: ENC[AES256_GCM,data:9ZHkT2o5KZLmml95g8HZce8fNBmaWtRn+175Gaz0KhsndNl3zdgGq3hydRuoZuEgLVsherJImVmb5DQAZpv04lUEsDKCYeFNwAyYl4Go2jCp1fI53fdcRCKlNVZA37pMi4AYaCoe8vIl/cwPOOBDEwK5raOBnklCzVERoO0B8a0=,iv:9CTWCw0ImZR0OSrl2znbhpRHlzAxA5Cpcy98JeH9Z+Y=,tag:L+0xKqiwXTi7XiDYWA1Bcw==,type:str]
|
||||||
|
authelia_storage_encryption_key_file: ENC[AES256_GCM,data:RfbcQK8+rrW/Krd2rbDfgo7YI2YvQKqpLuDtk5DZJNNhw4giBh5nFp/8LNeo8r39/oiJLYTe6FjTLBu72TZz2wWrJFsBqjwQ/3TfATQGdLUsaXXRDr88ezHLTiYvEHIHJhUS5qsr7VMwBam5e7YGWBe5sGZCE/nX41ijyPUjtOY=,iv:sayYcAC38cApAtL+cDhgGNjWaHn+furKRowKL6AmfdU=,tag:1IZpnlpvDWGLLpZyU9iJUw==,type:str]
|
||||||
|
authelia_session_secret: ENC[AES256_GCM,data:4PaLv4RRA7/9Z8QzETXLwo3OctJ0mvzQkYmHsGGF97nq9QeB3eo0xj4FyuCbkJGGZ/huAyRgmFBTyscY3wgxoc4t+8BdlYcSbefEk1/xRFjmG8ooXLKhvGJ5c6t72KJRcqsEGTiC0l9CFJWQ2qYcjM4dPwG8z0tjUZ6j25Zfx4M=,iv:QORJkf0w6iyuRHM/xuql1s7K75Qa49ygq+lwHfrm9rk=,tag:/HZ/qI80fKjmuTRwIwmX8g==,type:str]
|
||||||
|
lldap_user_pass: ENC[AES256_GCM,data:56gF7uqVQ+/J5/lY/N904Q==,iv:qtY1XhHs4WWA4kPY56NigPvX4OslO0koZepgdv947zg=,tag:UDmJs8FPXskp7rUS2Sxinw==,type:str]
|
||||||
|
sops:
|
||||||
|
age:
|
||||||
|
- recipient: age1lznyk4ee7e7x8n92cq2n87kz9920473ks5u9jlhd3dczfzq4wamqept56u
|
||||||
|
enc: |
|
||||||
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBlc1dxK3FKU2ZGWTNGUmxZ
|
||||||
|
aWx1NngySjVHclJTd3hXejJRTmVHRExReHcwCk55c0xMbGcyTktySkJZdHRZbzhK
|
||||||
|
bEI3RzBHQkROTU1qWXBoU1RqTXppdVkKLS0tIHkwZ0QyNTMydWRqUlBtTEdhZ05r
|
||||||
|
YVpuT1JadnlyN1hqNnJxYzVPT3pXN1UKDCeIv0xv+5pcoDdtYc+rYjwi8SLrqWth
|
||||||
|
vdWepxmV2edajZRqcwFEC9weOZ1j2lh7Z3hR6RSN/+X3sFpqkpw+Yg==
|
||||||
|
-----END AGE ENCRYPTED FILE-----
|
||||||
|
- recipient: age16prza00sqzuhwwcyakj6z4hvwkruwkqpmmrsn94a5ucgpkelncdq2ldctk
|
||||||
|
enc: |
|
||||||
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAvbU0wNmFLelRmNmJTRlho
|
||||||
|
dTEwVXZqUVI5NHZkb1QyNUZ4R0pLVFZWVDM4CkhVc00zY2FKaVdNRXdGVk1ranpG
|
||||||
|
MlRWWGJmd2FWeFE1dXU4WHVFL0FHZ3MKLS0tIGt2ZWlaOW5wNkJnQVkrTDZWTnY0
|
||||||
|
RW5HRjA3cERCUU1CVWZhck12SGhTRUkK6k/zQ87TIETYouRBby7ujtwgpqIPKKv+
|
||||||
|
2aLJW6lSWMVzL/f3ZrIeg12tJjHs3f44EXR6j3tfLfSKog2iL8Y57w==
|
||||||
|
-----END AGE ENCRYPTED FILE-----
|
||||||
|
lastmodified: "2025-12-06T10:03:56Z"
|
||||||
|
mac: ENC[AES256_GCM,data:SRNqx5n+xg/cNGiyze3CGKufox3IuXmOKLqNRDeJhBNMBHC1iYYCjRdHEVXsl7XSiYe51dSwjV0KrJa/SG1pRVkuyT+xyPrTjT2/DyXN7A/CESSAkBIwI7lkZmIf8DkxB3CELF1PgjIr1o2isxlBnkAnhEBTxQ7t8AzpcH7I5yU=,iv:P3FGQurZrL0ed5UuBPRFk11T0VRFtL6xI4iQ4LmYTec=,tag:8gQL08ojjIMyCl5E0Qs/Ww==,type:str]
|
||||||
|
unencrypted_suffix: _unencrypted
|
||||||
|
version: 3.11.0
|
||||||
25
secrets/http-proxy/wireguard.yaml
Normal file
25
secrets/http-proxy/wireguard.yaml
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
wg_private_key: ENC[AES256_GCM,data:DlC9txcLkTnb7FoEd249oJV/Ehcp50P8uulbE4rY/xU16fkTlnKvPmYZ7u8=,iv:IsiTzdrh+BNSVgx1mfjpMGNV2J0c88q6AoP0kHX2aGY=,tag:OqFsOIyE71SBD1mcNS/PeQ==,type:str]
|
||||||
|
sops:
|
||||||
|
age:
|
||||||
|
- recipient: age1lznyk4ee7e7x8n92cq2n87kz9920473ks5u9jlhd3dczfzq4wamqept56u
|
||||||
|
enc: |
|
||||||
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAzdm9HTTN1amwxQ2Z6MUQv
|
||||||
|
dGJ0cEgyaHNOZWtWSWlXNXc5bGhUdSsvVlVzCkJkc3ZQdzlBNDNxb3Avdi96bXFt
|
||||||
|
TExZY29nUDI3RE5vanh6TVBRME1Fa1UKLS0tIG8vSHdCYzkvWmJpd0hNbnRtUmtk
|
||||||
|
aVcwaFJJclZ3YUlUTTNwR2VESmVyZWMKHvKUJBDuNCqacEcRlapetCXHKRb0Js09
|
||||||
|
sqxLfEDwiN2LQQjYHZOmnMfCOt/b2rwXVKEHdTcIsXbdIdKOJwuAIQ==
|
||||||
|
-----END AGE ENCRYPTED FILE-----
|
||||||
|
- recipient: age1gq8434ku0xekqmvnseeunv83e779cg03c06gwrusnymdsr3rpufqx6vr3m
|
||||||
|
enc: |
|
||||||
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBEeU01UTc2V1UyZXRadE5I
|
||||||
|
VE1aakVZUEZUNnJxbzJ1K3J1R3ZQdFdMbUhBCjZBMDM3ZkYvQWlyNHBtaDZRWkd4
|
||||||
|
VzY0L3l4N2RNZjJRTDJWZTZyZVhHbW8KLS0tIGVNZ0N0emVmaVRCV09jNmVKRlla
|
||||||
|
cWVSNkJqWHh5c21KcWFac2FlZTVaMTAK1UvfPgZAZYtwiONKIAo5HlaDpN+UT/S/
|
||||||
|
JfPUfjxgRQid8P20Eh/jUepxrDY8iXRZdsUMON+OoQ8mpwoAh5eN1A==
|
||||||
|
-----END AGE ENCRYPTED FILE-----
|
||||||
|
lastmodified: "2025-05-15T18:56:55Z"
|
||||||
|
mac: ENC[AES256_GCM,data:J2kHY7pXBJZ0UuNCZOhkU11M8rDqCYNzY71NyuDRmzzRCC9ZiNIbavyQAWj2Dpk1pjGsYjXsVoZvP7ti1wTFqahpaR/YWI5gmphrzAe32b9qFVEWTC3YTnmItnY0YxQZYehYghspBjnJtfUK0BvZxSb17egpoFnvHmAq+u5dyxg=,iv:/aLg02RLuJZ1bRzZfOD74pJuE7gppCBztQvUEt557mU=,tag:toxHHBuv3WRblyc9Sth6Iw==,type:str]
|
||||||
|
unencrypted_suffix: _unencrypted
|
||||||
|
version: 3.10.2
|
||||||
19
secrets/nix-cache01/actions_token_1
Normal file
19
secrets/nix-cache01/actions_token_1
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
{
|
||||||
|
"data": "ENC[AES256_GCM,data:P84qHFU+xQjwQGK8I1gIdcBsHrskuUg0M1nGMMaA+hFjAdFYUhdhmAN/+y0CO28=,iv:zJtk01zNMTBDQdVtZBTM34CHRaNYDkabolxh7PWGKUI=,tag:8AS80AbZJbh9B3Av3zuI1w==,type:str]",
|
||||||
|
"sops": {
|
||||||
|
"age": [
|
||||||
|
{
|
||||||
|
"recipient": "age1lznyk4ee7e7x8n92cq2n87kz9920473ks5u9jlhd3dczfzq4wamqept56u",
|
||||||
|
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBkRFB6QTIyWWdwVkV4ZXNB\nWkdSdEhMc0s4cnByWVZXTGhnSWZ0MTdEUWhJCnFlOFQ5TU1hcE91azVyZXVXRCtu\nZjIxalRLYlEreGZ6ZDNoeXNPaFN4b28KLS0tIHY5WVFXN1k4NFVmUjh6VURkcEpv\ncklGcWVhdTdBRnlOdm1qM2h5SS9UUkEKq2RyxSVymDqcsZ+yiNRujDCwk1WOWYRW\nDa4TRKg3FCe7TcCEPkIaev1aBqjLg9J9c/70SYpUm6Zgeps7v5yl3A==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"recipient": "age1w029fksjv0edrff9p7s03tgk3axecdkppqymfpwfn2nu2gsqqefqc37sxq",
|
||||||
|
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSArTGVuckp2NlhMZXRNMVhO\naUV3K0h3cmZ5ZGx4Q3dJWHNqZXFJeE1kM0dFCmF4TUFUMm9mTHJlYzlYWVhNa1RH\nR29VNDIrL1IvYUpQYm5SZEYzbWhhbkkKLS0tIEJsK1dwZVdaaHpWQkpOOS90dkhx\nbGhvRXhqdFdqQmhZZmhCdmw4NUtSVG8K3z2do+/cIjAqg6EMJnubOWid1sMeTxvo\nrq6eGJ7YzdgZr2JBVtJdDRtk/KeHXu9In4efbBXwLAPIfn1pU0gm1w==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"lastmodified": "2025-08-21T19:08:48Z",
|
||||||
|
"mac": "ENC[AES256_GCM,data:5CkO09NIqttb4UZPB9iGym8avhTsMeUkTFTKZJlNGjgB1qWyGQNeKCa50A1+SbBCCWE5EwxoynB1so7bi8vnq7k8CPUHbiWG8rLOJSYHQcZ9Tu7ZGtpeWPcCw1zPWJ/PTBsFVeaT5/ufdx/6ut+sTtRoKHOZZtO9oStHmu/Rlfg=,iv:z9iJJlbvhgxJaART5QoCrqvrqlgoVlGj8jlndCALmKU=,tag:ldjmND4NVVQrHUldLrB4Jg==,type:str]",
|
||||||
|
"unencrypted_suffix": "_unencrypted",
|
||||||
|
"version": "3.10.2"
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,23 +1,18 @@
|
|||||||
{
|
{
|
||||||
"data": "ENC[AES256_GCM,data:MQkR6FQGHK2AuhOmy2was49RY2XlLO5NwaXnUFzFo5Ata/2ufVoAj4Jvotw/dSrKL7f62A6s+2BPAyWrvACJ+pwYFlfyj3T9bNwhxwZPkEmiHEubJjWSiD6jkSW0gOxbY8ib6g/GbyF8I1cPeYr/hJD5qQ==,iv:eBL2Y3MOt9gYTETUZqsHo1D5hPOHxb4JR6Z/DFlzzqI=,tag:Qqbt39xZvQz/QhsggsArsw==,type:str]",
|
"data": "ENC[AES256_GCM,data:MQkR6FQGHK2AuhOmy2was49RY2XlLO5NwaXnUFzFo5Ata/2ufVoAj4Jvotw/dSrKL7f62A6s+2BPAyWrvACJ+pwYFlfyj3T9bNwhxwZPkEmiHEubJjWSiD6jkSW0gOxbY8ib6g/GbyF8I1cPeYr/hJD5qQ==,iv:eBL2Y3MOt9gYTETUZqsHo1D5hPOHxb4JR6Z/DFlzzqI=,tag:Qqbt39xZvQz/QhsggsArsw==,type:str]",
|
||||||
"sops": {
|
"sops": {
|
||||||
"kms": null,
|
|
||||||
"gcp_kms": null,
|
|
||||||
"azure_kv": null,
|
|
||||||
"hc_vault": null,
|
|
||||||
"age": [
|
"age": [
|
||||||
{
|
{
|
||||||
"recipient": "age1lznyk4ee7e7x8n92cq2n87kz9920473ks5u9jlhd3dczfzq4wamqept56u",
|
"recipient": "age1lznyk4ee7e7x8n92cq2n87kz9920473ks5u9jlhd3dczfzq4wamqept56u",
|
||||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBkQ0dGckxKMmZsM1JER3Qx\nYkRhb282OFlFSmRrNmU3c0dIYitmbHE1bHlFCnhpK0pCRlhlTlpBTHl6aU53blBP\nNGFuejRjOFhPWnhvUURPMzY1V1A5ZnMKLS0tIGhMSWhxVWtCbXd0Vnh6N1J1STBT\nVDRzWURscjNYT21kMzRYVnZDQlkreVkKMkRqbGfHd2/bRf8on8eqoJpFI8i9vMDK\ni0Lrw7Zpw0D1Arzq6rA8YGyAqboV4ixQVUjlrL8cJv9n3/8geCfOAQ==\n-----END AGE ENCRYPTED FILE-----\n"
|
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAwZzFXaEsyUkZGNFV0bVlW\nRkpPRHpUK2VwUHpOQXZCUUpoVzFGa3hycnhvCndTN0toVFdoU2E5N3V3UFhTTjU0\nNDByWTkrV0o3T295dE0zS08rVGpyQjAKLS0tIC96M0VEcWpjRk5DMjJnMFB4ZHI3\nM2Jod2x4ZzMyZm1pbDhZNTFuWGNRUlEKHs5jBSfjml09JOeKiT9vFR0Fykg6OxKG\njhFU/J2+fWB22G7dBc4PI60SNqhxIheUbGTdcz4Yp4BPL6vW3eArIw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"recipient": "age1a0477laj9sdh79wdas5v7hzk6au8fach74njg8epfw2rdht90qjsakkwd6",
|
"recipient": "age1w029fksjv0edrff9p7s03tgk3axecdkppqymfpwfn2nu2gsqqefqc37sxq",
|
||||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBGaUt5VHBWY3NiR2U4MXVX\nREpLZXIxaDNSc2FmdEZkclNEeHdkSzBEdDI4CjNiS0xMV1hjMmxVd1QwekFXT29k\nMXIrQ2VIMTR2ejJWaGd2S00zQWVKVHcKLS0tIER1azhRRHVRZzJuQU5xL3hZb1lR\nZlN3NGV2a1c2M1AwSW1JeldOTkhRMjAKGDk5neEcVzSPtauiiqxkOaqaCj/+jzUk\nEE8g9XQuK5xAIxFlvqPilgo59VOL335VjUJZqGgFxfc7TvhZQTSAaQ==\n-----END AGE ENCRYPTED FILE-----\n"
|
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBJT3lxamcrQUpFdjZteFlF\nYUQ3aGdadGpuNXd2Z3RtZ3dQU0cvMlFUMUNRClBDR3U0OXZJU0NDamVMSlR5NitN\nYlhvNVlvUE0wRjErYzkwVHFOdGVCVjgKLS0tIEttR1BLTGpDYTRSQ0lUZmVEcnNi\nWkNaMEViUHVBcExVOEpjNE5CZHpjVkEKuX/Rf8kaB3apr1UhAnq3swS6fXiVmwm8\n7Key+SUAPNstbWbz0u6B9m1ev5QcXB2lx2/+Cm7cjW+6VE2gLHjTsQ==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"lastmodified": "2025-01-24T12:19:16Z",
|
"lastmodified": "2025-01-24T12:19:16Z",
|
||||||
"mac": "ENC[AES256_GCM,data:X8X91LVP1MMJ8ZYeSNPRO6XHN+NuswLZcHpAkbvoY+E9aTteO8UqS+fsStbNDlpF5jz/mhdMsKElnU8Z/CIWImwolI4GGE6blKy6gyqRkn4VeZotUoXcJadYV/5COud3XP2uSTb694JyQEZnBXFNeYeiHpN0y38zLxoX8kXHFbc=,iv:fFCRfv+Y1Nt2zgJNKsxElrYcuKkATJ3A/jvheUY2IK4=,tag:hYojbMGUAQvx7I4qkO7o9w==,type:str]",
|
"mac": "ENC[AES256_GCM,data:X8X91LVP1MMJ8ZYeSNPRO6XHN+NuswLZcHpAkbvoY+E9aTteO8UqS+fsStbNDlpF5jz/mhdMsKElnU8Z/CIWImwolI4GGE6blKy6gyqRkn4VeZotUoXcJadYV/5COud3XP2uSTb694JyQEZnBXFNeYeiHpN0y38zLxoX8kXHFbc=,iv:fFCRfv+Y1Nt2zgJNKsxElrYcuKkATJ3A/jvheUY2IK4=,tag:hYojbMGUAQvx7I4qkO7o9w==,type:str]",
|
||||||
"pgp": null,
|
|
||||||
"unencrypted_suffix": "_unencrypted",
|
"unencrypted_suffix": "_unencrypted",
|
||||||
"version": "3.9.3"
|
"version": "3.9.3"
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,140 +1,154 @@
|
|||||||
root_password_hash: ENC[AES256_GCM,data:wk/xEuf+qU3ezmondq9y3OIotXPI/L+TOErTjgJz58wEvQkApYkjc3bHaUTzOrmWjQBgDUENObzPmvQ8WKawUSJRVlpfOEr5TQ==,iv:I8Z3xJz3qoXBD7igx087A1fMwf8d29hQ4JEI3imRXdY=,tag:M80osQeWGG9AAA8BrMfhHA==,type:str]
|
root_password_hash: ENC[AES256_GCM,data:wk/xEuf+qU3ezmondq9y3OIotXPI/L+TOErTjgJz58wEvQkApYkjc3bHaUTzOrmWjQBgDUENObzPmvQ8WKawUSJRVlpfOEr5TQ==,iv:I8Z3xJz3qoXBD7igx087A1fMwf8d29hQ4JEI3imRXdY=,tag:M80osQeWGG9AAA8BrMfhHA==,type:str]
|
||||||
ns_xfer_key: ENC[AES256_GCM,data:VFpK7GChgFeUgQm31tTvVC888bN0yt6BAnHQa6KUTg4iZGP1WL5Bx6Zp8dY=,iv:9RF1eEc7JBxBebDOKfcDjGS2U7XsHkOW/l52yIP+1LA=,tag:L6DR2QlHOfo02kzfWWCrvg==,type:str]
|
ns_xfer_key: ENC[AES256_GCM,data:VFpK7GChgFeUgQm31tTvVC888bN0yt6BAnHQa6KUTg4iZGP1WL5Bx6Zp8dY=,iv:9RF1eEc7JBxBebDOKfcDjGS2U7XsHkOW/l52yIP+1LA=,tag:L6DR2QlHOfo02kzfWWCrvg==,type:str]
|
||||||
backup_helper_secret: ENC[AES256_GCM,data:EvXEJnDilbfALQ==,iv:Q3dkZ8Ee3qbcjcoi5GxfbaVB4uRIvkIB6ioKVV/dL2Y=,tag:T/UgZvQgYGa740Wh7D0b7Q==,type:str]
|
backup_helper_secret: ENC[AES256_GCM,data:EvXEJnDilbfALQ==,iv:Q3dkZ8Ee3qbcjcoi5GxfbaVB4uRIvkIB6ioKVV/dL2Y=,tag:T/UgZvQgYGa740Wh7D0b7Q==,type:str]
|
||||||
|
nats_nkey: ENC[AES256_GCM,data:N2CVXjdwiE7eSPUtXe+NeKSTzA9eFwK2igxaCdYsXd4Ps0/DjYb/ggnQziQzSy8viESZYjXhJ2VtNw==,iv:Xhcf5wPB01Wu0A+oMw0wzTEHATp+uN+wsaYshxIzy1w=,tag:IauTIOHqfiM75Ufml/JXbg==,type:str]
|
||||||
sops:
|
sops:
|
||||||
kms: []
|
|
||||||
gcp_kms: []
|
|
||||||
azure_kv: []
|
|
||||||
hc_vault: []
|
|
||||||
age:
|
age:
|
||||||
- recipient: age1lznyk4ee7e7x8n92cq2n87kz9920473ks5u9jlhd3dczfzq4wamqept56u
|
- recipient: age1lznyk4ee7e7x8n92cq2n87kz9920473ks5u9jlhd3dczfzq4wamqept56u
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB0cHhMeU5PWmhZSFhvVjJV
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBnbC90WWJiRXRPZ1VUVWhO
|
||||||
OXp6NmdzaDdTeXcyeUdWWi9oZ0gxRWJUcWlBCkU5WW5aYzVCY1ZqVDBmZVd0THdW
|
azc5R2lGeDhoRmQydXBnYlltbE81ajFQNW0wClRJNC9iaFV0NDRKRkw2Mm1vOHpN
|
||||||
V0J5SUcxSkdWZUg1T3F4SzRrSzlWeEkKLS0tIHBkcXhtK1h0clJVa3huR0RGZmdP
|
dVhnUm1nbElQRGQ4dmkxQ2FWdEdpdDAKLS0tIG9GNEpuZUFUQkVXbjZPREo0aEh4
|
||||||
RUlOZWI0VHFLV1Q3K3hrK1NIbWNwMncKEfqME0WIDqw9uMTuiIc+F+tmiHMB1EW3
|
ZVMyY0Y0Zldvd244eSt2RVZDeUZKWmcKGQ7jq50qiXPLKCHq751Y2SA79vEjbSbt
|
||||||
kmSqblbjYNO+FJY1CwOkYygC8nXlxzXPb1QbsnH14w+SsDpeLhG4mg==
|
yhRiakVEjwf9A+/iSNvXYAr/tnKaYC+NTA7F6AKmYpBcrzlBGU68KA==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age1hz2lz4k050ru3shrk5j3zk3f8azxmrp54pktw5a7nzjml4saudesx6jsl0
|
- recipient: age1hz2lz4k050ru3shrk5j3zk3f8azxmrp54pktw5a7nzjml4saudesx6jsl0
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA3YlM1MVA0ZkNSYyt3eDlw
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBRTWFBRVRKeXR0UUloQ3FK
|
||||||
aUYwSTRVR1FUYWZNWGRsQ0pnMkdISS91bVVZCmFGNm9keEFSVW1SK2dDME9NTVMy
|
Rmhsak45aFZBVUp4Szk5eHJhZmswV3JUcHh3Cis0N09JaCtOZE1pQUM5blg4WDY5
|
||||||
WEVPWFRWWDg2UlBYb1YrZFc3TUZlSEEKLS0tIG15TjJ6dUVWLzVkaDl1bDU0N1BV
|
Q0ZGajJSZnJVQzdJK0dxZjJNWHZkbGsKLS0tIEVtRVJROTlWdWl0cFlNZmZkajM5
|
||||||
NStjK2ZXZVZJVVU2MTFYNFZ2KzJ3ZkEKhM2Re8f3x1KxJT0oNGBnGMCC9+N6+po+
|
N3FpdU56WlFWaC9QYU5Kc1o2a1VkT0UK2Utr9mvK8If4JhjzD+l06xZxdE3nbvCO
|
||||||
Nu7udt9X7sPd/kdj1PMDqFDEBNvhp9nXt6r+2XYql6PjbHeg78K+Ug==
|
NixMiYDhuQ/a55Fu0653jqd35i3CI3HukzEI9G5zLEeCcXxTKR5Bjg==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age1w2q4gm2lrcgdzscq8du3ssyvk6qtzm4fcszc92z9ftclq23yyydqdga5um
|
- recipient: age1w2q4gm2lrcgdzscq8du3ssyvk6qtzm4fcszc92z9ftclq23yyydqdga5um
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAwQUJVZFRFRmE0cGQ0aUs2
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBFQVk0aUw0aStuOWhFMk5a
|
||||||
RUt4WDJDSnB6aVhpaHFUazdTcldVQkFFbG5JCnB3VFNmcjRRM3lyanU2ME8wdk5K
|
UVJ5YWg2WjU2eVFUWDlobEIrRDlZV3dxelc0Clo0N3lvOUZNL3YrM2l3Y21VaUQz
|
||||||
UHN5bzRaUG9RN01KRVV2U2gzbzdqcXcKLS0tIG9mK0VicnphYm9Va2NESk5zeWxy
|
MTV5djdPWTBIUXFXVDZpZitRTVhMbVEKLS0tIFluV1NFTzd0cFFaR0RwVkhlSmNm
|
||||||
Y2ZFZkFwcXRZSGZSeG9xS2JtZllTc0UKY7HGgtLzbaO8tQOWDj6UUMDOGWO3cbIf
|
VGdZNDlsUGI3cTQ1Tk9XRWtDSE1wNWMKQI226dcROyp/GprVZKtM0R57m5WbJyuR
|
||||||
/08r45vCFX4Civ1f0ssPUyFMcY+fPBNIMwR4hq343LwJfw1vY6stAw==
|
UZO74NqiDr7nxKfw+tHCfDLh94rbC1iP4jRiaQjDgfDDxviafSbGBA==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age1snmhmpavqy7xddmw4nuny0u4xusqmnqxqarjmghkm5zaluff84eq5xatrd
|
- recipient: age1snmhmpavqy7xddmw4nuny0u4xusqmnqxqarjmghkm5zaluff84eq5xatrd
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBPWkE3SHR1b0NUQUQ1cjh1
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA4WVBzazE3VkNDWXUwMk5x
|
||||||
MEdpZUttMnBRQ1FaanFnMEkrL0ZDVjRBRzFZClFEOFBwQnYwVmlBLytIcU13YnBB
|
NnZtL3N3THVBQytxZzdZNUhCeThURFBLdjBVClBpZjd5L3lKYjRZNVF2Z3hibW5R
|
||||||
UGRJV2JHOXlIcHRVbzFkQTYrUjhVYUUKLS0tIC8wanFQTi9JdGZWdmhmTEtWT0N2
|
YTdTR0NzaVp4VEZlTjlaTHVFNXNSSUEKLS0tIDBGbmhGUFNJQ21zeW1SbWtyWWh0
|
||||||
QlQvL1NhSnhYYkxYbDBLb2VZWjBJb3MKZJD14vDH2/UAZuiAqD97sz1crlB6wmKI
|
QkFXN2g5TlhBbnlmbW1aSUJQL1FOaWMKTv8OoaTxyG8XhKGZNs4aFR/9SXQ+RG6w
|
||||||
ddmnaSQBVvA/Quez4uNe64T4ScSvados82U/e4U+saCfarZ3OvrLpg==
|
+fxiUx7xQnOIYag9YQYfuAgoGzOaj/ha+i18WkQnx9LAgrjCTd+ejA==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age12a3nyvjs8jrwmpkf3tgawel3nwcklwsr35ktmytnvhpawqwzrsfqpgcy0q
|
- recipient: age12a3nyvjs8jrwmpkf3tgawel3nwcklwsr35ktmytnvhpawqwzrsfqpgcy0q
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBmWVp0b2tvSFlZbjdqdWgv
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAzcnVxL09JTEdsZ0FUb2VH
|
||||||
Q05lUVp1ZVVKczZ2UXNlcXJxbjNxYmNUT0NZCitQZlBiaEk0K0RSY1pKNU56OFNJ
|
a3dSY09uRFFCYnJXQno3YUFhMlpueHJreXdFCjQ4UWdRak5yK0VIT2lYUjBVK2h5
|
||||||
MTR0dW55M3NxVTY0SjQvOU9PS0IyZUUKLS0tIDhVd3ZXU0czeHlNeEo0ek1KeXFu
|
RFJmMTlyVEpnS3JxdkE4ckp1UHpLM2sKLS0tIHVyZXRTSHQxL1p1dUxMKzkyV0pW
|
||||||
dGhJakQ1cmR2MzB4Tnh5WWF4OFdSeW8KK9lU0EdYkqfLGx//hia+oaUl9InV6SKh
|
a2o0bG9vZUtmckdYTkhLSVZtZVRtNlUKpALeaeaH4/wFUPPGsNArTAIIJOvBWWDp
|
||||||
t6Oyp+Vlz2YHaSytz1CYuczuHl6BqOWFjzYYA2EqTTxcIEIcpcbyXQ==
|
MUYPJjqLqBVmWzIgCexM2jsDOhtcCV26MXjzTXmZhthaGJMSp23kMQ==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age1d2w5zece9647qwyq4vas9qyqegg96xwmg6c86440a6eg4uj6dd2qrq0w3l
|
- recipient: age1d2w5zece9647qwyq4vas9qyqegg96xwmg6c86440a6eg4uj6dd2qrq0w3l
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBucURtNVJySzgzTzdra3Y5
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA5M0liYUY1UHRHUDdvN3ds
|
||||||
clpobjVzSHB3MUw0WUJzTHpyTHhVV0t2WkdzCkNCOWhIa2VjeUVJL2N6TUc0dVNQ
|
TVdiWDlrWFROSVdRTy9nOHFOUTdmTmlHSzE4CjBpU3gzdjdWaHQzNXRMRkxPdVps
|
||||||
bVFpQk9FYXdTSExLdGZhQVlrRFhFTEEKLS0tIHhzb3I0WGZuZ3NEN1NGVU1ieHBi
|
TEZXbVlYenUwc3o0TXRnaXg4MmVHQmcKLS0tIDlVeWQ4V0hjbWJqRlNUL2hOWVhp
|
||||||
WmpNc3FiVXE0TmZSSmpBdTR5MVliMncKHPFOsTF9kZ2mRvzrWDPIe/U9djEN7JyG
|
WEJvZWZzbWZFeWZVeWJ1c3pVOWI3MFUKN2QfuOaod5IBKkBkYzi3jvPty+8PRGMJ
|
||||||
8mSFEN7H6bbA+a9iA5IH8Zvkv37WwzNhU+BU8ZtRvjkcvTjxq9tB/Q==
|
mozL7qydsb0bAZJtAwcL7HWCr1axar/Ertce0yMqhuthJ5bciVD5xQ==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age1gcyfkxh4fq5zdp0dh484aj82ksz66wrly7qhnpv0r0p576sn9ekse8e9ju
|
- recipient: age1gcyfkxh4fq5zdp0dh484aj82ksz66wrly7qhnpv0r0p576sn9ekse8e9ju
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBXQmlOODV1N3h5Uk83TXhF
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA5L3NmcFMyUUpLOW9mOW9v
|
||||||
UkhFdTg4RWpndkErdFM1dmpHV2paZW9DYjFJCkpJdlhhMjhOd3RRYnJnQ2FQbmtV
|
VXhMTjl5SEFsZ0pzR3lHb1VJL0IzUUxCckdzCnltZnVySkszVUtwbDdQNHAwVWxl
|
||||||
ZHJIYTBQTTFKM3U4VEVBT011bkVWS3cKLS0tIE1kRzdaalpCS3g1L3Q4bUhTdU8v
|
V2xJU1BqSG0yMk5sTkpKRTIvc2JORFUKLS0tIHNydWZjdGg3clNpMDhGSGR6VVVh
|
||||||
YWJ5b3VCaU1yeURKMStmVnBOdzJVeGMKxJO15Fg4eDn/bpkSilze+iZ23qDAxOSz
|
VU1Rbk9ybGRJOG1ETEh4a1orNUY2Z00KJmdp+wLHd+86RJJ/G0QbLp4BEDPXfE9o
|
||||||
kMEGeKGBuWONIL6jjHVO4TaYkt2gMISsM99uJgLYZCWzAUGc8OiK2Q==
|
VZhPPSC6qtUcFV2z6rqSHSpsHPTlgzbCRqX39iePNhfQ2o0lR2P2zQ==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age1g5luz2rtel3surgzuh62rkvtey7lythrvfenyq954vmeyfpxjqkqdj3wt8
|
- recipient: age1g5luz2rtel3surgzuh62rkvtey7lythrvfenyq954vmeyfpxjqkqdj3wt8
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBkTFF1SVpVKzA2aHBFeGJC
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBBbnhXSG5qdVJHSjNmQ3Qx
|
||||||
Tm1vU1V1SGhqR2t0cllzY3lOZzRRY1ZUc3l3Ck9oTnhUbDJTY3VpbkhOVzRQd0Vh
|
Yk9zSVBkVTQyb3luYXgwbFJWbG9xK2tWZUdrCkh2MktoWmFOdkRldFNlQW1EMm9t
|
||||||
am93cE0ySUJPaWxnQmJFM095MU5yU3MKLS0tIHAybURnYU9oQVRzNWpDYjVqWWh4
|
ZHJRa3QrRzh0UElSNGkvSWcyYTUxZzgKLS0tIGdPT2dwWU9LbERYZGxzUTNEUHE1
|
||||||
RysrWHl4K25WcWVIcGdQTEs2ZFY5Y1kKc4F7mRe/BB7v7I0vimQiA2K11l2mcmOi
|
TmlIdWJjbmFvdnVQSURqUTBwbW9EL00Kaiy5ZGgHjKgAGvzbdjbwNExLf4MGDtiE
|
||||||
mOjubEQUkcGtbr2eXajvKEU7Rx/EPWWAZTvOY73n9fc2MQbPt8VEPw==
|
NJEvnmNWkQyEhtx9YzUteY02Tl/D7zBzAWHlV3RjAWTNIwLmm7QgCw==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age1gq8434ku0xekqmvnseeunv83e779cg03c06gwrusnymdsr3rpufqx6vr3m
|
- recipient: age1gq8434ku0xekqmvnseeunv83e779cg03c06gwrusnymdsr3rpufqx6vr3m
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB5amg3aWVRREFuYklaOVdz
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBVSDFIa1hNZU1BNWxHckk1
|
||||||
NWZHSzJ3bGF0UGREcUZjeUwvNHBpTTdIdDFnCktUVEZTM3dEOUd1NHpITmgxSWZS
|
UEdJT282Y054eVNpb3VOZ2t3S2NndTkycXdNCk1sNk5uL2xpbXk1MG95dVM1OWVD
|
||||||
VXFqOFJMdnB2ZDU3azVmaVhIOEQ2SlEKLS0tIFZkYmFseS9VVW9GeDEvd0VaN0d1
|
TldUWmsrSmxGeHYweWhGWXpSaE0xRmcKLS0tIFlVbEp2UU1kM0hhbHlSZm96TFl2
|
||||||
d1JJak1sTE5RWFBNelViRmhic1BSNncK9m0/4CLq53nA7xr7eTLhyvNhvHhTFBFE
|
TkVaK0xHN1NxNzlpUVYyY2RpdisrQVkKG+DlyZVruH64nB9UtCPMbXhmRHj+zpr6
|
||||||
e425OfpNYuB/qOq6PcBvRaJrEaNelf9/hXV7Ny/wBy1mzW0G4w0fVA==
|
CX4JOTXbUsueZIA4J/N93+d2J3V6yauoRYwCSl/JXX/gaSeSxF4z3A==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age1288993th0ge00reg4zqueyvmkrsvk829cs068eekjqfdprsrkeqql7mljk
|
- recipient: age1288993th0ge00reg4zqueyvmkrsvk829cs068eekjqfdprsrkeqql7mljk
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBGWWk3UDdEb1hlRDgzc0lO
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB3YWxPRTNaVTNLb2tYSzZ5
|
||||||
WkZWN3pDVmRqbEhrcXlSRkZwempFZlFIMUJjCng1cmlhOGVEYmQ3dXV1aVRNaWFv
|
ZmVMYXk2MlVXYzNtZGFJNlJLR2FIVWhKb1RFCmx5bXozeExlbEZBQzhpSHA0T1JE
|
||||||
emZWZTZTanZEcTBGRHJHWk94TGg5TUUKLS0tIHI4cXRHRmdYQ2ROWTBmbjJiTTdL
|
dFpHRm8rcFl1QjZ2anRGYjVxeGJqc0EKLS0tIGVibzRnRTA3Vk5yR3c4QVFsdy95
|
||||||
Z1I2blJsRFVvZm5xb2JQQ3RXT2xiYWcKrjLkx4USG75PyHNG+YZGGYP2hRBS3LBy
|
bG1tejcremFiUjZaL3hmc1gwYzJIOGMKFmXmY60vABYlpfop2F020SaOEwV4TNya
|
||||||
M+jKO27zg5yFEmukH+kSg1nFWyDyjIQv+FRvbRoakkyN+uprVjRVpg==
|
F0tgrIqbufU1Yw4RhxPdBb9Wv1cQu25lcqQLh1i4VH9BSaWKk6TDEA==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age1vpns76ykll8jgdlu3h05cur4ew2t3k7u03kxdg8y6ypfhsfhq9fqyurjey
|
- recipient: age1vpns76ykll8jgdlu3h05cur4ew2t3k7u03kxdg8y6ypfhsfhq9fqyurjey
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBSMWxhYml3VzRJL1lUMzZD
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAzRXM1VUJPNm90UUx4UEdZ
|
||||||
dGJvazBSOEtzOUpZUzl5akZMZVJhNUtVaERNCitlNll6WkdlOXoyRVNnOVRLMFFj
|
cDY5czVQaGl0MEdIMStjTnphTmR5ZkFWTDBjClhTd0xmaHNWUXo3NXR6eEUzTkg2
|
||||||
anE4VlBBUEdkdy9YaVZGNFVmcmIvalkKLS0tIHBwVnMwcjZvemZ2a2NHVDRtUkVs
|
L3BqT1N6bTNsYitmTGVpREtiWEpzdlEKLS0tIFUybTczSlRNbDkxRVZjSnFvdmtq
|
||||||
NEM2UFFaN0JkNCtXRXFhcTRMUnQya2sK1wHKS8h8rbrKjskkfaK2RP1ar2Mf6T/s
|
MVdRU3RPSHNqUzJzQWl1VVkyczFaencK72ZmWJIcfBTXlezmefvWeCGOC1BhpkXO
|
||||||
RkuoLtdnV0Iadfxf2gfzOVzxlK2XVKmuvY4lFy0jCPU6zH9+VYq0dA==
|
bm+X+ihzNfktuOCl6ZIMo2n4aJ3hYakrMp4npO10a6s4o/ldqeiATg==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age1hchvlf3apn8g8jq2743pw53sd6v6ay6xu6lqk0qufrjeccan9vzsc7hdfq
|
- recipient: age1hchvlf3apn8g8jq2743pw53sd6v6ay6xu6lqk0qufrjeccan9vzsc7hdfq
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBCS29UZWs3VlBBemVpaUs5
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBOL3F3OWRYVVdxWncwWmlk
|
||||||
Kzk3cmh3bThLNlhyQ0tNRzllNXVCNDNkZWlZCkNkKy9ValI4dVRraDV6SFZRckFU
|
SnloWFdscE02L3ZRa0JGcFlwSU9tU3JRakhnCjZyTnR3T051Tmt2NGM2dkFaNGJz
|
||||||
R0d0S0pVNzZvVDl2dnM4VzVLR1hLVkEKLS0tIHI5czJhUSszQzQwZ2R0RVdTaUEx
|
WVRnNDdNN0ozYXJnK0t4ZW5JRVQ2YzQKLS0tIFk0cFBxcVFETERNTGowMThJcDNR
|
||||||
M2UrMklzanIyYytSd1AreFgyM2djSVEKiDeQ0EnL6UKUGxsmvuoD4XWbXzYlvb1H
|
UW0wUUlFeHovSS9qYU5BRkJ6dnNjcWcKh2WcrmxsqMZeQ0/2HsaHeSqGsU3ILynU
|
||||||
+eO+cNIQooEWUfh4W59zoa+y0Yp6MT09IpUFNk6IbwyYm8E7jHwDLA==
|
SHBziWHGlFoNirCVjljh/Mw4DM8v66i0ztIQtWV5cFaFhu4kVda5jA==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age1a0477laj9sdh79wdas5v7hzk6au8fach74njg8epfw2rdht90qjsakkwd6
|
- recipient: age1w029fksjv0edrff9p7s03tgk3axecdkppqymfpwfn2nu2gsqqefqc37sxq
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBySk9wZzdqOWRYbUtDQ1ZO
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6ZkovUkMzdmhOUGpZUC91
|
||||||
b0t0SkovOGt5RmliMzBZRU5YSG0wZ2dwSDFjCnE0WThjbC9yd1RTS3ZjT1UvNDNL
|
d1JFZGk1T2hOS2dlVFNHRGJKVTUwdUhpQmg0CnEybzlRdjBLcjVEckNtR0xzMDVk
|
||||||
ZndmdnZNa3JrbXZnbVlWNis0ZDZrWEkKLS0tIHJpNXJsS0l0VzhLM1pQWUtSeXN2
|
dURWbFdnTXk1alV5cjRSMkRrZ21vTjAKLS0tIEtDZlFCTGdVMU1PUWdBYTVOcTU4
|
||||||
SkxRTTJIQTcxbTBaVFgvRStCNi9nOEkK1EvAo6sdt0Xy4VdFn+iSfbQcePjEbqI7
|
ZkZHYmJiTUdJUGZhTFdLM1EzdU9wNmsK3AqFfycJfrBpvnjccN1srNiVBCv107rt
|
||||||
AvJ0C/TmcfbzAJumVGUjBSN82/ZnrfPBpSbBbLheX+aZn1JqsSYJjQ==
|
b/O5zcqKGR3Nzey7zAhlxasPCRKARyBTo292ScZ03QMU8p8HIukdzg==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age1ha34qeksr4jeaecevqvv2afqem67eja2mvawlmrqsudch0e7fe7qtpsekv
|
- recipient: age1ha34qeksr4jeaecevqvv2afqem67eja2mvawlmrqsudch0e7fe7qtpsekv
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBEcTliQmhZSlFSbmFWYjN0
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBlOVNVNmFzbTE2NmdiM1dP
|
||||||
bURsbCtVUWsyMW1ZUmZlWTIzUXJVNGt2MEhRCis2ZnlFeHdBQmp5SStZOGhDTm1Q
|
TlhuTGYyQWlWeFlkaVU3Tml2aDNJbmxXVnlZCmJSb001OVJTaGpRcllzN2JSWDFF
|
||||||
akx2WjB3U2lSU0txcytucmNrT2lkOW8KLS0tIEVvRDEycE1rdmhNeXhLckZGM2hp
|
b1MyYjdKZys4ZHRoUmFhdG1oYTA2RzQKLS0tIEhGeU9YcW9Wc0ZZK3I5UjB0RHFm
|
||||||
V0sxMkFxeVdSYjRFS2lmNGdvQTUrM00K2PzXzZsznJgA6hsyyjIqq6p90RGw7iWk
|
bW1ucjZtYXFkT1A4bGszamFxaG5IaHMKqHuaWFi/ImnbDOZ9VisIN7jqplAYV8fo
|
||||||
eIo9whQnbqOGTWZYmcv8s5W2DW+6PloB2U8XzTFyS9NJKI7q7jqGfA==
|
y3PeVX34LcYE0d8cxbvH8CTs/Ubirt6P1obrmAL9W9Y0ozpqdqQSjA==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
lastmodified: "2024-06-02T22:09:06Z"
|
- recipient: age1cxt8kwqzx35yuldazcc49q88qvgy9ajkz30xu0h37uw3ts97jagqgmn2ga
|
||||||
mac: ENC[AES256_GCM,data:cxJq4EMEMVEw0IUXNwtyQj4MaYIJ/Xo4OaY+3VLgIhYw6oBO9CmJxgLuXcSnGnr23oNE5OQF6ALv+vxF46D1pI0V1zhqKL6zMIs0DzPBwo7Arg166w5kGAT274jK7YWymeJ7fafWXYubLlGUthyVJS1BkvlqIhoe2BlTZ3bPyBs=,iv:Z2Uh9Oo4q/ce6DDLShs7JAX3XFNAVOGBmBPvRbGxaaU=,tag:6qZhZ4+tgtXl60b0Lx7Taw==,type:str]
|
enc: |
|
||||||
pgp: []
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBXbXo4UWhoMUQxc1lMcnNB
|
||||||
|
VWc1MUJuS3NnVnh4U254TE0wSDJTMzFSM3lrCnhHbmk1N0VqTlViT2dtZndGT1pn
|
||||||
|
NmpPc01iMjk3TXZLU1htZjBvd2NBK2sKLS0tIEN3dGlRZHF5Ykgybjl6MzRBVUJ0
|
||||||
|
Rm92SGdwanFHZlp6U00wMDUzL3MrMzgKtCJqy+BfDMFQMHaIVPlFyzALBsb4Ekls
|
||||||
|
+r7ofZ1ZjSomBljYxVPhKE9XaZJe6bqICEhJBCpODyxavfh8HmxHDQ==
|
||||||
|
-----END AGE ENCRYPTED FILE-----
|
||||||
|
- recipient: age16prza00sqzuhwwcyakj6z4hvwkruwkqpmmrsn94a5ucgpkelncdq2ldctk
|
||||||
|
enc: |
|
||||||
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBiQTRXTHljd2UrbFJOWUI4
|
||||||
|
WGRYcEVrZDJGM3hpVVNmVXlSREYzc1FHRlhFCjZHa2VTTzFHR1RXRmllT1huVDNV
|
||||||
|
UkRKaEQrWjF5eHpiaUg1NExnME5veFkKLS0tIFpZY1RrOVNTTjU0N2Y1dFN6QWpX
|
||||||
|
MTM3NDJrV1JZNE5pWGNLMUg1OFFwYUUKMx0hpB3iunnCbJ/+zWetdp1NI/LsrUTe
|
||||||
|
J84+aDoe7/WJYT0FLMlC0RK80txm6ztVygoyRdN0cRKx1z3KqPmavw==
|
||||||
|
-----END AGE ENCRYPTED FILE-----
|
||||||
|
lastmodified: "2025-02-11T21:18:22Z"
|
||||||
|
mac: ENC[AES256_GCM,data:5//boMp1awc/2XAkSASSCuobpkxa0E6IKf3GR8xHpMoCD30FJsCwV7PgX3fR8OuLEhOJ7UguqMNQdNqG37RMacreuDmI1J8oCFKp+3M2j4kCbXaEo8bw7WAtyjUez+SAXKzZWYmBibH0KOy6jdt+v0fdgy5hMBT4IFDofYRsyD0=,iv:6pD+SLwncpmal/FR4U8It2njvaQfUzzpALBCxa0NyME=,tag:4QN8ZFjdqck5ZgulF+FtbA==,type:str]
|
||||||
unencrypted_suffix: _unencrypted
|
unencrypted_suffix: _unencrypted
|
||||||
version: 3.8.1
|
version: 3.9.4
|
||||||
|
|||||||
55
services/actions-runner/default.nix
Normal file
55
services/actions-runner/default.nix
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
{ pkgs, config, ... }:
|
||||||
|
{
|
||||||
|
sops.secrets."actions-token-1" = {
|
||||||
|
sopsFile = ../../secrets/nix-cache01/actions_token_1;
|
||||||
|
format = "binary";
|
||||||
|
};
|
||||||
|
|
||||||
|
virtualisation.podman = {
|
||||||
|
enable = true;
|
||||||
|
dockerCompat = true;
|
||||||
|
};
|
||||||
|
|
||||||
|
services.gitea-actions-runner.instances = {
|
||||||
|
actions1 = {
|
||||||
|
enable = true;
|
||||||
|
tokenFile = config.sops.secrets.actions-token-1.path;
|
||||||
|
name = "actions1.home.2rjus.net";
|
||||||
|
settings = {
|
||||||
|
log = {
|
||||||
|
level = "debug";
|
||||||
|
};
|
||||||
|
|
||||||
|
runner = {
|
||||||
|
file = ".runner";
|
||||||
|
capacity = 4;
|
||||||
|
timeout = "2h";
|
||||||
|
shutdown_timeout = "10m";
|
||||||
|
insecure = false;
|
||||||
|
fetch_timeout = "10s";
|
||||||
|
fetch_interval = "30s";
|
||||||
|
};
|
||||||
|
|
||||||
|
cache = {
|
||||||
|
enabled = true;
|
||||||
|
dir = "/var/cache/gitea-actions1";
|
||||||
|
};
|
||||||
|
|
||||||
|
container = {
|
||||||
|
privileged = false;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
labels =
|
||||||
|
builtins.map (n: "${n}:docker://gitea/runner-images:${n}") [
|
||||||
|
"ubuntu-latest"
|
||||||
|
"ubuntu-latest-slim"
|
||||||
|
"ubuntu-latest-full"
|
||||||
|
]
|
||||||
|
++ [
|
||||||
|
"homelab"
|
||||||
|
];
|
||||||
|
|
||||||
|
url = "https://git.t-juice.club";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
87
services/authelia/default.nix
Normal file
87
services/authelia/default.nix
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
{ config, ... }:
|
||||||
|
{
|
||||||
|
sops.secrets.authelia_ldap_password = {
|
||||||
|
format = "yaml";
|
||||||
|
sopsFile = ../../secrets/auth01/secrets.yaml;
|
||||||
|
key = "authelia_ldap_password";
|
||||||
|
restartUnits = [ "authelia-auth.service" ];
|
||||||
|
owner = "authelia-auth";
|
||||||
|
group = "authelia-auth";
|
||||||
|
};
|
||||||
|
sops.secrets.authelia_jwt_secret = {
|
||||||
|
format = "yaml";
|
||||||
|
sopsFile = ../../secrets/auth01/secrets.yaml;
|
||||||
|
key = "authelia_jwt_secret";
|
||||||
|
restartUnits = [ "authelia-auth.service" ];
|
||||||
|
owner = "authelia-auth";
|
||||||
|
group = "authelia-auth";
|
||||||
|
};
|
||||||
|
sops.secrets.authelia_storage_encryption_key_file = {
|
||||||
|
format = "yaml";
|
||||||
|
key = "authelia_storage_encryption_key_file";
|
||||||
|
sopsFile = ../../secrets/auth01/secrets.yaml;
|
||||||
|
restartUnits = [ "authelia-auth.service" ];
|
||||||
|
owner = "authelia-auth";
|
||||||
|
group = "authelia-auth";
|
||||||
|
};
|
||||||
|
sops.secrets.authelia_session_secret = {
|
||||||
|
format = "yaml";
|
||||||
|
key = "authelia_session_secret";
|
||||||
|
sopsFile = ../../secrets/auth01/secrets.yaml;
|
||||||
|
restartUnits = [ "authelia-auth.service" ];
|
||||||
|
owner = "authelia-auth";
|
||||||
|
group = "authelia-auth";
|
||||||
|
};
|
||||||
|
|
||||||
|
services.authelia.instances."auth" = {
|
||||||
|
enable = true;
|
||||||
|
environmentVariables = {
|
||||||
|
AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE =
|
||||||
|
config.sops.secrets.authelia_ldap_password.path;
|
||||||
|
AUTHELIA_SESSION_SECRET_FILE = config.sops.secrets.authelia_session_secret.path;
|
||||||
|
};
|
||||||
|
secrets = {
|
||||||
|
jwtSecretFile = config.sops.secrets.authelia_jwt_secret.path;
|
||||||
|
storageEncryptionKeyFile = config.sops.secrets.authelia_storage_encryption_key_file.path;
|
||||||
|
};
|
||||||
|
settings = {
|
||||||
|
access_control = {
|
||||||
|
default_policy = "two_factor";
|
||||||
|
};
|
||||||
|
session = {
|
||||||
|
# secret = "{{- fileContent \"${config.sops.secrets.authelia_session_secret.path}\" }}";
|
||||||
|
cookies = [
|
||||||
|
{
|
||||||
|
domain = "home.2rjus.net";
|
||||||
|
authelia_url = "https://auth.home.2rjus.net";
|
||||||
|
default_redirection_url = "https://dashboard.home.2rjus.net";
|
||||||
|
name = "authelia_session";
|
||||||
|
same_site = "lax";
|
||||||
|
inactivity = "1h";
|
||||||
|
expiration = "24h";
|
||||||
|
remember_me = "30d";
|
||||||
|
}
|
||||||
|
];
|
||||||
|
};
|
||||||
|
notifier = {
|
||||||
|
filesystem.filename = "/var/lib/authelia-auth/notification.txt";
|
||||||
|
};
|
||||||
|
storage = {
|
||||||
|
local.path = "/var/lib/authelia-auth/db.sqlite3";
|
||||||
|
};
|
||||||
|
authentication_backend = {
|
||||||
|
password_reset = {
|
||||||
|
disable = false;
|
||||||
|
};
|
||||||
|
ldap = {
|
||||||
|
address = "ldap://127.0.0.1:3890";
|
||||||
|
implementation = "lldap";
|
||||||
|
timeout = "5s";
|
||||||
|
base_dn = "dc=home,dc=2rjus,dc=net";
|
||||||
|
user = "uid=authelia_ldap_user,ou=people,dc=home,dc=2rjus,dc=net";
|
||||||
|
# password = "{{- fileContent \"${config.sops.secrets.authelia_ldap_password.path}\" -}}";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -37,6 +37,7 @@
|
|||||||
address = "0.0.0.0";
|
address = "0.0.0.0";
|
||||||
port = 443;
|
port = 443;
|
||||||
settings = {
|
settings = {
|
||||||
|
metricsAddress = ":9000";
|
||||||
authority = {
|
authority = {
|
||||||
provisioners = [
|
provisioners = [
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -9,10 +9,13 @@
|
|||||||
extraPackages =
|
extraPackages =
|
||||||
python3Packages: with pkgs.unstable.python313Packages; [
|
python3Packages: with pkgs.unstable.python313Packages; [
|
||||||
aiopyarr
|
aiopyarr
|
||||||
|
aioshelly
|
||||||
bellows
|
bellows
|
||||||
gtts
|
gtts
|
||||||
|
ha-silabs-firmware-client
|
||||||
isal
|
isal
|
||||||
paho-mqtt
|
paho-mqtt
|
||||||
|
prometheus-client
|
||||||
pykodi
|
pykodi
|
||||||
python-roborock
|
python-roborock
|
||||||
radios
|
radios
|
||||||
|
|||||||
@@ -2,70 +2,127 @@
|
|||||||
{
|
{
|
||||||
services.caddy = {
|
services.caddy = {
|
||||||
enable = true;
|
enable = true;
|
||||||
|
package = pkgs.unstable.caddy;
|
||||||
configFile = pkgs.writeText "Caddyfile" ''
|
configFile = pkgs.writeText "Caddyfile" ''
|
||||||
{
|
{
|
||||||
acme_ca https://ca.home.2rjus.net/acme/acme/directory
|
acme_ca https://ca.home.2rjus.net/acme/acme/directory
|
||||||
|
|
||||||
|
metrics {
|
||||||
|
per_host
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
nzbget.home.2rjus.net {
|
nzbget.home.2rjus.net {
|
||||||
log {
|
log {
|
||||||
output file /var/log/caddy/nzbget.log
|
output file /var/log/caddy/nzbget.log {
|
||||||
|
mode 644
|
||||||
|
}
|
||||||
}
|
}
|
||||||
reverse_proxy http://nzbget-jail.home.2rjus.net:6789
|
reverse_proxy http://nzbget-jail.home.2rjus.net:6789
|
||||||
}
|
}
|
||||||
|
|
||||||
radarr.home.2rjus.net {
|
radarr.home.2rjus.net {
|
||||||
log {
|
log {
|
||||||
output file /var/log/caddy/radarr.log
|
output file /var/log/caddy/radarr.log {
|
||||||
|
mode 644
|
||||||
|
}
|
||||||
}
|
}
|
||||||
reverse_proxy http://radarr-jail.home.2rjus.net:7878
|
reverse_proxy http://radarr-jail.home.2rjus.net:7878
|
||||||
}
|
}
|
||||||
|
|
||||||
sonarr.home.2rjus.net {
|
sonarr.home.2rjus.net {
|
||||||
log {
|
log {
|
||||||
output file /var/log/caddy/sonarr.log
|
output file /var/log/caddy/sonarr.log {
|
||||||
|
mode 644
|
||||||
|
}
|
||||||
}
|
}
|
||||||
reverse_proxy http://sonarr-jail.home.2rjus.net:8989
|
reverse_proxy http://sonarr-jail.home.2rjus.net:8989
|
||||||
}
|
}
|
||||||
ha.home.2rjus.net {
|
ha.home.2rjus.net {
|
||||||
log {
|
log {
|
||||||
output file /var/log/caddy/ha.log
|
output file /var/log/caddy/ha.log {
|
||||||
|
mode 644
|
||||||
|
}
|
||||||
}
|
}
|
||||||
reverse_proxy http://ha1.home.2rjus.net:8123
|
reverse_proxy http://ha1.home.2rjus.net:8123
|
||||||
}
|
}
|
||||||
z2m.home.2rjus.net {
|
z2m.home.2rjus.net {
|
||||||
log {
|
log {
|
||||||
output file /var/log/caddy/z2m.log
|
output file /var/log/caddy/z2m.log {
|
||||||
|
mode 644
|
||||||
|
}
|
||||||
}
|
}
|
||||||
reverse_proxy http://ha1.home.2rjus.net:8080
|
reverse_proxy http://ha1.home.2rjus.net:8080
|
||||||
}
|
}
|
||||||
prometheus.home.2rjus.net {
|
prometheus.home.2rjus.net {
|
||||||
log {
|
log {
|
||||||
output file /var/log/caddy/prometheus.log
|
output file /var/log/caddy/prometheus.log {
|
||||||
|
mode 644
|
||||||
|
}
|
||||||
}
|
}
|
||||||
reverse_proxy http://monitoring01.home.2rjus.net:9090
|
reverse_proxy http://monitoring01.home.2rjus.net:9090
|
||||||
}
|
}
|
||||||
alertmanager.home.2rjus.net {
|
alertmanager.home.2rjus.net {
|
||||||
log {
|
log {
|
||||||
output file /var/log/caddy/alertmanager.log
|
output file /var/log/caddy/alertmanager.log {
|
||||||
|
mode 644
|
||||||
|
}
|
||||||
}
|
}
|
||||||
reverse_proxy http://monitoring01.home.2rjus.net:9093
|
reverse_proxy http://monitoring01.home.2rjus.net:9093
|
||||||
}
|
}
|
||||||
grafana.home.2rjus.net {
|
grafana.home.2rjus.net {
|
||||||
log {
|
log {
|
||||||
output file /var/log/caddy/grafana.log
|
output file /var/log/caddy/grafana.log {
|
||||||
|
mode 644
|
||||||
|
}
|
||||||
}
|
}
|
||||||
reverse_proxy http://monitoring01.home.2rjus.net:3000
|
reverse_proxy http://monitoring01.home.2rjus.net:3000
|
||||||
}
|
}
|
||||||
jelly.home.2rjus.net {
|
jelly.home.2rjus.net {
|
||||||
log {
|
log {
|
||||||
output file /var/log/caddy/jelly.log
|
output file /var/log/caddy/jelly.log {
|
||||||
|
mode 644
|
||||||
|
}
|
||||||
}
|
}
|
||||||
reverse_proxy http://jelly01.home.2rjus.net:8096
|
reverse_proxy http://jelly01.home.2rjus.net:8096
|
||||||
}
|
}
|
||||||
|
lldap.home.2rjus.net {
|
||||||
|
log {
|
||||||
|
output file /var/log/caddy/auth.log {
|
||||||
|
mode 644
|
||||||
|
}
|
||||||
|
}
|
||||||
|
reverse_proxy http://auth01.home.2rjus.net:17170
|
||||||
|
}
|
||||||
|
auth.home.2rjus.net {
|
||||||
|
log {
|
||||||
|
output file /var/log/caddy/auth.log {
|
||||||
|
mode 644
|
||||||
|
}
|
||||||
|
}
|
||||||
|
reverse_proxy http://auth01.home.2rjus.net:9091
|
||||||
|
}
|
||||||
|
pyroscope.home.2rjus.net {
|
||||||
|
log {
|
||||||
|
output file /var/log/caddy/pyroscope.log {
|
||||||
|
mode 644
|
||||||
|
}
|
||||||
|
}
|
||||||
|
reverse_proxy http://monitoring01.home.2rjus.net:4040
|
||||||
|
}
|
||||||
|
pushgw.home.2rjus.net {
|
||||||
|
log {
|
||||||
|
output file /var/log/caddy/pushgw.log {
|
||||||
|
mode 644
|
||||||
|
}
|
||||||
|
}
|
||||||
|
reverse_proxy http://monitoring01.home.2rjus.net:9091
|
||||||
|
}
|
||||||
http://http-proxy.home.2rjus.net/metrics {
|
http://http-proxy.home.2rjus.net/metrics {
|
||||||
log {
|
log {
|
||||||
output file /var/log/caddy/caddy-metrics.log
|
output file /var/log/caddy/caddy-metrics.log {
|
||||||
|
mode 644
|
||||||
|
}
|
||||||
}
|
}
|
||||||
metrics
|
metrics
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +0,0 @@
|
|||||||
{ pkgs, config, ... }:
|
|
||||||
{
|
|
||||||
virtualisation.incus = {
|
|
||||||
enable = true;
|
|
||||||
};
|
|
||||||
networking.firewall.allowedTCPPorts = [ 8443 ];
|
|
||||||
}
|
|
||||||
38
services/lldap/default.nix
Normal file
38
services/lldap/default.nix
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
{ config, ... }:
|
||||||
|
{
|
||||||
|
sops.secrets.lldap_user_pass = {
|
||||||
|
format = "yaml";
|
||||||
|
key = "lldap_user_pass";
|
||||||
|
sopsFile = ../../secrets/auth01/secrets.yaml;
|
||||||
|
restartUnits = [ "lldap.service" ];
|
||||||
|
group = "acme";
|
||||||
|
mode = "0440";
|
||||||
|
};
|
||||||
|
|
||||||
|
services.lldap = {
|
||||||
|
enable = true;
|
||||||
|
settings = {
|
||||||
|
ldap_base_dn = "dc=home,dc=2rjus,dc=net";
|
||||||
|
ldap_user_email = "admin@home.2rjus.net";
|
||||||
|
ldap_user_dn = "admin";
|
||||||
|
ldap_user_pass_file = config.sops.secrets.lldap_user_pass.path;
|
||||||
|
ldaps_options = {
|
||||||
|
enabled = true;
|
||||||
|
port = 6360;
|
||||||
|
cert_file = "/var/lib/acme/auth01.home.2rjus.net/cert.pem";
|
||||||
|
key_file = "/var/lib/acme/auth01.home.2rjus.net/key.pem";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
systemd.services.lldap = {
|
||||||
|
serviceConfig = {
|
||||||
|
SupplementaryGroups = [ "acme" ];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
security.acme.certs."auth01.home.2rjus.net" = {
|
||||||
|
listenHTTP = ":80";
|
||||||
|
reloadServices = [ "lldap" ];
|
||||||
|
extraDomainNames = [ "ldap.home.2rjus.net" ];
|
||||||
|
enableDebugLogs = true;
|
||||||
|
};
|
||||||
|
}
|
||||||
43
services/monitoring/alerttonotify.nix
Normal file
43
services/monitoring/alerttonotify.nix
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
{ pkgs, config, ... }:
|
||||||
|
{
|
||||||
|
sops.secrets."nats_nkey" = { };
|
||||||
|
systemd.services."alerttonotify" = {
|
||||||
|
enable = true;
|
||||||
|
wants = [ "network-online.target" ];
|
||||||
|
after = [
|
||||||
|
"network-online.target"
|
||||||
|
"sops-nix.service"
|
||||||
|
];
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
restartIfChanged = true;
|
||||||
|
|
||||||
|
environment = {
|
||||||
|
NATS_URL = "nats://nats1.home.2rjus.net:4222";
|
||||||
|
NATS_NKEY_FILE = "%d/nats_nkey";
|
||||||
|
};
|
||||||
|
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "exec";
|
||||||
|
ExecStart = "${pkgs.alerttonotify}/bin/alerttonotify";
|
||||||
|
|
||||||
|
CapabilityBoundingSet = "";
|
||||||
|
DynamicUser = "yes";
|
||||||
|
LoadCredential = "nats_nkey:/run/secrets/nats_nkey";
|
||||||
|
LockPersonality = "yes";
|
||||||
|
MemoryDenyWriteExecute = "yes";
|
||||||
|
PrivateDevices = "yes";
|
||||||
|
PrivateUsers = "yes";
|
||||||
|
ProtectClock = "yes";
|
||||||
|
ProtectControlGroups = "yes";
|
||||||
|
ProtectHome = "yes";
|
||||||
|
ProtectHostname = "yes";
|
||||||
|
ProtectKernelLogs = "yes";
|
||||||
|
ProtectKernelModules = "yes";
|
||||||
|
RestrictAddressFamilies = "AF_INET AF_INET6";
|
||||||
|
RestrictNamespaces = "yes";
|
||||||
|
RestrictRealtime = "yes";
|
||||||
|
SystemCallArchitectures = "native";
|
||||||
|
SystemCallFilter = "~@privileged";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
41
services/monitoring/alloy.nix
Normal file
41
services/monitoring/alloy.nix
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
services.alloy = {
|
||||||
|
enable = true;
|
||||||
|
};
|
||||||
|
|
||||||
|
environment.etc."alloy/config.alloy" = {
|
||||||
|
enable = true;
|
||||||
|
mode = "0644";
|
||||||
|
text = ''
|
||||||
|
pyroscope.write "local_pyroscope" {
|
||||||
|
endpoint {
|
||||||
|
url = "http://localhost:4040"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pyroscope.scrape "labmon" {
|
||||||
|
targets = [{"__address__" = "localhost:9969", "service_name" = "labmon"}]
|
||||||
|
forward_to = [pyroscope.write.local_pyroscope.receiver]
|
||||||
|
|
||||||
|
profiling_config {
|
||||||
|
profile.process_cpu {
|
||||||
|
enabled = true
|
||||||
|
}
|
||||||
|
profile.memory {
|
||||||
|
enabled = true
|
||||||
|
}
|
||||||
|
profile.mutex {
|
||||||
|
enabled = true
|
||||||
|
}
|
||||||
|
profile.block {
|
||||||
|
enabled = true
|
||||||
|
}
|
||||||
|
profile.goroutine {
|
||||||
|
enabled = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -5,5 +5,9 @@
|
|||||||
./grafana.nix
|
./grafana.nix
|
||||||
./prometheus.nix
|
./prometheus.nix
|
||||||
./pve.nix
|
./pve.nix
|
||||||
|
./alerttonotify.nix
|
||||||
|
./pyroscope.nix
|
||||||
|
./alloy.nix
|
||||||
|
./tempo.nix
|
||||||
];
|
];
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,18 +8,18 @@
|
|||||||
global = {
|
global = {
|
||||||
};
|
};
|
||||||
route = {
|
route = {
|
||||||
receiver = "webhook_gunter";
|
receiver = "webhook_natstonotify";
|
||||||
group_wait = "30s";
|
group_wait = "30s";
|
||||||
group_interval = "5m";
|
group_interval = "5m";
|
||||||
repeat_interval = "12h";
|
repeat_interval = "1h";
|
||||||
group_by = [ "alertname" ];
|
group_by = [ "alertname" ];
|
||||||
};
|
};
|
||||||
receivers = [
|
receivers = [
|
||||||
{
|
{
|
||||||
name = "webhook_gunter";
|
name = "webhook_natstonotify";
|
||||||
webhook_configs = [
|
webhook_configs = [
|
||||||
{
|
{
|
||||||
url = "http://gunter.home.2rjus.net:5001/alert";
|
url = "http://localhost:5001/alert";
|
||||||
}
|
}
|
||||||
];
|
];
|
||||||
}
|
}
|
||||||
@@ -35,93 +35,13 @@
|
|||||||
];
|
];
|
||||||
}
|
}
|
||||||
];
|
];
|
||||||
|
|
||||||
|
retentionTime = "30d";
|
||||||
globalConfig = {
|
globalConfig = {
|
||||||
scrape_interval = "15s";
|
scrape_interval = "15s";
|
||||||
};
|
};
|
||||||
rules = [
|
rules = [
|
||||||
''
|
(builtins.readFile ./rules.yml)
|
||||||
groups:
|
|
||||||
- name: common_rules
|
|
||||||
rules:
|
|
||||||
- alert: node_down
|
|
||||||
expr: up == 0
|
|
||||||
for: 5m
|
|
||||||
labels:
|
|
||||||
severity: critical
|
|
||||||
annotations:
|
|
||||||
summary: "Instance {{ $labels.instance }} down"
|
|
||||||
description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes."
|
|
||||||
- alert: low_disk_space
|
|
||||||
expr: node_filesystem_free_bytes{mountpoint="/"} / node_filesystem_size_bytes{mountpoint="/"} * 100 < 10
|
|
||||||
for: 5m
|
|
||||||
labels:
|
|
||||||
severity: warning
|
|
||||||
annotations:
|
|
||||||
summary: "Disk space low on {{ $labels.instance }}"
|
|
||||||
description: "Disk space is low on {{ $labels.instance }}. Please check."
|
|
||||||
- alert: high_cpu_load
|
|
||||||
expr: node_load1 > 1
|
|
||||||
for: 5m
|
|
||||||
labels:
|
|
||||||
severity: warning
|
|
||||||
annotations:
|
|
||||||
summary: "High CPU load on {{ $labels.instance }}"
|
|
||||||
description: "CPU load is high on {{ $labels.instance }}. Please check."
|
|
||||||
- name: nameserver_rules
|
|
||||||
rules:
|
|
||||||
- alert: unbound_down
|
|
||||||
expr: node_systemd_unit_state {instance =~ "ns.+", name = "unbound.service", state = "active"} == 0
|
|
||||||
for: 5m
|
|
||||||
labels:
|
|
||||||
severity: critical
|
|
||||||
annotations:
|
|
||||||
summary: "Unbound not running on {{ $labels.instance }}"
|
|
||||||
description: "Unbound has been down on {{ $labels.instance }} more than 5 minutes."
|
|
||||||
- alert: nsd_down
|
|
||||||
expr: node_systemd_unit_state {instance =~ "ns.+", name = "nsd.service", state = "active"} == 0
|
|
||||||
for: 5m
|
|
||||||
labels:
|
|
||||||
severity: critical
|
|
||||||
annotations:
|
|
||||||
summary: "NSD not running on {{ $labels.instance }}"
|
|
||||||
description: "NSD has been down on {{ $labels.instance }} more than 5 minutes."
|
|
||||||
- name: http-proxy_rules
|
|
||||||
rules:
|
|
||||||
- alert: caddy_down
|
|
||||||
expr: node_systemd_unit_state {instance="http-proxy.home.2rjus.net:9100", name = "caddy.service", state = "active"} == 0
|
|
||||||
for: 5m
|
|
||||||
labels:
|
|
||||||
severity: critical
|
|
||||||
annotations:
|
|
||||||
summary: "Caddy not running on {{ $labels.instance }}"
|
|
||||||
description: "Caddy has been down on {{ $labels.instance }} more than 5 minutes."
|
|
||||||
- name: home_assistant_rules
|
|
||||||
rules:
|
|
||||||
- alert: home_assistant_down
|
|
||||||
expr: node_systemd_unit_state {instance="ha1.home.2rjus.net:9100", name="home-assistant.service", state="active"} == 0
|
|
||||||
for: 5m
|
|
||||||
labels:
|
|
||||||
severity: critical
|
|
||||||
annotations:
|
|
||||||
summary: "Home assistant not running on {{ $labels.instance }}"
|
|
||||||
description: "Home assistant has been down on {{ $labels.instance }} more than 5 minutes."
|
|
||||||
- alert: zigbee2qmtt_down
|
|
||||||
expr: node_systemd_unit_state {instance = "ha1.home.2rjus.net:9100", name = "zigbee2mqtt.service", state = "active"} == 0
|
|
||||||
for: 5m
|
|
||||||
labels:
|
|
||||||
severity: critical
|
|
||||||
annotations:
|
|
||||||
summary: "Zigbee2mqtt not running on {{ $labels.instance }}"
|
|
||||||
description: "Zigbee2mqtt has been down on {{ $labels.instance }} more than 5 minutes."
|
|
||||||
- alert: mosquitto_down
|
|
||||||
expr: node_systemd_unit_state {instance = "ha1.home.2rjus.net:9100", name = "mosquitto.service", state = "active"} == 0
|
|
||||||
for: 5m
|
|
||||||
labels:
|
|
||||||
severity: critical
|
|
||||||
annotations:
|
|
||||||
summary: "Mosquitto not running on {{ $labels.instance }}"
|
|
||||||
description: "Mosquitto has been down on {{ $labels.instance }} more than 5 minutes."
|
|
||||||
''
|
|
||||||
];
|
];
|
||||||
|
|
||||||
scrapeConfigs = [
|
scrapeConfigs = [
|
||||||
@@ -140,6 +60,7 @@
|
|||||||
"ns1.home.2rjus.net:9100"
|
"ns1.home.2rjus.net:9100"
|
||||||
"ns2.home.2rjus.net:9100"
|
"ns2.home.2rjus.net:9100"
|
||||||
"pgdb1.home.2rjus.net:9100"
|
"pgdb1.home.2rjus.net:9100"
|
||||||
|
"nats1.home.2rjus.net:9100"
|
||||||
];
|
];
|
||||||
}
|
}
|
||||||
];
|
];
|
||||||
@@ -228,6 +149,80 @@
|
|||||||
}
|
}
|
||||||
];
|
];
|
||||||
}
|
}
|
||||||
|
{
|
||||||
|
job_name = "smartctl";
|
||||||
|
static_configs = [
|
||||||
|
{
|
||||||
|
targets = [ "gunter.home.2rjus.net:9633" ];
|
||||||
|
}
|
||||||
];
|
];
|
||||||
|
}
|
||||||
|
{
|
||||||
|
job_name = "wireguard";
|
||||||
|
static_configs = [
|
||||||
|
{
|
||||||
|
targets = [ "http-proxy.home.2rjus.net:9586" ];
|
||||||
|
}
|
||||||
|
];
|
||||||
|
}
|
||||||
|
{
|
||||||
|
job_name = "home-assistant";
|
||||||
|
scrape_interval = "60s";
|
||||||
|
metrics_path = "/api/prometheus";
|
||||||
|
static_configs = [
|
||||||
|
{
|
||||||
|
targets = [ "ha1.home.2rjus.net:8123" ];
|
||||||
|
}
|
||||||
|
];
|
||||||
|
}
|
||||||
|
{
|
||||||
|
job_name = "ghettoptt";
|
||||||
|
static_configs = [
|
||||||
|
{
|
||||||
|
targets = [ "gunter.home.2rjus.net:8989" ];
|
||||||
|
}
|
||||||
|
];
|
||||||
|
}
|
||||||
|
{
|
||||||
|
job_name = "step-ca";
|
||||||
|
static_configs = [
|
||||||
|
{
|
||||||
|
targets = [ "ca.home.2rjus.net:9000" ];
|
||||||
|
}
|
||||||
|
];
|
||||||
|
}
|
||||||
|
{
|
||||||
|
job_name = "labmon";
|
||||||
|
static_configs = [
|
||||||
|
{
|
||||||
|
targets = [ "monitoring01.home.2rjus.net:9969" ];
|
||||||
|
}
|
||||||
|
];
|
||||||
|
}
|
||||||
|
{
|
||||||
|
job_name = "pushgateway";
|
||||||
|
honor_labels = true;
|
||||||
|
static_configs = [
|
||||||
|
{
|
||||||
|
targets = [ "localhost:9091" ];
|
||||||
|
}
|
||||||
|
];
|
||||||
|
}
|
||||||
|
{
|
||||||
|
job_name = "nix-cache_caddy";
|
||||||
|
scheme = "https";
|
||||||
|
static_configs = [
|
||||||
|
{
|
||||||
|
targets = [ "nix-cache.home.2rjus.net" ];
|
||||||
|
}
|
||||||
|
];
|
||||||
|
}
|
||||||
|
];
|
||||||
|
pushgateway = {
|
||||||
|
enable = true;
|
||||||
|
web = {
|
||||||
|
external-url = "https://pushgw.home.2rjus.net";
|
||||||
|
};
|
||||||
|
};
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|||||||
8
services/monitoring/pyroscope.nix
Normal file
8
services/monitoring/pyroscope.nix
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
virtualisation.oci-containers.containers.pyroscope = {
|
||||||
|
pull = "missing";
|
||||||
|
image = "grafana/pyroscope:latest";
|
||||||
|
ports = [ "4040:4040" ];
|
||||||
|
};
|
||||||
|
}
|
||||||
250
services/monitoring/rules.yml
Normal file
250
services/monitoring/rules.yml
Normal file
@@ -0,0 +1,250 @@
|
|||||||
|
groups:
|
||||||
|
- name: common_rules
|
||||||
|
rules:
|
||||||
|
- alert: node_down
|
||||||
|
expr: up == 0
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "Instance {{ $labels.instance }} down"
|
||||||
|
description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes."
|
||||||
|
- alert: low_disk_space
|
||||||
|
expr: node_filesystem_free_bytes{mountpoint="/"} / node_filesystem_size_bytes{mountpoint="/"} * 100 < 10
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
summary: "Disk space low on {{ $labels.instance }}"
|
||||||
|
description: "Disk space is low on {{ $labels.instance }}. Please check."
|
||||||
|
- alert: high_cpu_load
|
||||||
|
expr: max(node_load5{}) by (instance) > (count by (instance)(node_cpu_seconds_total{mode="idle"}) * 0.7)
|
||||||
|
for: 15m
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
summary: "High CPU load on {{ $labels.instance }}"
|
||||||
|
description: "CPU load is high on {{ $labels.instance }}. Please check."
|
||||||
|
- alert: low_memory
|
||||||
|
expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10
|
||||||
|
for: 2m
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
summary: Low available memory on {{ $labels.instance }}
|
||||||
|
description: Node memory is filling up (< 10% left)\n VALUE = {{ $value }}
|
||||||
|
- alert: oom_kill
|
||||||
|
expr: increase(node_vmstat_oom_kill[1m]) > 0
|
||||||
|
for: 0m
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
summary: Host OOM kill detected on {{ $labels.instance }}
|
||||||
|
description: OOM kill detected
|
||||||
|
- alert: nixos_upgrade_failed
|
||||||
|
expr: node_systemd_unit_state{name="nixos-upgrade.service", state="failed"} == 1
|
||||||
|
for: 0m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "NixOS upgrade failed on {{ $labels.instance }}"
|
||||||
|
description: "NixOS upgrade failed on {{ $labels.instance }}"
|
||||||
|
- alert: promtail_not_running
|
||||||
|
expr: node_systemd_unit_state{name="promtail.service", state="active"} == 0
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
summary: "Promtail service not running on {{ $labels.instance }}"
|
||||||
|
description: "The promtail service has not been active on {{ $labels.instance }} for 5 minutes."
|
||||||
|
- name: nameserver_rules
|
||||||
|
rules:
|
||||||
|
- alert: unbound_down
|
||||||
|
expr: node_systemd_unit_state {instance =~ "ns.+", name = "unbound.service", state = "active"} == 0
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "Unbound not running on {{ $labels.instance }}"
|
||||||
|
description: "Unbound has been down on {{ $labels.instance }} more than 5 minutes."
|
||||||
|
- alert: nsd_down
|
||||||
|
expr: node_systemd_unit_state {instance =~ "ns.+", name = "nsd.service", state = "active"} == 0
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "NSD not running on {{ $labels.instance }}"
|
||||||
|
description: "NSD has been down on {{ $labels.instance }} more than 5 minutes."
|
||||||
|
- name: http-proxy_rules
|
||||||
|
rules:
|
||||||
|
- alert: caddy_down
|
||||||
|
expr: node_systemd_unit_state {instance="http-proxy.home.2rjus.net:9100", name = "caddy.service", state = "active"} == 0
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "Caddy not running on {{ $labels.instance }}"
|
||||||
|
description: "Caddy has been down on {{ $labels.instance }} more than 5 minutes."
|
||||||
|
- name: nats_rules
|
||||||
|
rules:
|
||||||
|
- alert: nats_down
|
||||||
|
expr: node_systemd_unit_state {instance="nats1.home.2rjus.net:9100", name = "nats.service", state = "active"} == 0
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "NATS not running on {{ $labels.instance }}"
|
||||||
|
description: "NATS has been down on {{ $labels.instance }} more than 5 minutes."
|
||||||
|
- name: nix_cache_rules
|
||||||
|
rules:
|
||||||
|
- alert: build-flakes_service_not_active_recently
|
||||||
|
expr: count_over_time(node_systemd_unit_state{instance="nix-cache01.home.2rjus.net:9100", name="build-flakes.service", state="active"}[1h]) < 1
|
||||||
|
for: 0m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "The build-flakes service on {{ $labels.instance }} has not run recently"
|
||||||
|
description: "The build-flakes service on {{ $labels.instance }} has not run recently"
|
||||||
|
- alert: build_flakes_error
|
||||||
|
expr: build_flakes_error == 1
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
summary: "The build-flakes job has failed for host {{ $labels.host }}."
|
||||||
|
description: "The build-flakes job has failed for host {{ $labels.host }}."
|
||||||
|
- alert: harmonia_down
|
||||||
|
expr: node_systemd_unit_state {instance="nix-cache01.home.2rjus.net:9100", name = "harmonia.service", state = "active"} == 0
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "Harmonia not running on {{ $labels.instance }}"
|
||||||
|
description: "Harmonia has been down on {{ $labels.instance }} more than 5 minutes."
|
||||||
|
- alert: low_disk_space_nix
|
||||||
|
expr: node_filesystem_free_bytes{instance="nix-cache01.home.2rjus.net:9100", mountpoint="/nix"} / node_filesystem_size_bytes{instance="nix-cache01.home.2rjus.net:9100", mountpoint="/nix"} * 100 < 10
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
summary: "Disk space low on /nix for {{ $labels.instance }}"
|
||||||
|
description: "Disk space is low on /nix for host {{ $labels.instance }}. Please check."
|
||||||
|
- name: home_assistant_rules
|
||||||
|
rules:
|
||||||
|
- alert: home_assistant_down
|
||||||
|
expr: node_systemd_unit_state {instance="ha1.home.2rjus.net:9100", name="home-assistant.service", state="active"} == 0
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "Home assistant not running on {{ $labels.instance }}"
|
||||||
|
description: "Home assistant has been down on {{ $labels.instance }} more than 5 minutes."
|
||||||
|
- alert: zigbee2qmtt_down
|
||||||
|
expr: node_systemd_unit_state {instance = "ha1.home.2rjus.net:9100", name = "zigbee2mqtt.service", state = "active"} == 0
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "Zigbee2mqtt not running on {{ $labels.instance }}"
|
||||||
|
description: "Zigbee2mqtt has been down on {{ $labels.instance }} more than 5 minutes."
|
||||||
|
- alert: mosquitto_down
|
||||||
|
expr: node_systemd_unit_state {instance = "ha1.home.2rjus.net:9100", name = "mosquitto.service", state = "active"} == 0
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "Mosquitto not running on {{ $labels.instance }}"
|
||||||
|
description: "Mosquitto has been down on {{ $labels.instance }} more than 5 minutes."
|
||||||
|
- name: smartctl_rules
|
||||||
|
rules:
|
||||||
|
- alert: SmartCriticalWarning
|
||||||
|
expr: smartctl_device_critical_warning > 0
|
||||||
|
for: 0m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: SMART critical warning (instance {{ $labels.instance }})
|
||||||
|
description: "Disk controller has critical warning on {{ $labels.instance }} drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||||
|
- alert: SmartMediaErrors
|
||||||
|
expr: smartctl_device_media_errors > 0
|
||||||
|
for: 0m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: SMART media errors (instance {{ $labels.instance }})
|
||||||
|
description: "Disk controller detected media errors on {{ $labels.instance }} drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||||
|
- alert: SmartWearoutIndicator
|
||||||
|
expr: smartctl_device_available_spare < smartctl_device_available_spare_threshold
|
||||||
|
for: 0m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: SMART Wearout Indicator (instance {{ $labels.instance }})
|
||||||
|
description: "Device is wearing out on {{ $labels.instance }} drive {{ $labels.device }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||||
|
- name: wireguard_rules
|
||||||
|
rules:
|
||||||
|
- alert: WireguardHandshake
|
||||||
|
expr: (time() - wireguard_latest_handshake_seconds{instance="http-proxy.home.2rjus.net:9586",interface="wg0",public_key="32Rb13wExcy8uI92JTnFdiOfkv0mlQ6f181WA741DHs="}) > 300
|
||||||
|
for: 1m
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
summary: "Wireguard handshake timeout on {{ $labels.instance }}"
|
||||||
|
description: "Wireguard handshake timeout on {{ $labels.instance }} for more than 1 minutes."
|
||||||
|
- name: monitoring_rules
|
||||||
|
rules:
|
||||||
|
- alert: prometheus_not_running
|
||||||
|
expr: node_systemd_unit_state{instance="monitoring01.home.2rjus.net:9100", name="prometheus.service", state="active"} == 0
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "Prometheus service not running on {{ $labels.instance }}"
|
||||||
|
description: "Prometheus service not running on {{ $labels.instance }}"
|
||||||
|
- alert: alertmanager_not_running
|
||||||
|
expr: node_systemd_unit_state{instance="monitoring01.home.2rjus.net:9100", name="alertmanager.service", state="active"} == 0
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "Alertmanager service not running on {{ $labels.instance }}"
|
||||||
|
description: "Alertmanager service not running on {{ $labels.instance }}"
|
||||||
|
- alert: pushgateway_not_running
|
||||||
|
expr: node_systemd_unit_state{instance="monitoring01.home.2rjus.net:9100", name="pushgateway.service", state="active"} == 0
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "Pushgateway service not running on {{ $labels.instance }}"
|
||||||
|
description: "Pushgateway service not running on {{ $labels.instance }}"
|
||||||
|
- alert: pushgateway_not_running
|
||||||
|
expr: node_systemd_unit_state{instance="monitoring01.home.2rjus.net:9100", name="pushgateway.service", state="active"} == 0
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "Pushgateway service not running on {{ $labels.instance }}"
|
||||||
|
description: "Pushgateway service not running on {{ $labels.instance }}"
|
||||||
|
- alert: loki_not_running
|
||||||
|
expr: node_systemd_unit_state{instance="monitoring01.home.2rjus.net:9100", name="loki.service", state="active"} == 0
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "Loki service not running on {{ $labels.instance }}"
|
||||||
|
description: "Loki service not running on {{ $labels.instance }}"
|
||||||
|
- alert: grafana_not_running
|
||||||
|
expr: node_systemd_unit_state{instance="monitoring01.home.2rjus.net:9100", name="grafana.service", state="active"} == 0
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
summary: "Grafana service not running on {{ $labels.instance }}"
|
||||||
|
description: "Grafana service not running on {{ $labels.instance }}"
|
||||||
|
- alert: tempo_not_running
|
||||||
|
expr: node_systemd_unit_state{instance="monitoring01.home.2rjus.net:9100", name="tempo.service", state="active"} == 0
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
summary: "Tempo service not running on {{ $labels.instance }}"
|
||||||
|
description: "Tempo service not running on {{ $labels.instance }}"
|
||||||
|
- alert: pyroscope_not_running
|
||||||
|
expr: node_systemd_unit_state{instance="monitoring01.home.2rjus.net:9100", name="podman-pyroscope.service", state="active"} == 0
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
summary: "Pyroscope service not running on {{ $labels.instance }}"
|
||||||
|
description: "Pyroscope service not running on {{ $labels.instance }}"
|
||||||
37
services/monitoring/tempo.nix
Normal file
37
services/monitoring/tempo.nix
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
services.tempo = {
|
||||||
|
enable = true;
|
||||||
|
settings = {
|
||||||
|
server = {
|
||||||
|
http_listen_port = 3200;
|
||||||
|
grpc_listen_port = 3201;
|
||||||
|
};
|
||||||
|
distributor = {
|
||||||
|
receivers = {
|
||||||
|
otlp = {
|
||||||
|
protocols = {
|
||||||
|
http = {
|
||||||
|
endpoint = ":4318";
|
||||||
|
cors = {
|
||||||
|
allowed_origins = [ "*.home.2rjus.net" ];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
storage = {
|
||||||
|
trace = {
|
||||||
|
backend = "local";
|
||||||
|
local = {
|
||||||
|
path = "/var/lib/tempo";
|
||||||
|
};
|
||||||
|
wal = {
|
||||||
|
path = "/var/lib/tempo/wal";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
33
services/nats/default.nix
Normal file
33
services/nats/default.nix
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
services.nats = {
|
||||||
|
enable = true;
|
||||||
|
jetstream = true;
|
||||||
|
serverName = "nats1";
|
||||||
|
settings = {
|
||||||
|
accounts = {
|
||||||
|
ADMIN = {
|
||||||
|
users = [
|
||||||
|
{
|
||||||
|
nkey = "UA44ZINQKUBTV7CX3RE7MVHOEQOQK2VQGCI4GL4M7XBJB4S66URHLW7A";
|
||||||
|
}
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
HOMELAB = {
|
||||||
|
jetstream = "enabled";
|
||||||
|
users = [
|
||||||
|
{
|
||||||
|
nkey = "UASLNKLWGICRTZMIXVD3RXLQ57XRIMCKBHP5V3PYFFRNO3E3BIJBCYMZ";
|
||||||
|
}
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
system_account = "ADMIN";
|
||||||
|
jetstream = {
|
||||||
|
max_mem = "1G";
|
||||||
|
max_file = "1G";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -7,22 +7,9 @@ let
|
|||||||
nix
|
nix
|
||||||
nixos-rebuild
|
nixos-rebuild
|
||||||
jq
|
jq
|
||||||
|
curl
|
||||||
];
|
];
|
||||||
text = ''
|
text = builtins.readFile ./build-flakes.sh;
|
||||||
cd /root/nixos-servers
|
|
||||||
git pull
|
|
||||||
echo "Starting nixos-servers builds"
|
|
||||||
for host in $(nix flake show --json| jq -r '.nixosConfigurations | keys[]'); do
|
|
||||||
echo "Building $host"
|
|
||||||
nixos-rebuild --verbose -L --flake ".#$host" build
|
|
||||||
done
|
|
||||||
echo "All nixos-servers builds complete"
|
|
||||||
|
|
||||||
echo "Building gunter"
|
|
||||||
cd /root/nixos
|
|
||||||
git pull
|
|
||||||
nixos-rebuild --verbose -L --flake ".#gunter" build
|
|
||||||
'';
|
|
||||||
};
|
};
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
|
|||||||
44
services/nix-cache/build-flakes.sh
Normal file
44
services/nix-cache/build-flakes.sh
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
JOB_NAME="build_flakes"
|
||||||
|
|
||||||
|
cd /root/nixos-servers
|
||||||
|
git pull
|
||||||
|
echo "Starting nixos-servers builds"
|
||||||
|
for host in $(nix flake show --json| jq -r '.nixosConfigurations | keys[]'); do
|
||||||
|
echo "Building $host"
|
||||||
|
if ! nixos-rebuild --verbose -L --flake ".#$host" build; then
|
||||||
|
echo "Build failed for $host"
|
||||||
|
cat <<EOF | curl -sS -X PUT --data-binary @- "https://pushgw.home.2rjus.net/metrics/job/$JOB_NAME/host/$host"
|
||||||
|
# TYPE build_flakes_error gauge
|
||||||
|
# HELP build_flakes_error 0 if the build was successful, 1 if it failed
|
||||||
|
build_flakes_error{instance="$HOSTNAME"} 1
|
||||||
|
EOF
|
||||||
|
else
|
||||||
|
echo "Build successful for $host"
|
||||||
|
cat <<EOF | curl -sS -X PUT --data-binary @- "https://pushgw.home.2rjus.net/metrics/job/$JOB_NAME/host/$host"
|
||||||
|
# TYPE build_flakes_error gauge
|
||||||
|
# HELP build_flakes_error 0 if the build was successful, 1 if it failed
|
||||||
|
build_flakes_error{instance="$HOSTNAME"} 0
|
||||||
|
EOF
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
echo "All nixos-servers builds complete"
|
||||||
|
|
||||||
|
echo "Building gunter"
|
||||||
|
cd /root/nixos
|
||||||
|
git pull
|
||||||
|
host="gunter"
|
||||||
|
if ! nixos-rebuild --verbose -L --flake ".#gunter" build; then
|
||||||
|
echo "Build failed for $host"
|
||||||
|
cat <<EOF | curl -sS -X PUT --data-binary @- "https://pushgw.home.2rjus.net/metrics/job/$JOB_NAME/host/$host"
|
||||||
|
# TYPE build_flakes_error gauge
|
||||||
|
# HELP build_flakes_error 0 if the build was successful, 1 if it failed
|
||||||
|
build_flakes_error{instance="$HOSTNAME"} 1
|
||||||
|
EOF
|
||||||
|
else
|
||||||
|
echo "Build successful for $host"
|
||||||
|
cat <<EOF | curl -sS -X PUT --data-binary @- "https://pushgw.home.2rjus.net/metrics/job/$JOB_NAME/host/$host"
|
||||||
|
# TYPE build_flakes_error gauge
|
||||||
|
# HELP build_flakes_error 0 if the build was successful, 1 if it failed
|
||||||
|
build_flakes_error{instance="$HOSTNAME"} 0
|
||||||
|
EOF
|
||||||
|
fi
|
||||||
@@ -2,7 +2,8 @@
|
|||||||
{
|
{
|
||||||
imports = [
|
imports = [
|
||||||
./build-flakes.nix
|
./build-flakes.nix
|
||||||
./nix-serve.nix
|
./harmonia.nix
|
||||||
./proxy.nix
|
./proxy.nix
|
||||||
|
./nix.nix
|
||||||
];
|
];
|
||||||
}
|
}
|
||||||
|
|||||||
16
services/nix-cache/harmonia.nix
Normal file
16
services/nix-cache/harmonia.nix
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
{ pkgs, config, ... }:
|
||||||
|
{
|
||||||
|
sops.secrets."cache-secret" = {
|
||||||
|
sopsFile = ../../secrets/nix-cache01/cache-secret;
|
||||||
|
format = "binary";
|
||||||
|
};
|
||||||
|
|
||||||
|
services.harmonia = {
|
||||||
|
enable = true;
|
||||||
|
package = pkgs.unstable.harmonia;
|
||||||
|
signKeyPaths = [ config.sops.secrets.cache-secret.path ];
|
||||||
|
};
|
||||||
|
systemd.services.harmonia = {
|
||||||
|
environment.RUST_LOG = "info,actix_web=debug";
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
{ pkgs, config, ... }:
|
|
||||||
{
|
|
||||||
sops.secrets."cache-secret" = {
|
|
||||||
sopsFile = ../../secrets/nix-cache01/cache-secret;
|
|
||||||
format = "binary";
|
|
||||||
};
|
|
||||||
services.nix-serve = {
|
|
||||||
enable = true;
|
|
||||||
package = pkgs.nix-serve-ng;
|
|
||||||
secretKeyFile = config.sops.secrets.cache-secret.path;
|
|
||||||
};
|
|
||||||
}
|
|
||||||
7
services/nix-cache/nix.nix
Normal file
7
services/nix-cache/nix.nix
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
{ lib, ... }:
|
||||||
|
{
|
||||||
|
nix.settings.substituters = lib.mkForce [
|
||||||
|
"https://cache.nixos.org"
|
||||||
|
"https://cuda-maintainers.cachix.org"
|
||||||
|
];
|
||||||
|
}
|
||||||
@@ -2,15 +2,22 @@
|
|||||||
{
|
{
|
||||||
services.caddy = {
|
services.caddy = {
|
||||||
enable = true;
|
enable = true;
|
||||||
|
package = pkgs.unstable.caddy;
|
||||||
configFile = pkgs.writeText "Caddyfile" ''
|
configFile = pkgs.writeText "Caddyfile" ''
|
||||||
{
|
{
|
||||||
acme_ca https://ca.home.2rjus.net/acme/acme/directory
|
acme_ca https://ca.home.2rjus.net/acme/acme/directory
|
||||||
|
metrics
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
nix-cache.home.2rjus.net {
|
nix-cache.home.2rjus.net {
|
||||||
log {
|
log {
|
||||||
output file /var/log/caddy/nzbget.log
|
output file /var/log/caddy/nix-cache.log {
|
||||||
|
mode 644
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
metrics /metrics
|
||||||
|
|
||||||
reverse_proxy http://localhost:5000
|
reverse_proxy http://localhost:5000
|
||||||
}
|
}
|
||||||
'';
|
'';
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
$ORIGIN home.2rjus.net.
|
$ORIGIN home.2rjus.net.
|
||||||
$TTL 1800
|
$TTL 1800
|
||||||
@ IN SOA ns1.home.2rjus.net. admin.test.2rjus.net. (
|
@ IN SOA ns1.home.2rjus.net. admin.test.2rjus.net. (
|
||||||
2050 ; serial number
|
2063 ; serial number
|
||||||
3600 ; refresh
|
3600 ; refresh
|
||||||
900 ; retry
|
900 ; retry
|
||||||
1209600 ; expire
|
1209600 ; expire
|
||||||
@@ -44,6 +44,7 @@ mpnzb IN A 10.69.12.57
|
|||||||
pve1 IN A 10.69.12.75
|
pve1 IN A 10.69.12.75
|
||||||
inc1 IN A 10.69.12.80
|
inc1 IN A 10.69.12.80
|
||||||
inc2 IN A 10.69.12.81
|
inc2 IN A 10.69.12.81
|
||||||
|
media1 IN A 10.69.12.82
|
||||||
|
|
||||||
; 13_SVC
|
; 13_SVC
|
||||||
ns1 IN A 10.69.13.5
|
ns1 IN A 10.69.13.5
|
||||||
@@ -58,7 +59,10 @@ monitoring01 IN A 10.69.13.13
|
|||||||
jelly01 IN A 10.69.13.14
|
jelly01 IN A 10.69.13.14
|
||||||
nix-cache01 IN A 10.69.13.15
|
nix-cache01 IN A 10.69.13.15
|
||||||
nix-cache IN CNAME nix-cache01
|
nix-cache IN CNAME nix-cache01
|
||||||
|
actions1 IN CNAME nix-cache01
|
||||||
pgdb1 IN A 10.69.13.16
|
pgdb1 IN A 10.69.13.16
|
||||||
|
nats1 IN A 10.69.13.17
|
||||||
|
auth01 IN A 10.69.13.18
|
||||||
|
|
||||||
; http-proxy cnames
|
; http-proxy cnames
|
||||||
nzbget IN CNAME http-proxy
|
nzbget IN CNAME http-proxy
|
||||||
@@ -70,6 +74,13 @@ grafana IN CNAME http-proxy
|
|||||||
prometheus IN CNAME http-proxy
|
prometheus IN CNAME http-proxy
|
||||||
alertmanager IN CNAME http-proxy
|
alertmanager IN CNAME http-proxy
|
||||||
jelly IN CNAME http-proxy
|
jelly IN CNAME http-proxy
|
||||||
|
auth IN CNAME http-proxy
|
||||||
|
lldap IN CNAME http-proxy
|
||||||
|
pyroscope IN CNAME http-proxy
|
||||||
|
pushgw IN CNAME http-proxy
|
||||||
|
|
||||||
|
ldap IN CNAME auth01
|
||||||
|
|
||||||
|
|
||||||
; 22_WLAN
|
; 22_WLAN
|
||||||
unifi-ctrl IN A 10.69.22.5
|
unifi-ctrl IN A 10.69.22.5
|
||||||
@@ -79,7 +90,6 @@ gunter IN A 10.69.30.105
|
|||||||
|
|
||||||
; 31
|
; 31
|
||||||
media IN A 10.69.31.50
|
media IN A 10.69.31.50
|
||||||
media1 IN A 10.69.31.49
|
|
||||||
|
|
||||||
; 99_MGMT
|
; 99_MGMT
|
||||||
sw1 IN A 10.69.99.2
|
sw1 IN A 10.69.99.2
|
||||||
|
|||||||
@@ -4,6 +4,7 @@
|
|||||||
enable = true;
|
enable = true;
|
||||||
enableJIT = true;
|
enableJIT = true;
|
||||||
enableTCPIP = true;
|
enableTCPIP = true;
|
||||||
|
extensions = ps: with ps; [ pgvector ];
|
||||||
authentication = ''
|
authentication = ''
|
||||||
# Allow access to everything from gunter
|
# Allow access to everything from gunter
|
||||||
host all all 10.69.30.105/32 scram-sha-256
|
host all all 10.69.30.105/32 scram-sha-256
|
||||||
|
|||||||
8
services/vault/default.nix
Normal file
8
services/vault/default.nix
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
services.vault = {
|
||||||
|
enable = true;
|
||||||
|
|
||||||
|
storageBackend = "file";
|
||||||
|
};
|
||||||
|
}
|
||||||
11
system/acme.nix
Normal file
11
system/acme.nix
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
security.acme = {
|
||||||
|
acceptTerms = true;
|
||||||
|
defaults = {
|
||||||
|
server = "https://ca.home.2rjus.net/acme/acme/directory";
|
||||||
|
email = "root@home.2rjus.net";
|
||||||
|
dnsPropagationCheck = false;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
9
system/autoupgrade.nix
Normal file
9
system/autoupgrade.nix
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
system.autoUpgrade = {
|
||||||
|
enable = true;
|
||||||
|
randomizedDelaySec = "1h";
|
||||||
|
allowReboot = true;
|
||||||
|
flake = "git+https://git.t-juice.club/torjus/nixos-servers.git";
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -1,6 +1,8 @@
|
|||||||
{ ... }:
|
{ ... }:
|
||||||
{
|
{
|
||||||
imports = [
|
imports = [
|
||||||
|
./acme.nix
|
||||||
|
./autoupgrade.nix
|
||||||
./monitoring
|
./monitoring
|
||||||
./packages.nix
|
./packages.nix
|
||||||
./nix.nix
|
./nix.nix
|
||||||
@@ -8,6 +10,5 @@
|
|||||||
./root-ca.nix
|
./root-ca.nix
|
||||||
./sops.nix
|
./sops.nix
|
||||||
./sshd.nix
|
./sshd.nix
|
||||||
./weekly-rebuild.nix
|
|
||||||
];
|
];
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,14 @@
|
|||||||
{ ... }:
|
{ config, ... }:
|
||||||
{
|
{
|
||||||
|
# Configure journald
|
||||||
|
services.journald = {
|
||||||
|
rateLimitInterval = "10s";
|
||||||
|
extraConfig = ''
|
||||||
|
SystemMaxUse=100M
|
||||||
|
SystemKeepFree=1G
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
# Configure promtail
|
||||||
services.promtail = {
|
services.promtail = {
|
||||||
enable = true;
|
enable = true;
|
||||||
configuration = {
|
configuration = {
|
||||||
@@ -36,6 +45,19 @@
|
|||||||
}
|
}
|
||||||
];
|
];
|
||||||
}
|
}
|
||||||
|
{
|
||||||
|
job_name = "varlog";
|
||||||
|
static_configs = [
|
||||||
|
{
|
||||||
|
targets = [ "localhost" ];
|
||||||
|
labels = {
|
||||||
|
job = "varlog";
|
||||||
|
__path__ = "/var/log/**/*.log";
|
||||||
|
hostname = "${config.networking.hostName}";
|
||||||
|
};
|
||||||
|
}
|
||||||
|
];
|
||||||
|
}
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -1,19 +1,30 @@
|
|||||||
{ pkgs, ... }:
|
{ lib, ... }:
|
||||||
{
|
{
|
||||||
|
nix = {
|
||||||
|
gc = {
|
||||||
|
automatic = true;
|
||||||
|
};
|
||||||
|
|
||||||
nix.settings.trusted-substituters = [
|
optimise = {
|
||||||
|
automatic = true;
|
||||||
|
};
|
||||||
|
|
||||||
|
settings = {
|
||||||
|
trusted-substituters = [
|
||||||
"https://nix-cache.home.2rjus.net"
|
"https://nix-cache.home.2rjus.net"
|
||||||
"https://cache.nixos.org"
|
"https://cache.nixos.org"
|
||||||
"https://cuda-maintainers.cachix.org"
|
"https://cuda-maintainers.cachix.org"
|
||||||
];
|
];
|
||||||
nix.settings.substituters = [
|
substituters = lib.mkOverride 90 [
|
||||||
"https://nix-cache.home.2rjus.net"
|
"https://nix-cache.home.2rjus.net"
|
||||||
"https://cache.nixos.org"
|
"https://cache.nixos.org"
|
||||||
"https://cuda-maintainers.cachix.org"
|
"https://cuda-maintainers.cachix.org"
|
||||||
];
|
];
|
||||||
nix.settings.trusted-public-keys = [
|
trusted-public-keys = [
|
||||||
"nix-cache.home.2rjus.net-1:2kowZOG6pvhoK4AHVO3alBlvcghH20wchzoR0V86UWI="
|
"nix-cache.home.2rjus.net-1:2kowZOG6pvhoK4AHVO3alBlvcghH20wchzoR0V86UWI="
|
||||||
"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
|
"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
|
||||||
"cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E="
|
"cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E="
|
||||||
];
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,5 +3,9 @@
|
|||||||
environment.systemPackages = [
|
environment.systemPackages = [
|
||||||
pkgs.git
|
pkgs.git
|
||||||
pkgs.jq
|
pkgs.jq
|
||||||
|
pkgs.kitty.terminfo
|
||||||
|
pkgs.python3
|
||||||
|
pkgs.neovim
|
||||||
|
pkgs.ncdu
|
||||||
];
|
];
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,21 +0,0 @@
|
|||||||
{ pkgs, ... }:
|
|
||||||
{
|
|
||||||
systemd.services."weekly-rebuild" = {
|
|
||||||
path = [
|
|
||||||
pkgs.git
|
|
||||||
pkgs.nix
|
|
||||||
];
|
|
||||||
serviceConfig = {
|
|
||||||
Type = "oneshot";
|
|
||||||
ExecStart = "${pkgs.nixos-rebuild}/bin/nixos-rebuild boot --flake git+https://git.t-juice.club/torjus/nixos-servers#";
|
|
||||||
ExecStartPost = "${pkgs.nix}/bin/nix-collect-garbage --delete-older-than 30d";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
systemd.timers."weekly-rebuild" = {
|
|
||||||
enable = true;
|
|
||||||
wantedBy = [ "timers.target" ];
|
|
||||||
timerConfig = {
|
|
||||||
OnCalendar = "Sun 06:00:00";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
}
|
|
||||||
214
terraform/README.md
Normal file
214
terraform/README.md
Normal file
@@ -0,0 +1,214 @@
|
|||||||
|
# OpenTofu Configuration for Proxmox
|
||||||
|
|
||||||
|
This directory contains OpenTofu configuration for managing Proxmox VMs using a parameterized, multi-VM deployment system.
|
||||||
|
|
||||||
|
## Setup
|
||||||
|
|
||||||
|
1. **Create a Proxmox API token:**
|
||||||
|
- Log into Proxmox web UI
|
||||||
|
- Go to Datacenter → Permissions → API Tokens
|
||||||
|
- Click Add
|
||||||
|
- User: `root@pam`, Token ID: `terraform`
|
||||||
|
- Uncheck "Privilege Separation"
|
||||||
|
- Save the token secret (shown only once)
|
||||||
|
|
||||||
|
2. **Configure credentials:**
|
||||||
|
```bash
|
||||||
|
cd terraform
|
||||||
|
cp terraform.tfvars.example terraform.tfvars
|
||||||
|
# Edit terraform.tfvars with your Proxmox details
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Initialize OpenTofu:**
|
||||||
|
```bash
|
||||||
|
tofu init
|
||||||
|
```
|
||||||
|
|
||||||
|
4. **Test connection:**
|
||||||
|
```bash
|
||||||
|
tofu plan
|
||||||
|
```
|
||||||
|
|
||||||
|
## Defining VMs
|
||||||
|
|
||||||
|
All VMs are defined in the `vms.tf` file in the `locals.vms` map. Each VM can specify custom configurations or use defaults from `variables.tf`.
|
||||||
|
|
||||||
|
### Example: DHCP VM
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
vms = {
|
||||||
|
"simple-vm" = {
|
||||||
|
cpu_cores = 2
|
||||||
|
memory = 2048
|
||||||
|
disk_size = "20G"
|
||||||
|
# No "ip" field = DHCP
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example: Static IP VM
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
vms = {
|
||||||
|
"web-server" = {
|
||||||
|
ip = "10.69.13.50/24"
|
||||||
|
cpu_cores = 4
|
||||||
|
memory = 4096
|
||||||
|
disk_size = "50G"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example: Minimal VM (all defaults)
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
vms = {
|
||||||
|
"test-vm" = {}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example: Multiple VMs
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
vms = {
|
||||||
|
"vm1" = {
|
||||||
|
ip = "10.69.13.50/24"
|
||||||
|
}
|
||||||
|
"vm2" = {
|
||||||
|
ip = "10.69.13.51/24"
|
||||||
|
cpu_cores = 4
|
||||||
|
memory = 8192
|
||||||
|
}
|
||||||
|
"vm3" = {
|
||||||
|
# DHCP
|
||||||
|
cpu_cores = 2
|
||||||
|
memory = 2048
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example: Test VM with Custom Git Branch
|
||||||
|
|
||||||
|
For testing pipeline changes without polluting master:
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
vms = {
|
||||||
|
"test-vm" = {
|
||||||
|
ip = "10.69.13.100/24"
|
||||||
|
flake_branch = "test-pipeline" # Bootstrap from this branch
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This VM will bootstrap from the `test-pipeline` branch instead of `master`. Production VMs should omit the `flake_branch` field.
|
||||||
|
|
||||||
|
## Configuration Options
|
||||||
|
|
||||||
|
Each VM in the `vms` map supports the following fields (all optional):
|
||||||
|
|
||||||
|
| Field | Description | Default |
|
||||||
|
|-------|-------------|---------|
|
||||||
|
| `ip` | Static IP with CIDR (e.g., "10.69.13.50/24"). Omit for DHCP | DHCP |
|
||||||
|
| `gateway` | Network gateway (used with static IP) | `10.69.13.1` |
|
||||||
|
| `cpu_cores` | Number of CPU cores | `2` |
|
||||||
|
| `memory` | Memory in MB | `2048` |
|
||||||
|
| `disk_size` | Disk size (e.g., "20G", "100G") | `"20G"` |
|
||||||
|
| `flake_branch` | Git branch for bootstrap (for testing, omit for production) | `master` |
|
||||||
|
| `target_node` | Proxmox node to deploy to | `"pve1"` |
|
||||||
|
| `template_name` | Template VM to clone from | `"nixos-25.11.20260128.fa83fd8"` |
|
||||||
|
| `storage` | Storage backend | `"local-zfs"` |
|
||||||
|
| `bridge` | Network bridge | `"vmbr0"` |
|
||||||
|
| `vlan_tag` | VLAN tag | `13` |
|
||||||
|
| `ssh_public_key` | SSH public key for root | See `variables.tf` |
|
||||||
|
| `nameservers` | DNS servers | `"10.69.13.5 10.69.13.6"` |
|
||||||
|
| `search_domain` | DNS search domain | `"home.2rjus.net"` |
|
||||||
|
|
||||||
|
Defaults are defined in `variables.tf` and can be changed globally.
|
||||||
|
|
||||||
|
## Deployment Commands
|
||||||
|
|
||||||
|
### Deploy All VMs
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tofu apply
|
||||||
|
```
|
||||||
|
|
||||||
|
### Deploy Specific VM
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tofu apply -target=proxmox_vm_qemu.vm[\"vm-name\"]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Destroy Specific VM
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tofu destroy -target=proxmox_vm_qemu.vm[\"vm-name\"]
|
||||||
|
```
|
||||||
|
|
||||||
|
### View Deployed VMs
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tofu output vm_ips
|
||||||
|
tofu output deployment_summary
|
||||||
|
```
|
||||||
|
|
||||||
|
### Plan Changes
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tofu plan
|
||||||
|
```
|
||||||
|
|
||||||
|
## Outputs
|
||||||
|
|
||||||
|
After deployment, OpenTofu provides two outputs:
|
||||||
|
|
||||||
|
**vm_ips**: IP addresses and SSH commands for each VM
|
||||||
|
```
|
||||||
|
vm_ips = {
|
||||||
|
"vm1" = {
|
||||||
|
ip = "10.69.13.50"
|
||||||
|
ssh_command = "ssh root@10.69.13.50"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**deployment_summary**: Full specifications for each VM
|
||||||
|
```
|
||||||
|
deployment_summary = {
|
||||||
|
"vm1" = {
|
||||||
|
cpu_cores = 4
|
||||||
|
memory_mb = 4096
|
||||||
|
disk_size = "50G"
|
||||||
|
ip = "10.69.13.50"
|
||||||
|
node = "pve1"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Workflow
|
||||||
|
|
||||||
|
1. Edit `vms.tf` to define your VMs in the `locals.vms` map
|
||||||
|
2. Run `tofu plan` to preview changes
|
||||||
|
3. Run `tofu apply` to deploy
|
||||||
|
4. Run `tofu output vm_ips` to get IP addresses
|
||||||
|
5. SSH to VMs and configure as needed
|
||||||
|
|
||||||
|
## Files
|
||||||
|
|
||||||
|
- `main.tf` - Provider configuration
|
||||||
|
- `variables.tf` - Variable definitions and defaults
|
||||||
|
- `vms.tf` - VM definitions and deployment logic
|
||||||
|
- `cloud-init.tf` - Cloud-init disk management (SSH keys, networking, branch config)
|
||||||
|
- `outputs.tf` - Output definitions for deployed VMs
|
||||||
|
- `terraform.tfvars.example` - Example credentials file
|
||||||
|
- `terraform.tfvars` - Your actual credentials (gitignored)
|
||||||
|
- `vm.tf.old` - Archived single-VM configuration (reference)
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- VMs are deployed as full clones (not linked clones)
|
||||||
|
- Cloud-init handles initial networking configuration
|
||||||
|
- QEMU guest agent is enabled on all VMs
|
||||||
|
- All VMs start on boot by default
|
||||||
|
- IPv6 is disabled
|
||||||
|
- Destroying VMs removes them from Proxmox but does not clean up DNS entries or NixOS configurations
|
||||||
58
terraform/cloud-init.tf
Normal file
58
terraform/cloud-init.tf
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
# Cloud-init configuration for all VMs
|
||||||
|
#
|
||||||
|
# This file manages cloud-init disks for all VMs using the proxmox_cloud_init_disk resource.
|
||||||
|
# VMs with flake_branch set will include NIXOS_FLAKE_BRANCH environment variable.
|
||||||
|
|
||||||
|
resource "proxmox_cloud_init_disk" "ci" {
|
||||||
|
for_each = local.vm_configs
|
||||||
|
|
||||||
|
name = each.key
|
||||||
|
pve_node = each.value.target_node
|
||||||
|
storage = "local" # Cloud-init disks must be on storage that supports ISO/snippets
|
||||||
|
|
||||||
|
# User data includes SSH keys and optionally NIXOS_FLAKE_BRANCH
|
||||||
|
user_data = <<-EOT
|
||||||
|
#cloud-config
|
||||||
|
ssh_authorized_keys:
|
||||||
|
- ${each.value.ssh_public_key}
|
||||||
|
${each.value.flake_branch != null ? <<-BRANCH
|
||||||
|
write_files:
|
||||||
|
- path: /etc/environment
|
||||||
|
content: |
|
||||||
|
NIXOS_FLAKE_BRANCH=${each.value.flake_branch}
|
||||||
|
append: true
|
||||||
|
BRANCH
|
||||||
|
: ""}
|
||||||
|
EOT
|
||||||
|
|
||||||
|
# Network configuration - static IP or DHCP
|
||||||
|
network_config = each.value.ip != null ? yamlencode({
|
||||||
|
version = 1
|
||||||
|
config = [{
|
||||||
|
type = "physical"
|
||||||
|
name = "ens18"
|
||||||
|
subnets = [{
|
||||||
|
type = "static"
|
||||||
|
address = each.value.ip
|
||||||
|
gateway = each.value.gateway
|
||||||
|
dns_nameservers = split(" ", each.value.nameservers)
|
||||||
|
dns_search = [each.value.search_domain]
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}) : yamlencode({
|
||||||
|
version = 1
|
||||||
|
config = [{
|
||||||
|
type = "physical"
|
||||||
|
name = "ens18"
|
||||||
|
subnets = [{
|
||||||
|
type = "dhcp"
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
})
|
||||||
|
|
||||||
|
# Instance metadata
|
||||||
|
meta_data = yamlencode({
|
||||||
|
instance_id = sha1(each.key)
|
||||||
|
local-hostname = each.key
|
||||||
|
})
|
||||||
|
}
|
||||||
18
terraform/main.tf
Normal file
18
terraform/main.tf
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
terraform {
|
||||||
|
required_version = ">= 1.0"
|
||||||
|
required_providers {
|
||||||
|
proxmox = {
|
||||||
|
source = "telmate/proxmox"
|
||||||
|
version = "3.0.2-rc07"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
provider "proxmox" {
|
||||||
|
pm_api_url = var.proxmox_api_url
|
||||||
|
pm_api_token_id = var.proxmox_api_token_id
|
||||||
|
pm_api_token_secret = var.proxmox_api_token_secret
|
||||||
|
pm_tls_insecure = var.proxmox_tls_insecure
|
||||||
|
}
|
||||||
|
|
||||||
|
# Provider configured - ready to add resources
|
||||||
24
terraform/outputs.tf
Normal file
24
terraform/outputs.tf
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
# Dynamic outputs for all deployed VMs
|
||||||
|
|
||||||
|
output "vm_ips" {
|
||||||
|
description = "IP addresses and SSH commands for deployed VMs"
|
||||||
|
value = {
|
||||||
|
for name, vm in proxmox_vm_qemu.vm : name => {
|
||||||
|
ip = vm.default_ipv4_address
|
||||||
|
ssh_command = "ssh root@${vm.default_ipv4_address}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
output "deployment_summary" {
|
||||||
|
description = "Summary of deployed VMs with their specifications"
|
||||||
|
value = {
|
||||||
|
for name, vm in proxmox_vm_qemu.vm : name => {
|
||||||
|
cpu_cores = vm.cpu[0].cores
|
||||||
|
memory_mb = vm.memory
|
||||||
|
disk_size = vm.disks[0].virtio[0].virtio0[0].disk[0].size
|
||||||
|
ip = vm.default_ipv4_address
|
||||||
|
node = vm.target_node
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
7
terraform/terraform.tfvars.example
Normal file
7
terraform/terraform.tfvars.example
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
# Copy this file to terraform.tfvars and fill in your values
|
||||||
|
# terraform.tfvars is gitignored to keep credentials safe
|
||||||
|
|
||||||
|
proxmox_api_url = "https://your-proxmox-host.home.2rjus.net:8006/api2/json"
|
||||||
|
proxmox_api_token_id = "root@pam!terraform"
|
||||||
|
proxmox_api_token_secret = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
|
||||||
|
proxmox_tls_insecure = true
|
||||||
97
terraform/variables.tf
Normal file
97
terraform/variables.tf
Normal file
@@ -0,0 +1,97 @@
|
|||||||
|
variable "proxmox_api_url" {
|
||||||
|
description = "Proxmox API URL (e.g., https://proxmox.home.2rjus.net:8006/api2/json)"
|
||||||
|
type = string
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "proxmox_api_token_id" {
|
||||||
|
description = "Proxmox API Token ID (e.g., root@pam!terraform)"
|
||||||
|
type = string
|
||||||
|
sensitive = true
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "proxmox_api_token_secret" {
|
||||||
|
description = "Proxmox API Token Secret"
|
||||||
|
type = string
|
||||||
|
sensitive = true
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "proxmox_tls_insecure" {
|
||||||
|
description = "Skip TLS verification (set to true for self-signed certs)"
|
||||||
|
type = bool
|
||||||
|
default = true
|
||||||
|
}
|
||||||
|
|
||||||
|
# Default values for VM configurations
|
||||||
|
# These can be overridden per-VM in vms.tf
|
||||||
|
|
||||||
|
variable "default_target_node" {
|
||||||
|
description = "Default Proxmox node to deploy VMs to"
|
||||||
|
type = string
|
||||||
|
default = "pve1"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "default_template_name" {
|
||||||
|
description = "Default template VM name to clone from"
|
||||||
|
type = string
|
||||||
|
default = "nixos-25.11.20260128.fa83fd8"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "default_ssh_public_key" {
|
||||||
|
description = "Default SSH public key for root user"
|
||||||
|
type = string
|
||||||
|
default = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAwfb2jpKrBnCw28aevnH8HbE5YbcMXpdaVv2KmueDu6 torjus@gunter"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "default_storage" {
|
||||||
|
description = "Default storage backend for VM disks"
|
||||||
|
type = string
|
||||||
|
default = "local-zfs"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "default_bridge" {
|
||||||
|
description = "Default network bridge"
|
||||||
|
type = string
|
||||||
|
default = "vmbr0"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "default_vlan_tag" {
|
||||||
|
description = "Default VLAN tag"
|
||||||
|
type = number
|
||||||
|
default = 13
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "default_gateway" {
|
||||||
|
description = "Default network gateway for static IP VMs"
|
||||||
|
type = string
|
||||||
|
default = "10.69.13.1"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "default_nameservers" {
|
||||||
|
description = "Default DNS nameservers"
|
||||||
|
type = string
|
||||||
|
default = "10.69.13.5 10.69.13.6"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "default_search_domain" {
|
||||||
|
description = "Default DNS search domain"
|
||||||
|
type = string
|
||||||
|
default = "home.2rjus.net"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "default_cpu_cores" {
|
||||||
|
description = "Default number of CPU cores"
|
||||||
|
type = number
|
||||||
|
default = 2
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "default_memory" {
|
||||||
|
description = "Default memory in MB"
|
||||||
|
type = number
|
||||||
|
default = 2048
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "default_disk_size" {
|
||||||
|
description = "Default disk size"
|
||||||
|
type = string
|
||||||
|
default = "20G"
|
||||||
|
}
|
||||||
135
terraform/vms.tf
Normal file
135
terraform/vms.tf
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
# VM Definitions
|
||||||
|
# Define all VMs to deploy in the locals.vms map below
|
||||||
|
# Omit fields to use defaults from variables.tf
|
||||||
|
|
||||||
|
locals {
|
||||||
|
# Define VMs here
|
||||||
|
# Each VM can override defaults by specifying values
|
||||||
|
# Omit "ip" field for DHCP, include it for static IP
|
||||||
|
vms = {
|
||||||
|
# Example DHCP VM (uncomment to deploy):
|
||||||
|
# "example-dhcp-vm" = {
|
||||||
|
# cpu_cores = 2
|
||||||
|
# memory = 2048
|
||||||
|
# disk_size = "20G"
|
||||||
|
# }
|
||||||
|
|
||||||
|
# Example Static IP VM (uncomment to deploy):
|
||||||
|
# "example-static-vm" = {
|
||||||
|
# ip = "10.69.13.50/24"
|
||||||
|
# cpu_cores = 4
|
||||||
|
# memory = 4096
|
||||||
|
# disk_size = "50G"
|
||||||
|
# }
|
||||||
|
|
||||||
|
# Example Test VM with custom git branch (for testing pipeline changes):
|
||||||
|
# "test-vm" = {
|
||||||
|
# ip = "10.69.13.100/24"
|
||||||
|
# flake_branch = "test-pipeline" # Bootstrap from this branch instead of master
|
||||||
|
# }
|
||||||
|
|
||||||
|
# Example Minimal VM using all defaults (uncomment to deploy):
|
||||||
|
# "minimal-vm" = {}
|
||||||
|
# "bootstrap-verify-test" = {}
|
||||||
|
"testvm01" = {
|
||||||
|
ip = "10.69.13.101/24"
|
||||||
|
cpu_cores = 2
|
||||||
|
memory = 2048
|
||||||
|
disk_size = "20G"
|
||||||
|
flake_branch = "pipeline-testing-improvements"
|
||||||
|
}
|
||||||
|
"vault01" = {
|
||||||
|
ip = "10.69.13.19/24"
|
||||||
|
cpu_cores = 2
|
||||||
|
memory = 2048
|
||||||
|
disk_size = "20G"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Compute VM configurations with defaults applied
|
||||||
|
vm_configs = {
|
||||||
|
for name, vm in local.vms : name => {
|
||||||
|
target_node = lookup(vm, "target_node", var.default_target_node)
|
||||||
|
template_name = lookup(vm, "template_name", var.default_template_name)
|
||||||
|
cpu_cores = lookup(vm, "cpu_cores", var.default_cpu_cores)
|
||||||
|
memory = lookup(vm, "memory", var.default_memory)
|
||||||
|
disk_size = lookup(vm, "disk_size", var.default_disk_size)
|
||||||
|
storage = lookup(vm, "storage", var.default_storage)
|
||||||
|
bridge = lookup(vm, "bridge", var.default_bridge)
|
||||||
|
vlan_tag = lookup(vm, "vlan_tag", var.default_vlan_tag)
|
||||||
|
ssh_public_key = lookup(vm, "ssh_public_key", var.default_ssh_public_key)
|
||||||
|
nameservers = lookup(vm, "nameservers", var.default_nameservers)
|
||||||
|
search_domain = lookup(vm, "search_domain", var.default_search_domain)
|
||||||
|
# Network configuration - detect DHCP vs static
|
||||||
|
ip = lookup(vm, "ip", null)
|
||||||
|
gateway = lookup(vm, "gateway", var.default_gateway)
|
||||||
|
# Branch configuration for bootstrap (optional, uses master if not set)
|
||||||
|
flake_branch = lookup(vm, "flake_branch", null)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Deploy all VMs using for_each
|
||||||
|
resource "proxmox_vm_qemu" "vm" {
|
||||||
|
for_each = local.vm_configs
|
||||||
|
|
||||||
|
name = each.key
|
||||||
|
target_node = each.value.target_node
|
||||||
|
|
||||||
|
# Clone from template
|
||||||
|
clone = each.value.template_name
|
||||||
|
full_clone = true
|
||||||
|
|
||||||
|
# Boot configuration
|
||||||
|
boot = "order=virtio0"
|
||||||
|
scsihw = "virtio-scsi-single"
|
||||||
|
|
||||||
|
# VM settings
|
||||||
|
cpu {
|
||||||
|
cores = each.value.cpu_cores
|
||||||
|
}
|
||||||
|
memory = each.value.memory
|
||||||
|
|
||||||
|
# Network
|
||||||
|
network {
|
||||||
|
id = 0
|
||||||
|
model = "virtio"
|
||||||
|
bridge = each.value.bridge
|
||||||
|
tag = each.value.vlan_tag
|
||||||
|
}
|
||||||
|
|
||||||
|
# Disk settings
|
||||||
|
disks {
|
||||||
|
virtio {
|
||||||
|
virtio0 {
|
||||||
|
disk {
|
||||||
|
size = each.value.disk_size
|
||||||
|
storage = each.value.storage
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ide {
|
||||||
|
ide2 {
|
||||||
|
# Reference the custom cloud-init disk created in cloud-init.tf
|
||||||
|
cdrom {
|
||||||
|
iso = proxmox_cloud_init_disk.ci[each.key].id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Start on boot
|
||||||
|
start_at_node_boot = true
|
||||||
|
|
||||||
|
# Agent
|
||||||
|
agent = 1
|
||||||
|
|
||||||
|
# Skip IPv6 since we don't use it
|
||||||
|
skip_ipv6 = true
|
||||||
|
|
||||||
|
# RNG device for better entropy
|
||||||
|
rng {
|
||||||
|
source = "/dev/urandom"
|
||||||
|
period = 1000
|
||||||
|
}
|
||||||
|
}
|
||||||
Reference in New Issue
Block a user