Compare commits: deploy-tes...nrec-forge (245 commits)
.claude/agents/auditor.md (new file, 180 lines)

@@ -0,0 +1,180 @@
---
name: auditor
description: Analyzes audit logs to investigate user activity, command execution, and suspicious behavior on hosts. Can be used standalone for security reviews or called by other agents for behavioral context.
tools: Read, Grep, Glob
mcpServers:
  - lab-monitoring
---

You are a security auditor for a NixOS homelab infrastructure. Your task is to analyze audit logs and reconstruct user activity on hosts.

## Input

You may receive:
- A host or list of hosts to investigate
- A time window (e.g., "last hour", "today", "between 14:00 and 15:00")
- Optional context: specific events to look for, user to focus on, or suspicious activity to investigate
- Optional context from a parent investigation (e.g., "a service stopped at 14:32, what happened around that time?")

## Audit Log Structure

Logs are shipped to Loki via promtail. Audit events use these labels:
- `hostname` - hostname
- `systemd_unit` - typically `auditd.service` for audit logs
- `job` - typically `systemd-journal`

Audit log entries contain structured data:
- `EXECVE` - command execution with full arguments
- `USER_LOGIN` / `USER_LOGOUT` - session start/end
- `USER_CMD` - sudo command execution
- `CRED_ACQ` / `CRED_DISP` - credential acquisition/disposal
- `SERVICE_START` / `SERVICE_STOP` - systemd service events
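To see the raw audit stream for a host before applying the filters below, a starting query might look like this (a sketch; the exact label values depend on the promtail configuration shipping these logs):

```logql
{hostname="<hostname>", systemd_unit="auditd.service"} |= "USER_LOGIN"
```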
## Investigation Techniques

### 1. SSH Session Activity

Find SSH logins and session activity:
```logql
{hostname="<hostname>", systemd_unit="sshd.service"}
```

Look for:
- Accepted/Failed authentication
- Session opened/closed
- Unusual source IPs or users

### 2. Command Execution

Query executed commands (filter out noise):
```logql
{hostname="<hostname>"} |= "EXECVE" != "PATH item" != "PROCTITLE" != "SYSCALL" != "BPF"
```

Further filtering:
- Exclude systemd noise: `!= "systemd" != "/nix/store"`
- Focus on specific commands: `|= "rm" |= "-rf"`
- Focus on specific user: `|= "uid=1000"`

### 3. Sudo Activity

Check for privilege escalation:
```logql
{hostname="<hostname>"} |= "sudo" |= "COMMAND"
```

Or via audit:
```logql
{hostname="<hostname>"} |= "USER_CMD"
```

### 4. Service Manipulation

Check if services were manually stopped/started:
```logql
{hostname="<hostname>"} |= "EXECVE" |= "systemctl"
```

### 5. File Operations

Look for file modifications (if auditd rules are configured):
```logql
{hostname="<hostname>"} |= "EXECVE" |= "vim"
{hostname="<hostname>"} |= "EXECVE" |= "nano"
{hostname="<hostname>"} |= "EXECVE" |= "rm"
```

## Query Guidelines

**Start narrow, expand if needed:**
- Begin with `limit: 20-30`
- Use tight time windows: `start: "15m"` or `start: "30m"`
- Add filters progressively

**Avoid:**
- Querying all audit logs without EXECVE filter (extremely verbose)
- Large time ranges without specific filters
- Limits over 50 without tight filters

**Time-bounded queries:**
When investigating around a specific event:
```logql
{hostname="<hostname>"} |= "EXECVE" != "systemd"
```
With `start: "2026-02-08T14:30:00Z"` and `end: "2026-02-08T14:35:00Z"`

## Suspicious Patterns to Watch For

1. **Unusual login times** - Activity outside normal hours
2. **Failed authentication** - Brute force attempts
3. **Privilege escalation** - Unexpected sudo usage
4. **Reconnaissance commands** - `whoami`, `id`, `uname`, `cat /etc/passwd`
5. **Data exfiltration indicators** - `curl`, `wget`, `scp`, `rsync` to external destinations
6. **Persistence mechanisms** - Cron modifications, systemd service creation
7. **Log tampering** - Commands targeting log files
8. **Lateral movement** - SSH to other internal hosts
9. **Service manipulation** - Stopping security services, disabling firewalls
10. **Cleanup activity** - Deleting bash history, clearing logs
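Several of these patterns can be screened for in one pass by combining the EXECVE filter with a regex over command names from the list above (a sketch only; tune the alternatives to the investigation at hand):

```logql
{hostname="<hostname>"} |= "EXECVE" != "systemd" |~ "whoami|uname|/etc/passwd|curl|wget|scp"
```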
## Output Format

### For Standalone Security Reviews

```
## Activity Summary

**Host:** <hostname>
**Time Period:** <start> to <end>
**Sessions Found:** <count>

## User Sessions

### Session 1: <user> from <source_ip>
- **Login:** HH:MM:SSZ
- **Logout:** HH:MM:SSZ (or ongoing)
- **Commands executed:**
  - HH:MM:SSZ - <command>
  - HH:MM:SSZ - <command>

## Suspicious Activity

[If any patterns from the watch list were detected]
- **Finding:** <description>
- **Evidence:** <log entries>
- **Risk Level:** Low / Medium / High

## Summary

[Overall assessment: normal activity, concerning patterns, or clear malicious activity]
```

### When Called by Another Agent

Provide a focused response addressing the specific question:

```
## Audit Findings

**Query:** <what was asked>
**Time Window:** <investigated period>

## Relevant Activity

[Chronological list of relevant events]
- HH:MM:SSZ - <event>
- HH:MM:SSZ - <event>

## Assessment

[Direct answer to the question with supporting evidence]
```

## Guidelines

- Reconstruct timelines chronologically
- Correlate events (login → commands → logout)
- Note gaps or missing data
- Distinguish between automated (systemd, cron) and interactive activity
- Consider the host's role and tier when assessing severity
- When called by another agent, focus on answering their specific question
- Don't speculate without evidence - state what the logs show and don't show
.claude/agents/investigate-alarm.md (new file, 211 lines)

@@ -0,0 +1,211 @@
---
name: investigate-alarm
description: Investigates a single system alarm by querying Prometheus metrics and Loki logs, analyzing configuration files for affected hosts/services, and providing root cause analysis.
tools: Read, Grep, Glob
mcpServers:
  - lab-monitoring
  - git-explorer
---

You are an alarm investigation specialist for a NixOS homelab infrastructure. Your task is to analyze a single alarm and determine its root cause.

## Input

You will receive information about an alarm, which may include:
- Alert name and severity
- Affected host or service
- Alert expression/threshold
- Current value or status
- When it started firing

## Investigation Process

### 1. Understand the Alert Context

Start by understanding what the alert is measuring:
- Use `get_alert` if you have a fingerprint, or `list_alerts` to find matching alerts
- Use `get_metric_metadata` to understand the metric being monitored
- Use `search_metrics` to find related metrics

### 2. Query Current State

Gather evidence about the current system state:
- Use `query` to check the current metric values and related metrics
- Use `list_targets` to verify the host/service is being scraped successfully
- Look for correlated metrics that might explain the issue
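A quick first `query` is usually the scrape-health series for the affected host, using the `hostname` label convention described in the observability reference (a minimal sketch):

```promql
up{hostname="<hostname>"}
```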
### 3. Check Service Logs

Search for relevant log entries using `query_logs`. Focus on service-specific logs and errors.

**Query strategies (start narrow, expand if needed):**
- Start with `limit: 20-30`, increase only if needed
- Use tight time windows: `start: "15m"` or `start: "30m"` initially
- Filter to specific services: `{hostname="<hostname>", systemd_unit="<service>.service"}`
- Search for errors: `{hostname="<hostname>"} |= "error"` or `|= "failed"`

**Common patterns:**
- Service logs: `{hostname="<hostname>", systemd_unit="<service>.service"}`
- All errors on host: `{hostname="<hostname>"} |= "error"`
- Journal for a unit: `{hostname="<hostname>", systemd_unit="nginx.service"} |= "failed"`

**Avoid:**
- Using `start: "1h"` with no filters on busy hosts
- Limits over 50 without specific filters

### 4. Investigate User Activity

For any analysis of user activity, **always spawn the `auditor` agent**. Do not query audit logs (EXECVE, USER_LOGIN, etc.) directly - delegate this to the auditor.

**Always call the auditor when:**
- A service stopped unexpectedly (may have been manually stopped)
- A process was killed or a config was changed
- You need to know who was logged in around the time of an incident
- You need to understand what commands led to the current state
- The cause isn't obvious from service logs alone

**Do NOT try to query audit logs yourself.** The auditor is specialized for:
- Parsing EXECVE records and reconstructing command lines
- Correlating SSH sessions with commands executed
- Identifying suspicious patterns
- Filtering out systemd/nix-store noise

**Example prompt for auditor:**
```
Investigate user activity on <hostname> between <start_time> and <end_time>.
Context: The prometheus-node-exporter service stopped at 14:32.
Determine if it was manually stopped and by whom.
```

Incorporate the auditor's findings into your timeline and root cause analysis.

### 5. Check Configuration (if relevant)

If the alert relates to a NixOS-managed service:
- Check host configuration in `/hosts/<hostname>/`
- Check service modules in `/services/<service>/`
- Look for thresholds, resource limits, or misconfigurations
- Check `homelab.host` options for tier/priority/role metadata

### 6. Check for Configuration Drift

Use the git-explorer MCP server to compare the host's deployed configuration against the current master branch. This helps identify:
- Hosts running outdated configurations
- Recent changes that might have caused the issue
- Whether a fix has already been committed but not deployed

**Step 1: Get the deployed revision from Prometheus**
```promql
nixos_flake_info{hostname="<hostname>"}
```
The `current_rev` label contains the deployed git commit hash.

**Step 2: Check if the host is behind master**
```
resolve_ref("master")              # Get current master commit
is_ancestor(deployed, master)      # Check if host is behind
```

**Step 3: See what commits are missing**
```
commits_between(deployed, master)  # List commits not yet deployed
```

**Step 4: Check which files changed**
```
get_diff_files(deployed, master)   # Files modified since deployment
```
Look for files in `hosts/<hostname>/`, `services/<relevant-service>/`, or `system/` that affect this host.

**Step 5: View configuration at the deployed revision**
```
get_file_at_commit(deployed, "services/<service>/default.nix")
```
Compare against the current file to understand differences.

**Step 6: Find when something changed**
```
search_commits("<service-name>")   # Find commits mentioning the service
get_commit_info(<hash>)            # Get full details of a specific change
```

**Example workflow for a service-related alert:**
1. Query `nixos_flake_info{hostname="monitoring02"}` → `current_rev: 8959829`
2. `resolve_ref("master")` → `4633421`
3. `is_ancestor("8959829", "4633421")` → Yes, host is behind
4. `commits_between("8959829", "4633421")` → 7 commits missing
5. `get_diff_files("8959829", "4633421")` → Check if relevant service files changed
6. If a fix was committed after the deployed rev, recommend deployment

### 7. Consider Common Causes

For infrastructure alerts, common causes include:
- **Manual intervention**: Service manually stopped/restarted (call auditor to confirm)
- **Configuration drift**: Host running outdated config, fix already in master
- **Disk space**: Nix store growth, logs, temp files
- **Memory pressure**: Service memory leaks, insufficient limits
- **CPU**: Runaway processes, build jobs
- **Network**: DNS issues, connectivity problems
- **Service restarts**: Failed upgrades, configuration errors
- **Scrape failures**: Service down, firewall issues, port changes

**Note:** If a service stopped unexpectedly and service logs don't show a crash or error, it was likely manual intervention - call the auditor to investigate.

## Output Format

Provide a concise report with one of two outcomes:

### If Root Cause Identified:

```
## Root Cause
[1-2 sentence summary of the root cause]

## Timeline
[Chronological sequence of relevant events leading to the alert]
- HH:MM:SSZ - [Event description]
- HH:MM:SSZ - [Event description]
- HH:MM:SSZ - [Alert fired]

### Timeline sources
- HH:MM:SSZ - [Source for information about this event. Which metric or log file]
- HH:MM:SSZ - [Source for information about this event. Which metric or log file]
- HH:MM:SSZ - [Alert fired]

## Evidence
- [Specific metric values or log entries that support the conclusion]
- [Configuration details if relevant]

## Recommended Actions
1. [Specific remediation step]
2. [Follow-up actions if any]
```

### If Root Cause Unclear:

```
## Investigation Summary
[What was checked and what was found]

## Possible Causes
- [Hypothesis 1 with supporting/contradicting evidence]
- [Hypothesis 2 with supporting/contradicting evidence]

## Additional Information Needed
- [Specific data, logs, or access that would help]
- [Suggested queries or checks for the operator]
```

## Guidelines

- Be concise and actionable
- Reference specific metric names and values as evidence
- Include log snippets when they're informative
- Don't speculate without evidence
- If the alert is a false positive or expected behavior, explain why
- Consider the host's tier (test vs prod) when assessing severity
- Build a timeline from log timestamps and metrics to show the sequence of events
- **Query logs incrementally**: start with narrow filters and small limits, expand only if needed
- **Always delegate to the auditor agent** for any user activity analysis - never query EXECVE or audit logs directly
@@ -30,11 +30,13 @@ Use the `lab-monitoring` MCP server tools:

### Label Reference

Available labels for log queries:
- `host` - Hostname (e.g., `ns1`, `monitoring01`, `ha1`)
- `hostname` - Hostname (e.g., `ns1`, `monitoring02`, `ha1`) - matches the Prometheus `hostname` label
- `systemd_unit` - Systemd unit name (e.g., `nsd.service`, `nixos-upgrade.service`)
- `job` - Either `systemd-journal` (most logs) or `varlog` (file-based logs)
- `job` - Either `systemd-journal` (most logs), `varlog` (file-based logs), or `bootstrap` (VM bootstrap logs)
- `filename` - For `varlog` job, the log file path
- `hostname` - Alternative to `host` for some streams
- `tier` - Deployment tier (`test` or `prod`)
- `role` - Host role (e.g., `dns`, `vault`, `monitoring`) - matches the Prometheus `role` label
- `level` - Log level mapped from journal PRIORITY (`critical`, `error`, `warning`, `notice`, `info`, `debug`) - journal scrape only

### Log Format

@@ -47,12 +49,12 @@ Journal logs are JSON-formatted. Key fields:

**Logs from a specific service on a host:**
```logql
{host="ns1", systemd_unit="nsd.service"}
{hostname="ns1", systemd_unit="nsd.service"}
```

**All logs from a host:**
```logql
{host="monitoring01"}
{hostname="monitoring02"}
```

**Logs from a service across all hosts:**

@@ -62,17 +64,31 @@ Journal logs are JSON-formatted. Key fields:

**Substring matching (case-sensitive):**
```logql
{host="ha1"} |= "error"
{hostname="ha1"} |= "error"
```

**Exclude pattern:**
```logql
{host="ns1"} != "routine"
{hostname="ns1"} != "routine"
```

**Regex matching:**
```logql
{systemd_unit="prometheus.service"} |~ "scrape.*failed"
{systemd_unit="victoriametrics.service"} |~ "scrape.*failed"
```

**Filter by level (journal scrape only):**
```logql
{level="error"}                        # All errors across the fleet
{level=~"critical|error", tier="prod"} # Prod errors and criticals
{hostname="ns1", level="warning"}      # Warnings from a specific host
```

**Filter by tier/role:**
```logql
{tier="prod"} |= "error"               # All errors on prod hosts
{role="dns"}                           # All DNS server logs
{tier="test", job="systemd-journal"}   # Journal logs from test hosts
```

**File-based logs (caddy access logs, etc):**

@@ -93,7 +109,7 @@ Default lookback is 1 hour. Use `start` parameter for older logs:

Useful systemd units for troubleshooting:
- `nixos-upgrade.service` - Daily auto-upgrade logs
- `nsd.service` - DNS server (ns1/ns2)
- `prometheus.service` - Metrics collection
- `victoriametrics.service` - Metrics collection
- `loki.service` - Log aggregation
- `caddy.service` - Reverse proxy
- `home-assistant.service` - Home automation

@@ -102,11 +118,41 @@ Useful systemd units for troubleshooting:

- `sshd.service` - SSH daemon
- `nix-gc.service` - Nix garbage collection

### Bootstrap Logs

VMs provisioned from template2 send bootstrap progress directly to Loki via curl (before promtail is available). These logs use `job="bootstrap"` with additional labels:

- `hostname` - Target hostname
- `branch` - Git branch being deployed
- `stage` - Bootstrap stage (see table below)

**Bootstrap stages:**

| Stage | Message | Meaning |
|-------|---------|---------|
| `starting` | Bootstrap starting for \<host\> (branch: \<branch\>) | Bootstrap service has started |
| `network_ok` | Network connectivity confirmed | Can reach git server |
| `vault_ok` | Vault credentials unwrapped and stored | AppRole credentials provisioned |
| `vault_skip` | No Vault token provided - skipping credential setup | No wrapped token was provided |
| `vault_warn` | Failed to unwrap Vault token - continuing without secrets | Token unwrap failed (expired/used) |
| `building` | Starting nixos-rebuild boot | NixOS build starting |
| `success` | Build successful - rebooting into new configuration | Build complete, rebooting |
| `failed` | nixos-rebuild failed - manual intervention required | Build failed |

**Bootstrap queries:**

```logql
{job="bootstrap"}                            # All bootstrap logs
{job="bootstrap", hostname="myhost"}         # Specific host
{job="bootstrap", stage="failed"}            # All failures
{job="bootstrap", stage=~"building|success"} # Track build progress
```

### Extracting JSON Fields

Parse JSON and filter on fields:
```logql
{systemd_unit="prometheus.service"} | json | PRIORITY="3"
{systemd_unit="victoriametrics.service"} | json | PRIORITY="3"
```

---
@@ -175,31 +221,94 @@ Disk space (root filesystem):

node_filesystem_avail_bytes{mountpoint="/"} / node_filesystem_size_bytes{mountpoint="/"}
```

### Service-Specific Metrics
### Prometheus Jobs

Common job names:
- `node-exporter` - System metrics (all hosts)
- `nixos-exporter` - NixOS version/generation metrics
- `caddy` - Reverse proxy metrics
- `prometheus` / `loki` / `grafana` - Monitoring stack
- `home-assistant` - Home automation
- `step-ca` - Internal CA
All available Prometheus job names:

### Instance Label Format
**System exporters (on all/most hosts):**
- `node-exporter` - System metrics (CPU, memory, disk, network)
- `nixos-exporter` - NixOS flake revision and generation info
- `systemd-exporter` - Systemd unit status metrics
- `homelab-deploy` - Deployment listener metrics

The `instance` label uses FQDN format:
**Service-specific exporters:**
- `caddy` - Reverse proxy metrics (http-proxy)
- `nix-cache_caddy` - Nix binary cache metrics
- `home-assistant` - Home automation metrics (ha1)
- `jellyfin` - Media server metrics (jelly01)
- `kanidm` - Authentication server metrics (kanidm01)
- `nats` - NATS messaging metrics (nats1)
- `openbao` - Secrets management metrics (vault01)
- `unbound` - DNS resolver metrics (ns1, ns2)
- `wireguard` - VPN tunnel metrics (http-proxy)

```
<hostname>.home.2rjus.net:<port>
```
**Monitoring stack (localhost on monitoring02):**
- `victoriametrics` - VictoriaMetrics self-metrics
- `loki` - Loki self-metrics
- `grafana` - Grafana self-metrics
- `alertmanager` - Alertmanager metrics

Example queries filtering by host:
**External/infrastructure:**
- `pve-exporter` - Proxmox hypervisor metrics
- `smartctl` - Disk SMART health (gunter)
- `restic_rest` - Backup server metrics
- `ghettoptt` - PTT service metrics (gunter)

### Target Labels

All scrape targets have these labels:

**Standard labels:**
- `instance` - Full target address (`<hostname>.home.2rjus.net:<port>`)
- `job` - Job name (e.g., `node-exporter`, `unbound`, `nixos-exporter`)
- `hostname` - Short hostname (e.g., `ns1`, `monitoring02`) - use this for host filtering

**Host metadata labels** (when configured in `homelab.host`):
- `role` - Host role (e.g., `dns`, `build-host`, `vault`)
- `tier` - Deployment tier (`test` for test VMs, absent for prod)
- `dns_role` - DNS-specific role (`primary` or `secondary` for ns1/ns2)

### Filtering by Host

Use the `hostname` label for easy host filtering across all jobs:

```promql
up{instance=~"monitoring01.*"}
node_load1{instance=~"ns1.*"}
{hostname="ns1"}                      # All metrics from ns1
node_load1{hostname="monitoring02"}   # Specific metric by hostname
up{hostname="ha1"}                    # Check if ha1 is up
```

This is simpler than wildcarding the `instance` label:

```promql
# Old way (still works but verbose)
up{instance=~"monitoring02.*"}

# New way (preferred)
up{hostname="monitoring02"}
```

### Filtering by Role/Tier

Filter hosts by their role or tier:

```promql
up{role="dns"}                             # All DNS servers (ns1, ns2)
node_cpu_seconds_total{role="build-host"}  # Build hosts only (nix-cache01)
up{tier="test"}                            # All test-tier VMs
up{dns_role="primary"}                     # Primary DNS only (ns1)
```

Current host labels:

| Host | Labels |
|------|--------|
| ns1 | `role=dns`, `dns_role=primary` |
| ns2 | `role=dns`, `dns_role=secondary` |
| nix-cache01 | `role=build-host` |
| vault01 | `role=vault` |
| kanidm01 | `role=auth`, `tier=test` |
| testvm01/02/03 | `tier=test` |

---

## Troubleshooting Workflows

@@ -212,11 +321,12 @@ node_load1{instance=~"ns1.*"}

### Investigate Service Issues

1. Check `up{job="<service>"}` for scrape failures
1. Check `up{job="<service>"}` or `up{hostname="<host>"}` for scrape failures
2. Use `list_targets` to see target health details
3. Query service logs: `{host="<host>", systemd_unit="<service>.service"}`
4. Search for errors: `{host="<host>"} |= "error"`
3. Query service logs: `{hostname="<host>", systemd_unit="<service>.service"}`
4. Search for errors: `{hostname="<host>"} |= "error"`
5. Check `list_alerts` for related alerts
6. Use role filters for group issues: `up{role="dns"}` to check all DNS servers

### After Deploying Changes

@@ -225,10 +335,21 @@ node_load1{instance=~"ns1.*"}

3. Check service logs for startup issues
4. Check service metrics are being scraped

### Monitor VM Bootstrap

When provisioning new VMs, track bootstrap progress:

1. Watch bootstrap logs: `{job="bootstrap", hostname="<hostname>"}`
2. Check for failures: `{job="bootstrap", hostname="<hostname>", stage="failed"}`
3. After success, verify host appears in metrics: `up{hostname="<hostname>"}`
4. Check logs are flowing: `{hostname="<hostname>"}`

See [docs/host-creation.md](../../../docs/host-creation.md) for the full host creation pipeline.

### Debug SSH/Access Issues

```logql
{host="<host>", systemd_unit="sshd.service"}
{hostname="<host>", systemd_unit="sshd.service"}
```

### Check Recent Upgrades

@@ -246,5 +367,6 @@ With `start: "24h"` to see last 24 hours of upgrades across all hosts.

- Default scrape interval is 15s for most metrics targets
- Default log lookback is 1h - use `start` parameter for older logs
- Use `rate()` for counter metrics, direct queries for gauges
- The `instance` label includes the port, use regex matching (`=~`) for hostname-only filters
- Use the `hostname` label to filter metrics by host (simpler than regex on `instance`)
- Host metadata labels (`role`, `tier`, `dns_role`) are propagated to all scrape targets
- Log `MESSAGE` field contains the actual log content in JSON format
@@ -73,6 +73,7 @@ Additional context, caveats, or references.

- **Reference existing patterns**: Mention how this fits with existing infrastructure
- **Tables for comparisons**: Use markdown tables when comparing options
- **Practical focus**: Emphasize what needs to happen, not theory
- **Mermaid diagrams**: Use mermaid code blocks for architecture diagrams, flow charts, or other graphs when relevant to the plan. Keep node labels short and use `<br/>` for line breaks

## Examples of Good Plans
.gitignore (vendored, 3 lines changed)

@@ -2,6 +2,9 @@
result
result-*

# MCP config (contains secrets)
.mcp.json

# Terraform/OpenTofu
terraform/.terraform/
terraform/.terraform.lock.hcl
@@ -20,7 +20,9 @@

      "env": {
        "PROMETHEUS_URL": "https://prometheus.home.2rjus.net",
        "ALERTMANAGER_URL": "https://alertmanager.home.2rjus.net",
        "LOKI_URL": "http://monitoring01.home.2rjus.net:3100"
        "LOKI_URL": "https://loki.home.2rjus.net",
        "LOKI_USERNAME": "promtail",
        "LOKI_PASSWORD": "<password from: bao kv get -field=password secret/shared/loki/push-auth>"
      }
    },
    "homelab-deploy": {

@@ -31,9 +33,16 @@

        "--",
        "mcp",
        "--nats-url", "nats://nats1.home.2rjus.net:4222",
        "--nkey-file", "/home/torjus/.config/homelab-deploy/test-deployer.nkey"
        "--nkey-file", "/home/torjus/.config/homelab-deploy/test-deployer.nkey",
        "--enable-builds"
      ]
    },
    "git-explorer": {
      "command": "nix",
      "args": ["run", "git+https://git.t-juice.club/torjus/labmcp#git-explorer", "--", "serve"],
      "env": {
        "GIT_REPO_PATH": "/home/torjus/git/nixos-servers"
      }
    }
  }
}
.sops.yaml (deleted, 52 lines)

@@ -1,52 +0,0 @@
keys:
  - &admin_torjus age1lznyk4ee7e7x8n92cq2n87kz9920473ks5u9jlhd3dczfzq4wamqept56u
  - &server_ns1 age1hz2lz4k050ru3shrk5j3zk3f8azxmrp54pktw5a7nzjml4saudesx6jsl0
  - &server_ns2 age1w2q4gm2lrcgdzscq8du3ssyvk6qtzm4fcszc92z9ftclq23yyydqdga5um
  - &server_ha1 age1d2w5zece9647qwyq4vas9qyqegg96xwmg6c86440a6eg4uj6dd2qrq0w3l
  - &server_http-proxy age1gq8434ku0xekqmvnseeunv83e779cg03c06gwrusnymdsr3rpufqx6vr3m
  - &server_ca age1288993th0ge00reg4zqueyvmkrsvk829cs068eekjqfdprsrkeqql7mljk
  - &server_monitoring01 age1vpns76ykll8jgdlu3h05cur4ew2t3k7u03kxdg8y6ypfhsfhq9fqyurjey
  - &server_jelly01 age1hchvlf3apn8g8jq2743pw53sd6v6ay6xu6lqk0qufrjeccan9vzsc7hdfq
  - &server_nix-cache01 age1w029fksjv0edrff9p7s03tgk3axecdkppqymfpwfn2nu2gsqqefqc37sxq
  - &server_pgdb1 age1ha34qeksr4jeaecevqvv2afqem67eja2mvawlmrqsudch0e7fe7qtpsekv
  - &server_nats1 age1cxt8kwqzx35yuldazcc49q88qvgy9ajkz30xu0h37uw3ts97jagqgmn2ga
creation_rules:
  - path_regex: secrets/[^/]+\.(yaml|json|env|ini)
    key_groups:
      - age:
          - *admin_torjus
          - *server_ns1
          - *server_ns2
          - *server_ha1
          - *server_http-proxy
          - *server_ca
          - *server_monitoring01
          - *server_jelly01
          - *server_nix-cache01
          - *server_pgdb1
          - *server_nats1
  - path_regex: secrets/ca/[^/]+\.(yaml|json|env|ini|)
    key_groups:
      - age:
          - *admin_torjus
          - *server_ca
  - path_regex: secrets/monitoring01/[^/]+\.(yaml|json|env|ini)
    key_groups:
      - age:
          - *admin_torjus
          - *server_monitoring01
  - path_regex: secrets/ca/keys/.+
    key_groups:
      - age:
          - *admin_torjus
          - *server_ca
  - path_regex: secrets/nix-cache01/.+
    key_groups:
      - age:
          - *admin_torjus
          - *server_nix-cache01
  - path_regex: secrets/http-proxy/.+
    key_groups:
      - age:
          - *admin_torjus
          - *server_http-proxy
CLAUDE.md (272 lines changed)

@@ -35,6 +35,34 @@ nix build .#create-host
Do not automatically deploy changes. Deployments are usually done by updating the master branch, and then triggering the auto update on the specific host.

### SSH Commands

Do not run SSH commands directly. If a command needs to be run on a remote host, provide the command to the user and ask them to run it manually.

### Sharing Command Output via Loki

All hosts have the `pipe-to-loki` script for sending command output or terminal sessions to Loki, allowing users to share output with Claude without copy-pasting.

**Pipe mode** - send command output:
```bash
command | pipe-to-loki                  # Auto-generated ID
command | pipe-to-loki --id my-test     # Custom ID
```

**Session mode** - record interactive terminal session:
```bash
pipe-to-loki --record                   # Start recording, exit to send
pipe-to-loki --record --id my-session   # With custom ID
```

The script prints the session ID which the user can share. Query results with:
```logql
{job="pipe-to-loki"}                       # All entries
{job="pipe-to-loki", id="my-test"}         # Specific ID
{job="pipe-to-loki", hostname="testvm01"}  # From specific host
{job="pipe-to-loki", type="session"}       # Only sessions
```

### Testing Feature Branches on Hosts

All hosts have the `nixos-rebuild-test` helper script for testing feature branches before merging:

@@ -61,25 +89,53 @@ Do not run `nix flake update`. Should only be done manually by user.

### Development Environment

```bash
# Enter development shell (provides ansible, python3)
# Enter development shell
nix develop
```

The devshell provides: `ansible`, `tofu` (OpenTofu), `bao` (OpenBao CLI), `create-host`, and `homelab-deploy`.

**Important:** When suggesting commands that use devshell tools, always use `nix develop -c <command>` syntax rather than assuming the user is already in a devshell. For example:
```bash
# Good - works regardless of current shell
nix develop -c tofu plan

# Avoid - requires user to be in devshell
tofu plan
```

**OpenTofu:** Use the `-chdir` option instead of `cd` when running tofu commands in subdirectories:
```bash
# Good - uses -chdir option
nix develop -c tofu -chdir=terraform plan
nix develop -c tofu -chdir=terraform/vault apply

# Avoid - changing directories
cd terraform && tofu plan
```

### Ansible

Ansible configuration and playbooks are in `/ansible/`. See [ansible/README.md](ansible/README.md) for inventory groups, available playbooks, and usage examples.

The devshell sets `ANSIBLE_CONFIG` automatically, so no `-i` flag is needed.
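For example, running one of the playbooks referenced later in this document from outside the devshell:

```bash
nix develop -c ansible-playbook ansible/playbooks/provision-approle.yml -l <hostname>
```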
### Secrets Management

Secrets are managed by OpenBao (Vault) using AppRole authentication. Most hosts use the
`vault.secrets` option defined in `system/vault-secrets.nix` to fetch secrets at boot.
Terraform manages the secrets and AppRole policies in `terraform/vault/`.

Legacy sops-nix is still present but only actively used by the `ca` host. Do not edit any
`.sops.yaml` or any file within `secrets/`. Ask the user to modify if necessary.

### Git Workflow

**Important:** Never commit directly to `master` unless the user explicitly asks for it. Always create a feature branch for changes.

**Important:** Never amend commits to `master` unless the user explicitly asks for it. Amending rewrites history and causes issues for deployed configurations.

**Important:** Never force push to `master`. If a commit on master has an error, fix it with a new commit rather than rewriting history.

**Important:** Do not use `gh pr create` to create pull requests. The git server does not support GitHub CLI for PR creation. Instead, push the branch and let the user create the PR manually via the web interface.

When starting a new plan or task, the first step should typically be to create and checkout a new branch with an appropriate name (e.g., `git checkout -b dns-automation` or `git checkout -b fix-nginx-config`).

### Plan Management

@@ -132,67 +188,16 @@ Two MCP servers are available for searching NixOS options and packages:

This ensures documentation matches the exact nixpkgs version (currently NixOS 25.11) used by this flake.

### Lab Monitoring Log Queries
### Lab Monitoring

The **lab-monitoring** MCP server can query logs from Loki. All hosts ship systemd journal logs via Promtail.
The **lab-monitoring** MCP server provides access to Prometheus metrics and Loki logs. Use the `/observability` skill for detailed reference on:

**Loki Label Reference:**
- Available Prometheus jobs and exporters
- Loki labels and LogQL query syntax
- Bootstrap log monitoring for new VMs
- Common troubleshooting workflows

- `host` - Hostname (e.g., `ns1`, `ns2`, `monitoring01`, `ha1`). Use this label, not `hostname`.
- `systemd_unit` - Systemd unit name (e.g., `nsd.service`, `prometheus.service`, `nixos-upgrade.service`)
- `job` - Either `systemd-journal` (most logs) or `varlog` (file-based logs like caddy access logs)
- `filename` - For `varlog` job, the log file path (e.g., `/var/log/caddy/nix-cache.log`)

Journal log entries are JSON-formatted with the actual log message in the `MESSAGE` field. Other useful fields include `PRIORITY` and `SYSLOG_IDENTIFIER`.

**Example LogQL queries:**
```
# Logs from a specific service on a host
{host="ns2", systemd_unit="nsd.service"}

# Substring match on log content
{host="ns1", systemd_unit="nsd.service"} |= "error"

# File-based logs (e.g., caddy access logs)
{job="varlog", hostname="nix-cache01"}
```

Default lookback is 1 hour. Use the `start` parameter with relative durations (e.g., `24h`, `168h`) for older logs.

### Lab Monitoring Prometheus Queries

The **lab-monitoring** MCP server can query Prometheus metrics via PromQL. The `instance` label uses the FQDN format `<host>.home.2rjus.net:<port>`.

**Prometheus Job Names:**

- `node-exporter` - System metrics from all hosts (CPU, memory, disk, network)
- `caddy` - Reverse proxy metrics (http-proxy)
- `nix-cache_caddy` - Nix binary cache metrics
- `home-assistant` - Home automation metrics
- `jellyfin` - Media server metrics
- `loki` / `prometheus` / `grafana` - Monitoring stack self-metrics
- `step-ca` - Internal CA metrics
- `pve-exporter` - Proxmox hypervisor metrics
- `smartctl` - Disk SMART health (gunter)
- `wireguard` - VPN metrics (http-proxy)
- `pushgateway` - Push-based metrics (e.g., backup results)
- `restic_rest` - Backup server metrics
- `labmon` / `ghettoptt` / `alertmanager` - Other service metrics

**Example PromQL queries:**
```
# Check all targets are up
up

# CPU usage for a specific host
rate(node_cpu_seconds_total{instance=~"ns1.*", mode!="idle"}[5m])

# Memory usage across all hosts
node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes

# Disk space
node_filesystem_avail_bytes{mountpoint="/"}
```
The skill contains up-to-date information about all scrape targets, host labels, and example queries.

### Deploying to Test Hosts

@@ -229,6 +234,21 @@ deploy(role="vault", action="switch")

**Note:** Only test-tier hosts with `homelab.deploy.enable = true` and the listener service running will respond to deployments.

**Deploying to Prod Hosts:**

The MCP server only deploys to test-tier hosts. For prod hosts, use the CLI directly:

```bash
nix develop -c homelab-deploy -- deploy \
  --nats-url nats://nats1.home.2rjus.net:4222 \
  --nkey-file ~/.config/homelab-deploy/admin-deployer.nkey \
  --branch <branch-name> \
  --action switch \
  deploy.prod.<hostname>
```

Subject format: `deploy.<tier>.<hostname>` (e.g., `deploy.prod.monitoring02`, `deploy.test.testvm01`)

**Verifying Deployments:**

After deploying, use the `nixos_flake_info` metric from nixos-exporter to verify the host is running the expected revision:
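The verification query itself is the same one used during alarm investigation (shown again here for convenience):

```promql
nixos_flake_info{hostname="<hostname>"}
```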
@@ -248,10 +268,11 @@ The `current_rev` label contains the git commit hash of the deployed flake configuration

- `default.nix` - Entry point, imports configuration.nix and services
- `configuration.nix` - Host-specific settings (networking, hardware, users)
- `/system/` - Shared system-level configurations applied to ALL hosts
  - Core modules: nix.nix, sshd.nix, sops.nix (legacy), vault-secrets.nix, acme.nix, autoupgrade.nix
  - Core modules: nix.nix, sshd.nix, vault-secrets.nix, acme.nix, autoupgrade.nix
  - Additional modules: motd.nix (dynamic MOTD), packages.nix (base packages), root-user.nix (root config), homelab-deploy.nix (NATS listener)
  - Monitoring: node-exporter and promtail on every host
- `/modules/` - Custom NixOS modules
  - `homelab/` - Homelab-specific options (DNS automation, monitoring scrape targets)
  - `homelab/` - Homelab-specific options (see "Homelab Module Options" section below)
- `/lib/` - Nix library functions
  - `dns-zone.nix` - DNS zone generation functions
  - `monitoring.nix` - Prometheus scrape target generation functions

@@ -259,14 +280,17 @@ The `current_rev` label contains the git commit hash of the deployed flake configuration

  - `home-assistant/` - Home automation stack
  - `monitoring/` - Observability stack (Prometheus, Grafana, Loki, Tempo)
  - `ns/` - DNS services (authoritative, resolver, zone generation)
  - `http-proxy/`, `ca/`, `postgres/`, `nats/`, `jellyfin/`, etc.
- `/secrets/` - SOPS-encrypted secrets with age encryption (legacy, only used by ca)
  - `vault/` - OpenBao (Vault) secrets server
  - `actions-runner/` - GitHub Actions runner
  - `http-proxy/`, `postgres/`, `nats/`, `jellyfin/`, etc.
- `/common/` - Shared configurations (e.g., VM guest agent)
- `/docs/` - Documentation and plans
  - `plans/` - Future plans and proposals
  - `plans/completed/` - Completed plans (moved here when done)
- `/playbooks/` - Ansible playbooks for fleet management
- `/.sops.yaml` - SOPS configuration with age keys (legacy, only used by ca)
- `/ansible/` - Ansible configuration and playbooks
  - `ansible.cfg` - Ansible configuration (inventory path, defaults)
  - `inventory/` - Dynamic and static inventory sources
  - `playbooks/` - Ansible playbooks for fleet management

### Configuration Inheritance

@@ -283,37 +307,27 @@ All hosts automatically get:

- Nix binary cache (nix-cache.home.2rjus.net)
- SSH with root login enabled
- OpenBao (Vault) secrets management via AppRole
- Internal ACME CA integration (ca.home.2rjus.net)
- Internal ACME CA integration (OpenBao PKI at vault.home.2rjus.net)
- Daily auto-upgrades with auto-reboot
- Prometheus node-exporter + Promtail (logs to monitoring01)
- Prometheus node-exporter + Promtail (logs to monitoring02)
- Monitoring scrape target auto-registration via `homelab.monitoring` options
- Custom root CA trust
- DNS zone auto-registration via `homelab.dns` options

### Active Hosts
### Hosts

Production servers managed by `rebuild-all.sh`:
- `ns1`, `ns2` - Primary/secondary DNS servers (10.69.13.5/6)
- `ca` - Internal Certificate Authority
- `ha1` - Home Assistant + Zigbee2MQTT + Mosquitto
- `http-proxy` - Reverse proxy
- `monitoring01` - Full observability stack (Prometheus, Grafana, Loki, Tempo, Pyroscope)
- `jelly01` - Jellyfin media server
- `nix-cache01` - Binary cache server
- `pgdb1` - PostgreSQL database
- `nats1` - NATS messaging server
Host configurations are in `/hosts/<hostname>/`. See `flake.nix` for the complete list of `nixosConfigurations`.

Template/test hosts:
- `template1` - Base template for cloning new hosts
Use `nix flake show` or `nix develop -c ansible-inventory --graph` to list all hosts.

### Flake Inputs

- `nixpkgs` - NixOS 25.11 stable (primary)
- `nixpkgs-unstable` - Unstable channel (available via overlay as `pkgs.unstable.<package>`)
- `sops-nix` - Secrets management (legacy, only used by ca)
- `nixos-exporter` - NixOS module for exposing flake revision metrics (used to verify deployments)
- `homelab-deploy` - NATS-based remote deployment tool for test-tier hosts
- Custom packages from git.t-juice.club:
  - `alerttonotify` - Alert routing
  - `labmon` - Lab monitoring

### Network Architecture

@@ -321,7 +335,7 @@ Template/test hosts:

- Infrastructure subnet: `10.69.13.x`
- DNS: ns1/ns2 provide authoritative DNS with primary-secondary setup
- Internal CA for ACME certificates (no Let's Encrypt)
- Centralized monitoring at monitoring01
- Centralized monitoring at monitoring02
- Static networking via systemd-networkd

### Secrets Management

@@ -335,12 +349,7 @@ Most hosts use OpenBao (Vault) for secrets:

- `extractKey` option extracts a single key from vault JSON as a plain file
- Secrets fetched at boot by `vault-secret-<name>.service` systemd units
- Fallback to cached secrets in `/var/lib/vault/cache/` when Vault is unreachable
- Provision AppRole credentials: `nix develop -c ansible-playbook playbooks/provision-approle.yml -e hostname=<host>`

Legacy SOPS (only used by `ca` host):
- SOPS with age encryption, keys in `.sops.yaml`
- Shared secrets: `/secrets/secrets.yaml`
- Per-host secrets: `/secrets/<hostname>/`
- Provision AppRole credentials: `nix develop -c ansible-playbook ansible/playbooks/provision-approle.yml -l <hostname>`
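A minimal sketch of what a host-level declaration might look like (only `vault.enable`, `vault.secrets`, and `extractKey` are option names taken from this document; the `path` attribute and the KV layout are assumptions for illustration):

```nix
{
  vault.enable = true;

  # Fetch one secret at boot and extract a single key as a plain file
  vault.secrets."loki-push-auth" = {
    path = "secret/shared/loki/push-auth";  # assumed KV path, matching the example earlier in this document
    extractKey = "password";
  };
}
```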
### Auto-Upgrade System

@@ -364,7 +373,7 @@ Template VMs are built from `hosts/template2` and deployed to Proxmox using Ansible

```bash
# Build NixOS image and deploy to Proxmox as template
nix develop -c ansible-playbook -i playbooks/inventory.ini playbooks/build-and-deploy-template.yml
nix develop -c ansible-playbook ansible/playbooks/build-and-deploy-template.yml
```

This playbook:

@@ -402,9 +411,21 @@ Example VM deployment includes:

- Custom CPU/memory/disk sizing
- VLAN tagging
- QEMU guest agent
- Automatic Vault credential provisioning via `vault_wrapped_token`

OpenTofu outputs the VM's IP address after deployment for easy SSH access.

**Automatic Vault Credential Provisioning:**

VMs can receive Vault (OpenBao) credentials automatically during bootstrap:

1. OpenTofu generates a wrapped token via `terraform/vault/` and stores it in the VM configuration
2. Cloud-init passes `VAULT_WRAPPED_TOKEN` and `NIXOS_FLAKE_BRANCH` to the bootstrap script
3. The bootstrap script unwraps the token to obtain AppRole credentials
4. Credentials are written to `/var/lib/vault/approle/` before the NixOS rebuild

This eliminates the need for manual `provision-approle.yml` playbook runs on new VMs. Bootstrap progress is logged to Loki with `job="bootstrap"` labels.
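Bootstrap progress for a new VM can then be followed with the queries from the observability reference, e.g.:

```logql
{job="bootstrap", hostname="<hostname>"}
```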
#### Template Rebuilding and Terraform State
|
||||
|
||||
When the Proxmox template is rebuilt (via `build-and-deploy-template.yml`), the template name may change. This would normally cause Terraform to want to recreate all existing VMs, but that's unnecessary since VMs are independent once cloned.
|
||||
@@ -427,7 +448,7 @@ This means:
|
||||
- `tofu plan` won't show spurious changes for Proxmox-managed defaults
|
||||
|
||||
**When rebuilding the template:**
|
||||
1. Run `nix develop -c ansible-playbook -i playbooks/inventory.ini playbooks/build-and-deploy-template.yml`
|
||||
1. Run `nix develop -c ansible-playbook ansible/playbooks/build-and-deploy-template.yml`
|
||||
2. Update `default_template_name` in `terraform/variables.tf` if the name changed
|
||||
3. Run `tofu plan` - should show no VM recreations (only template name in state)
|
||||
4. Run `tofu apply` - updates state without touching existing VMs
|
||||
@@ -435,20 +456,11 @@ This means:
|
||||
|
||||
### Adding a New Host
|
||||
|
||||
1. Create `/hosts/<hostname>/` directory
|
||||
2. Copy structure from `template1` or similar host
|
||||
3. Add host entry to `flake.nix` nixosConfigurations
|
||||
4. Configure networking in `configuration.nix` (static IP via `systemd.network.networks`, DNS servers)
|
||||
5. (Optional) Add `homelab.dns.cnames` if the host needs CNAME aliases
|
||||
6. Add `vault.enable = true;` to the host configuration
|
||||
7. Add AppRole policy in `terraform/vault/approle.tf` and any secrets in `secrets.tf`
|
||||
8. Run `tofu apply` in `terraform/vault/`
|
||||
9. User clones template host
|
||||
10. User runs `prepare-host.sh` on new host
|
||||
11. Provision AppRole credentials: `nix develop -c ansible-playbook playbooks/provision-approle.yml -e hostname=<host>`
|
||||
12. Commit changes, and merge to master.
|
||||
13. Deploy by running `nixos-rebuild boot --flake URL#<hostname>` on the host.
|
||||
14. Run auto-upgrade on DNS servers (ns1, ns2) to pick up the new host's DNS entry
|
||||
See [docs/host-creation.md](docs/host-creation.md) for the complete host creation pipeline, including:
|
||||
- Using the `create-host` script to generate host configurations
|
||||
- Deploying VMs and secrets with OpenTofu
|
||||
- Monitoring the bootstrap process via Loki
|
||||
- Verification and troubleshooting steps
|
||||
|
||||
**Note:** DNS A records and Prometheus node-exporter scrape targets are auto-generated from the host's `systemd.network.networks` static IP configuration. No manual zone file or Prometheus config editing is required.
|
||||
|
||||
@@ -468,27 +480,21 @@ This means:
|
||||
|
||||
### Monitoring Stack

All hosts ship metrics and logs to `monitoring01`:
- **Metrics**: Prometheus scrapes node-exporter from all hosts
- **Logs**: Promtail ships logs to Loki on monitoring01
- **Access**: Grafana at monitoring01 for visualization
- **Tracing**: Tempo for distributed tracing
- **Profiling**: Pyroscope for continuous profiling
All hosts ship metrics and logs to `monitoring02`:
- **Metrics**: VictoriaMetrics scrapes node-exporter from all hosts
- **Logs**: Promtail ships logs to Loki on monitoring02
- **Access**: Grafana at monitoring02 for visualization

**Scrape Target Auto-Generation:**

Prometheus scrape targets are automatically generated from host configurations, following the same pattern as DNS zone generation:
VictoriaMetrics scrape targets are automatically generated from host configurations, following the same pattern as DNS zone generation:

- **Node-exporter**: All flake hosts with static IPs are automatically added as node-exporter targets
- **Service targets**: Defined via `homelab.monitoring.scrapeTargets` in service modules
- **External targets**: Non-flake hosts defined in `/services/monitoring/external-targets.nix`
- **Library**: `lib/monitoring.nix` provides `generateNodeExporterTargets` and `generateScrapeConfigs`

Host monitoring options (`homelab.monitoring.*`):
- `enable` (default: `true`) - Include host in Prometheus node-exporter scrape targets
- `scrapeTargets` (default: `[]`) - Additional scrape targets exposed by this host (job_name, port, metrics_path, scheme, scrape_interval, honor_labels)

Service modules declare their scrape targets directly (e.g., `services/ca/default.nix` declares step-ca on port 9000). The Prometheus config on monitoring01 auto-generates scrape configs from all hosts.
Service modules declare their scrape targets directly via `homelab.monitoring.scrapeTargets`. The VictoriaMetrics config on monitoring02 auto-generates scrape configs from all hosts. See "Homelab Module Options" section for available options.
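As an illustration of the pattern, a service module's declaration might look roughly like this (field names follow the `scrapeTargets` list above; the exact option types live in `modules/homelab/`, so treat the values as assumptions):

```nix
# Hypothetical service module snippet - field names from the scrapeTargets option above.
{
  homelab.monitoring.scrapeTargets = [
    {
      job_name = "step-ca";      # example job, port 9000 as in the old services/ca module
      port = 9000;
      metrics_path = "/metrics";
      scheme = "https";
      scrape_interval = "30s";
      honor_labels = false;
    }
  ];
}
```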

To add monitoring targets for non-NixOS hosts, edit `/services/monitoring/external-targets.nix`.

@@ -507,13 +513,31 @@ DNS zone entries are automatically generated from host configurations:
- **External hosts**: Non-flake hosts defined in `/services/ns/external-hosts.nix`
- **Serial number**: Uses `self.sourceInfo.lastModified` (git commit timestamp)

Host DNS options (`homelab.dns.*`):
- `enable` (default: `true`) - Include host in DNS zone generation
- `cnames` (default: `[]`) - List of CNAME aliases pointing to this host

Hosts are automatically excluded from DNS if:
- `homelab.dns.enable = false` (e.g., template hosts)
- No static IP configured (e.g., DHCP-only hosts)
- Network interface is a VPN/tunnel (wg*, tun*, tap*)

To add DNS entries for non-NixOS hosts, edit `/services/ns/external-hosts.nix`.

### Homelab Module Options

The `modules/homelab/` directory defines custom options used across hosts for automation and metadata.

**Host options (`homelab.host.*`):**
- `tier` - Deployment tier: `test` or `prod`. Test-tier hosts can receive remote deployments and have different credential access.
- `priority` - Alerting priority: `high` or `low`. Controls alerting thresholds for the host.
- `role` - Primary role designation (e.g., `dns`, `database`, `bastion`, `vault`)
- `labels` - Free-form key-value metadata for host categorization
- `labels.ansible = "false"` - Exclude host from Ansible dynamic inventory

**DNS options (`homelab.dns.*`):**
- `enable` (default: `true`) - Include host in DNS zone generation
- `cnames` (default: `[]`) - List of CNAME aliases pointing to this host

**Monitoring options (`homelab.monitoring.*`):**
- `enable` (default: `true`) - Include host in Prometheus node-exporter scrape targets
- `scrapeTargets` (default: `[]`) - Additional scrape targets exposed by this host

**Deploy options (`homelab.deploy.*`):**
- `enable` (default: `false`) - Enable NATS-based remote deployment listener. When enabled, the host listens for deployment commands via NATS and can be targeted by the `homelab-deploy` MCP server.
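Taken together, a host's metadata block might look like the following sketch (option names from the lists above; the values are illustrative, not copied from a real host):

```nix
{
  homelab = {
    host = {
      tier = "test";                # or "prod"
      priority = "low";             # alerting priority
      role = "dns";
      labels.ansible = "false";     # exclude from the Ansible dynamic inventory
    };
    dns.cnames = [ "auth" ];        # CNAME aliases pointing at this host
    monitoring.enable = true;       # include in node-exporter scrape targets
    deploy.enable = false;          # NATS-based remote deployment listener
  };
}
```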

@@ -10,10 +10,9 @@ NixOS Flake-based configuration repository for a homelab infrastructure. All hos
| `ca` | Internal Certificate Authority |
| `ha1` | Home Assistant + Zigbee2MQTT + Mosquitto |
| `http-proxy` | Reverse proxy |
| `monitoring01` | Prometheus, Grafana, Loki, Tempo, Pyroscope |
| `monitoring02` | VictoriaMetrics, Grafana, Loki, Alertmanager |
| `jelly01` | Jellyfin media server |
| `nix-cache01` | Nix binary cache |
| `pgdb1` | PostgreSQL |
| `nix-cache02` | Nix binary cache + NATS-based build service |
| `nats1` | NATS messaging |
| `vault01` | OpenBao (Vault) secrets management |
| `template1`, `template2` | VM templates for cloning new hosts |
@@ -122,4 +121,4 @@ No manual intervention is required after `tofu apply`.
- Infrastructure subnet: `10.69.13.0/24`
- DNS: ns1/ns2 authoritative with primary-secondary AXFR
- Internal CA for TLS certificates (migrating from step-ca to OpenBao PKI)
- Centralized monitoring at monitoring01
- Centralized monitoring at monitoring02

120 ansible/README.md Normal file
@@ -0,0 +1,120 @@
|
||||
# Ansible Configuration
|
||||
|
||||
This directory contains Ansible configuration for fleet management tasks.
|
||||
|
||||
## Structure
|
||||
|
||||
```
|
||||
ansible/
|
||||
├── ansible.cfg # Ansible configuration
|
||||
├── inventory/
|
||||
│ ├── dynamic_flake.py # Dynamic inventory from NixOS flake
|
||||
│ ├── static.yml # Non-flake hosts (Proxmox, etc.)
|
||||
│ └── group_vars/
|
||||
│ └── all.yml # Common variables
|
||||
└── playbooks/
|
||||
├── build-and-deploy-template.yml
|
||||
├── provision-approle.yml
|
||||
├── restart-service.yml
|
||||
└── run-upgrade.yml
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
The devshell automatically configures `ANSIBLE_CONFIG`, so commands work without extra flags:
|
||||
|
||||
```bash
|
||||
# List inventory groups
|
||||
nix develop -c ansible-inventory --graph
|
||||
|
||||
# List hosts in a specific group
|
||||
nix develop -c ansible-inventory --list | jq '.role_dns'
|
||||
|
||||
# Run a playbook
|
||||
nix develop -c ansible-playbook ansible/playbooks/run-upgrade.yml -l tier_test
|
||||
```
|
||||
|
||||
## Inventory
|
||||
|
||||
The inventory combines dynamic and static sources automatically.
|
||||
|
||||
### Dynamic Inventory (from flake)
|
||||
|
||||
The `dynamic_flake.py` script extracts hosts from the NixOS flake using `homelab.host.*` options:
|
||||
|
||||
**Groups generated:**
|
||||
- `flake_hosts` - All NixOS hosts from the flake
|
||||
- `tier_test`, `tier_prod` - By `homelab.host.tier`
|
||||
- `role_dns`, `role_vault`, `role_monitoring`, etc. - By `homelab.host.role`
|
||||
|
||||
**Host variables set:**
|
||||
- `tier` - Deployment tier (test/prod)
|
||||
- `role` - Host role
|
||||
- `short_hostname` - Hostname without domain
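For orientation, `nix develop -c ansible-inventory --graph` output then looks roughly like this (host names are examples from this repository; the exact groups depend on the flake):

```
@all:
  |--@flake_hosts:
  |  |--ns1
  |  |--ns2
  |  |--monitoring02
  |  |--testvm01
  |--@role_dns:
  |  |--ns1
  |  |--ns2
  |--@tier_test:
  |  |--testvm01
  |--@proxmox:
  |  |--pve1
  |--@ungrouped:
```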
|
||||
|
||||
### Static Inventory
|
||||
|
||||
Non-flake hosts are defined in `inventory/static.yml`:
|
||||
|
||||
- `proxmox` - Proxmox hypervisors
|
||||
|
||||
## Playbooks
|
||||
|
||||
| Playbook | Description | Example |
|
||||
|----------|-------------|---------|
|
||||
| `run-upgrade.yml` | Trigger nixos-upgrade on hosts | `-l tier_prod` |
|
||||
| `restart-service.yml` | Restart a systemd service | `-l role_dns -e service=unbound` |
|
||||
| `reboot.yml` | Rolling reboot (one host at a time) | `-l tier_test` |
|
||||
| `provision-approle.yml` | Deploy Vault credentials (single host only) | `-l testvm01` |
|
||||
| `build-and-deploy-template.yml` | Build and deploy Proxmox template | (no limit needed) |
|
||||
|
||||
### Examples
|
||||
|
||||
```bash
|
||||
# Restart unbound on all DNS servers
|
||||
nix develop -c ansible-playbook ansible/playbooks/restart-service.yml \
|
||||
-l role_dns -e service=unbound
|
||||
|
||||
# Trigger upgrade on all test hosts
|
||||
nix develop -c ansible-playbook ansible/playbooks/run-upgrade.yml -l tier_test
|
||||
|
||||
# Provision Vault credentials for a specific host
|
||||
nix develop -c ansible-playbook ansible/playbooks/provision-approle.yml -l testvm01
|
||||
|
||||
# Build and deploy Proxmox template
|
||||
nix develop -c ansible-playbook ansible/playbooks/build-and-deploy-template.yml
|
||||
|
||||
# Rolling reboot of test hosts (one at a time, waits for each to come back)
|
||||
nix develop -c ansible-playbook ansible/playbooks/reboot.yml -l tier_test
|
||||
```
|
||||
|
||||
## Excluding Flake Hosts
|
||||
|
||||
To exclude a flake host from the dynamic inventory, add the `ansible = "false"` label in the host's configuration:
|
||||
|
||||
```nix
|
||||
homelab.host.labels.ansible = "false";
|
||||
```
|
||||
|
||||
Hosts with `homelab.dns.enable = false` are also excluded automatically.
|
||||
|
||||
## Adding Non-Flake Hosts
|
||||
|
||||
Edit `inventory/static.yml` to add hosts not managed by the NixOS flake:
|
||||
|
||||
```yaml
|
||||
all:
|
||||
children:
|
||||
my_group:
|
||||
hosts:
|
||||
host1.example.com:
|
||||
ansible_user: admin
|
||||
```
|
||||
|
||||
## Common Variables
|
||||
|
||||
Variables in `inventory/group_vars/all.yml` apply to all hosts:
|
||||
|
||||
- `ansible_user` - Default SSH user (root)
|
||||
- `domain` - Domain name (home.2rjus.net)
|
||||
- `vault_addr` - Vault server URL
|
||||
17 ansible/ansible.cfg Normal file
@@ -0,0 +1,17 @@
|
||||
[defaults]
|
||||
inventory = inventory/
|
||||
remote_user = root
|
||||
host_key_checking = False
|
||||
|
||||
# Reduce SSH connection overhead
|
||||
forks = 10
|
||||
pipelining = True
|
||||
|
||||
# Output formatting (YAML output via builtin default callback)
|
||||
stdout_callback = default
|
||||
callbacks_enabled = profile_tasks
|
||||
result_format = yaml
|
||||
|
||||
[ssh_connection]
|
||||
# Reuse SSH connections
|
||||
ssh_args = -o ControlMaster=auto -o ControlPersist=60s
|
||||
162 ansible/inventory/dynamic_flake.py Executable file
@@ -0,0 +1,162 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Dynamic Ansible inventory script that extracts host information from the NixOS flake.
|
||||
|
||||
Generates groups:
|
||||
- flake_hosts: All hosts defined in the flake
|
||||
- tier_test, tier_prod: Hosts by deployment tier
|
||||
- role_<name>: Hosts by role (dns, vault, monitoring, etc.)
|
||||
|
||||
Usage:
|
||||
./dynamic_flake.py --list # Return full inventory
|
||||
./dynamic_flake.py --host X # Return host vars (not used, but required by Ansible)
|
||||
"""
|
||||
|
||||
import json
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def get_flake_dir() -> Path:
|
||||
"""Find the flake root directory."""
|
||||
script_dir = Path(__file__).resolve().parent
|
||||
# ansible/inventory/dynamic_flake.py -> repo root
|
||||
return script_dir.parent.parent
|
||||
|
||||
|
||||
def evaluate_flake() -> dict:
|
||||
"""Evaluate the flake and extract host metadata."""
|
||||
flake_dir = get_flake_dir()
|
||||
|
||||
# Nix expression to extract relevant config from each host
|
||||
nix_expr = """
|
||||
configs: builtins.mapAttrs (name: cfg: {
|
||||
hostname = cfg.config.networking.hostName;
|
||||
domain = cfg.config.networking.domain or "home.2rjus.net";
|
||||
tier = cfg.config.homelab.host.tier;
|
||||
role = cfg.config.homelab.host.role;
|
||||
labels = cfg.config.homelab.host.labels;
|
||||
dns_enabled = cfg.config.homelab.dns.enable;
|
||||
}) configs
|
||||
"""
|
||||
|
||||
try:
|
||||
result = subprocess.run(
|
||||
[
|
||||
"nix",
|
||||
"eval",
|
||||
"--json",
|
||||
f"{flake_dir}#nixosConfigurations",
|
||||
"--apply",
|
||||
nix_expr,
|
||||
],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
check=True,
|
||||
cwd=flake_dir,
|
||||
)
|
||||
return json.loads(result.stdout)
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(f"Error evaluating flake: {e.stderr}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
except json.JSONDecodeError as e:
|
||||
print(f"Error parsing nix output: {e}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def sanitize_group_name(name: str) -> str:
|
||||
"""Sanitize a string for use as an Ansible group name.
|
||||
|
||||
Ansible group names should contain only alphanumeric characters and underscores.
|
||||
"""
|
||||
return name.replace("-", "_")
|
||||
|
||||
|
||||
def build_inventory(hosts_data: dict) -> dict:
|
||||
"""Build Ansible inventory structure from host data."""
|
||||
inventory = {
|
||||
"_meta": {"hostvars": {}},
|
||||
"flake_hosts": {"hosts": []},
|
||||
}
|
||||
|
||||
# Track groups we need to create
|
||||
tier_groups: dict[str, list[str]] = {}
|
||||
role_groups: dict[str, list[str]] = {}
|
||||
|
||||
for _config_name, host_info in hosts_data.items():
|
||||
hostname = host_info["hostname"]
|
||||
domain = host_info["domain"]
|
||||
tier = host_info["tier"]
|
||||
role = host_info["role"]
|
||||
labels = host_info["labels"]
|
||||
dns_enabled = host_info["dns_enabled"]
|
||||
|
||||
# Skip hosts that have DNS disabled (like templates)
|
||||
if not dns_enabled:
|
||||
continue
|
||||
|
||||
# Skip hosts with ansible = "false" label
|
||||
if labels.get("ansible") == "false":
|
||||
continue
|
||||
|
||||
fqdn = f"{hostname}.{domain}"
|
||||
|
||||
# Use short hostname as inventory name, FQDN for connection
|
||||
inventory_name = hostname
|
||||
|
||||
# Add to flake_hosts group
|
||||
inventory["flake_hosts"]["hosts"].append(inventory_name)
|
||||
|
||||
# Add host variables
|
||||
inventory["_meta"]["hostvars"][inventory_name] = {
|
||||
"ansible_host": fqdn, # Connect using FQDN
|
||||
"fqdn": fqdn,
|
||||
"tier": tier,
|
||||
"role": role,
|
||||
}
|
||||
|
||||
# Group by tier
|
||||
tier_group = f"tier_{sanitize_group_name(tier)}"
|
||||
if tier_group not in tier_groups:
|
||||
tier_groups[tier_group] = []
|
||||
tier_groups[tier_group].append(inventory_name)
|
||||
|
||||
# Group by role (if set)
|
||||
if role:
|
||||
role_group = f"role_{sanitize_group_name(role)}"
|
||||
if role_group not in role_groups:
|
||||
role_groups[role_group] = []
|
||||
role_groups[role_group].append(inventory_name)
|
||||
|
||||
# Add tier groups to inventory
|
||||
for group_name, hosts in tier_groups.items():
|
||||
inventory[group_name] = {"hosts": hosts}
|
||||
|
||||
# Add role groups to inventory
|
||||
for group_name, hosts in role_groups.items():
|
||||
inventory[group_name] = {"hosts": hosts}
|
||||
|
||||
return inventory
|
||||
|
||||
|
||||
def main():
|
||||
if len(sys.argv) < 2:
|
||||
print("Usage: dynamic_flake.py --list | --host <hostname>", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
if sys.argv[1] == "--list":
|
||||
hosts_data = evaluate_flake()
|
||||
inventory = build_inventory(hosts_data)
|
||||
print(json.dumps(inventory, indent=2))
|
||||
elif sys.argv[1] == "--host":
|
||||
# Ansible calls this to get vars for a specific host
|
||||
# We provide all vars in _meta.hostvars, so just return empty
|
||||
print(json.dumps({}))
|
||||
else:
|
||||
print(f"Unknown option: {sys.argv[1]}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
5 ansible/inventory/group_vars/all.yml Normal file
@@ -0,0 +1,5 @@
|
||||
# Common variables for all hosts
|
||||
|
||||
ansible_user: root
|
||||
domain: home.2rjus.net
|
||||
vault_addr: https://vault01.home.2rjus.net:8200
|
||||
13 ansible/inventory/static.yml Normal file
@@ -0,0 +1,13 @@
|
||||
# Static inventory for non-flake hosts
|
||||
#
|
||||
# Hosts defined here are merged with the dynamic flake inventory.
|
||||
# Use this for infrastructure that isn't managed by NixOS.
|
||||
#
|
||||
# Use short hostnames as inventory names with ansible_host for FQDN.
|
||||
|
||||
all:
|
||||
children:
|
||||
proxmox:
|
||||
hosts:
|
||||
pve1:
|
||||
ansible_host: pve1.home.2rjus.net
|
||||
@@ -15,13 +15,13 @@
|
||||
- name: Build NixOS image
|
||||
ansible.builtin.command:
|
||||
cmd: "nixos-rebuild build-image --image-variant proxmox --flake .#template2"
|
||||
chdir: "{{ playbook_dir }}/.."
|
||||
chdir: "{{ playbook_dir }}/../.."
|
||||
register: build_result
|
||||
changed_when: true
|
||||
|
||||
- name: Find built image file
|
||||
ansible.builtin.find:
|
||||
paths: "{{ playbook_dir}}/../result"
|
||||
paths: "{{ playbook_dir}}/../../result"
|
||||
patterns: "*.vma.zst"
|
||||
recurse: true
|
||||
register: image_files
|
||||
@@ -99,3 +99,48 @@
|
||||
- name: Display success message
|
||||
ansible.builtin.debug:
|
||||
msg: "Template VM {{ template_vmid }} created successfully on {{ storage }}"
|
||||
|
||||
- name: Update Terraform template name
|
||||
hosts: localhost
|
||||
gather_facts: false
|
||||
|
||||
vars:
|
||||
terraform_dir: "{{ playbook_dir }}/../../terraform"
|
||||
|
||||
tasks:
|
||||
- name: Get image filename from earlier play
|
||||
ansible.builtin.set_fact:
|
||||
image_filename: "{{ hostvars['localhost']['image_filename'] }}"
|
||||
|
||||
- name: Extract template name from image filename
|
||||
ansible.builtin.set_fact:
|
||||
new_template_name: "{{ image_filename | regex_replace('\\.vma\\.zst$', '') | regex_replace('^vzdump-qemu-', '') }}"
|
||||
|
||||
- name: Read current Terraform variables file
|
||||
ansible.builtin.slurp:
|
||||
src: "{{ terraform_dir }}/variables.tf"
|
||||
register: variables_tf_content
|
||||
|
||||
- name: Extract current template name from variables.tf
|
||||
ansible.builtin.set_fact:
|
||||
current_template_name: "{{ (variables_tf_content.content | b64decode) | regex_search('variable \"default_template_name\"[^}]+default\\s*=\\s*\"([^\"]+)\"', '\\1') | first }}"
|
||||
|
||||
- name: Check if template name has changed
|
||||
ansible.builtin.set_fact:
|
||||
template_name_changed: "{{ current_template_name != new_template_name }}"
|
||||
|
||||
- name: Display template name status
|
||||
ansible.builtin.debug:
|
||||
msg: "Template name: {{ current_template_name }} -> {{ new_template_name }} ({{ 'changed' if template_name_changed else 'unchanged' }})"
|
||||
|
||||
- name: Update default_template_name in variables.tf
|
||||
ansible.builtin.replace:
|
||||
path: "{{ terraform_dir }}/variables.tf"
|
||||
regexp: '(variable "default_template_name"[^}]+default\s*=\s*)"[^"]+"'
|
||||
replace: '\1"{{ new_template_name }}"'
|
||||
when: template_name_changed
|
||||
|
||||
- name: Display update result
|
||||
ansible.builtin.debug:
|
||||
msg: "Updated terraform/variables.tf with new template name: {{ new_template_name }}"
|
||||
when: template_name_changed
|
||||
@@ -1,54 +1,60 @@
|
||||
---
|
||||
# Provision OpenBao AppRole credentials to an existing host
|
||||
# Usage: nix develop -c ansible-playbook playbooks/provision-approle.yml -e hostname=ha1
|
||||
# Provision OpenBao AppRole credentials to a host
|
||||
#
|
||||
# Usage: ansible-playbook ansible/playbooks/provision-approle.yml -l <hostname>
|
||||
# Requires: BAO_ADDR and BAO_TOKEN environment variables set
|
||||
#
|
||||
# IMPORTANT: This playbook must target exactly one host to prevent
|
||||
# accidentally regenerating credentials for multiple hosts.
|
||||
|
||||
- name: Fetch AppRole credentials from OpenBao
|
||||
hosts: localhost
|
||||
connection: local
|
||||
- name: Validate single host target
|
||||
hosts: all
|
||||
gather_facts: false
|
||||
|
||||
tasks:
|
||||
- name: Fail if targeting multiple hosts
|
||||
ansible.builtin.fail:
|
||||
msg: |
|
||||
This playbook must target exactly one host.
|
||||
Use: ansible-playbook provision-approle.yml -l <hostname>
|
||||
|
||||
Targeting multiple hosts would regenerate credentials for all of them,
|
||||
potentially breaking existing services.
|
||||
when: ansible_play_hosts | length != 1
|
||||
run_once: true
|
||||
|
||||
- name: Provision AppRole credentials
|
||||
hosts: all
|
||||
gather_facts: false
|
||||
|
||||
vars:
|
||||
vault_addr: "{{ lookup('env', 'BAO_ADDR') | default('https://vault01.home.2rjus.net:8200', true) }}"
|
||||
domain: "home.2rjus.net"
|
||||
target_hostname: "{{ inventory_hostname.split('.')[0] }}"
|
||||
|
||||
tasks:
|
||||
- name: Validate hostname is provided
|
||||
ansible.builtin.fail:
|
||||
msg: "hostname variable is required. Use: -e hostname=<name>"
|
||||
when: hostname is not defined
|
||||
- name: Display target host
|
||||
ansible.builtin.debug:
|
||||
msg: "Provisioning AppRole credentials for: {{ target_hostname }}"
|
||||
|
||||
- name: Get role-id for host
|
||||
ansible.builtin.command:
|
||||
cmd: "bao read -field=role_id auth/approle/role/{{ hostname }}/role-id"
|
||||
cmd: "bao read -field=role_id auth/approle/role/{{ target_hostname }}/role-id"
|
||||
environment:
|
||||
BAO_ADDR: "{{ vault_addr }}"
|
||||
BAO_SKIP_VERIFY: "1"
|
||||
register: role_id_result
|
||||
changed_when: false
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Generate secret-id for host
|
||||
ansible.builtin.command:
|
||||
cmd: "bao write -field=secret_id -f auth/approle/role/{{ hostname }}/secret-id"
|
||||
cmd: "bao write -field=secret_id -f auth/approle/role/{{ target_hostname }}/secret-id"
|
||||
environment:
|
||||
BAO_ADDR: "{{ vault_addr }}"
|
||||
BAO_SKIP_VERIFY: "1"
|
||||
register: secret_id_result
|
||||
changed_when: true
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Add target host to inventory
|
||||
ansible.builtin.add_host:
|
||||
name: "{{ hostname }}.{{ domain }}"
|
||||
groups: vault_target
|
||||
ansible_user: root
|
||||
vault_role_id: "{{ role_id_result.stdout }}"
|
||||
vault_secret_id: "{{ secret_id_result.stdout }}"
|
||||
|
||||
- name: Deploy AppRole credentials to host
|
||||
hosts: vault_target
|
||||
gather_facts: false
|
||||
|
||||
tasks:
|
||||
- name: Create AppRole directory
|
||||
ansible.builtin.file:
|
||||
path: /var/lib/vault/approle
|
||||
@@ -59,7 +65,7 @@
|
||||
|
||||
- name: Write role-id
|
||||
ansible.builtin.copy:
|
||||
content: "{{ vault_role_id }}"
|
||||
content: "{{ role_id_result.stdout }}"
|
||||
dest: /var/lib/vault/approle/role-id
|
||||
mode: "0600"
|
||||
owner: root
|
||||
@@ -67,7 +73,7 @@
|
||||
|
||||
- name: Write secret-id
|
||||
ansible.builtin.copy:
|
||||
content: "{{ vault_secret_id }}"
|
||||
content: "{{ secret_id_result.stdout }}"
|
||||
dest: /var/lib/vault/approle/secret-id
|
||||
mode: "0600"
|
||||
owner: root
|
||||
48 ansible/playbooks/reboot.yml Normal file
@@ -0,0 +1,48 @@
|
||||
---
|
||||
# Reboot hosts with rolling strategy to avoid taking down redundant services
|
||||
#
|
||||
# Usage examples:
|
||||
# # Reboot a single host
|
||||
# ansible-playbook reboot.yml -l testvm01
|
||||
#
|
||||
# # Reboot all test hosts (one at a time)
|
||||
# ansible-playbook reboot.yml -l tier_test
|
||||
#
|
||||
# # Reboot all DNS servers safely (one at a time)
|
||||
# ansible-playbook reboot.yml -l role_dns
|
||||
#
|
||||
# Safety features:
|
||||
# - serial: 1 ensures only one host reboots at a time
|
||||
# - Waits for host to come back online before proceeding
|
||||
# - Groups hosts by role to avoid rebooting same-role hosts consecutively
|
||||
|
||||
- name: Reboot hosts (rolling)
|
||||
hosts: all
|
||||
serial: 1
|
||||
order: shuffle # Randomize to spread out same-role hosts
|
||||
gather_facts: false
|
||||
|
||||
vars:
|
||||
reboot_timeout: 300 # 5 minutes to wait for host to come back
|
||||
|
||||
tasks:
|
||||
- name: Display reboot target
|
||||
ansible.builtin.debug:
|
||||
msg: "Rebooting {{ inventory_hostname }} (role: {{ role | default('none') }})"
|
||||
|
||||
- name: Reboot the host
|
||||
ansible.builtin.systemd:
|
||||
name: reboot.target
|
||||
state: started
|
||||
async: 1
|
||||
poll: 0
|
||||
ignore_errors: true
|
||||
|
||||
- name: Wait for host to come back online
|
||||
ansible.builtin.wait_for_connection:
|
||||
delay: 5
|
||||
timeout: "{{ reboot_timeout }}"
|
||||
|
||||
- name: Display reboot result
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ inventory_hostname }} rebooted successfully"
|
||||
40 ansible/playbooks/restart-service.yml Normal file
@@ -0,0 +1,40 @@
|
||||
---
|
||||
# Restart a systemd service on target hosts
|
||||
#
|
||||
# Usage examples:
|
||||
# # Restart unbound on all DNS servers
|
||||
# ansible-playbook restart-service.yml -l role_dns -e service=unbound
|
||||
#
|
||||
# # Restart nginx on a specific host
|
||||
# ansible-playbook restart-service.yml -l http-proxy.home.2rjus.net -e service=nginx
|
||||
#
|
||||
# # Restart promtail on all prod hosts
|
||||
# ansible-playbook restart-service.yml -l tier_prod -e service=promtail
|
||||
|
||||
- name: Restart systemd service
|
||||
hosts: all
|
||||
gather_facts: false
|
||||
|
||||
tasks:
|
||||
- name: Validate service name provided
|
||||
ansible.builtin.fail:
|
||||
msg: |
|
||||
The 'service' variable is required.
|
||||
Usage: ansible-playbook restart-service.yml -l <target> -e service=<name>
|
||||
|
||||
Examples:
|
||||
-e service=nginx
|
||||
-e service=unbound
|
||||
-e service=promtail
|
||||
when: service is not defined
|
||||
run_once: true
|
||||
|
||||
- name: Restart {{ service }}
|
||||
ansible.builtin.systemd:
|
||||
name: "{{ service }}"
|
||||
state: restarted
|
||||
register: restart_result
|
||||
|
||||
- name: Display result
|
||||
ansible.builtin.debug:
|
||||
msg: "Service {{ service }} restarted on {{ inventory_hostname }}"
|
||||
21 common/ssh-audit.nix Normal file
@@ -0,0 +1,21 @@
|
||||
# SSH session command auditing
|
||||
#
|
||||
# Logs all commands executed by users who logged in interactively (SSH).
|
||||
# System services and nix builds are excluded via auid filter.
|
||||
#
|
||||
# Logs are sent to journald and forwarded to Loki via promtail.
|
||||
# Query with: {host="<hostname>"} |= "EXECVE"
|
||||
{
|
||||
# Enable Linux audit subsystem
|
||||
security.audit.enable = true;
|
||||
security.auditd.enable = true;
|
||||
|
||||
# Log execve syscalls only from interactive login sessions
|
||||
# auid!=4294967295 means "audit login uid is set" (excludes system services, nix builds)
|
||||
security.audit.rules = [
|
||||
"-a exit,always -F arch=b64 -S execve -F auid!=4294967295"
|
||||
];
|
||||
|
||||
# Forward audit logs to journald (so promtail ships them to Loki)
|
||||
services.journald.audit = true;
|
||||
}
|
||||
217 docs/host-creation.md Normal file
@@ -0,0 +1,217 @@
|
||||
# Host Creation Pipeline
|
||||
|
||||
This document describes the process for creating new hosts in the homelab infrastructure.
|
||||
|
||||
## Overview
|
||||
|
||||
We use the `create-host` script to create new hosts, which generates default configurations from a template. We then use OpenTofu to deploy both secrets and VMs. The VMs boot using a template image (built from `hosts/template2`), which starts a bootstrap process. This bootstrap process applies the host's NixOS configuration and then reboots into the new config.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
All tools are available in the devshell: `create-host`, `bao` (OpenBao CLI), `tofu`.
|
||||
|
||||
```bash
|
||||
nix develop
|
||||
```
|
||||
|
||||
## Steps
|
||||
|
||||
Steps marked with **USER** must be performed by the user due to credential requirements.
|
||||
|
||||
1. **USER**: Run `create-host --hostname <name> --ip <ip/prefix>`
|
||||
2. Edit the auto-generated configurations in `hosts/<hostname>/` to import whatever modules are needed for its purpose
|
||||
3. Add any secrets needed to `terraform/vault/`
|
||||
4. Edit the VM specs in `terraform/vms.tf` if needed. To deploy from a branch other than master, add `flake_branch = "<branch>"` to the VM definition
|
||||
5. Push configuration to master (or the branch specified by `flake_branch`)
|
||||
6. **USER**: Apply terraform:
|
||||
```bash
|
||||
nix develop -c tofu -chdir=terraform/vault apply
|
||||
nix develop -c tofu -chdir=terraform apply
|
||||
```
|
||||
7. Once terraform completes, a VM boots in Proxmox using the template image
|
||||
8. The VM runs the `nixos-bootstrap` service, which applies the host config and reboots
|
||||
9. After reboot, the host should be operational
|
||||
10. Trigger auto-upgrade on `ns1` and `ns2` to propagate DNS records for the new host
11. Trigger auto-upgrade on `monitoring01` to add the host to Prometheus scrape targets (example commands below)
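For example (the same commands appear in the troubleshooting sections below):

```bash
ssh root@ns1 systemctl start nixos-upgrade.service
ssh root@ns2 systemctl start nixos-upgrade.service
ssh root@monitoring01 systemctl start nixos-upgrade.service
```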
|
||||
|
||||
## Tier Specification
|
||||
|
||||
New hosts should set `homelab.host.tier` in their configuration:
|
||||
|
||||
```nix
|
||||
homelab.host.tier = "test"; # or "prod"
|
||||
```
|
||||
|
||||
- **test** - Test-tier hosts can receive remote deployments via the `homelab-deploy` MCP server and have different credential access. Use for staging/testing.
|
||||
- **prod** - Production hosts. Deployments require direct access or the CLI with appropriate credentials.
|
||||
|
||||
## Observability
|
||||
|
||||
During the bootstrap process, status updates are sent to Loki. Query bootstrap logs with:
|
||||
|
||||
```
|
||||
{job="bootstrap", hostname="<hostname>"}
|
||||
```
|
||||
|
||||
### Bootstrap Stages
|
||||
|
||||
The bootstrap process reports these stages via the `stage` label:
|
||||
|
||||
| Stage | Message | Meaning |
|
||||
|-------|---------|---------|
|
||||
| `starting` | Bootstrap starting for \<host\> (branch: \<branch\>) | Bootstrap service has started |
|
||||
| `network_ok` | Network connectivity confirmed | Can reach git server |
|
||||
| `vault_ok` | Vault credentials unwrapped and stored | AppRole credentials provisioned |
|
||||
| `vault_skip` | No Vault token provided - skipping credential setup | No wrapped token was provided |
|
||||
| `vault_warn` | Failed to unwrap Vault token - continuing without secrets | Token unwrap failed (expired/used) |
|
||||
| `building` | Starting nixos-rebuild boot | NixOS build starting |
|
||||
| `success` | Build successful - rebooting into new configuration | Build complete, rebooting |
|
||||
| `failed` | nixos-rebuild failed - manual intervention required | Build failed |
|
||||
|
||||
### Useful Queries
|
||||
|
||||
```
|
||||
# All bootstrap activity for a host
|
||||
{job="bootstrap", hostname="myhost"}
|
||||
|
||||
# Track all failures
|
||||
{job="bootstrap", stage="failed"}
|
||||
|
||||
# Monitor builds in progress
|
||||
{job="bootstrap", stage=~"building|success"}
|
||||
```
|
||||
|
||||
Once the VM reboots with its full configuration, it will start publishing metrics to Prometheus and logs to Loki via Promtail.
|
||||
|
||||
## Verification
|
||||
|
||||
1. Check bootstrap completed successfully:
|
||||
```
|
||||
{job="bootstrap", hostname="<hostname>", stage="success"}
|
||||
```
|
||||
|
||||
2. Verify the host is up and reporting metrics:
|
||||
```promql
|
||||
up{instance=~"<hostname>.*"}
|
||||
```
|
||||
|
||||
3. Verify the correct flake revision is deployed:
|
||||
```promql
|
||||
nixos_flake_info{instance=~"<hostname>.*"}
|
||||
```
|
||||
|
||||
4. Check logs are flowing:
|
||||
```
|
||||
{hostname="<hostname>"}
|
||||
```
|
||||
|
||||
5. Confirm expected services are running and producing logs
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Bootstrap Failed
|
||||
|
||||
#### Common Issues
|
||||
|
||||
* VM has trouble running the initial nixos-rebuild. This usually happens when it has to compile packages from scratch because they are not available in our local nix-cache.
|
||||
|
||||
#### Troubleshooting
|
||||
|
||||
1. Check bootstrap logs in Loki - if they never progress past `building`, the rebuild likely consumed all resources:
|
||||
```
|
||||
{job="bootstrap", hostname="<hostname>"}
|
||||
```
|
||||
|
||||
2. **USER**: SSH into the host and check the bootstrap service:
|
||||
```bash
|
||||
ssh root@<hostname>
|
||||
journalctl -u nixos-bootstrap.service
|
||||
```
|
||||
|
||||
3. If the build failed due to resource constraints, increase VM specs in `terraform/vms.tf` and redeploy, or manually run the rebuild:
|
||||
```bash
|
||||
nixos-rebuild boot --flake git+https://git.t-juice.club/torjus/nixos-servers.git#<hostname>
|
||||
```
|
||||
|
||||
4. If the host config doesn't exist in the flake, ensure step 5 was completed (config pushed to the correct branch).
|
||||
|
||||
### Vault Credentials Not Working
|
||||
|
||||
Usually caused by running the `create-host` script without proper credentials, or by a wrapped token that has expired or has already been used.
|
||||
|
||||
#### Troubleshooting
|
||||
|
||||
1. Check if credentials exist on the host:
|
||||
```bash
|
||||
ssh root@<hostname>
|
||||
ls -la /var/lib/vault/approle/
|
||||
```
|
||||
|
||||
2. Check bootstrap logs for vault-related stages:
|
||||
```
|
||||
{job="bootstrap", hostname="<hostname>", stage=~"vault.*"}
|
||||
```
|
||||
|
||||
3. **USER**: Regenerate and provision credentials manually:
|
||||
```bash
|
||||
nix develop -c ansible-playbook ansible/playbooks/provision-approle.yml -l <hostname>
|
||||
```
|
||||
|
||||
### Host Not Appearing in DNS
|
||||
|
||||
Usually caused by not having deployed the commit with the new host to ns1/ns2.
|
||||
|
||||
#### Troubleshooting
|
||||
|
||||
1. Verify the host config has a static IP configured in `systemd.network.networks`
|
||||
|
||||
2. Check that `homelab.dns.enable` is not set to `false`
|
||||
|
||||
3. **USER**: Trigger auto-upgrade on DNS servers:
|
||||
```bash
|
||||
ssh root@ns1 systemctl start nixos-upgrade.service
|
||||
ssh root@ns2 systemctl start nixos-upgrade.service
|
||||
```
|
||||
|
||||
4. Verify DNS resolution after upgrade completes:
|
||||
```bash
|
||||
dig @ns1.home.2rjus.net <hostname>.home.2rjus.net
|
||||
```
|
||||
|
||||
### Host Not Being Scraped by Prometheus
|
||||
|
||||
Usually caused by not having deployed the commit with the new host to the monitoring host.
|
||||
|
||||
#### Troubleshooting
|
||||
|
||||
1. Check that `homelab.monitoring.enable` is not set to `false`
|
||||
|
||||
2. **USER**: Trigger auto-upgrade on monitoring01:
|
||||
```bash
|
||||
ssh root@monitoring01 systemctl start nixos-upgrade.service
|
||||
```
|
||||
|
||||
3. Verify the target appears in Prometheus:
|
||||
```promql
|
||||
up{instance=~"<hostname>.*"}
|
||||
```
|
||||
|
||||
4. If the target is down, check that node-exporter is running on the host:
|
||||
```bash
|
||||
ssh root@<hostname> systemctl status prometheus-node-exporter.service
|
||||
```
|
||||
|
||||
## Related Files
|
||||
|
||||
| Path | Description |
|
||||
|------|-------------|
|
||||
| `scripts/create-host/` | The `create-host` script that generates host configurations |
|
||||
| `hosts/template2/` | Template VM configuration (base image for new VMs) |
|
||||
| `hosts/template2/bootstrap.nix` | Bootstrap service that applies NixOS config on first boot |
|
||||
| `terraform/vms.tf` | VM definitions (specs, IPs, branch overrides) |
|
||||
| `terraform/cloud-init.tf` | Cloud-init configuration (passes hostname, branch, vault token) |
|
||||
| `terraform/vault/approle.tf` | AppRole policies for each host |
|
||||
| `terraform/vault/secrets.tf` | Secret definitions in Vault |
|
||||
| `terraform/vault/hosts-generated.tf` | Auto-generated wrapped tokens for VM bootstrap |
|
||||
| `playbooks/provision-approle.yml` | Ansible playbook for manual credential provisioning |
|
||||
| `flake.nix` | Flake with all host configurations (add new hosts here) |
|
||||
@@ -1,192 +0,0 @@
|
||||
# Authentication System Replacement Plan
|
||||
|
||||
## Overview
|
||||
|
||||
Replace the current auth01 setup (LLDAP + Authelia) with a modern, unified authentication solution. The current setup is not in active use, making this a good time to evaluate alternatives.
|
||||
|
||||
## Goals
|
||||
|
||||
1. **Central user database** - Manage users across all homelab hosts from a single source
|
||||
2. **Linux PAM/NSS integration** - Users can SSH into hosts using central credentials
|
||||
3. **UID/GID consistency** - Proper POSIX attributes for NAS share permissions
|
||||
4. **OIDC provider** - Single sign-on for homelab web services (Grafana, etc.)
|
||||
|
||||
## Options Evaluated
|
||||
|
||||
### OpenLDAP (raw)
|
||||
|
||||
- **NixOS Support:** Good (`services.openldap` with `declarativeContents`)
|
||||
- **Pros:** Most widely supported, very flexible
|
||||
- **Cons:** LDIF format is painful, schema management is complex, no built-in OIDC, requires SSSD on each client
|
||||
- **Verdict:** Doesn't address LDAP complexity concerns
|
||||
|
||||
### LLDAP + Authelia (current)
|
||||
|
||||
- **NixOS Support:** Both have good modules
|
||||
- **Pros:** Already configured, lightweight, nice web UIs
|
||||
- **Cons:** Two services to manage, limited POSIX attribute support in LLDAP, requires SSSD on every client host
|
||||
- **Verdict:** Workable but has friction for NAS/UID goals
|
||||
|
||||
### FreeIPA
|
||||
|
||||
- **NixOS Support:** None
|
||||
- **Pros:** Full enterprise solution (LDAP + Kerberos + DNS + CA)
|
||||
- **Cons:** Extremely heavy, wants to own DNS, designed for Red Hat ecosystems, massive overkill for homelab
|
||||
- **Verdict:** Overkill, no NixOS support
|
||||
|
||||
### Keycloak
|
||||
|
||||
- **NixOS Support:** None
|
||||
- **Pros:** Good OIDC/SAML, nice UI
|
||||
- **Cons:** Primarily an identity broker not a user directory, poor POSIX support, heavy (Java)
|
||||
- **Verdict:** Wrong tool for Linux user management
|
||||
|
||||
### Authentik
|
||||
|
||||
- **NixOS Support:** None (would need Docker)
|
||||
- **Pros:** All-in-one with LDAP outpost and OIDC, modern UI
|
||||
- **Cons:** Heavy stack (Python + PostgreSQL + Redis), LDAP is a separate component
|
||||
- **Verdict:** Would work but requires Docker and is heavy
|
||||
|
||||
### Kanidm
|
||||
|
||||
- **NixOS Support:** Excellent - first-class module with PAM/NSS integration
|
||||
- **Pros:**
|
||||
- Native PAM/NSS module (no SSSD needed)
|
||||
- Built-in OIDC provider
|
||||
- Optional LDAP interface for legacy services
|
||||
- Declarative provisioning via NixOS (users, groups, OAuth2 clients)
|
||||
- Modern, written in Rust
|
||||
- Single service handles everything
|
||||
- **Cons:** Newer project, smaller community than LDAP
|
||||
- **Verdict:** Best fit for requirements
|
||||
|
||||
### Pocket-ID
|
||||
|
||||
- **NixOS Support:** Unknown
|
||||
- **Pros:** Very lightweight, passkey-first
|
||||
- **Cons:** No LDAP, no PAM/NSS integration - purely OIDC for web apps
|
||||
- **Verdict:** Doesn't solve Linux user management goal
|
||||
|
||||
## Recommendation: Kanidm
|
||||
|
||||
Kanidm is the recommended solution for the following reasons:
|
||||
|
||||
| Requirement | Kanidm Support |
|
||||
|-------------|----------------|
|
||||
| Central user database | Native |
|
||||
| Linux PAM/NSS (host login) | Native NixOS module |
|
||||
| UID/GID for NAS | POSIX attributes supported |
|
||||
| OIDC for services | Built-in |
|
||||
| Declarative config | Excellent NixOS provisioning |
|
||||
| Simplicity | Modern API, LDAP optional |
|
||||
| NixOS integration | First-class |
|
||||
|
||||
### Key NixOS Features
|
||||
|
||||
**Server configuration:**
|
||||
```nix
|
||||
services.kanidm.enableServer = true;
|
||||
services.kanidm.serverSettings = {
|
||||
domain = "home.2rjus.net";
|
||||
origin = "https://auth.home.2rjus.net";
|
||||
ldapbindaddress = "0.0.0.0:636"; # Optional LDAP interface
|
||||
};
|
||||
```
|
||||
|
||||
**Declarative user provisioning:**
|
||||
```nix
|
||||
services.kanidm.provision.enable = true;
|
||||
services.kanidm.provision.persons.torjus = {
|
||||
displayName = "Torjus";
|
||||
groups = [ "admins" "nas-users" ];
|
||||
};
|
||||
```
|
||||
|
||||
**Declarative OAuth2 clients:**
|
||||
```nix
|
||||
services.kanidm.provision.systems.oauth2.grafana = {
|
||||
displayName = "Grafana";
|
||||
originUrl = "https://grafana.home.2rjus.net/login/generic_oauth";
|
||||
originLanding = "https://grafana.home.2rjus.net";
|
||||
};
|
||||
```
|
||||
|
||||
**Client host configuration (add to system/):**
|
||||
```nix
|
||||
services.kanidm.enableClient = true;
|
||||
services.kanidm.enablePam = true;
|
||||
services.kanidm.clientSettings.uri = "https://auth.home.2rjus.net";
|
||||
```
|
||||
|
||||
## NAS Integration
|
||||
|
||||
### Current: TrueNAS CORE (FreeBSD)
|
||||
|
||||
TrueNAS CORE has a built-in LDAP client. Kanidm's read-only LDAP interface will work for NFS share permissions:
|
||||
|
||||
- **NFS shares**: Only need consistent UID/GID mapping - Kanidm's LDAP provides this
|
||||
- **No SMB requirement**: SMB would need Samba schema attributes (deprecated in TrueNAS 13.0+), but we're NFS-only
|
||||
|
||||
Configuration approach:
|
||||
1. Enable Kanidm's LDAP interface (`ldapbindaddress = "0.0.0.0:636"`)
|
||||
2. Import internal CA certificate into TrueNAS
|
||||
3. Configure TrueNAS LDAP client with Kanidm's Base DN and bind credentials
|
||||
4. Users/groups appear in TrueNAS permission dropdowns
|
||||
|
||||
Note: Kanidm's LDAP is read-only and uses LDAPS only (no StartTLS). This is fine for our use case.
|
||||
|
||||
### Future: NixOS NAS
|
||||
|
||||
When the NAS is migrated to NixOS, it becomes a first-class citizen:
|
||||
|
||||
- Native Kanidm PAM/NSS integration (same as other hosts)
|
||||
- No LDAP compatibility layer needed
|
||||
- Full integration with the rest of the homelab
|
||||
|
||||
This future migration path is a strong argument for Kanidm over LDAP-only solutions.
|
||||
|
||||
## Implementation Steps
|
||||
|
||||
1. **Create Kanidm service module** in `services/kanidm/`
|
||||
- Server configuration
|
||||
- TLS via internal ACME
|
||||
- Vault secrets for admin passwords
|
||||
|
||||
2. **Configure declarative provisioning**
|
||||
- Define initial users and groups
|
||||
- Set up POSIX attributes (UID/GID ranges)
|
||||
|
||||
3. **Add OIDC clients** for homelab services
|
||||
- Grafana
|
||||
- Other services as needed
|
||||
|
||||
4. **Create client module** in `system/` for PAM/NSS
|
||||
- Enable on all hosts that need central auth
|
||||
- Configure trusted CA
|
||||
|
||||
5. **Test NAS integration**
|
||||
- Configure TrueNAS LDAP client to connect to Kanidm
|
||||
- Verify UID/GID mapping works with NFS shares
|
||||
|
||||
6. **Migrate auth01**
|
||||
- Remove LLDAP and Authelia services
|
||||
- Deploy Kanidm
|
||||
- Update DNS CNAMEs if needed
|
||||
|
||||
7. **Documentation**
|
||||
- User management procedures
|
||||
- Adding new OAuth2 clients
|
||||
- Troubleshooting PAM/NSS issues
|
||||
|
||||
## Open Questions
|
||||
|
||||
- What UID/GID range should be reserved for Kanidm-managed users?
|
||||
- Which hosts should have PAM/NSS enabled initially?
|
||||
- What OAuth2 clients are needed at launch?
|
||||
|
||||
## References
|
||||
|
||||
- [Kanidm Documentation](https://kanidm.github.io/kanidm/stable/)
|
||||
- [NixOS Kanidm Module](https://search.nixos.org/options?query=services.kanidm)
|
||||
- [Kanidm PAM/NSS Integration](https://kanidm.github.io/kanidm/stable/pam_and_nsswitch.html)
|
||||
183 docs/plans/completed/auth-system-replacement.md Normal file
@@ -0,0 +1,183 @@
|
||||
# Authentication System Replacement Plan
|
||||
|
||||
## Overview
|
||||
|
||||
Deploy a modern, unified authentication solution for the homelab. Provides central user management, SSO for web services, and consistent UID/GID mapping for NAS permissions.
|
||||
|
||||
## Goals
|
||||
|
||||
1. **Central user database** - Manage users across all homelab hosts from a single source
|
||||
2. **Linux PAM/NSS integration** - Users can SSH into hosts using central credentials
|
||||
3. **UID/GID consistency** - Proper POSIX attributes for NAS share permissions
|
||||
4. **OIDC provider** - Single sign-on for homelab web services (Grafana, etc.)
|
||||
|
||||
## Solution: Kanidm
|
||||
|
||||
Kanidm was chosen for the following reasons:
|
||||
|
||||
| Requirement | Kanidm Support |
|
||||
|-------------|----------------|
|
||||
| Central user database | Native |
|
||||
| Linux PAM/NSS (host login) | Native NixOS module |
|
||||
| UID/GID for NAS | POSIX attributes supported |
|
||||
| OIDC for services | Built-in |
|
||||
| Declarative config | Excellent NixOS provisioning |
|
||||
| Simplicity | Modern API, LDAP optional |
|
||||
| NixOS integration | First-class |
|
||||
|
||||
### Configuration Files
|
||||
|
||||
- **Host configuration:** `hosts/kanidm01/`
|
||||
- **Service module:** `services/kanidm/default.nix`
|
||||
|
||||
## NAS Integration
|
||||
|
||||
### Current: TrueNAS CORE (FreeBSD)
|
||||
|
||||
TrueNAS CORE has a built-in LDAP client. Kanidm's read-only LDAP interface will work for NFS share permissions:
|
||||
|
||||
- **NFS shares**: Only need consistent UID/GID mapping - Kanidm's LDAP provides this
|
||||
- **No SMB requirement**: SMB would need Samba schema attributes (deprecated in TrueNAS 13.0+), but we're NFS-only
|
||||
|
||||
Configuration approach:
|
||||
1. Enable Kanidm's LDAP interface (`ldapbindaddress = "0.0.0.0:636"`)
|
||||
2. Import internal CA certificate into TrueNAS
|
||||
3. Configure TrueNAS LDAP client with Kanidm's Base DN and bind credentials
|
||||
4. Users/groups appear in TrueNAS permission dropdowns
|
||||
|
||||
Note: Kanidm's LDAP is read-only and uses LDAPS only (no StartTLS). This is fine for our use case.
|
||||
|
||||
### Future: NixOS NAS
|
||||
|
||||
When the NAS is migrated to NixOS, it becomes a first-class citizen:
|
||||
|
||||
- Native Kanidm PAM/NSS integration (same as other hosts)
|
||||
- No LDAP compatibility layer needed
|
||||
- Full integration with the rest of the homelab
|
||||
|
||||
This future migration path is a strong argument for Kanidm over LDAP-only solutions.
|
||||
|
||||
## Implementation Steps
|
||||
|
||||
1. **Create kanidm01 host and service module** ✅
|
||||
- Host: `kanidm01.home.2rjus.net` (10.69.13.23, test tier)
|
||||
- Service module: `services/kanidm/`
|
||||
- TLS via internal ACME (`auth.home.2rjus.net`)
|
||||
- Vault integration for idm_admin password
|
||||
- LDAPS on port 636
|
||||
|
||||
2. **Configure provisioning** ✅
|
||||
- Groups provisioned declaratively: `admins`, `users`, `ssh-users`
|
||||
- Users managed imperatively via CLI (allows setting POSIX passwords in one step)
|
||||
- POSIX attributes enabled (UID/GID range 65,536-69,999)
|
||||
|
||||
3. **Test NAS integration** (in progress)
|
||||
- ✅ LDAP interface verified working
|
||||
- Configure TrueNAS LDAP client to connect to Kanidm
|
||||
- Verify UID/GID mapping works with NFS shares
|
||||
|
||||
4. **Add OIDC clients** for homelab services
|
||||
- Grafana
|
||||
- Other services as needed
|
||||
|
||||
5. **Create client module** in `system/` for PAM/NSS ✅
|
||||
- Module: `system/kanidm-client.nix`
|
||||
- `homelab.kanidm.enable = true` enables PAM/NSS
|
||||
- Short usernames (not SPN format)
|
||||
- Home directory symlinks via `home_alias`
|
||||
- Enabled on test tier: testvm01, testvm02, testvm03
|
||||
|
||||
6. **Documentation** ✅
|
||||
- `docs/user-management.md` - CLI workflows, troubleshooting
|
||||
- User/group creation procedures verified working
|
||||
|
||||
## Progress
|
||||
|
||||
### Completed (2026-02-08)
|
||||
|
||||
**Kanidm server deployed on kanidm01 (test tier):**
|
||||
- Host: `kanidm01.home.2rjus.net` (10.69.13.23)
|
||||
- WebUI: `https://auth.home.2rjus.net`
|
||||
- LDAPS: port 636
|
||||
- Valid certificate from internal CA
|
||||
|
||||
**Configuration:**
|
||||
- Kanidm 1.8 with secret provisioning support
|
||||
- Daily backups at 22:00 (7 versions retained)
|
||||
- Vault integration for idm_admin password
|
||||
- Prometheus monitoring scrape target configured
|
||||
|
||||
**Provisioned entities:**
|
||||
- Groups: `admins`, `users`, `ssh-users` (declarative)
|
||||
- Users managed via CLI (imperative)
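A rough sketch of that CLI flow (command names from the Kanidm CLI; the account name is a placeholder, and `docs/user-management.md` has the verified steps):

```bash
kanidm person create alice "Alice Example"
kanidm person posix set alice                      # enable POSIX attributes
kanidm group add-members ssh-users alice           # allow SSH login
kanidm person credential create-reset-token alice  # let the user set a primary credential
```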
|
||||
|
||||
**Verified working:**
|
||||
- WebUI login with idm_admin
|
||||
- LDAP bind and search with POSIX-enabled user
|
||||
- LDAPS with valid internal CA certificate
|
||||
|
||||
### Completed (2026-02-08) - PAM/NSS Client
|
||||
|
||||
**Client module deployed (`system/kanidm-client.nix`):**
|
||||
- `homelab.kanidm.enable = true` enables PAM/NSS integration
|
||||
- Connects to auth.home.2rjus.net
|
||||
- Short usernames (`torjus` instead of `torjus@home.2rjus.net`)
|
||||
- Home directory symlinks (`/home/torjus` → UUID-based dir)
|
||||
- Login restricted to `ssh-users` group
|
||||
|
||||
**Enabled on test tier:**
|
||||
- testvm01, testvm02, testvm03
|
||||
|
||||
**Verified working:**
|
||||
- User/group resolution via `getent`
|
||||
- SSH login with Kanidm unix passwords
|
||||
- Home directory creation with symlinks
|
||||
- Imperative user/group creation via CLI
|
||||
|
||||
**Documentation:**
|
||||
- `docs/user-management.md` with full CLI workflows
|
||||
- Password requirements (min 10 chars)
|
||||
- Troubleshooting guide (nscd, cache invalidation)
|
||||
|
||||
### UID/GID Range (Resolved)
|
||||
|
||||
**Range: 65,536 - 69,999** (manually allocated)
|
||||
|
||||
- Users: 65,536 - 67,999 (up to ~2500 users)
|
||||
- Groups: 68,000 - 69,999 (up to ~2000 groups)
|
||||
|
||||
Rationale:
|
||||
- Starts at Kanidm's recommended minimum (65,536)
|
||||
- Well above NixOS system users (typically <1000)
|
||||
- Avoids Podman/container issues with very high GIDs
|
||||
|
||||
### Completed (2026-02-08) - OAuth2/OIDC for Grafana
|
||||
|
||||
**OAuth2 client deployed for Grafana on monitoring02:**
|
||||
- Client ID: `grafana`
|
||||
- Redirect URL: `https://grafana-test.home.2rjus.net/login/generic_oauth`
|
||||
- Scope maps: `openid`, `profile`, `email`, `groups` for `users` group
|
||||
- Role mapping: `admins` group → Grafana Admin, others → Viewer
|
||||
|
||||
**Configuration locations:**
|
||||
- Kanidm OAuth2 client: `services/kanidm/default.nix`
|
||||
- Grafana OIDC config: `services/grafana/default.nix`
|
||||
- Vault secret: `services/grafana/oauth2-client-secret`
|
||||
|
||||
**Key findings:**
|
||||
- PKCE is required by Kanidm - enable `use_pkce = true` in Grafana
|
||||
- Must set `email_attribute_path`, `login_attribute_path`, `name_attribute_path` to extract from userinfo
|
||||
- Users need: primary credential (password + TOTP for MFA), membership in `users` group, email address set
|
||||
- Unix password is separate from primary credential (web login requires primary credential)
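A hedged sketch of the Grafana side, based on the findings above; the real configuration lives in `services/grafana/default.nix`, and the Kanidm endpoint URLs and attribute paths here are assumptions rather than copied from it:

```nix
# Sketch only - verify URLs and attribute paths against services/grafana/default.nix.
services.grafana.settings."auth.generic_oauth" = {
  enabled = true;
  name = "Kanidm";
  client_id = "grafana";
  use_pkce = true;                                  # required by Kanidm
  scopes = "openid profile email groups";
  auth_url = "https://auth.home.2rjus.net/ui/oauth2";
  token_url = "https://auth.home.2rjus.net/oauth2/token";
  api_url = "https://auth.home.2rjus.net/oauth2/openid/grafana/userinfo";
  email_attribute_path = "email";
  login_attribute_path = "preferred_username";
  name_attribute_path = "name";
  role_attribute_path = "contains(groups[*], 'admins') && 'Admin' || 'Viewer'";
  # client_secret is supplied from Vault (services/grafana/oauth2-client-secret), not inline.
};
```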
|
||||
|
||||
### Next Steps
|
||||
|
||||
1. Enable PAM/NSS on production hosts (after test tier validation)
|
||||
2. Configure TrueNAS LDAP client for NAS integration testing
|
||||
3. Add OAuth2 clients for other services as needed
|
||||
|
||||
## References
|
||||
|
||||
- [Kanidm Documentation](https://kanidm.github.io/kanidm/stable/)
|
||||
- [NixOS Kanidm Module](https://search.nixos.org/options?query=services.kanidm)
|
||||
- [Kanidm PAM/NSS Integration](https://kanidm.github.io/kanidm/stable/pam_and_nsswitch.html)
|
||||
35 docs/plans/completed/bootstrap-cache.md Normal file
@@ -0,0 +1,35 @@
|
||||
# Plan: Configure Template2 to Use Nix Cache
|
||||
|
||||
## Problem
|
||||
|
||||
New VMs bootstrapped from template2 don't use our local nix cache (nix-cache.home.2rjus.net) during the initial `nixos-rebuild boot`. This means the first build downloads everything from cache.nixos.org, which is slower and uses more bandwidth.
|
||||
|
||||
## Solution
|
||||
|
||||
Update the template2 base image to include the nix cache configuration, so new VMs immediately benefit from cached builds during bootstrap.
|
||||
|
||||
## Implementation
|
||||
|
||||
1. Add nix cache configuration to `hosts/template2/configuration.nix`:
|
||||
```nix
|
||||
nix.settings = {
|
||||
substituters = [ "https://nix-cache.home.2rjus.net" "https://cache.nixos.org" ];
|
||||
trusted-public-keys = [
|
||||
"nix-cache.home.2rjus.net:..." # Add the cache's public key
|
||||
"cache.nixos.org-1:..."
|
||||
];
|
||||
};
|
||||
```
|
||||
|
||||
2. Rebuild and redeploy the Proxmox template:
|
||||
```bash
|
||||
nix develop -c ansible-playbook -i playbooks/inventory.ini playbooks/build-and-deploy-template.yml
|
||||
```
|
||||
|
||||
3. Update `default_template_name` in `terraform/variables.tf` if the template name changed
|
||||
|
||||
## Benefits
|
||||
|
||||
- Faster VM bootstrap times
|
||||
- Reduced bandwidth to external cache
|
||||
- Most derivations will already be cached from other hosts
|
||||
72 docs/plans/completed/cert-monitoring.md Normal file
@@ -0,0 +1,72 @@
|
||||
# Certificate Monitoring Plan
|
||||
|
||||
## Summary
|
||||
|
||||
This document describes the removal of labmon certificate monitoring and outlines future needs for certificate monitoring in the homelab.
|
||||
|
||||
## What Was Removed
|
||||
|
||||
### labmon Service
|
||||
|
||||
The `labmon` service was a custom Go application that provided:
|
||||
|
||||
1. **StepMonitor**: Monitoring for step-ca (Smallstep CA) certificate provisioning and health
|
||||
2. **TLSConnectionMonitor**: Periodic TLS connection checks to verify certificate validity and expiration
|
||||
|
||||
The service exposed Prometheus metrics at `:9969` including:
|
||||
- `labmon_tlsconmon_certificate_seconds_left` - Time until certificate expiration
|
||||
- `labmon_tlsconmon_certificate_check_error` - Whether the TLS check failed
|
||||
- `labmon_stepmon_certificate_seconds_left` - Step-CA internal certificate expiration
|
||||
|
||||
### Affected Files
|
||||
|
||||
- `hosts/monitoring01/configuration.nix` - Removed labmon configuration block
|
||||
- `services/monitoring/prometheus.nix` - Removed labmon scrape target
|
||||
- `services/monitoring/rules.yml` - Removed `certificate_rules` alert group
|
||||
- `services/monitoring/alloy.nix` - Deleted (was only used for labmon profiling)
|
||||
- `services/monitoring/default.nix` - Removed alloy.nix import
|
||||
|
||||
### Removed Alerts
|
||||
|
||||
- `certificate_expiring_soon` - Warned when any monitored TLS cert had < 24h validity
|
||||
- `step_ca_serving_cert_expiring` - Critical alert for step-ca's own serving certificate
|
||||
- `certificate_check_error` - Warned when TLS connection check failed
|
||||
- `step_ca_certificate_expiring` - Critical alert for step-ca issued certificates
|
||||
|
||||
## Why It Was Removed
|
||||
|
||||
1. **step-ca decommissioned**: The primary monitoring target (step-ca) is no longer in use
|
||||
2. **Outdated codebase**: labmon was a custom tool that required maintenance
|
||||
3. **Limited value**: With ACME auto-renewal, certificates should renew automatically
|
||||
|
||||
## Current State
|
||||
|
||||
ACME certificates are now issued by OpenBao PKI at `vault.home.2rjus.net:8200`. The ACME protocol handles automatic renewal, and certificates are typically renewed well before expiration.
|
||||
|
||||
## Future Needs
|
||||
|
||||
While ACME handles renewal automatically, we should consider monitoring for:
|
||||
|
||||
1. **ACME renewal failures**: Alert when a certificate fails to renew
|
||||
- Could monitor ACME client logs (via Loki queries)
|
||||
- Could check certificate file modification times
|
||||
|
||||
2. **Certificate expiration as backup**: Even with auto-renewal, a last-resort alert for certificates approaching expiration would catch renewal failures
|
||||
|
||||
3. **Certificate transparency**: Monitor for unexpected certificate issuance
|
||||
|
||||
### Potential Solutions
|
||||
|
||||
1. **Prometheus blackbox_exporter**: Can probe TLS endpoints and export certificate expiration metrics
   - `probe_ssl_earliest_cert_expiry` metric (see the rule sketch after this list)
   - Already a standard tool, well-maintained
|
||||
|
||||
2. **Custom Loki alerting**: Query ACME service logs for renewal failures
|
||||
- Works with existing infrastructure
|
||||
- No additional services needed
|
||||
|
||||
3. **Node-exporter textfile collector**: Script that checks local certificate files and writes expiration metrics
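As a hedged sketch of what option 1 could look like as an alert rule (thresholds and labels are illustrative, and it assumes blackbox_exporter probes are already configured):

```yaml
groups:
  - name: certificate_rules
    rules:
      - alert: certificate_expiring_soon
        expr: probe_ssl_earliest_cert_expiry - time() < 14 * 86400
        for: 1h
        labels:
          severity: warning
        annotations:
          summary: "TLS certificate on {{ $labels.instance }} expires in under 14 days"
```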
|
||||
|
||||
## Status
|
||||
|
||||
**Not yet implemented.** This document serves as a placeholder for future work on certificate monitoring.
|
||||
46 docs/plans/completed/garage-s3-storage.md Normal file
@@ -0,0 +1,46 @@
|
||||
# Garage S3 Storage Server
|
||||
|
||||
## Overview
|
||||
|
||||
Deploy a Garage instance for self-hosted S3-compatible object storage.
|
||||
|
||||
## Garage Basics
|
||||
|
||||
- S3-compatible distributed object storage designed for self-hosting
|
||||
- Supports per-key, per-bucket permissions (read/write/owner)
|
||||
- Keys without explicit grants have no access
|
||||
|
||||
## NixOS Module
|
||||
|
||||
Available as `services.garage` with these key options:
|
||||
|
||||
- `services.garage.enable` - Enable the service
|
||||
- `services.garage.package` - Must be set explicitly
|
||||
- `services.garage.settings` - Freeform TOML config (replication mode, ports, RPC, etc.)
|
||||
- `services.garage.settings.metadata_dir` - Metadata storage (SSD recommended)
|
||||
- `services.garage.settings.data_dir` - Data block storage (supports multiple dirs since v0.9)
|
||||
- `services.garage.environmentFile` - For secrets like `GARAGE_RPC_SECRET`
|
||||
- `services.garage.logLevel` - error/warn/info/debug/trace
|
||||
|
||||
The NixOS module only manages the server daemon. Buckets and keys are managed externally.
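As a sketch of a minimal single-node deployment using the options above (package choice, paths, and ports are illustrative, and the `settings` keys follow Garage's TOML config, so verify them against the Garage docs):

```nix
{ pkgs, ... }:
{
  services.garage = {
    enable = true;
    package = pkgs.garage;                    # must be set explicitly
    logLevel = "info";
    environmentFile = "/var/lib/garage/env";  # holds GARAGE_RPC_SECRET
    settings = {
      replication_mode = "none";              # single-node to start
      metadata_dir = "/var/lib/garage/meta";  # SSD recommended
      data_dir = "/var/lib/garage/data";
      rpc_bind_addr = "[::]:3901";
      s3_api = {
        api_bind_addr = "[::]:3900";
        s3_region = "garage";
      };
    };
  };
}
```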
|
||||
|
||||
## Bucket/Key Management
|
||||
|
||||
No declarative NixOS options for buckets or keys. Two options:
|
||||
|
||||
1. **Terraform provider** - `jkossis/terraform-provider-garage` manages buckets, keys, and permissions via the Garage Admin API v2. Could live in `terraform/garage/` similar to `terraform/vault/`.
|
||||
2. **CLI** - `garage key create`, `garage bucket create`, `garage bucket allow`
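A sketch of the imperative flow with the commands named above (bucket and key names are placeholders; check the exact flags against `garage --help`):

```bash
garage key create backup-key
garage bucket create backups
garage bucket allow --read --write backups --key backup-key
```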
|
||||
|
||||
## Integration Ideas
|
||||
|
||||
- Store Garage API keys in Vault, fetch via `vault.secrets` on consuming hosts
|
||||
- Terraform manages both Vault secrets and Garage buckets/keys
|
||||
- Enable admin API with token for Terraform provider access
|
||||
- Add Prometheus metrics scraping (Garage exposes metrics endpoint)
|
||||
|
||||
## Open Questions
|
||||
|
||||
- Single-node or multi-node replication?
|
||||
- Which host to deploy on?
|
||||
- What to store? (backups, media, app data)
|
||||
- Expose via HTTP proxy or direct S3 API only?
|
||||
156
docs/plans/completed/monitoring-migration-victoriametrics.md
Normal file
@@ -0,0 +1,156 @@
|
||||
# Monitoring Stack Migration to VictoriaMetrics
|
||||
|
||||
## Overview
|
||||
|
||||
Migrate from Prometheus to VictoriaMetrics on a new host (monitoring02) to gain better compression
|
||||
and longer retention. Run in parallel with monitoring01 until validated, then switch over using
|
||||
a `monitoring` CNAME for seamless transition.
|
||||
|
||||
## Current State
|
||||
|
||||
**monitoring02** (10.69.13.24) - **PRIMARY**:
|
||||
- 4 CPU cores, 8GB RAM, 60GB disk
|
||||
- VictoriaMetrics with 3-month retention
|
||||
- vmalert with alerting enabled (routes to local Alertmanager)
|
||||
- Alertmanager -> alerttonotify -> NATS notification pipeline
|
||||
- Grafana with Kanidm OIDC (`grafana.home.2rjus.net`)
|
||||
- Loki (log aggregation)
|
||||
- CNAMEs: monitoring, alertmanager, grafana, grafana-test, metrics, vmalert, loki
|
||||
|
||||
**monitoring01** (10.69.13.13) - **SHUT DOWN**:
|
||||
- No longer running, pending decommission
|
||||
|
||||
## Decision: VictoriaMetrics
|
||||
|
||||
Per `docs/plans/long-term-metrics-storage.md`, VictoriaMetrics is the recommended starting point:
|
||||
- Single binary replacement for Prometheus
|
||||
- 5-10x better compression (30 days could become 180+ days in same space)
|
||||
- Same PromQL query language (Grafana dashboards work unchanged)
|
||||
- Same scrape config format (existing auto-generated configs work)
|
||||
|
||||
If multi-year retention with downsampling becomes necessary later, Thanos can be evaluated.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌─────────────────┐
|
||||
│ monitoring02 │
|
||||
│ VictoriaMetrics│
|
||||
│ + Grafana │
|
||||
monitoring │ + Loki │
|
||||
CNAME ──────────│ + Alertmanager │
|
||||
│ (vmalert) │
|
||||
└─────────────────┘
|
||||
▲
|
||||
│ scrapes
|
||||
┌───────────────┼───────────────┐
|
||||
│ │ │
|
||||
┌────┴────┐ ┌─────┴────┐ ┌─────┴────┐
|
||||
│ ns1 │ │ ha1 │ │ ... │
|
||||
│ :9100 │ │ :9100 │ │ :9100 │
|
||||
└─────────┘ └──────────┘ └──────────┘
|
||||
```
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Create monitoring02 Host [COMPLETE]
|
||||
|
||||
Host created and deployed at 10.69.13.24 (prod tier) with:
|
||||
- 4 CPU cores, 8GB RAM, 60GB disk
|
||||
- Vault integration enabled
|
||||
- NATS-based remote deployment enabled
|
||||
- Grafana with Kanidm OIDC deployed as test instance (`grafana-test.home.2rjus.net`)
|
||||
|
||||
### Phase 2: Set Up VictoriaMetrics Stack [COMPLETE]
|
||||
|
||||
New service module at `services/victoriametrics/` for VictoriaMetrics + vmalert + Alertmanager; a configuration sketch follows the component list below.
|
||||
Imported by monitoring02 alongside the existing Grafana service.
|
||||
|
||||
1. **VictoriaMetrics** (port 8428):
|
||||
- `services.victoriametrics.enable = true`
|
||||
- `retentionPeriod = "3"` (3 months)
|
||||
- All scrape configs migrated from Prometheus (22 jobs including auto-generated)
|
||||
- Static user override (DynamicUser disabled) for credential file access
|
||||
- OpenBao token fetch service + 30min refresh timer
|
||||
- Apiary bearer token via vault.secrets
|
||||
|
||||
2. **vmalert** for alerting rules:
|
||||
- Points to VictoriaMetrics datasource at localhost:8428
|
||||
- Reuses existing `services/monitoring/rules.yml` directly via `settings.rule`
|
||||
- Notifier sends to local Alertmanager at localhost:9093
|
||||
|
||||
3. **Alertmanager** (port 9093):
|
||||
- Same configuration as monitoring01 (alerttonotify webhook routing)
|
||||
- alerttonotify imported on monitoring02, routes alerts via NATS
|
||||
|
||||
4. **Grafana** (port 3000):
|
||||
- VictoriaMetrics datasource (localhost:8428) as default
|
||||
- Loki datasource pointing to localhost:3100
|
||||
|
||||
5. **Loki** (port 3100):
|
||||
- Same configuration as monitoring01 in standalone `services/loki/` module
|
||||
- Grafana datasource updated to localhost:3100
|
||||
|
||||
**Note:** pve-exporter and pushgateway scrape targets are not included on monitoring02.
|
||||
pve-exporter requires a local exporter instance; pushgateway is replaced by VictoriaMetrics
|
||||
native push support.
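A rough shape of that module, using the upstream `services.victoriametrics` and `services.vmalert` options; the values and the single scrape job shown are illustrative, not copied from `services/victoriametrics/`:

```nix
{ ... }:
{
  services.victoriametrics = {
    enable = true;
    retentionPeriod = "3";   # months
    listenAddress = ":8428";
    # Same scrape config format as Prometheus.
    prometheusConfig.scrape_configs = [
      {
        job_name = "node";
        static_configs = [ { targets = [ "ns1.home.2rjus.net:9100" ]; } ];
      }
    ];
  };

  services.vmalert = {
    enable = true;
    settings = {
      "datasource.url" = "http://localhost:8428";
      "notifier.url" = [ "http://localhost:9093" ];
      rule = [ "/path/to/rules.yml" ]; # the shared services/monitoring/rules.yml in practice
    };
  };

  services.prometheus.alertmanager = {
    enable = true;
    port = 9093;
    configuration = {
      # Real routing (alerttonotify webhook) omitted for brevity.
      route.receiver = "default";
      receivers = [ { name = "default"; } ];
    };
  };
}
```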
|
||||
|
||||
### Phase 3: Parallel Operation [COMPLETE]
|
||||
|
||||
Ran both monitoring01 and monitoring02 simultaneously to validate data collection and dashboards.
|
||||
|
||||
### Phase 4: Add monitoring CNAME [COMPLETE]
|
||||
|
||||
Added CNAMEs to monitoring02: monitoring, alertmanager, grafana, metrics, vmalert, loki.
|
||||
|
||||
### Phase 5: Update References [COMPLETE]
|
||||
|
||||
- Moved alertmanager, grafana, prometheus CNAMEs from http-proxy to monitoring02
|
||||
- Removed corresponding Caddy reverse proxy entries from http-proxy
|
||||
- monitoring02 Caddy serves alertmanager, grafana, metrics, vmalert directly
|
||||
|
||||
### Phase 6: Enable Alerting [COMPLETE]
|
||||
|
||||
- Switched vmalert from blackhole mode to local Alertmanager
|
||||
- alerttonotify service running on monitoring02 (NATS nkey from Vault)
|
||||
- prometheus-metrics Vault policy added for OpenBao scraping
|
||||
- Full alerting pipeline verified: vmalert -> Alertmanager -> alerttonotify -> NATS
|
||||
|
||||
### Phase 7: Cutover and Decommission [IN PROGRESS]
|
||||
|
||||
- monitoring01 shut down (2026-02-17)
|
||||
- Vault AppRole moved from approle.tf to hosts-generated.tf with extra_policies support
|
||||
|
||||
**Remaining cleanup (separate branch):**
|
||||
- [ ] Update `system/monitoring/logs.nix` - Promtail still points to monitoring01
|
||||
- [ ] Update `hosts/template2/bootstrap.nix` - Bootstrap Loki URL still points to monitoring01
|
||||
- [ ] Remove monitoring01 from flake.nix and host configuration
|
||||
- [ ] Destroy monitoring01 VM in Proxmox
|
||||
- [ ] Remove monitoring01 from terraform state
|
||||
- [ ] Remove or archive `services/monitoring/` (Prometheus config)
|
||||
|
||||
## Completed
|
||||
|
||||
- 2026-02-08: Phase 1 - monitoring02 host created
|
||||
- 2026-02-17: Phase 2 - VictoriaMetrics, vmalert, Alertmanager, Loki, Grafana configured
|
||||
- 2026-02-17: Phase 6 - Alerting enabled, CNAMEs migrated, monitoring01 shut down
|
||||
|
||||
## VictoriaMetrics Service Configuration
|
||||
|
||||
Implemented in `services/victoriametrics/default.nix`. Key design decisions:
|
||||
|
||||
- **Static user**: VictoriaMetrics NixOS module uses `DynamicUser`, overridden with a static
|
||||
`victoriametrics` user so vault.secrets and credential files work correctly (see the sketch after this list)
|
||||
- **Shared rules**: vmalert reuses `services/monitoring/rules.yml` via `settings.rule` path
|
||||
reference (no YAML-to-Nix conversion needed)
|
||||
- **Scrape config reuse**: Uses the same `lib/monitoring.nix` functions and
|
||||
`services/monitoring/external-targets.nix` as Prometheus for auto-generated targets
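The static-user override is roughly the following; a sketch, not the exact contents of `services/victoriametrics/default.nix`:

```nix
{ lib, ... }:
{
  users.users.victoriametrics = {
    isSystemUser = true;
    group = "victoriametrics";
  };
  users.groups.victoriametrics = { };

  systemd.services.victoriametrics.serviceConfig = {
    # Replace DynamicUser with a stable owner so vault.secrets credential files resolve.
    DynamicUser = lib.mkForce false;
    User = "victoriametrics";
    Group = "victoriametrics";
  };
}
```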
|
||||
|
||||
## Notes
|
||||
|
||||
- VictoriaMetrics uses port 8428 vs Prometheus 9090
|
||||
- PromQL compatibility is excellent
|
||||
- VictoriaMetrics native push replaces Pushgateway (remove from http-proxy if not needed)
|
||||
- monitoring02 deployed via OpenTofu using `create-host` script
|
||||
- Grafana dashboards defined declaratively via NixOS, not imported from monitoring01 state
|
||||
- Tempo and Pyroscope deferred (not actively used; can be added later if needed)
|
||||
135
docs/plans/completed/monitoring02-reboot-alert-investigation.md
Normal file
@@ -0,0 +1,135 @@
|
||||
# monitoring02 Reboot Alert Investigation
|
||||
|
||||
**Date:** 2026-02-10
|
||||
**Status:** Completed - False positive identified
|
||||
|
||||
## Summary
|
||||
|
||||
A `host_reboot` alert fired for monitoring02 at 16:27:36 UTC. Investigation determined this was a **false positive** caused by NTP clock adjustments, not an actual reboot.
|
||||
|
||||
## Alert Details
|
||||
|
||||
- **Alert:** `host_reboot`
|
||||
- **Rule:** `changes(node_boot_time_seconds[10m]) > 0`
|
||||
- **Host:** monitoring02
|
||||
- **Time:** 2026-02-10T16:27:36Z
|
||||
|
||||
## Investigation Findings
|
||||
|
||||
### Evidence Against Actual Reboot
|
||||
|
||||
1. **Uptime:** System had been up for ~40 hours (143,751 seconds) at time of alert
|
||||
2. **Consistent BOOT_ID:** All logs showed the same systemd BOOT_ID (`fd26e7f3d86f4cd688d1b1d7af62f2ad`) from Feb 9 through the alert time
|
||||
3. **No log gaps:** Logs were continuous - no shutdown/restart cycle visible
|
||||
4. **Prometheus metrics:** `node_boot_time_seconds` showed a 1-second fluctuation, then returned to normal
|
||||
|
||||
### Root Cause: NTP Clock Adjustment
|
||||
|
||||
The `node_boot_time_seconds` metric fluctuated by 1 second due to how Linux calculates boot time:
|
||||
|
||||
```
|
||||
btime = current_wall_clock_time - monotonic_uptime
|
||||
```
|
||||
|
||||
When NTP adjusts the wall clock, `btime` shifts by the same amount. The `node_timex_*` metrics confirmed this:
|
||||
|
||||
| Metric | Value |
|
||||
|--------|-------|
|
||||
| `node_timex_maxerror_seconds` (max in 3h) | 1.02 seconds |
|
||||
| `node_timex_maxerror_seconds` (max in 24h) | 2.05 seconds |
|
||||
| `node_timex_sync_status` | 1 (synced) |
|
||||
| Current `node_timex_offset_seconds` | ~9ms (normal) |
|
||||
|
||||
The kernel's estimated maximum clock error spiked to over 1 second, causing the boot time calculation to drift momentarily.
|
||||
|
||||
Additionally, `systemd-resolved` logged "Clock change detected. Flushing caches." at 16:26:53Z, corroborating the NTP adjustment.
|
||||
|
||||
## Current Time Sync Configuration
|
||||
|
||||
### NixOS Guests
|
||||
- **NTP client:** systemd-timesyncd (NixOS default)
|
||||
- **No explicit configuration** in the codebase
|
||||
- Uses default NixOS NTP server pool
|
||||
|
||||
### Proxmox VMs
|
||||
- **Clocksource:** `kvm-clock` (optimal for KVM VMs)
|
||||
- **QEMU guest agent:** Enabled
|
||||
- **No additional QEMU timing args** configured
|
||||
|
||||
## Potential Improvements
|
||||
|
||||
### 1. Improve Alert Rule (Recommended)
|
||||
|
||||
Add tolerance to filter out small NTP adjustments:
|
||||
|
||||
```yaml
|
||||
# Current rule (triggers on any change)
|
||||
expr: changes(node_boot_time_seconds[10m]) > 0
|
||||
|
||||
# Improved rule (requires >60 second shift)
|
||||
expr: changes(node_boot_time_seconds[10m]) > 0 and abs(delta(node_boot_time_seconds[10m])) > 60
|
||||
```
|
||||
|
||||
### 2. Switch to Chrony (Optional)
|
||||
|
||||
Chrony handles time adjustments more gracefully than systemd-timesyncd:
|
||||
|
||||
```nix
|
||||
# In common/vm/qemu-guest.nix
|
||||
{
|
||||
services.qemuGuest.enable = true;
|
||||
|
||||
services.timesyncd.enable = false;
|
||||
services.chrony = {
|
||||
enable = true;
|
||||
extraConfig = ''
|
||||
makestep 1 3
|
||||
rtcsync
|
||||
'';
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Add QEMU Timing Args (Optional)
|
||||
|
||||
In `terraform/vms.tf`:
|
||||
|
||||
```hcl
|
||||
args = "-global kvm-pit.lost_tick_policy=delay -rtc driftfix=slew"
|
||||
```
|
||||
|
||||
### 4. Local NTP Server (Optional)
|
||||
|
||||
Running a local NTP server (e.g., on ns1/ns2) would reduce latency and improve sync stability across all hosts.
|
||||
|
||||
## Monitoring NTP Health
|
||||
|
||||
The `node_timex_*` metrics from node_exporter provide visibility into NTP health:
|
||||
|
||||
```promql
|
||||
# Clock offset from reference
|
||||
node_timex_offset_seconds
|
||||
|
||||
# Sync status (1 = synced)
|
||||
node_timex_sync_status
|
||||
|
||||
# Maximum estimated error - useful for alerting
|
||||
node_timex_maxerror_seconds
|
||||
```
|
||||
|
||||
A potential alert for NTP issues:
|
||||
|
||||
```yaml
|
||||
- alert: ntp_clock_drift
|
||||
expr: node_timex_maxerror_seconds > 1
|
||||
for: 5m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: "High clock drift on {{ $labels.hostname }}"
|
||||
description: "NTP max error is {{ $value }}s on {{ $labels.hostname }}"
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
|
||||
No action required for the alert itself - the system was healthy. Consider implementing the improved alert rule to prevent future false positives from NTP adjustments.
|
||||
156
docs/plans/completed/nix-cache-reprovision.md
Normal file
@@ -0,0 +1,156 @@
|
||||
# Nix Cache Host Reprovision
|
||||
|
||||
## Overview
|
||||
|
||||
Reprovision `nix-cache01` using the OpenTofu workflow, and improve the build/cache system with:
|
||||
1. NATS-based remote build triggering (replacing the current bash script)
|
||||
2. Safer flake update workflow that validates builds before pushing to master
|
||||
|
||||
## Status
|
||||
|
||||
**Phase 1: New Build Host** - COMPLETE
|
||||
**Phase 2: NATS Build Triggering** - COMPLETE
|
||||
**Phase 3: Safe Flake Update Workflow** - NOT STARTED
|
||||
**Phase 4: Complete Migration** - COMPLETE
|
||||
**Phase 5: Scheduled Builds** - COMPLETE
|
||||
|
||||
## Completed Work
|
||||
|
||||
### New Build Host (nix-cache02)
|
||||
|
||||
Instead of reprovisioning nix-cache01 in-place, we created a new host `nix-cache02` at 10.69.13.25:
|
||||
|
||||
- **Specs**: 8 CPU cores, 16GB RAM (temporary; will be increased to 24GB once nix-cache01 is decommissioned), 200GB disk
|
||||
- **Provisioned via OpenTofu** with automatic Vault credential bootstrapping
|
||||
- **Builder service** configured with two repos:
|
||||
- `nixos-servers` → `git+https://git.t-juice.club/torjus/nixos-servers.git`
|
||||
- `nixos` (gunter) → `git+https://git.t-juice.club/torjus/nixos.git`
|
||||
|
||||
### NATS-Based Build Triggering
|
||||
|
||||
The `homelab-deploy` tool was extended with a builder mode:
|
||||
|
||||
**NATS Subjects:**
|
||||
- `build.<repo>.<target>` - e.g., `build.nixos-servers.all` or `build.nixos-servers.ns1`
|
||||
|
||||
**NATS Permissions (in DEPLOY account):**
|
||||
| User | Publish | Subscribe |
|
||||
|------|---------|-----------|
|
||||
| Builder | `build.responses.>` | `build.>` |
|
||||
| Test deployer | `deploy.test.>`, `deploy.discover`, `build.>` | `deploy.responses.>`, `deploy.discover`, `build.responses.>` |
|
||||
| Admin deployer | `deploy.>`, `build.>` | `deploy.>`, `build.responses.>` |
|
||||
|
||||
**Vault Secrets:**
|
||||
- `shared/homelab-deploy/builder-nkey` - NKey seed for builder authentication
|
||||
|
||||
**NixOS Configuration:**
|
||||
- `hosts/nix-cache02/builder.nix` - Builder service configuration
|
||||
- `services/nats/default.nix` - Updated with builder NATS user
|
||||
|
||||
**MCP Integration:**
|
||||
- `.mcp.json` updated with `--enable-builds` flag
|
||||
- Build tool available via MCP for Claude Code
|
||||
|
||||
**Tested:**
|
||||
- Single host build: `build nixos-servers testvm01` (~30s)
|
||||
- All hosts build: `build nixos-servers all` (16 hosts in ~226s)
|
||||
|
||||
### Harmonia Binary Cache
|
||||
|
||||
- Parameterized `services/nix-cache/harmonia.nix` to use hostname-based Vault paths
|
||||
- Parameterized `services/nix-cache/proxy.nix` for hostname-based domain
|
||||
- New signing key: `nix-cache02.home.2rjus.net-1`
|
||||
- Vault secret: `hosts/nix-cache02/cache-secret`
|
||||
- Removed unused Gitea Actions runner from nix-cache01
|
||||
|
||||
## Current State
|
||||
|
||||
### nix-cache02 (Active)
|
||||
- Running at 10.69.13.25
|
||||
- Serving `https://nix-cache.home.2rjus.net` (canonical URL)
|
||||
- Builder service active, responding to NATS build requests
|
||||
- Metrics exposed on port 9973 (`homelab-deploy-builder` job)
|
||||
- Harmonia binary cache server running
|
||||
- Signing key: `nix-cache02.home.2rjus.net-1`
|
||||
- Prod tier with `build-host` role
|
||||
|
||||
### nix-cache01 (Decommissioned)
|
||||
- VM deleted from Proxmox
|
||||
- Host configuration removed from repo
|
||||
- Vault AppRole and secrets removed
|
||||
- Old signing key removed from trusted-public-keys
|
||||
|
||||
## Remaining Work
|
||||
|
||||
### Phase 3: Safe Flake Update Workflow
|
||||
|
||||
1. Create `.github/workflows/flake-update-safe.yaml`
|
||||
2. Disable or remove old `flake-update.yaml`
|
||||
3. Test manually with `workflow_dispatch`
|
||||
4. Monitor first automated run
|
||||
|
||||
### Phase 4: Complete Migration ✅
|
||||
|
||||
1. ~~**Add Harmonia to nix-cache02**~~ ✅ Done - new signing key, parameterized service
|
||||
2. ~~**Add trusted public key to all hosts**~~ ✅ Done - `system/nix.nix` updated
|
||||
3. ~~**Test cache from other hosts**~~ ✅ Done - verified from testvm01
|
||||
4. ~~**Update proxy and DNS**~~ ✅ Done - `nix-cache.home.2rjus.net` CNAME now points to nix-cache02
|
||||
5. ~~**Deploy to all hosts**~~ ✅ Done - all hosts have new trusted key
|
||||
6. ~~**Decommission nix-cache01**~~ ✅ Done - 2026-02-10:
|
||||
- Removed `hosts/nix-cache01/` directory
|
||||
- Removed `services/nix-cache/build-flakes.{nix,sh}`
|
||||
- Removed Vault AppRole and secrets
|
||||
- Removed old signing key from `system/nix.nix`
|
||||
- Removed from `flake.nix`
|
||||
- Deleted VM from Proxmox
|
||||
|
||||
### Phase 5: Scheduled Builds ✅
|
||||
|
||||
Implemented a systemd timer on nix-cache02 that triggers builds every 2 hours:
|
||||
|
||||
- **Timer**: `scheduled-build.timer` runs every 2 hours with 5m random jitter
|
||||
- **Service**: `scheduled-build.service` calls `homelab-deploy build` for both repos
|
||||
- **Authentication**: Dedicated scheduler NKey stored in Vault
|
||||
- **NATS user**: Added to DEPLOY account with publish `build.>` and subscribe `build.responses.>`
|
||||
|
||||
Files:
|
||||
- `hosts/nix-cache02/scheduler.nix` - Timer and service configuration
|
||||
- `services/nats/default.nix` - Scheduler NATS user
|
||||
- `terraform/vault/secrets.tf` - Scheduler NKey secret
|
||||
- `terraform/vault/variables.tf` - Variable for scheduler NKey
|
||||
|
||||
## Resolved Questions
|
||||
|
||||
- **Parallel vs sequential builds?** Sequential - hosts share packages, subsequent builds are fast after first
|
||||
- **What about gunter?** Configured as `nixos` repo in builder settings
|
||||
- **Disk size?** 200GB for new host
|
||||
- **Build host specs?** 8 cores, 16-24GB RAM matches current nix-cache01
|
||||
|
||||
### Phase 6: Observability
|
||||
|
||||
1. **Alerting rules** for build failures (a rule-file sketch follows the metrics list below):
|
||||
```promql
|
||||
# Alert if any build fails
|
||||
increase(homelab_deploy_build_host_total{status="failure"}[1h]) > 0
|
||||
|
||||
# Alert if no successful builds in 24h (scheduled builds stopped)
|
||||
time() - homelab_deploy_build_last_success_timestamp > 86400
|
||||
```
|
||||
|
||||
2. **Grafana dashboard** for build metrics:
|
||||
- Build success/failure rate over time
|
||||
- Average build duration per host (histogram)
|
||||
- Build frequency (builds per hour/day)
|
||||
- Last successful build timestamp per repo
|
||||
|
||||
Available metrics:
|
||||
- `homelab_deploy_builds_total{repo, status}` - total builds by repo and status
|
||||
- `homelab_deploy_build_host_total{repo, host, status}` - per-host build counts
|
||||
- `homelab_deploy_build_duration_seconds_{bucket,sum,count}` - build duration histogram
|
||||
- `homelab_deploy_build_last_timestamp{repo}` - last build attempt
|
||||
- `homelab_deploy_build_last_success_timestamp{repo}` - last successful build
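Wrapped into a rule file (vmalert/Prometheus format), the two expressions above could become something like this; alert names and thresholds are placeholders:

```yaml
groups:
  - name: build_rules
    rules:
      - alert: build_failed
        expr: increase(homelab_deploy_build_host_total{status="failure"}[1h]) > 0
        labels:
          severity: warning
        annotations:
          summary: "Build failed for {{ $labels.host }} ({{ $labels.repo }})"
      - alert: builds_stale
        # No successful build in 24h suggests the scheduled-build timer has stopped.
        expr: time() - homelab_deploy_build_last_success_timestamp > 86400
        for: 1h
        labels:
          severity: warning
        annotations:
          summary: "No successful build for {{ $labels.repo }} in 24h"
```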
|
||||
|
||||
## Open Questions
|
||||
|
||||
- [x] ~~When to cut over DNS from nix-cache01 to nix-cache02?~~ Done - 2026-02-10
|
||||
- [ ] Implement safe flake update workflow before or after full migration?
|
||||
107
docs/plans/completed/ns1-recreation.md
Normal file
@@ -0,0 +1,107 @@
|
||||
# ns1 Recreation Plan
|
||||
|
||||
## Overview
|
||||
|
||||
Recreate ns1 using the OpenTofu workflow after the existing VM entered emergency mode due to an incorrect hardware-configuration.nix (hardcoded UUIDs that didn't match the actual disk layout).
|
||||
|
||||
## Current ns1 Configuration to Preserve
|
||||
|
||||
- **IP:** 10.69.13.5/24
|
||||
- **Gateway:** 10.69.13.1
|
||||
- **Role:** Primary DNS (authoritative + resolver)
|
||||
- **Services:**
|
||||
- `../../services/ns/master-authorative.nix`
|
||||
- `../../services/ns/resolver.nix`
|
||||
- **Metadata:**
|
||||
- `homelab.host.role = "dns"`
|
||||
- `homelab.host.labels.dns_role = "primary"`
|
||||
- **Vault:** enabled
|
||||
- **Deploy:** enabled
|
||||
|
||||
## Execution Steps
|
||||
|
||||
### Phase 1: Remove Old Configuration
|
||||
|
||||
```bash
|
||||
nix develop -c create-host --remove --hostname ns1 --force
|
||||
```
|
||||
|
||||
This removes:
|
||||
- `hosts/ns1/` directory
|
||||
- Entry from `flake.nix`
|
||||
- Any terraform entries (none exist currently)
|
||||
|
||||
### Phase 2: Create New Configuration
|
||||
|
||||
```bash
|
||||
nix develop -c create-host --hostname ns1 --ip 10.69.13.5/24
|
||||
```
|
||||
|
||||
This creates:
|
||||
- `hosts/ns1/` with template2-based configuration
|
||||
- Entry in `flake.nix`
|
||||
- Entry in `terraform/vms.tf`
|
||||
- Vault wrapped token for bootstrap
|
||||
|
||||
### Phase 3: Customize Configuration
|
||||
|
||||
After create-host, manually update `hosts/ns1/configuration.nix` to add:
|
||||
|
||||
1. DNS service imports:
|
||||
```nix
|
||||
../../services/ns/master-authorative.nix
|
||||
../../services/ns/resolver.nix
|
||||
```
|
||||
|
||||
2. Host metadata:
|
||||
```nix
|
||||
homelab.host = {
|
||||
tier = "prod";
|
||||
role = "dns";
|
||||
labels.dns_role = "primary";
|
||||
};
|
||||
```
|
||||
|
||||
3. Disable resolved (conflicts with Unbound):
|
||||
```nix
|
||||
services.resolved.enable = false;
|
||||
```
|
||||
|
||||
### Phase 4: Commit Changes
|
||||
|
||||
```bash
|
||||
git add -A
|
||||
git commit -m "ns1: recreate with OpenTofu workflow
|
||||
|
||||
Old VM had incorrect hardware-configuration.nix with hardcoded UUIDs
|
||||
that didn't match actual disk layout, causing boot failure.
|
||||
|
||||
Recreated using template2-based configuration for OpenTofu provisioning."
|
||||
```
|
||||
|
||||
### Phase 5: Infrastructure
|
||||
|
||||
1. Delete old ns1 VM in Proxmox (it's broken anyway)
|
||||
2. Run `nix develop -c tofu -chdir=terraform apply`
|
||||
3. Wait for bootstrap to complete
|
||||
4. Verify ns1 is functional (see the check commands below):
|
||||
- DNS resolution working
|
||||
- Zone transfer to ns2 working
|
||||
- All exporters responding
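A few spot checks for step 4 (ns2 is addressed by name here since only ns1's IP is given above):

```bash
# Authoritative answer from ns1 itself
dig @10.69.13.5 home.2rjus.net SOA +norecurse

# Recursive resolution through the ns1 resolver
dig @10.69.13.5 git.t-juice.club A

# ns2 should serve the same zone serial after AXFR
dig @ns2.home.2rjus.net home.2rjus.net SOA +short

# node-exporter responding
curl -sf http://ns1.home.2rjus.net:9100/metrics > /dev/null && echo "node-exporter OK"
```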
|
||||
|
||||
### Phase 6: Finalize
|
||||
|
||||
- Push to master
|
||||
- Move this plan to `docs/plans/completed/`
|
||||
|
||||
## Rollback
|
||||
|
||||
If the new VM fails:
|
||||
1. ns2 is still operational as secondary DNS
|
||||
2. Can recreate with different settings if needed
|
||||
|
||||
## Notes
|
||||
|
||||
- ns2 will continue serving DNS during the migration
|
||||
- Zone data is generated from flake, so no data loss
|
||||
- The old VM's disk can be kept briefly in Proxmox as backup if desired
|
||||
87
docs/plans/completed/openbao-kanidm-oidc.md
Normal file
@@ -0,0 +1,87 @@
|
||||
# OpenBao + Kanidm OIDC Integration
|
||||
|
||||
## Status: Completed
|
||||
|
||||
Implemented 2026-02-09.
|
||||
|
||||
## Overview
|
||||
|
||||
Enable Kanidm users to authenticate to OpenBao (Vault) using OIDC for Web UI access. Members of the `admins` group get full read/write access to secrets.
|
||||
|
||||
## Implementation
|
||||
|
||||
### Files Modified
|
||||
|
||||
| File | Changes |
|
||||
|------|---------|
|
||||
| `terraform/vault/oidc.tf` | New - OIDC auth backend and roles |
|
||||
| `terraform/vault/policies.tf` | Added oidc-admin and oidc-default policies |
|
||||
| `terraform/vault/secrets.tf` | Added OAuth2 client secret |
|
||||
| `terraform/vault/approle.tf` | Granted kanidm01 access to openbao secrets |
|
||||
| `services/kanidm/default.nix` | Added openbao OAuth2 client, enabled imperative group membership |
|
||||
|
||||
### Kanidm Configuration
|
||||
|
||||
OAuth2 client `openbao` with:
|
||||
- Confidential client (uses client secret)
|
||||
- Web UI callback only: `https://vault.home.2rjus.net:8200/ui/vault/auth/oidc/oidc/callback`
|
||||
- Legacy crypto enabled (RS256 for OpenBao compatibility)
|
||||
- Scope maps for `admins` and `users` groups
|
||||
|
||||
Group membership is now managed imperatively (`overwriteMembers = false`) to prevent provisioning from resetting group memberships on service restart.
|
||||
|
||||
### OpenBao Configuration
|
||||
|
||||
OIDC auth backend at `/oidc` with two roles:
|
||||
|
||||
| Role | Bound Claims | Policy | Access |
|
||||
|------|--------------|--------|--------|
|
||||
| `admin` | `groups = admins@home.2rjus.net` | `oidc-admin` | Full read/write to secrets, system health/metrics |
|
||||
| `default` | (none) | `oidc-default` | Token lookup-self, system health |
|
||||
|
||||
Both roles request scopes: `openid`, `profile`, `email`, `groups`
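For illustration, the backend and the `admin` role in `terraform/vault/oidc.tf` roughly correspond to the Terraform Vault provider resources below; the Kanidm issuer URL and variable name are assumptions, the rest is reconstructed from the tables above:

```hcl
resource "vault_jwt_auth_backend" "oidc" {
  path               = "oidc"
  type               = "oidc"
  oidc_discovery_url = "https://idm.home.2rjus.net/oauth2/openid/openbao" # hypothetical Kanidm issuer
  oidc_client_id     = "openbao"
  oidc_client_secret = var.openbao_oidc_client_secret
  default_role       = "default"
}

resource "vault_jwt_auth_backend_role" "admin" {
  backend      = vault_jwt_auth_backend.oidc.path
  role_name    = "admin"
  role_type    = "oidc"
  user_claim   = "preferred_username"
  groups_claim = "groups"
  oidc_scopes  = ["openid", "profile", "email", "groups"]
  bound_claims = {
    groups = "admins@home.2rjus.net"
  }
  token_policies = ["oidc-admin"]
  allowed_redirect_uris = [
    "https://vault.home.2rjus.net:8200/ui/vault/auth/oidc/oidc/callback",
  ]
}
```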
|
||||
|
||||
### Policies
|
||||
|
||||
**oidc-admin:**
|
||||
- `secret/*` - create, read, update, delete, list
|
||||
- `sys/health` - read
|
||||
- `sys/metrics` - read
|
||||
- `sys/auth` - read
|
||||
- `sys/mounts` - read
|
||||
|
||||
**oidc-default:**
|
||||
- `auth/token/lookup-self` - read
|
||||
- `sys/health` - read
|
||||
|
||||
## Usage
|
||||
|
||||
### Web UI Login
|
||||
1. Navigate to https://vault.home.2rjus.net:8200
|
||||
2. Select "OIDC" authentication method
|
||||
3. Enter role: `admin` (for admins) or `default` (for any user)
|
||||
4. Click "Sign in with OIDC"
|
||||
5. Authenticate with Kanidm
|
||||
|
||||
### Group Management
|
||||
Add users to admins group for full access:
|
||||
```bash
|
||||
kanidm group add-members admins <username>
|
||||
```
|
||||
|
||||
## Limitations
|
||||
|
||||
**CLI login not supported:** Kanidm requires HTTPS for all redirect URIs on confidential (non-public) OAuth2 clients. OpenBao CLI uses `http://localhost:8250/oidc/callback` which Kanidm rejects. Public clients would allow localhost redirects, but OpenBao requires a client secret for OIDC auth.
|
||||
|
||||
## Lessons Learned
|
||||
|
||||
1. **Kanidm group names:** Groups are returned as `groupname@domain` (e.g., `admins@home.2rjus.net`), not just the short name
|
||||
2. **RS256 required:** OpenBao only supports RS256 for JWT signing; Kanidm defaults to ES256, requiring `enableLegacyCrypto = true`
|
||||
3. **Scope request:** OIDC roles must explicitly request the `groups` scope via `oidc_scopes`
|
||||
4. **Provisioning resets:** Kanidm provisioning with default `overwriteMembers = true` resets group memberships on restart
|
||||
5. **Two-phase Terraform:** Secret must exist before OIDC backend can validate discovery URL
|
||||
|
||||
## References
|
||||
|
||||
- [OpenBao JWT/OIDC Auth Method](https://openbao.org/docs/auth/jwt/)
|
||||
- [Kanidm OAuth2 Documentation](https://kanidm.github.io/kanidm/stable/integrations/oauth2.html)
|
||||
113
docs/plans/completed/pgdb1-decommission.md
Normal file
@@ -0,0 +1,113 @@
|
||||
# pgdb1 Decommissioning Plan
|
||||
|
||||
## Overview
|
||||
|
||||
Decommission the pgdb1 PostgreSQL server. The only consumer was Open WebUI on gunter, which has been migrated to use a local PostgreSQL instance.
|
||||
|
||||
## Pre-flight Verification
|
||||
|
||||
Before proceeding, verify that gunter is no longer using pgdb1:
|
||||
|
||||
1. Check that Open WebUI on gunter is configured for local PostgreSQL (not 10.69.13.16)
|
||||
2. Optionally: Check pgdb1 for recent connection activity:
|
||||
```bash
|
||||
ssh pgdb1 'sudo -u postgres psql -c "SELECT * FROM pg_stat_activity WHERE datname IS NOT NULL;"'
|
||||
```
|
||||
|
||||
## Files to Remove
|
||||
|
||||
### Host Configuration
|
||||
- `hosts/pgdb1/default.nix`
|
||||
- `hosts/pgdb1/configuration.nix`
|
||||
- `hosts/pgdb1/hardware-configuration.nix`
|
||||
- `hosts/pgdb1/` (directory)
|
||||
|
||||
### Service Module
|
||||
- `services/postgres/postgres.nix`
|
||||
- `services/postgres/default.nix`
|
||||
- `services/postgres/` (directory)
|
||||
|
||||
Note: This service module is only used by pgdb1, so it can be removed entirely.
|
||||
|
||||
### Flake Entry
|
||||
Remove from `flake.nix` (lines 131-138):
|
||||
```nix
|
||||
pgdb1 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/pgdb1
|
||||
];
|
||||
};
|
||||
```
|
||||
|
||||
### Vault AppRole
|
||||
Remove from `terraform/vault/approle.tf` (lines 69-73):
|
||||
```hcl
|
||||
"pgdb1" = {
|
||||
paths = [
|
||||
"secret/data/hosts/pgdb1/*",
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Monitoring Rules
|
||||
Remove the `postgres_down` alert from `services/monitoring/rules.yml` (lines 359-365):
|
||||
```yaml
|
||||
- name: postgres_rules
|
||||
rules:
|
||||
- alert: postgres_down
|
||||
expr: node_systemd_unit_state{instance="pgdb1.home.2rjus.net:9100", name="postgresql.service", state="active"} == 0
|
||||
for: 5m
|
||||
labels:
|
||||
severity: critical
|
||||
```
|
||||
|
||||
### Utility Scripts
|
||||
Delete `rebuild-all.sh` entirely (obsolete script).
|
||||
|
||||
## Execution Steps
|
||||
|
||||
### Phase 1: Verification
|
||||
- [ ] Confirm Open WebUI on gunter uses local PostgreSQL
|
||||
- [ ] Verify no active connections to pgdb1
|
||||
|
||||
### Phase 2: Code Cleanup
|
||||
- [ ] Create feature branch: `git checkout -b decommission-pgdb1`
|
||||
- [ ] Remove `hosts/pgdb1/` directory
|
||||
- [ ] Remove `services/postgres/` directory
|
||||
- [ ] Remove pgdb1 entry from `flake.nix`
|
||||
- [ ] Remove postgres alert from `services/monitoring/rules.yml`
|
||||
- [ ] Delete `rebuild-all.sh` (obsolete)
|
||||
- [ ] Run `nix flake check` to verify no broken references
|
||||
- [ ] Commit changes
|
||||
|
||||
### Phase 3: Terraform Cleanup
|
||||
- [ ] Remove pgdb1 from `terraform/vault/approle.tf`
|
||||
- [ ] Run `tofu plan` in `terraform/vault/` to preview changes
|
||||
- [ ] Run `tofu apply` to remove the AppRole
|
||||
- [ ] Commit terraform changes
|
||||
|
||||
### Phase 4: Infrastructure Cleanup
|
||||
- [ ] Shut down pgdb1 VM in Proxmox
|
||||
- [ ] Delete the VM from Proxmox
|
||||
- [ ] (Optional) Remove any DNS entries if not auto-generated
|
||||
|
||||
### Phase 5: Finalize
|
||||
- [ ] Merge feature branch to master
|
||||
- [ ] Trigger auto-upgrade on DNS servers (ns1, ns2) to remove DNS entry
|
||||
- [ ] Move this plan to `docs/plans/completed/`
|
||||
|
||||
## Rollback
|
||||
|
||||
If issues arise after decommissioning:
|
||||
1. The VM can be recreated from template using the git history
|
||||
2. Database data would need to be restored from backup (if any exists)
|
||||
|
||||
## Notes
|
||||
|
||||
- pgdb1 IP: 10.69.13.16
|
||||
- The postgres service allowed connections from gunter (10.69.30.105)
|
||||
- No restic backup was configured for this host
|
||||
@@ -1,10 +1,38 @@
|
||||
# Prometheus Scrape Target Labels
|
||||
|
||||
## Implementation Status
|
||||
|
||||
| Step | Status | Notes |
|
||||
|------|--------|-------|
|
||||
| 1. Create `homelab.host` module | ✅ Complete | `modules/homelab/host.nix` |
|
||||
| 2. Update `lib/monitoring.nix` | ✅ Complete | Labels extracted and propagated |
|
||||
| 3. Update Prometheus config | ✅ Complete | Uses structured static_configs |
|
||||
| 4. Set metadata on hosts | ✅ Complete | All relevant hosts configured |
|
||||
| 5. Update alert rules | ✅ Complete | Role-based filtering implemented |
|
||||
| 6. Labels for service targets | ✅ Complete | Host labels propagated to all services |
|
||||
| 7. Add hostname label | ✅ Complete | All targets have `hostname` label for easy filtering |
|
||||
|
||||
**Hosts with metadata configured:**
|
||||
- `ns1`, `ns2`: `role = "dns"`, `labels.dns_role = "primary"/"secondary"`
|
||||
- `nix-cache01`: `role = "build-host"`
|
||||
- `vault01`: `role = "vault"`
|
||||
- `testvm01/02/03`: `tier = "test"`
|
||||
|
||||
**Implementation complete.** Branch: `prometheus-scrape-target-labels`
|
||||
|
||||
**Query examples:**
|
||||
- `{hostname="ns1"}` - all metrics from ns1 (any job/port)
|
||||
- `node_cpu_seconds_total{hostname="monitoring01"}` - specific metric by hostname
|
||||
- `up{role="dns"}` - all DNS servers
|
||||
- `up{tier="test"}` - all test-tier hosts
|
||||
|
||||
---
|
||||
|
||||
## Goal
|
||||
|
||||
Add support for custom per-host labels on Prometheus scrape targets, enabling alert rules to reference host metadata (priority, role) instead of hardcoding instance names.
|
||||
|
||||
**Related:** This plan shares the `homelab.host` module with `docs/plans/nats-deploy-service.md`, which uses the same metadata for deployment tier assignment.
|
||||
**Related:** This plan shares the `homelab.host` module with `docs/plans/completed/nats-deploy-service.md`, which uses the same metadata for deployment tier assignment.
|
||||
|
||||
## Motivation
|
||||
|
||||
@@ -54,12 +82,11 @@ or
|
||||
|
||||
## Implementation
|
||||
|
||||
This implementation uses a shared `homelab.host` module that provides host metadata for multiple consumers (Prometheus labels, deployment tiers, etc.). See also `docs/plans/nats-deploy-service.md` which uses the same module for deployment tier assignment.
|
||||
This implementation uses a shared `homelab.host` module that provides host metadata for multiple consumers (Prometheus labels, deployment tiers, etc.). See also `docs/plans/completed/nats-deploy-service.md` which uses the same module for deployment tier assignment.
|
||||
|
||||
### 1. Create `homelab.host` module
|
||||
|
||||
**Status:** Step 1 (Create `homelab.host` module) is complete. The module is in
|
||||
`modules/homelab/host.nix` with tier, priority, role, and labels options.
|
||||
✅ **Complete.** The module is in `modules/homelab/host.nix`.
|
||||
|
||||
Create `modules/homelab/host.nix` with shared host metadata options:
|
||||
|
||||
@@ -98,6 +125,8 @@ Import this module in `modules/homelab/default.nix`.
|
||||
|
||||
### 2. Update `lib/monitoring.nix`
|
||||
|
||||
✅ **Complete.** Labels are now extracted and propagated.
|
||||
|
||||
- `extractHostMonitoring` should also extract `homelab.host` values (priority, role, labels).
|
||||
- Build the combined label set from `homelab.host`:
|
||||
|
||||
@@ -126,6 +155,8 @@ This requires grouping hosts by their label attrset and producing one `static_co
|
||||
|
||||
### 3. Update `services/monitoring/prometheus.nix`
|
||||
|
||||
✅ **Complete.** Now uses structured static_configs output.
|
||||
|
||||
Change the node-exporter scrape config to use the new structured output:
|
||||
|
||||
```nix
|
||||
@@ -138,36 +169,37 @@ static_configs = nodeExporterTargets;
|
||||
|
||||
### 4. Set metadata on hosts
|
||||
|
||||
✅ **Complete.** All relevant hosts have metadata configured. Note: The implementation filters by `role` rather than `priority`, which matches the existing nix-cache01 configuration.
|
||||
|
||||
Example in `hosts/nix-cache01/configuration.nix`:
|
||||
|
||||
```nix
|
||||
homelab.host = {
|
||||
tier = "test"; # can be deployed by MCP (used by homelab-deploy)
|
||||
priority = "low"; # relaxed alerting thresholds
|
||||
role = "build-host";
|
||||
};
|
||||
```
|
||||
|
||||
**Note:** Current implementation only sets `role = "build-host"`. Consider adding `priority = "low"` when label propagation is implemented.
|
||||
|
||||
Example in `hosts/ns1/configuration.nix`:
|
||||
|
||||
```nix
|
||||
homelab.host = {
|
||||
tier = "prod";
|
||||
priority = "high";
|
||||
role = "dns";
|
||||
labels.dns_role = "primary";
|
||||
};
|
||||
```
|
||||
|
||||
**Note:** `tier` and `priority` use defaults ("prod" and "high"), which is the intended behavior. The current ns1/ns2 configurations match this pattern.
|
||||
|
||||
### 5. Update alert rules
|
||||
|
||||
After implementing labels, review and update `services/monitoring/rules.yml`:
|
||||
✅ **Complete.** Updated `services/monitoring/rules.yml`:
|
||||
|
||||
- Replace instance-name exclusions with label-based filters (e.g. `{priority!="low"}` instead of `{instance!="nix-cache01.home.2rjus.net:9100"}`).
|
||||
- Consider whether any other rules should differentiate by priority or role.
|
||||
- `high_cpu_load`: Replaced `instance!="nix-cache01..."` with `role!="build-host"` for standard hosts (15m duration) and `role="build-host"` for build hosts (2h duration).
|
||||
- `unbound_low_cache_hit_ratio`: Added `dns_role="primary"` filter to only alert on the primary DNS resolver (secondary has a cold cache).
|
||||
|
||||
Specifically, the `high_cpu_load` rule currently has a nix-cache01 exclusion that should be replaced with a `priority`-based filter.
|
||||
### 6. Labels for `generateScrapeConfigs` (service targets)
|
||||
|
||||
### 6. Consider labels for `generateScrapeConfigs` (service targets)
|
||||
|
||||
The same label propagation could be applied to service-level scrape targets. This is optional and can be deferred -- service targets are more specialized and less likely to need generic label-based filtering.
|
||||
✅ **Complete.** Host labels are now propagated to all auto-generated service scrape targets (unbound, homelab-deploy, nixos-exporter, etc.). This enables semantic filtering on any service metric, such as using `dns_role="primary"` with the unbound job.
|
||||
@@ -9,33 +9,34 @@ hosts are decommissioned or deferred.
|
||||
|
||||
## Current State
|
||||
|
||||
Hosts already managed by OpenTofu: `vault01`, `testvm01`, `vaulttest01`
|
||||
Hosts already managed by OpenTofu: `vault01`, `testvm01`, `testvm02`, `testvm03`, `ns2`, `ns1`
|
||||
|
||||
Hosts to migrate:
|
||||
|
||||
| Host | Category | Notes |
|
||||
|------|----------|-------|
|
||||
| ns1 | Stateless | Primary DNS, recreate |
|
||||
| ns2 | Stateless | Secondary DNS, recreate |
|
||||
| ~~ns1~~ | ~~Stateless~~ | ✓ Complete |
|
||||
| nix-cache01 | Stateless | Binary cache, recreate |
|
||||
| http-proxy | Stateless | Reverse proxy, recreate |
|
||||
| nats1 | Stateless | Messaging, recreate |
|
||||
| auth01 | Decommission | No longer in use |
|
||||
| ha1 | Stateful | Home Assistant + Zigbee2MQTT + Mosquitto |
|
||||
| monitoring01 | Stateful | Prometheus, Grafana, Loki |
|
||||
| ~~monitoring01~~ | ~~Decommission~~ | ✓ Complete — replaced by monitoring02 (VictoriaMetrics) |
|
||||
| jelly01 | Stateful | Jellyfin metadata, watch history, config |
|
||||
| pgdb1 | Stateful | PostgreSQL databases |
|
||||
| jump | Decommission | No longer needed |
|
||||
| ca | Deferred | Pending Phase 4c PKI migration to OpenBao |
|
||||
| ~~pgdb1~~ | ~~Decommission~~ | ✓ Complete |
|
||||
| ~~jump~~ | ~~Decommission~~ | ✓ Complete |
|
||||
| ~~auth01~~ | ~~Decommission~~ | ✓ Complete |
|
||||
| ~~ca~~ | ~~Deferred~~ | ✓ Complete |
|
||||
|
||||
## Phase 1: Backup Preparation
|
||||
|
||||
Before migrating any stateful host, ensure restic backups are in place and verified.
|
||||
|
||||
### 1a. Expand monitoring01 Grafana Backup
|
||||
### ~~1a. Expand monitoring01 Grafana Backup~~ ✓ N/A
|
||||
|
||||
The existing backup only covers `/var/lib/grafana/plugins` and a sqlite dump of `grafana.db`.
|
||||
Expand to back up all of `/var/lib/grafana/` to capture config directory and any other state.
|
||||
~~The existing backup only covers `/var/lib/grafana/plugins` and a sqlite dump of `grafana.db`.
|
||||
Expand to back up all of `/var/lib/grafana/` to capture config directory and any other state.~~
|
||||
|
||||
No longer needed — monitoring01 decommissioned, replaced by monitoring02 with declarative Grafana dashboards.
|
||||
|
||||
### 1b. Add Jellyfin Backup to jelly01
|
||||
|
||||
@@ -46,39 +47,19 @@ No backup currently exists. Add a restic backup job for `/var/lib/jellyfin/` whi
|
||||
Media files are on the NAS (`nas.home.2rjus.net:/mnt/hdd-pool/media`) and do not need backup.
|
||||
The cache directory (`/var/cache/jellyfin/`) does not need backup — it regenerates.
|
||||
|
||||
### 1c. Add PostgreSQL Backup to pgdb1
|
||||
|
||||
No backup currently exists. Add a restic backup job with a `pg_dumpall` pre-hook to capture
|
||||
all databases and roles. The dump should be piped through restic's stdin backup (similar to
|
||||
the Grafana DB dump pattern on monitoring01).
|
||||
|
||||
### 1d. Verify Existing ha1 Backup
|
||||
### 1c. Verify Existing ha1 Backup
|
||||
|
||||
ha1 already backs up `/var/lib/hass`, `/var/lib/zigbee2mqtt`, `/var/lib/mosquitto`. Verify
|
||||
these backups are current and restorable before proceeding with migration.
|
||||
|
||||
### 1e. Verify All Backups
|
||||
### 1d. Verify All Backups
|
||||
|
||||
After adding/expanding backup jobs:
|
||||
1. Trigger a manual backup run on each host
|
||||
2. Verify backup integrity with `restic check`
|
||||
3. Test a restore to a temporary location to confirm data is recoverable
|
||||
|
||||
## Phase 2: Declare pgdb1 Databases in Nix
|
||||
|
||||
Before migrating pgdb1, audit the manually-created databases and users on the running
|
||||
instance, then declare them in the Nix configuration using `ensureDatabases` and
|
||||
`ensureUsers`. This makes the PostgreSQL setup reproducible on the new host.
|
||||
|
||||
Steps:
|
||||
1. SSH to pgdb1, run `\l` and `\du` in psql to list databases and roles
|
||||
2. Add `ensureDatabases` and `ensureUsers` to `services/postgres/postgres.nix`
|
||||
3. Document any non-default PostgreSQL settings or extensions per database
|
||||
|
||||
After reprovisioning, the databases will be created by NixOS, and data restored from the
|
||||
`pg_dumpall` backup.
|
||||
|
||||
## Phase 3: Stateless Host Migration
|
||||
## Phase 2: Stateless Host Migration
|
||||
|
||||
These hosts have no meaningful state and can be recreated fresh. For each host:
|
||||
|
||||
@@ -95,13 +76,14 @@ Migrate stateless hosts in an order that minimizes disruption:
|
||||
|
||||
1. **nix-cache01** — low risk, no downstream dependencies during migration
|
||||
2. **nats1** — low risk, verify no persistent JetStream streams first
|
||||
4. **http-proxy** — brief disruption to proxied services, migrate during low-traffic window
|
||||
5. **ns1, ns2** — migrate one at a time, verify DNS resolution between each
|
||||
3. **http-proxy** — brief disruption to proxied services, migrate during low-traffic window
|
||||
4. ~~**ns1** — ns2 already migrated, verify AXFR works after ns1 migration~~ ✓ Complete
|
||||
|
||||
For ns1/ns2: migrate ns2 first (secondary), verify AXFR works, then migrate ns1. All hosts
|
||||
use both ns1 and ns2 as resolvers, so one being down briefly is tolerable.
|
||||
~~For ns1/ns2: migrate ns2 first (secondary), verify AXFR works, then migrate ns1.~~ Both ns1
|
||||
and ns2 migration complete. Zone transfer (AXFR) verified working between ns1 (primary) and
|
||||
ns2 (secondary).
|
||||
|
||||
## Phase 4: Stateful Host Migration
|
||||
## Phase 3: Stateful Host Migration
|
||||
|
||||
For each stateful host, the procedure is:
|
||||
|
||||
@@ -114,27 +96,19 @@ For each stateful host, the procedure is:
|
||||
7. Start services and verify functionality
|
||||
8. Decommission the old VM
|
||||
|
||||
### 4a. pgdb1
|
||||
### 3a. monitoring01 ✓ COMPLETE
|
||||
|
||||
1. Run final `pg_dumpall` backup via restic
|
||||
2. Stop PostgreSQL on the old host
|
||||
3. Provision new pgdb1 via OpenTofu
|
||||
4. After bootstrap, NixOS creates the declared databases/users
|
||||
5. Restore data with `pg_restore` or `psql < dumpall.sql`
|
||||
6. Verify database connectivity from gunter (`10.69.30.105`)
|
||||
7. Decommission old VM
|
||||
~~1. Run final Grafana backup~~
|
||||
~~2. Provision new monitoring01 via OpenTofu~~
|
||||
~~3. After bootstrap, restore `/var/lib/grafana/` from restic~~
|
||||
~~4. Restart Grafana, verify dashboards and datasources are intact~~
|
||||
~~5. Prometheus and Loki start fresh with empty data (acceptable)~~
|
||||
~~6. Verify all scrape targets are being collected~~
|
||||
~~7. Decommission old VM~~
|
||||
|
||||
### 4b. monitoring01
|
||||
Replaced by monitoring02 with VictoriaMetrics, standalone Loki and Grafana modules. Host configuration, old service modules, and terraform resources removed.
|
||||
|
||||
1. Run final Grafana backup
|
||||
2. Provision new monitoring01 via OpenTofu
|
||||
3. After bootstrap, restore `/var/lib/grafana/` from restic
|
||||
4. Restart Grafana, verify dashboards and datasources are intact
|
||||
5. Prometheus and Loki start fresh with empty data (acceptable)
|
||||
6. Verify all scrape targets are being collected
|
||||
7. Decommission old VM
|
||||
|
||||
### 4c. jelly01
|
||||
### 3b. jelly01
|
||||
|
||||
1. Run final Jellyfin backup
|
||||
2. Provision new jelly01 via OpenTofu
|
||||
@@ -143,7 +117,7 @@ For each stateful host, the procedure is:
|
||||
5. Start Jellyfin, verify watch history and library metadata are present
|
||||
6. Decommission old VM
|
||||
|
||||
### 4d. ha1
|
||||
### 3c. ha1
|
||||
|
||||
1. Verify latest restic backup is current
|
||||
2. Stop Home Assistant, Zigbee2MQTT, and Mosquitto on old host
|
||||
@@ -167,47 +141,69 @@ OpenTofu/Proxmox. Verify the USB device ID on the hypervisor and add the appropr
|
||||
`usb` block to the VM definition in `terraform/vms.tf`. The USB device must be passed
|
||||
through before starting Zigbee2MQTT on the new host.
|
||||
|
||||
## Phase 5: Decommission jump and auth01 Hosts
|
||||
## Phase 4: Decommission Hosts
|
||||
|
||||
### jump
|
||||
1. Verify nothing depends on the jump host (no SSH proxy configs pointing to it, etc.)
|
||||
2. Remove host configuration from `hosts/jump/`
|
||||
3. Remove from `flake.nix`
|
||||
4. Remove any secrets in `secrets/jump/`
|
||||
5. Remove from `.sops.yaml`
|
||||
6. Destroy the VM in Proxmox
|
||||
7. Commit cleanup
|
||||
### jump ✓ COMPLETE
|
||||
|
||||
### auth01
|
||||
1. Remove host configuration from `hosts/auth01/`
|
||||
2. Remove from `flake.nix`
|
||||
3. Remove any secrets in `secrets/auth01/`
|
||||
4. Remove from `.sops.yaml`
|
||||
5. Remove `services/authelia/` and `services/lldap/` (only used by auth01)
|
||||
6. Destroy the VM in Proxmox
|
||||
7. Commit cleanup
|
||||
~~1. Verify nothing depends on the jump host (no SSH proxy configs pointing to it, etc.)~~
|
||||
~~2. Remove host configuration from `hosts/jump/`~~
|
||||
~~3. Remove from `flake.nix`~~
|
||||
~~4. Remove any secrets in `secrets/jump/`~~
|
||||
~~5. Remove from `.sops.yaml`~~
|
||||
~~6. Destroy the VM in Proxmox~~
|
||||
~~7. Commit cleanup~~
|
||||
|
||||
## Phase 6: Decommission ca Host (Deferred)
|
||||
Host was already removed from flake.nix and VM destroyed. Configuration cleaned up in ba9f47f.
|
||||
|
||||
Deferred until Phase 4c (PKI migration to OpenBao) is complete. Once all hosts use the
|
||||
### auth01 ✓ COMPLETE
|
||||
|
||||
~~1. Remove host configuration from `hosts/auth01/`~~
|
||||
~~2. Remove from `flake.nix`~~
|
||||
~~3. Remove any secrets in `secrets/auth01/`~~
|
||||
~~4. Remove from `.sops.yaml`~~
|
||||
~~5. Remove `services/authelia/` and `services/lldap/` (only used by auth01)~~
|
||||
~~6. Destroy the VM in Proxmox~~
|
||||
~~7. Commit cleanup~~
|
||||
|
||||
Host configuration, services, and VM already removed.
|
||||
|
||||
### pgdb1 ✓ COMPLETE
|
||||
|
||||
~~Only consumer was Open WebUI on gunter, which has been migrated to use local PostgreSQL.~~
|
||||
|
||||
~~1. Verify Open WebUI on gunter is using local PostgreSQL (not pgdb1)~~
|
||||
~~2. Remove host configuration from `hosts/pgdb1/`~~
|
||||
~~3. Remove `services/postgres/` (only used by pgdb1)~~
|
||||
~~4. Remove from `flake.nix`~~
|
||||
~~5. Remove Vault AppRole from `terraform/vault/approle.tf`~~
|
||||
~~6. Destroy the VM in Proxmox~~
|
||||
~~7. Commit cleanup~~
|
||||
|
||||
Host configuration, services, terraform resources, and VM removed. See `docs/plans/pgdb1-decommission.md` for detailed plan.
|
||||
|
||||
## Phase 5: Decommission ca Host ✓ COMPLETE
|
||||
|
||||
~~Deferred until Phase 4c (PKI migration to OpenBao) is complete. Once all hosts use the
|
||||
OpenBao ACME endpoint for certificates, the step-ca host can be decommissioned following
|
||||
the same cleanup steps as the jump host.
|
||||
the same cleanup steps as the jump host.~~
|
||||
|
||||
## Phase 7: Remove sops-nix
|
||||
PKI migration to OpenBao complete. Host configuration, `services/ca/`, and VM removed.
|
||||
|
||||
Once `ca` is decommissioned (Phase 6), `sops-nix` is no longer used by any host. Remove
|
||||
all remnants:
|
||||
- `sops-nix` input from `flake.nix` and `flake.lock`
|
||||
- `sops-nix.nixosModules.sops` from all host module lists in `flake.nix`
|
||||
- `inherit sops-nix` from all specialArgs in `flake.nix`
|
||||
- `system/sops.nix` and its import in `system/default.nix`
|
||||
- `.sops.yaml`
|
||||
- `secrets/` directory
|
||||
- All `sops.secrets.*` declarations in `services/ca/`, `services/authelia/`, `services/lldap/`
|
||||
- Template scripts that generate age keys for sops (`hosts/template/scripts.nix`,
|
||||
`hosts/template2/scripts.nix`)
|
||||
## Phase 6: Remove sops-nix ✓ COMPLETE
|
||||
|
||||
See `docs/plans/completed/sops-to-openbao-migration.md` for full context.
|
||||
~~Once `ca` is decommissioned (Phase 6), `sops-nix` is no longer used by any host. Remove
|
||||
all remnants:~~
|
||||
~~- `sops-nix` input from `flake.nix` and `flake.lock`~~
|
||||
~~- `sops-nix.nixosModules.sops` from all host module lists in `flake.nix`~~
|
||||
~~- `inherit sops-nix` from all specialArgs in `flake.nix`~~
|
||||
~~- `system/sops.nix` and its import in `system/default.nix`~~
|
||||
~~- `.sops.yaml`~~
|
||||
~~- `secrets/` directory~~
|
||||
~~- All `sops.secrets.*` declarations in `services/ca/`, `services/authelia/`, `services/lldap/`~~
|
||||
~~- Template scripts that generate age keys for sops (`hosts/template/scripts.nix`,
|
||||
`hosts/template2/scripts.nix`)~~
|
||||
|
||||
All sops-nix remnants removed. See `docs/plans/completed/sops-to-openbao-migration.md` for context.
|
||||
|
||||
## Notes
|
||||
|
||||
@@ -216,7 +212,7 @@ See `docs/plans/completed/sops-to-openbao-migration.md` for full context.
|
||||
- The old VMs use IPs that the new VMs need, so the old VM must be shut down before
|
||||
the new one is provisioned (or use a temporary IP and swap after verification)
|
||||
- Stateful migrations should be done during low-usage windows
|
||||
- After all migrations are complete, the only hosts not in OpenTofu will be ca (deferred)
|
||||
- After all migrations are complete, all decommissioned hosts (jump, auth01, ca) have been removed
|
||||
- Since many hosts are being recreated, this is a good opportunity to establish consistent
|
||||
hostname naming conventions before provisioning the new VMs. Current naming is inconsistent
|
||||
(e.g. `ns1` vs `nix-cache01`, `ha1` vs `auth01`, `pgdb1` vs `http-proxy`). Decide on a
|
||||
|
||||
79
docs/plans/local-ntp-chrony.md
Normal file
@@ -0,0 +1,79 @@
|
||||
# Local NTP with Chrony
|
||||
|
||||
## Overview/Goal
|
||||
|
||||
Set up pve1 as a local NTP server and switch all NixOS VMs from systemd-timesyncd to chrony, pointing at pve1 as the sole time source. This eliminates clock drift issues that cause false `host_reboot` alerts.
|
||||
|
||||
## Current State
|
||||
|
||||
- All NixOS hosts use `systemd-timesyncd` with default NixOS pool servers (`0.nixos.pool.ntp.org` etc.)
|
||||
- No NTP/timesyncd configuration exists in the repo — all defaults
|
||||
- pve1 (Proxmox, bare metal) already runs chrony but only as a client
|
||||
- VMs drift noticeably — ns1 (~19ms) and jelly01 (~39ms) are worst offenders
|
||||
- Clock step corrections from timesyncd trigger false `host_reboot` alerts via `changes(node_boot_time_seconds[10m]) > 0`
|
||||
- pve1 itself stays at 0ms offset thanks to chrony
|
||||
|
||||
## Why systemd-timesyncd is Insufficient
|
||||
|
||||
- Minimal SNTP client, no proper clock discipline or frequency tracking
|
||||
- Backs off polling interval when it thinks clock is stable, missing drift
|
||||
- Corrects via step adjustments rather than gradual slewing, causing metric jumps
|
||||
- Each VM resolves to different pool servers with varying accuracy
|
||||
|
||||
## Implementation Steps
|
||||
|
||||
### 1. Configure pve1 as NTP Server
|
||||
|
||||
Add to pve1's `/etc/chrony/chrony.conf`:
|
||||
|
||||
```
|
||||
# Allow NTP clients from the infrastructure subnet
|
||||
allow 10.69.13.0/24
|
||||
```
|
||||
|
||||
Restart chrony on pve1.
|
||||
|
||||
### 2. Add Chrony to NixOS System Config
|
||||
|
||||
Create `system/chrony.nix` (applied to all hosts via system imports):
|
||||
|
||||
```nix
|
||||
{
|
||||
# Disable systemd-timesyncd (chrony takes over)
|
||||
services.timesyncd.enable = false;
|
||||
|
||||
# Enable chrony pointing at pve1
|
||||
services.chrony = {
|
||||
enable = true;
|
||||
servers = [ "pve1.home.2rjus.net" ];
|
||||
serverOption = "iburst";
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Optional: Add Chrony Exporter
|
||||
|
||||
For better visibility into NTP sync quality:
|
||||
|
||||
```nix
|
||||
services.prometheus.exporters.chrony.enable = true;
|
||||
```
|
||||
|
||||
Add chrony exporter scrape targets via `homelab.monitoring.scrapeTargets` and create a Grafana dashboard for NTP offset across all hosts.
|
||||
|
||||
### 4. Roll Out
|
||||
|
||||
- Deploy to a test-tier host first and verify sync (see the check commands below)
|
||||
- Then deploy to all hosts via auto-upgrade
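Verification on the test host can be as simple as the commands below (chronyc ships with the chrony package):

```bash
# pve1 should be the selected source and the offset should stay in the low-ms range
chronyc sources -v
chronyc tracking

# node_boot_time_seconds should stay flat once corrections are slewed instead of stepped
curl -s http://localhost:9100/metrics | grep '^node_boot_time_seconds'
```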
|
||||
|
||||
## Open Questions
|
||||
|
||||
- [ ] Does pve1's chrony config need `local stratum 10` as fallback if upstream is unreachable?
|
||||
- [ ] Should we also enable `enableRTCTrimming` for the VMs?
|
||||
- [ ] Worth adding a chrony exporter on pve1 as well (manual install like node-exporter)?
|
||||
|
||||
## Notes
|
||||
|
||||
- No fallback NTP servers needed on VMs — if pve1 is down, all VMs are down too
|
||||
- The `host_reboot` alert rule (`changes(node_boot_time_seconds[10m]) > 0`) should stop false-firing once clock corrections are slewed instead of stepped
|
||||
- pn01/pn02 are bare metal but still benefit from syncing to pve1 for consistency
|
||||
196
docs/plans/loki-improvements.md
Normal file
@@ -0,0 +1,196 @@
|
||||
# Loki Setup Improvements
|
||||
|
||||
## Overview
|
||||
|
||||
The current Loki deployment on monitoring01 is functional but minimal. It lacks retention policies and rate limiting, and it uses local filesystem storage. This plan evaluates improvement options across several dimensions: retention management, storage backend, resource limits, and operational improvements.
|
||||
|
||||
## Current State
|
||||
|
||||
**Loki** on monitoring01 (`services/monitoring/loki.nix`):
|
||||
- Single-node deployment, no HA
|
||||
- Filesystem storage at `/var/lib/loki/chunks` (~6.8 GB as of 2026-02-13)
|
||||
- TSDB index (v13 schema, 24h period)
|
||||
- 30-day compactor-based retention with basic rate limits
|
||||
- No caching layer
|
||||
- Auth disabled (trusted network)
|
||||
|
||||
**Promtail** on all 16 hosts (`system/monitoring/logs.nix`):
|
||||
- Ships systemd journal (JSON) + `/var/log/**/*.log`
|
||||
- Labels: `hostname`, `tier`, `role`, `level`, `job` (systemd-journal/varlog), `systemd_unit`
|
||||
- `level` label mapped from journal PRIORITY (critical/error/warning/notice/info/debug)
|
||||
- Hardcoded to `http://monitoring01.home.2rjus.net:3100`
|
||||
|
||||
**Additional log sources:**
|
||||
- `pipe-to-loki` script (manual log submission, `job=pipe-to-loki`)
|
||||
- Bootstrap logs from template2 (`job=bootstrap`)
|
||||
|
||||
**Context:** The VictoriaMetrics migration plan (`docs/plans/monitoring-migration-victoriametrics.md`) includes moving Loki to monitoring02 with "same configuration as current". These improvements could be applied either before or after that migration.
|
||||
|
||||
## Improvement Areas
|
||||
|
||||
### 1. Retention Policy
|
||||
|
||||
**Implemented.** Compactor-based retention with 30-day period. Note: Loki 3.6.3 requires `delete_request_store = "filesystem"` when retention is enabled (not documented in older guides).
|
||||
|
||||
```nix
compactor = {
  working_directory = "/var/lib/loki/compactor";
  compaction_interval = "10m";
  retention_enabled = true;
  retention_delete_delay = "2h";
  retention_delete_worker_count = 150;
  delete_request_store = "filesystem";
};

limits_config = {
  retention_period = "30d";
};
```
|
||||
|
||||
### 2. Storage Backend
|
||||
|
||||
**Decision:** Stay with filesystem storage for now. Garage S3 was considered but ruled out - the current single-node Garage (replication_factor=1) offers no real durability benefit over local disk. S3 storage can be revisited after the NAS migration, when a more robust S3-compatible solution will likely be available.
|
||||
|
||||
### 3. Limits Configuration
|
||||
|
||||
**Implemented.** Basic guardrails added alongside retention in `limits_config`:
|
||||
|
||||
```nix
limits_config = {
  retention_period = "30d";
  ingestion_rate_mb = 10;          # MB/s per tenant
  ingestion_burst_size_mb = 20;    # Burst allowance
  max_streams_per_user = 10000;    # Prevent label explosion
  max_query_series = 500;          # Limit query resource usage
  max_query_parallelism = 8;
};
```
|
||||
|
||||
### 4. Promtail Label Improvements
|
||||
|
||||
**Problem:** Label inconsistencies and missing useful metadata:
|
||||
- The `varlog` scrape config uses `hostname` while journal uses `host` (different label name)
|
||||
- No `tier` or `role` labels, making it hard to filter logs by deployment tier or host function
|
||||
|
||||
**Implemented:** Standardized on `hostname` to match Prometheus labels. The journal scrape previously used a relabel from `__journal__hostname` to `host`; now both scrape configs use a static `hostname` label from `config.networking.hostName`. Also updated `pipe-to-loki` and bootstrap scripts to use `hostname` instead of `host`.
|
||||
|
||||
1. **Standardized label:** Both scrape configs use `hostname` (matching Prometheus) via shared `hostLabels`
|
||||
2. **Added `tier` label:** Static label from `config.homelab.host.tier` (`test`/`prod`) on both scrape configs
|
||||
3. **Added `role` label:** Static label from `config.homelab.host.role` on both scrape configs (conditionally, only when non-null)
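
A hedged sketch of how the shared `hostLabels` block from the three points above could look in `system/monitoring/logs.nix` (the `homelab.host` option paths and the exact scrape config layout are assumptions):

```nix
{ config, lib, ... }:
let
  # Shared static labels attached to both scrape configs
  hostLabels = {
    hostname = config.networking.hostName;
    tier = config.homelab.host.tier;
  } // lib.optionalAttrs (config.homelab.host.role != null) {
    role = config.homelab.host.role;
  };
in {
  services.promtail.configuration.scrape_configs = [
    {
      job_name = "journal";
      journal.labels = hostLabels // { job = "systemd-journal"; };
    }
    {
      job_name = "varlog";
      static_configs = [{
        targets = [ "localhost" ];
        labels = hostLabels // {
          job = "varlog";
          __path__ = "/var/log/**/*.log";
        };
      }];
    }
  ];
}
```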
|
||||
|
||||
No cardinality impact - `tier` and `role` are 1:1 with `hostname`, so they add metadata to existing streams without creating new ones.
|
||||
|
||||
This enables queries like:
|
||||
- `{tier="prod"} |= "error"` - all errors on prod hosts
|
||||
- `{role="dns"}` - all DNS server logs
|
||||
- `{tier="test", job="systemd-journal"}` - journal logs from test hosts
|
||||
|
||||
### 5. Journal Priority → Level Label
|
||||
|
||||
**Implemented.** Promtail pipeline stages map journal `PRIORITY` to a `level` label:
|
||||
|
||||
| PRIORITY | level |
|
||||
|----------|-------|
|
||||
| 0-2 | critical |
|
||||
| 3 | error |
|
||||
| 4 | warning |
|
||||
| 5 | notice |
|
||||
| 6 | info |
|
||||
| 7 | debug |
|
||||
|
||||
Uses a `json` stage to extract PRIORITY, `template` to map to level name, and `labels` to attach it. This gives reliable level filtering for all journal logs, unlike Loki's `detected_level` which only works for apps that embed level keywords in message text.
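
A hedged sketch of those stages as they might appear under the journal scrape config (the template mapping is illustrative; the actual stages in `system/monitoring/logs.nix` may differ):

```nix
pipeline_stages = [
  # Extract PRIORITY from the JSON-formatted journal entry
  { json.expressions.priority = "PRIORITY"; }
  # Map the numeric priority to a level name (0-2 critical, 3 error, ...)
  {
    template = {
      source = "priority";
      template = ''{{ if le .Value "2" }}critical{{ else if eq .Value "3" }}error{{ else if eq .Value "4" }}warning{{ else if eq .Value "5" }}notice{{ else if eq .Value "6" }}info{{ else }}debug{{ end }}'';
    };
  }
  # Attach the mapped value as the `level` label
  { labels.level = "priority"; }
];
```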
|
||||
|
||||
Example queries:
|
||||
- `{level="error"}` - all errors across the fleet
|
||||
- `{level=~"critical|error", tier="prod"}` - prod errors and criticals
|
||||
- `{level="warning", role="dns"}` - warnings from DNS servers
|
||||
|
||||
### 6. Enable JSON Logging on Services
|
||||
|
||||
**Problem:** Many services support structured JSON log output but may be using plain text by default. JSON logs are significantly easier to query in Loki - `| json` cleanly extracts all fields, whereas plain text requires fragile regex or pattern matching.
|
||||
|
||||
**Audit results (2026-02-13):**
|
||||
|
||||
**Already logging JSON:**
|
||||
- Caddy (all instances) - JSON by default for access logs
|
||||
- homelab-deploy (listener/builder) - Go app, logs structured JSON
|
||||
|
||||
**Supports JSON, not configured (high value):**
|
||||
|
||||
| Service | How to enable | Config file |
|
||||
|---------|--------------|-------------|
|
||||
| Prometheus | `--log.format=json` | `services/monitoring/prometheus.nix` |
|
||||
| Alertmanager | `--log.format=json` | `services/monitoring/prometheus.nix` |
|
||||
| Loki | `--log.format=json` | `services/monitoring/loki.nix` |
|
||||
| Grafana | `log.console.format = "json"` | `services/monitoring/grafana.nix` |
|
||||
| Tempo | `log_format: json` in config | `services/monitoring/tempo.nix` |
|
||||
| OpenBao | `log_format = "json"` | `services/vault/default.nix` |
|
||||
|
||||
**Supports JSON, not configured (lower value - minimal log output):**
|
||||
|
||||
| Service | How to enable |
|
||||
|---------|--------------|
|
||||
| Pyroscope | `--log.format=json` (OCI container) |
|
||||
| Blackbox Exporter | `--log.format=json` |
|
||||
| Node Exporter | `--log.format=json` (all 16 hosts) |
|
||||
| Systemd Exporter | `--log.format=json` (all 16 hosts) |
|
||||
|
||||
**No JSON support (syslog/text only):**
|
||||
- NSD, Unbound, OpenSSH, Mosquitto
|
||||
|
||||
**Needs verification:**
|
||||
- Kanidm, Jellyfin, Home Assistant, Harmonia, Zigbee2MQTT, NATS
|
||||
|
||||
**Recommendation:** Start with the monitoring stack (Prometheus, Alertmanager, Loki, Grafana, Tempo) since they're all Go apps with the same `--log.format=json` flag. Then OpenBao. The exporters are lower priority since they produce minimal log output.
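
A hedged sketch of what enabling this could look like; `extraFlags` is a real option on the Prometheus, Alertmanager, and Loki modules, while the Grafana and Tempo option paths below are assumptions that should be checked against the modules:

```nix
{
  # Go services sharing the common logging flag
  services.prometheus.extraFlags = [ "--log.format=json" ];
  services.prometheus.alertmanager.extraFlags = [ "--log.format=json" ];
  services.loki.extraFlags = [ "--log.format=json" ];

  # Assumed option paths for Grafana ([log.console] in grafana.ini) and Tempo
  services.grafana.settings."log.console".format = "json";
  services.tempo.settings.server.log_format = "json";
}
```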
|
||||
|
||||
### 7. Monitoring CNAME for Promtail Target
|
||||
|
||||
**Problem:** Promtail hardcodes `monitoring01.home.2rjus.net:3100`. The VictoriaMetrics migration plan already addresses this by switching to a `monitoring` CNAME.
|
||||
|
||||
**Recommendation:** This should happen as part of the monitoring02 migration, not independently. If we do Loki improvements before that migration, keep pointing to monitoring01.
|
||||
|
||||
## Priority Ranking
|
||||
|
||||
| # | Improvement | Effort | Impact | Status |
|
||||
|---|-------------|--------|--------|--------|
|
||||
| 1 | **Retention policy** | Low | High | Done (30d compactor retention) |
|
||||
| 2 | **Limits config** | Low | Medium | Done (rate limits + stream guards) |
|
||||
| 3 | **Promtail labels** | Trivial | Low | Done (hostname/tier/role/level) |
|
||||
| 4 | **Journal priority → level** | Low-medium | Medium | Done (pipeline stages) |
|
||||
| 5 | **JSON logging audit** | Low-medium | Medium | Audited, not yet enabled |
|
||||
| 6 | **Monitoring CNAME** | Low | Medium | Part of monitoring02 migration |
|
||||
|
||||
## Implementation Steps
|
||||
|
||||
### Phase 1: Retention + Labels (done 2026-02-13)
|
||||
|
||||
1. ~~Add `compactor` section to `services/monitoring/loki.nix`~~ Done
|
||||
2. ~~Add `limits_config` with 30-day retention and basic rate limits~~ Done
|
||||
3. ~~Update `system/monitoring/logs.nix`~~ Done:
|
||||
- Standardized on `hostname` label (matching Prometheus) for both scrape configs
|
||||
- Added `tier` and `role` static labels from `homelab.host` options
|
||||
- Added pipeline stages for journal PRIORITY → `level` label mapping
|
||||
4. ~~Update `pipe-to-loki` and bootstrap scripts to use `hostname`~~ Done
|
||||
5. ~~Deploy and verify labels~~ Done - all 15 hosts reporting with correct labels
|
||||
|
||||
### Phase 2: JSON Logging (not started)
|
||||
|
||||
Enable JSON logging on services that support it, starting with the monitoring stack:
|
||||
1. Prometheus, Alertmanager, Loki, Grafana, Tempo (`--log.format=json`)
|
||||
2. OpenBao (`log_format = "json"`)
|
||||
3. Lower priority: exporters (node-exporter, systemd-exporter, blackbox)
|
||||
|
||||
### Phase 3 (future): S3 Storage Migration
|
||||
|
||||
Revisit after NAS migration when a proper S3-compatible storage solution is available. At that point, add a new schema period with `object_store = "s3"` - the old filesystem period will continue serving historical data until it ages out past retention.
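
A hedged sketch of what that schema addition could look like in the Loki config (the dates are placeholders and the S3 credentials/endpoint are omitted):

```nix
schema_config.configs = [
  {
    # Existing filesystem period (illustrative start date)
    from = "2025-01-01";
    store = "tsdb";
    object_store = "filesystem";
    schema = "v13";
    index = { prefix = "index_"; period = "24h"; };
  }
  {
    # New period starting at the cutover date (placeholder)
    from = "2026-06-01";
    store = "tsdb";
    object_store = "s3";
    schema = "v13";
    index = { prefix = "index_"; period = "24h"; };
  }
];
```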
|
||||
|
||||
## Open Questions
|
||||
|
||||
- [ ] Do we want per-stream retention (e.g., keep bootstrap/pipe-to-loki longer)?
|
||||
|
||||
## Notes
|
||||
|
||||
- Loki schema changes require adding a new period entry (not modifying existing ones). The old period continues serving historical data.
|
||||
- Loki 3.6.3 requires `delete_request_store = "filesystem"` in the compactor config when retention is enabled.
|
||||
- S3 storage deferred until post-NAS migration when a proper solution is available.
|
||||
- As of 2026-02-13, Loki uses ~6.8 GB for ~30 days of logs from 16 hosts. Prometheus uses ~7.6 GB on the same disk (33 GB total, ~8 GB free).
|
||||
docs/plans/media-pc-replacement.md (new file, +244)
@@ -0,0 +1,244 @@
|
||||
# Media PC Replacement
|
||||
|
||||
## Overview
|
||||
|
||||
Replace the aging Linux+Kodi media PC connected to the TV with a modern, compact solution. Primary use cases are Jellyfin/Kodi playback and watching Twitch/YouTube. The current machine (`media`, 10.69.31.50) is on VLAN 31.
|
||||
|
||||
## Current State
|
||||
|
||||
### Hardware
|
||||
- **CPU**: Intel Core i7-4770K @ 3.50GHz (Haswell, 4C/8T, 2013)
|
||||
- **GPU**: Nvidia GeForce GT 710 (Kepler, GK208B)
|
||||
- **OS**: Ubuntu 22.04.5 LTS (Jammy)
|
||||
- **Software**: Kodi
|
||||
- **Network**: `media.home.2rjus.net` at `10.69.31.50` (VLAN 31)
|
||||
|
||||
### Control & Display
|
||||
- **Input**: Wireless keyboard (works well, useful for browser)
|
||||
- **TV**: 1080p (no 4K/HDR currently, but may upgrade TV later)
|
||||
- **Audio**: Surround system connected via HDMI ARC from TV (PC → HDMI → TV → ARC → surround)
|
||||
|
||||
### Notes on Current Hardware
|
||||
- The i7-4770K is massively overpowered for media playback — it's a full desktop CPU from 2013
|
||||
- The GT 710 is a low-end passive GPU; it hardware-decodes H.264 but not H.265/HEVC, and its HDMI 1.4 output is limited to 4K@30Hz
|
||||
- Ubuntu 22.04 is approaching EOL (April 2027) and is not managed by this repo
|
||||
- The whole system is likely in a full-size or mid-tower case — not ideal for a TV setup
|
||||
|
||||
### Integration
|
||||
- **Media source**: Jellyfin on `jelly01` (10.69.13.14) serves media from NAS via NFS
|
||||
- **DNS**: A record in `services/ns/external-hosts.nix`
|
||||
- **Not managed**: Not a NixOS host in this repo, no monitoring/auto-updates
|
||||
|
||||
## Options
|
||||
|
||||
### Option 1: Dedicated Streaming Device (Apple TV / Nvidia Shield)
|
||||
|
||||
| Aspect | Apple TV 4K | Nvidia Shield Pro |
|
||||
|--------|-------------|-------------------|
|
||||
| **Price** | ~$130-180 | ~$200 |
|
||||
| **Jellyfin** | Swiftfin app (good) | Jellyfin Android TV (good) |
|
||||
| **Kodi** | Not available (tvOS) | Full Kodi support |
|
||||
| **Twitch** | Native app | Native app |
|
||||
| **YouTube** | Native app | Native app |
|
||||
| **HDR/DV** | Dolby Vision + HDR10 | Dolby Vision + HDR10 |
|
||||
| **4K** | Yes | Yes |
|
||||
| **Form factor** | Tiny, silent | Small, silent |
|
||||
| **Remote** | Excellent Siri remote | Decent, supports CEC |
|
||||
| **Homelab integration** | None | Minimal (Plex/Kodi only) |
|
||||
|
||||
**Pros:**
|
||||
- Zero maintenance - appliance experience
|
||||
- Excellent app ecosystem (native Twitch, YouTube, streaming services)
|
||||
- Silent, tiny form factor
|
||||
- Great remote control / CEC support
|
||||
- Hardware-accelerated codec support out of the box
|
||||
|
||||
**Cons:**
|
||||
- No NixOS management, monitoring, or auto-updates
|
||||
- Can't run arbitrary software
|
||||
- Jellyfin clients are decent but not as mature as Kodi
|
||||
- Vendor lock-in (Apple ecosystem / Google ecosystem)
|
||||
- No SSH access for troubleshooting
|
||||
|
||||
### Option 2: NixOS Mini PC (Kodi Appliance)
|
||||
|
||||
A small form factor PC (Intel NUC, Beelink, MinisForum, etc.) running NixOS with Kodi as the desktop environment.
|
||||
|
||||
**NixOS has built-in support:**
|
||||
- `services.xserver.desktopManager.kodi.enable` - boots directly into Kodi
|
||||
- `kodi-gbm` package - Kodi with direct DRM/KMS rendering (no X11/Wayland needed)
|
||||
- `kodiPackages.jellycon` - Jellyfin integration for Kodi
|
||||
- `kodiPackages.sendtokodi` - plays streams via yt-dlp (Twitch, YouTube)
|
||||
- `kodiPackages.inputstream-adaptive` - adaptive streaming support
|
||||
|
||||
**Example NixOS config sketch:**
|
||||
```nix
{ pkgs, ... }:
{
  services.xserver.desktopManager.kodi = {
    enable = true;
    package = pkgs.kodi.withPackages (p: [
      p.jellycon
      p.sendtokodi
      p.inputstream-adaptive
    ]);
  };

  # Auto-login to Kodi session
  services.displayManager.autoLogin = {
    enable = true;
    user = "kodi";
  };
}
```
|
||||
|
||||
**Pros:**
|
||||
- Full NixOS management (monitoring, auto-updates, vault, promtail)
|
||||
- Kodi is a proven TV interface with excellent remote/CEC support
|
||||
- JellyCon integrates Jellyfin library directly into Kodi
|
||||
- Twitch/YouTube via sendtokodi + yt-dlp or Kodi browser addons
|
||||
- Can run arbitrary services (e.g., Home Assistant dashboard)
|
||||
- Declarative, reproducible config in this repo
|
||||
|
||||
**Cons:**
|
||||
- More maintenance than an appliance
|
||||
- NixOS + Kodi on bare metal needs GPU driver setup (Intel iGPU is usually fine)
|
||||
- Kodi YouTube/Twitch addons are less polished than native apps
|
||||
- Need to buy hardware (~$150-400 for a decent mini PC)
|
||||
- Power consumption higher than a streaming device
|
||||
|
||||
### Option 3: NixOS Mini PC (Wayland Desktop)
|
||||
|
||||
A mini PC running NixOS with a lightweight Wayland compositor, launching Kodi for media and a browser for Twitch/YouTube.
|
||||
|
||||
**Pros:**
|
||||
- Best of both worlds: Kodi for media, Firefox/Chromium for Twitch/YouTube
|
||||
- Full NixOS management
|
||||
- Can switch between Kodi and browser easily
|
||||
- Native web experience for streaming sites
|
||||
|
||||
**Cons:**
|
||||
- More complex setup (compositor + Kodi + browser)
|
||||
- Harder to get a good "10-foot UI" experience
|
||||
- Keyboard/mouse may be needed alongside remote
|
||||
- Significantly more maintenance
|
||||
|
||||
## Comparison
|
||||
|
||||
| Criteria | Dedicated Device | NixOS Kodi | NixOS Desktop |
|
||||
|----------|-----------------|------------|---------------|
|
||||
| **Maintenance** | None | Low | Medium |
|
||||
| **Media experience** | Excellent | Excellent | Good |
|
||||
| **Twitch/YouTube** | Excellent (native apps) | Good (addons/yt-dlp) | Excellent (browser) |
|
||||
| **Homelab integration** | None | Full | Full |
|
||||
| **Form factor** | Tiny | Small | Small |
|
||||
| **Cost** | $130-200 | $150-400 | $150-400 |
|
||||
| **Silent operation** | Yes | Likely (fanless options) | Likely |
|
||||
| **CEC remote** | Yes | Yes (Kodi) | Partial |
|
||||
|
||||
## Decision: NixOS Mini PC with Kodi (Option 2)
|
||||
|
||||
**Rationale:**
|
||||
- Already comfortable with Kodi + wireless keyboard workflow
|
||||
- Browser access for Twitch/YouTube is important — Kodi can launch a browser when needed
|
||||
- Homelab integration comes for free (monitoring, auto-updates, vault)
|
||||
- Natural fit alongside the other 16 NixOS hosts in this repo
|
||||
- Dedicated devices lose the browser/keyboard workflow
|
||||
|
||||
### Display Server: Sway/Hyprland
|
||||
|
||||
Options evaluated:
|
||||
|
||||
| Approach | Pros | Cons |
|
||||
|----------|------|------|
|
||||
| Cage (kiosk) | Simplest, single-app | No browser without TTY switching |
|
||||
| kodi-gbm (no compositor) | Best HDR support | No browser at all, ALSA-only audio |
|
||||
| **Sway/Hyprland** | **Workspace switching, VA-API in browser** | **Slightly more config** |
|
||||
| Full DE (GNOME/KDE) | Everything works | Overkill, heavy |
|
||||
|
||||
**Decision: Sway or Hyprland** (Hyprland preferred — same as desktop)
|
||||
|
||||
- Kodi fullscreen on workspace 1, Firefox on workspace 2
|
||||
- Switch via keybinding on wireless keyboard
|
||||
- Auto-start both on login via greetd
|
||||
- Minimal config — no bar, no decorations, just workspaces
|
||||
- VA-API hardware decode works in Firefox on Wayland (important for YouTube/Twitch)
|
||||
- Can revisit kodi-gbm later if HDR becomes a priority (just a config change)
|
||||
|
||||
### Twitch/YouTube
|
||||
|
||||
Firefox on workspace 2, switched to via keyboard. Kodi addons (sendtokodi, YouTube plugin) available as secondary options but a real browser is the primary approach.
|
||||
|
||||
### Media Playback: Kodi + JellyCon + NFS Direct Path
|
||||
|
||||
Three options were evaluated for media playback:
|
||||
|
||||
| Approach | Transcoding | Library management | Watch state sync |
|
||||
|----------|-------------|-------------------|-----------------|
|
||||
| Jellyfin only (browser) | Yes — browsers lack codec support for DTS, PGS subs, etc. | Jellyfin | Jellyfin |
|
||||
| Kodi + NFS only | No — Kodi plays everything natively | Kodi local DB | None |
|
||||
| **Kodi + JellyCon + NFS** | **No — Kodi's native player, direct path via NFS** | **Jellyfin** | **Jellyfin** |
|
||||
|
||||
**Decision: Kodi + JellyCon with NFS direct path**
|
||||
|
||||
- JellyCon presents the Jellyfin library inside Kodi's UI (browse, search, metadata, artwork)
|
||||
- Playback uses Kodi's native player — direct play, no transcoding, full codec support including surround passthrough
|
||||
- JellyCon's "direct path" mode maps Jellyfin paths to local NFS mounts, so playback goes straight over NFS without streaming through Jellyfin's HTTP layer
|
||||
- Watch state, resume position, etc. sync back to Jellyfin — accessible from other devices too
|
||||
- NFS mount follows the same pattern as jelly01 (`nas.home.2rjus.net:/mnt/hdd-pool/media`)
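
A minimal sketch of that mount, mirroring the jelly01 pattern (the local mount point and the read-only option are assumptions):

```nix
fileSystems."/mnt/media" = {
  device = "nas.home.2rjus.net:/mnt/hdd-pool/media";
  fsType = "nfs";
  options = [ "ro" "noatime" "nofail" ];  # read-only is an assumption
};
```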
|
||||
|
||||
### Audio Passthrough
|
||||
|
||||
Kodi on NixOS supports HDMI audio passthrough for surround formats (AC3, DTS, etc.). The ARC chain (PC → HDMI → TV → ARC → surround) works transparently — Kodi just needs to be configured for passthrough rather than decoding audio locally.
|
||||
|
||||
## Hardware
|
||||
|
||||
### Leading Candidate: GMKtec G3
|
||||
|
||||
- **CPU**: Intel N100 (Alder Lake-N, 4C/4T)
|
||||
- **RAM**: 16GB
|
||||
- **Storage**: 512GB NVMe
|
||||
- **Price**: ~NOK 2800 (~$250 USD)
|
||||
- **Source**: AliExpress
|
||||
|
||||
The N100 supports hardware decode for all relevant 4K codecs:
|
||||
|
||||
| Codec | Support | Used by |
|
||||
|-------|---------|---------|
|
||||
| H.264/AVC | Yes (Quick Sync) | Older media |
|
||||
| H.265/HEVC 10-bit | Yes (Quick Sync) | Most 4K media, HDR |
|
||||
| VP9 | Yes (Quick Sync) | YouTube 4K |
|
||||
| AV1 | Yes (Quick Sync) | YouTube, Twitch, newer encodes |
|
||||
|
||||
16GB RAM is comfortable for Kodi + browser + NixOS system services (node-exporter, promtail, etc.) with plenty of headroom.
|
||||
|
||||
### Key Requirements
|
||||
- HDMI 2.0+ for 4K future-proofing (current TV is 1080p)
|
||||
- Hardware video decode via VA-API / Intel Quick Sync
|
||||
- HDR support (for future TV upgrade)
|
||||
- Fanless or near-silent operation
|
||||
|
||||
## Implementation Steps
|
||||
|
||||
1. **Choose and order hardware**
|
||||
2. **Create host configuration** (`hosts/media1/`)
|
||||
- Kodi desktop manager with Jellyfin + streaming addons
|
||||
- Intel/AMD iGPU driver and VA-API hardware decode
|
||||
- HDMI audio passthrough for surround
|
||||
- NFS mount for media (same pattern as jelly01)
|
||||
- Browser package (Firefox/Chromium) for Twitch/YouTube fallback
|
||||
- Standard system modules (monitoring, promtail, vault, auto-upgrade)
|
||||
3. **Install NixOS** on the mini PC
|
||||
4. **Configure Kodi** (Jellyfin server, addons, audio passthrough)
|
||||
5. **Update DNS** - point `media.home.2rjus.net` to new IP (or keep on VLAN 31)
|
||||
6. **Retire old media PC**
|
||||
|
||||
## Open Questions
|
||||
|
||||
- [x] What are the current media PC specs? — i7-4770K, GT 710, Ubuntu 22.04. Overkill CPU, weak GPU, large form factor. Not worth reusing if goal is compact/silent.
|
||||
- [x] VLAN? — Keep on VLAN 31 for now, same as current media PC. Can revisit later.
|
||||
- [x] Is CEC needed? — No, not using it currently. Can add later if desired.
|
||||
- [x] Is 4K HDR output needed? — TV is 1080p now, but want 4K/HDR capability for future TV upgrade
|
||||
- [x] Audio setup? — Surround system via HDMI ARC from TV. Media PC outputs HDMI to TV, TV passes audio to surround via ARC. Kodi/any player just needs HDMI audio output with surround passthrough.
|
||||
- [x] Are there streaming service apps needed? — No. Only Twitch/YouTube, which work fine in any browser.
|
||||
- [x] Budget? — ~NOK 2800 for GMKtec G3 (N100, 16GB, 512GB NVMe)
|
||||
docs/plans/memory-issues-follow-up.md (new file, +116)
@@ -0,0 +1,116 @@
|
||||
# Memory Issues Follow-up
|
||||
|
||||
Tracking the zram change to verify it resolves OOM issues during nixos-upgrade on low-memory hosts.
|
||||
|
||||
## Background
|
||||
|
||||
On 2026-02-08, ns2 (2GB RAM) experienced an OOM kill during nixos-upgrade. The Nix evaluation process consumed ~1.6GB before being killed by the kernel. ns1 (manually increased to 4GB) succeeded with the same upgrade.
|
||||
|
||||
Root cause: 2GB RAM is insufficient for Nix flake evaluation without swap.
|
||||
|
||||
## Fix Applied
|
||||
|
||||
**Commit:** `1674b6a` - system: enable zram swap for all hosts
|
||||
|
||||
**Merged:** 2026-02-08 ~12:15 UTC
|
||||
|
||||
**Change:** Added `zramSwap.enable = true` to `system/zram.nix`, providing ~2GB compressed swap on all hosts.
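
A sketch of what `system/zram.nix` likely amounts to; the size setting is an assumption (the commit may rely on module defaults or pin a fixed size instead):

```nix
{
  zramSwap = {
    enable = true;
    memoryPercent = 100;  # assumed: yields a ~2GB zram device on the 2GB hosts
  };
}
```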
|
||||
|
||||
## Timeline
|
||||
|
||||
| Time (UTC) | Event |
|
||||
|------------|-------|
|
||||
| 05:00:46 | ns2 nixos-upgrade OOM killed |
|
||||
| 05:01:47 | `nixos_upgrade_failed` alert fired |
|
||||
| 12:15 | zram commit merged to master |
|
||||
| 12:19 | ns2 rebooted with zram enabled |
|
||||
| 12:20 | ns1 rebooted (memory reduced to 2GB via tofu) |
|
||||
|
||||
## Hosts Affected
|
||||
|
||||
All 2GB VMs that run nixos-upgrade:
|
||||
- ns1, ns2 (DNS)
|
||||
- vault01
|
||||
- testvm01, testvm02, testvm03
|
||||
- kanidm01
|
||||
|
||||
## Metrics to Monitor
|
||||
|
||||
Check these in Grafana or via PromQL to verify the fix:
|
||||
|
||||
### Swap availability (should be ~2GB after upgrade)
|
||||
```promql
|
||||
node_memory_SwapTotal_bytes / 1024 / 1024
|
||||
```
|
||||
|
||||
### Swap usage during upgrades
|
||||
```promql
|
||||
(node_memory_SwapTotal_bytes - node_memory_SwapFree_bytes) / 1024 / 1024
|
||||
```
|
||||
|
||||
### Zswap compressed bytes
```promql
node_memory_Zswap_bytes / 1024 / 1024
```

Note: `node_memory_Zswap_bytes` tracks zswap, not zram. With `zramSwap.enable = true` the compressed pages live in a zram block device rather than the zswap cache, so this metric is expected to stay at 0; the swap usage query above is the signal that matters here.
|
||||
|
||||
### Upgrade failures (should be 0)
|
||||
```promql
|
||||
node_systemd_unit_state{name="nixos-upgrade.service", state="failed"}
|
||||
```
|
||||
|
||||
### Memory available during upgrades
|
||||
```promql
|
||||
node_memory_MemAvailable_bytes / 1024 / 1024
|
||||
```
|
||||
|
||||
## Verification Steps
|
||||
|
||||
After a few days (allow auto-upgrades to run on all hosts):
|
||||
|
||||
1. Check all hosts have swap enabled:
|
||||
```promql
|
||||
node_memory_SwapTotal_bytes > 0
|
||||
```
|
||||
|
||||
2. Check for any upgrade failures since the fix:
|
||||
```promql
|
||||
count_over_time(ALERTS{alertname="nixos_upgrade_failed"}[7d])
|
||||
```
|
||||
|
||||
3. Review if any hosts used swap during upgrades (check historical graphs)
|
||||
|
||||
## Success Criteria
|
||||
|
||||
- No `nixos_upgrade_failed` alerts due to OOM after 2026-02-08
|
||||
- All hosts show ~2GB swap available
|
||||
- Upgrades complete successfully on 2GB VMs
|
||||
|
||||
## Fallback Options
|
||||
|
||||
If zram is insufficient:
|
||||
|
||||
1. **Increase VM memory** - Update `terraform/vms.tf` to 4GB for affected hosts
|
||||
2. **Enable memory ballooning** - Configure VMs with dynamic memory allocation (see below)
|
||||
3. **Use remote builds** - Configure `nix.buildMachines` to offload evaluation
|
||||
4. **Reduce flake size** - Split configurations to reduce evaluation memory
|
||||
|
||||
### Memory Ballooning
|
||||
|
||||
Proxmox supports memory ballooning, which allows VMs to dynamically grow/shrink memory allocation based on demand. The balloon driver inside the guest communicates with the hypervisor to release or reclaim memory pages.
|
||||
|
||||
Configuration in `terraform/vms.tf`:
|
||||
```hcl
|
||||
memory = 4096 # maximum memory
|
||||
balloon = 2048 # minimum memory (shrinks to this when idle)
|
||||
```
|
||||
|
||||
Pros:
|
||||
- VMs get memory on-demand without reboots
|
||||
- Better host memory utilization
|
||||
- Solves upgrade OOM without permanently allocating 4GB
|
||||
|
||||
Cons:
|
||||
- Requires QEMU guest agent running in guest
|
||||
- Guest can experience memory pressure if host is overcommitted
|
||||
|
||||
Ballooning and zram are complementary - ballooning provides headroom from the host, zram provides overflow within the guest.
|
||||
docs/plans/new-services.md (new file, +145)
@@ -0,0 +1,145 @@
|
||||
# New Service Candidates
|
||||
|
||||
Ideas for additional services to deploy in the homelab. These lean more enterprise/obscure
|
||||
than the typical self-hosted fare.
|
||||
|
||||
## Litestream
|
||||
|
||||
Continuous SQLite replication to S3-compatible storage. Streams WAL changes in near-real-time,
|
||||
providing point-in-time recovery without scheduled backup jobs.
|
||||
|
||||
**Why:** Several services use SQLite (Home Assistant, potentially others). Litestream would
|
||||
give continuous backup to Garage S3 with minimal resource overhead and near-zero configuration.
|
||||
Replaces cron-based backup scripts with a small daemon per database.
|
||||
|
||||
**Integration points:**
|
||||
- Garage S3 as replication target (already deployed)
|
||||
- Home Assistant SQLite database is the primary candidate
|
||||
- Could also cover any future SQLite-backed services
|
||||
|
||||
**Complexity:** Low. Single Go binary, minimal config (source DB path + S3 endpoint).
|
||||
|
||||
**NixOS packaging:** Available in nixpkgs as `litestream`.
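
A hedged sketch of what a Litestream replica for the Home Assistant database could look like; the DB path, bucket, endpoint, and secrets path are all assumptions:

```nix
services.litestream = {
  enable = true;
  environmentFile = "/run/secrets/litestream.env";  # S3 access keys (assumed path)
  settings.dbs = [{
    path = "/var/lib/hass/home-assistant_v2.db";
    replicas = [{
      type = "s3";
      bucket = "litestream";
      path = "home-assistant";
      endpoint = "http://garage.home.2rjus.net:3900";  # assumed Garage endpoint
    }];
  }];
};
```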
|
||||
|
||||
---
|
||||
|
||||
## ntopng
|
||||
|
||||
Deep network traffic analysis and flow monitoring. Provides real-time visibility into bandwidth
|
||||
usage, protocol distribution, top talkers, and anomaly detection via a web UI.
|
||||
|
||||
**Why:** We have host-level metrics (node-exporter) and logs (Loki) but no network-level
|
||||
visibility. ntopng would show traffic patterns across the infrastructure — NFS throughput to
|
||||
the NAS, DNS query volume, inter-host traffic, and bandwidth anomalies. Useful for capacity
|
||||
planning and debugging network issues.
|
||||
|
||||
**Integration points:**
|
||||
- Could export metrics to Prometheus via its built-in exporter
|
||||
- Web UI behind http-proxy with Kanidm OIDC (if supported) or Pomerium
|
||||
- NetFlow/sFlow from managed switches (if available)
|
||||
- Passive traffic capture on a mirror port or the monitoring host itself
|
||||
|
||||
**Complexity:** Medium. Needs network tap or mirror port for full visibility, or can run
|
||||
in host-local mode. May need a dedicated interface or VLAN mirror.
|
||||
|
||||
**NixOS packaging:** Available in nixpkgs as `ntopng`.
|
||||
|
||||
---
|
||||
|
||||
## Renovate
|
||||
|
||||
Automated dependency update bot that understands Nix flakes natively. Creates branches/PRs
|
||||
to bump flake inputs on a configurable schedule.
|
||||
|
||||
**Why:** Currently `nix flake update` is manual. Renovate can automatically propose updates
|
||||
to individual flake inputs (nixpkgs, homelab-deploy, nixos-exporter, etc.), group related
|
||||
updates, and respect schedules. More granular than updating everything at once — can bump
|
||||
nixpkgs weekly but hold back other inputs, auto-merge patch-level changes, etc.
|
||||
|
||||
**Integration points:**
|
||||
- Runs against git.t-juice.club repositories
|
||||
- Understands `flake.lock` format natively
|
||||
- Could target both `nixos-servers` and `nixos` repos
|
||||
- Update branches would be validated by homelab-deploy builder
|
||||
|
||||
**Complexity:** Medium. Needs git forge integration (Gitea/Forgejo API). Self-hosted runner
|
||||
mode available. Configuration via `renovate.json` in each repo.
|
||||
|
||||
**NixOS packaging:** Available in nixpkgs as `renovate`.
|
||||
|
||||
---
|
||||
|
||||
## Pomerium
|
||||
|
||||
Identity-aware reverse proxy implementing zero-trust access. Every request is authenticated
|
||||
and authorized based on identity, device, and context — not just network location.
|
||||
|
||||
**Why:** Currently Caddy terminates TLS but doesn't enforce authentication on most services.
|
||||
Pomerium would put Kanidm OIDC authentication in front of every internal service, with
|
||||
per-route authorization policies (e.g., "only admins can access Prometheus," "require re-auth
|
||||
for Vault UI"). Directly addresses the security hardening plan's goals.
|
||||
|
||||
**Integration points:**
|
||||
- Kanidm as OIDC identity provider (already deployed)
|
||||
- Could replace or sit in front of Caddy for internal services
|
||||
- Per-route policies based on Kanidm groups (admins, users, ssh-users)
|
||||
- Centralizes access logging and audit trail
|
||||
|
||||
**Complexity:** Medium-high. Needs careful integration with existing Caddy reverse proxy.
|
||||
Decision needed on whether Pomerium replaces Caddy or works alongside it (Pomerium for
|
||||
auth, Caddy for TLS termination and routing, or Pomerium handles everything).
|
||||
|
||||
**NixOS packaging:** Available in nixpkgs as `pomerium`.
|
||||
|
||||
---
|
||||
|
||||
## Apache Guacamole
|
||||
|
||||
Clientless remote desktop and SSH gateway. Provides browser-based access to hosts via
|
||||
RDP, VNC, SSH, and Telnet with no client software required. Supports session recording
|
||||
and playback.
|
||||
|
||||
**Why:** Provides an alternative remote access path that doesn't require VPN software or
|
||||
SSH keys on the client device. Useful for accessing hosts from untrusted machines (phone,
|
||||
borrowed laptop) or providing temporary access to others. Session recording gives an audit
|
||||
trail. Could complement the WireGuard remote access plan rather than replace it.
|
||||
|
||||
**Integration points:**
|
||||
- Kanidm for authentication (OIDC or LDAP)
|
||||
- Behind http-proxy or Pomerium for TLS
|
||||
- SSH access to all hosts in the fleet
|
||||
- Session recordings could be stored on Garage S3
|
||||
- Could serve as the "emergency access" path when VPN is unavailable
|
||||
|
||||
**Complexity:** Medium. Two components (the native guacd proxy daemon plus a Java web app), typically needs PostgreSQL for
|
||||
connection/user storage (already available). Docker is the common deployment method but
|
||||
native packaging exists.
|
||||
|
||||
**NixOS packaging:** Available in nixpkgs as `guacamole-server` and `guacamole-client`.
|
||||
|
||||
---
|
||||
|
||||
## CrowdSec
|
||||
|
||||
Collaborative intrusion prevention system with crowd-sourced threat intelligence.
|
||||
Parses logs to detect attack patterns, applies remediation (firewall bans, CAPTCHA),
|
||||
and shares/receives threat signals from a global community network.
|
||||
|
||||
**Why:** Goes beyond fail2ban with behavioral detection, crowd-sourced IP reputation,
|
||||
and a scenario-based engine. Fits the security hardening plan. The community blocklist
|
||||
means we benefit from threat intelligence gathered across thousands of deployments.
|
||||
Could parse SSH logs, HTTP access logs, and other service logs to detect and block
|
||||
malicious activity.
|
||||
|
||||
**Integration points:**
|
||||
- Could consume logs from Loki or directly from journald/log files
|
||||
- Firewall bouncer for iptables/nftables remediation
|
||||
- Caddy bouncer for HTTP-level blocking
|
||||
- Prometheus metrics exporter for alert integration
|
||||
- Scenarios available for SSH brute force, HTTP scanning, and more
|
||||
- Feeds into existing alerting pipeline (Alertmanager -> alerttonotify)
|
||||
|
||||
**Complexity:** Medium. Agent (log parser + decision engine) on each host or centralized.
|
||||
Bouncers (enforcement) on edge hosts. Free community tier includes threat intel access.
|
||||
|
||||
**NixOS packaging:** Available in nixpkgs as `crowdsec`.
|
||||
docs/plans/nixos-hypervisor.md (new file, +232)
@@ -0,0 +1,232 @@
|
||||
# NixOS Hypervisor
|
||||
|
||||
## Overview
|
||||
|
||||
Experiment with running a NixOS-based hypervisor as an alternative/complement to the current Proxmox setup. Goal is better homelab integration — declarative config, monitoring, auto-updates — while retaining the ability to run VMs with a Terraform-like workflow.
|
||||
|
||||
## Motivation
|
||||
|
||||
- Proxmox works but doesn't integrate with the NixOS-managed homelab (no monitoring, no auto-updates, no vault, no declarative config)
|
||||
- The PN51 units (once stable) are good candidates for experimentation — test-tier, plenty of RAM (32-64GB), 8C/16T
|
||||
- Long-term: could reduce reliance on Proxmox or provide a secondary hypervisor pool
|
||||
- **VM migration**: Currently all VMs (including both nameservers) run on a single Proxmox host. Being able to migrate VMs between hypervisors would allow rebooting a host for kernel updates without downtime for critical services like DNS.
|
||||
|
||||
## Hardware Candidates
|
||||
|
||||
| | pn01 | pn02 |
|
||||
|---|---|---|
|
||||
| **CPU** | Ryzen 7 5700U (8C/16T) | Ryzen 7 5700U (8C/16T) |
|
||||
| **RAM** | 64GB (2x32GB) | 32GB (1x32GB, second slot available) |
|
||||
| **Storage** | 1TB NVMe | 1TB SATA SSD (NVMe planned) |
|
||||
| **Status** | Stability testing | Stability testing |
|
||||
|
||||
## Options
|
||||
|
||||
### Option 1: Incus
|
||||
|
||||
Fork of LXD, created after Canonical took the project in-house and changed its licensing and contribution policy. Supports both containers (LXC) and VMs (QEMU/KVM).
|
||||
|
||||
**NixOS integration:**
|
||||
- `virtualisation.incus.enable` module in nixpkgs
|
||||
- Manages storage pools, networks, and instances
|
||||
- REST API for automation
|
||||
- CLI tool (`incus`) for management
|
||||
|
||||
**Terraform integration:**
|
||||
- `lxd` provider works with Incus (API-compatible)
|
||||
- Dedicated `incus` Terraform provider also exists
|
||||
- Can define VMs/containers in OpenTofu, similar to current Proxmox workflow
|
||||
|
||||
**Migration:**
|
||||
- Built-in live and offline migration via `incus move <instance> --target <host>`
|
||||
- Clustering makes hosts aware of each other — migration is a first-class operation
|
||||
- Shared storage (NFS, Ceph) or Incus can transfer storage during migration
|
||||
- Stateful stop-and-move also supported for offline migration
|
||||
|
||||
**Pros:**
|
||||
- Supports both containers and VMs
|
||||
- REST API + CLI for automation
|
||||
- Built-in clustering and migration — closest to Proxmox experience
|
||||
- Good NixOS module support
|
||||
- Image-based workflow (can build NixOS images and import)
|
||||
- Active development and community
|
||||
|
||||
**Cons:**
|
||||
- Another abstraction layer on top of QEMU/KVM
|
||||
- Less mature Terraform provider than libvirt
|
||||
- Container networking can be complex
|
||||
- NixOS guests in Incus VMs need some setup
|
||||
|
||||
### Option 2: libvirt/QEMU
|
||||
|
||||
Standard Linux virtualization stack. Thin wrapper around QEMU/KVM.
|
||||
|
||||
**NixOS integration:**
|
||||
- `virtualisation.libvirtd.enable` module in nixpkgs
|
||||
- Mature and well-tested
|
||||
- virsh CLI for management
|
||||
|
||||
**Terraform integration:**
|
||||
- `dmacvicar/libvirt` provider — mature, well-maintained
|
||||
- Supports cloud-init, volume management, network config
|
||||
- Very similar workflow to current Proxmox+OpenTofu setup
|
||||
- Can reuse cloud-init patterns from existing `terraform/` config
|
||||
|
||||
**Migration:**
|
||||
- Supports live and offline migration via `virsh migrate`
|
||||
- Requires shared storage (NFS, Ceph, or similar) for live migration
|
||||
- Requires matching CPU models between hosts (or CPU model masking)
|
||||
- Works but is manual — no cluster awareness, must specify target URI
|
||||
- No built-in orchestration for multi-host scenarios
|
||||
|
||||
**Pros:**
|
||||
- Closest to current Proxmox+Terraform workflow
|
||||
- Most mature Terraform provider
|
||||
- Minimal abstraction — direct QEMU/KVM management
|
||||
- Well-understood, massive community
|
||||
- Cloud-init works identically to Proxmox workflow
|
||||
- Can reuse existing template-building patterns
|
||||
|
||||
**Cons:**
|
||||
- VMs only (no containers without adding LXC separately)
|
||||
- No built-in REST API (would need to expose libvirt socket)
|
||||
- No web UI without adding cockpit or virt-manager
|
||||
- Migration works but requires manual setup — no clustering, no orchestration
|
||||
- Less feature-rich than Incus for multi-host scenarios
|
||||
|
||||
### Option 3: microvm.nix
|
||||
|
||||
NixOS-native microVM framework. VMs defined as NixOS modules in the host's flake.
|
||||
|
||||
**NixOS integration:**
|
||||
- VMs are NixOS configurations in the same flake
|
||||
- Supports multiple backends: cloud-hypervisor, QEMU, firecracker, kvmtool
|
||||
- Lightweight — shares host's nix store with guests via virtiofs
|
||||
- Declarative network, storage, and resource allocation
|
||||
|
||||
**Terraform integration:**
|
||||
- None — everything is defined in Nix
|
||||
- Fundamentally different workflow from current Proxmox+Terraform approach
|
||||
|
||||
**Pros:**
|
||||
- Most NixOS-native approach
|
||||
- VMs defined right alongside host configs in this repo
|
||||
- Very lightweight — fast boot, minimal overhead
|
||||
- Shares nix store with host (no duplicate packages)
|
||||
- No cloud-init needed — guest config is part of the flake
|
||||
|
||||
**Migration:**
|
||||
- No migration support — VMs are tied to the host's NixOS config
|
||||
- Moving a VM means rebuilding it on another host
|
||||
|
||||
**Cons:**
|
||||
- Very niche, smaller community
|
||||
- Different mental model from current workflow
|
||||
- Only NixOS guests (no Ubuntu, FreeBSD, etc.)
|
||||
- No Terraform integration
|
||||
- No migration support
|
||||
- Less isolation than full QEMU VMs
|
||||
- Would need to learn a new deployment pattern
|
||||
|
||||
## Comparison
|
||||
|
||||
| Criteria | Incus | libvirt | microvm.nix |
|
||||
|----------|-------|---------|-------------|
|
||||
| **Workflow similarity** | Medium | High | Low |
|
||||
| **Terraform support** | Yes (lxd/incus provider) | Yes (mature provider) | No |
|
||||
| **NixOS module** | Yes | Yes | Yes |
|
||||
| **Containers + VMs** | Both | VMs only | VMs only |
|
||||
| **Non-NixOS guests** | Yes | Yes | No |
|
||||
| **Live migration** | Built-in (first-class) | Yes (manual setup) | No |
|
||||
| **Offline migration** | Built-in | Yes (manual setup) | No (rebuild) |
|
||||
| **Clustering** | Built-in | Manual | No |
|
||||
| **Learning curve** | Medium | Low | Medium |
|
||||
| **Community/maturity** | Growing | Very mature | Niche |
|
||||
| **Overhead** | Low | Minimal | Minimal |
|
||||
|
||||
## Recommendation
|
||||
|
||||
Start with **Incus**. Migration and clustering are key requirements:
|
||||
- Built-in clustering makes two PN51s a proper hypervisor pool
|
||||
- Live and offline migration are first-class operations, similar to Proxmox
|
||||
- Can move VMs between hosts for maintenance (kernel updates, hardware work) without downtime
|
||||
- Supports both containers and VMs — flexibility for future use
|
||||
- Terraform provider exists (less mature than libvirt's, but functional)
|
||||
- REST API enables automation beyond what Terraform covers
|
||||
|
||||
libvirt could achieve similar results but requires significantly more manual setup for migration and has no clustering awareness. For a two-node setup where migration is a priority, Incus provides much more out of the box.
|
||||
|
||||
**microvm.nix** is off the table given the migration requirement.
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Single-Node Setup (on one PN51)
|
||||
|
||||
1. Enable `virtualisation.incus` on pn01 (or whichever is stable)
|
||||
2. Initialize Incus (`incus admin init`) — configure storage pool (local NVMe) and network bridge (see the preseed sketch after this list)
|
||||
3. Configure bridge networking for VM traffic on VLAN 12
|
||||
4. Build a NixOS VM image and import it into Incus
|
||||
5. Create a test VM manually with `incus launch` to validate the setup
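
Steps 2-3 could also be expressed declaratively through the module's `preseed` option; a hedged sketch (pool and bridge names, storage driver, and the VLAN 12 uplink handling are assumptions):

```nix
virtualisation.incus = {
  enable = true;
  preseed = {
    networks = [{
      name = "incusbr0";
      type = "bridge";
      config."ipv4.address" = "none";  # bridged onto VLAN 12 upstream, no NAT
    }];
    storage_pools = [{
      name = "local";
      driver = "dir";                  # or zfs/btrfs on the local NVMe
      config.source = "/var/lib/incus/storage-pools/local";
    }];
    profiles = [{
      name = "default";
      devices = {
        eth0 = { type = "nic"; network = "incusbr0"; name = "eth0"; };
        root = { type = "disk"; pool = "local"; path = "/"; };
      };
    }];
  };
};
```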
|
||||
|
||||
### Phase 2: Two-Node Cluster (PN51s only)
|
||||
|
||||
1. Enable Incus on the second PN51
|
||||
2. Form a cluster between both nodes
|
||||
3. Configure shared storage (NFS from NAS, or Ceph if warranted)
|
||||
4. Test offline migration: `incus move <vm> --target <other-node>`
|
||||
5. Test live migration with shared storage
|
||||
6. CPU compatibility is not an issue here — both nodes have identical Ryzen 7 5700U CPUs
|
||||
|
||||
### Phase 3: Terraform Integration
|
||||
|
||||
1. Add Incus Terraform provider to `terraform/`
|
||||
2. Define a test VM in OpenTofu (cloud-init, static IP, vault provisioning)
|
||||
3. Verify the full pipeline: tofu apply -> VM boots -> cloud-init -> vault credentials -> NixOS rebuild
|
||||
4. Compare workflow with existing Proxmox pipeline
|
||||
|
||||
### Phase 4: Evaluate and Expand
|
||||
|
||||
- Is the workflow comparable to Proxmox?
|
||||
- Migration reliability — does live migration work cleanly?
|
||||
- Performance overhead acceptable on Ryzen 5700U?
|
||||
- Worth migrating some test-tier VMs from Proxmox?
|
||||
- Could ns1/ns2 run on separate Incus nodes instead of the single Proxmox host?
|
||||
|
||||
### Phase 5: Proxmox Replacement (optional)
|
||||
|
||||
If Incus works well on the PN51s, consider replacing Proxmox entirely for a three-node cluster.
|
||||
|
||||
**CPU compatibility for mixed cluster:**
|
||||
|
||||
| Node | CPU | Architecture | x86-64-v3 |
|
||||
|------|-----|-------------|-----------|
|
||||
| Proxmox host | AMD Ryzen 9 3900X (12C/24T) | Zen 2 | Yes |
|
||||
| pn01 | AMD Ryzen 7 5700U (8C/16T) | Zen 3 | Yes |
|
||||
| pn02 | AMD Ryzen 7 5700U (8C/16T) | Zen 3 | Yes |
|
||||
|
||||
All three CPUs are AMD and support `x86-64-v3`. The 3900X (Zen 2) is the oldest, so it defines the feature ceiling — but `x86-64-v3` is well within its capabilities. VMs configured with `x86-64-v3` can migrate freely between all three nodes.
|
||||
|
||||
Being all-AMD also avoids the trickier Intel/AMD cross-vendor migration edge cases (different CPUID layouts, virtualization extensions).
|
||||
|
||||
The 3900X (12C/24T) would be the most powerful node, making it the natural home for heavier workloads, with the PN51s (8C/16T each) handling lighter VMs or serving as migration targets during maintenance.
|
||||
|
||||
Steps:
|
||||
1. Install NixOS + Incus on the Proxmox host (or a replacement machine)
|
||||
2. Join it to the existing Incus cluster with `x86-64-v3` CPU baseline
|
||||
3. Migrate VMs from Proxmox to the Incus cluster
|
||||
4. Decommission Proxmox
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- [ ] PN51 units pass stability testing (see `pn51-stability.md`)
|
||||
- [ ] Decide which unit to use first (pn01 preferred — 64GB RAM, NVMe, currently more stable)
|
||||
|
||||
## Open Questions
|
||||
|
||||
- How to handle VM storage? Local NVMe, NFS from NAS, or Ceph between the two nodes?
|
||||
- Network topology: bridge on VLAN 12, or trunk multiple VLANs to the PN51?
|
||||
- Should VMs be on the same VLAN as the hypervisor host, or separate?
|
||||
- Incus clustering with only two nodes — any quorum issues? Three nodes (with Proxmox replacement) would solve this
|
||||
- How to handle NixOS guest images? Build with nixos-generators, or use Incus image builder?
|
||||
- ~~What CPU does the current Proxmox host have?~~ AMD Ryzen 9 3900X (Zen 2) — `x86-64-v3` confirmed, all-AMD cluster
|
||||
- If replacing Proxmox: migrate VMs first, or fresh start and rebuild?
|
||||
docs/plans/nixos-router.md (new file, +182)
@@ -0,0 +1,182 @@
|
||||
# NixOS Router — Replace EdgeRouter
|
||||
|
||||
Replace the aging Ubiquiti EdgeRouter (gw, 10.69.10.1) with a NixOS-based router.
|
||||
The EdgeRouter is suspected to be a throughput bottleneck. A NixOS router integrates
|
||||
naturally with the existing fleet: same config management, same monitoring pipeline,
|
||||
same deployment workflow.
|
||||
|
||||
## Goals
|
||||
|
||||
- Eliminate the EdgeRouter throughput bottleneck
|
||||
- Full integration with existing monitoring (node-exporter, promtail, Prometheus, Loki)
|
||||
- Declarative firewall and routing config managed in the flake
|
||||
- Inter-VLAN routing for all existing subnets
|
||||
- DHCP server for client subnets
|
||||
- NetFlow/traffic accounting for future ntopng integration
|
||||
- Foundation for WireGuard remote access (see remote-access.md)
|
||||
|
||||
## Current Network Topology
|
||||
|
||||
**Subnets (known VLANs):**
|
||||
| VLAN/Subnet | Purpose | Notable hosts |
|
||||
|----------------|------------------|----------------------------------------|
|
||||
| 10.69.10.0/24 | Gateway | gw (10.69.10.1) |
|
||||
| 10.69.12.0/24 | Core services | nas, pve1, arr jails, restic |
|
||||
| 10.69.13.0/24 | Infrastructure | All NixOS servers (static IPs) |
|
||||
| 10.69.22.0/24 | WLAN | unifi-ctrl |
|
||||
| 10.69.30.0/24 | Workstations | gunter |
|
||||
| 10.69.31.0/24 | Media | media |
|
||||
| 10.69.99.0/24 | Management | sw1 (MikroTik CRS326-24G-2S+) |
|
||||
|
||||
**DNS:** ns1 (10.69.13.5) and ns2 (10.69.13.6) handle all resolution. Upstream is
|
||||
Cloudflare/Google over DoT via Unbound.
|
||||
|
||||
**Switch:** MikroTik CRS326-24G-2S+ — L2 switching with VLAN trunking. Capable of
|
||||
L3 routing via RouterOS but not ideal for sustained routing throughput.
|
||||
|
||||
## Hardware
|
||||
|
||||
Needs a small x86 box with:
|
||||
- At least 2 NICs (WAN + LAN trunk). Dual 2.5GbE preferred.
|
||||
- Enough CPU for nftables NAT at line rate (any modern x86 is fine)
|
||||
- 4-8 GB RAM (plenty for routing + DHCP + NetFlow accounting)
|
||||
- Low power consumption, fanless preferred for always-on use
|
||||
|
||||
**Leading candidate:** [Topton Solid Mini PC](https://www.aliexpress.com/item/1005008981218625.html)
|
||||
with Intel i3-N300 (8 E-cores), 2x10GbE SFP+ + 3x2.5GbE (~NOK 3000 barebones). The N300
|
||||
gives headroom for ntopng DPI and potential Suricata IDS without being overkill.
|
||||
|
||||
### Hardware Alternatives
|
||||
|
||||
Domestic availability for firewall mini PCs is limited — likely ordering from AliExpress.
|
||||
|
||||
Key things to verify:
|
||||
- NIC chipset: Intel i225-V/i226-V preferred over Realtek for Linux driver support
|
||||
- RAM/storage: some listings are barebones, check what's included
|
||||
- Import duties: factor in ~25% on top of listing price
|
||||
|
||||
| Option | NICs | Notes | Price |
|
||||
|--------|------|-------|-------|
|
||||
| [Topton Solid Firewall Router](https://www.aliexpress.com/item/1005008059819023.html) | 2x10GbE SFP+, 4x2.5GbE | No RAM/SSD, only Intel N150 available currently | ~NOK 2500 |
|
||||
| [Topton Solid Mini PC](https://www.aliexpress.com/item/1005008981218625.html) | 2x10GbE SFP+, 3x2.5GbE | No RAM/SSD, only Intel i3-N300 available currently | ~NOK 3000 |
|
||||
| [MINISFORUM MS-01](https://www.aliexpress.com/item/1005007308262492.html) | 2x10GbE SFP+, 2x2.5GbE | No RAM/SSD, i5-12600H | ~NOK 4500 |
|
||||
|
||||
The LAN port would carry a VLAN trunk to the MikroTik switch, with sub-interfaces
|
||||
for each VLAN. WAN port connects to the ISP uplink.
|
||||
|
||||
## NixOS Configuration
|
||||
|
||||
### Stability Policy
|
||||
|
||||
The router is treated differently from the rest of the fleet:
|
||||
- **No auto-upgrade** — `system.autoUpgrade.enable = false`
|
||||
- **No homelab-deploy listener** — `homelab.deploy.enable = false`
|
||||
- **Manual updates only** — update every few months, test-build first
|
||||
- **Use `nixos-rebuild boot`** — changes take effect on next deliberate reboot
|
||||
- **Tier: prod, priority: high** — alerts treated with highest priority
|
||||
|
||||
### Core Services
|
||||
|
||||
**Routing & NAT:**
|
||||
- `systemd-networkd` for all interface config (consistent with rest of fleet)
|
||||
- VLAN sub-interfaces on the LAN trunk (one per subnet)
|
||||
- `networking.nftables` for stateful firewall and NAT
|
||||
- IP forwarding enabled (`net.ipv4.ip_forward = 1`)
|
||||
- Masquerade outbound traffic on WAN interface
|
||||
|
||||
**DHCP:**
|
||||
- Kea or dnsmasq for DHCP on client subnets (WLAN, workstations, media)
|
||||
- Infrastructure subnet (10.69.13.0/24) stays static — no DHCP needed
|
||||
- Static leases for known devices
|
||||
|
||||
**Firewall (nftables):**
|
||||
- Default deny between VLANs
|
||||
- Explicit allow rules for known cross-VLAN traffic:
|
||||
- All subnets → ns1/ns2 (DNS)
|
||||
- All subnets → monitoring01 (metrics/logs)
|
||||
- Infrastructure → all (management access)
|
||||
- Workstations → media, core services
|
||||
- NAT masquerade on WAN (see the sketch after this list)
|
||||
- Rate limiting on WAN-facing services
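
A minimal sketch tying the routing, NAT, and firewall items above together (interface names, VLAN IDs, addresses, and rules are illustrative only, not a proposed ruleset):

```nix
{
  boot.kernel.sysctl."net.ipv4.ip_forward" = 1;

  systemd.network = {
    enable = true;
    netdevs."20-vlan13" = {
      netdevConfig = { Name = "vlan13"; Kind = "vlan"; };
      vlanConfig.Id = 13;
    };
    networks."10-lan" = {
      matchConfig.Name = "enp2s0";       # assumed LAN trunk NIC
      vlan = [ "vlan13" ];
    };
    networks."20-vlan13" = {
      matchConfig.Name = "vlan13";
      address = [ "10.69.13.1/24" ];     # gateway address for that subnet
    };
  };

  networking.nftables = {
    enable = true;
    ruleset = ''
      table inet filter {
        chain forward {
          type filter hook forward priority 0; policy drop;
          ct state established,related accept
          ip daddr { 10.69.13.5, 10.69.13.6 } udp dport 53 accept  # all -> DNS
          iifname "vlan13" accept                                   # infra -> all
        }
      }
      table ip nat {
        chain postrouting {
          type nat hook postrouting priority srcnat;
          oifname "enp1s0" masquerade                               # assumed WAN NIC
        }
      }
    '';
  };
}
```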
|
||||
|
||||
**Traffic Accounting:**
|
||||
- nftables flow accounting or softflowd for NetFlow export
|
||||
- Export to future ntopng instance (see new-services.md)
|
||||
|
||||
**IDS/IPS (future consideration):**
|
||||
- Suricata for inline intrusion detection/prevention on the WAN interface
|
||||
- Signature-based threat detection, protocol anomaly detection
|
||||
- CPU-intensive — feasible at typical home internet speeds (500Mbps-1Gbps) on the N300
|
||||
- Not a day-one requirement, but the hardware should support it
|
||||
|
||||
### Monitoring Integration
|
||||
|
||||
Since this is a NixOS host in the flake, it gets the standard monitoring stack for free:
|
||||
- node-exporter for system metrics (CPU, memory, NIC throughput per interface)
|
||||
- promtail shipping logs to Loki
|
||||
- Prometheus scrape target auto-registration
|
||||
- Alertmanager alerts for host-down, high CPU, etc.
|
||||
|
||||
Additional router-specific monitoring:
|
||||
- Per-VLAN interface traffic metrics via node-exporter (automatic for all interfaces)
|
||||
- NAT connection tracking table size
|
||||
- WAN uplink status and throughput
|
||||
- DHCP lease metrics (if Kea, it has a Prometheus exporter)
|
||||
|
||||
This is a significant advantage over the EdgeRouter — full observability through
|
||||
the existing Grafana dashboards and Loki log search, debuggable via the monitoring
|
||||
MCP tools.
|
||||
|
||||
### WireGuard Integration
|
||||
|
||||
The remote access plan (remote-access.md) currently proposes a separate `extgw01`
|
||||
gateway host. With a NixOS router, there's a decision to make:
|
||||
|
||||
**Option A:** WireGuard terminates on the router itself. Simplest topology — the
|
||||
router is already the gateway, so VPN traffic doesn't need extra hops or firewall
|
||||
rules. But adds complexity to the router, which should stay simple.
|
||||
|
||||
**Option B:** Keep extgw01 as a separate host (original plan). Router just routes
|
||||
traffic to it. Better separation of concerns, router stays minimal.
|
||||
|
||||
Recommendation: Start with option B (keep it separate). The router should do routing
|
||||
and nothing else. WireGuard can move to the router later if extgw01 feels redundant.
|
||||
|
||||
## Migration Plan
|
||||
|
||||
### Phase 1: Build and lab test
|
||||
- Acquire hardware
|
||||
- Create host config in the flake (routing, NAT, DHCP, firewall)
|
||||
- Test-build on workstation: `nix build .#nixosConfigurations.router01.config.system.build.toplevel`
|
||||
- Lab test with a temporary setup if possible (two NICs, isolated VLAN)
|
||||
|
||||
### Phase 2: Prepare cutover
|
||||
- Pre-configure the MikroTik switch trunk port for the new router
|
||||
- Document current EdgeRouter config (port forwarding, NAT rules, DHCP leases)
|
||||
- Replicate all rules in the NixOS config
|
||||
- Verify DNS, DHCP, and inter-VLAN routing work in test
|
||||
|
||||
### Phase 3: Cutover
|
||||
- Schedule a maintenance window (brief downtime expected)
|
||||
- Swap WAN cable from EdgeRouter to new router
|
||||
- Swap LAN trunk from EdgeRouter to new router
|
||||
- Verify connectivity from each VLAN
|
||||
- Verify internet access, DNS resolution, inter-VLAN routing
|
||||
- Monitor via Prometheus/Loki (immediately available since it's a fleet host)
|
||||
|
||||
### Phase 4: Decommission EdgeRouter
|
||||
- Keep EdgeRouter available as fallback for a few weeks
|
||||
- Remove `gw` entry from external-hosts.nix, replace with flake-managed host
|
||||
- Update any references to 10.69.10.1 if the router IP changes
|
||||
|
||||
## Open Questions
|
||||
|
||||
- **Router IP:** Keep 10.69.10.1 or move to a different address? Each VLAN
|
||||
sub-interface needs an IP (the gateway address for that subnet).
|
||||
- **ISP uplink:** What type of WAN connection? PPPoE, DHCP, static IP?
|
||||
- **Port forwarding:** What ports are currently forwarded on the EdgeRouter?
|
||||
These need to be replicated in nftables.
|
||||
- **DHCP scope:** Which subnets currently get DHCP from the EdgeRouter vs
|
||||
other sources (UniFi controller for WLAN?)?
|
||||
- **UPnP/NAT-PMP:** Needed for any devices? (gaming consoles, etc.)
|
||||
- **Hardware preference:** Fanless mini PC budget and preferred vendor?
|
||||
docs/plans/openstack-nixos-image.md (new file, +104)
@@ -0,0 +1,104 @@
|
||||
# NixOS OpenStack Image
|
||||
|
||||
## Overview
|
||||
|
||||
Build and upload a NixOS base image to the OpenStack cluster at work, enabling NixOS-based VPS instances to replace the current Debian+Podman setup. This image will serve as the foundation for multiple external services:
|
||||
|
||||
- **Forgejo** (replacing Gitea on docker2)
|
||||
- **WireGuard gateway** (replacing docker2's tunnel role, feeding into the remote-access plan)
|
||||
- Any future externally-hosted services
|
||||
|
||||
## Current State
|
||||
|
||||
- VPS hosting runs on an OpenStack cluster with a personal quota
|
||||
- Current VPS (`docker2.t-juice.club`) runs Debian with Podman containers
|
||||
- Homelab already has a working Proxmox image pipeline: `template2` builds via `nixos-rebuild build-image --image-variant proxmox`, deployed via Ansible
|
||||
- nixpkgs has a built-in `openstack` image variant in the same `image.modules` system used for Proxmox
|
||||
|
||||
## Decisions
|
||||
|
||||
- **No cloud-init dependency** - SSH key baked into the image, no need for metadata service
|
||||
- **No bootstrap script** - VPS deployments are infrequent; manual `nixos-rebuild` after first boot is fine
|
||||
- **No Vault access** - secrets handled manually until WireGuard access is set up (see remote-access plan)
|
||||
- **Separate from homelab services** - no logging/metrics integration initially; revisit after remote-access WireGuard is in place
|
||||
- **Repo placement TBD** - keep in this flake for now for convenience, but external hosts may move to a separate flake later since they can't use most shared `system/` modules (no Vault, no internal DNS, no Promtail)
|
||||
- **OpenStack CLI in devshell** - add `openstackclient` package; credentials (`clouds.yaml`) stay outside the repo
|
||||
- **Parallel deployment** - new Forgejo instance runs alongside docker2 initially, then CNAME moves over
|
||||
|
||||
## Approach
|
||||
|
||||
Follow the same pattern as the Proxmox template (`hosts/template2`), but targeting OpenStack's qcow2 format.
|
||||
|
||||
### What nixpkgs provides
|
||||
|
||||
The `image.modules.openstack` module produces a qcow2 image with:
|
||||
- `openstack-config.nix`: EC2 metadata fetcher, SSH enabled, GRUB bootloader, serial console, auto-growing root partition
|
||||
- `qemu-guest.nix` profile (virtio drivers)
|
||||
- ext4 root filesystem with `autoResize`
|
||||
|
||||
### What we need to customize
|
||||
|
||||
The stock OpenStack image pulls SSH keys and hostname from EC2-style metadata. Since we're baking the SSH key into the image, we need a simpler configuration (sketched after the list below):
|
||||
|
||||
- SSH authorized keys baked into the image
|
||||
- Base packages (age, vim, wget, git)
|
||||
- Nix substituters (`cache.nixos.org` only - internal cache not reachable)
|
||||
- systemd-networkd with DHCP
|
||||
- GRUB bootloader
|
||||
- Firewall enabled (public-facing host)
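
A minimal sketch of that simpler configuration (the SSH key, package list, and option details are placeholders; the image-variant module still provides the bootloader and filesystem pieces):

```nix
{ pkgs, ... }:
{
  services.openssh.enable = true;
  users.users.root.openssh.authorizedKeys.keys = [
    "ssh-ed25519 AAAA...placeholder admin@workstation"
  ];

  environment.systemPackages = with pkgs; [ age vim wget git ];

  networking.useNetworkd = true;
  networking.useDHCP = true;
  networking.firewall.enable = true;

  nix.settings.substituters = [ "https://cache.nixos.org" ];
}
```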
|
||||
|
||||
### Differences from template2
|
||||
|
||||
| Aspect | template2 (Proxmox) | openstack-template (OpenStack) |
|
||||
|--------|---------------------|-------------------------------|
|
||||
| Image format | VMA (`.vma.zst`) | qcow2 (`.qcow2`) |
|
||||
| Image variant | `proxmox` | `openstack` |
|
||||
| Cloud-init | ConfigDrive + NoCloud | Not used (SSH key baked in) |
|
||||
| Nix cache | Internal + nixos.org | `cache.nixos.org` only |
|
||||
| Vault | AppRole via wrapped token | None |
|
||||
| Bootstrap | Automatic nixos-rebuild on first boot | Manual |
|
||||
| Network | Internal DHCP | OpenStack DHCP |
|
||||
| DNS | Internal ns1/ns2 | Public DNS |
|
||||
| Firewall | Disabled (trusted network) | Enabled |
|
||||
| System modules | Full `../../system` import | Minimal (sshd, packages only) |
|
||||
|
||||
## Implementation Steps
|
||||
|
||||
### Phase 1: Build the image
|
||||
|
||||
1. Create `hosts/openstack-template/` with minimal configuration
|
||||
- `default.nix` - imports (only sshd and packages from `system/`, not the full set)
|
||||
- `configuration.nix` - base config: SSH key, DHCP, GRUB, base packages, firewall on
|
||||
- `hardware-configuration.nix` - qemu-guest profile with virtio drivers
|
||||
- Exclude from DNS and monitoring (`homelab.dns.enable = false`, `homelab.monitoring.enable = false`)
|
||||
- May need to override parts of `image.modules.openstack` to disable the EC2 metadata fetcher if it causes boot delays
|
||||
2. Build with `nixos-rebuild build-image --image-variant openstack --flake .#openstack-template`
|
||||
3. Verify the qcow2 image is produced in `result/`
|
||||
|
||||
### Phase 2: Upload and test
|
||||
|
||||
1. Add `openstackclient` to the devshell
|
||||
2. Upload image: `openstack image create --disk-format qcow2 --file result/<image>.qcow2 nixos-template`
|
||||
3. Boot a test instance from the image (see the sketch after this list)
|
||||
4. Verify: SSH access works, DHCP networking, Nix builds work
|
||||
5. Test manual `nixos-rebuild switch --flake` against the instance
|
||||
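For step 3 above, booting and reaching a throwaway instance might look like this (flavor and network names are placeholders for whatever the quota actually provides):

```bash
# Boot a test instance from the uploaded image (flavor/network names are placeholders)
openstack server create \
  --image nixos-template \
  --flavor m1.small \
  --network public \
  nixos-test

# Check status and the assigned address, then SSH in with the baked-in key
openstack server show nixos-test -f value -c status -c addresses
ssh root@<instance-ip>
```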
|
||||
### Phase 3: Automation (optional, later)
|
||||
|
||||
Consider an Ansible playbook similar to `build-and-deploy-template.yml` for image builds + uploads. Low priority since this will be done rarely.
|
||||
|
||||
## Open Questions
|
||||
|
||||
- [ ] Should external VPS hosts eventually move to a separate flake? (Depends on how different they end up being from homelab hosts)
|
||||
- [ ] Will the stock `openstack-config.nix` metadata fetcher cause boot delays/errors if the metadata service isn't reachable? May need to disable it.
|
||||
- [ ] **Flavor selection** - investigate what flavors are available in the quota. The standard small flavors likely have insufficient root disk for a NixOS host (Nix store grows fast). Options:
|
||||
- Use a larger flavor with adequate root disk
|
||||
- Create a custom flavor (if permissions allow)
|
||||
- Cinder block storage is an option in theory, but was very slow last time it was tested - avoid if possible
|
||||
- [ ] Consolidation opportunity - currently running multiple smaller VMs on OpenStack. Could a single larger NixOS VM replace several of them?
|
||||
|
||||
## Notes
|
||||
|
||||
- `nixos-rebuild build-image --image-variant openstack` uses the same `image.modules` system as Proxmox
|
||||
- nixpkgs also has an `openstack-zfs` variant if ZFS root is ever wanted
|
||||
- The stock OpenStack module imports `ec2-data.nix` and `amazon-init.nix` - these may need to be disabled or overridden if they cause issues without a metadata service
|
||||
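One possible (untested) way to drop those modules, assuming the standard nixpkgs module paths, is `disabledModules`:

```nix
# Untested sketch - module paths are assumptions, verify against the pinned nixpkgs
{ ... }:
{
  disabledModules = [
    "virtualisation/ec2-data.nix"
    "virtualisation/amazon-init.nix"
  ];
}
```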
231
docs/plans/pn51-stability.md
Normal file
@@ -0,0 +1,231 @@
|
||||
# ASUS PN51 Stability Testing
|
||||
|
||||
## Overview
|
||||
|
||||
Two ASUS PN51-E1 mini PCs (Ryzen 7 5700U) purchased years ago but shelved due to stability issues. Revisiting them to potentially add to the homelab.
|
||||
|
||||
## Hardware
|
||||
|
||||
| | pn01 (10.69.12.60) | pn02 (10.69.12.61) |
|
||||
|---|---|---|
|
||||
| **CPU** | AMD Ryzen 7 5700U (8C/16T) | AMD Ryzen 7 5700U (8C/16T) |
|
||||
| **RAM** | 2x 32GB DDR4 SO-DIMM (64GB) | 1x 32GB DDR4 SO-DIMM (32GB) |
|
||||
| **Storage** | 1TB NVMe | 1TB Samsung 870 EVO (SATA SSD) |
|
||||
| **BIOS** | 0508 (2023-11-08) | Updated 2026-02-21 (latest from ASUS) |
|
||||
|
||||
## Original Issues
|
||||
|
||||
- **pn01**: Would boot but freeze randomly after some time. No console errors, completely unresponsive. memtest86 passed.
|
||||
- **pn02**: Had trouble booting — would start loading kernel from installer USB then instantly reboot. When it did boot, would also freeze randomly.
|
||||
|
||||
## Debugging Steps
|
||||
|
||||
### 2026-02-21: Initial Setup
|
||||
|
||||
1. **Disabled fTPM** (labeled "Security Device" in ASUS BIOS) on both units
|
||||
- AMD Ryzen 5000 series had a known fTPM bug causing random hard freezes with no console output
|
||||
- Both units booted the NixOS installer successfully after this change
|
||||
2. Installed NixOS on both, added to repo as `pn01` and `pn02` on VLAN 12
|
||||
3. Configured monitoring (node-exporter, promtail, nixos-exporter)
|
||||
|
||||
### 2026-02-21: pn02 First Freeze
|
||||
|
||||
- pn02 froze approximately 1 hour after boot
|
||||
- All three Prometheus targets went down simultaneously — hard freeze, not graceful shutdown
|
||||
- Journal on next boot: `system.journal corrupted or uncleanly shut down`
|
||||
- Kernel warnings from boot log before freeze:
|
||||
- **TSC clocksource unstable**: `Marking clocksource 'tsc' as unstable because the skew is too large` — TSC skewing ~3.8ms over 500ms relative to HPET watchdog
|
||||
- **AMD PSP error**: `psp gfx command LOAD_TA(0x1) failed and response status is (0x7)` — Platform Security Processor failing to load trusted application
|
||||
- pn01 did not show these warnings on this particular boot, but has shown them historically (see below)
|
||||
|
||||
### 2026-02-21: pn02 BIOS Update
|
||||
|
||||
- Updated pn02 BIOS to latest version from ASUS website
|
||||
- **TSC still unstable** after BIOS update — same ~3.8ms skew
|
||||
- **PSP LOAD_TA still failing** after BIOS update
|
||||
- Monitoring back up, letting it run to see if freeze recurs
|
||||
|
||||
### 2026-02-22: TSC/PSP Confirmed on Both Units
|
||||
|
||||
- Checked kernel logs after ~9 hours uptime — both units still running
|
||||
- **pn01 now shows TSC unstable and PSP LOAD_TA failure** on this boot (same ~3.8ms TSC skew, same PSP error)
|
||||
- pn01 had these same issues historically when tested years ago — the earlier clean boot was just lucky TSC calibration timing
|
||||
- **Conclusion**: TSC instability and PSP LOAD_TA are platform-level quirks of the PN51-E1 / Ryzen 5700U, present on both units
|
||||
- The kernel handles TSC instability gracefully (falls back to HPET), and PSP LOAD_TA is non-fatal
|
||||
- Neither issue is likely the cause of the hard freezes — the fTPM bug remains the primary suspect
|
||||
|
||||
### 2026-02-22: Stress Test (1 hour)
|
||||
|
||||
- Ran `stress-ng --cpu 16 --vm 2 --vm-bytes 8G --timeout 1h` on both units
|
||||
- CPU temps peaked at ~85°C, settled to ~80°C sustained (throttle limit is 105°C)
|
||||
- Both survived the full hour with no freezes, no MCE errors, no kernel issues
|
||||
- No concerning log entries during or after the test
|
||||
|
||||
### 2026-02-22: TSC Runtime Switch Test
|
||||
|
||||
- Attempted to switch clocksource back to TSC at runtime on pn01:
|
||||
```
|
||||
echo tsc > /sys/devices/system/clocksource/clocksource0/current_clocksource
|
||||
```
|
||||
- Kernel watchdog immediately reverted to HPET — TSC skew is ongoing, not just a boot-time issue
|
||||
- **Conclusion**: TSC is genuinely unstable on the PN51-E1 platform. HPET is the correct clocksource.
|
||||
- For virtualization (Incus), this means guest VMs will use HPET-backed timing. Performance impact is minimal for typical server workloads (DNS, monitoring, light services) but would matter for latency-sensitive applications.
|
||||
|
||||
### 2026-02-22: BIOS Tweaks (Both Units)
|
||||
|
||||
- Disabled ErP Ready on both (EU power efficiency mode — aggressively cuts power in idle)
|
||||
- Disabled WiFi and Bluetooth in BIOS on both
|
||||
- **TSC still unstable** after these changes — same ~3.8ms skew on both units
|
||||
- ErP/power states are not the cause of the TSC issue
|
||||
|
||||
### 2026-02-22: pn02 Second Freeze
|
||||
|
||||
- pn02 froze again ~5.5 hours after boot (at idle, not under load)
|
||||
- All Prometheus targets down simultaneously — same hard freeze pattern
|
||||
- Last log entry was normal nix-daemon activity — zero warning/error logs before crash
|
||||
- Survived the 1h stress test earlier but froze at idle later — not thermal
|
||||
- pn01 remains stable throughout
|
||||
- **Action**: Blacklisted `amdgpu` kernel module on pn02 (`boot.blacklistedKernelModules = [ "amdgpu" ]`) to eliminate GPU/PSP firmware interactions as a cause. This removes local console output, but the host is managed over SSH anyway.
|
||||
- **Action**: Added diagnostic/recovery config to pn02:
|
||||
- `panic=10` + `nmi_watchdog=1` kernel params — auto-reboot after 10s on panic
|
||||
- `softlockup_panic` + `hardlockup_panic` sysctls — convert lockups to panics with stack traces
|
||||
- `hardware.rasdaemon` with recording — logs hardware errors (MCE, PCIe AER, memory) to sqlite database, survives reboots
|
||||
- Check recorded errors: `ras-mc-ctl --summary`, `ras-mc-ctl --errors`
|
||||
|
||||
## Benign Kernel Errors (Both Units)
|
||||
|
||||
These appear on both units and can be ignored:
|
||||
- `clocksource: Marking clocksource 'tsc' as unstable` — TSC skew vs HPET, kernel falls back gracefully. Platform-level quirk on PN51-E1, not always reproducible on every boot.
|
||||
- `psp gfx command LOAD_TA(0x1) failed` — AMD PSP firmware error, non-fatal. Present on both units across all BIOS versions.
|
||||
- `pcie_mp2_amd: amd_sfh_hid_client_init failed err -95` — AMD Sensor Fusion Hub, no sensors connected
|
||||
- `Bluetooth: hci0: Reading supported features failed` — Bluetooth init quirk
|
||||
- `Serial bus multi instantiate pseudo device driver INT3515:00: error -ENXIO` — unused serial bus device
|
||||
- `snd_hda_intel: no codecs found` — no audio device connected, headless server
|
||||
- `ata2.00: supports DRM functions and may not be fully accessible` — Samsung SSD DRM quirk (pn02 only)
|
||||
|
||||
### 2026-02-23: processor.max_cstate=1 and Proxmox Forums
|
||||
|
||||
- Found a thread on the Proxmox forums about PN51 units with similar freeze issues
|
||||
- Many users reporting identical symptoms — random hard freezes, no log evidence
|
||||
- No conclusive fix. Some have frequent freezes, others only a few times a month
|
||||
- Some reported BIOS updates helped, but results inconsistent
|
||||
- Added `processor.max_cstate=1` kernel parameter to pn02 — limits CPU to C1 halt state, preventing deep C-state sleep transitions that may trigger freezes on AMD mobile chips
|
||||
- Also applied: amdgpu blacklist, panic=10, nmi_watchdog=1, softlockup/hardlockup panic, rasdaemon
|
||||
|
||||
### 2026-02-23: logind D-Bus Deadlock (pn02)
|
||||
|
||||
- node-exporter alert fired — but host was NOT frozen
|
||||
- logind was running (PID 871) but deadlocked on D-Bus — not responding to `org.freedesktop.login1` requests
|
||||
- Every node-exporter scrape blocked for 25s waiting for logind, causing scrape timeouts
|
||||
- Likely related to amdgpu blacklist — no DRM device means no graphical seat, logind may have deadlocked during seat enumeration at boot
|
||||
- Fix: `systemctl restart systemd-logind` + `systemctl restart prometheus-node-exporter`
|
||||
- After restart, logind responded normally and reported seat0
|
||||
|
||||
### 2026-02-27: pn02 Third Freeze
|
||||
|
||||
- pn02 crashed again after ~2 days 21 hours uptime (longest run so far)
|
||||
- Evidence of crash:
|
||||
- Journal file corrupted: `system.journal corrupted or uncleanly shut down`
|
||||
- Boot partition fsck: `Dirty bit is set. Fs was not properly unmounted`
|
||||
- No orderly shutdown logs from previous boot
|
||||
- No auto-upgrade triggered
|
||||
- **NMI watchdog did NOT fire** — no kernel panic logged. This is a true hard lockup below NMI level
|
||||
- **rasdaemon recorded nothing** — no MCE, AER, or memory errors in the sqlite database
|
||||
- **Positive**: The system auto-rebooted this time (likely hardware watchdog), unlike previous freezes that required manual power cycle
|
||||
- `processor.max_cstate=1` may have extended uptime (2d21h vs previous 1h and 5.5h) but did not prevent the freeze
|
||||
|
||||
### 2026-02-27 to 2026-03-03: Relative Stability
|
||||
|
||||
- pn02 ran without crashes for approximately one week after the third freeze
|
||||
- pn01 continued to be completely stable throughout this period
|
||||
- Auto-upgrade reboots continued daily (~4am) on both units — these are planned and healthy
|
||||
|
||||
### 2026-03-04: pn02 Fourth Crash — sched_ext Kernel Oops (pstore captured)
|
||||
|
||||
- pn02 crashed after ~5.8 days uptime (504566s)
|
||||
- **First crash captured by pstore** — kernel oops and panic stack traces preserved across reboot
|
||||
- Journal corruption confirmed: `system.journal corrupted or uncleanly shut down`
|
||||
- **Crash location**: `RIP: 0010:set_next_task_scx+0x6e/0x210` — crash in the **sched_ext (SCX) scheduler** subsystem
|
||||
- **Call trace**: `sysvec_apic_timer_interrupt` → `cpuidle_enter_state` — crashed during CPU idle, triggered by APIC timer interrupt
|
||||
- **CR2**: `ffffffffffffff89` — dereferencing an obviously invalid kernel pointer
|
||||
- **Kernel**: 6.12.74 (NixOS 25.11)
|
||||
- **Significance**: This is the first crash with actual diagnostic output. Previous crashes were silent sub-NMI freezes. The sched_ext scheduler path is a new finding — earlier crashes were assumed to be hardware-level.
|
||||
|
||||
### 2026-03-06: pn02 Fifth Crash
|
||||
|
||||
- pn02 crashed again — journal corruption on next boot
|
||||
- No pstore data captured for this crash
|
||||
|
||||
### 2026-03-07: pn02 Sixth and Seventh Crashes — Two in One Day
|
||||
|
||||
**First crash (~11:06 UTC):**
|
||||
- ~26.6 hours uptime (95994s)
|
||||
- **pstore captured both Oops and Panic**
|
||||
- **Crash location**: Scheduler code path — `pick_next_task_fair` → `__pick_next_task`
|
||||
- **CR2**: `000000c000726000` — invalid pointer dereference
|
||||
- **Notable**: `dbus-daemon` segfaulted ~50 minutes before the kernel crash (`segfault at 0` in `libdbus-1.so.3.32.4` on CPU 0) — may indicate memory corruption preceding the kernel crash
|
||||
|
||||
**Second crash (~21:15 UTC):**
|
||||
- Journal corruption confirmed on next boot
|
||||
- No pstore data captured
|
||||
|
||||
### 2026-03-07: pn01 Status
|
||||
|
||||
- pn01 has had **zero crashes** since initial setup on Feb 21
|
||||
- Zero journal corruptions, zero pstore dumps in 30 days
|
||||
- Boot ID changes only at the planned daily auto-upgrade reboots — consistently clean shutdown/reboot cycles
|
||||
- All 8 reboots in 30 days are planned auto-upgrade reboots
|
||||
- **pn01 is fully stable**
|
||||
|
||||
## Crash Summary
|
||||
|
||||
| Date | Uptime Before Crash | Crash Type | Diagnostic Data |
|
||||
|------|---------------------|------------|-----------------|
|
||||
| Feb 21 | ~1h | Silent freeze | None — sub-NMI |
|
||||
| Feb 22 | ~5.5h | Silent freeze | None — sub-NMI |
|
||||
| Feb 27 | ~2d 21h | Silent freeze | None — sub-NMI, rasdaemon empty |
|
||||
| Mar 4 | ~5.8d | **Kernel oops** | pstore: `set_next_task_scx` (sched_ext) |
|
||||
| Mar 6 | Unknown | Crash | Journal corruption only |
|
||||
| Mar 7 | ~26.6h | **Kernel oops + panic** | pstore: `pick_next_task_fair` (scheduler) + dbus segfault |
|
||||
| Mar 7 | Unknown | Crash | Journal corruption only |
|
||||
|
||||
## Conclusion
|
||||
|
||||
**pn02 is unreliable.** After exhausting mitigations (fTPM disabled, BIOS updated, WiFi/BT disabled, ErP disabled, amdgpu blacklisted, processor.max_cstate=1, NMI watchdog, rasdaemon), the unit still crashes every few days. 26 reboots in 30 days (7 unclean crashes + daily auto-upgrade reboots).
|
||||
|
||||
The pstore crash dumps from March reveal a new dimension: at least some crashes are **kernel scheduler bugs in sched_ext**, not just silent hardware-level freezes. The `set_next_task_scx` and `pick_next_task_fair` crash sites, combined with the dbus-daemon segfault before one crash, suggest possible memory corruption that manifests in the scheduler. It's unclear whether this is:
|
||||
1. A sched_ext kernel bug exposed by the PN51's hardware quirks (unstable TSC, C-state behavior)
|
||||
2. Hardware-induced memory corruption that happens to hit scheduler data structures
|
||||
3. A pure software bug in the 6.12.74 kernel's sched_ext implementation
|
||||
|
||||
**pn01 is stable** — zero crashes in 30 days of continuous operation. Both units have identical kernel and NixOS configuration (minus pn02's diagnostic mitigations), so the difference points toward a hardware defect specific to the pn02 board.
|
||||
|
||||
## Next Steps
|
||||
|
||||
- **pn02 memtest**: Run memtest86 for 24h+ (available in systemd-boot menu). The crash signatures (userspace segfaults before kernel panics, corrupted pointers in scheduler structures) are consistent with intermittent RAM errors that a quick pass wouldn't catch. If memtest finds errors, swap the DIMM.
|
||||
- **pn02**: Consider scrapping or repurposing for non-critical workloads that tolerate random reboots (auto-recovery via hardware watchdog is now working)
|
||||
- **pn02 investigation**: Could try disabling sched_ext (`boot.kernelParams = [ "sched_ext.enabled=0" ]` or equivalent) to test whether the crashes stop — would help distinguish kernel bug from hardware defect
|
||||
- **pn01**: Continue monitoring. If it remains stable long-term, it is viable for light workloads
|
||||
- If pn01 eventually crashes, apply the same mitigations (amdgpu blacklist, max_cstate=1) to see if they help
|
||||
- For the Incus hypervisor plan: likely need different hardware. Evaluating GMKtec G3 (Intel) as an alternative. Note: mixed Intel/AMD cluster complicates live migration
|
||||
|
||||
## Diagnostics and Auto-Recovery (pn02)
|
||||
|
||||
Currently deployed on pn02:
|
||||
|
||||
```nix
|
||||
boot.blacklistedKernelModules = [ "amdgpu" ];
|
||||
boot.kernelParams = [ "panic=10" "nmi_watchdog=1" "processor.max_cstate=1" ];
|
||||
boot.kernel.sysctl."kernel.softlockup_panic" = 1;
|
||||
boot.kernel.sysctl."kernel.hardlockup_panic" = 1;
|
||||
hardware.rasdaemon.enable = true;
|
||||
hardware.rasdaemon.record = true;
|
||||
```
|
||||
|
||||
**Crash recovery is working**: pstore now captures kernel oops/panic data, and the system auto-reboots via `panic=10` or SP5100 TCO hardware watchdog.
|
||||
|
||||
**After reboot, check:**
|
||||
- `ras-mc-ctl --summary` — overview of hardware errors
|
||||
- `ras-mc-ctl --errors` — detailed error list
|
||||
- `journalctl -b -1 -p err` — kernel logs from crashed boot (if panic was logged)
|
||||
- pstore data is automatically archived by `systemd-pstore.service` and forwarded to Loki via promtail
|
||||
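The checks above can be combined into one quick post-crash routine (the pstore path assumes the default systemd-pstore archive location):

```bash
# Hardware errors recorded by rasdaemon
ras-mc-ctl --summary
ras-mc-ctl --errors

# Archived pstore dumps from the crashed boot (default systemd-pstore location)
ls -l /var/lib/systemd/pstore/
journalctl -u systemd-pstore.service -b

# Kernel errors from the previous boot, if anything was logged before the freeze
journalctl -b -1 -p err
```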
@@ -4,119 +4,118 @@
|
||||
|
||||
## Goal
|
||||
|
||||
Enable remote access to some or all homelab services from outside the internal network, without exposing anything directly to the internet.
|
||||
Enable personal remote access to selected homelab services from outside the internal network, without exposing anything directly to the internet.
|
||||
|
||||
## Current State
|
||||
|
||||
- All services are only accessible from the internal 10.69.13.x network
|
||||
- Exception: jelly01 has a WireGuard link to an external VPS
|
||||
- No services are directly exposed to the public internet
|
||||
- http-proxy has a WireGuard tunnel (`wg0`, `10.69.222.0/24`) to a VPS (`docker2.t-juice.club`) on an OpenStack cluster
|
||||
- VPS runs Traefik which proxies selected services (including Jellyfin) back through the tunnel to http-proxy's Caddy
|
||||
- No other services are directly exposed to the public internet
|
||||
|
||||
## Constraints
|
||||
## Decision: WireGuard Gateway
|
||||
|
||||
- Nothing should be directly accessible from the outside
|
||||
- Must use VPN or overlay network (no port forwarding of services)
|
||||
- Self-hosted solutions preferred over managed services
|
||||
After evaluating WireGuard gateway vs Headscale (self-hosted Tailscale), the **WireGuard gateway** approach was chosen:
|
||||
|
||||
## Options
|
||||
- Only 2 client devices (laptop + phone), so Headscale's device management UX isn't needed
|
||||
- Split DNS works fine on Linux laptop via systemd-resolved; all-or-nothing DNS on phone is acceptable for occasional use
|
||||
- Simpler infrastructure - no control server to maintain
|
||||
- Builds on existing WireGuard experience and setup
|
||||
|
||||
### 1. WireGuard Gateway (Internal Router)
|
||||
## Architecture
|
||||
|
||||
A dedicated NixOS host on the internal network with a WireGuard tunnel out to the VPS. The VPS becomes the public entry point, and the gateway routes traffic to internal services. Firewall rules on the gateway control which services are reachable.
|
||||
```mermaid
|
||||
graph TD
|
||||
clients["Laptop / Phone"]
|
||||
vps["VPS<br/>(WireGuard endpoint)"]
|
||||
extgw["extgw01<br/>(gateway + bastion)"]
|
||||
grafana["Grafana<br/>monitoring01:3000"]
|
||||
jellyfin["Jellyfin<br/>jelly01:8096"]
|
||||
arr["arr stack<br/>*-jail hosts"]
|
||||
|
||||
**Pros:**
|
||||
- Simple, well-understood technology
|
||||
- Already running WireGuard for jelly01
|
||||
- Full control over routing and firewall rules
|
||||
- Excellent NixOS module support
|
||||
- No extra dependencies
|
||||
clients -->|WireGuard| vps
|
||||
vps -->|WireGuard tunnel| extgw
|
||||
extgw -->|allowed traffic| grafana
|
||||
extgw -->|allowed traffic| jellyfin
|
||||
extgw -->|allowed traffic| arr
|
||||
```
|
||||
|
||||
**Cons:**
|
||||
- Hub-and-spoke topology (all traffic goes through VPS)
|
||||
- Manual peer management
|
||||
- Adding a new client device means editing configs on both VPS and gateway
|
||||
### Existing path (unchanged)
|
||||
|
||||
### 2. WireGuard Mesh (No Relay)
|
||||
The current public access path stays as-is:
|
||||
|
||||
Each client device connects directly to a WireGuard endpoint. Could be on the VPS which forwards to the homelab, or if there is a routable IP at home, directly to an internal host.
|
||||
```
|
||||
Internet → VPS (Traefik) → WireGuard → http-proxy (Caddy) → internal services
|
||||
```
|
||||
|
||||
**Pros:**
|
||||
- Simple and fast
|
||||
- No extra software
|
||||
This handles public Jellyfin access and any other publicly-exposed services.
|
||||
|
||||
**Cons:**
|
||||
- Manual key and endpoint management for every peer
|
||||
- Doesn't scale well
|
||||
- If behind CGNAT, still needs the VPS as intermediary
|
||||
### New path (personal VPN)
|
||||
|
||||
### 3. Headscale (Self-Hosted Tailscale)
|
||||
A separate WireGuard tunnel for personal remote access with restricted firewall rules:
|
||||
|
||||
Run a Headscale control server (on the VPS or internally) and install the Tailscale client on homelab hosts and personal devices. Gets the Tailscale mesh networking UX without depending on Tailscale's infrastructure.
|
||||
```
|
||||
Laptop/Phone → VPS (WireGuard peers) → tunnel → extgw01 (firewall) → allowed services
|
||||
```
|
||||
|
||||
**Pros:**
|
||||
- Mesh topology - devices communicate directly via NAT traversal (DERP relay as fallback)
|
||||
- Easy to add/remove devices
|
||||
- ACL support for granular access control
|
||||
- MagicDNS for service discovery
|
||||
- Good NixOS support for both headscale server and tailscale client
|
||||
- Subnet routing lets you expose the entire 10.69.13.x network or specific hosts without installing tailscale on every host
|
||||
### Access tiers
|
||||
|
||||
**Cons:**
|
||||
- More moving parts than plain WireGuard
|
||||
- Headscale is a third-party reimplementation, can lag behind Tailscale features
|
||||
- Need to run and maintain the control server
|
||||
1. **VPN (default)**: Laptop/phone connect to VPS WireGuard endpoint, traffic routed through extgw01 firewall. Only allowlisted services are reachable.
|
||||
2. **SSH + 2FA (escalated)**: SSH into extgw01 for full network access when needed.
|
||||
|
||||
### 4. Tailscale (Managed)
|
||||
## New Host: extgw01
|
||||
|
||||
Same as Headscale but using Tailscale's hosted control plane.
|
||||
A NixOS host on the internal network acting as both WireGuard gateway and SSH bastion.
|
||||
|
||||
**Pros:**
|
||||
- Zero infrastructure to manage on the control plane side
|
||||
- Polished UX, well-maintained clients
|
||||
- Free tier covers personal use
|
||||
### Responsibilities
|
||||
|
||||
**Cons:**
|
||||
- Dependency on Tailscale's service
|
||||
- Less aligned with self-hosting preference
|
||||
- Coordination metadata goes through their servers (data plane is still peer-to-peer)
|
||||
- **WireGuard tunnel** to the VPS for client traffic
|
||||
- **Firewall** with allowlist controlling which internal services are reachable through the VPN
|
||||
- **SSH bastion** with 2FA for full network access when needed
|
||||
- **DNS**: Clients get split DNS config (laptop via systemd-resolved routing domain, phone uses internal DNS for all queries)
|
||||
|
||||
### 5. Netbird (Self-Hosted)
|
||||
### Firewall allowlist (initial)
|
||||
|
||||
Open-source alternative to Tailscale with a self-hostable management server. WireGuard-based, supports ACLs and NAT traversal.
|
||||
| Service | Destination | Port |
|
||||
|------------|------------------------------|-------|
|
||||
| Grafana | monitoring01.home.2rjus.net | 3000 |
|
||||
| Jellyfin | jelly01.home.2rjus.net | 8096 |
|
||||
| Sonarr | sonarr-jail.home.2rjus.net | 8989 |
|
||||
| Radarr | radarr-jail.home.2rjus.net | 7878 |
|
||||
| NZBget | nzbget-jail.home.2rjus.net | 6789 |
|
||||
|
||||
**Pros:**
|
||||
- Fully self-hostable
|
||||
- Web UI for management
|
||||
- ACL and peer grouping support
|
||||
### SSH 2FA options (to be decided)
|
||||
|
||||
**Cons:**
|
||||
- Heavier to self-host (needs multiple components: management server, signal server, TURN relay)
|
||||
- Less mature NixOS module support compared to Tailscale/Headscale
|
||||
- **Kanidm**: Already deployed on kanidm01, supports RADIUS/OAuth2 for PAM integration
|
||||
- **SSH certificates via OpenBao**: Fits existing Vault infrastructure, short-lived certs
|
||||
- **TOTP via PAM**: Simplest fallback, Google Authenticator / similar
|
||||
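If the TOTP fallback is chosen, a rough NixOS sketch could look like this (untested; the `AuthenticationMethods` line is an assumption about requiring key + TOTP rather than either alone):

```nix
# Sketch of TOTP via PAM on extgw01 - untested, policy line is an assumption
{ ... }:
{
  security.pam.services.sshd.googleAuthenticator.enable = true;

  services.openssh.settings = {
    KbdInteractiveAuthentication = true;
    # Require both the SSH key and a TOTP code
    AuthenticationMethods = "publickey,keyboard-interactive";
  };
}
```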
|
||||
### 6. Nebula (by Defined Networking)
|
||||
## VPS Configuration
|
||||
|
||||
Certificate-based mesh VPN. Each node gets a certificate from a CA you control. No central coordination server needed at runtime.
|
||||
The VPS needs a new WireGuard interface (separate from the existing http-proxy tunnel):
|
||||
|
||||
**Pros:**
|
||||
- No always-on control plane
|
||||
- Certificate-based identity
|
||||
- Lightweight
|
||||
- WireGuard endpoint listening on a public UDP port
|
||||
- 2 peers: laptop, phone
|
||||
- Routes client traffic through tunnel to extgw01
|
||||
- Minimal config - just routing, no firewall policy (that lives on extgw01)
|
||||
|
||||
**Cons:**
|
||||
- Less convenient for ad-hoc device addition (need to issue certs)
|
||||
- NAT traversal less mature than Tailscale's
|
||||
- Smaller community/ecosystem
|
||||
## Implementation Steps
|
||||
|
||||
## Key Decision Points
|
||||
|
||||
- **Static public IP vs CGNAT?** Determines whether clients can connect directly to home network or need VPS relay.
|
||||
- **Number of client devices?** If just phone and laptop, plain WireGuard via VPS is fine. More devices favors Headscale.
|
||||
- **Per-service vs per-network access?** Gateway with firewall rules gives per-service control. Headscale ACLs can also do this. Plain WireGuard gives network-level access with gateway firewall for finer control.
|
||||
- **Subnet routing vs per-host agents?** With Headscale/Tailscale, can either install client on every host, or use a single subnet router that advertises the 10.69.13.x range. The latter is closer to the gateway approach and avoids touching every host.
|
||||
|
||||
## Leading Candidates
|
||||
|
||||
Based on existing WireGuard experience, self-hosting preference, and NixOS stack:
|
||||
|
||||
1. **Headscale with a subnet router** - Best balance of convenience and self-hosting
|
||||
2. **WireGuard gateway via VPS** - Simplest, most transparent, builds on existing setup
|
||||
1. **Create extgw01 host configuration** in this repo (see the sketch after this list)
|
||||
- VM provisioned via OpenTofu (same as other hosts)
|
||||
- WireGuard interface for VPS tunnel
|
||||
- nftables/iptables firewall with service allowlist
|
||||
- IP forwarding enabled
|
||||
2. **Configure VPS WireGuard** for client peers
|
||||
- New WireGuard interface with laptop + phone peers
|
||||
- Routing for 10.69.13.0/24 through extgw01 tunnel
|
||||
3. **Set up client configs**
|
||||
- Laptop: WireGuard config + systemd-resolved split DNS for `home.2rjus.net`
|
||||
- Phone: WireGuard app config with DNS pointing at internal nameservers
|
||||
4. **Set up SSH 2FA** on extgw01
|
||||
- Evaluate Kanidm integration vs OpenBao SSH certs vs TOTP
|
||||
5. **Test and verify**
|
||||
- VPN access to allowed services only
|
||||
- Firewall blocks everything else
|
||||
- SSH + 2FA grants full access
|
||||
- Existing public access path unaffected
|
||||
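A rough sketch of the extgw01 side of step 1 (interface name, addresses, ports and key paths are placeholders; routing/NAT back toward the VPS is omitted):

```nix
# Sketch only - names, addresses and key paths are placeholders
{ ... }:
{
  boot.kernel.sysctl."net.ipv4.ip_forward" = 1;

  networking.wireguard.interfaces.wg-vps = {
    ips = [ "10.69.223.2/24" ];
    privateKeyFile = "/var/lib/wireguard/wg-vps.key";
    peers = [{
      publicKey = "<vps-public-key>";
      endpoint = "vps.example.net:51821";
      allowedIPs = [ "10.69.223.0/24" ];
      persistentKeepalive = 25;
    }];
  };

  # Allowlist enforcement for forwarded VPN traffic (nftables firewall backend)
  networking.nftables.enable = true;
  networking.firewall.enable = true;
  networking.firewall.extraForwardRules = ''
    iifname "wg-vps" ip daddr 10.69.13.0/24 tcp dport { 3000, 8096, 8989, 7878, 6789 } accept
    iifname "wg-vps" counter drop
  '';
}
```

Per-destination rules (one per service host) would be tighter than the subnet-wide match shown here.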
|
||||
224
docs/plans/security-hardening.md
Normal file
@@ -0,0 +1,224 @@
|
||||
# Security Hardening Plan
|
||||
|
||||
## Overview
|
||||
|
||||
Address security gaps identified in the infrastructure review. Focus areas: SSH hardening, network security, logging improvements, and secrets management.
|
||||
|
||||
## Current State
|
||||
|
||||
- SSH allows password auth and unrestricted root login (`system/sshd.nix`)
|
||||
- Firewall disabled on all hosts (`networking.firewall.enable = false`)
|
||||
- Promtail ships logs over HTTP to Loki
|
||||
- Loki has no authentication (`auth_enabled = false`)
|
||||
- AppRole secret-IDs never expire (`secret_id_ttl = 0`)
|
||||
- Vault TLS verification disabled by default (`skipTlsVerify = true`)
|
||||
- Audit logging exists (`common/ssh-audit.nix`) but not applied globally
|
||||
- Alert rules focus on availability, no security event detection
|
||||
|
||||
## Priority Matrix
|
||||
|
||||
| Issue | Severity | Effort | Priority |
|
||||
|-------|----------|--------|----------|
|
||||
| SSH password auth | High | Low | **P1** |
|
||||
| Firewall disabled | High | Medium | **P1** |
|
||||
| Promtail HTTP (no TLS) | High | Medium | **P2** |
|
||||
| No security alerting | Medium | Low | **P2** |
|
||||
| Audit logging not global | Low | Low | **P2** |
|
||||
| Loki no auth | Medium | Medium | **P3** |
|
||||
| Secret-ID TTL | Medium | Medium | **P3** |
|
||||
| Vault skipTlsVerify | Medium | Low | **P3** |
|
||||
|
||||
## Phase 1: Quick Wins (P1)
|
||||
|
||||
### 1.1 SSH Hardening
|
||||
|
||||
Edit `system/sshd.nix`:
|
||||
|
||||
```nix
|
||||
services.openssh = {
|
||||
enable = true;
|
||||
settings = {
|
||||
PermitRootLogin = "prohibit-password"; # Key-only root login
|
||||
PasswordAuthentication = false;
|
||||
KbdInteractiveAuthentication = false;
|
||||
};
|
||||
};
|
||||
```
|
||||
|
||||
**Prerequisite:** Verify all hosts have SSH keys deployed for root.
|
||||
|
||||
### 1.2 Enable Firewall
|
||||
|
||||
Create `system/firewall.nix` with default deny policy:
|
||||
|
||||
```nix
|
||||
{ ... }: {
|
||||
networking.firewall.enable = true;
|
||||
|
||||
# Use openssh's built-in firewall integration
|
||||
services.openssh.openFirewall = true;
|
||||
}
|
||||
```
|
||||
|
||||
**Useful firewall options:**
|
||||
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `networking.firewall.trustedInterfaces` | Accept all traffic from these interfaces (e.g., `[ "lo" ]`) |
|
||||
| `networking.firewall.interfaces.<name>.allowedTCPPorts` | Per-interface port rules |
|
||||
| `networking.firewall.extraInputRules` | Custom nftables rules (for complex filtering) |
|
||||
|
||||
**Network range restrictions:** Consider restricting SSH to the infrastructure subnet (`10.69.13.0/24`) using `extraInputRules` for defense in depth. However, this adds complexity and may not be necessary given the trusted network model.
|
||||
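If that restriction is wanted, a sketch with the nftables backend might look like:

```nix
# Sketch: accept SSH only from the infrastructure subnet (assumes nftables backend)
networking.nftables.enable = true;
networking.firewall.extraInputRules = ''
  ip saddr 10.69.13.0/24 tcp dport 22 accept
'';
# and drop the global SSH allow rule:
services.openssh.openFirewall = false;
```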
|
||||
#### Per-Interface Rules (http-proxy WireGuard)
|
||||
|
||||
The `http-proxy` host has a WireGuard interface (`wg0`) that may need different rules than the LAN interface. Use `networking.firewall.interfaces` to apply per-interface policies:
|
||||
|
||||
```nix
|
||||
# Example: http-proxy with different rules per interface
|
||||
networking.firewall = {
|
||||
enable = true;
|
||||
|
||||
# Default: only SSH (via openFirewall)
|
||||
allowedTCPPorts = [ ];
|
||||
|
||||
# LAN interface: allow HTTP/HTTPS
|
||||
interfaces.ens18 = {
|
||||
allowedTCPPorts = [ 80 443 ];
|
||||
};
|
||||
|
||||
# WireGuard interface: restrict to specific services or trust fully
|
||||
interfaces.wg0 = {
|
||||
allowedTCPPorts = [ 80 443 ];
|
||||
# Or use trustedInterfaces = [ "wg0" ] if fully trusted
|
||||
};
|
||||
};
|
||||
```
|
||||
|
||||
**TODO:** Investigate current WireGuard usage on http-proxy to determine appropriate rules.
|
||||
|
||||
Then per-host, open required ports:
|
||||
|
||||
| Host | Additional Ports |
|
||||
|------|------------------|
|
||||
| ns1/ns2 | 53 (TCP/UDP) |
|
||||
| vault01 | 8200 |
|
||||
| monitoring01 | 3100, 9090, 3000, 9093 |
|
||||
| http-proxy | 80, 443 |
|
||||
| nats1 | 4222 |
|
||||
| ha1 | 1883, 8123 |
|
||||
| jelly01 | 8096 |
|
||||
| nix-cache01 | 5000 |
|
||||
|
||||
## Phase 2: Logging & Detection (P2)
|
||||
|
||||
### 2.1 Enable TLS for Promtail → Loki
|
||||
|
||||
Update `system/monitoring/logs.nix`:
|
||||
|
||||
```nix
|
||||
clients = [{
|
||||
url = "https://monitoring01.home.2rjus.net:3100/loki/api/v1/push";
|
||||
tls_config = {
|
||||
ca_file = "/etc/ssl/certs/homelab-root-ca.pem";
|
||||
};
|
||||
}];
|
||||
```
|
||||
|
||||
Requires:
|
||||
- Configure Loki with TLS certificate (use internal ACME)
|
||||
- Ensure all hosts trust root CA (already done via `system/pki/root-ca.nix`)
|
||||
|
||||
### 2.2 Security Alert Rules
|
||||
|
||||
Add to `services/monitoring/rules.yml`:
|
||||
|
||||
```yaml
|
||||
- name: security_rules
|
||||
rules:
|
||||
- alert: ssh_auth_failures
|
||||
expr: increase(node_logind_sessions_total[5m]) > 20
|
||||
for: 0m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: "Unusual login activity on {{ $labels.instance }}"
|
||||
|
||||
- alert: vault_secret_fetch_failure
|
||||
expr: increase(vault_secret_failures[5m]) > 5
|
||||
for: 0m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: "Vault secret fetch failures on {{ $labels.instance }}"
|
||||
```
|
||||
|
||||
Also add Loki-based alerts for:
|
||||
- Failed SSH attempts: `{job="systemd-journal"} |= "Failed password"`
|
||||
- sudo usage: `{job="systemd-journal"} |= "sudo"`
|
||||
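A sketch of one such rule in Loki ruler format (group name, `host` label, and threshold are assumptions; requires the Loki ruler to be set up):

```yaml
# Sketch - assumes the Loki ruler is enabled and promtail attaches a "host" label
groups:
  - name: security_log_rules
    rules:
      - alert: ssh_failed_passwords
        expr: sum by (host) (count_over_time({job="systemd-journal"} |= "Failed password" [5m])) > 10
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: "Repeated failed SSH logins on {{ $labels.host }}"
```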
|
||||
### 2.3 Global Audit Logging
|
||||
|
||||
Add `./common/ssh-audit.nix` import to `system/default.nix`:
|
||||
|
||||
```nix
|
||||
imports = [
|
||||
# ... existing imports
|
||||
../common/ssh-audit.nix
|
||||
];
|
||||
```
|
||||
|
||||
## Phase 3: Defense in Depth (P3)
|
||||
|
||||
### 3.1 Loki Authentication
|
||||
|
||||
Options:
|
||||
1. **Basic auth via reverse proxy** - Put Loki behind Caddy with auth
|
||||
2. **Loki multi-tenancy** - Enable `auth_enabled = true` and use tenant IDs
|
||||
3. **Network isolation** - Bind Loki only to localhost, expose via authenticated proxy
|
||||
|
||||
Recommendation: Option 1 (reverse proxy) is simplest for homelab.
|
||||
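A minimal sketch of option 1 expressed as NixOS Caddy config (hostname and password hash are placeholders; promtail clients would then need matching `basic_auth` credentials in their client config):

```nix
# Sketch - hostname and hash are placeholders, generate the hash with `caddy hash-password`
services.caddy.virtualHosts."loki.home.2rjus.net".extraConfig = ''
  basic_auth {
    promtail <bcrypt-hash>
  }
  reverse_proxy 127.0.0.1:3100
'';
```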
|
||||
### 3.2 AppRole Secret Rotation
|
||||
|
||||
Update `terraform/vault/approle.tf`:
|
||||
|
||||
```hcl
|
||||
secret_id_ttl = 2592000 # 30 days
|
||||
```
|
||||
|
||||
Add documentation for manual rotation procedure or implement automated rotation via the existing `restartTrigger` mechanism in `vault-secrets.nix`.
|
||||
|
||||
### 3.3 Enable Vault TLS Verification
|
||||
|
||||
Change default in `system/vault-secrets.nix`:
|
||||
|
||||
```nix
|
||||
skipTlsVerify = mkOption {
|
||||
type = types.bool;
|
||||
default = false; # Changed from true
|
||||
};
|
||||
```
|
||||
|
||||
**Prerequisite:** Verify all hosts trust the internal CA that signed the Vault certificate.
|
||||
|
||||
## Implementation Order
|
||||
|
||||
1. **Test on test-tier first** - Deploy phases 1-2 to testvm01/02/03
|
||||
2. **Validate SSH access** - Ensure key-based login works before disabling passwords
|
||||
3. **Document firewall ports** - Create reference of ports per host before enabling
|
||||
4. **Phase prod rollout** - Deploy to prod hosts one at a time, verify each
|
||||
|
||||
## Open Questions
|
||||
|
||||
- [ ] Do all hosts have SSH keys configured for root access?
|
||||
- [ ] Should firewall rules be per-host or use a central definition with roles?
|
||||
- [ ] Should Loki authentication use the existing Kanidm setup?
|
||||
|
||||
**Resolved:** Password-based SSH access for recovery is not required - most hosts have console access through Proxmox or physical access, which provides an out-of-band recovery path if SSH keys fail.
|
||||
|
||||
## Notes
|
||||
|
||||
- Firewall changes are the highest risk - test thoroughly on test-tier
|
||||
- SSH hardening must not lock out access - verify keys first
|
||||
- Consider creating a "break glass" procedure for emergency access if keys fail
|
||||
@@ -39,23 +39,17 @@ Expand storage capacity for the main hdd-pool. Since we need to add disks anyway
|
||||
- nzbget: NixOS service or OCI container
|
||||
- NFS exports: `services.nfs.server`
|
||||
|
||||
### Filesystem: BTRFS RAID1
|
||||
### Filesystem: Keep ZFS
|
||||
|
||||
**Decision**: Migrate from ZFS to BTRFS with RAID1
|
||||
**Decision**: Keep existing ZFS pool, import on NixOS
|
||||
|
||||
**Rationale**:
|
||||
- **In-kernel**: No out-of-tree module issues like ZFS
|
||||
- **Flexible expansion**: Add individual disks, not required to buy pairs
|
||||
- **Mixed disk sizes**: Better handling than ZFS multi-vdev approach
|
||||
- **RAID level conversion**: Can convert between RAID levels in place
|
||||
- Built-in checksumming, snapshots, compression (zstd)
|
||||
- NixOS has good BTRFS support
|
||||
|
||||
**BTRFS RAID1 notes**:
|
||||
- "RAID1" means 2 copies of all data
|
||||
- Distributes across all available devices
|
||||
- With 6+ disks, provides redundancy + capacity scaling
|
||||
- RAID5/6 avoided (known issues), RAID1/10 are stable
|
||||
- **No data migration needed**: Existing ZFS pool can be imported directly on NixOS
|
||||
- **Proven reliability**: Pool has been running reliably on TrueNAS
|
||||
- **NixOS ZFS support**: Well-supported, declarative configuration via `boot.zfs` and `services.zfs`
|
||||
- **BTRFS RAID5/6 unreliable**: Research showed the BTRFS RAID5/6 write hole is still unresolved
|
||||
- **BTRFS RAID1 wasteful**: With mixed disk sizes, RAID1 wastes significant capacity vs ZFS mirrors
|
||||
- Checksumming, snapshots, compression (lz4/zstd) all available
|
||||
|
||||
### Hardware: Keep Existing + Add Disks
|
||||
|
||||
@@ -69,83 +63,94 @@ Expand storage capacity for the main hdd-pool. Since we need to add disks anyway
|
||||
|
||||
**Storage architecture**:
|
||||
|
||||
**Bulk storage** (BTRFS RAID1 on HDDs):
|
||||
- Current: 6x HDDs (2x16TB + 2x8TB + 2x8TB)
|
||||
- Add: 2x new HDDs (size TBD)
|
||||
**hdd-pool** (ZFS mirrors):
|
||||
- Current: 3 mirror vdevs (2x16TB + 2x8TB + 2x8TB) = 32TB usable
|
||||
- Add: mirror-3 with 2x 24TB = +24TB usable
|
||||
- Total after expansion: ~56TB usable
|
||||
- Use: Media, downloads, backups, non-critical data
|
||||
- Risk tolerance: High (data mostly replaceable)
|
||||
|
||||
**Critical data** (small volume):
|
||||
- Use 2x 240GB SSDs in mirror (BTRFS or ZFS)
|
||||
- Or use 2TB NVMe for critical data
|
||||
- Risk tolerance: Low (data important but small)
|
||||
|
||||
### Disk Purchase Decision
|
||||
|
||||
**Options under consideration**:
|
||||
|
||||
**Option A: 2x 16TB drives**
|
||||
- Matches largest current drives
|
||||
- Enables potential future RAID5 if desired (6x 16TB array)
|
||||
- More conservative capacity increase
|
||||
|
||||
**Option B: 2x 20-24TB drives**
|
||||
- Larger capacity headroom
|
||||
- Better $/TB ratio typically
|
||||
- Future-proofs better
|
||||
|
||||
**Initial purchase**: 2 drives (chassis has space for 2 more without modifications)
|
||||
**Decision**: 2x 24TB drives (ordered, arriving 2026-02-21)
|
||||
|
||||
## Migration Strategy
|
||||
|
||||
### High-Level Plan
|
||||
|
||||
1. **Preparation**:
|
||||
- Purchase 2x new HDDs (16TB or 20-24TB)
|
||||
- Create NixOS configuration for new storage host
|
||||
- Set up bare metal NixOS installation
|
||||
1. **Expand ZFS pool** (on TrueNAS):
|
||||
- Install 2x 24TB drives (may need new drive trays - order from abroad if needed)
|
||||
- If chassis space is limited, temporarily replace the two oldest 8TB drives (da0/ada4)
|
||||
- Add as mirror-3 vdev to hdd-pool
|
||||
- Verify pool health and resilver completes
|
||||
- Check SMART data on old 8TB drives (all healthy as of 2026-02-20, no reallocated sectors)
|
||||
- Burn-in: at minimum short + long SMART test before adding to pool
|
||||
|
||||
2. **Initial BTRFS pool**:
|
||||
- Install 2 new disks
|
||||
- Create BTRFS filesystem in RAID1
|
||||
- Mount and test NFS exports
|
||||
2. **Prepare NixOS configuration**:
|
||||
- Create host configuration (`hosts/nas1/` or similar)
|
||||
- Configure ZFS pool import (`boot.zfs.extraPools`)
|
||||
- Set up services: radarr, sonarr, nzbget, restic-rest, NFS
|
||||
- Configure monitoring (node-exporter, promtail, smartctl-exporter)
|
||||
|
||||
3. **Data migration**:
|
||||
- Copy data from TrueNAS ZFS pool to new BTRFS pool over 10GbE
|
||||
- Verify data integrity
|
||||
3. **Install NixOS**:
|
||||
- `zfs export hdd-pool` on TrueNAS before shutdown (clean export)
|
||||
- Wipe TrueNAS boot-pool SSDs, set up as mdadm RAID1 for NixOS root
|
||||
- Install NixOS on mdadm mirror (keeps boot path ZFS-independent)
|
||||
- Import hdd-pool via `boot.zfs.extraPools` (see the sketch after this list)
|
||||
- Verify all datasets mount correctly
|
||||
|
||||
4. **Expand pool**:
|
||||
- As old ZFS pool is emptied, wipe drives and add to BTRFS pool
|
||||
- Pool grows incrementally: 2 → 4 → 6 → 8 disks
|
||||
- BTRFS rebalances data across new devices
|
||||
4. **Service migration**:
|
||||
- Configure NixOS services to use ZFS dataset paths
|
||||
- Update NFS exports
|
||||
- Test from consuming hosts
|
||||
|
||||
5. **Service migration**:
|
||||
- Set up radarr/sonarr/nzbget/restic as NixOS services
|
||||
- Update NFS client mounts on consuming hosts
|
||||
|
||||
6. **Cutover**:
|
||||
- Point consumers to new NAS host
|
||||
5. **Cutover**:
|
||||
- Update DNS/client mounts if IP changes
|
||||
- Verify monitoring integration
|
||||
- Decommission TrueNAS
|
||||
- Repurpose hardware or keep as spare
|
||||
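For the NixOS side of step 3, the import itself is small; the vdev addition in step 1 is essentially `zpool add hdd-pool mirror <disk1> <disk2>` on TrueNAS. A sketch of the relevant `hosts/nas1/` settings (the hostId is a placeholder and must be set before the first import):

```nix
# Sketch for hosts/nas1/ - hostId is a placeholder, pool name matches the existing pool
{ ... }:
{
  boot.supportedFilesystems = [ "zfs" ];
  networking.hostId = "deadbeef";        # any stable 8-hex-digit value, required by ZFS
  boot.zfs.extraPools = [ "hdd-pool" ];

  # Routine maintenance
  services.zfs.autoScrub.enable = true;
  services.zfs.trim.enable = true;
}
```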
|
||||
### Post-Expansion: Vdev Rebalancing
|
||||
|
||||
ZFS has no built-in rebalance command. After adding the new 24TB vdev, ZFS will
|
||||
write new data preferentially to it (most free space), leaving old vdevs packed
|
||||
at ~97%. This is suboptimal but not urgent once overall pool usage drops to ~50%.
|
||||
|
||||
To gradually rebalance, rewrite files in place so ZFS redistributes blocks across
|
||||
all vdevs proportional to free space:
|
||||
|
||||
```bash
|
||||
# Rewrite files individually (spreads blocks across all vdevs)
|
||||
find /pool/dataset -type f -exec sh -c '
|
||||
for f; do cp "$f" "$f.rebal" && mv "$f.rebal" "$f"; done
|
||||
' _ {} +
|
||||
```
|
||||
|
||||
Avoid `zfs send/recv` for large datasets (e.g. 20TB) as this would concentrate
|
||||
data on the emptiest vdev rather than spreading it evenly.
|
||||
|
||||
**Recommendation**: Do this after NixOS migration is stable. Not urgent - the pool
|
||||
will function fine with uneven distribution, just slightly suboptimal for performance.
|
||||
|
||||
### Migration Advantages
|
||||
|
||||
- **Low risk**: New pool created independently, old data remains intact during migration
|
||||
- **Incremental**: Can add old disks one at a time as space allows
|
||||
- **Flexible**: BTRFS handles mixed disk sizes gracefully
|
||||
- **Reversible**: Keep TrueNAS running until fully validated
|
||||
- **No data migration**: ZFS pool imported directly, no copying terabytes of data
|
||||
- **Low risk**: Pool expansion done on stable TrueNAS before OS swap
|
||||
- **Reversible**: Can boot back to TrueNAS if NixOS has issues (ZFS pool is OS-independent)
|
||||
- **Quick cutover**: Once NixOS config is ready, the OS swap is fast
|
||||
|
||||
## Next Steps
|
||||
|
||||
1. Decide on disk size (16TB vs 20-24TB)
|
||||
2. Purchase disks
|
||||
3. Design NixOS host configuration (`hosts/nas1/`)
|
||||
4. Plan detailed migration timeline
|
||||
5. Document NFS export mapping (current → new)
|
||||
1. ~~Decide on disk size~~ - 2x 24TB ordered
|
||||
2. Install drives and add mirror vdev to ZFS pool
|
||||
3. Check SMART data on 8TB drives - decide whether to keep or retire
|
||||
4. Design NixOS host configuration (`hosts/nas1/`)
|
||||
5. Document NFS export mapping (current → new)
|
||||
6. Plan NixOS installation and cutover
|
||||
|
||||
## Open Questions
|
||||
|
||||
- [ ] Final decision on disk size?
|
||||
- [ ] Hostname for new NAS host? (nas1? storage1?)
|
||||
- [ ] IP address allocation (keep 10.69.12.50 or new IP?)
|
||||
- [ ] Timeline/maintenance window for migration?
|
||||
- [ ] IP address/subnet: NAS and Proxmox are both on 10GbE to the same switch but different subnets, forcing traffic through the router (bottleneck). Move to same subnet during migration.
|
||||
- [x] Boot drive: Reuse TrueNAS boot-pool SSDs as mdadm RAID1 for NixOS root (no ZFS on boot path)
|
||||
- [ ] Retire old 8TB drives? (SMART looks healthy, keep unless chassis space is needed)
|
||||
- [x] Drive trays: ordered domestically (expected 2026-02-25 to 2026-03-03)
|
||||
- [ ] Timeline/maintenance window for NixOS swap?
|
||||
|
||||
311
docs/user-management.md
Normal file
@@ -0,0 +1,311 @@
|
||||
# User Management with Kanidm
|
||||
|
||||
Central authentication for the homelab using Kanidm.
|
||||
|
||||
## Overview
|
||||
|
||||
- **Server**: kanidm01.home.2rjus.net (auth.home.2rjus.net)
|
||||
- **WebUI**: https://auth.home.2rjus.net
|
||||
- **LDAPS**: port 636
|
||||
|
||||
## CLI Setup
|
||||
|
||||
The `kanidm` CLI is available in the devshell:
|
||||
|
||||
```bash
|
||||
nix develop
|
||||
|
||||
# Login as idm_admin
|
||||
kanidm login --name idm_admin --url https://auth.home.2rjus.net
|
||||
```
|
||||
|
||||
## User Management
|
||||
|
||||
POSIX users are managed imperatively via the `kanidm` CLI. This allows setting
|
||||
all attributes (including UNIX password) in one workflow.
|
||||
|
||||
### Creating a POSIX User
|
||||
|
||||
```bash
|
||||
# Create the person
|
||||
kanidm person create <username> "<Display Name>"
|
||||
|
||||
# Add to groups
|
||||
kanidm group add-members ssh-users <username>
|
||||
|
||||
# Enable POSIX (UID is auto-assigned)
|
||||
kanidm person posix set <username>
|
||||
|
||||
# Set UNIX password (required for SSH login, min 10 characters)
|
||||
kanidm person posix set-password <username>
|
||||
|
||||
# Optionally set login shell
|
||||
kanidm person posix set <username> --shell /bin/zsh
|
||||
```
|
||||
|
||||
### Setting Email Address
|
||||
|
||||
Email is required for OAuth2/OIDC login (e.g., Grafana):
|
||||
|
||||
```bash
|
||||
kanidm person update <username> --mail <email>
|
||||
```
|
||||
|
||||
### Example: Full User Creation
|
||||
|
||||
```bash
|
||||
kanidm person create testuser "Test User"
|
||||
kanidm person update testuser --mail testuser@home.2rjus.net
|
||||
kanidm group add-members ssh-users testuser
|
||||
kanidm group add-members users testuser # Required for OAuth2 scopes
|
||||
kanidm person posix set testuser
|
||||
kanidm person posix set-password testuser
|
||||
kanidm person get testuser
|
||||
```
|
||||
|
||||
After creation, verify on a client host:
|
||||
```bash
|
||||
getent passwd testuser
|
||||
ssh testuser@testvm01.home.2rjus.net
|
||||
```
|
||||
|
||||
### Viewing User Details
|
||||
|
||||
```bash
|
||||
kanidm person get <username>
|
||||
```
|
||||
|
||||
### Removing a User
|
||||
|
||||
```bash
|
||||
kanidm person delete <username>
|
||||
```
|
||||
|
||||
## Group Management
|
||||
|
||||
Groups for POSIX access are also managed via CLI.
|
||||
|
||||
### Creating a POSIX Group
|
||||
|
||||
```bash
|
||||
# Create the group
|
||||
kanidm group create <group-name>
|
||||
|
||||
# Enable POSIX with a specific GID
|
||||
kanidm group posix set <group-name> --gidnumber <gid>
|
||||
```
|
||||
|
||||
### Adding Members
|
||||
|
||||
```bash
|
||||
kanidm group add-members <group-name> <username>
|
||||
```
|
||||
|
||||
### Viewing Group Details
|
||||
|
||||
```bash
|
||||
kanidm group get <group-name>
|
||||
kanidm group list-members <group-name>
|
||||
```
|
||||
|
||||
### Example: Full Group Creation
|
||||
|
||||
```bash
|
||||
kanidm group create testgroup
|
||||
kanidm group posix set testgroup --gidnumber 68010
|
||||
kanidm group add-members testgroup testuser
|
||||
kanidm group get testgroup
|
||||
```
|
||||
|
||||
After creation, verify on a client host:
|
||||
```bash
|
||||
getent group testgroup
|
||||
```
|
||||
|
||||
### Current Groups
|
||||
|
||||
| Group | GID | Purpose |
|
||||
|-------|-----|---------|
|
||||
| ssh-users | 68000 | SSH login access |
|
||||
| admins | 68001 | Administrative access |
|
||||
| users | 68002 | General users |
|
||||
|
||||
### UID/GID Allocation
|
||||
|
||||
Kanidm auto-assigns UIDs/GIDs from its configured range. For manually assigned GIDs:
|
||||
|
||||
| Range | Purpose |
|
||||
|-------|---------|
|
||||
| 65,536+ | Users (auto-assigned) |
|
||||
| 68,000 - 68,999 | Groups (manually assigned) |
|
||||
|
||||
## OAuth2/OIDC Login (Web Services)
|
||||
|
||||
For OAuth2/OIDC login to web services like Grafana, users need:
|
||||
|
||||
1. **Primary credential** - Password set via `credential update` (separate from unix password)
|
||||
2. **MFA** - TOTP or passkey (Kanidm requires MFA for primary credentials)
|
||||
3. **Group membership** - Member of `users` group (for OAuth2 scope mapping)
|
||||
4. **Email address** - Set via `person update --mail`
|
||||
|
||||
### Setting Up Primary Credential (Web Login)
|
||||
|
||||
The primary credential is different from the unix/POSIX password:
|
||||
|
||||
```bash
|
||||
# Interactive credential setup
|
||||
kanidm person credential update <username>
|
||||
|
||||
# In the interactive prompt:
|
||||
# 1. Type 'password' to set a password
|
||||
# 2. Type 'totp' to add TOTP (scan QR with authenticator app)
|
||||
# 3. Type 'commit' to save
|
||||
```
|
||||
|
||||
### Verifying OAuth2 Readiness
|
||||
|
||||
```bash
|
||||
kanidm person get <username>
|
||||
```
|
||||
|
||||
Check for:
|
||||
- `mail:` - Email address set
|
||||
- `memberof:` - Includes `users@home.2rjus.net`
|
||||
- Primary credential status (check via `credential update` → `status`)
|
||||
|
||||
## PAM/NSS Client Configuration
|
||||
|
||||
Enable central authentication on a host:
|
||||
|
||||
```nix
|
||||
homelab.kanidm.enable = true;
|
||||
```
|
||||
|
||||
This configures:
|
||||
- `services.kanidm.enablePam = true`
|
||||
- Client connection to auth.home.2rjus.net
|
||||
- Login authorization for `ssh-users` group
|
||||
- Short usernames (`torjus` instead of `torjus@home.2rjus.net`)
|
||||
- Home directory symlinks (`/home/torjus` → UUID-based directory)
|
||||
|
||||
### Enabled Hosts
|
||||
|
||||
- testvm01, testvm02, testvm03 (test tier)
|
||||
|
||||
### Options
|
||||
|
||||
```nix
|
||||
homelab.kanidm = {
|
||||
enable = true;
|
||||
server = "https://auth.home.2rjus.net"; # default
|
||||
allowedLoginGroups = [ "ssh-users" ]; # default
|
||||
};
|
||||
```
|
||||
|
||||
### Home Directories
|
||||
|
||||
Home directories use UUID-based paths for stability (so renaming a user doesn't
|
||||
require moving their home directory). Symlinks provide convenient access:
|
||||
|
||||
```
|
||||
/home/torjus -> /home/e4f4c56c-4aee-4c20-846f-90cb69807733
|
||||
```
|
||||
|
||||
The symlinks are created by `kanidm-unixd-tasks` on first login.
|
||||
|
||||
## Testing
|
||||
|
||||
### Verify NSS Resolution
|
||||
|
||||
```bash
|
||||
# Check user resolution
|
||||
getent passwd <username>
|
||||
|
||||
# Check group resolution
|
||||
getent group <group-name>
|
||||
```
|
||||
|
||||
### Test SSH Login
|
||||
|
||||
```bash
|
||||
ssh <username>@<hostname>.home.2rjus.net
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "PAM user mismatch" error
|
||||
|
||||
SSH fails with "fatal: PAM user mismatch" in logs. This happens when Kanidm returns
|
||||
usernames in SPN format (`torjus@home.2rjus.net`) but SSH expects short names (`torjus`).
|
||||
|
||||
**Solution**: Configure `uid_attr_map = "name"` in unixSettings (already set in our module).
|
||||
|
||||
Check current format:
|
||||
```bash
|
||||
getent passwd torjus
|
||||
# Should show: torjus:x:65536:...
|
||||
# NOT: torjus@home.2rjus.net:x:65536:...
|
||||
```
|
||||
|
||||
### User resolves but SSH fails immediately
|
||||
|
||||
The user's login group (e.g., `ssh-users`) likely doesn't have POSIX enabled:
|
||||
|
||||
```bash
|
||||
# Check if group has POSIX
|
||||
getent group ssh-users
|
||||
|
||||
# If empty, enable POSIX on the server
|
||||
kanidm group posix set ssh-users --gidnumber 68000
|
||||
```
|
||||
|
||||
### User doesn't resolve via getent
|
||||
|
||||
1. Check kanidm-unixd service is running:
|
||||
```bash
|
||||
systemctl status kanidm-unixd
|
||||
```
|
||||
|
||||
2. Check unixd can reach server:
|
||||
```bash
|
||||
kanidm-unix status
|
||||
# Should show: system: online, Kanidm: online
|
||||
```
|
||||
|
||||
3. Check client can reach server:
|
||||
```bash
|
||||
curl -s https://auth.home.2rjus.net/status
|
||||
```
|
||||
|
||||
4. Check user has POSIX enabled on server:
|
||||
```bash
|
||||
kanidm person get <username>
|
||||
```
|
||||
|
||||
5. Restart nscd to clear stale cache:
|
||||
```bash
|
||||
systemctl restart nscd
|
||||
```
|
||||
|
||||
6. Invalidate kanidm cache:
|
||||
```bash
|
||||
kanidm-unix cache-invalidate
|
||||
```
|
||||
|
||||
### Changes not taking effect after deployment
|
||||
|
||||
NixOS uses nsncd (a Rust reimplementation of nscd) for NSS caching. After deploying
|
||||
kanidm-unixd config changes, you may need to restart both services:
|
||||
|
||||
```bash
|
||||
systemctl restart kanidm-unixd
|
||||
systemctl restart nscd
|
||||
```
|
||||
|
||||
### Test PAM authentication directly
|
||||
|
||||
Use the kanidm-unix CLI to test PAM auth without SSH:
|
||||
|
||||
```bash
|
||||
kanidm-unix auth-test --name <username>
|
||||
```
|
||||
73
flake.lock
generated
@@ -28,11 +28,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1770447502,
|
||||
"narHash": "sha256-xH1PNyE3ydj4udhe1IpK8VQxBPZETGLuORZdSWYRmSU=",
|
||||
"lastModified": 1771488195,
|
||||
"narHash": "sha256-2kMxqdDyPluRQRoES22Y0oSjp7pc5fj2nRterfmSIyc=",
|
||||
"ref": "master",
|
||||
"rev": "79db119d1ca6630023947ef0a65896cc3307c2ff",
|
||||
"revCount": 22,
|
||||
"rev": "2d26de50559d8acb82ea803764e138325d95572c",
|
||||
"revCount": 37,
|
||||
"type": "git",
|
||||
"url": "https://git.t-juice.club/torjus/homelab-deploy"
|
||||
},
|
||||
@@ -42,27 +42,6 @@
|
||||
"url": "https://git.t-juice.club/torjus/homelab-deploy"
|
||||
}
|
||||
},
|
||||
"labmon": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs-unstable"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1748983975,
|
||||
"narHash": "sha256-DA5mOqxwLMj/XLb4hvBU1WtE6cuVej7PjUr8N0EZsCE=",
|
||||
"ref": "master",
|
||||
"rev": "040a73e891a70ff06ec7ab31d7167914129dbf7d",
|
||||
"revCount": 17,
|
||||
"type": "git",
|
||||
"url": "https://git.t-juice.club/torjus/labmon"
|
||||
},
|
||||
"original": {
|
||||
"ref": "master",
|
||||
"type": "git",
|
||||
"url": "https://git.t-juice.club/torjus/labmon"
|
||||
}
|
||||
},
|
||||
"nixos-exporter": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
@@ -70,11 +49,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1770422522,
|
||||
"narHash": "sha256-WmIFnquu4u58v8S2bOVWmknRwHn4x88CRfBFTzJ1inQ=",
|
||||
"lastModified": 1770593543,
|
||||
"narHash": "sha256-hT8Rj6JAwGDFvcxWEcUzTCrWSiupCfBa57pBDnM2C5g=",
|
||||
"ref": "refs/heads/master",
|
||||
"rev": "cf0ce858997af4d8dcc2ce10393ff393e17fc911",
|
||||
"revCount": 11,
|
||||
"rev": "5aa5f7275b7a08015816171ba06d2cbdc2e02d3e",
|
||||
"revCount": 15,
|
||||
"type": "git",
|
||||
"url": "https://git.t-juice.club/torjus/nixos-exporter"
|
||||
},
|
||||
@@ -85,11 +64,11 @@
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1770136044,
|
||||
"narHash": "sha256-tlFqNG/uzz2++aAmn4v8J0vAkV3z7XngeIIB3rM3650=",
|
||||
"lastModified": 1772822230,
|
||||
"narHash": "sha256-yf3iYLGbGVlIthlQIk5/4/EQDZNNEmuqKZkQssMljuw=",
|
||||
"owner": "nixos",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "e576e3c9cf9bad747afcddd9e34f51d18c855b4e",
|
||||
"rev": "71caefce12ba78d84fe618cf61644dce01cf3a96",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -101,11 +80,11 @@
|
||||
},
|
||||
"nixpkgs-unstable": {
|
||||
"locked": {
|
||||
"lastModified": 1770197578,
|
||||
"narHash": "sha256-AYqlWrX09+HvGs8zM6ebZ1pwUqjkfpnv8mewYwAo+iM=",
|
||||
"lastModified": 1772773019,
|
||||
"narHash": "sha256-E1bxHxNKfDoQUuvriG71+f+s/NT0qWkImXsYZNFFfCs=",
|
||||
"owner": "nixos",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "00c21e4c93d963c50d4c0c89bfa84ed6e0694df2",
|
||||
"rev": "aca4d95fce4914b3892661bcb80b8087293536c6",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -119,31 +98,9 @@
|
||||
"inputs": {
|
||||
"alerttonotify": "alerttonotify",
|
||||
"homelab-deploy": "homelab-deploy",
|
||||
"labmon": "labmon",
|
||||
"nixos-exporter": "nixos-exporter",
|
||||
"nixpkgs": "nixpkgs",
|
||||
"nixpkgs-unstable": "nixpkgs-unstable",
|
||||
"sops-nix": "sops-nix"
|
||||
}
|
||||
},
|
||||
"sops-nix": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs-unstable"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1770145881,
|
||||
"narHash": "sha256-ktjWTq+D5MTXQcL9N6cDZXUf9kX8JBLLBLT0ZyOTSYY=",
|
||||
"owner": "Mic92",
|
||||
"repo": "sops-nix",
|
||||
"rev": "17eea6f3816ba6568b8c81db8a4e6ca438b30b7c",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "Mic92",
|
||||
"repo": "sops-nix",
|
||||
"type": "github"
|
||||
"nixpkgs-unstable": "nixpkgs-unstable"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
flake.nix, 188 lines changed
@@ -5,18 +5,10 @@
|
||||
nixpkgs.url = "github:nixos/nixpkgs?ref=nixos-25.11";
|
||||
nixpkgs-unstable.url = "github:nixos/nixpkgs?ref=nixos-unstable";
|
||||
|
||||
sops-nix = {
|
||||
url = "github:Mic92/sops-nix";
|
||||
inputs.nixpkgs.follows = "nixpkgs-unstable";
|
||||
};
|
||||
alerttonotify = {
|
||||
url = "git+https://git.t-juice.club/torjus/alerttonotify?ref=master";
|
||||
inputs.nixpkgs.follows = "nixpkgs-unstable";
|
||||
};
|
||||
labmon = {
|
||||
url = "git+https://git.t-juice.club/torjus/labmon?ref=master";
|
||||
inputs.nixpkgs.follows = "nixpkgs-unstable";
|
||||
};
|
||||
nixos-exporter = {
|
||||
url = "git+https://git.t-juice.club/torjus/nixos-exporter";
|
||||
inputs.nixpkgs.follows = "nixpkgs-unstable";
|
||||
@@ -32,9 +24,7 @@
|
||||
self,
|
||||
nixpkgs,
|
||||
nixpkgs-unstable,
|
||||
sops-nix,
|
||||
alerttonotify,
|
||||
labmon,
|
||||
nixos-exporter,
|
||||
homelab-deploy,
|
||||
...
|
||||
@@ -50,7 +40,6 @@
|
||||
commonOverlays = [
|
||||
overlay-unstable
|
||||
alerttonotify.overlays.default
|
||||
labmon.overlays.default
|
||||
];
|
||||
# Common modules applied to all hosts
|
||||
commonModules = [
|
||||
@@ -61,7 +50,6 @@
|
||||
system.configurationRevision = self.rev or self.dirtyRev or "dirty";
|
||||
}
|
||||
)
|
||||
sops-nix.nixosModules.sops
|
||||
nixos-exporter.nixosModules.default
|
||||
homelab-deploy.nixosModules.default
|
||||
./modules/homelab
|
||||
@@ -77,46 +65,19 @@
|
||||
in
|
||||
{
|
||||
nixosConfigurations = {
|
||||
ns1 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self sops-nix;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/ns1
|
||||
];
|
||||
};
|
||||
ns2 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self sops-nix;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/ns2
|
||||
];
|
||||
};
|
||||
ha1 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self sops-nix;
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/ha1
|
||||
];
|
||||
};
|
||||
template1 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self sops-nix;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/template
|
||||
];
|
||||
};
|
||||
template2 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self sops-nix;
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/template2
|
||||
@@ -125,62 +86,25 @@
|
||||
http-proxy = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self sops-nix;
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/http-proxy
|
||||
];
|
||||
};
|
||||
ca = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self sops-nix;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/ca
|
||||
];
|
||||
};
|
||||
monitoring01 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self sops-nix;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/monitoring01
|
||||
labmon.nixosModules.labmon
|
||||
];
|
||||
};
|
||||
jelly01 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self sops-nix;
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/jelly01
|
||||
];
|
||||
};
|
||||
nix-cache01 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self sops-nix;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/nix-cache01
|
||||
];
|
||||
};
|
||||
pgdb1 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self sops-nix;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/pgdb1
|
||||
];
|
||||
};
|
||||
nats1 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self sops-nix;
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/nats1
|
||||
@@ -189,7 +113,7 @@
|
||||
vault01 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self sops-nix;
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/vault01
|
||||
@@ -198,7 +122,7 @@
|
||||
testvm01 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self sops-nix;
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/testvm01
|
||||
@@ -207,7 +131,7 @@
|
||||
testvm02 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self sops-nix;
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/testvm02
|
||||
@@ -216,12 +140,102 @@
|
||||
testvm03 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self sops-nix;
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/testvm03
|
||||
];
|
||||
};
|
||||
ns2 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/ns2
|
||||
];
|
||||
};
|
||||
ns1 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/ns1
|
||||
];
|
||||
};
|
||||
kanidm01 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/kanidm01
|
||||
];
|
||||
};
|
||||
monitoring02 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/monitoring02
|
||||
];
|
||||
};
|
||||
nix-cache02 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/nix-cache02
|
||||
];
|
||||
};
|
||||
garage01 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/garage01
|
||||
];
|
||||
};
|
||||
pn01 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/pn01
|
||||
];
|
||||
};
|
||||
pn02 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/pn02
|
||||
];
|
||||
};
|
||||
nrec-nixos01 = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/nrec-nixos01
|
||||
];
|
||||
};
|
||||
openstack-template = nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
specialArgs = {
|
||||
inherit inputs self;
|
||||
};
|
||||
modules = commonModules ++ [
|
||||
./hosts/openstack-template
|
||||
];
|
||||
};
|
||||
};
|
||||
packages = forAllSystems (
|
||||
{ pkgs }:
|
||||
@@ -238,9 +252,13 @@
|
||||
pkgs.ansible
|
||||
pkgs.opentofu
|
||||
pkgs.openbao
|
||||
pkgs.kanidm_1_8
|
||||
pkgs.nkeys
|
||||
pkgs.openstackclient
|
||||
(pkgs.callPackage ./scripts/create-host { })
|
||||
homelab-deploy.packages.${pkgs.system}.default
|
||||
];
|
||||
ANSIBLE_CONFIG = "./ansible/ansible.cfg";
|
||||
};
|
||||
}
|
||||
);
|
||||
|
||||
@@ -1,33 +1,37 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
imports = [
|
||||
../template/hardware-configuration.nix
|
||||
../template2/hardware-configuration.nix
|
||||
|
||||
../../system
|
||||
../../common/vm
|
||||
];
|
||||
|
||||
homelab.dns.cnames = [ "nix-cache" "actions1" ];
|
||||
|
||||
homelab.host.role = "build-host";
|
||||
|
||||
fileSystems."/nix" = {
|
||||
device = "/dev/disk/by-label/nixcache";
|
||||
fsType = "xfs";
|
||||
# Host metadata (adjust as needed)
|
||||
homelab.host = {
|
||||
tier = "test"; # Start in test tier, move to prod after validation
|
||||
role = "storage";
|
||||
};
|
||||
|
||||
homelab.dns.cnames = [ "s3" ];
|
||||
|
||||
# Enable Vault integration
|
||||
vault.enable = true;
|
||||
|
||||
# Enable remote deployment via NATS
|
||||
homelab.deploy.enable = true;
|
||||
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
# Use the systemd-boot EFI boot loader.
|
||||
boot.loader.grub = {
|
||||
enable = true;
|
||||
device = "/dev/sda";
|
||||
configurationLimit = 3;
|
||||
};
|
||||
boot.loader.grub.enable = true;
|
||||
boot.loader.grub.device = "/dev/vda";
|
||||
|
||||
networking.hostName = "nix-cache01";
|
||||
networking.hostName = "garage01";
|
||||
networking.domain = "home.2rjus.net";
|
||||
networking.useNetworkd = true;
|
||||
networking.useDHCP = false;
|
||||
@@ -41,7 +45,7 @@
|
||||
systemd.network.networks."ens18" = {
|
||||
matchConfig.Name = "ens18";
|
||||
address = [
|
||||
"10.69.13.15/24"
|
||||
"10.69.13.26/24"
|
||||
];
|
||||
routes = [
|
||||
{ Gateway = "10.69.13.1"; }
|
||||
@@ -50,12 +54,6 @@
|
||||
};
|
||||
time.timeZone = "Europe/Oslo";
|
||||
|
||||
nix.settings.experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
];
|
||||
vault.enable = true;
|
||||
homelab.deploy.enable = true;
|
||||
|
||||
nix.settings.tarball-ttl = 0;
|
||||
environment.systemPackages = with pkgs; [
|
||||
@@ -64,13 +62,11 @@
|
||||
git
|
||||
];
|
||||
|
||||
services.qemuGuest.enable = true;
|
||||
|
||||
# Open ports in the firewall.
|
||||
# networking.firewall.allowedTCPPorts = [ ... ];
|
||||
# networking.firewall.allowedUDPPorts = [ ... ];
|
||||
# Or disable the firewall altogether.
|
||||
networking.firewall.enable = false;
|
||||
|
||||
system.stateVersion = "24.05"; # Did you read the comment?
|
||||
}
|
||||
system.stateVersion = "25.11"; # Did you read the comment?
|
||||
}
|
||||
@@ -1,7 +1,6 @@
|
||||
{ ... }: {
|
||||
imports = [
|
||||
./hardware-configuration.nix
|
||||
./configuration.nix
|
||||
./scripts.nix
|
||||
../../services/garage
|
||||
];
|
||||
}
|
||||
}
|
||||
@@ -7,12 +7,14 @@
|
||||
|
||||
{
|
||||
imports = [
|
||||
../template/hardware-configuration.nix
|
||||
./hardware-configuration.nix
|
||||
|
||||
../../system
|
||||
../../common/vm
|
||||
];
|
||||
|
||||
homelab.host.role = "home-automation";
|
||||
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
# Use the systemd-boot EFI boot loader.
|
||||
boot.loader.grub = {
|
||||
@@ -44,10 +46,7 @@
|
||||
};
|
||||
time.timeZone = "Europe/Oslo";
|
||||
|
||||
nix.settings.experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
];
|
||||
|
||||
nix.settings.tarball-ttl = 0;
|
||||
environment.systemPackages = with pkgs; [
|
||||
vim
|
||||
@@ -85,6 +84,7 @@
|
||||
"--keep-monthly 6"
|
||||
"--keep-within 1d"
|
||||
];
|
||||
extraOptions = [ "--retry-lock=5m" ];
|
||||
};
|
||||
|
||||
# Open ports in the firewall.
|
||||
|
||||
@@ -5,24 +5,20 @@
|
||||
|
||||
{
|
||||
imports = [
|
||||
../template/hardware-configuration.nix
|
||||
./hardware-configuration.nix
|
||||
|
||||
../../system
|
||||
../../common/vm
|
||||
];
|
||||
|
||||
homelab.host.role = "proxy";
|
||||
homelab.dns.cnames = [
|
||||
"nzbget"
|
||||
"radarr"
|
||||
"sonarr"
|
||||
"ha"
|
||||
"z2m"
|
||||
"grafana"
|
||||
"prometheus"
|
||||
"alertmanager"
|
||||
"jelly"
|
||||
"pyroscope"
|
||||
"pushgw"
|
||||
];
|
||||
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
@@ -56,10 +52,7 @@
|
||||
};
|
||||
time.timeZone = "Europe/Oslo";
|
||||
|
||||
nix.settings.experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
];
|
||||
|
||||
vault.enable = true;
|
||||
homelab.deploy.enable = true;
|
||||
|
||||
|
||||
hosts/http-proxy/hardware-configuration.nix (Normal file), 42 lines changed
@@ -0,0 +1,42 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
modulesPath,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
imports = [
|
||||
(modulesPath + "/profiles/qemu-guest.nix")
|
||||
];
|
||||
boot.initrd.availableKernelModules = [
|
||||
"ata_piix"
|
||||
"uhci_hcd"
|
||||
"virtio_pci"
|
||||
"virtio_scsi"
|
||||
"sd_mod"
|
||||
"sr_mod"
|
||||
];
|
||||
boot.initrd.kernelModules = [ "dm-snapshot" ];
|
||||
boot.kernelModules = [
|
||||
"ptp_kvm"
|
||||
];
|
||||
boot.extraModulePackages = [ ];
|
||||
|
||||
fileSystems."/" = {
|
||||
device = "/dev/disk/by-label/root";
|
||||
fsType = "xfs";
|
||||
};
|
||||
|
||||
swapDevices = [ { device = "/dev/disk/by-label/swap"; } ];
|
||||
|
||||
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
|
||||
# (the default) this is the recommended approach. When using systemd-networkd it's
|
||||
# still possible to use this option, but it's recommended to use it in conjunction
|
||||
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
|
||||
networking.useDHCP = lib.mkDefault true;
|
||||
# networking.interfaces.ens18.useDHCP = lib.mkDefault true;
|
||||
|
||||
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
|
||||
}
|
||||
@@ -5,12 +5,14 @@
|
||||
|
||||
{
|
||||
imports = [
|
||||
../template/hardware-configuration.nix
|
||||
./hardware-configuration.nix
|
||||
|
||||
../../system
|
||||
../../common/vm
|
||||
];
|
||||
|
||||
homelab.host.role = "media";
|
||||
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
# Use the systemd-boot EFI boot loader.
|
||||
boot.loader.grub = {
|
||||
@@ -42,10 +44,7 @@
|
||||
};
|
||||
time.timeZone = "Europe/Oslo";
|
||||
|
||||
nix.settings.experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
];
|
||||
|
||||
nix.settings.tarball-ttl = 0;
|
||||
environment.systemPackages = with pkgs; [
|
||||
vim
|
||||
@@ -61,9 +60,8 @@
|
||||
# Or disable the firewall altogether.
|
||||
networking.firewall.enable = false;
|
||||
|
||||
zramSwap = {
|
||||
enable = true;
|
||||
};
|
||||
vault.enable = true;
|
||||
homelab.deploy.enable = true;
|
||||
|
||||
system.stateVersion = "23.11"; # Did you read the comment?
|
||||
}
|
||||
|
||||
hosts/jelly01/hardware-configuration.nix (Normal file), 42 lines changed
@@ -0,0 +1,42 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
modulesPath,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
imports = [
|
||||
(modulesPath + "/profiles/qemu-guest.nix")
|
||||
];
|
||||
boot.initrd.availableKernelModules = [
|
||||
"ata_piix"
|
||||
"uhci_hcd"
|
||||
"virtio_pci"
|
||||
"virtio_scsi"
|
||||
"sd_mod"
|
||||
"sr_mod"
|
||||
];
|
||||
boot.initrd.kernelModules = [ "dm-snapshot" ];
|
||||
boot.kernelModules = [
|
||||
"ptp_kvm"
|
||||
];
|
||||
boot.extraModulePackages = [ ];
|
||||
|
||||
fileSystems."/" = {
|
||||
device = "/dev/disk/by-label/root";
|
||||
fsType = "xfs";
|
||||
};
|
||||
|
||||
swapDevices = [ { device = "/dev/disk/by-label/swap"; } ];
|
||||
|
||||
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
|
||||
# (the default) this is the recommended approach. When using systemd-networkd it's
|
||||
# still possible to use this option, but it's recommended to use it in conjunction
|
||||
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
|
||||
networking.useDHCP = lib.mkDefault true;
|
||||
# networking.interfaces.ens18.useDHCP = lib.mkDefault true;
|
||||
|
||||
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
|
||||
}
|
||||
@@ -1,56 +0,0 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
{
|
||||
imports =
|
||||
[
|
||||
../template/hardware-configuration.nix
|
||||
../../system
|
||||
];
|
||||
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
|
||||
homelab.host.role = "bastion";
|
||||
|
||||
# Use the systemd-boot EFI boot loader.
|
||||
boot.loader.grub.enable = true;
|
||||
boot.loader.grub.device = "/dev/sda";
|
||||
|
||||
networking.hostName = "jump";
|
||||
networking.domain = "home.2rjus.net";
|
||||
networking.useNetworkd = true;
|
||||
networking.useDHCP = false;
|
||||
services.resolved.enable = false;
|
||||
networking.nameservers = [
|
||||
"10.69.13.5"
|
||||
"10.69.13.6"
|
||||
];
|
||||
|
||||
systemd.network.enable = true;
|
||||
systemd.network.networks."ens18" = {
|
||||
matchConfig.Name = "ens18";
|
||||
address = [
|
||||
"10.69.13.10/24"
|
||||
];
|
||||
routes = [
|
||||
{ Gateway = "10.69.13.1"; }
|
||||
];
|
||||
linkConfig.RequiredForOnline = "routable";
|
||||
};
|
||||
time.timeZone = "Europe/Oslo";
|
||||
|
||||
nix.settings.experimental-features = [ "nix-command" "flakes" ];
|
||||
environment.systemPackages = with pkgs; [
|
||||
vim
|
||||
wget
|
||||
git
|
||||
];
|
||||
|
||||
# Open ports in the firewall.
|
||||
# networking.firewall.allowedTCPPorts = [ ... ];
|
||||
# networking.firewall.allowedUDPPorts = [ ... ];
|
||||
# Or disable the firewall altogether.
|
||||
networking.firewall.enable = false;
|
||||
|
||||
system.stateVersion = "23.11"; # Did you read the comment?
|
||||
}
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
{ config, lib, pkgs, modulesPath, ... }:
|
||||
|
||||
{
|
||||
imports =
|
||||
[
|
||||
(modulesPath + "/profiles/qemu-guest.nix")
|
||||
];
|
||||
|
||||
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "virtio_pci" "virtio_scsi" "sd_mod" "sr_mod" ];
|
||||
boot.initrd.kernelModules = [ ];
|
||||
# boot.kernelModules = [ ];
|
||||
# boot.extraModulePackages = [ ];
|
||||
|
||||
fileSystems."/" =
|
||||
{
|
||||
device = "/dev/disk/by-uuid/6889aba9-61ed-4687-ab10-e5cf4017ac8d";
|
||||
fsType = "xfs";
|
||||
};
|
||||
|
||||
fileSystems."/boot" =
|
||||
{
|
||||
device = "/dev/disk/by-uuid/BC07-3B7A";
|
||||
fsType = "vfat";
|
||||
};
|
||||
|
||||
swapDevices =
|
||||
[{ device = "/dev/disk/by-uuid/64e5757b-6625-4dd2-aa2a-66ca93444d23"; }];
|
||||
|
||||
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
|
||||
# (the default) this is the recommended approach. When using systemd-networkd it's
|
||||
# still possible to use this option, but it's recommended to use it in conjunction
|
||||
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
|
||||
# networking.interfaces.ens18.useDHCP = lib.mkDefault true;
|
||||
|
||||
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
|
||||
}
|
||||
@@ -1,25 +1,38 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
imports = [
|
||||
../template/hardware-configuration.nix
|
||||
../template2/hardware-configuration.nix
|
||||
|
||||
../../system
|
||||
../../common/vm
|
||||
../../services/kanidm
|
||||
];
|
||||
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
# Use the systemd-boot EFI boot loader.
|
||||
boot.loader.grub = {
|
||||
enable = true;
|
||||
device = "/dev/sda";
|
||||
configurationLimit = 3;
|
||||
homelab.host = {
|
||||
tier = "prod";
|
||||
role = "auth";
|
||||
};
|
||||
|
||||
networking.hostName = "pgdb1";
|
||||
# DNS CNAME for auth.home.2rjus.net
|
||||
homelab.dns.cnames = [ "auth" ];
|
||||
|
||||
# Enable Vault integration
|
||||
vault.enable = true;
|
||||
|
||||
# Enable remote deployment via NATS
|
||||
homelab.deploy.enable = true;
|
||||
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
boot.loader.grub.enable = true;
|
||||
boot.loader.grub.device = "/dev/vda";
|
||||
|
||||
networking.hostName = "kanidm01";
|
||||
networking.domain = "home.2rjus.net";
|
||||
networking.useNetworkd = true;
|
||||
networking.useDHCP = false;
|
||||
@@ -33,7 +46,7 @@
|
||||
systemd.network.networks."ens18" = {
|
||||
matchConfig.Name = "ens18";
|
||||
address = [
|
||||
"10.69.13.16/24"
|
||||
"10.69.13.23/24"
|
||||
];
|
||||
routes = [
|
||||
{ Gateway = "10.69.13.1"; }
|
||||
@@ -42,10 +55,7 @@
|
||||
};
|
||||
time.timeZone = "Europe/Oslo";
|
||||
|
||||
nix.settings.experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
];
|
||||
|
||||
nix.settings.tarball-ttl = 0;
|
||||
environment.systemPackages = with pkgs; [
|
||||
vim
|
||||
@@ -59,5 +69,5 @@
|
||||
# Or disable the firewall altogether.
|
||||
networking.firewall.enable = false;
|
||||
|
||||
system.stateVersion = "23.11"; # Did you read the comment?
|
||||
}
|
||||
system.stateVersion = "25.11"; # Did you read the comment?
|
||||
}
|
||||
@@ -1,7 +1,5 @@
|
||||
{ ... }:
|
||||
{
|
||||
{ ... }: {
|
||||
imports = [
|
||||
./configuration.nix
|
||||
../../services/monitoring
|
||||
];
|
||||
}
|
||||
}
|
||||
@@ -1,165 +0,0 @@
|
||||
{
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
imports = [
|
||||
../template/hardware-configuration.nix
|
||||
|
||||
../../system
|
||||
../../common/vm
|
||||
];
|
||||
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
# Use the systemd-boot EFI boot loader.
|
||||
boot.loader.grub = {
|
||||
enable = true;
|
||||
device = "/dev/sda";
|
||||
configurationLimit = 3;
|
||||
};
|
||||
|
||||
networking.hostName = "monitoring01";
|
||||
networking.domain = "home.2rjus.net";
|
||||
networking.useNetworkd = true;
|
||||
networking.useDHCP = false;
|
||||
services.resolved.enable = true;
|
||||
networking.nameservers = [
|
||||
"10.69.13.5"
|
||||
"10.69.13.6"
|
||||
];
|
||||
|
||||
systemd.network.enable = true;
|
||||
systemd.network.networks."ens18" = {
|
||||
matchConfig.Name = "ens18";
|
||||
address = [
|
||||
"10.69.13.13/24"
|
||||
];
|
||||
routes = [
|
||||
{ Gateway = "10.69.13.1"; }
|
||||
];
|
||||
linkConfig.RequiredForOnline = "routable";
|
||||
};
|
||||
time.timeZone = "Europe/Oslo";
|
||||
|
||||
nix.settings.experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
];
|
||||
nix.settings.tarball-ttl = 0;
|
||||
environment.systemPackages = with pkgs; [
|
||||
vim
|
||||
wget
|
||||
git
|
||||
sqlite
|
||||
];
|
||||
|
||||
services.qemuGuest.enable = true;
|
||||
|
||||
# Vault secrets management
|
||||
vault.enable = true;
|
||||
homelab.deploy.enable = true;
|
||||
vault.secrets.backup-helper = {
|
||||
secretPath = "shared/backup/password";
|
||||
extractKey = "password";
|
||||
outputDir = "/run/secrets/backup_helper_secret";
|
||||
services = [ "restic-backups-grafana" "restic-backups-grafana-db" ];
|
||||
};
|
||||
|
||||
services.restic.backups.grafana = {
|
||||
repository = "rest:http://10.69.12.52:8000/backup-nix";
|
||||
passwordFile = "/run/secrets/backup_helper_secret";
|
||||
paths = [ "/var/lib/grafana/plugins" ];
|
||||
timerConfig = {
|
||||
OnCalendar = "daily";
|
||||
Persistent = true;
|
||||
RandomizedDelaySec = "2h";
|
||||
};
|
||||
pruneOpts = [
|
||||
"--keep-daily 7"
|
||||
"--keep-weekly 4"
|
||||
"--keep-monthly 6"
|
||||
"--keep-within 1d"
|
||||
];
|
||||
};
|
||||
|
||||
services.restic.backups.grafana-db = {
|
||||
repository = "rest:http://10.69.12.52:8000/backup-nix";
|
||||
passwordFile = "/run/secrets/backup_helper_secret";
|
||||
command = [ "${pkgs.sqlite}/bin/sqlite3" "/var/lib/grafana/data/grafana.db" ".dump" ];
|
||||
timerConfig = {
|
||||
OnCalendar = "daily";
|
||||
Persistent = true;
|
||||
RandomizedDelaySec = "2h";
|
||||
};
|
||||
pruneOpts = [
|
||||
"--keep-daily 7"
|
||||
"--keep-weekly 4"
|
||||
"--keep-monthly 6"
|
||||
"--keep-within 1d"
|
||||
];
|
||||
};
|
||||
|
||||
labmon = {
|
||||
enable = true;
|
||||
|
||||
settings = {
|
||||
ListenAddr = ":9969";
|
||||
Profiling = true;
|
||||
StepMonitors = [
|
||||
{
|
||||
Enabled = true;
|
||||
BaseURL = "https://ca.home.2rjus.net";
|
||||
RootID = "3381bda8015a86b9a3cd1851439d1091890a79005e0f1f7c4301fe4bccc29d80";
|
||||
}
|
||||
];
|
||||
|
||||
TLSConnectionMonitors = [
|
||||
{
|
||||
Enabled = true;
|
||||
Address = "ca.home.2rjus.net:443";
|
||||
Verify = true;
|
||||
Duration = "12h";
|
||||
}
|
||||
{
|
||||
Enabled = true;
|
||||
Address = "jelly.home.2rjus.net:443";
|
||||
Verify = true;
|
||||
Duration = "12h";
|
||||
}
|
||||
{
|
||||
Enabled = true;
|
||||
Address = "grafana.home.2rjus.net:443";
|
||||
Verify = true;
|
||||
Duration = "12h";
|
||||
}
|
||||
{
|
||||
Enabled = true;
|
||||
Address = "prometheus.home.2rjus.net:443";
|
||||
Verify = true;
|
||||
Duration = "12h";
|
||||
}
|
||||
{
|
||||
Enabled = true;
|
||||
Address = "alertmanager.home.2rjus.net:443";
|
||||
Verify = true;
|
||||
Duration = "12h";
|
||||
}
|
||||
{
|
||||
Enabled = true;
|
||||
Address = "pyroscope.home.2rjus.net:443";
|
||||
Verify = true;
|
||||
Duration = "12h";
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
# Open ports in the firewall.
|
||||
# networking.firewall.allowedTCPPorts = [ ... ];
|
||||
# networking.firewall.allowedUDPPorts = [ ... ];
|
||||
# Or disable the firewall altogether.
|
||||
networking.firewall.enable = false;
|
||||
|
||||
system.stateVersion = "23.11"; # Did you read the comment?
|
||||
}
|
||||
@@ -1,25 +1,36 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
imports =
|
||||
[
|
||||
./hardware-configuration.nix
|
||||
imports = [
|
||||
../template2/hardware-configuration.nix
|
||||
|
||||
../../system
|
||||
];
|
||||
|
||||
# Template host - exclude from DNS zone generation
|
||||
homelab.dns.enable = false;
|
||||
../../system
|
||||
../../common/vm
|
||||
];
|
||||
|
||||
homelab.host = {
|
||||
tier = "test";
|
||||
priority = "low";
|
||||
tier = "prod";
|
||||
role = "monitoring";
|
||||
};
|
||||
|
||||
homelab.dns.cnames = [ "monitoring" "alertmanager" "grafana" "grafana-test" "metrics" "vmalert" "loki" ];
|
||||
|
||||
# Enable Vault integration
|
||||
vault.enable = true;
|
||||
|
||||
# Enable remote deployment via NATS
|
||||
homelab.deploy.enable = true;
|
||||
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
boot.loader.grub.enable = true;
|
||||
boot.loader.grub.device = "/dev/sda";
|
||||
networking.hostName = "nixos-template";
|
||||
boot.loader.grub.device = "/dev/vda";
|
||||
|
||||
networking.hostName = "monitoring02";
|
||||
networking.domain = "home.2rjus.net";
|
||||
networking.useNetworkd = true;
|
||||
networking.useDHCP = false;
|
||||
@@ -33,19 +44,18 @@
|
||||
systemd.network.networks."ens18" = {
|
||||
matchConfig.Name = "ens18";
|
||||
address = [
|
||||
"10.69.8.250/24"
|
||||
"10.69.13.24/24"
|
||||
];
|
||||
routes = [
|
||||
{ Gateway = "10.69.8.1"; }
|
||||
{ Gateway = "10.69.13.1"; }
|
||||
];
|
||||
linkConfig.RequiredForOnline = "routable";
|
||||
};
|
||||
time.timeZone = "Europe/Oslo";
|
||||
|
||||
nix.settings.experimental-features = [ "nix-command" "flakes" ];
|
||||
|
||||
nix.settings.tarball-ttl = 0;
|
||||
environment.systemPackages = with pkgs; [
|
||||
age
|
||||
vim
|
||||
wget
|
||||
git
|
||||
@@ -57,6 +67,5 @@
|
||||
# Or disable the firewall altogether.
|
||||
networking.firewall.enable = false;
|
||||
|
||||
system.stateVersion = "23.11"; # Did you read the comment?
|
||||
}
|
||||
|
||||
system.stateVersion = "25.11"; # Did you read the comment?
|
||||
}
|
||||
hosts/monitoring02/default.nix (Normal file), 12 lines changed
@@ -0,0 +1,12 @@
|
||||
{ ... }: {
|
||||
imports = [
|
||||
./configuration.nix
|
||||
../../services/grafana
|
||||
../../services/victoriametrics
|
||||
../../services/loki
|
||||
../../services/monitoring/alerttonotify.nix
|
||||
../../services/monitoring/blackbox.nix
|
||||
../../services/monitoring/exportarr.nix
|
||||
../../services/monitoring/pve.nix
|
||||
];
|
||||
}
|
||||
@@ -5,12 +5,14 @@
|
||||
|
||||
{
|
||||
imports = [
|
||||
../template/hardware-configuration.nix
|
||||
./hardware-configuration.nix
|
||||
|
||||
../../system
|
||||
../../common/vm
|
||||
];
|
||||
|
||||
homelab.host.role = "messaging";
|
||||
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
# Use the systemd-boot EFI boot loader.
|
||||
boot.loader.grub = {
|
||||
@@ -42,10 +44,7 @@
|
||||
};
|
||||
time.timeZone = "Europe/Oslo";
|
||||
|
||||
nix.settings.experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
];
|
||||
|
||||
nix.settings.tarball-ttl = 0;
|
||||
environment.systemPackages = with pkgs; [
|
||||
vim
|
||||
@@ -59,5 +58,8 @@
|
||||
# Or disable the firewall altogether.
|
||||
networking.firewall.enable = false;
|
||||
|
||||
vault.enable = true;
|
||||
homelab.deploy.enable = true;
|
||||
|
||||
system.stateVersion = "23.11"; # Did you read the comment?
|
||||
}
|
||||
|
||||
hosts/nats1/hardware-configuration.nix (Normal file), 42 lines changed
@@ -0,0 +1,42 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
modulesPath,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
imports = [
|
||||
(modulesPath + "/profiles/qemu-guest.nix")
|
||||
];
|
||||
boot.initrd.availableKernelModules = [
|
||||
"ata_piix"
|
||||
"uhci_hcd"
|
||||
"virtio_pci"
|
||||
"virtio_scsi"
|
||||
"sd_mod"
|
||||
"sr_mod"
|
||||
];
|
||||
boot.initrd.kernelModules = [ "dm-snapshot" ];
|
||||
boot.kernelModules = [
|
||||
"ptp_kvm"
|
||||
];
|
||||
boot.extraModulePackages = [ ];
|
||||
|
||||
fileSystems."/" = {
|
||||
device = "/dev/disk/by-label/root";
|
||||
fsType = "xfs";
|
||||
};
|
||||
|
||||
swapDevices = [ { device = "/dev/disk/by-label/swap"; } ];
|
||||
|
||||
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
|
||||
# (the default) this is the recommended approach. When using systemd-networkd it's
|
||||
# still possible to use this option, but it's recommended to use it in conjunction
|
||||
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
|
||||
networking.useDHCP = lib.mkDefault true;
|
||||
# networking.interfaces.ens18.useDHCP = lib.mkDefault true;
|
||||
|
||||
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
|
||||
}
|
||||
@@ -1,6 +0,0 @@
|
||||
{ ... }:
|
||||
{
|
||||
zramSwap = {
|
||||
enable = true;
|
||||
};
|
||||
}
|
||||
hosts/nix-cache02/builder.nix (Normal file), 45 lines changed
@@ -0,0 +1,45 @@
|
||||
{ config, ... }:
|
||||
{
|
||||
# Fetch builder NKey from Vault
|
||||
vault.secrets.builder-nkey = {
|
||||
secretPath = "shared/homelab-deploy/builder-nkey";
|
||||
extractKey = "nkey";
|
||||
outputDir = "/run/secrets/builder-nkey";
|
||||
services = [ "homelab-deploy-builder" ];
|
||||
};
|
||||
|
||||
# Configure the builder service
|
||||
services.homelab-deploy.builder = {
|
||||
enable = true;
|
||||
natsUrl = "nats://nats1.home.2rjus.net:4222";
|
||||
nkeyFile = "/run/secrets/builder-nkey";
|
||||
|
||||
settings.repos = {
|
||||
nixos-servers = {
|
||||
url = "git+https://git.t-juice.club/torjus/nixos-servers.git";
|
||||
defaultBranch = "master";
|
||||
};
|
||||
nixos = {
|
||||
url = "git+https://git.t-juice.club/torjus/nixos.git";
|
||||
defaultBranch = "master";
|
||||
};
|
||||
};
|
||||
|
||||
timeout = 14400;
|
||||
metrics.enable = true;
|
||||
};
|
||||
|
||||
# Expose builder metrics for Prometheus scraping
|
||||
homelab.monitoring.scrapeTargets = [
|
||||
{
|
||||
job_name = "homelab-deploy-builder";
|
||||
port = 9973;
|
||||
}
|
||||
];
|
||||
|
||||
# Ensure builder starts after vault secret is available
|
||||
systemd.services.homelab-deploy-builder = {
|
||||
after = [ "vault-secret-builder-nkey.service" ];
|
||||
requires = [ "vault-secret-builder-nkey.service" ];
|
||||
};
|
||||
}
|
||||
@@ -1,25 +1,36 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
imports = [
|
||||
../template/hardware-configuration.nix
|
||||
../template2/hardware-configuration.nix
|
||||
|
||||
../../system
|
||||
../../common/vm
|
||||
];
|
||||
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
# Use the systemd-boot EFI boot loader.
|
||||
boot.loader.grub = {
|
||||
enable = true;
|
||||
device = "/dev/sda";
|
||||
configurationLimit = 3;
|
||||
homelab.host = {
|
||||
tier = "prod";
|
||||
role = "build-host";
|
||||
};
|
||||
|
||||
networking.hostName = "ca";
|
||||
homelab.dns.cnames = [ "nix-cache" ];
|
||||
|
||||
# Enable Vault integration
|
||||
vault.enable = true;
|
||||
|
||||
# Enable remote deployment via NATS
|
||||
homelab.deploy.enable = true;
|
||||
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
boot.loader.grub.enable = true;
|
||||
boot.loader.grub.device = "/dev/vda";
|
||||
|
||||
networking.hostName = "nix-cache02";
|
||||
networking.domain = "home.2rjus.net";
|
||||
networking.useNetworkd = true;
|
||||
networking.useDHCP = false;
|
||||
@@ -33,7 +44,7 @@
|
||||
systemd.network.networks."ens18" = {
|
||||
matchConfig.Name = "ens18";
|
||||
address = [
|
||||
"10.69.13.12/24"
|
||||
"10.69.13.25/24"
|
||||
];
|
||||
routes = [
|
||||
{ Gateway = "10.69.13.1"; }
|
||||
@@ -42,10 +53,7 @@
|
||||
};
|
||||
time.timeZone = "Europe/Oslo";
|
||||
|
||||
nix.settings.experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
];
|
||||
|
||||
nix.settings.tarball-ttl = 0;
|
||||
environment.systemPackages = with pkgs; [
|
||||
vim
|
||||
@@ -59,5 +67,5 @@
|
||||
# Or disable the firewall altogether.
|
||||
networking.firewall.enable = false;
|
||||
|
||||
system.stateVersion = "23.11"; # Did you read the comment?
|
||||
}
|
||||
system.stateVersion = "25.11"; # Did you read the comment?
|
||||
}
|
||||
@@ -1,9 +1,8 @@
|
||||
{ ... }:
|
||||
{
|
||||
{ ... }: {
|
||||
imports = [
|
||||
./configuration.nix
|
||||
./builder.nix
|
||||
./scheduler.nix
|
||||
../../services/nix-cache
|
||||
../../services/actions-runner
|
||||
./zram.nix
|
||||
];
|
||||
}
|
||||
}
|
||||
hosts/nix-cache02/scheduler.nix (Normal file), 61 lines changed
@@ -0,0 +1,61 @@
|
||||
{ config, pkgs, lib, inputs, ... }:
|
||||
let
|
||||
homelab-deploy = inputs.homelab-deploy.packages.${pkgs.system}.default;
|
||||
|
||||
scheduledBuildScript = pkgs.writeShellApplication {
|
||||
name = "scheduled-build";
|
||||
runtimeInputs = [ homelab-deploy ];
|
||||
text = ''
|
||||
NATS_URL="nats://nats1.home.2rjus.net:4222"
|
||||
NKEY_FILE="/run/secrets/scheduler-nkey"
|
||||
|
||||
echo "Starting scheduled builds at $(date)"
|
||||
|
||||
# Build all nixos-servers hosts
|
||||
homelab-deploy build \
|
||||
--nats-url "$NATS_URL" \
|
||||
--nkey-file "$NKEY_FILE" \
|
||||
nixos-servers --all
|
||||
|
||||
# Build all nixos (gunter) hosts
|
||||
homelab-deploy build \
|
||||
--nats-url "$NATS_URL" \
|
||||
--nkey-file "$NKEY_FILE" \
|
||||
nixos --all
|
||||
|
||||
echo "Scheduled builds completed at $(date)"
|
||||
'';
|
||||
};
|
||||
in
|
||||
{
|
||||
# Fetch scheduler NKey from Vault
|
||||
vault.secrets.scheduler-nkey = {
|
||||
secretPath = "shared/homelab-deploy/scheduler-nkey";
|
||||
extractKey = "nkey";
|
||||
outputDir = "/run/secrets/scheduler-nkey";
|
||||
services = [ "scheduled-build" ];
|
||||
};
|
||||
|
||||
# Timer: every 2 hours
|
||||
systemd.timers.scheduled-build = {
|
||||
description = "Trigger scheduled Nix builds";
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig = {
|
||||
OnCalendar = "*-*-* 00/2:00:00"; # Every 2 hours at :00
|
||||
Persistent = true; # Run missed builds on boot
|
||||
RandomizedDelaySec = "5m"; # Slight jitter
|
||||
};
|
||||
};
|
||||
|
||||
# Service: oneshot that triggers builds
|
||||
systemd.services.scheduled-build = {
|
||||
description = "Trigger builds for all hosts via NATS";
|
||||
after = [ "network-online.target" "vault-secret-scheduler-nkey.service" ];
|
||||
requires = [ "vault-secret-scheduler-nkey.service" ];
|
||||
wants = [ "network-online.target" ];
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
ExecStart = lib.getExe scheduledBuildScript;
|
||||
};
|
||||
};
|
||||
}
|
||||
hosts/nrec-nixos01/configuration.nix (Normal file), 78 lines changed
@@ -0,0 +1,78 @@
|
||||
{
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
services.openssh = {
|
||||
enable = true;
|
||||
settings = {
|
||||
PermitRootLogin = lib.mkForce "no";
|
||||
PasswordAuthentication = false;
|
||||
};
|
||||
};
|
||||
|
||||
users.users.nixos = {
|
||||
isNormalUser = true;
|
||||
extraGroups = [ "wheel" ];
|
||||
shell = pkgs.zsh;
|
||||
openssh.authorizedKeys.keys = [
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAwfb2jpKrBnCw28aevnH8HbE5YbcMXpdaVv2KmueDu6 torjus@gunter"
|
||||
];
|
||||
};
|
||||
security.sudo.wheelNeedsPassword = false;
|
||||
programs.zsh.enable = true;
|
||||
|
||||
homelab.dns.enable = false;
|
||||
homelab.monitoring.enable = false;
|
||||
homelab.host.labels.ansible = "false";
|
||||
|
||||
fileSystems."/" = {
|
||||
device = "/dev/disk/by-label/nixos";
|
||||
fsType = "ext4";
|
||||
autoResize = true;
|
||||
};
|
||||
|
||||
boot.loader.grub.enable = true;
|
||||
boot.loader.grub.device = "/dev/vda";
|
||||
networking.hostName = "nrec-nixos01";
|
||||
networking.useNetworkd = true;
|
||||
networking.useDHCP = false;
|
||||
services.resolved.enable = true;
|
||||
|
||||
systemd.network.enable = true;
|
||||
systemd.network.networks."ens3" = {
|
||||
matchConfig.Name = "ens3";
|
||||
networkConfig.DHCP = "ipv4";
|
||||
linkConfig.RequiredForOnline = "routable";
|
||||
};
|
||||
time.timeZone = "Europe/Oslo";
|
||||
|
||||
networking.firewall.enable = true;
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
22
|
||||
80
|
||||
443
|
||||
];
|
||||
|
||||
nix.settings.substituters = [
|
||||
"https://cache.nixos.org"
|
||||
];
|
||||
nix.settings.trusted-public-keys = [
|
||||
"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
|
||||
];
|
||||
|
||||
services.caddy = {
|
||||
enable = true;
|
||||
virtualHosts."nrec-nixos01.t-juice.club" = {
|
||||
extraConfig = ''
|
||||
reverse_proxy 127.0.0.1:3000
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
zramSwap.enable = true;
|
||||
|
||||
system.stateVersion = "25.11";
|
||||
}
|
||||
hosts/nrec-nixos01/default.nix (Normal file), 9 lines changed
@@ -0,0 +1,9 @@
|
||||
{ modulesPath, ... }:
|
||||
{
|
||||
imports = [
|
||||
./configuration.nix
|
||||
../../system/packages.nix
|
||||
../../services/forgejo
|
||||
(modulesPath + "/profiles/qemu-guest.nix")
|
||||
];
|
||||
}
|
||||
@@ -7,23 +7,38 @@
|
||||
|
||||
{
|
||||
imports = [
|
||||
../template/hardware-configuration.nix
|
||||
../template2/hardware-configuration.nix
|
||||
|
||||
../../system
|
||||
../../common/vm
|
||||
|
||||
# DNS services
|
||||
../../services/ns/master-authorative.nix
|
||||
../../services/ns/resolver.nix
|
||||
../../common/vm
|
||||
];
|
||||
|
||||
# Host metadata
|
||||
homelab.host = {
|
||||
tier = "prod";
|
||||
role = "dns";
|
||||
labels.dns_role = "primary";
|
||||
};
|
||||
|
||||
# Enable Vault integration
|
||||
vault.enable = true;
|
||||
|
||||
# Enable remote deployment via NATS
|
||||
homelab.deploy.enable = true;
|
||||
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
# Use the systemd-boot EFI boot loader.
|
||||
boot.loader.grub.enable = true;
|
||||
boot.loader.grub.device = "/dev/sda";
|
||||
boot.loader.grub.device = "/dev/vda";
|
||||
|
||||
networking.hostName = "ns1";
|
||||
networking.domain = "home.2rjus.net";
|
||||
networking.useNetworkd = true;
|
||||
networking.useDHCP = false;
|
||||
# Disable resolved - conflicts with Unbound resolver
|
||||
services.resolved.enable = false;
|
||||
networking.nameservers = [
|
||||
"10.69.13.5"
|
||||
@@ -43,17 +58,6 @@
|
||||
};
|
||||
time.timeZone = "Europe/Oslo";
|
||||
|
||||
nix.settings.experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
];
|
||||
vault.enable = true;
|
||||
homelab.deploy.enable = true;
|
||||
|
||||
homelab.host = {
|
||||
role = "dns";
|
||||
labels.dns_role = "primary";
|
||||
};
|
||||
|
||||
nix.settings.tarball-ttl = 0;
|
||||
environment.systemPackages = with pkgs; [
|
||||
@@ -68,5 +72,5 @@
|
||||
# Or disable the firewall altogether.
|
||||
networking.firewall.enable = false;
|
||||
|
||||
system.stateVersion = "23.11"; # Did you read the comment?
|
||||
}
|
||||
system.stateVersion = "25.11"; # Did you read the comment?
|
||||
}
|
||||
@@ -2,4 +2,4 @@
|
||||
imports = [
|
||||
./configuration.nix
|
||||
];
|
||||
}
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
{ config, lib, pkgs, modulesPath, ... }:
|
||||
|
||||
{
|
||||
imports =
|
||||
[
|
||||
(modulesPath + "/profiles/qemu-guest.nix")
|
||||
];
|
||||
|
||||
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "virtio_pci" "virtio_scsi" "sd_mod" "sr_mod" ];
|
||||
boot.initrd.kernelModules = [ ];
|
||||
# boot.kernelModules = [ ];
|
||||
# boot.extraModulePackages = [ ];
|
||||
|
||||
fileSystems."/" =
|
||||
{
|
||||
device = "/dev/disk/by-uuid/6889aba9-61ed-4687-ab10-e5cf4017ac8d";
|
||||
fsType = "xfs";
|
||||
};
|
||||
|
||||
fileSystems."/boot" =
|
||||
{
|
||||
device = "/dev/disk/by-uuid/BC07-3B7A";
|
||||
fsType = "vfat";
|
||||
};
|
||||
|
||||
swapDevices =
|
||||
[{ device = "/dev/disk/by-uuid/64e5757b-6625-4dd2-aa2a-66ca93444d23"; }];
|
||||
|
||||
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
|
||||
# (the default) this is the recommended approach. When using systemd-networkd it's
|
||||
# still possible to use this option, but it's recommended to use it in conjunction
|
||||
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
|
||||
# networking.interfaces.ens18.useDHCP = lib.mkDefault true;
|
||||
|
||||
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
|
||||
}
|
||||
@@ -7,23 +7,38 @@
|
||||
|
||||
{
|
||||
imports = [
|
||||
../template/hardware-configuration.nix
|
||||
../template2/hardware-configuration.nix
|
||||
|
||||
../../system
|
||||
../../common/vm
|
||||
|
||||
# DNS services
|
||||
../../services/ns/secondary-authorative.nix
|
||||
../../services/ns/resolver.nix
|
||||
../../common/vm
|
||||
];
|
||||
|
||||
# Host metadata
|
||||
homelab.host = {
|
||||
tier = "prod";
|
||||
role = "dns";
|
||||
labels.dns_role = "secondary";
|
||||
};
|
||||
|
||||
# Enable Vault integration
|
||||
vault.enable = true;
|
||||
|
||||
# Enable remote deployment via NATS
|
||||
homelab.deploy.enable = true;
|
||||
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
# Use the systemd-boot EFI boot loader.
|
||||
boot.loader.grub.enable = true;
|
||||
boot.loader.grub.device = "/dev/sda";
|
||||
boot.loader.grub.device = "/dev/vda";
|
||||
|
||||
networking.hostName = "ns2";
|
||||
networking.domain = "home.2rjus.net";
|
||||
networking.useNetworkd = true;
|
||||
networking.useDHCP = false;
|
||||
# Disable resolved - conflicts with Unbound resolver
|
||||
services.resolved.enable = false;
|
||||
networking.nameservers = [
|
||||
"10.69.13.5"
|
||||
@@ -43,18 +58,8 @@
|
||||
};
|
||||
time.timeZone = "Europe/Oslo";
|
||||
|
||||
nix.settings.experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
];
|
||||
vault.enable = true;
|
||||
homelab.deploy.enable = true;
|
||||
|
||||
homelab.host = {
|
||||
role = "dns";
|
||||
labels.dns_role = "secondary";
|
||||
};
|
||||
|
||||
nix.settings.tarball-ttl = 0;
|
||||
environment.systemPackages = with pkgs; [
|
||||
vim
|
||||
wget
|
||||
@@ -67,5 +72,5 @@
|
||||
# Or disable the firewall altogether.
|
||||
networking.firewall.enable = false;
|
||||
|
||||
system.stateVersion = "23.11"; # Did you read the comment?
|
||||
}
|
||||
system.stateVersion = "25.11"; # Did you read the comment?
|
||||
}
|
||||
@@ -2,4 +2,4 @@
|
||||
imports = [
|
||||
./configuration.nix
|
||||
];
|
||||
}
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
{ config, lib, pkgs, modulesPath, ... }:
|
||||
|
||||
{
|
||||
imports =
|
||||
[
|
||||
(modulesPath + "/profiles/qemu-guest.nix")
|
||||
];
|
||||
|
||||
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "virtio_pci" "virtio_scsi" "sd_mod" "sr_mod" ];
|
||||
boot.initrd.kernelModules = [ ];
|
||||
# boot.kernelModules = [ ];
|
||||
# boot.extraModulePackages = [ ];
|
||||
|
||||
fileSystems."/" =
|
||||
{
|
||||
device = "/dev/disk/by-uuid/6889aba9-61ed-4687-ab10-e5cf4017ac8d";
|
||||
fsType = "xfs";
|
||||
};
|
||||
|
||||
fileSystems."/boot" =
|
||||
{
|
||||
device = "/dev/disk/by-uuid/BC07-3B7A";
|
||||
fsType = "vfat";
|
||||
};
|
||||
|
||||
swapDevices =
|
||||
[{ device = "/dev/disk/by-uuid/64e5757b-6625-4dd2-aa2a-66ca93444d23"; }];
|
||||
|
||||
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
|
||||
# (the default) this is the recommended approach. When using systemd-networkd it's
|
||||
# still possible to use this option, but it's recommended to use it in conjunction
|
||||
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
|
||||
# networking.interfaces.ens18.useDHCP = lib.mkDefault true;
|
||||
|
||||
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
|
||||
}
|
||||
hosts/openstack-template/configuration.nix (Normal file), 72 lines changed
@@ -0,0 +1,72 @@
|
||||
{
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
services.openssh = {
|
||||
enable = true;
|
||||
settings = {
|
||||
PermitRootLogin = lib.mkForce "no";
|
||||
PasswordAuthentication = false;
|
||||
};
|
||||
};
|
||||
|
||||
users.users.nixos = {
|
||||
isNormalUser = true;
|
||||
extraGroups = [ "wheel" ];
|
||||
shell = pkgs.zsh;
|
||||
openssh.authorizedKeys.keys = [
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAwfb2jpKrBnCw28aevnH8HbE5YbcMXpdaVv2KmueDu6 torjus@gunter"
|
||||
];
|
||||
};
|
||||
security.sudo.wheelNeedsPassword = false;
|
||||
programs.zsh.enable = true;
|
||||
|
||||
homelab.dns.enable = false;
|
||||
homelab.monitoring.enable = false;
|
||||
homelab.host.labels.ansible = "false";
|
||||
|
||||
# Minimal fileSystems for evaluation; openstack-config.nix overrides this at image build time
|
||||
fileSystems."/" = {
|
||||
device = lib.mkDefault "/dev/vda1";
|
||||
fsType = lib.mkDefault "ext4";
|
||||
};
|
||||
|
||||
boot.loader.grub.enable = true;
|
||||
boot.loader.grub.device = "/dev/vda";
|
||||
networking.hostName = "nixos-openstack-template";
|
||||
networking.useNetworkd = true;
|
||||
networking.useDHCP = false;
|
||||
services.resolved.enable = true;
|
||||
|
||||
systemd.network.enable = true;
|
||||
systemd.network.networks."ens3" = {
|
||||
matchConfig.Name = "ens3";
|
||||
networkConfig.DHCP = "ipv4";
|
||||
linkConfig.RequiredForOnline = "routable";
|
||||
};
|
||||
time.timeZone = "Europe/Oslo";
|
||||
|
||||
networking.firewall.enable = true;
|
||||
networking.firewall.allowedTCPPorts = [ 22 ];
|
||||
|
||||
nix.settings.substituters = [
|
||||
"https://cache.nixos.org"
|
||||
];
|
||||
nix.settings.trusted-public-keys = [
|
||||
"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
|
||||
];
|
||||
|
||||
environment.systemPackages = with pkgs; [
|
||||
age
|
||||
vim
|
||||
wget
|
||||
git
|
||||
];
|
||||
|
||||
zramSwap.enable = true;
|
||||
|
||||
system.stateVersion = "25.11";
|
||||
}
|
||||
@@ -2,6 +2,6 @@
|
||||
{
|
||||
imports = [
|
||||
./configuration.nix
|
||||
../../services/ca
|
||||
../../system/packages.nix
|
||||
];
|
||||
}
|
||||
hosts/pn01/configuration.nix (Normal file), 54 lines changed
@@ -0,0 +1,54 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
imports = [
|
||||
./hardware-configuration.nix
|
||||
../../system
|
||||
];
|
||||
|
||||
boot.loader.systemd-boot.enable = true;
|
||||
boot.loader.systemd-boot.memtest86.enable = true;
|
||||
boot.loader.efi.canTouchEfiVariables = true;
|
||||
|
||||
networking.hostName = "pn01";
|
||||
networking.domain = "home.2rjus.net";
|
||||
networking.useNetworkd = true;
|
||||
networking.useDHCP = false;
|
||||
networking.firewall.enable = false;
|
||||
services.resolved.enable = true;
|
||||
networking.nameservers = [
|
||||
"10.69.13.5"
|
||||
"10.69.13.6"
|
||||
];
|
||||
|
||||
systemd.network.enable = true;
|
||||
systemd.network.networks."enp2s0" = {
|
||||
matchConfig.Name = "enp2s0";
|
||||
address = [
|
||||
"10.69.12.60/24"
|
||||
];
|
||||
routes = [
|
||||
{ Gateway = "10.69.12.1"; }
|
||||
];
|
||||
linkConfig.RequiredForOnline = "routable";
|
||||
};
|
||||
|
||||
time.timeZone = "Europe/Oslo";
|
||||
|
||||
homelab.host = {
|
||||
tier = "test";
|
||||
priority = "low";
|
||||
role = "compute";
|
||||
};
|
||||
|
||||
vault.enable = true;
|
||||
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
|
||||
system.stateVersion = "25.11";
|
||||
}
|
||||
hosts/pn01/hardware-configuration.nix (Normal file), 33 lines changed
@@ -0,0 +1,33 @@
|
||||
# Do not modify this file! It was generated by ‘nixos-generate-config’
|
||||
# and may be overwritten by future invocations. Please make changes
|
||||
# to /etc/nixos/configuration.nix instead.
|
||||
{ config, lib, pkgs, modulesPath, ... }:
|
||||
|
||||
{
|
||||
imports =
|
||||
[ (modulesPath + "/installer/scan/not-detected.nix")
|
||||
];
|
||||
|
||||
boot.initrd.availableKernelModules = [ "xhci_pci" "nvme" "ahci" "usb_storage" "usbhid" "sd_mod" "rtsx_usb_sdmmc" ];
|
||||
boot.initrd.kernelModules = [ ];
|
||||
boot.kernelModules = [ "kvm-amd" ];
|
||||
boot.extraModulePackages = [ ];
|
||||
|
||||
fileSystems."/" =
|
||||
{ device = "/dev/disk/by-uuid/9444cf54-80e0-4315-adca-8ddd5037217c";
|
||||
fsType = "ext4";
|
||||
};
|
||||
|
||||
fileSystems."/boot" =
|
||||
{ device = "/dev/disk/by-uuid/D897-146F";
|
||||
fsType = "vfat";
|
||||
options = [ "fmask=0022" "dmask=0022" ];
|
||||
};
|
||||
|
||||
swapDevices =
|
||||
[ { device = "/dev/disk/by-uuid/6c1e775f-342e-463a-a7f9-d7ce6593a482"; }
|
||||
];
|
||||
|
||||
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
|
||||
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
|
||||
}
|
||||
hosts/pn02/configuration.nix (Normal file), 61 lines changed
@@ -0,0 +1,61 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
imports = [
|
||||
./hardware-configuration.nix
|
||||
../../system
|
||||
];
|
||||
|
||||
boot.loader.systemd-boot.enable = true;
|
||||
boot.loader.systemd-boot.memtest86.enable = true;
|
||||
boot.loader.efi.canTouchEfiVariables = true;
|
||||
boot.blacklistedKernelModules = [ "amdgpu" ];
|
||||
boot.kernelParams = [ "panic=10" "nmi_watchdog=1" "processor.max_cstate=1" ];
|
||||
boot.kernel.sysctl."kernel.softlockup_panic" = 1;
|
||||
boot.kernel.sysctl."kernel.hardlockup_panic" = 1;
|
||||
|
||||
hardware.rasdaemon.enable = true;
|
||||
hardware.rasdaemon.record = true;
|
||||
|
||||
networking.hostName = "pn02";
|
||||
networking.domain = "home.2rjus.net";
|
||||
networking.useNetworkd = true;
|
||||
networking.useDHCP = false;
|
||||
networking.firewall.enable = false;
|
||||
services.resolved.enable = true;
|
||||
networking.nameservers = [
|
||||
"10.69.13.5"
|
||||
"10.69.13.6"
|
||||
];
|
||||
|
||||
systemd.network.enable = true;
|
||||
systemd.network.networks."enp2s0" = {
|
||||
matchConfig.Name = "enp2s0";
|
||||
address = [
|
||||
"10.69.12.61/24"
|
||||
];
|
||||
routes = [
|
||||
{ Gateway = "10.69.12.1"; }
|
||||
];
|
||||
linkConfig.RequiredForOnline = "routable";
|
||||
};
|
||||
|
||||
time.timeZone = "Europe/Oslo";
|
||||
|
||||
homelab.host = {
|
||||
tier = "test";
|
||||
priority = "low";
|
||||
role = "compute";
|
||||
};
|
||||
|
||||
vault.enable = true;
|
||||
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
|
||||
system.stateVersion = "25.11";
|
||||
}
|
||||
@@ -1,7 +1,5 @@
|
||||
{ ... }:
|
||||
{
|
||||
{ ... }: {
|
||||
imports = [
|
||||
./configuration.nix
|
||||
../../services/postgres
|
||||
];
|
||||
}
|
||||
hosts/pn02/hardware-configuration.nix (Normal file), 33 lines changed
@@ -0,0 +1,33 @@
|
||||
# Do not modify this file! It was generated by ‘nixos-generate-config’
|
||||
# and may be overwritten by future invocations. Please make changes
|
||||
# to /etc/nixos/configuration.nix instead.
|
||||
{ config, lib, pkgs, modulesPath, ... }:
|
||||
|
||||
{
|
||||
imports =
|
||||
[ (modulesPath + "/installer/scan/not-detected.nix")
|
||||
];
|
||||
|
||||
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usb_storage" "usbhid" "sd_mod" "rtsx_usb_sdmmc" ];
|
||||
boot.initrd.kernelModules = [ ];
|
||||
boot.kernelModules = [ "kvm-amd" ];
|
||||
boot.extraModulePackages = [ ];
|
||||
|
||||
fileSystems."/" =
|
||||
{ device = "/dev/disk/by-uuid/1d28b629-51ae-4f0e-b440-9388c2e48413";
|
||||
fsType = "ext4";
|
||||
};
|
||||
|
||||
fileSystems."/boot" =
|
||||
{ device = "/dev/disk/by-uuid/A5A7-C7B2";
|
||||
fsType = "vfat";
|
||||
options = [ "fmask=0022" "dmask=0022" ];
|
||||
};
|
||||
|
||||
swapDevices =
|
||||
[ { device = "/dev/disk/by-uuid/f2570894-0922-4746-84c7-2b2fe7601ea1"; }
|
||||
];
|
||||
|
||||
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
|
||||
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
{ pkgs, ... }:
|
||||
let
|
||||
prepare-host-script = pkgs.writeShellApplication {
|
||||
name = "prepare-host.sh";
|
||||
runtimeInputs = [ pkgs.age ];
|
||||
text = ''
|
||||
echo "Removing machine-id"
|
||||
rm -f /etc/machine-id || true
|
||||
|
||||
echo "Removing SSH host keys"
|
||||
rm -f /etc/ssh/ssh_host_* || true
|
||||
|
||||
echo "Restarting SSH"
|
||||
systemctl restart sshd
|
||||
|
||||
echo "Removing temporary files"
|
||||
rm -rf /tmp/* || true
|
||||
|
||||
echo "Removing logs"
|
||||
journalctl --rotate || true
|
||||
journalctl --vacuum-time=1s || true
|
||||
|
||||
echo "Removing cache"
|
||||
rm -rf /var/cache/* || true
|
||||
|
||||
echo "Generate age key"
|
||||
rm -rf /var/lib/sops-nix || true
|
||||
mkdir -p /var/lib/sops-nix
|
||||
age-keygen -o /var/lib/sops-nix/key.txt
|
||||
'';
|
||||
};
|
||||
in
|
||||
{
|
||||
environment.systemPackages = [ prepare-host-script ];
|
||||
users.motd = "Prepare host by running 'prepare-host.sh'.";
|
||||
}
|
||||
@@ -6,22 +6,79 @@ let
text = ''
set -euo pipefail

LOKI_URL="https://loki.home.2rjus.net/loki/api/v1/push"
LOKI_AUTH_FILE="/run/secrets/promtail-loki-auth"

# Send a log entry to Loki with bootstrap status
# Usage: log_to_loki <stage> <message>
# Fails silently if Loki is unreachable
log_to_loki() {
local stage="$1"
local message="$2"
local timestamp_ns
timestamp_ns="$(date +%s)000000000"

local payload
payload=$(jq -n \
--arg host "$HOSTNAME" \
--arg stage "$stage" \
--arg branch "''${BRANCH:-master}" \
--arg ts "$timestamp_ns" \
--arg msg "$message" \
'{
streams: [{
stream: {
job: "bootstrap",
hostname: $host,
stage: $stage,
branch: $branch
},
values: [[$ts, $msg]]
}]
}')

local auth_args=()
if [[ -f "$LOKI_AUTH_FILE" ]]; then
auth_args=(-u "promtail:$(cat "$LOKI_AUTH_FILE")")
fi

curl -s --connect-timeout 2 --max-time 5 \
-X POST \
"''${auth_args[@]}" \
-H "Content-Type: application/json" \
-d "$payload" \
"$LOKI_URL" >/dev/null 2>&1 || true
}
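# Illustrative usage (not part of the diff): a call like
#   log_to_loki "network_ok" "Network connectivity confirmed"
# pushes one stream labelled {job="bootstrap", hostname=$HOSTNAME, stage="network_ok", branch=$BRANCH}
# with a single [timestamp_ns, message] value pair to $LOKI_URL, adding basic auth
# only when the promtail auth secret file is present.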

echo "================================================================================"
echo " NIXOS BOOTSTRAP IN PROGRESS"
echo "================================================================================"
echo ""

# Read hostname set by cloud-init (from Terraform VM name via user-data)
# Cloud-init sets the system hostname from user-data.txt, so we read it from hostnamectl
HOSTNAME=$(hostnamectl hostname)
echo "DEBUG: Hostname from hostnamectl: '$HOSTNAME'"
# Read git branch from environment, default to master
BRANCH="''${NIXOS_FLAKE_BRANCH:-master}"

echo "Hostname: $HOSTNAME"
echo ""
echo "Starting NixOS bootstrap for host: $HOSTNAME"

log_to_loki "starting" "Bootstrap starting for $HOSTNAME (branch: $BRANCH)"

echo "Waiting for network connectivity..."

# Verify we can reach the git server via HTTPS (doesn't respond to ping)
if ! curl -s --connect-timeout 5 --max-time 10 https://git.t-juice.club >/dev/null 2>&1; then
echo "ERROR: Cannot reach git.t-juice.club via HTTPS"
echo "Check network configuration and DNS settings"
log_to_loki "failed" "Network check failed - cannot reach git.t-juice.club"
exit 1
fi

echo "Network connectivity confirmed"
log_to_loki "network_ok" "Network connectivity confirmed"

# Unwrap Vault token and store AppRole credentials (if provided)
if [ -n "''${VAULT_WRAPPED_TOKEN:-}" ]; then
@@ -50,6 +107,7 @@ let
chmod 600 /var/lib/vault/approle/secret-id

echo "Vault credentials unwrapped and stored successfully"
log_to_loki "vault_ok" "Vault credentials unwrapped and stored"
else
echo "WARNING: Failed to unwrap Vault token"
if [ -n "$UNWRAP_RESPONSE" ]; then
@@ -63,17 +121,17 @@ let
echo "To regenerate token, run: create-host --hostname $HOSTNAME --force"
echo ""
echo "Vault secrets will not be available, but continuing bootstrap..."
log_to_loki "vault_warn" "Failed to unwrap Vault token - continuing without secrets"
fi
else
echo "No Vault wrapped token provided (VAULT_WRAPPED_TOKEN not set)"
echo "Skipping Vault credential setup"
log_to_loki "vault_skip" "No Vault token provided - skipping credential setup"
fi

echo "Fetching and building NixOS configuration from flake..."

# Read git branch from environment, default to master
BRANCH="''${NIXOS_FLAKE_BRANCH:-master}"
echo "Using git branch: $BRANCH"
log_to_loki "building" "Starting nixos-rebuild boot"

# Build and activate the host-specific configuration
FLAKE_URL="git+https://git.t-juice.club/torjus/nixos-servers.git?ref=$BRANCH#''${HOSTNAME}"
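# Illustrative expansion (not part of the diff): with HOSTNAME=pn02 and the default branch,
# FLAKE_URL becomes git+https://git.t-juice.club/torjus/nixos-servers.git?ref=master#pn02,
# i.e. nixos-rebuild builds the flake's nixosConfigurations.pn02 output.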
@@ -81,18 +139,30 @@
if nixos-rebuild boot --flake "$FLAKE_URL"; then
echo "Successfully built configuration for $HOSTNAME"
echo "Rebooting into new configuration..."
log_to_loki "success" "Build successful - rebooting into new configuration"
sleep 2
systemctl reboot
else
echo "ERROR: nixos-rebuild failed for $HOSTNAME"
echo "Check that flake has configuration for this hostname"
echo "Manual intervention required - system will not reboot"
log_to_loki "failed" "nixos-rebuild failed - manual intervention required"
exit 1
fi
'';
};
in
{
# Custom greeting line to indicate this is a bootstrap image
services.getty.greetingLine = lib.mkForce ''
================================================================================
BOOTSTRAP IMAGE - NixOS \V (\l)
================================================================================

Bootstrap service is running. Logs are displayed on tty1.
Check status: journalctl -fu nixos-bootstrap
'';

systemd.services."nixos-bootstrap" = {
description = "Bootstrap NixOS configuration from flake on first boot";

@@ -107,12 +177,12 @@ in
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
ExecStart = "${bootstrap-script}/bin/nixos-bootstrap";
ExecStart = lib.getExe bootstrap-script;

# Read environment variables from cloud-init (set by cloud-init write_files)
EnvironmentFile = "-/run/cloud-init-env";

# Logging to journald
# Log to journal and console
StandardOutput = "journal+console";
StandardError = "journal+console";
};

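A quick note on the ExecStart change above (a sketch, not part of the diff): writeShellApplication sets meta.mainProgram to the script's name, so lib.getExe resolves to the same path that was previously spelled out by hand:

lib.getExe bootstrap-script
# is roughly "${bootstrap-script}/bin/${bootstrap-script.meta.mainProgram}"
# and, since name = "nixos-bootstrap", evaluates to "${bootstrap-script}/bin/nixos-bootstrap".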
@@ -35,6 +35,7 @@
homelab.host = {
tier = "test";
priority = "low";
labels.ansible = "false"; # Exclude from Ansible inventory
};

boot.loader.grub.enable = true;
@@ -53,11 +54,16 @@
};
time.timeZone = "Europe/Oslo";

nix.settings.experimental-features = [
"nix-command"
"flakes"
];

nix.settings.tarball-ttl = 0;
nix.settings.substituters = [
"https://nix-cache.home.2rjus.net"
"https://cache.nixos.org"
];
nix.settings.trusted-public-keys = [
"nix-cache.home.2rjus.net-1:2kowZOG6pvhoK4AHVO3alBlvcghH20wchzoR0V86UWI="
"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
];
environment.systemPackages = with pkgs; [
age
vim
@@ -71,5 +77,8 @@
# Or disable the firewall altogether.
networking.firewall.enable = false;

# Compressed swap in RAM - prevents OOM during bootstrap nixos-rebuild
zramSwap.enable = true;

system.stateVersion = "25.11";
}

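The zramSwap line above can be tuned further if the bootstrap rebuild still runs out of memory; a minimal sketch with the usual NixOS defaults spelled out (illustrative values, not part of the diff):

zramSwap = {
  enable = true;
  memoryPercent = 50;   # share of RAM exposed as compressed swap
  algorithm = "zstd";   # compression algorithm used by the zram device
};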
@@ -2,7 +2,6 @@
let
prepare-host-script = pkgs.writeShellApplication {
name = "prepare-host.sh";
runtimeInputs = [ pkgs.age ];
text = ''
echo "Removing machine-id"
rm -f /etc/machine-id || true
@@ -22,11 +21,6 @@ let

echo "Removing cache"
rm -rf /var/cache/* || true

echo "Generate age key"
rm -rf /var/lib/sops-nix || true
mkdir -p /var/lib/sops-nix
age-keygen -o /var/lib/sops-nix/key.txt
'';
};
in

@@ -11,11 +11,12 @@

../../system
../../common/vm
../../common/ssh-audit.nix
];

# Host metadata (adjust as needed)
homelab.host = {
tier = "test"; # Start in test tier, move to prod after validation
tier = "test";
role = "test";
};

# Enable Vault integration
@@ -24,6 +25,9 @@
# Enable remote deployment via NATS
homelab.deploy.enable = true;

# Enable Kanidm PAM/NSS for central authentication
homelab.kanidm.enable = true;

nixpkgs.config.allowUnfree = true;
boot.loader.grub.enable = true;
boot.loader.grub.device = "/dev/vda";
@@ -51,10 +55,7 @@
};
time.timeZone = "Europe/Oslo";

nix.settings.experimental-features = [
"nix-command"
"flakes"
];

nix.settings.tarball-ttl = 0;
environment.systemPackages = with pkgs; [
vim
@@ -62,6 +63,39 @@
git
];

# Test nginx with ACME certificate from OpenBao PKI
services.nginx = {
enable = true;
virtualHosts."testvm01.home.2rjus.net" = {
forceSSL = true;
enableACME = true;
locations."/" = {
root = pkgs.writeTextDir "index.html" ''
<!DOCTYPE html>
<html>
<head>
<title>testvm01 - ACME Test</title>
<style>
body { font-family: monospace; max-width: 600px; margin: 50px auto; padding: 20px; }
.joke { background: #f0f0f0; padding: 20px; border-radius: 8px; margin: 20px 0; }
.punchline { margin-top: 15px; font-weight: bold; }
</style>
</head>
<body>
<h1>OpenBao PKI ACME Test</h1>
<p>If you're seeing this over HTTPS, the migration worked!</p>
<div class="joke">
<p>Why do programmers prefer dark mode?</p>
<p class="punchline">Because light attracts bugs.</p>
</div>
<p><small>Certificate issued by: vault.home.2rjus.net</small></p>
</body>
</html>
'';
};
};
};

# Open ports in the firewall.
# networking.firewall.allowedTCPPorts = [ ... ];
# networking.firewall.allowedUDPPorts = [ ... ];

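For context on the enableACME vhost above: per-host ACME in NixOS also needs the global security.acme settings to point at the internal CA. A minimal sketch, assuming the OpenBao PKI exposes an ACME directory on vault.home.2rjus.net (the endpoint path and email are assumptions, not taken from the diff):

security.acme = {
  acceptTerms = true;
  defaults = {
    email = "admin@home.2rjus.net"; # placeholder
    # ACME directory served by the OpenBao/Vault PKI secrets engine (path assumed)
    server = "https://vault.home.2rjus.net:8200/v1/pki_int/acme/directory";
  };
};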
@@ -11,11 +11,12 @@

../../system
../../common/vm
../../common/ssh-audit.nix
];

# Host metadata (adjust as needed)
homelab.host = {
tier = "test"; # Start in test tier, move to prod after validation
tier = "test";
role = "test";
};

# Enable Vault integration
@@ -24,6 +25,9 @@
# Enable remote deployment via NATS
homelab.deploy.enable = true;

# Enable Kanidm PAM/NSS for central authentication
homelab.kanidm.enable = true;

nixpkgs.config.allowUnfree = true;
boot.loader.grub.enable = true;
boot.loader.grub.device = "/dev/vda";
@@ -51,10 +55,7 @@
};
time.timeZone = "Europe/Oslo";

nix.settings.experimental-features = [
"nix-command"
"flakes"
];

nix.settings.tarball-ttl = 0;
environment.systemPackages = with pkgs; [
vim

@@ -11,11 +11,12 @@

../../system
../../common/vm
../../common/ssh-audit.nix
];

# Host metadata (adjust as needed)
homelab.host = {
tier = "test"; # Start in test tier, move to prod after validation
tier = "test";
role = "test";
};

# Enable Vault integration
@@ -24,6 +25,9 @@
# Enable remote deployment via NATS
homelab.deploy.enable = true;

# Enable Kanidm PAM/NSS for central authentication
homelab.kanidm.enable = true;

nixpkgs.config.allowUnfree = true;
boot.loader.grub.enable = true;
boot.loader.grub.device = "/dev/vda";
@@ -51,10 +55,7 @@
};
time.timeZone = "Europe/Oslo";

nix.settings.experimental-features = [
"nix-command"
"flakes"
];

nix.settings.tarball-ttl = 0;
environment.systemPackages = with pkgs; [
vim

@@ -45,10 +45,7 @@
};
time.timeZone = "Europe/Oslo";

nix.settings.experimental-features = [
"nix-command"
"flakes"
];

nix.settings.tarball-ttl = 0;
environment.systemPackages = with pkgs; [
vim
@@ -62,6 +59,16 @@
# Or disable the firewall altogether.
networking.firewall.enable = false;

# Vault fetches secrets from itself (after unseal)
vault.enable = true;
homelab.deploy.enable = true;

# Ensure vault-secret services wait for openbao to be unsealed
systemd.services.vault-secret-homelab-deploy-nkey = {
after = [ "openbao.service" ];
wants = [ "openbao.service" ];
};

system.stateVersion = "25.11"; # Did you read the comment?
}

@@ -21,6 +21,7 @@ let
cfg = hostConfig.config;
monConfig = (cfg.homelab or { }).monitoring or { enable = true; scrapeTargets = [ ]; };
dnsConfig = (cfg.homelab or { }).dns or { enable = true; };
hostConfig' = (cfg.homelab or { }).host or { };
hostname = cfg.networking.hostName;
networks = cfg.systemd.network.networks or { };

@@ -49,20 +50,80 @@
inherit hostname;
ip = extractIP firstAddress;
scrapeTargets = monConfig.scrapeTargets or [ ];
# Host metadata for label propagation
tier = hostConfig'.tier or "prod";
priority = hostConfig'.priority or "high";
role = hostConfig'.role or null;
labels = hostConfig'.labels or { };
};

# Build effective labels for a host
# Always includes hostname and tier; only includes priority/role if non-default
buildEffectiveLabels = host:
{ hostname = host.hostname; tier = host.tier; }
// (lib.optionalAttrs (host.priority != "high") { priority = host.priority; })
// (lib.optionalAttrs (host.role != null) { role = host.role; })
// host.labels;

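# Worked example (illustrative, not part of the diff): a host record like
#   { hostname = "testvm01"; tier = "test"; priority = "high"; role = "test"; labels = { }; }
# yields { hostname = "testvm01"; tier = "test"; role = "test"; } because the default
# priority "high" is dropped, while a prod host with default priority, no role and no
# extra labels collapses to just { hostname = ...; tier = "prod"; }.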
# Generate node-exporter targets from all flake hosts
# Returns a list of static_configs entries with labels
generateNodeExporterTargets = self: externalTargets:
let
nixosConfigs = self.nixosConfigurations or { };
hostList = lib.filter (x: x != null) (
lib.mapAttrsToList extractHostMonitoring nixosConfigs
);
flakeTargets = map (host: "${host.hostname}.home.2rjus.net:9100") hostList;

# Extract hostname from a target string like "gunter.home.2rjus.net:9100"
extractHostnameFromTarget = target:
builtins.head (lib.splitString "." target);

# Build target entries with labels for each host
flakeEntries = map
(host: {
target = "${host.hostname}.home.2rjus.net:9100";
labels = buildEffectiveLabels host;
})
hostList;

# External targets get hostname extracted from the target string
externalEntries = map
(target: {
inherit target;
labels = { hostname = extractHostnameFromTarget target; };
})
(externalTargets.nodeExporter or [ ]);

# Node-exporter-only external targets (no systemd-exporter)
externalOnlyEntries = map
(target: {
inherit target;
labels = { hostname = extractHostnameFromTarget target; };
})
(externalTargets.nodeExporterOnly or [ ]);

allEntries = flakeEntries ++ externalEntries ++ externalOnlyEntries;

# Group entries by their label set for efficient static_configs
# Convert labels attrset to a string key for grouping
labelKey = entry: builtins.toJSON entry.labels;
grouped = lib.groupBy labelKey allEntries;

# Convert groups to static_configs format
# Every flake host now has at least a hostname label
staticConfigs = lib.mapAttrsToList
(key: entries:
let
labels = (builtins.head entries).labels;
in
{ targets = map (e: e.target) entries; labels = labels; }
)
grouped;
in
flakeTargets ++ (externalTargets.nodeExporter or [ ]);
staticConfigs;

# Generate scrape configs from all flake hosts and external targets
# Host labels are propagated to service targets for semantic alert filtering
generateScrapeConfigs = self: externalTargets:
let
nixosConfigs = self.nixosConfigurations or { };
@@ -70,13 +131,14 @@ let
lib.mapAttrsToList extractHostMonitoring nixosConfigs
);

# Collect all scrapeTargets from all hosts, grouped by job_name
# Collect all scrapeTargets from all hosts, including host labels
allTargets = lib.flatten (map
(host:
map
(target: {
inherit (target) job_name port metrics_path scheme scrape_interval honor_labels;
hostname = host.hostname;
hostLabels = buildEffectiveLabels host;
})
host.scrapeTargets
)
@@ -87,22 +149,32 @@ let
grouped = lib.groupBy (t: t.job_name) allTargets;

# Generate a scrape config for each job
# Within each job, group targets by their host labels for efficient static_configs
flakeScrapeConfigs = lib.mapAttrsToList
(jobName: targets:
let
first = builtins.head targets;
targetAddrs = map
(t:

# Group targets within this job by their host labels
labelKey = t: builtins.toJSON t.hostLabels;
groupedByLabels = lib.groupBy labelKey targets;

# Every flake host now has at least a hostname label
staticConfigs = lib.mapAttrsToList
(key: labelTargets:
let
portStr = toString t.port;
labels = (builtins.head labelTargets).hostLabels;
targetAddrs = map
(t: "${t.hostname}.home.2rjus.net:${toString t.port}")
labelTargets;
in
"${t.hostname}.home.2rjus.net:${portStr}")
targets;
{ targets = targetAddrs; labels = labels; }
)
groupedByLabels;

config = {
job_name = jobName;
static_configs = [{
targets = targetAddrs;
}];
static_configs = staticConfigs;
}
// (lib.optionalAttrs (first.metrics_path != "/metrics") {
metrics_path = first.metrics_path;
@@ -139,7 +211,18 @@ let
in
flakeScrapeConfigs ++ externalScrapeConfigs;

# Generate systemd-exporter targets (excludes nodeExporterOnly hosts)
generateSystemdExporterTargets = self: externalTargets:
let
nodeTargets = generateNodeExporterTargets self (externalTargets // { nodeExporterOnly = [ ]; });
in
map
(cfg: cfg // {
targets = map (t: builtins.replaceStrings [ ":9100" ] [ ":9558" ] t) cfg.targets;
})
nodeTargets;

in
{
inherit extractHostMonitoring generateNodeExporterTargets generateScrapeConfigs;
inherit extractHostMonitoring generateNodeExporterTargets generateScrapeConfigs generateSystemdExporterTargets;
}

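For orientation, a minimal sketch of how these helpers could feed a Prometheus configuration (the monitoringLib, self and externalTargets bindings and the exact option placement are assumptions, not taken from the diff):

services.prometheus.scrapeConfigs =
  [
    {
      job_name = "node";
      static_configs = monitoringLib.generateNodeExporterTargets self externalTargets;
    }
    {
      job_name = "systemd";
      static_configs = monitoringLib.generateSystemdExporterTargets self externalTargets;
    }
  ]
  ++ monitoringLib.generateScrapeConfigs self externalTargets;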
@@ -1,5 +0,0 @@
[proxmox]
pve1.home.2rjus.net

[proxmox:vars]
ansible_user=root
@@ -1,20 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

# array of hosts
HOSTS=(
"ns1"
"ns2"
"ca"
"ha1"
"http-proxy"
"jelly01"
"monitoring01"
"nix-cache01"
"pgdb1"
)

for host in "${HOSTS[@]}"; do
echo "Rebuilding $host"
nixos-rebuild boot --flake .#${host} --target-host root@${host}
done