nix-cache01: decommission and remove all references
Some checks failed
Run nix flake check / flake-check (push) Has been cancelled

Removed:
- hosts/nix-cache01/ directory
- services/nix-cache/build-flakes.{nix,sh} (replaced by NATS builder)
- Vault secret and AppRole for nix-cache01
- Old signing key variable from terraform
- Old trusted public key from system/nix.nix

Updated:
- flake.nix: removed nixosConfiguration
- README.md: nix-cache01 -> nix-cache02
- Monitoring rules: removed build-flakes alerts, updated harmonia to nix-cache02
- Simplified proxy.nix (no longer needs hostname conditional)

nix-cache02 is now the sole binary cache host.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2026-02-10 23:40:51 +01:00
parent ade0538717
commit 75210805d5
16 changed files with 7 additions and 269 deletions

View File

@@ -12,7 +12,7 @@ NixOS Flake-based configuration repository for a homelab infrastructure. All hos
| `http-proxy` | Reverse proxy | | `http-proxy` | Reverse proxy |
| `monitoring01` | Prometheus, Grafana, Loki, Tempo, Pyroscope | | `monitoring01` | Prometheus, Grafana, Loki, Tempo, Pyroscope |
| `jelly01` | Jellyfin media server | | `jelly01` | Jellyfin media server |
| `nix-cache01` | Nix binary cache | | `nix-cache02` | Nix binary cache + NATS-based build service |
| `nats1` | NATS messaging | | `nats1` | NATS messaging |
| `vault01` | OpenBao (Vault) secrets management | | `vault01` | OpenBao (Vault) secrets management |
| `template1`, `template2` | VM templates for cloning new hosts | | `template1`, `template2` | VM templates for cloning new hosts |

View File

@@ -110,15 +110,6 @@
./hosts/jelly01 ./hosts/jelly01
]; ];
}; };
nix-cache01 = nixpkgs.lib.nixosSystem {
inherit system;
specialArgs = {
inherit inputs self;
};
modules = commonModules ++ [
./hosts/nix-cache01
];
};
nats1 = nixpkgs.lib.nixosSystem { nats1 = nixpkgs.lib.nixosSystem {
inherit system; inherit system;
specialArgs = { specialArgs = {

View File

@@ -1,74 +0,0 @@
# Host configuration for nix-cache01 (Nix binary cache / build host).
{
  pkgs,
  ...
}:
{
  imports = [
    ./hardware-configuration.nix
    ../../system
    ../../common/vm
  ];

  # Role tag consumed by the shared homelab modules.
  homelab.host.role = "build-host";

  # Separate XFS volume for the Nix store so cached paths don't fill /.
  fileSystems."/nix" = {
    device = "/dev/disk/by-label/nixcache";
    fsType = "xfs";
  };

  nixpkgs.config.allowUnfree = true;

  # Boot via GRUB installed to the VM disk's MBR (BIOS boot, not EFI/systemd-boot).
  boot.loader.grub = {
    enable = true;
    device = "/dev/sda";
    configurationLimit = 3;
  };

  networking.hostName = "nix-cache01";
  networking.domain = "home.2rjus.net";

  # Static addressing via systemd-networkd; DHCP explicitly disabled.
  networking.useNetworkd = true;
  networking.useDHCP = false;
  services.resolved.enable = true;
  networking.nameservers = [
    "10.69.13.5"
    "10.69.13.6"
  ];
  systemd.network.enable = true;
  systemd.network.networks."ens18" = {
    matchConfig.Name = "ens18";
    address = [
      "10.69.13.15/24"
    ];
    routes = [
      { Gateway = "10.69.13.1"; }
    ];
    linkConfig.RequiredForOnline = "routable";
  };

  time.timeZone = "Europe/Oslo";

  nix.settings.experimental-features = [
    "nix-command"
    "flakes"
  ];

  vault.enable = true;
  homelab.deploy.enable = true;

  # TTL 0: flake tarballs are considered immediately stale, so every
  # evaluation refetches upstream instead of using a cached copy.
  nix.settings.tarball-ttl = 0;

  environment.systemPackages = with pkgs; [
    vim
    wget
    git
  ];

  services.qemuGuest.enable = true;

  # Open ports in the firewall.
  # networking.firewall.allowedTCPPorts = [ ... ];
  # networking.firewall.allowedUDPPorts = [ ... ];
  # Or disable the firewall altogether.
  networking.firewall.enable = false;

  system.stateVersion = "24.05"; # Did you read the comment?
}

View File

@@ -1,7 +0,0 @@
# Entry point for the nix-cache01 host: combines the shared nix-cache
# service modules with this host's own configuration.
_:
{
  imports = [
    ../../services/nix-cache
    ./configuration.nix
  ];
}

View File

@@ -1,42 +0,0 @@
# Hardware configuration for a QEMU/KVM guest. Layout matches
# nixos-generate-config output — presumably generated; confirm before
# hand-editing.
{
  config,
  lib,
  pkgs,
  modulesPath,
  ...
}:
{
  imports = [
    (modulesPath + "/profiles/qemu-guest.nix")
  ];

  # Drivers the initrd needs to find the virtio/SCSI disks at boot.
  boot.initrd.availableKernelModules = [
    "ata_piix"
    "uhci_hcd"
    "virtio_pci"
    "virtio_scsi"
    "sd_mod"
    "sr_mod"
  ];
  boot.initrd.kernelModules = [ "dm-snapshot" ];

  # ptp_kvm exposes a PTP clock from the KVM host for guest time sync.
  boot.kernelModules = [
    "ptp_kvm"
  ];
  boot.extraModulePackages = [ ];

  # Root filesystem and swap are located by disk label.
  fileSystems."/" = {
    device = "/dev/disk/by-label/root";
    fsType = "xfs";
  };

  swapDevices = [ { device = "/dev/disk/by-label/swap"; } ];

  # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
  # (the default) this is the recommended approach. When using systemd-networkd it's
  # still possible to use this option, but it's recommended to use it in conjunction
  # with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
  networking.useDHCP = lib.mkDefault true;
  # networking.interfaces.ens18.useDHCP = lib.mkDefault true;

  nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
}

View File

@@ -21,7 +21,7 @@ let
"https://pyroscope.home.2rjus.net" "https://pyroscope.home.2rjus.net"
"https://pushgw.home.2rjus.net" "https://pushgw.home.2rjus.net"
# Caddy auto-TLS on nix-cache01 # Caddy auto-TLS on nix-cache02
"https://nix-cache.home.2rjus.net" "https://nix-cache.home.2rjus.net"
# Caddy auto-TLS on grafana01 # Caddy auto-TLS on grafana01

View File

@@ -178,9 +178,7 @@ in
} }
]; ];
} }
# TODO: nix-cache_caddy can't be auto-generated because the cert is issued # Caddy metrics from nix-cache02 (serves nix-cache.home.2rjus.net)
# for nix-cache.home.2rjus.net (service CNAME), not nix-cache01 (hostname).
# Consider adding a target override to homelab.monitoring.scrapeTargets.
{ {
job_name = "nix-cache_caddy"; job_name = "nix-cache_caddy";
scheme = "https"; scheme = "https";

View File

@@ -171,37 +171,14 @@ groups:
description: "NATS has {{ $value }} slow consumers on {{ $labels.instance }}." description: "NATS has {{ $value }} slow consumers on {{ $labels.instance }}."
- name: nix_cache_rules - name: nix_cache_rules
rules: rules:
- alert: build_flakes_service_not_active_recently
expr: count_over_time(node_systemd_unit_state{instance="nix-cache01.home.2rjus.net:9100", name="build-flakes.service", state="active"}[1h]) < 1
for: 0m
labels:
severity: critical
annotations:
summary: "The build-flakes service on {{ $labels.instance }} has not run recently"
description: "The build-flakes service on {{ $labels.instance }} has not run recently"
- alert: build_flakes_error
expr: build_flakes_error == 1
labels:
severity: warning
annotations:
summary: "The build-flakes job has failed for host {{ $labels.host }}."
description: "The build-flakes job has failed for host {{ $labels.host }}."
- alert: harmonia_down - alert: harmonia_down
expr: node_systemd_unit_state {instance="nix-cache01.home.2rjus.net:9100", name = "harmonia.service", state = "active"} == 0 expr: node_systemd_unit_state{instance="nix-cache02.home.2rjus.net:9100", name="harmonia.service", state="active"} == 0
for: 5m for: 5m
labels: labels:
severity: critical severity: critical
annotations: annotations:
summary: "Harmonia not running on {{ $labels.instance }}" summary: "Harmonia not running on {{ $labels.instance }}"
description: "Harmonia has been down on {{ $labels.instance }} more than 5 minutes." description: "Harmonia has been down on {{ $labels.instance }} more than 5 minutes."
- alert: low_disk_space_nix
expr: node_filesystem_free_bytes{instance="nix-cache01.home.2rjus.net:9100", mountpoint="/nix"} / node_filesystem_size_bytes{instance="nix-cache01.home.2rjus.net:9100", mountpoint="/nix"} * 100 < 10
for: 5m
labels:
severity: warning
annotations:
summary: "Disk space low on /nix for {{ $labels.instance }}"
description: "Disk space is low on /nix for host {{ $labels.instance }}. Please check."
- name: home_assistant_rules - name: home_assistant_rules
rules: rules:
- alert: home_assistant_down - alert: home_assistant_down

View File

@@ -1,29 +0,0 @@
# Periodic flake-build job: a systemd timer fires a service that runs
# build-flakes.sh with its required tools on PATH.
{ pkgs, ... }:
let
  # Wrap the script so git/nix/nixos-rebuild/jq/curl are available at runtime.
  buildFlakesScript = pkgs.writeShellApplication {
    name = "build-flake-script";
    runtimeInputs = with pkgs; [
      git
      nix
      nixos-rebuild
      jq
      curl
    ];
    text = builtins.readFile ./build-flakes.sh;
  };
in
{
  # Service has no [Install] section; it is started only by the timer below.
  systemd.services."build-flakes".serviceConfig = {
    Type = "exec";
    ExecStart = "${buildFlakesScript}/bin/build-flake-script";
  };

  # Fire at minute 30 of every hour.
  systemd.timers."build-flakes" = {
    enable = true;
    wantedBy = [ "timers.target" ];
    timerConfig.OnCalendar = "*-*-* *:30:00";
  };
}

View File

@@ -1,44 +0,0 @@
# Builds every nixosConfiguration in two flake checkouts and reports each
# host's result to the Prometheus pushgateway as a build_flakes_error gauge.
# NOTE(review): executed via writeShellApplication, which presumably injects
# a shebang and `set -euo pipefail` — confirm before running standalone.

JOB_NAME="build_flakes"

# Push build_flakes_error for $1=host with value $2 (0 = success, 1 = failure).
report_result() {
  local host="$1"
  local status="$2"
  cat <<EOF | curl -sS -X PUT --data-binary @- "https://pushgw.home.2rjus.net/metrics/job/$JOB_NAME/host/$host"
# TYPE build_flakes_error gauge
# HELP build_flakes_error 0 if the build was successful, 1 if it failed
build_flakes_error{instance="$HOSTNAME"} $status
EOF
}

# Build one flake output from the current directory and report the outcome.
build_host() {
  local host="$1"
  echo "Building $host"
  if nixos-rebuild --verbose -L --flake ".#$host" build; then
    echo "Build successful for $host"
    report_result "$host" 0
  else
    echo "Build failed for $host"
    report_result "$host" 1
  fi
}

cd /root/nixos-servers
git pull
echo "Starting nixos-servers builds"
for host in $(nix flake show --json | jq -r '.nixosConfigurations | keys[]'); do
  build_host "$host"
done
echo "All nixos-servers builds complete"

# The personal machine lives in a separate flake repo.
cd /root/nixos
git pull
build_host "gunter"

View File

@@ -1,10 +1,8 @@
{ ... }: { ... }:
{ {
imports = [ imports = [
./build-flakes.nix
./harmonia.nix ./harmonia.nix
./proxy.nix ./proxy.nix
./nix.nix ./nix.nix
]; ];
} }

View File

@@ -1,14 +1,4 @@
{ pkgs, config, ... }: { pkgs, ... }:
let
# nix-cache02 serves the canonical nix-cache.home.2rjus.net
# nix-cache01 serves nix-cache01.home.2rjus.net (deprecated, pending decommission)
hostname = config.networking.hostName;
domain =
if hostname == "nix-cache02" then
"nix-cache.home.2rjus.net"
else
"${hostname}.home.2rjus.net";
in
{ {
services.caddy = { services.caddy = {
enable = true; enable = true;
@@ -20,7 +10,7 @@ in
} }
${domain} { nix-cache.home.2rjus.net {
log { log {
output file /var/log/caddy/nix-cache.log { output file /var/log/caddy/nix-cache.log {
mode 644 mode 644

View File

@@ -42,7 +42,6 @@ in
"https://cuda-maintainers.cachix.org" "https://cuda-maintainers.cachix.org"
]; ];
trusted-public-keys = [ trusted-public-keys = [
"nix-cache.home.2rjus.net-1:2kowZOG6pvhoK4AHVO3alBlvcghH20wchzoR0V86UWI="
"nix-cache02.home.2rjus.net-1:QyT5FAvJtV+EPQrgQQ6iV9JMg1kRiWuIAJftM35QMls=" "nix-cache02.home.2rjus.net-1:QyT5FAvJtV+EPQrgQQ6iV9JMg1kRiWuIAJftM35QMls="
"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
"cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E=" "cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E="

View File

@@ -87,13 +87,6 @@ locals {
] ]
} }
# Wave 5: nix-cache01
"nix-cache01" = {
paths = [
"secret/data/hosts/nix-cache01/*",
]
}
# vault01: Vault server itself (fetches secrets from itself) # vault01: Vault server itself (fetches secrets from itself)
"vault01" = { "vault01" = {
paths = [ paths = [

View File

@@ -75,12 +75,7 @@ locals {
data = { private_key = var.wireguard_private_key } data = { private_key = var.wireguard_private_key }
} }
# Nix cache signing keys # Nix cache signing key
"hosts/nix-cache01/cache-secret" = {
auto_generate = false
data = { key = var.cache_signing_key }
}
"hosts/nix-cache02/cache-secret" = { "hosts/nix-cache02/cache-secret" = {
auto_generate = false auto_generate = false
data = { key = var.cache_signing_key_02 } data = { key = var.cache_signing_key_02 }

View File

@@ -40,16 +40,9 @@ variable "wireguard_private_key" {
sensitive = true sensitive = true
} }
variable "cache_signing_key" {
description = "Nix binary cache signing key (nix-cache01)"
type = string
sensitive = true
}
variable "cache_signing_key_02" { variable "cache_signing_key_02" {
description = "Nix binary cache signing key (nix-cache02)" description = "Nix binary cache signing key (nix-cache02)"
type = string type = string
default = "PLACEHOLDER"
sensitive = true sensitive = true
} }