# ============================================================================
# MicroVM Configuration
# ============================================================================
# This file contains the complete configuration for running a microVM.
#
# Architecture Overview:
#  ┌────────────────────────────────────────────────┐
#  │ Host (Ceres - NixOS Server)                    │
#  │                                                │
#  │  ┌─────────────┐        ┌─────────────┐        │
#  │  │    Caddy    │        │   Bridge    │        │
#  │  │  (Reverse   │───────▶│  (br-vms)   │        │
#  │  │   Proxy)    │        │ 192.168.50  │        │
#  │  └─────────────┘        │    .240     │        │
#  │         │               └──────┬──────┘        │
#  │         │                      │               │
#  │         │               ┌──────▼──────┐        │
#  │         │               │     TAP     │        │
#  │         │               │   (vm-*)    │        │
#  │         │               └──────┬──────┘        │
#  │         │                      │               │
#  │  ┌──────▼──────────────────────▼────────────┐  │
#  │  │                                          │  │
#  │  │                 MicroVM                  │  │
#  │  │  ┌─────────────┐        ┌─────────────┐  │  │
#  │  │  │   Service   │        │   enp0s5    │  │  │
#  │  │  │  Port 8085  │        │ 192.168.50  │  │  │
#  │  │  │             │        │    .151     │  │  │
#  │  │  └─────────────┘        └─────────────┘  │  │
#  │  │                                          │  │
#  │  └──────────────────────────────────────────┘  │
#  │                                                │
#  └────────────────────────────────────────────────┘
#
# Network Flow:
#   1. External request → Router (port forward 443) → Host IP (192.168.50.240)
#   2. Host Caddy receives HTTPS request on port 443
#   3. Caddy terminates TLS using ACME certificates
#   4. Caddy forwards HTTP to VM IP (192.168.50.xxx:xxxx)
#   5. Request travels through br-vms bridge → TAP interface → VM network
#   6. Service responds back through the same path
# ============================================================================
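
# To trace this flow by hand, each hop can be probed directly; the domain and
# VM address below are placeholders standing in for the real instance values
# (serviceCfg.domains.url0, serviceCfg.interface.ip, serviceCfg.ports.port0):
#
#   # Full path through Caddy, with TLS (from anywhere on the LAN):
#   curl -v https://service.example.com
#
#   # Skip Caddy and hit the service inside the VM directly (from the host):
#   curl -v http://192.168.50.151:8085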

{
  config,
  flake,
  ...
}:
let
  # Pull configuration from centralized instance definitions
  # These are defined in modules/config/instances/config/*.nix
  serviceCfg = flake.config.services.instances.service;
  smtpCfg = flake.config.services.instances.smtp;
  hostCfg = flake.config.services.instances.web;
  inherit (flake.config.people) user0;
  inherit (flake.config.services) instances;
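
  # For orientation, a hypothetical instance definition with some of the fields
  # this file relies on might look roughly like this (names and values are
  # illustrative, not the real modules/config/instances entry):
  #
  #   service = {
  #     domains.url0 = "service.example.com";
  #     ports.port0 = 8085;
  #     interface = {
  #       id = "vm-service";
  #       ip = "192.168.50.151";
  #       gate = "192.168.50.1";
  #       mac = "02:00:00:00:00:01";
  #       ssh = 2201;
  #     };
  #     mntPaths.path0 = "/mnt/storage/service";
  #   };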

  # DNS provider configuration for ACME DNS-01 challenge
  dns0 = instances.web.dns.provider0;
  dns0Path = "dns/${dns0}";
in
{
  # ============================================================================
  # HOST-SIDE CONFIGURATION
  # ============================================================================
  # The following settings run on the host (Ceres), not inside the VM

  # Add the Caddy user to the ACME group so it can read TLS certificates.
  # The NixOS ACME module creates certs with group ownership, and Caddy needs
  # read access to serve them over HTTPS.
  users.users.caddy.extraGroups = [ "acme" ];

  # Configure a Let's Encrypt SSL certificate for the service domain.
  # Uses the DNS-01 challenge (via the dns0 provider) instead of HTTP-01.
  # This allows cert generation even when the service isn't publicly accessible yet.
  security.acme.certs."${serviceCfg.domains.url0}" = {
    dnsProvider = dns0; # DNS provider (e.g., cloudflare, route53)
    environmentFile = config.sops.secrets.${dns0Path}.path; # API credentials from SOPS
    group = "caddy"; # Allow Caddy to read the certs
  };
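
  # The environmentFile holds the provider credentials that lego (the ACME
  # client NixOS uses) reads during the DNS-01 challenge. As a sketch, for the
  # Cloudflare provider it would contain something like the following (variable
  # name per lego's Cloudflare docs; the token value is a placeholder):
  #
  #   CLOUDFLARE_DNS_API_TOKEN=xxxxxxxxxxxxxxxxxxxxxxxx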

  # ============================================================================
  # MICROVM DEFINITION
  # ============================================================================
  # This section defines the entire VM configuration, including networking,
  # services, and resource allocation

  microvm.vms.service = {
    # Automatically start the VM when the host boots
    autostart = true;

    # Restart the VM when its configuration changes (during nixos-rebuild)
    restartIfChanged = true;

    # ============================================================================
    # VM GUEST CONFIGURATION
    # ============================================================================
    # Everything inside this 'config' block runs INSIDE the VM, not on the host

    config = {
      # NixOS state version for the VM system
      system.stateVersion = "24.05";
      time.timeZone = "America/Winnipeg";

      # Allow SSH access to the VM using the host user's SSH keys.
      # This enables direct SSH via: ssh root@192.168.50.xxx
      users.users.root.openssh.authorizedKeys.keys = flake.config.people.users.${user0}.sshKeys;

      # ============================================================================
      # SERVICE (Inside VM)
      # ============================================================================
      # Main application configuration - runs on port xxxx inside the VM

      services = {
        # ============================================================================
        # VM SSH ACCESS
        # ============================================================================
        # Enable an SSH server inside the VM for management and debugging

        openssh = {
          enable = true;
          settings = {
            PasswordAuthentication = false; # Only allow key-based auth
            PermitRootLogin = "prohibit-password"; # Root login only with keys
          };
        };
      };

      # Open firewall ports inside the VM
      networking.firewall.allowedTCPPorts = [
        serviceCfg.ports.port0 # Service web interface port
      ];

      # ============================================================================
      # VM NETWORK CONFIGURATION (systemd-networkd)
      # ============================================================================
      # This configures the network interface INSIDE the VM.
      # The VM sees a network interface called "enp0s5" which connects to the
      # host's TAP interface (vm-service) via the bridge (br-vms).
      systemd.network = {
        enable = true; # Enable systemd-networkd for network management

        networks."20-lan" = {
          # Match the network interface created by QEMU.
          # QEMU with the q35 machine type typically creates "enp0s5" for the first NIC.
          matchConfig.Name = "enp0s5";

          # Assign a static IP address to the VM.
          # This IP must be on the same subnet as the host bridge (192.168.50.0/24).
          addresses = [ { Address = "${serviceCfg.interface.ip}/24"; } ]; # 192.168.50.xxx/24

          # Configure the default route to reach the internet.
          # All traffic (0.0.0.0/0) goes through the gateway (usually your router).
          routes = [
            {
              Destination = "${hostCfg.localhost.address1}/0";
              Gateway = serviceCfg.interface.gate; # 192.168.50.1
            }
          ];

          # DNS servers for the VM to use.
          # Using public DNS (Cloudflare and Google) for reliability.
          dns = [
            "1.1.1.1"
            "8.8.8.8"
          ];
        };
      };

      # Explicitly start the systemd-networkd service.
      # By default, systemd.network.enable creates configs but doesn't start the
      # service; this ensures the network is actually configured when the VM boots.
      systemd.services.systemd-networkd.wantedBy = [ "multi-user.target" ];
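
      # To confirm the interface came up as intended, these standard tools can
      # be run inside the VM (illustrative commands, not part of this config):
      #
      #   networkctl status enp0s5   # link state, addresses, gateway
      #   ip addr show enp0s5        # assigned 192.168.50.xxx/24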

      # ============================================================================
      # MICROVM HARDWARE CONFIGURATION
      # ============================================================================
      # This section defines the VM's virtual hardware, networking, and storage

      microvm = {
        # Virtual CPU cores allocated to the VM
        vcpu = 2;

        # Memory allocated to the VM (in MB)
        mem = 1024;

        # Hypervisor to use (QEMU with KVM acceleration)
        hypervisor = "qemu";

        # ============================================================================
        # NETWORK INTERFACES
        # ============================================================================
        # The VM has TWO network interfaces for different purposes:

        interfaces = [
          # ──────────────────────────────────────────────────────────────────
          # Primary Interface: TAP (for LAN connectivity)
          # ──────────────────────────────────────────────────────────────────
          # TAP creates a virtual ethernet device on the host that acts like
          # a physical network cable connecting the VM to the host's network.
          #
          # Network Path:
          #   VM (enp0s5) ← virtio-net → TAP (vm-service) → Bridge (br-vms) → Physical NIC (enp10s0) → LAN
          #
          # The host has a bridge (br-vms) configured in systems/ceres/config/networking.nix
          # that connects:
          #   - Physical interface: enp10s0
          #   - TAP interfaces: vm-* (this and other VMs)
          #
          # This allows the VM to appear as a separate device on your LAN with
          # its own IP address (192.168.50.xxx). A sketch of the host side of
          # this wiring follows the interface list below.
          {
            type = "tap"; # TAP interface (Layer 2 / Ethernet)
            id = serviceCfg.interface.id; # Interface name on host: "vm-service"
            mac = serviceCfg.interface.mac; # MAC address: "02:00:00:00:00:xx"
          }

          # ──────────────────────────────────────────────────────────────────
          # Secondary Interface: User-mode networking (for fallback/NAT)
          # ──────────────────────────────────────────────────────────────────
          # User-mode networking (SLIRP) provides outbound internet access via NAT
          # without requiring any host configuration. This is a backup interface.
          #
          #   - VM gets a private IP (10.0.2.**) on this interface
          #   - Provides internet access even if TAP/bridge isn't working
          #   - Used for testing and as a fallback
          #   - Cannot receive inbound connections (NAT only)
          {
            type = "user"; # User-mode networking (SLIRP/NAT)
            id = serviceCfg.interface.idUser; # Interface name: "vmuser-*"
            mac = serviceCfg.interface.macUser; # MAC address: "02:00:00:00:00:xx"
          }
        ];
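
        # For context, a minimal sketch of what the host-side bridge plumbing
        # could look like (the real definition lives in
        # systems/ceres/config/networking.nix; option values here are
        # assumptions based on the interface names in the comments above):
        #
        #   systemd.network = {
        #     netdevs."10-br-vms".netdevConfig = {
        #       Kind = "bridge";
        #       Name = "br-vms";
        #     };
        #     # Enslave the physical NIC and all vm-* TAP devices to the bridge
        #     networks."10-br-vms-ports" = {
        #       matchConfig.Name = "enp10s0 vm-*";
        #       networkConfig.Bridge = "br-vms";
        #     };
        #     # The host's own address lives on the bridge interface
        #     networks."10-br-vms" = {
        #       matchConfig.Name = "br-vms";
        #       networkConfig.Address = "192.168.50.240/24";
        #     };
        #   };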

        # ============================================================================
        # PORT FORWARDING (Host → VM)
        # ============================================================================
        # Forward ports from the host to the VM for direct access.
        # This allows SSH to the VM via: ssh -p 220x root@localhost (from the host)
        #
        # Without this, you'd need to SSH via the VM's LAN IP: ssh root@192.168.50.xxx
        forwardPorts = [
          {
            from = "host"; # Forward from host
            host.port = serviceCfg.interface.ssh; # Host port: 220x
            guest.port = 22; # VM port: 22 (SSH)
          }
        ];

        # ============================================================================
        # SHARED DIRECTORIES (Host → VM)
        # ============================================================================
        # VirtioFS allows sharing directories from host to VM with good performance.
        # This is better than network shares (NFS/Samba) for VM-host communication.
        #
        # Why use VirtioFS instead of storing everything in the VM?
        #   1. Data persists when the VM is recreated (VMs are ephemeral, data isn't)
        #   2. Easy backups (just back up the host directories)
        #   3. Data accessible from the host for maintenance/migration
        #   4. Share a read-only nix store from the host (saves space)
        # A quick way to verify these mounts appears after the share list below.

        shares = [
          # ──────────────────────────────────────────────────────────────────
          # Nix Store (Read-Only)
          # ──────────────────────────────────────────────────────────────────
          # Share the host's /nix/store as read-only inside the VM.
          # This provides all Nix packages without duplicating data;
          # the VM can use all the same packages as the host.
          {
            mountPoint = "/nix/.ro-store"; # Mount point in VM
            proto = "virtiofs"; # VirtioFS protocol (fast, modern)
            source = "/nix/store"; # Source on host
            tag = "read_only_nix_store"; # Unique identifier
          }

          # ──────────────────────────────────────────────────────────────────
          # Service Data (Read-Write)
          # ──────────────────────────────────────────────────────────────────
          # Persistent storage for the service's database and attachments.
          # Stored on the host at: /mnt/storage/service
          # This data survives VM rebuilds/restarts.
          {
            mountPoint = "/var/lib/service"; # Where the service stores its data
            proto = "virtiofs"; # VirtioFS protocol
            source = serviceCfg.mntPaths.path0; # Host: /mnt/storage/service
            tag = "service_data"; # Unique identifier
          }

          # ──────────────────────────────────────────────────────────────────
          # Secrets (Read-Only)
          # ──────────────────────────────────────────────────────────────────
          # Share secrets managed by SOPS from the host.
          # Contains sensitive config like SMTP passwords, admin tokens, etc.
          # SOPS-nix decrypts these on the host, then they're shared to the VM.
          {
            mountPoint = "/run/secrets"; # Mount point in VM
            proto = "virtiofs"; # VirtioFS protocol
            source = "/run/secrets"; # Source on host
            tag = "host_secrets"; # Unique identifier
          }
        ];
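
        # Inside the VM, these shares show up as virtiofs filesystems and can be
        # checked with standard tools (illustrative commands, not part of this config):
        #
        #   findmnt -t virtiofs        # list all virtiofs mounts with their tags
        #   df -h /var/lib/service     # confirm the service data share is mounted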
      };
    };
  };

  # ============================================================================
  # HOST-SIDE STORAGE CONFIGURATION
  # ============================================================================
  # Create necessary directories on the host for VM data

  systemd.tmpfiles.rules = [
    # Create the service data directory on the host if it doesn't exist.
    # This is where the VM's persistent data is actually stored.
    # d = directory, 0755 = permissions, root root = owner/group
    "d ${serviceCfg.mntPaths.path0} 0755 root root -" # /mnt/storage/service
  ];

  # ============================================================================
  # CADDY REVERSE PROXY (Host)
  # ============================================================================
  # Caddy runs on the host and forwards HTTPS traffic to the VM
  #
  # Traffic Flow:
  #   Internet → Router:443 → Host:443 (Caddy) → VM:xxxx (Service)
  #                               ↓
  #                        TLS Termination
  #                         (ACME Certs)
  #
  # Why use a reverse proxy instead of exposing the VM directly?
  #   1. TLS/SSL termination on the host (easier cert management)
  #   2. A single public IP can serve multiple services
  #   3. Additional security layer (Caddy can add headers, rate limiting, etc.)
  #   4. The VM doesn't need to handle TLS complexity

  services.caddy.virtualHosts."${serviceCfg.domains.url0}" = {
    extraConfig = ''
      reverse_proxy ${serviceCfg.interface.ip}:${toString serviceCfg.ports.port0} {
        header_up X-Real-IP {remote_host}
      }

      tls ${serviceCfg.ssl.cert} ${serviceCfg.ssl.key}

      encode zstd gzip
    '';
  };
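
  # Additional VM-backed services would follow the same pattern; a hypothetical
  # second instance ("service2", not defined in this file) for illustration:
  #
  #   services.caddy.virtualHosts."${service2Cfg.domains.url0}" = {
  #     extraConfig = ''
  #       reverse_proxy ${service2Cfg.interface.ip}:${toString service2Cfg.ports.port0}
  #       tls ${service2Cfg.ssl.cert} ${service2Cfg.ssl.key}
  #     '';
  #   };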

  # ============================================================================
  # SECRETS MANAGEMENT (SOPS)
  # ============================================================================
  # Configure secrets that will be decrypted by SOPS-nix on the host,
  # then shared to the VM via VirtioFS.
  #
  # The service/env file contains sensitive environment variables like:
  #   - ADMIN_TOKEN: For accessing the admin panel
  #   - DATABASE_URL: Database connection string (if using an external DB)
  #   - SMTP_PASSWORD: For sending email notifications

  sops.secrets = {
    "service/env" = {
      owner = "root"; # File owner on host
      mode = "0600"; # Permissions (read/write for owner only)
    };
  };
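
  # In the SOPS-encrypted YAML, the "service/env" key above corresponds to a
  # nested entry roughly like the following (layout is an assumption based on
  # sops-nix's default key mapping; values are placeholders):
  #
  #   service:
  #     env: |
  #       ADMIN_TOKEN=changeme
  #       SMTP_PASSWORD=changeme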
}

# ============================================================================
# SUMMARY: How This All Works Together
# ============================================================================
#
# 1. BOOT SEQUENCE:
#    - Host starts and creates the br-vms bridge
#    - Host creates the TAP interface (vm-service)
#    - Host attaches the TAP to the bridge
#    - QEMU starts with TAP fds and VirtioFS shares
#    - VM boots and sees the enp0s5 network interface
#    - systemd-networkd configures enp0s5 with a static IP
#    - The service starts on port xxxx
#
# 2. NETWORK CONNECTIVITY:
#    - VM has IP 192.168.50.xxx on the LAN (via TAP/bridge)
#    - Host has IP 192.168.50.240 on the LAN (on the bridge)
#    - Both can reach each other and the internet
#    - Router forwards port 443 to the host IP
#
# 3. REQUEST FLOW:
#    External → Router:443 → Host:443 (Caddy) → Bridge → TAP → VM:xxxx (Service)
#    Response follows the same path in reverse
#
# 4. DATA STORAGE:
#    - VM reads packages from the host's /nix/store (shared read-only)
#    - VM writes data to /var/lib/service (actually /mnt/storage/service on the host)
#    - VM reads secrets from /run/secrets (shared from the host via SOPS)
#
# 5. MAINTENANCE:
#    - SSH to VM: ssh root@192.168.50.xxx (from LAN)
#    - SSH to VM: ssh -p 220x root@localhost (from the host)
#    - Rebuild: sudo nixos-rebuild switch --flake .#ceres
#    - VM automatically restarts on config changes
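#
# To spot-check the host side of this setup (illustrative commands using the
# names from the diagrams above):
#
#   ip link show master br-vms   # TAP and physical NICs enslaved to the bridge
#   systemctl status caddy       # reverse proxy is up on the host
#   ssh root@192.168.50.xxx      # reach the VM over the LAN path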
#
# ============================================================================