test: forgejo microVM

This commit is contained in:
Nick 2025-11-09 04:38:20 -06:00
parent cb01cdf219
commit 1faa06262d


@@ -1,44 +1,64 @@
# ============================================================================
# MicroVM Service Template - Production-Ready Configuration
# ============================================================================
# This template is based on proven, working configurations from:
# - Vaultwarden (simple service with environment file)
# - Forgejo (complex service with separate secret files)
# - Jellyfin (media service with multiple mounts and tmpfiles)
#
# CRITICAL SUCCESS FACTORS (learned from production deployments):
# 1. Use serviceCfg.name for all service references (not hardcoded strings)
# 2. Secrets MUST use service-specific subdirectories: /run/secrets/${serviceCfg.name}/
# 3. Host directories MUST exist with correct permissions BEFORE VM starts
# 4. Use 0777 permissions when VM service runs as non-root user with different UID
# 5. systemd.tmpfiles can be used INSIDE the VM for VM-internal directories
# 6. Host and VM tmpfiles rules serve different purposes - use both when needed
#
# Architecture Overview:
# ┌──────────────────────────────────────────────────┐
# │ Host (NixOS Server)                              │
# │                                                  │
# │  ┌──────────────┐        ┌──────────────┐        │
# │  │    Caddy     │        │    br-vms    │        │
# │  │   (Reverse   │───────▶│    Bridge    │        │
# │  │    Proxy)    │        │  192.168.50  │        │
# │  │   TLS Term   │        │     .240     │        │
# │  │     :443     │        └──────┬───────┘        │
# │  └──────────────┘               │                │
# │                          ┌──────▼──────┐         │
# │                          │   vm-NAME   │         │
# │                          │    (TAP)    │         │
# │                          └──────┬──────┘         │
# │                                 │                │
# │  ┌──────────────────────────────▼─────────────┐  │
# │  │               MicroVM Guest                │  │
# │  │  ┌────────────┐        ┌────────────┐      │  │
# │  │  │  Service   │        │   enp0s5   │      │  │
# │  │  │   :PORT    │        │ 192.168.50 │      │  │
# │  │  │            │        │    .1XX    │      │  │
# │  │  └────────────┘        └────────────┘      │  │
# │  │                                            │  │
# │  │  VirtioFS Mounts:                          │  │
# │  │  • /nix/.ro-store → Host /nix/store        │  │
# │  │  • /var/lib/NAME  → Host /mnt/storage      │  │
# │  │  • /run/secrets   → Host /run/secrets/NAME │  │
# │  └────────────────────────────────────────────┘  │
# │                                                  │
# └──────────────────────────────────────────────────┘
#
# Network Flow:
# 1. Internet → Router:443 (port forward) → Host:443 (Caddy)
# 2. Caddy terminates TLS using ACME certificates
# 3. Caddy proxies HTTP to VM's LAN IP (e.g., 192.168.50.151:8085)
# 4. Request: br-vms → TAP (vm-NAME) → VM enp0s5 → Service
# 5. Response follows same path in reverse
#
# IMPORTANT: Split-DNS for LAN Access
# - External users: DNS resolves to public IP → router forwards to host
# - Internal users: MUST have DNS resolve to 192.168.50.240 (host bridge IP)
# OR use /etc/hosts entries, otherwise NAT hairpinning may fail
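#
#   A minimal sketch of the internal-users case (assuming LAN clients are
#   also NixOS machines; the hostname is illustrative) - pin the service
#   domain to the host bridge IP so LAN traffic skips NAT hairpinning:
#
#     networking.hosts."192.168.50.240" = [ "service.example.com" ];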
#
# ============================================================================
{
@@ -47,15 +67,31 @@
...
}:
let
# ============================================================================
# CONFIGURATION REFERENCES
# ============================================================================
# These pull from your centralized instance definitions
# Located in: modules/config/instances/config/*.nix
inherit (flake.config.people) user0;
inherit (flake.config.services) instances;
# REPLACE 'service' with your actual service name identifier
# This should match the attribute name in your instances configuration
# Examples: vaultwarden, forgejo, jellyfin, etc.
serviceCfg = instances.service; # CHANGE THIS
# SMTP configuration (if your service needs email)
# Remove this line if your service doesn't use SMTP
smtpCfg = instances.smtp;
# Host/web configuration for routing and DNS
hostCfg = instances.web;
# Service domain (e.g., "service.example.com")
host = serviceCfg.domains.url0;
# DNS provider for ACME DNS-01 challenge
dns0 = instances.web.dns.provider0;
dns0Path = "dns/${dns0}";
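# For orientation, a minimal sketch of what an instances entry provides.
# Attribute names below are inferred from how serviceCfg is used in this
# file; treat them as assumptions and match your actual schema in
# modules/config/instances/config/*.nix:
#
#   service = {
#     name = "service";
#     label = "Service";
#     domains.url0 = "service.example.com";
#     ports.port0 = 8085;
#     interface = {
#       id = "vm-service";
#       idUser = "vmuser-service";
#       mac = "02:00:00:00:00:51";
#       macUser = "02:00:00:00:01:51";
#       ip = "192.168.50.151";
#       gate = "192.168.50.1";
#       ssh = 2201;
#     };
#     mntPaths.path0 = "/mnt/storage/service";
#     ssl = {
#       cert = "/var/lib/acme/service.example.com/cert.pem";
#       key = "/var/lib/acme/service.example.com/key.pem";
#     };
#   };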
in
@@ -63,338 +99,518 @@ in
# ============================================================================
# HOST-SIDE CONFIGURATION
# ============================================================================
# These configurations run on the HOST, not inside the VM
# They MUST be configured BEFORE the VM starts
# ──────────────────────────────────────────────────────────────────────────
# 1. Caddy Group Membership
# ──────────────────────────────────────────────────────────────────────────
# Allow Caddy to read ACME certificates
users.users.caddy.extraGroups = [ "acme" ];
# ──────────────────────────────────────────────────────────────────────────
# 2. ACME TLS Certificate
# ──────────────────────────────────────────────────────────────────────────
# Request Let's Encrypt certificate using DNS-01 challenge
security.acme.certs."${host}" = {
dnsProvider = dns0;
environmentFile = config.sops.secrets.${dns0Path}.path;
group = "caddy";
};
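# NOTE: the DNS-01 challenge above assumes the provider's API credentials are
# declared elsewhere on the host as a SOPS secret matching dns0Path. A
# minimal sketch (owner and mode are assumptions):
#
#   sops.secrets.${dns0Path} = {
#     owner = "acme";
#     mode = "0400";
#   };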
# ──────────────────────────────────────────────────────────────────────────
# 3. Host Storage Directories
# ──────────────────────────────────────────────────────────────────────────
# Create directories on the host BEFORE VM starts
# The VM will mount these via VirtioFS
#
# PERMISSION PATTERNS (choose based on your service):
# - 0755: Safe default when VM service runs as root
# - 0777: Required when VM service runs as non-root with different UID
#         (e.g., jellyfin runs as UID 999 inside VM)
#
# EXAMPLES FROM WORKING CONFIGS:
# Vaultwarden: "d ${serviceCfg.mntPaths.path0} 0777 root root -"
# Forgejo:     "d ${serviceCfg.mntPaths.path0} 0777 root root -"
# Jellyfin:    "d ${serviceCfg.mntPaths.path0} 0777 root root -"
#              "d ${serviceCfg.mntPaths.path0}/cache 0777 root root -"
systemd.tmpfiles.rules = [
# Main data directory
"d ${serviceCfg.mntPaths.path0} 0777 root root -"
# OPTIONAL: Additional directories if needed (like Jellyfin's cache)
# "d ${serviceCfg.mntPaths.path0}/cache 0777 root root -"
];
# ──────────────────────────────────────────────────────────────────────────
# 4. Secrets Management (SOPS)
# ──────────────────────────────────────────────────────────────────────────
# Configure secrets decryption on the host
# SOPS-nix will decrypt these to /run/secrets/${serviceCfg.name}/*
#
# CRITICAL: Always use ${serviceCfg.name} for the path prefix!
# This prevents conflicts between multiple VMs
#
# PATTERN 1: Single environment file (like Vaultwarden)
# sops.secrets = {
#   "${serviceCfg.name}/env" = {
#     owner = "root";
#     mode = "0600";
#   };
# };
#
# PATTERN 2: Multiple secret files (like Forgejo)
# sops.secrets = {
#   "${serviceCfg.name}/smtp" = {
#     owner = "root";
#     mode = "0600";
#   };
#   "${serviceCfg.name}/database" = {
#     owner = "root";
#     mode = "0600";
#   };
# };
#
# PATTERN 3: No secrets (like Jellyfin - if service doesn't need secrets)
# sops.secrets = {};
sops.secrets = {
# CHOOSE ONE OF THE PATTERNS ABOVE AND UNCOMMENT
# "${serviceCfg.name}/env" = {
#   owner = "root";
#   mode = "0600";
# };
};
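# NOTE: sops-nix resolves a secret name like "${serviceCfg.name}/env" to
# nested keys inside the encrypted file, so the SOPS YAML would look roughly
# like this (illustrative layout - match your actual .sops.yaml rules):
#
#   service:
#     env: |
#       ADMIN_TOKEN=...
#       SMTP_PASSWORD=...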
# ============================================================================
# CADDY REVERSE PROXY (Host-Side)
# ============================================================================
# Caddy terminates TLS and proxies to the VM
services.caddy.virtualHosts."${host}" = {
extraConfig = ''
reverse_proxy ${serviceCfg.interface.ip}:${toString serviceCfg.ports.port0} {
header_up X-Real-IP {remote_host}
}
tls ${serviceCfg.ssl.cert} ${serviceCfg.ssl.key}
encode zstd gzip
'';
};
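# OPTIONAL: extra hardening can go in extraConfig above; a sketch using
# Caddy's standard header directive (the header values shown are examples):
#
#   header {
#     Strict-Transport-Security "max-age=31536000; includeSubDomains"
#     X-Content-Type-Options "nosniff"
#   }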
# ============================================================================
# MICROVM DEFINITION
# ============================================================================
# Everything below defines the VM's configuration
microvm.vms.${serviceCfg.name} = {
# VM Lifecycle
autostart = true; # Start VM automatically on host boot
restartIfChanged = true; # Restart VM when configuration changes
# ──────────────────────────────────────────────────────────────────────
# VM Guest Configuration
# ──────────────────────────────────────────────────────────────────────
# Everything inside 'config' runs INSIDE the VM
config = {
# NixOS version (should match host or be compatible)
system.stateVersion = "24.05";
# Timezone (should match host for consistent logging)
time.timeZone = "America/Winnipeg";
# SSH Access - allow SSH into VM using host user's keys
users.users.root.openssh.authorizedKeys.keys = flake.config.people.users.${user0}.sshKeys;
# ────────────────────────────────────────────────────────────────────
# Services Configuration
# ────────────────────────────────────────────────────────────────────
services = {
# ══════════════════════════════════════════════════════════════════
# YOUR SERVICE CONFIGURATION GOES HERE
# ══════════════════════════════════════════════════════════════════
# Choose one of the patterns below based on your service type:
# ┌─────────────────────────────────────────────────────────────────
# │ PATTERN 1: Simple Service (Vaultwarden-style)
# │ - Uses environment file for secrets
# │ - Single configuration block
# └─────────────────────────────────────────────────────────────────
# vaultwarden = {
# enable = true;
# dbBackend = "sqlite";
# config = {
# DOMAIN = "https://${host}";
#
# # Email Configuration
# SMTP_AUTH_MECHANISM = "Plain";
# SMTP_EMBED_IMAGES = true;
# SMTP_FROM = serviceCfg.email.address0;
# SMTP_FROM_NAME = serviceCfg.label;
# SMTP_HOST = smtpCfg.hostname;
# SMTP_PORT = smtpCfg.ports.port1;
# SMTP_SECURITY = smtpCfg.records.record1;
# SMTP_USERNAME = smtpCfg.email.address0;
#
# # Security Configuration
# DISABLE_ADMIN_TOKEN = false;
#
# # Web Server Settings
# ROCKET_ADDRESS = "0.0.0.0";
# ROCKET_PORT = serviceCfg.ports.port0;
# };
#
# # Mount secrets from host
# environmentFile = "/run/secrets/env";
# };
# ┌─────────────────────────────────────────────────────────────────
# │ PATTERN 2: Complex Service (Forgejo-style)
# │ - Uses separate secret files
# │ - More complex settings structure
# └─────────────────────────────────────────────────────────────────
# forgejo = {
# enable = true;
# lfs.enable = true;
#
# # Separate secret files (not environment variables)
# secrets = {
# mailer.PASSWD = "/run/secrets/smtp";
# };
#
# dump = {
# interval = "5:00";
# type = "zip";
# file = "forgejo-backup";
# enable = true;
# };
#
# settings = {
# server = {
# DOMAIN = host;
# ROOT_URL = "https://${host}/";
# HTTP_PORT = serviceCfg.ports.port0;
# };
#
# service.DISABLE_REGISTRATION = true;
#
# actions = {
# ENABLED = true;
# DEFAULT_ACTIONS_URL = "github";
# };
#
# mirror = {
# ENABLED = true;
# };
#
# mailer = {
# ENABLED = true;
# SMTP_ADDR = smtpCfg.hostname;
# FROM = smtpCfg.email.address1;
# USER = smtpCfg.email.address1;
# PROTOCOL = "${smtpCfg.name}+${smtpCfg.records.record1}";
# SMTP_PORT = smtpCfg.ports.port1;
# SEND_AS_PLAIN_TEXT = true;
# USE_CLIENT_CERT = false;
# };
# };
# };
# ┌─────────────────────────────────────────────────────────────────
# │ PATTERN 3: Media Service (Jellyfin-style)
# │ - Simple enable, uses openFirewall
# │ - No secrets needed
# └─────────────────────────────────────────────────────────────────
# jellyfin = {
# enable = true;
# openFirewall = true;
# };
# ══════════════════════════════════════════════════════════════════
# SSH Server (for VM management) - ALWAYS INCLUDE
# ══════════════════════════════════════════════════════════════════
openssh = {
enable = true;
settings = {
PasswordAuthentication = false;
PermitRootLogin = "prohibit-password";
};
};
};
# ────────────────────────────────────────────────────────────────────
# Firewall Configuration
# ────────────────────────────────────────────────────────────────────
# Open necessary ports inside the VM
#
# EXAMPLES:
# Vaultwarden: [22, 25, 139, 587, 2525, serviceCfg.ports.port0]
# Forgejo: [22, 25, 139, 587, 2525, serviceCfg.ports.port0]
# Jellyfin: [22, serviceCfg.ports.port0, port1, port2]
networking.firewall.allowedTCPPorts = [
22 # SSH (always include)
serviceCfg.ports.port0 # Main service port
# OPTIONAL: SMTP ports if service sends email
# 25 # SMTP
# 587 # SMTP Submission
# 2525 # Alternative SMTP
# OPTIONAL: Additional ports (like Jellyfin's discovery ports)
# serviceCfg.ports.port1
# serviceCfg.ports.port2
];
# ────────────────────────────────────────────────────────────────────
# OPTIONAL: Temporary Filesystem
# ────────────────────────────────────────────────────────────────────
# Some services need a large /tmp (e.g., Forgejo for large Git operations)
# Jellyfin also uses this pattern
#
# UNCOMMENT IF NEEDED:
# fileSystems."/tmp" = {
# device = "tmpfs";
# fsType = "tmpfs";
# options = [
# "size=4G" # Adjust size as needed
# "mode=1777" # Sticky bit for multi-user access
# ];
# };
# ────────────────────────────────────────────────────────────────────
# Systemd Configuration
# ────────────────────────────────────────────────────────────────────
systemd = {
# Network Configuration (systemd-networkd)
# Configure the VM's network interface
#
# INTERFACE NAME: "enp0s5" is typical for QEMU with the q35 machine type
# To verify: SSH into VM and run: ip link show
network = {
enable = true;
networks."20-lan" = {
# Match the network interface created by QEMU
matchConfig.Name = "enp0s5";
# Static IP Configuration
# MUST be on same subnet as host bridge (e.g., 192.168.50.0/24)
addresses = [
{ Address = "${serviceCfg.interface.ip}/24"; }
];
# Default route for internet access
# PATTERN 1: Use "0.0.0.0/0" (Forgejo)
# PATTERN 2: Use "${hostCfg.localhost.address1}/0" (Vaultwarden, Jellyfin)
# Both work - choose one
routes = [
{
Destination = "0.0.0.0/0"; # or "${hostCfg.localhost.address1}/0"
Gateway = serviceCfg.interface.gate;
}
];
# DNS servers
dns = [
"1.1.1.1" # Cloudflare
"8.8.8.8" # Google
];
};
};
# OPTIONAL: VM-internal tmpfiles (Jellyfin and Forgejo use this)
# This creates directories INSIDE the VM, separate from host tmpfiles
# Used when the service needs specific ownership/permissions inside VM
#
# EXAMPLES:
# Jellyfin: "d ${serviceCfg.varPaths.path0}/media 0755 ${serviceCfg.name} ${serviceCfg.name} -"
# Forgejo: "d ${serviceCfg.varPaths.path0} 0755 ${serviceCfg.name} ${serviceCfg.name} -"
#
# UNCOMMENT IF NEEDED:
# tmpfiles.rules = [
# "d ${serviceCfg.varPaths.path0} 0755 ${serviceCfg.name} ${serviceCfg.name} -"
# ];
};
# Ensure systemd-networkd actually starts on boot: systemd.network.enable
# generates the configs, and this pulls the service into multi-user.target
# so the network is configured when the VM boots
systemd.services.systemd-networkd.wantedBy = [ "multi-user.target" ];
# ────────────────────────────────────────────────────────────────────
# MicroVM Hardware Configuration
# ────────────────────────────────────────────────────────────────────
microvm = {
# Virtual CPU cores
# Adjust based on service needs:
# - Light services (Vaultwarden, Forgejo): 2 cores
# - Heavy services (Jellyfin): 6 cores
vcpu = 2;
# Memory in MB
# Adjust based on service needs:
# - Light services (Vaultwarden): 3072 MB (3 GB)
# - Medium services (Forgejo): 3072 MB (3 GB)
# - Heavy services (Jellyfin): 8192 MB (8 GB)
mem = 3072;
# Hypervisor - QEMU with KVM provides best performance
hypervisor = "qemu";
# ──────────────────────────────────────────────────────────────────
# Network Interfaces
# ──────────────────────────────────────────────────────────────────
# All working configs use TWO interfaces: TAP + User-mode
interfaces = [
# Primary Interface: TAP (LAN Connectivity)
{
type = "tap";
id = serviceCfg.interface.id; # e.g., "vm-service"
mac = serviceCfg.interface.mac; # e.g., "02:00:00:00:00:51"
}
# Secondary Interface: User-mode (NAT/Fallback)
{
type = "user";
id = serviceCfg.interface.idUser; # e.g., "vmuser-service"
mac = serviceCfg.interface.macUser; # e.g., "02:00:00:00:01:51"
}
];
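# NOTE: the TAP interface above only carries LAN traffic if the host already
# defines the br-vms bridge and enslaves the vm-* TAP devices to it. A
# minimal host-side sketch using systemd-networkd (unit names are
# assumptions; this belongs in the host's networking module, not here):
#
#   systemd.network.netdevs."10-br-vms".netdevConfig = {
#     Kind = "bridge";
#     Name = "br-vms";
#   };
#   systemd.network.networks."11-vm-tap" = {
#     matchConfig.Name = "vm-*";
#     networkConfig.Bridge = "br-vms";
#   };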
# ──────────────────────────────────────────────────────────────────
# Port Forwarding (Host → VM)
# ──────────────────────────────────────────────────────────────────
# Forward SSH from host to VM for easy access
# Access via: ssh -p 22XX root@localhost (from host)
forwardPorts = [
{
from = "host";
host.port = serviceCfg.interface.ssh; # e.g., 2201
guest.port = 22;
}
];
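# For convenience, the forward above pairs well with a client-side alias in
# the host user's ~/.ssh/config (hypothetical alias and port):
#
#   Host vm-service
#     HostName localhost
#     Port 2201
#     User root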
# ──────────────────────────────────────────────────────────────────
# Shared Directories (VirtioFS)
# ──────────────────────────────────────────────────────────────────
# Share directories from host to VM
#
# IMPORTANT: All source paths must exist on host BEFORE VM starts
# Use systemd.tmpfiles.rules (in host section) to create directories
shares = [
# ┌───────────────────────────────────────────────────────────────
# │ Nix Store (Read-Only) - ALWAYS INCLUDE
# └───────────────────────────────────────────────────────────────
{
mountPoint = "/nix/.ro-store";
proto = "virtiofs";
source = "/nix/store";
tag = "read_only_nix_store";
}
# ┌───────────────────────────────────────────────────────────────
# │ Service Data (Read-Write)
# └───────────────────────────────────────────────────────────────
# CHOOSE ONE PATTERN:
#
# PATTERN 1: Direct path (Vaultwarden)
# {
# mountPoint = "/var/lib/bitwarden_rs";
# proto = "virtiofs";
# source = serviceCfg.mntPaths.path0;
# tag = "${serviceCfg.name}_data";
# }
#
# PATTERN 2: Use serviceCfg.name (Forgejo)
# {
# mountPoint = "/var/lib/${serviceCfg.name}";
# proto = "virtiofs";
# source = serviceCfg.mntPaths.path0;
# tag = "${serviceCfg.name}_data";
# }
#
# PATTERN 3: Use serviceCfg.varPaths (Jellyfin)
# {
# mountPoint = serviceCfg.varPaths.path0;
# proto = "virtiofs";
# source = serviceCfg.mntPaths.path0;
# tag = "${serviceCfg.name}_data";
# }
{
mountPoint = "/var/lib/${serviceCfg.name}"; # ADJUST THIS
proto = "virtiofs";
source = serviceCfg.mntPaths.path0;
tag = "${serviceCfg.name}_data";
}
# ┌───────────────────────────────────────────────────────────────
# │ OPTIONAL: Additional Data Mounts (like Jellyfin's cache)
# └───────────────────────────────────────────────────────────────
# UNCOMMENT IF NEEDED:
# {
# mountPoint = serviceCfg.varPaths.path1;
# proto = "virtiofs";
# source = "${serviceCfg.mntPaths.path0}/cache";
# tag = "${serviceCfg.name}_cache";
# }
# ┌───────────────────────────────────────────────────────────────
# │ Secrets (Read-Only) - INCLUDE IF SERVICE NEEDS SECRETS
# └───────────────────────────────────────────────────────────────
# CRITICAL: Source must use service-specific subdirectory!
# This matches the sops.secrets configuration above
#
# UNCOMMENT IF SERVICE NEEDS SECRETS:
# {
# mountPoint = "/run/secrets";
# proto = "virtiofs";
# source = "/run/secrets/${serviceCfg.name}";
# tag = "host_secrets";
# }
];
};
};
};
}
# ============================================================================
# QUICK REFERENCE: Pattern Comparison
# ============================================================================
#
# ┌──────────────┬─────────────┬──────────────┬─────────────┐
# │ Aspect       │ Vaultwarden │ Forgejo      │ Jellyfin    │
# ├──────────────┼─────────────┼──────────────┼─────────────┤
# │ vCPU         │ 2           │ 2            │ 6           │
# │ Memory       │ 3072 MB     │ 3072 MB      │ 8192 MB     │
# │ Secrets      │ env file    │ sep. files   │ none        │
# │ /tmp mount   │ no          │ yes (4G)     │ yes (4G)    │
# │ VM tmpfiles  │ no          │ yes          │ yes         │
# │ Host perms   │ 0777        │ 0777         │ 0777        │
# │ Data mounts  │ 1           │ 1            │ 2           │
# │ Secrets mnt  │ yes         │ yes          │ no          │
# └──────────────┴─────────────┴──────────────┴─────────────┘
#
# ============================================================================
# CHECKLIST: Steps to Create New Service
# ============================================================================
#
# 1.  [ ] Copy this template to modules/nixos/services/YOUR-SERVICE/default.nix
# 2.  [ ] Replace 'service' with your actual service name in instances reference
# 3.  [ ] Uncomment and configure your service in services = { ... }
# 4.  [ ] Adjust vcpu/mem based on service requirements
# 5.  [ ] Configure secrets (if needed) - both sops.secrets and shares
# 6.  [ ] Set correct mountPoint for service data (check service docs)
# 7.  [ ] Adjust firewall ports based on service needs
# 8.  [ ] Add /tmp mount if service needs large temporary space
# 9.  [ ] Test build: sudo nixos-rebuild build --flake .#YOUR-HOST
# 10. [ ] Deploy: sudo nixos-rebuild switch --flake .#YOUR-HOST
# 11. [ ] Verify TAP exists: ip link show vm-YOUR-SERVICE
# 12. [ ] SSH to VM: ssh -p 22XX root@localhost
# 13. [ ] Check network in VM: ip addr show enp0s5
# 14. [ ] Test service: curl http://VM-IP:PORT
# 15. [ ] Test external access: https://YOUR-SERVICE.example.com
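#
# TROUBLESHOOTING: if the VM boots but has no LAN connectivity, confirm on
# the host that the TAP device is enslaved to the bridge - the output of
# the command below should include "master br-vms":
#
#   ip link show vm-YOUR-SERVICE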
#
# ============================================================================