test: jellyfin microVM

Nick 2025-11-08 20:09:32 -06:00
parent e6a4f094e3
commit b553e92ad1
4 changed files with 464 additions and 328 deletions


@@ -2,20 +2,23 @@
let
inherit (moduleFunctions.instancesFunctions)
domain0
servicePath
sslPath
sopsPath
varPath
mntPath
secretPath
;
label = "Jellyfin";
name = "jellyfin";
domain = "${name}.${domain0}";
secrets = "${secretPath}/${name}";
ssl = "${sslPath}/${name}.${domain0}";
in
{
label = label;
name = name;
short = "Jelly";
sops = {
path0 = "${sopsPath}/${name}";
email = {
address0 = "noreply@${domain0}";
};
domains = {
url0 = domain;
@@ -29,17 +32,32 @@ in
"music"
];
subdomain = name;
paths = {
path0 = "${servicePath}/${label}";
path1 = "${servicePath}/${label}/cache";
};
ports = {
port0 = 8096; # Jellyfin HTTP
port1 = 5055; # Jellyseerr
port2 = 8920; # Jellyfin HTTPS
};
interface = {
id = "vm-${name}";
mac = "02:00:00:00:00:52";
idUser = "vmuser-jellyfin";
macUser = "02:00:00:00:00:04";
ip = "192.168.50.152";
gate = "192.168.50.1";
ssh = 2202;
};
ssl = {
cert = "${sslPath}/${name}.${domain0}/fullchain.pem";
key = "${sslPath}/${name}.${domain0}/key.pem";
path = ssl;
cert = "${ssl}/fullchain.pem";
key = "${ssl}/key.pem";
};
varPaths = {
path0 = "${varPath}/${name}";
};
mntPaths = {
path0 = "${mntPath}/${name}";
};
secretPaths = {
path0 = secrets;
};
}


@@ -0,0 +1,400 @@
# ============================================================================
# MicroVM Configuration
# ============================================================================
# This file contains the complete configuration for running a service inside an isolated MicroVM
#
# Architecture Overview:
# ┌────────────────────────────────────────────────┐
# │ Host (Ceres - NixOS Server) │
# │ │
# │ ┌─────────────┐ ┌─────────────┐ │
# │ │ Caddy │ │ Bridge │ │
# │ │ (Reverse │───────▶│ (br-vms) │ │
# │ │ Proxy) │ │ 192.168.50 │ │
# │ └─────────────┘ │ .240 │ │
# │ │ └──────┬──────┘ │
# │ │ │ │
# │ │ ┌──────▼──────┐ │
# │ │ │ TAP │ │
# │ │ │ (vm-*) │ │
# │ │ └──────┬──────┘ │
# │ │ │ │
# │ ┌─────────▼──────────────────────▼─────────┐ │
# │ │ │ │
# │ │ MicroVM │ │
# │ │ ┌─────────────┐ ┌─────────────┐ │ │
# │ │ │ Service │ │ enp0s5 │ │ │
# │ │ │ Service │ │ 192.168.50 │ │ │
# │ │ │ Port xxxx │ │ .xxx │ │ │
# │ │ └─────────────┘ └─────────────┘ │ │
# │ │ │ │
# │ └──────────────────────────────────────────┘ │
# │ │
# └────────────────────────────────────────────────┘
#
# Network Flow:
# 1. External request → Router (port forward 443) → Host IP (192.168.50.240)
# 2. Host Caddy receives HTTPS request on port 443
# 3. Caddy terminates TLS using ACME certificates
# 4. Caddy forwards HTTP to VM IP (192.168.50.xxx:xxxx)
# 5. Request travels through br-vms bridge → TAP interface → VM network
# 6. Service responds back through the same path
# ============================================================================
{
config,
flake,
...
}:
let
# Pull configuration from centralized instance definitions
# These are defined in modules/config/instances/config/*.nix
serviceCfg = flake.config.services.instances.service;
smtpCfg = flake.config.services.instances.smtp;
hostCfg = flake.config.services.instances.web;
inherit (flake.config.people) user0;
inherit (flake.config.services) instances;
# DNS provider configuration for ACME DNS-01 challenge
dns0 = instances.web.dns.provider0;
dns0Path = "dns/${dns0}";
in
{
# ============================================================================
# HOST-SIDE CONFIGURATION
# ============================================================================
# The following settings run on the host (Ceres), not inside the VM
# Add Caddy user to the ACME group so it can read TLS certificates
# NixOS ACME module creates certs with group ownership, and Caddy needs
# read access to serve them over HTTPS
users.users.caddy.extraGroups = [ "acme" ];
# Configure Let's Encrypt SSL certificate for service domain
# Uses DNS-01 challenge (via dns0 provider) instead of HTTP-01
# This allows cert generation even when the service isn't publicly accessible yet
security.acme.certs."${serviceCfg.domains.url0}" = {
dnsProvider = dns0; # DNS provider (e.g., cloudflare, route53)
environmentFile = config.sops.secrets.${dns0Path}.path; # API credentials from SOPS
group = "caddy"; # Allow Caddy to read the certs
};
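  # As an illustration only (the provider behind dns0 is not shown in this commit),
  # the decrypted environment file referenced above would hold the API credentials
  # in the variable names lego expects for that provider, e.g. for Cloudflare:
  #   CLOUDFLARE_DNS_API_TOKEN=<token with Zone:DNS:Edit permission>
  # The exact variable names depend on which dns.provider0 is configured.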
# ============================================================================
# MICROVM DEFINITION
# ============================================================================
# This section defines the entire VM configuration, including networking,
# services, and resource allocation
microvm.vms.service = {
# Automatically start VM when host boots
autostart = true;
# Restart VM when configuration changes (during nixos-rebuild)
restartIfChanged = true;
# ============================================================================
# VM GUEST CONFIGURATION
# ============================================================================
# Everything inside this 'config' block runs INSIDE the VM, not on the host
config = {
# NixOS version for the VM system
system.stateVersion = "24.05";
time.timeZone = "America/Winnipeg";
# Allow SSH access to VM using host user's SSH keys
# This enables direct SSH via: ssh root@192.168.50.xxx
users.users.root.openssh.authorizedKeys.keys = flake.config.people.users.${user0}.sshKeys;
# ============================================================================
# SERVICE (Inside VM)
# ============================================================================
# Main application configuration - runs on port xxxx inside the VM
services = {
# ============================================================================
# VM SSH ACCESS
# ============================================================================
# Enable SSH server inside the VM for management and debugging
openssh = {
enable = true;
settings = {
PasswordAuthentication = false; # Only allow key-based auth
PermitRootLogin = "prohibit-password"; # Root login only with keys
};
};
};
# Open firewall ports inside the VM
networking.firewall.allowedTCPPorts = [
serviceCfg.ports.port0 # Service web interface port number
];
# ============================================================================
# VM NETWORK CONFIGURATION (systemd-networkd)
# ============================================================================
# This configures the network interface INSIDE the VM
# The VM sees a network interface called "enp0s5" which connects to the
# host's TAP interface (vm-service) via the bridge (br-vms)
systemd.network = {
enable = true; # Enable systemd-networkd for network management
networks."20-lan" = {
# Match the network interface created by QEMU
# QEMU with q35 machine type typically creates "enp0s5" for the first NIC
matchConfig.Name = "enp0s5";
# Assign static IP address to the VM
# This IP must be on the same subnet as the host bridge (192.168.50.0/24)
addresses = [ { Address = "${serviceCfg.interface.ip}/24"; } ]; # 192.168.50.xxx/24
# Configure default route to reach the internet
# All traffic (0.0.0.0/0) goes through the gateway (usually your router)
routes = [
{
Destination = "${hostCfg.localhost.address1}/0";
Gateway = serviceCfg.interface.gate; # 192.168.50.1
}
];
# DNS servers for the VM to use
# Using public DNS (Cloudflare and Google) for reliability
dns = [
"1.1.1.1"
"8.8.8.8"
];
};
};
# Explicitly start systemd-networkd service
# By default, systemd.network.enable creates configs but doesn't start the service
# This ensures the network is actually configured when the VM boots
systemd.services.systemd-networkd.wantedBy = [ "multi-user.target" ];
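      # A quick way to confirm the static network config took effect, run from
      # inside the VM (standard systemd-networkd/iproute2 tools; shown only as a sketch):
      #   networkctl status enp0s5   # should report "routable" with the static address
      #   ip route show default      # should show the gateway from interface.gate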
# ============================================================================
# MICROVM HARDWARE CONFIGURATION
# ============================================================================
# This section defines the VM's virtual hardware, networking, and storage
microvm = {
# Virtual CPU cores allocated to the VM
vcpu = 2;
# Memory allocated to the VM (in MB)
mem = 1024;
# Hypervisor to use (QEMU with KVM acceleration)
hypervisor = "qemu";
# ============================================================================
# NETWORK INTERFACES
# ============================================================================
# The VM has TWO network interfaces for different purposes:
interfaces = [
# ──────────────────────────────────────────────────────────────────
# Primary Interface: TAP (for LAN connectivity)
# ──────────────────────────────────────────────────────────────────
# TAP creates a virtual ethernet device on the host that acts like
# a physical network cable connecting the VM to the host's network.
#
# Network Path:
# VM (enp0s5) ← virtio-net → TAP (vm-service) → Bridge (br-vms) → Physical NIC (enp10s0) → LAN
#
# The host has a bridge (br-vms) configured in systems/ceres/config/networking.nix
# (a sketch of what that bridge definition can look like follows after this
# interfaces list); it connects:
# - Physical interface: enp10s0
# - TAP interfaces: vm-* (this and other VMs)
#
# This allows the VM to appear as a separate device on your LAN with
# its own IP address (192.168.50.xxx)
{
type = "tap"; # TAP interface (Layer 2 / Ethernet)
id = serviceCfg.interface.id; # Interface name on host: "vm-service"
mac = serviceCfg.interface.mac; # MAC address: "02:00:00:00:00:xx"
}
# ──────────────────────────────────────────────────────────────────
# Secondary Interface: User-mode networking (for fallback/NAT)
# ──────────────────────────────────────────────────────────────────
# User-mode networking (SLIRP) provides outbound internet access via NAT
# without requiring any host configuration. This is a backup interface.
#
# - VM gets a private IP (10.0.2.15) on this interface
# - Provides internet access even if TAP/bridge isn't working
# - Used for testing and as a fallback
# - Cannot receive inbound connections (NAT only)
{
type = "user"; # User-mode networking (SLIRP/NAT)
id = serviceCfg.interface.idUser; # Interface name: "vmuser-*"
mac = serviceCfg.interface.macUser; # MAC address: "02:00:00:00:00:xx"
}
];
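        # For context, a minimal sketch of the host-side bridge these TAP interfaces
        # attach to. The real definition lives in systems/ceres/config/networking.nix
        # (not part of this commit), so the layout below is an assumption about one
        # common systemd-networkd setup, not a copy of that file:
        #   systemd.network = {
        #     netdevs."10-br-vms".netdevConfig = { Kind = "bridge"; Name = "br-vms"; };
        #     networks."20-uplink" = {
        #       matchConfig.Name = "enp10s0";      # physical NIC joins the bridge
        #       networkConfig.Bridge = "br-vms";
        #     };
        #     networks."20-vm-taps" = {
        #       matchConfig.Name = "vm-*";         # TAP interfaces for the VMs
        #       networkConfig.Bridge = "br-vms";
        #     };
        #     networks."30-br-vms" = {
        #       matchConfig.Name = "br-vms";
        #       addresses = [ { Address = "192.168.50.240/24"; } ]; # host LAN IP
        #     };
        #   };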
# ============================================================================
# PORT FORWARDING (Host → VM)
# ============================================================================
# Forward ports from the host to the VM for direct access
# This allows SSH to the VM via: ssh -p 220x root@localhost (from host)
#
# Without this, you'd need to SSH via the VM's LAN IP: ssh root@192.168.50.xxx
forwardPorts = [
{
from = "host"; # Forward from host
host.port = serviceCfg.interface.ssh; # Host port: 220x
guest.port = 22; # VM port: 22 (SSH)
}
];
# ============================================================================
# SHARED DIRECTORIES (Host → VM)
# ============================================================================
# VirtioFS allows sharing directories from host to VM with good performance
# This is better than network shares (NFS/Samba) for VM-host communication
#
# Why use VirtioFS instead of storing everything in the VM?
# 1. Data persists when VM is recreated (VMs are ephemeral, data isn't)
# 2. Easy backups (just backup host directories)
# 3. Data accessible from host for maintenance/migration
# 4. Share read-only nix store from host (saves space)
shares = [
# ──────────────────────────────────────────────────────────────────
# Nix Store (Read-Only)
# ──────────────────────────────────────────────────────────────────
# Share the host's /nix/store as read-only inside the VM
# This provides all Nix packages without duplicating data
# The VM can use all the same packages as the host
{
mountPoint = "/nix/.ro-store"; # Mount point in VM
proto = "virtiofs"; # VirtioFS protocol (fast, modern)
source = "/nix/store"; # Source on host
tag = "read_only_nix_store"; # Unique identifier
}
# ──────────────────────────────────────────────────────────────────
# Service Data (Read-Write)
# ──────────────────────────────────────────────────────────────────
# Persistent storage for Service's database and attachments
# Stored on host at: /mnt/storage/service
# This data survives VM rebuilds/restarts
{
mountPoint = "/var/lib/service"; # Where Service stores its data
proto = "virtiofs"; # VirtioFS protocol
source = serviceCfg.mntPaths.path0; # Host: /mnt/storage/service
tag = "service_data"; # Unique identifier
}
# ──────────────────────────────────────────────────────────────────
# Secrets (Read-Only)
# ──────────────────────────────────────────────────────────────────
# Share secrets managed by SOPS from the host
# Contains sensitive config like SMTP passwords, admin tokens, etc.
# SOPS-nix decrypts these on the host, then they're shared to the VM
{
mountPoint = "/run/secrets"; # Mount point in VM
proto = "virtiofs"; # VirtioFS protocol
source = "/run/secrets"; # Source on host
tag = "host_secrets"; # Unique identifier
}
];
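        # If the shares above are wired up correctly they appear inside the VM as
        # virtiofs mounts; a quick check with standard util-linux tooling (illustrative):
        #   findmnt -t virtiofs
        # should list /nix/.ro-store, /var/lib/service, and /run/secrets, with the
        # share tags as the mount sources.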
};
};
};
# ============================================================================
# HOST-SIDE STORAGE CONFIGURATION
# ============================================================================
# Create necessary directories on the host for VM data
systemd.tmpfiles.rules = [
# Create service data directory on host if it doesn't exist
# This is where the VM's persistent data is actually stored
# d = directory, 0755 = permissions, root root = owner/group
"d ${serviceCfg.mntPaths.path0} 0755 root root -" # /mnt/storage/service
];
# ============================================================================
# CADDY REVERSE PROXY (Host)
# ============================================================================
# Caddy runs on the host and forwards HTTPS traffic to the VM
#
# Traffic Flow:
# Internet → Router:443 → Host:443 (Caddy) → VM:xxxx (Service)
# ↓
# TLS Termination
# (ACME Certs)
#
# Why use a reverse proxy instead of exposing VM directly?
# 1. TLS/SSL termination on the host (easier cert management)
# 2. Single public IP can serve multiple services
# 3. Additional security layer (Caddy can add headers, rate limiting, etc.)
# 4. VM doesn't need to handle TLS complexity
services.caddy.virtualHosts."${serviceCfg.domains.url0}" = {
extraConfig = ''
reverse_proxy ${serviceCfg.interface.ip}:${toString serviceCfg.ports.port0} {
header_up X-Real-IP {remote_host}
}
tls ${serviceCfg.ssl.cert} ${serviceCfg.ssl.key}
encode zstd gzip
'';
};
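  # Two illustrative smoke tests from the host once the VM is running (curl is
  # assumed to be available; not part of this configuration):
  #   curl -I http://192.168.50.xxx:xxxx          # straight to the VM, plain HTTP
  #   curl -I https://<serviceCfg.domains.url0>   # through Caddy, TLS on the host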
# ============================================================================
# SECRETS MANAGEMENT (SOPS)
# ============================================================================
# Configure secrets that will be decrypted by SOPS-nix on the host
# then shared to the VM via VirtioFS
#
# The service/env file contains sensitive environment variables like:
# - ADMIN_TOKEN: For accessing the admin panel
# - DATABASE_URL: Database connection string (if using external DB)
# - SMTP_PASSWORD: For sending email notifications
sops.secrets = {
"service/env" = {
owner = "root"; # File owner on host
mode = "0600"; # Permissions (read/write for owner only)
};
};
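  # For reference, the file the VM sees at /run/secrets/service/env is a plain
  # KEY=value environment file; an illustrative (not real) shape based on the
  # variables listed above:
  #   ADMIN_TOKEN=<long random string>
  #   SMTP_PASSWORD=<mail account password>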
}
# ============================================================================
# SUMMARY: How This All Works Together
# ============================================================================
#
# 1. BOOT SEQUENCE:
# - Host starts and creates br-vms bridge
# - Host creates TAP interface (vm-service)
# - Host attaches TAP to bridge
# - QEMU starts with TAP fds and VirtioFS shares
# - VM boots and sees enp0s5 network interface
# - systemd-networkd configures enp0s5 with static IP
# - The service starts on port xxxx
#
# 2. NETWORK CONNECTIVITY:
# - VM has IP 192.168.50.xxx on LAN (via TAP/bridge)
# - Host has IP 192.168.50.240 on LAN (on bridge)
# - Both can reach each other and the internet
# - Router forwards port 443 to host IP
#
# 3. REQUEST FLOW:
# External → Router:443 → Host:443 (Caddy) → Bridge → TAP → VM:xxxx (Service)
# Response follows same path in reverse
#
# 4. DATA STORAGE:
# - VM reads packages from host's /nix/store (shared read-only)
# - VM writes data to /var/lib/service (actually /mnt/storage/service on host)
# - VM reads secrets from /run/secrets (shared from host via SOPS)
#
# 5. MAINTENANCE:
# - SSH to VM: ssh root@192.168.50.xxx (from LAN)
# - SSH to VM: ssh -p 220x root@localhost (from host)
# - Rebuild: sudo nixos-rebuild switch --flake .#ceres
# - VM automatically restarts on config changes
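#
# One extra check (an assumption based on microvm.nix's usual unit naming, not
# something this file declares): the host runs the VM through a systemd unit,
# so status and logs should be available with:
#   systemctl status microvm@service
#   journalctl -u microvm@service -f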
#
# ============================================================================


@@ -1,122 +1,35 @@
# ============================================================================
# Vaultwarden MicroVM Configuration
# ============================================================================
# This file contains the complete configuration for running Vaultwarden
# (a Bitwarden-compatible password manager) inside an isolated MicroVM.
#
# Architecture Overview:
# ┌─────────────────────────────────────────────────────────────────┐
# │ Host (Ceres - NixOS Server) │
# │ │
# │ ┌─────────────┐ ┌──────────────┐ │
# │ │ Caddy │────────▶│ Bridge │ │
# │ │ (Reverse │ │ (br-vms) │ │
# │ │ Proxy) │ │ 192.168.50 │ │
# │ └─────────────┘ │ .240 │ │
# │ │ └──────┬───────┘ │
# │ │ │ │
# │ │ ┌──────▼────────┐ │
# │ │ │ TAP Interface │ │
# │ │ │ (vm-vault*) │ │
# │ │ └──────┬────────┘ │
# │ │ │ │
# │ ┌─────▼────────────────────────▼──────────────────────────┐ │
# │ │ │ │
# │ │ MicroVM (vaultwarden) │ │
# │ │ ┌────────────────┐ ┌──────────────┐ │ │
# │ │ │ Vaultwarden │ │ enp0s5 │ │ │
# │ │ │ Service │ │ 192.168.50 │ │ │
# │ │ │ Port 8085 │ │ .151 │ │ │
# │ │ └────────────────┘ └──────────────┘ │ │
# │ │ │ │
# │ └──────────────────────────────────────────────────────────┘ │
# │ │
# └─────────────────────────────────────────────────────────────────┘
#
# Network Flow:
# 1. External request → Router (port forward 443) → Host IP (192.168.50.240)
# 2. Host Caddy receives HTTPS request on port 443
# 3. Caddy terminates TLS using ACME certificates
# 4. Caddy forwards HTTP to VM IP (192.168.50.151:8085)
# 5. Request travels through br-vms bridge → TAP interface → VM network
# 6. Vaultwarden responds back through the same path
# ============================================================================
{
config,
lib,
pkgs,
flake,
...
}:
let
# Pull configuration from centralized instance definitions
# These are defined in modules/config/instances/config/vaultwarden.nix
vaultwardenCfg = flake.config.services.instances.vaultwarden;
smtpCfg = flake.config.services.instances.smtp;
inherit (flake.config.people) user0;
inherit (flake.config.services) instances;
# DNS provider configuration for ACME DNS-01 challenge
dns0 = instances.web.dns.provider0;
dns0Path = "dns/${dns0}";
in
{
# ============================================================================
# HOST-SIDE CONFIGURATION
# ============================================================================
# The following settings run on the host (Ceres), not inside the VM
# Add Caddy user to the ACME group so it can read TLS certificates
# NixOS ACME module creates certs with group ownership, and Caddy needs
# read access to serve them over HTTPS
users.users.caddy.extraGroups = [ "acme" ];
# Configure Let's Encrypt SSL certificate for vaultwarden domain
# Uses DNS-01 challenge (via dns0 provider) instead of HTTP-01
# This allows cert generation even when the service isn't publicly accessible yet
security.acme.certs."${vaultwardenCfg.domains.url0}" = {
dnsProvider = dns0; # DNS provider (e.g., cloudflare, route53)
environmentFile = config.sops.secrets.${dns0Path}.path; # API credentials from SOPS
group = "caddy"; # Allow Caddy to read the certs
dnsProvider = dns0;
environmentFile = config.sops.secrets.${dns0Path}.path;
group = "caddy";
};
# ============================================================================
# MICROVM DEFINITION
# ============================================================================
# This section defines the entire VM configuration, including networking,
# services, and resource allocation
microvm.vms.vaultwarden = {
# Automatically start VM when host boots
autostart = true;
# Restart VM when configuration changes (during nixos-rebuild)
restartIfChanged = true;
# ============================================================================
# VM GUEST CONFIGURATION
# ============================================================================
# Everything inside this 'config' block runs INSIDE the VM, not on the host
config = {
# NixOS version for the VM system
system.stateVersion = "24.05";
time.timeZone = "America/Winnipeg";
# Allow SSH access to VM using host user's SSH keys
# This enables direct SSH via: ssh root@192.168.50.151
users.users.root.openssh.authorizedKeys.keys = flake.config.people.users.${user0}.sshKeys;
# ============================================================================
# VAULTWARDEN SERVICE (Inside VM)
# ============================================================================
# Main application configuration - runs on port 8085 inside the VM
services.vaultwarden = {
enable = true;
dbBackend = "sqlite";
config = {
# Domain Configuration
DOMAIN = "https://${vaultwardenCfg.domains.url0}";
@@ -151,59 +64,34 @@ in
environmentFile = "/run/secrets/vaultwarden/env";
};
# ============================================================================
# VM SSH ACCESS
# ============================================================================
# Enable SSH server inside the VM for management and debugging
services.openssh = {
enable = true;
settings = {
PasswordAuthentication = false; # Only allow key-based auth
PermitRootLogin = "prohibit-password"; # Root login only with keys
PasswordAuthentication = false;
PermitRootLogin = "prohibit-password";
};
};
# Open firewall ports inside the VM
networking.firewall.allowedTCPPorts = [
22 # SSH
25 # SMTP
139 # NetBIOS/SMB
587 # SMTP
2525 # SMTP
vaultwardenCfg.ports.port0 # Vaultwarden web interface (8085)
vaultwardenCfg.ports.port0
];
# ============================================================================
# VM NETWORK CONFIGURATION (systemd-networkd)
# ============================================================================
# This configures the network interface INSIDE the VM
# The VM sees a network interface called "enp0s5" which connects to the
# host's TAP interface (vm-vaultwarden) via the bridge (br-vms)
systemd.network = {
enable = true; # Enable systemd-networkd for network management
enable = true;
networks."20-lan" = {
# Match the network interface created by QEMU
# QEMU with q35 machine type typically creates "enp0s5" for the first NIC
matchConfig.Name = "enp0s5";
# Assign static IP address to the VM
# This IP must be on the same subnet as the host bridge (192.168.50.0/24)
addresses = [ { Address = "${vaultwardenCfg.interface.ip}/24"; } ]; # 192.168.50.151/24
# Configure default route to reach the internet
# All traffic (0.0.0.0/0) goes through the gateway (usually your router)
addresses = [ { Address = "${vaultwardenCfg.interface.ip}/24"; } ];
routes = [
{
Destination = "0.0.0.0/0";
Gateway = vaultwardenCfg.interface.gate; # 192.168.50.1
Gateway = vaultwardenCfg.interface.gate;
}
];
# DNS servers for the VM to use
# Using public DNS (Cloudflare and Google) for reliability
dns = [
"1.1.1.1"
"8.8.8.8"
@@ -211,243 +99,73 @@ in
};
};
# Explicitly start systemd-networkd service
# By default, systemd.network.enable creates configs but doesn't start the service
# This ensures the network is actually configured when the VM boots
systemd.services.systemd-networkd.wantedBy = [ "multi-user.target" ];
# ============================================================================
# MICROVM HARDWARE CONFIGURATION
# ============================================================================
# This section defines the VM's virtual hardware, networking, and storage
microvm = {
# Virtual CPU cores allocated to the VM
vcpu = 2;
# Memory allocated to the VM (in MB)
mem = 1024;
# Hypervisor to use (QEMU with KVM acceleration)
hypervisor = "qemu";
# ============================================================================
# NETWORK INTERFACES
# ============================================================================
# The VM has TWO network interfaces for different purposes:
interfaces = [
# ──────────────────────────────────────────────────────────────────
# Primary Interface: TAP (for LAN connectivity)
# ──────────────────────────────────────────────────────────────────
# TAP creates a virtual ethernet device on the host that acts like
# a physical network cable connecting the VM to the host's network.
#
# Network Path:
# VM (enp0s5) ← virtio-net → TAP (vm-vaultwarden) → Bridge (br-vms) → Physical NIC (enp10s0) → LAN
#
# The host has a bridge (br-vms) configured in systems/ceres/config/networking.nix
# that connects:
# - Physical interface: enp10s0
# - TAP interfaces: vm-* (this and other VMs)
#
# This allows the VM to appear as a separate device on your LAN with
# its own IP address (192.168.50.151)
{
type = "tap"; # TAP interface (Layer 2 / Ethernet)
id = vaultwardenCfg.interface.id; # Interface name on host: "vm-vaultwarden"
mac = vaultwardenCfg.interface.mac; # MAC address: "02:00:00:00:00:51"
type = "tap";
id = vaultwardenCfg.interface.id;
mac = vaultwardenCfg.interface.mac;
}
# ──────────────────────────────────────────────────────────────────
# Secondary Interface: User-mode networking (for fallback/NAT)
# ──────────────────────────────────────────────────────────────────
# User-mode networking (SLIRP) provides outbound internet access via NAT
# without requiring any host configuration. This is a backup interface.
#
# - VM gets a private IP (10.0.2.15) on this interface
# - Provides internet access even if TAP/bridge isn't working
# - Used for testing and as a fallback
# - Cannot receive inbound connections (NAT only)
{
type = "user"; # User-mode networking (SLIRP/NAT)
id = vaultwardenCfg.interface.idUser; # Interface name: "vmuser-vault"
mac = vaultwardenCfg.interface.macUser; # MAC address: "02:00:00:00:00:03"
type = "user";
id = vaultwardenCfg.interface.idUser;
mac = vaultwardenCfg.interface.macUser;
}
];
# ============================================================================
# PORT FORWARDING (Host → VM)
# ============================================================================
# Forward ports from the host to the VM for direct access
# This allows SSH to the VM via: ssh -p 2201 root@localhost (from host)
#
# Without this, you'd need to SSH via the VM's LAN IP: ssh root@192.168.50.151
forwardPorts = [
{
from = "host"; # Forward from host
host.port = vaultwardenCfg.interface.ssh; # Host port: 2201
guest.port = 22; # VM port: 22 (SSH)
from = "host";
host.port = vaultwardenCfg.interface.ssh;
guest.port = 22;
}
];
# ============================================================================
# SHARED DIRECTORIES (Host → VM)
# ============================================================================
# VirtioFS allows sharing directories from host to VM with good performance
# This is better than network shares (NFS/Samba) for VM-host communication
#
# Why use VirtioFS instead of storing everything in the VM?
# 1. Data persists when VM is recreated (VMs are ephemeral, data isn't)
# 2. Easy backups (just backup host directories)
# 3. Data accessible from host for maintenance/migration
# 4. Share read-only nix store from host (saves space)
shares = [
# ──────────────────────────────────────────────────────────────────
# Nix Store (Read-Only)
# ──────────────────────────────────────────────────────────────────
# Share the host's /nix/store as read-only inside the VM
# This provides all Nix packages without duplicating data
# The VM can use all the same packages as the host
{
mountPoint = "/nix/.ro-store"; # Mount point in VM
proto = "virtiofs"; # VirtioFS protocol (fast, modern)
source = "/nix/store"; # Source on host
tag = "read_only_nix_store"; # Unique identifier
mountPoint = "/nix/.ro-store";
proto = "virtiofs";
source = "/nix/store";
tag = "read_only_nix_store";
}
# ──────────────────────────────────────────────────────────────────
# Vaultwarden Data (Read-Write)
# ──────────────────────────────────────────────────────────────────
# Persistent storage for Vaultwarden's database and attachments
# Stored on host at: /mnt/storage/vaultwarden
# This data survives VM rebuilds/restarts
{
mountPoint = "/var/lib/bitwarden_rs"; # Where Vaultwarden stores its data
proto = "virtiofs"; # VirtioFS protocol
source = vaultwardenCfg.mntPaths.path0; # Host: /mnt/storage/vaultwarden
tag = "vaultwarden_data"; # Unique identifier
mountPoint = "/var/lib/bitwarden_rs";
proto = "virtiofs";
source = vaultwardenCfg.mntPaths.path0;
tag = "vaultwarden_data";
}
# ──────────────────────────────────────────────────────────────────
# Secrets (Read-Only)
# ──────────────────────────────────────────────────────────────────
# Share secrets managed by SOPS from the host
# Contains sensitive config like SMTP passwords, admin tokens, etc.
# SOPS-nix decrypts these on the host, then they're shared to the VM
{
mountPoint = "/run/secrets"; # Mount point in VM
proto = "virtiofs"; # VirtioFS protocol
source = "/run/secrets"; # Source on host
tag = "host_secrets"; # Unique identifier
mountPoint = "/run/secrets";
proto = "virtiofs";
source = "/run/secrets";
tag = "host_secrets";
}
];
};
};
};
# ============================================================================
# HOST-SIDE STORAGE CONFIGURATION
# ============================================================================
# Create necessary directories on the host for VM data
systemd.tmpfiles.rules = [
# Create vaultwarden data directory on host if it doesn't exist
# This is where the VM's persistent data is actually stored
# d = directory, 0755 = permissions, root root = owner/group
"d ${vaultwardenCfg.mntPaths.path0} 0755 root root -" # /mnt/storage/vaultwarden
"d ${vaultwardenCfg.mntPaths.path0} 0755 root root -"
];
# ============================================================================
# CADDY REVERSE PROXY (Host)
# ============================================================================
# Caddy runs on the host and forwards HTTPS traffic to the VM
#
# Traffic Flow:
# Internet → Router:443 → Host:443 (Caddy) → VM:8085 (Vaultwarden)
# ↓
# TLS Termination
# (ACME Certs)
#
# Why use a reverse proxy instead of exposing VM directly?
# 1. TLS/SSL termination on the host (easier cert management)
# 2. Single public IP can serve multiple services
# 3. Additional security layer (Caddy can add headers, rate limiting, etc.)
# 4. VM doesn't need to handle TLS complexity
services.caddy.virtualHosts."${vaultwardenCfg.domains.url0}" = {
extraConfig = ''
# Forward all requests to the VM's IP and port
# Caddy terminates TLS, then forwards as plain HTTP to the VM
reverse_proxy ${vaultwardenCfg.interface.ip}:${toString vaultwardenCfg.ports.port0} {
# Preserve the real client IP in a header
# Vaultwarden can use this for logging and security
header_up X-Real-IP {remote_host}
}
# TLS configuration - use ACME certificates from host
# These certs are managed by security.acme.certs (configured above)
# Caddy reads them from /var/lib/acme/vaultwarden.cloudbert.fun/
tls ${vaultwardenCfg.ssl.cert} ${vaultwardenCfg.ssl.key}
# Compress responses for better performance
# Reduces bandwidth and improves load times
encode zstd gzip
'';
};
# ============================================================================
# SECRETS MANAGEMENT (SOPS)
# ============================================================================
# Configure secrets that will be decrypted by SOPS-nix on the host
# then shared to the VM via VirtioFS
#
# The vaultwarden/env file contains sensitive environment variables like:
# - ADMIN_TOKEN: For accessing the admin panel
# - DATABASE_URL: Database connection string (if using external DB)
# - SMTP_PASSWORD: For sending email notifications
sops.secrets = {
"vaultwarden/env" = {
owner = "root"; # File owner on host
mode = "0600"; # Permissions (read/write for owner only)
owner = "root";
mode = "0600";
};
};
}
# ============================================================================
# SUMMARY: How This All Works Together
# ============================================================================
#
# 1. BOOT SEQUENCE:
# - Host starts and creates br-vms bridge
# - Host creates TAP interface (vm-vaultwarden)
# - Host attaches TAP to bridge
# - QEMU starts with TAP fds and VirtioFS shares
# - VM boots and sees enp0s5 network interface
# - systemd-networkd configures enp0s5 with static IP
# - Vaultwarden service starts on port 8085
#
# 2. NETWORK CONNECTIVITY:
# - VM has IP 192.168.50.151 on LAN (via TAP/bridge)
# - Host has IP 192.168.50.240 on LAN (on bridge)
# - Both can reach each other and the internet
# - Router forwards port 443 to host IP
#
# 3. REQUEST FLOW:
# External → Router:443 → Host:443 (Caddy) → Bridge → TAP → VM:8085 (Vaultwarden)
# Response follows same path in reverse
#
# 4. DATA STORAGE:
# - VM reads packages from host's /nix/store (shared read-only)
# - VM writes data to /var/lib/bitwarden_rs (actually /mnt/storage/vaultwarden on host)
# - VM reads secrets from /run/secrets (shared from host via SOPS)
#
# 5. MAINTENANCE:
# - SSH to VM: ssh root@192.168.50.151 (from LAN)
# - SSH to VM: ssh -p 2201 root@localhost (from host)
# - Rebuild: sudo nixos-rebuild switch --flake .#ceres
# - VM automatically restarts on config changes
#
# ============================================================================