test: jellyfin microVM

commit b553e92ad1
parent e6a4f094e3
Author: Nick
Date: 2025-11-08 20:09:32 -06:00

4 changed files with 464 additions and 328 deletions

View file

@@ -2,20 +2,23 @@
 let
 inherit (moduleFunctions.instancesFunctions)
 domain0
+servicePath
 sslPath
-sopsPath
+varPath
+mntPath
+secretPath
 ;
 label = "Jellyfin";
 name = "jellyfin";
 domain = "${name}.${domain0}";
+secrets = "${secretPath}/${name}";
+ssl = "${sslPath}/${name}.${domain0}";
 in
 {
 label = label;
 name = name;
 short = "Jelly";
-sops = {
-path0 = "${sopsPath}/${name}";
-};
+email = {
+address0 = "noreply@${domain0}";
+};
 domains = {
 url0 = domain;
@@ -29,17 +32,32 @@ in
 "music"
 ];
 subdomain = name;
+paths = {
+path0 = "${servicePath}/${label}";
+path1 = "${servicePath}/${label}/cache";
+};
 ports = {
 port0 = 8096; # Jellyfin HTTP
 port1 = 5055; # Jellyseerr
 port2 = 8920; # Jellyfin HTTPS
 };
+interface = {
+id = "vm-${name}";
+mac = "02:00:00:00:00:52";
+idUser = "vmuser-jellyfin";
+macUser = "02:00:00:00:00:04";
+ip = "192.168.50.152";
+gate = "192.168.50.1";
+ssh = 2202;
+};
 ssl = {
-cert = "${sslPath}/${name}.${domain0}/fullchain.pem";
-key = "${sslPath}/${name}.${domain0}/key.pem";
+path = ssl;
+cert = "${ssl}/fullchain.pem";
+key = "${ssl}/key.pem";
+};
+varPaths = {
+path0 = "${varPath}/${name}";
+};
+mntPaths = {
+path0 = "${mntPath}/${name}";
+};
+secretPaths = {
+path0 = secrets;
 };
 }
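For context: the microVM modules below consume this same attrset shape (domains.url0, ports.port0, interface.*, ssl.*). A minimal sketch of a consumer, assuming the flake exposes this file as flake.config.services.instances.jellyfin:

  { flake, ... }:
  let
    jellyfinCfg = flake.config.services.instances.jellyfin;
  in
  {
    # Proxy the instance's domain to the VM's Jellyfin HTTP port (8096),
    # mirroring the Caddy wiring the microVM modules in this commit use
    services.caddy.virtualHosts."${jellyfinCfg.domains.url0}".extraConfig = ''
      reverse_proxy ${jellyfinCfg.interface.ip}:${toString jellyfinCfg.ports.port0}
      tls ${jellyfinCfg.ssl.cert} ${jellyfinCfg.ssl.key}
    '';
  }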

View file

@@ -0,0 +1,400 @@
# ============================================================================
# MicroVM Configuration
# ============================================================================
# This file contains the complete configuration for running a microVM
#
# Architecture Overview:
# ┌────────────────────────────────────────────────┐
# │ Host (Ceres - NixOS Server) │
# │ │
# │ ┌─────────────┐ ┌─────────────┐ │
# │ │ Caddy │ │ Bridge │ │
# │ │ (Reverse │───────▶│ (br-vms) │ │
# │ │ Proxy) │ │ 192.168.50 │ │
# │ └─────────────┘ │ .240 │ │
# │ │ └──────┬──────┘ │
# │ │ │ │
# │ │ ┌──────▼──────┐ │
# │ │ │ TAP │ │
# │ │ │ (vm-*) │ │
# │ │ └──────┬──────┘ │
# │ │ │ │
# │ ┌─────────▼──────────────────────▼─────────┐ │
# │ │ │ │
# │ │ MicroVM │ │
# │ │ ┌─────────────┐ ┌─────────────┐ │ │
# │ │ │ Service │ │ enp0s5 │ │ │
# │ │ │ Port xxxx │ │ 192.168.50 │ │ │
# │ │ │ │ │ .xxx │ │ │
# │ │ └─────────────┘ └─────────────┘ │ │
# │ │ │ │
# │ └──────────────────────────────────────────┘ │
# │ │
# └────────────────────────────────────────────────┘
#
# Network Flow:
# 1. External request → Router (port forward 443) → Host IP (192.168.50.240)
# 2. Host Caddy receives HTTPS request on port 443
# 3. Caddy terminates TLS using ACME certificates
# 4. Caddy forwards HTTP to VM IP (192.168.50.xxx:xxxx)
# 5. Request travels through br-vms bridge → TAP interface → VM network
# 6. Service responds back through the same path
# ============================================================================
{
config,
flake,
...
}:
let
# Pull configuration from centralized instance definitions
# These are defined in modules/config/instances/config/*.nix
serviceCfg = flake.config.services.instances.service;
smtpCfg = flake.config.services.instances.smtp;
hostCfg = flake.config.services.instances.web;
inherit (flake.config.people) user0;
inherit (flake.config.services) instances;
# DNS provider configuration for ACME DNS-01 challenge
dns0 = instances.web.dns.provider0;
dns0Path = "dns/${dns0}";
in
{
# ============================================================================
# HOST-SIDE CONFIGURATION
# ============================================================================
# The following settings run on the host (Ceres), not inside the VM
# Add Caddy user to the ACME group so it can read TLS certificates
# NixOS ACME module creates certs with group ownership, and Caddy needs
# read access to serve them over HTTPS
users.users.caddy.extraGroups = [ "acme" ];
# Configure Let's Encrypt SSL certificate for service domain
# Uses DNS-01 challenge (via dns0 provider) instead of HTTP-01
# This allows cert generation even when the service isn't publicly accessible yet
security.acme.certs."${serviceCfg.domains.url0}" = {
dnsProvider = dns0; # DNS provider (e.g., cloudflare, route53)
environmentFile = config.sops.secrets.${dns0Path}.path; # API credentials from SOPS
group = "caddy"; # Allow Caddy to read the certs
};
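# Note: the ACME module generates one systemd unit per certificate on the
# host, so renewal status can be checked with (substituting the real domain):
#   systemctl status acme-<domain>.service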
# ============================================================================
# MICROVM DEFINITION
# ============================================================================
# This section defines the entire VM configuration, including networking,
# services, and resource allocation
microvm.vms.service = {
# Automatically start VM when host boots
autostart = true;
# Restart VM when configuration changes (during nixos-rebuild)
restartIfChanged = true;
# ============================================================================
# VM GUEST CONFIGURATION
# ============================================================================
# Everything inside this 'config' block runs INSIDE the VM, not on the host
config = {
# NixOS version for the VM system
system.stateVersion = "24.05";
time.timeZone = "America/Winnipeg";
# Allow SSH access to VM using host user's SSH keys
# This enables direct SSH via: ssh root@192.168.50.xxx
users.users.root.openssh.authorizedKeys.keys = flake.config.people.users.${user0}.sshKeys;
# ============================================================================
# SERVICE (Inside VM)
# ============================================================================
# Main application configuration - runs on port xxxx inside the VM
services = {
# ============================================================================
# VM SSH ACCESS
# ============================================================================
# Enable SSH server inside the VM for management and debugging
openssh = {
enable = true;
settings = {
PasswordAuthentication = false; # Only allow key-based auth
PermitRootLogin = "prohibit-password"; # Root login only with keys
};
};
};
# Open firewall ports inside the VM
# (networking.* is a top-level option, so it sits outside the services block)
networking.firewall.allowedTCPPorts = [
serviceCfg.ports.port0 # Service web interface port
];
# ============================================================================
# VM NETWORK CONFIGURATION (systemd-networkd)
# ============================================================================
# This configures the network interface INSIDE the VM
# The VM sees a network interface called "enp0s5" which connects to the
# host's TAP interface (vm-service) via the bridge (br-vms)
systemd.network = {
enable = true; # Enable systemd-networkd for network management
networks."20-lan" = {
# Match the network interface created by QEMU
# QEMU with q35 machine type typically creates "enp0s5" for the first NIC
matchConfig.Name = "enp0s5";
# Assign static IP address to the VM
# This IP must be on the same subnet as the host bridge (192.168.50.0/24)
addresses = [ { Address = "${serviceCfg.interface.ip}/24"; } ]; # 192.168.50.xxx/24
# Configure default route to reach the internet
# All traffic (0.0.0.0/0) goes through the gateway (usually your router)
routes = [
{
Destination = "${hostCfg.localhost.address1}/0";
Gateway = serviceCfg.interface.gate; # 192.168.50.1
}
];
# DNS servers for the VM to use
# Using public DNS (Cloudflare and Google) for reliability
dns = [
"1.1.1.1"
"8.8.8.8"
];
};
};
# Explicitly start systemd-networkd service
# By default, systemd.network.enable creates configs but doesn't start the service
# This ensures the network is actually configured when the VM boots
systemd.services.systemd-networkd.wantedBy = [ "multi-user.target" ];
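# Sanity check (from an SSH session inside the VM): `networkctl status enp0s5`
# should show the static address and gateway configured above.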
# ============================================================================
# MICROVM HARDWARE CONFIGURATION
# ============================================================================
# This section defines the VM's virtual hardware, networking, and storage
microvm = {
# Virtual CPU cores allocated to the VM
vcpu = 2;
# Memory allocated to the VM (in MB)
mem = 1024;
# Hypervisor to use (QEMU with KVM acceleration)
hypervisor = "qemu";
# ============================================================================
# NETWORK INTERFACES
# ============================================================================
# The VM has TWO network interfaces for different purposes:
interfaces = [
# ──────────────────────────────────────────────────────────────────
# Primary Interface: TAP (for LAN connectivity)
# ──────────────────────────────────────────────────────────────────
# TAP creates a virtual ethernet device on the host that acts like
# a physical network cable connecting the VM to the host's network.
#
# Network Path:
# VM (enp0s5) ← virtio-net → TAP (vm-service) → Bridge (br-vms) → Physical NIC (enp10s0) → LAN
#
# The host has a bridge (br-vms) configured in systems/ceres/config/networking.nix
# that connects:
# - Physical interface: enp10s0
# - TAP interfaces: vm-* (this and other VMs)
#
# This allows the VM to appear as a separate device on your LAN with
# its own IP address (192.168.50.xxx)
{
type = "tap"; # TAP interface (Layer 2 / Ethernet)
id = serviceCfg.interface.id; # Interface name on host: "vm-service"
mac = serviceCfg.interface.mac; # MAC address: "02:00:00:00:00:xx"
}
# ──────────────────────────────────────────────────────────────────
# Secondary Interface: User-mode networking (for fallback/NAT)
# ──────────────────────────────────────────────────────────────────
# User-mode networking (SLIRP) provides outbound internet access via NAT
# without requiring any host configuration. This is a backup interface.
#
# - VM gets a private IP (10.0.2.**) on this interface
# - Provides internet access even if TAP/bridge isn't working
# - Used for testing and as a fallback
# - Cannot receive inbound connections (NAT only)
{
type = "user"; # User-mode networking (SLIRP/NAT)
id = serviceCfg.interface.idUser; # Interface name: "vmuser-*"
mac = serviceCfg.interface.macUser; # MAC address: "02:00:00:00:00:xx"
}
];
# ============================================================================
# PORT FORWARDING (Host → VM)
# ============================================================================
# Forward ports from the host to the VM for direct access
# This allows SSH to the VM via: ssh -p 220x root@localhost (from host)
#
# Without this, you'd need to SSH via the VM's LAN IP: ssh root@192.168.50.xxx
forwardPorts = [
{
from = "host"; # Forward from host
host.port = serviceCfg.interface.ssh; # Host port: 220x
guest.port = 22; # VM port: 22 (SSH)
}
];
# ============================================================================
# SHARED DIRECTORIES (Host → VM)
# ============================================================================
# VirtioFS allows sharing directories from host to VM with good performance
# This is better than network shares (NFS/Samba) for VM-host communication
#
# Why use VirtioFS instead of storing everything in the VM?
# 1. Data persists when VM is recreated (VMs are ephemeral, data isn't)
# 2. Easy backups (just backup host directories)
# 3. Data accessible from host for maintenance/migration
# 4. Share read-only nix store from host (saves space)
shares = [
# ──────────────────────────────────────────────────────────────────
# Nix Store (Read-Only)
# ──────────────────────────────────────────────────────────────────
# Share the host's /nix/store as read-only inside the VM
# This provides all Nix packages without duplicating data
# The VM can use all the same packages as the host
{
mountPoint = "/nix/.ro-store"; # Mount point in VM
proto = "virtiofs"; # VirtioFS protocol (fast, modern)
source = "/nix/store"; # Source on host
tag = "read_only_nix_store"; # Unique identifier
}
# ──────────────────────────────────────────────────────────────────
# Service Data (Read-Write)
# ──────────────────────────────────────────────────────────────────
# Persistent storage for Service's database and attachments
# Stored on host at: /mnt/storage/service
# This data survives VM rebuilds/restarts
{
mountPoint = "/var/lib/service"; # Where Service stores its data
proto = "virtiofs"; # VirtioFS protocol
source = serviceCfg.mntPaths.path0; # Host: /mnt/storage/service
tag = "service_data"; # Unique identifier
}
# ──────────────────────────────────────────────────────────────────
# Secrets (Read-Only)
# ──────────────────────────────────────────────────────────────────
# Share secrets managed by SOPS from the host
# Contains sensitive config like SMTP passwords, admin tokens, etc.
# SOPS-nix decrypts these on the host, then they're shared to the VM
{
mountPoint = "/run/secrets"; # Mount point in VM
proto = "virtiofs"; # VirtioFS protocol
source = "/run/secrets"; # Source on host
tag = "host_secrets"; # Unique identifier
}
];
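# Sanity check (inside the VM): `findmnt /var/lib/service` should report
# fstype "virtiofs" once the data share is mounted.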
};
};
};
# ============================================================================
# HOST-SIDE STORAGE CONFIGURATION
# ============================================================================
# Create necessary directories on the host for VM data
systemd.tmpfiles.rules = [
# Create service data directory on host if it doesn't exist
# This is where the VM's persistent data is actually stored
# d = directory, 0755 = permissions, root root = owner/group
"d ${serviceCfg.mntPaths.path0} 0755 root root -" # /mnt/storage/service
];
# ============================================================================
# CADDY REVERSE PROXY (Host)
# ============================================================================
# Caddy runs on the host and forwards HTTPS traffic to the VM
#
# Traffic Flow:
# Internet → Router:443 → Host:443 (Caddy) → VM:xxxx (Service)
# ↓
# TLS Termination
# (ACME Certs)
#
# Why use a reverse proxy instead of exposing VM directly?
# 1. TLS/SSL termination on the host (easier cert management)
# 2. Single public IP can serve multiple services
# 3. Additional security layer (Caddy can add headers, rate limiting, etc.)
# 4. VM doesn't need to handle TLS complexity
services.caddy.virtualHosts."${serviceCfg.domains.url0}" = {
extraConfig = ''
reverse_proxy ${serviceCfg.interface.ip}:${toString serviceCfg.ports.port0} {
header_up X-Real-IP {remote_host}
}
tls ${serviceCfg.ssl.cert} ${serviceCfg.ssl.key}
encode zstd gzip
'';
};
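# End-to-end check (from any LAN machine, substituting the real domain):
#   curl -sI https://<domain>
# should return a response served by Caddy over the ACME certificate.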
# ============================================================================
# SECRETS MANAGEMENT (SOPS)
# ============================================================================
# Configure secrets that will be decrypted by SOPS-nix on the host
# then shared to the VM via VirtioFS
#
# The service/env file contains sensitive environment variables like:
# - ADMIN_TOKEN: For accessing the admin panel
# - DATABASE_URL: Database connection string (if using external DB)
# - SMTP_PASSWORD: For sending email notifications
sops.secrets = {
"service/env" = {
owner = "root"; # File owner on host
mode = "0600"; # Permissions (read/write for owner only)
};
};
}
# ============================================================================
# SUMMARY: How This All Works Together
# ============================================================================
#
# 1. BOOT SEQUENCE:
# - Host starts and creates br-vms bridge
# - Host creates TAP interface (vm-service)
# - Host attaches TAP to bridge
# - QEMU starts with TAP fds and VirtioFS shares
# - VM boots and sees enp0s5 network interface
# - systemd-networkd configures enp0s5 with static IP
# - The service starts on port xxxx
#
# 2. NETWORK CONNECTIVITY:
# - VM has IP 192.168.50.xxx on LAN (via TAP/bridge)
# - Host has IP 192.168.50.240 on LAN (on bridge)
# - Both can reach each other and the internet
# - Router forwards port 443 to host IP
#
# 3. REQUEST FLOW:
# External → Router:443 → Host:443 (Caddy) → Bridge → TAP → VM:xxxx (Service)
# Response follows same path in reverse
#
# 4. DATA STORAGE:
# - VM reads packages from host's /nix/store (shared read-only)
# - VM writes data to /var/lib/service (actually /mnt/storage/service on host)
# - VM reads secrets from /run/secrets (shared from host via SOPS)
#
# 5. MAINTENANCE:
# - SSH to VM: ssh root@192.168.50.xxx (from LAN)
# - SSH to VM: ssh -p 220x root@localhost (from host)
# - Rebuild: sudo nixos-rebuild switch --flake .#ceres
# - VM automatically restarts on config changes
#
# ============================================================================
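Tying the template back to the jellyfin instance defined in the first file: under the naming used in this commit, instantiation should only require swapping the instance lookup and the VM attribute name. A sketch (assumed, not shown in this diff):

  # Hypothetical jellyfin instantiation of the template above
  serviceCfg = flake.config.services.instances.jellyfin;
  # ...with the VM defined as:
  microvm.vms.jellyfin = {
    autostart = true;
    restartIfChanged = true;
    # config as above: TAP id "vm-jellyfin", IP 192.168.50.152,
    # SSH reachable from the host on port 2202
  };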

View file

@@ -1,122 +1,35 @@
-# ============================================================================
-# Vaultwarden MicroVM Configuration
-# ============================================================================
-# This file contains the complete configuration for running Vaultwarden
-# (a Bitwarden-compatible password manager) inside an isolated MicroVM.
-#
-# Architecture Overview:
-# ┌─────────────────────────────────────────────────────────────────┐
-# │ Host (Ceres - NixOS Server) │
-# │ │
-# │ ┌─────────────┐ ┌──────────────┐ │
-# │ │ Caddy │────────▶│ Bridge │ │
-# │ │ (Reverse │ │ (br-vms) │ │
-# │ │ Proxy) │ │ 192.168.50 │ │
-# │ └─────────────┘ │ .240 │ │
-# │ │ └──────┬───────┘ │
-# │ │ │ │
-# │ │ ┌──────▼────────┐ │
-# │ │ │ TAP Interface │ │
-# │ │ │ (vm-vault*) │ │
-# │ │ └──────┬────────┘ │
-# │ │ │ │
-# │ ┌─────▼────────────────────────▼──────────────────────────┐ │
-# │ │ │ │
-# │ │ MicroVM (vaultwarden) │ │
-# │ │ ┌────────────────┐ ┌──────────────┐ │ │
-# │ │ │ Vaultwarden │ │ enp0s5 │ │ │
-# │ │ │ Service │ │ 192.168.50 │ │ │
-# │ │ │ Port 8085 │ │ .151 │ │ │
-# │ │ └────────────────┘ └──────────────┘ │ │
-# │ │ │ │
-# │ └──────────────────────────────────────────────────────────┘ │
-# │ │
-# └─────────────────────────────────────────────────────────────────┘
-#
-# Network Flow:
-# 1. External request → Router (port forward 443) → Host IP (192.168.50.240)
-# 2. Host Caddy receives HTTPS request on port 443
-# 3. Caddy terminates TLS using ACME certificates
-# 4. Caddy forwards HTTP to VM IP (192.168.50.151:8085)
-# 5. Request travels through br-vms bridge → TAP interface → VM network
-# 6. Vaultwarden responds back through the same path
-# ============================================================================
 {
 config,
-lib,
-pkgs,
 flake,
 ...
 }:
 let
-# Pull configuration from centralized instance definitions
-# These are defined in modules/config/instances/config/vaultwarden.nix
 vaultwardenCfg = flake.config.services.instances.vaultwarden;
 smtpCfg = flake.config.services.instances.smtp;
 inherit (flake.config.people) user0;
 inherit (flake.config.services) instances;
-# DNS provider configuration for ACME DNS-01 challenge
 dns0 = instances.web.dns.provider0;
 dns0Path = "dns/${dns0}";
 in
 {
-# ============================================================================
-# HOST-SIDE CONFIGURATION
-# ============================================================================
-# The following settings run on the host (Ceres), not inside the VM
-# Add Caddy user to the ACME group so it can read TLS certificates
-# NixOS ACME module creates certs with group ownership, and Caddy needs
-# read access to serve them over HTTPS
 users.users.caddy.extraGroups = [ "acme" ];
-# Configure Let's Encrypt SSL certificate for vaultwarden domain
-# Uses DNS-01 challenge (via dns0 provider) instead of HTTP-01
-# This allows cert generation even when the service isn't publicly accessible yet
 security.acme.certs."${vaultwardenCfg.domains.url0}" = {
-dnsProvider = dns0; # DNS provider (e.g., cloudflare, route53)
-environmentFile = config.sops.secrets.${dns0Path}.path; # API credentials from SOPS
-group = "caddy"; # Allow Caddy to read the certs
+dnsProvider = dns0;
+environmentFile = config.sops.secrets.${dns0Path}.path;
+group = "caddy";
 };
-# ============================================================================
-# MICROVM DEFINITION
-# ============================================================================
-# This section defines the entire VM configuration, including networking,
-# services, and resource allocation
 microvm.vms.vaultwarden = {
-# Automatically start VM when host boots
 autostart = true;
-# Restart VM when configuration changes (during nixos-rebuild)
 restartIfChanged = true;
-# ============================================================================
-# VM GUEST CONFIGURATION
-# ============================================================================
-# Everything inside this 'config' block runs INSIDE the VM, not on the host
 config = {
-# NixOS version for the VM system
 system.stateVersion = "24.05";
 time.timeZone = "America/Winnipeg";
-# Allow SSH access to VM using host user's SSH keys
-# This enables direct SSH via: ssh root@192.168.50.151
 users.users.root.openssh.authorizedKeys.keys = flake.config.people.users.${user0}.sshKeys;
-# ============================================================================
-# VAULTWARDEN SERVICE (Inside VM)
-# ============================================================================
-# Main application configuration - runs on port 8085 inside the VM
 services.vaultwarden = {
 enable = true;
 dbBackend = "sqlite";
 config = {
 # Domain Configuration
 DOMAIN = "https://${vaultwardenCfg.domains.url0}";
@@ -151,59 +64,34 @@ in
 environmentFile = "/run/secrets/vaultwarden/env";
 };
-# ============================================================================
-# VM SSH ACCESS
-# ============================================================================
-# Enable SSH server inside the VM for management and debugging
 services.openssh = {
 enable = true;
 settings = {
-PasswordAuthentication = false; # Only allow key-based auth
-PermitRootLogin = "prohibit-password"; # Root login only with keys
+PasswordAuthentication = false;
+PermitRootLogin = "prohibit-password";
 };
 };
-# Open firewall ports inside the VM
 networking.firewall.allowedTCPPorts = [
 22 # SSH
 25 # SMTP
 139 # NetBIOS (SMB)
 587 # SMTP
 2525 # SMTP
-vaultwardenCfg.ports.port0 # Vaultwarden web interface (8085)
+vaultwardenCfg.ports.port0
 ];
-# ============================================================================
-# VM NETWORK CONFIGURATION (systemd-networkd)
-# ============================================================================
-# This configures the network interface INSIDE the VM
-# The VM sees a network interface called "enp0s5" which connects to the
-# host's TAP interface (vm-vaultwarden) via the bridge (br-vms)
 systemd.network = {
-enable = true; # Enable systemd-networkd for network management
+enable = true;
 networks."20-lan" = {
-# Match the network interface created by QEMU
-# QEMU with q35 machine type typically creates "enp0s5" for the first NIC
 matchConfig.Name = "enp0s5";
-# Assign static IP address to the VM
-# This IP must be on the same subnet as the host bridge (192.168.50.0/24)
-addresses = [ { Address = "${vaultwardenCfg.interface.ip}/24"; } ]; # 192.168.50.151/24
-# Configure default route to reach the internet
-# All traffic (0.0.0.0/0) goes through the gateway (usually your router)
+addresses = [ { Address = "${vaultwardenCfg.interface.ip}/24"; } ];
 routes = [
 {
 Destination = "0.0.0.0/0";
-Gateway = vaultwardenCfg.interface.gate; # 192.168.50.1
+Gateway = vaultwardenCfg.interface.gate;
 }
 ];
-# DNS servers for the VM to use
-# Using public DNS (Cloudflare and Google) for reliability
 dns = [
 "1.1.1.1"
 "8.8.8.8"
@@ -211,243 +99,73 @@ in
 };
 };
-# Explicitly start systemd-networkd service
-# By default, systemd.network.enable creates configs but doesn't start the service
-# This ensures the network is actually configured when the VM boots
 systemd.services.systemd-networkd.wantedBy = [ "multi-user.target" ];
-# ============================================================================
-# MICROVM HARDWARE CONFIGURATION
-# ============================================================================
-# This section defines the VM's virtual hardware, networking, and storage
 microvm = {
-# Virtual CPU cores allocated to the VM
 vcpu = 2;
-# Memory allocated to the VM (in MB)
 mem = 1024;
-# Hypervisor to use (QEMU with KVM acceleration)
 hypervisor = "qemu";
-# ============================================================================
-# NETWORK INTERFACES
-# ============================================================================
-# The VM has TWO network interfaces for different purposes:
 interfaces = [
-# ──────────────────────────────────────────────────────────────────
-# Primary Interface: TAP (for LAN connectivity)
-# ──────────────────────────────────────────────────────────────────
-# TAP creates a virtual ethernet device on the host that acts like
-# a physical network cable connecting the VM to the host's network.
-#
-# Network Path:
-# VM (enp0s5) ← virtio-net → TAP (vm-vaultwarden) → Bridge (br-vms) → Physical NIC (enp10s0) → LAN
-#
-# The host has a bridge (br-vms) configured in systems/ceres/config/networking.nix
-# that connects:
-# - Physical interface: enp10s0
-# - TAP interfaces: vm-* (this and other VMs)
-#
-# This allows the VM to appear as a separate device on your LAN with
-# its own IP address (192.168.50.151)
 {
-type = "tap"; # TAP interface (Layer 2 / Ethernet)
-id = vaultwardenCfg.interface.id; # Interface name on host: "vm-vaultwarden"
-mac = vaultwardenCfg.interface.mac; # MAC address: "02:00:00:00:00:51"
+type = "tap";
+id = vaultwardenCfg.interface.id;
+mac = vaultwardenCfg.interface.mac;
 }
-# ──────────────────────────────────────────────────────────────────
-# Secondary Interface: User-mode networking (for fallback/NAT)
-# ──────────────────────────────────────────────────────────────────
-# User-mode networking (SLIRP) provides outbound internet access via NAT
-# without requiring any host configuration. This is a backup interface.
-#
-# - VM gets a private IP (10.0.2.15) on this interface
-# - Provides internet access even if TAP/bridge isn't working
-# - Used for testing and as a fallback
-# - Cannot receive inbound connections (NAT only)
 {
-type = "user"; # User-mode networking (SLIRP/NAT)
-id = vaultwardenCfg.interface.idUser; # Interface name: "vmuser-vault"
-mac = vaultwardenCfg.interface.macUser; # MAC address: "02:00:00:00:00:03"
+type = "user";
+id = vaultwardenCfg.interface.idUser;
+mac = vaultwardenCfg.interface.macUser;
 }
 ];
-# ============================================================================
-# PORT FORWARDING (Host → VM)
-# ============================================================================
-# Forward ports from the host to the VM for direct access
-# This allows SSH to the VM via: ssh -p 2201 root@localhost (from host)
-#
-# Without this, you'd need to SSH via the VM's LAN IP: ssh root@192.168.50.151
 forwardPorts = [
 {
-from = "host"; # Forward from host
-host.port = vaultwardenCfg.interface.ssh; # Host port: 2201
-guest.port = 22; # VM port: 22 (SSH)
+from = "host";
+host.port = vaultwardenCfg.interface.ssh;
+guest.port = 22;
 }
 ];
-# ============================================================================
-# SHARED DIRECTORIES (Host → VM)
-# ============================================================================
-# VirtioFS allows sharing directories from host to VM with good performance
-# This is better than network shares (NFS/Samba) for VM-host communication
-#
-# Why use VirtioFS instead of storing everything in the VM?
-# 1. Data persists when VM is recreated (VMs are ephemeral, data isn't)
-# 2. Easy backups (just backup host directories)
-# 3. Data accessible from host for maintenance/migration
-# 4. Share read-only nix store from host (saves space)
 shares = [
-# ──────────────────────────────────────────────────────────────────
-# Nix Store (Read-Only)
-# ──────────────────────────────────────────────────────────────────
-# Share the host's /nix/store as read-only inside the VM
-# This provides all Nix packages without duplicating data
-# The VM can use all the same packages as the host
 {
-mountPoint = "/nix/.ro-store"; # Mount point in VM
-proto = "virtiofs"; # VirtioFS protocol (fast, modern)
-source = "/nix/store"; # Source on host
-tag = "read_only_nix_store"; # Unique identifier
+mountPoint = "/nix/.ro-store";
+proto = "virtiofs";
+source = "/nix/store";
+tag = "read_only_nix_store";
 }
-# ──────────────────────────────────────────────────────────────────
-# Vaultwarden Data (Read-Write)
-# ──────────────────────────────────────────────────────────────────
-# Persistent storage for Vaultwarden's database and attachments
-# Stored on host at: /mnt/storage/vaultwarden
-# This data survives VM rebuilds/restarts
 {
-mountPoint = "/var/lib/bitwarden_rs"; # Where Vaultwarden stores its data
-proto = "virtiofs"; # VirtioFS protocol
-source = vaultwardenCfg.mntPaths.path0; # Host: /mnt/storage/vaultwarden
-tag = "vaultwarden_data"; # Unique identifier
+mountPoint = "/var/lib/bitwarden_rs";
+proto = "virtiofs";
+source = vaultwardenCfg.mntPaths.path0;
+tag = "vaultwarden_data";
 }
-# ──────────────────────────────────────────────────────────────────
-# Secrets (Read-Only)
-# ──────────────────────────────────────────────────────────────────
-# Share secrets managed by SOPS from the host
-# Contains sensitive config like SMTP passwords, admin tokens, etc.
-# SOPS-nix decrypts these on the host, then they're shared to the VM
 {
-mountPoint = "/run/secrets"; # Mount point in VM
-proto = "virtiofs"; # VirtioFS protocol
-source = "/run/secrets"; # Source on host
-tag = "host_secrets"; # Unique identifier
+mountPoint = "/run/secrets";
+proto = "virtiofs";
+source = "/run/secrets";
+tag = "host_secrets";
 }
 ];
 };
 };
 };
-# ============================================================================
-# HOST-SIDE STORAGE CONFIGURATION
-# ============================================================================
-# Create necessary directories on the host for VM data
 systemd.tmpfiles.rules = [
-# Create vaultwarden data directory on host if it doesn't exist
-# This is where the VM's persistent data is actually stored
-# d = directory, 0755 = permissions, root root = owner/group
-"d ${vaultwardenCfg.mntPaths.path0} 0755 root root -" # /mnt/storage/vaultwarden
+"d ${vaultwardenCfg.mntPaths.path0} 0755 root root -"
 ];
-# ============================================================================
-# CADDY REVERSE PROXY (Host)
-# ============================================================================
-# Caddy runs on the host and forwards HTTPS traffic to the VM
-#
-# Traffic Flow:
-# Internet → Router:443 → Host:443 (Caddy) → VM:8085 (Vaultwarden)
-# ↓
-# TLS Termination
-# (ACME Certs)
-#
-# Why use a reverse proxy instead of exposing VM directly?
-# 1. TLS/SSL termination on the host (easier cert management)
-# 2. Single public IP can serve multiple services
-# 3. Additional security layer (Caddy can add headers, rate limiting, etc.)
-# 4. VM doesn't need to handle TLS complexity
 services.caddy.virtualHosts."${vaultwardenCfg.domains.url0}" = {
 extraConfig = ''
-# Forward all requests to the VM's IP and port
-# Caddy terminates TLS, then forwards as plain HTTP to the VM
 reverse_proxy ${vaultwardenCfg.interface.ip}:${toString vaultwardenCfg.ports.port0} {
-# Preserve the real client IP in a header
-# Vaultwarden can use this for logging and security
 header_up X-Real-IP {remote_host}
 }
-# TLS configuration - use ACME certificates from host
-# These certs are managed by security.acme.certs (configured above)
-# Caddy reads them from /var/lib/acme/vaultwarden.cloudbert.fun/
 tls ${vaultwardenCfg.ssl.cert} ${vaultwardenCfg.ssl.key}
-# Compress responses for better performance
-# Reduces bandwidth and improves load times
 encode zstd gzip
 '';
 };
-# ============================================================================
-# SECRETS MANAGEMENT (SOPS)
-# ============================================================================
-# Configure secrets that will be decrypted by SOPS-nix on the host
-# then shared to the VM via VirtioFS
-#
-# The vaultwarden/env file contains sensitive environment variables like:
-# - ADMIN_TOKEN: For accessing the admin panel
-# - DATABASE_URL: Database connection string (if using external DB)
-# - SMTP_PASSWORD: For sending email notifications
 sops.secrets = {
 "vaultwarden/env" = {
-owner = "root"; # File owner on host
-mode = "0600"; # Permissions (read/write for owner only)
+owner = "root";
+mode = "0600";
 };
 };
 }
-# ============================================================================
-# SUMMARY: How This All Works Together
-# ============================================================================
-#
-# 1. BOOT SEQUENCE:
-# - Host starts and creates br-vms bridge
-# - Host creates TAP interface (vm-vaultwarden)
-# - Host attaches TAP to bridge
-# - QEMU starts with TAP fds and VirtioFS shares
-# - VM boots and sees enp0s5 network interface
-# - systemd-networkd configures enp0s5 with static IP
-# - Vaultwarden service starts on port 8085
-#
-# 2. NETWORK CONNECTIVITY:
-# - VM has IP 192.168.50.151 on LAN (via TAP/bridge)
-# - Host has IP 192.168.50.240 on LAN (on bridge)
-# - Both can reach each other and the internet
-# - Router forwards port 443 to host IP
-#
-# 3. REQUEST FLOW:
-# External → Router:443 → Host:443 (Caddy) → Bridge → TAP → VM:8085 (Vaultwarden)
-# Response follows same path in reverse
-#
-# 4. DATA STORAGE:
-# - VM reads packages from host's /nix/store (shared read-only)
-# - VM writes data to /var/lib/bitwarden_rs (actually /mnt/storage/vaultwarden on host)
-# - VM reads secrets from /run/secrets (shared from host via SOPS)
-#
-# 5. MAINTENANCE:
-# - SSH to VM: ssh root@192.168.50.151 (from LAN)
-# - SSH to VM: ssh -p 2201 root@localhost (from host)
-# - Rebuild: sudo nixos-rebuild switch --flake .#ceres
-# - VM automatically restarts on config changes
-#
-# ============================================================================

View file

@@ -1,7 +1,7 @@
 ssh:
 private: ENC[AES256_GCM,data:XJk/gjPkFeSZtPkKYS2vRHqMY/X5zRaDlS4UwzUvjm9MvTgdhoXUlqvFC0Dl5SZhRlY+XXAuG7gIIUESzCFWQKdOoUcto3r0WSuIm9EwLKXnnaHemeFVHYgZU9Rz45PK6yFWUC06+n56b2A1dFXftjeXcCqaQrT/jk3RDSHmhW9u7QgDmhhaybxXOrzkup2U8kjhrMmRBcf4xP//nihuzHcyYX75ONr56bgkjl6gpZTfZrn2ad8b+4iGn+rElzf7RHAG0mwTeEX2kYRyafaanGuc2xTnZubBAYDnc1eM6T99PXC0iWh/lUKc1zG1l18UchWzgvl3sPK0Cb2/5aaFMUk2ET6kVOlpKyGc94MRpyv3iUi8soFjh34sWH3mFtec2OWfIxDhoVfZoc2hmP2Hflfjp7acwaMskFBHaCSO2DGtNmN3hSUhAAeLx8OZupSIJmDVpq00qKUbN+5z4K78AdGuUOP07cE889evNniCHLP6yPav7tIulnBS9lD2U+CbqF7vMtdZx/eYFwJjmMtE,iv:JxSytvXKWLHDedlE0Wq5YpPUnfb0HoQgKJ2bt1Z8yqk=,tag:MjOoUSWsHWHgxp0yu9YQFA==,type:str]
 public: ENC[AES256_GCM,data:Cn4hutHHeptbefHOKK7zv5TmveGOqfHAwGHogDq9sRmeb+b1lzHwj7qvg8lcnlJtIo4qS+TrKtSj5ZCsPNXOhWG1rkk97gTfPMbcxj5f1O3WJigL2wsrB2cQgc5UsA==,iv:ID4zRdr/efClOAHbXzxG1bNuJR0A2qbydzGlMhvEcRE=,tag:qbIoaGb+RXxRRkkQtuX7/A==,type:str]
-hosts: ENC[AES256_GCM,data:5pwmBKxsOq49CNkDnXQHm5jDPhTqLd7UBJZnnhQ5f0lJUV1bskD6k5rYXhFriLfWgnJ68y5cdBttoqMSxmrCMca/rltPQTgoyI+DzC8wcWVR+YKNlnarkKBrNjfRLKlCfikgaRX+skS4a2KzxOFo6HpwS6zK9I9c3t/6CIcpK4lDl2mWoTwssxdwtUgUyFgcWUppnf/WJZFbkHCYHWOfWnXpaltLriK35y3Hv9NnPs4aUeOx5B+UHcGWZkiif00axRGo7UBxs47/Ny7gHmHX9Gs/aj0icrkdjxu4pzrwInoUoWFsMNW5haEvX3Dh4P8b4jiyW0gj1qMrmUd19yfjvmYIOeId/HVKv51hbJyiZmQdtHLpohNLq/PSO/NHMlEqhPRdc1CQtjRp8JBoUSmK3J3/hVCKs5eje6bIZIrsq/kYD6auvwuCuU3FkJ77I8DWKo/mGyWrapXHO5uBsVHpJYlVN+nMtXVVdeetCQHmFah4f0Cyqo5jByv3nEVXfvuC2Q9/m3hDw3aOJ6HGFgYpzblOSxS8WIxg5TSEi8zU59rS379lM4BTNYQ+urzkLg1xg0lxiAEd+6g2LIY3QVhr9owHmiJfGVIkr4NpjCmxHIqGSzJs2Qku8thRAMiXHQp850y+qW727bDyhJ7jcFa66lDD+fKhlf9HVaC9q+hCXSLVMN1EwwxxZypWd4xXJUyNZmvkRvlugWlWh1xHaaxrfuU3Rc7r5rnGz8RSXTFsGY74qcCWIpqn4AWjxI+9Uzp1TnsM9Kk3iXts+dt+OnxDcxoMC0GhMjndXden78ufapSbG7tA5s4HBl71l7rJJ36WlDjePZXhA6+ajWlA4Bc4HJxQoyznp7Q1g4947rfWNfMM3pLo6LLMvIYfT8CmjkUb//WvOeq4Ktm0tjssFFGc1mb0dPN6OoIanxxgWhsuA8CYI4VjtuzMSNUPbp1pESq3LjyVbZcml2xlM02m/S6Xu291dhgXT+n0fUb8F4k6oWtiWA45HkpFrL1yRgRiOIjcDHGUqkMUZ5Yud1L6XZkyQ4pIqz7vszJj+Qf6unH8yg/HEDSxzVlZTL2e7In9ZZJowp1MoRqU/pQ+r8zksZl8s3/x8Qb6i1zXUeCt85CSXjXWjl0Xo0WhWBibRva/kPjIi7wDC/cry6dBae2wv000ql6qY5KeA3qvuF0I3abCxWCNGnuVw9O7CkU/Xvs3ev+PtQvIbZD1gH4BnMJQGiUUgUhsQO/NVsx1WJLO/Q3lcyiS5gGAeJWJsObICRsU4jqEH2UkEco9J0bJP7/O4/3jg7ymxMzBaP8qa7uP/l2Wkdkxk64HazKCm9gjvzdQAujxfD+UIGkYl/F24VuOKfNDsOHhwaL5gpwUMtRgqQfo9EwDr9K4w5JtpGtdtYA1a3U1q128cIoIBOWw6AhL,iv:r1r8sIWKmEMvi4T3oPJa+u7Ym6fTdDD7H/P3nmym3uY=,tag:vr6lWYaFfys8CwZl/D1ceg==,type:str]
+hosts: ENC[AES256_GCM,data:cgizSZ5PiwkhNm2c3PNYOO29iNUg6U+skkSb5HpOeYNFv3ZkwtVM3SdEXdZEHDF91JhFeoJiNevLF80ol7YR9J493bQsEZPuOyMIpsDIWAHKFBS+S46ekG/oBwN8JZUy0Hnh3x421cg5KOE8J8Um5wJhh8Lun9Y0747nmEhqXt+cAjzaGNBbuFJR7z8XAO8/ZjrBxOr+LcxJt2IYG6PqA6r9ZEFEglXqxDeezTcQsNxIGuflBrB0QKyUp0CGkM4uaW9506UrvUzdHh71hYHm5Ir9pmhYawlWCWuOgydR8aCkIiz2lbatvjukHJvoMd2S1TxSYHMkheXMhFEDmfzQvPW/7LdfQ3PmMyGq23JFuSxsvPXSOLpI2QfqpQBwkCbNhzEkmc0ln50rRAka/z4HfR6Ga77FLW9Jcz9rpwRiI0eF46x1gRxiB2DO74kSwwZgjTNyafPGChQJjDh6Fycs9ivMOFEoltIaipHHSrxDTQvz1nhJkb1S/qgQEkfm9GbfJS9OxYgheHzWJOxtocPVVpUc5wac/9KrS2O1lfgI4IrtNF1J0zti6JcsGewFQZzfDx6usTKw/R9T1okNhtM4njn5qADsJyWnpkuiiqXh2ozO9CLgehxpm6oSbw/FVA3BDME2iU+16CXsZVxxVW3zSSI5ctZXpkDgSJh19DCh7Er5nb6iwsLLCCRqcIKtfAf2ZHPABB2BoZE0CGvf/nnkQd++MfL9W18OwXz3/9aVkW+gYNrDU9Mv8LiGz0BIarpneTdIw4i+LT7wtv5h0RSjvfJS50LP/1eK39vTwP9OiJ5UsCDyURSkB2sVS0MeSca6nVGoIOdWr2ee8IBp3QFS24VWZvLFE+0MY90S4ZNZsOF8nmN+M/SCvS4h/ixqQQ6zr+qtS8Hm1N/EHx2U2CqRbE64HnQhaHnj/hKUkQiABIlt1kz1eR2NjEt5aa1hQyoKl8E4J9V9V/u3t3vd0aIkWFL08SUEQEQWith066Jdf0VMdQvaSUKiSlhfZZ5Wj6ex1Y1hX9vT9utEGUAS12vlh2KMEpPi6eFxvIyOiu7bCMSeicQLpjGKEIsKJOVDPYPjbilwXuz8agjUevoguoAT12CKdCWPlnrjuByCZ34/U/qBHdak7UqnjS9MbbDTIcr8uz/2g8w2dMIAbRA0I9A4CrOUAAecIPQMCFU2scuJt+4xxZxof+HUlyqXQR3YD6Wh89/5rVK+kvgoNjflijegxW0rju7NyU0kU3jZp36V1F4HxzXsz7OOHmN54GJTvRSqhrQfQ39+eOSvyd04pwCM7EAOfJYw0moozVnVtmjnDLVjR49ho9qHrvDVLJ/sEcG/iyDrNtp8PCq9OK6EOEgPGNq4qDEow1GelPz6M1BU7t+KL5QnqDi0azz7L2ctx9N6b34LOHgNoHD5qGWP0CnNo1Pq7lQYoN9/XHYdQbHrc8NWjR56NmBnUnA65Af8+67J7+stLSVDrUyRRf9Ohcp5k6WK5N1yyJeYxa1EDulsXzToEEbadSbbS6REEESEn/jlup7U5NituT5wT9al,iv:ervRE0xkjtsKNYB/1W9oHM59lHwHTsOk3NLhnaRvWCM=,tag:Yj3emxFA5h2ndIilC5L16w==,type:str]
 network:
 server: ENC[AES256_GCM,data:EFsmXNkuf5OAMh8hjfZTixmmdjqBNIME9JjQC8azeCwcMVInm8bWdxE4OqFmxOk9MAU=,iv:pI6WeM2aQC+7vx1Xmp5O2rikqNLgzuEOg+Lo7TqFQxU=,tag:ElcA8mn9dx+IjIf38nKT5A==,type:str]
 fallaryn: ENC[AES256_GCM,data:O77hH3STB6zpl0b9iXsVu9OOrlLKUwfs2qI9hdqX4kMuBs3XgT/xsQ==,iv:RDKsuJoy+LIyADMc3bgOEmLKdXtu6kad2aeVetuZdJI=,tag:MrpCZ+iJUnGIjeHMgcYG6Q==,type:str]
@@ -60,7 +60,7 @@ sops:
 bXBOa1VSakoyaWxpODJEOU11QUZCaUEK8Ch9Ten3DdrPHF1DTH2qei85AlHUOaLD
 aNfzakake7ej+MxJYdKEU0bcWofNMKzIlZa2uM10KZSENDP8d8qlig==
 -----END AGE ENCRYPTED FILE-----
-lastmodified: "2025-11-06T07:19:22Z"
-mac: ENC[AES256_GCM,data:c6YNIQQqfiXqEiFvP+mGjj7H9gPxM6XGjSD1Oq8Ypv2qtW9x8SZxfwMVH8cB24a8Qu6VqZ6VbuRc63aGTKkQN2WfetC4W3Cuqzh8o8CbajkOrIR/uR9Sqoqjd3WRD6KP0/wS/upVt15xJL1JIIyDY23yZbeB4zurVBGwS3Yuzss=,iv:U3ELLWrSlBt/vxH3k7InijHRw9TMXcY3DrYvfyP+0M8=,tag:bMgWVwS/YQhh59H2xxYieQ==,type:str]
+lastmodified: "2025-11-08T20:52:41Z"
+mac: ENC[AES256_GCM,data:Hh8TQJH8MGbTFYbWiMfiAorvf4X7mN6Nnmm1lgGnahE5ZSMTZloIIWguDplroWTdgpLHtfSntbVzWozFL1PvGtcwUSEpfuGZ5HEDsBehPrdQ3MfulA7ClhBkx4GrJDmNoJSJlOpaSvA5R4Iwl/xO5+udHSB3DPleP/TZZndk4Ho=,iv:QNgZvvdYL/z9kVGRWoMJhBGSJi3gEBa8fBZeTnmGbrg=,tag:7muXvZcekqRnN8DSVxU2fw==,type:str]
 unencrypted_suffix: _unencrypted
 version: 3.11.0