dotfiles/modules/nixos/guests/defenseioGpu/config/default.nix

{
  flake,
  pkgs,
  config,
  lib,
  ...
}:
let
  cfg = config.services.gpu-miner;
  # CUDA-enabled packages
  cudaPackages = pkgs.cudaPackages;
  # Use GCC 13 for CUDA compilation (GCC 14 is too new for CUDA 12.8),
  # but we need GCC 14's runtime libraries for CXXABI_1.3.15
  gccCompiler = pkgs.gcc13;
  gccRuntime = pkgs.gcc14;
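  # Minimal Python environment: only pip and virtualenv live here; the
  # miner's real dependencies are pip-installed into a venv by ExecStartPre.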
  pythonEnv = pkgs.python312.withPackages (
    ps: with ps; [
      pip
      virtualenv
    ]
  );
  # Generate the complete config.yaml file
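  # (The generated file lives in the read-only Nix store; ExecStartPre copies
  # it into the working checkout as config.yaml on every start.)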
  configFile = pkgs.writeText "config.yaml" ''
    miner:
      api_url: https://mine.defensio.io/api
      max_workers: ${toString cfg.maxWorkers}
      update_interval: 30
      retry_delay: 5
      max_retries: 3
    wallet:
      consolidate_address: ${cfg.consolidateAddress}
      auto_register: true
      wallet_count: ${toString cfg.maxWorkers}
    gpu:
      enabled: true
      batch_size: ${toString cfg.batchSize}
      device_id: ${toString cfg.gpuDeviceId}
      thread_count: 256
    logging:
      level: info
      file: ${cfg.dataDir}/GPU-Miner/miner.log
      max_size: 10485760
      backup_count: 5
    performance:
      target_hashrate: 0
      monitor_interval: 60
      stats_update_interval: 10
    database:
      path: ${cfg.dataDir}/GPU-Miner/miner.db
      backup_enabled: true
      backup_interval: 3600
  '';
in
{
  options.services.gpu-miner = {
    enable = lib.mkEnableOption "GPU Miner for Defensio (DFO)";
    dataDir = lib.mkOption {
      type = lib.types.path;
      default = "/var/lib/gpu-miner";
      description = "Directory where the miner data and venv will be stored.";
    };
    consolidateAddress = lib.mkOption {
      type = lib.types.str;
      default = "addr1q87k2jlckh6ujqx4ymkdd4jrhy6gukdtum0p77pdh5gqcw8ctl65fvaw097l32ta6m8hth3xu9cjfz70y34gs2mdfzlsj465th";
      description = "Cardano address for consolidating mined tokens.";
    };
    maxWorkers = lib.mkOption {
      type = lib.types.int;
      default = 1;
      description = "Maximum number of worker threads.";
    };
    batchSize = lib.mkOption {
      type = lib.types.int;
      default = 1000000;
      description = "GPU batch size for mining.";
    };
    gpuDeviceId = lib.mkOption {
      type = lib.types.int;
      default = 0;
      description = "GPU device ID to use for mining (run nvidia-smi -L to list available GPUs).";
    };
  };
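
  # Example consumer configuration (illustrative values only; a real
  # deployment would set its own Cardano address and GPU index):
  #
  #   services.gpu-miner = {
  #     enable = true;
  #     maxWorkers = 2;
  #     batchSize = 2000000;
  #     gpuDeviceId = 0;
  #     consolidateAddress = "addr1..."; # your own wallet address
  #   };
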
  config = lib.mkIf cfg.enable {
    # Create the data directory
    systemd.tmpfiles.rules = [
      "d ${cfg.dataDir} 0755 root root - -"
    ];
    systemd.services.gpu-miner = {
      description = "GPU Miner for Defensio (DFO) tokens";
      after = [ "network-online.target" ];
      wants = [ "network-online.target" ];
      wantedBy = [ "multi-user.target" ];
      environment = {
        CUDA_PATH = "${cudaPackages.cudatoolkit}";
        # Use GCC 14 runtime libraries for CXXABI_1.3.15
        LD_LIBRARY_PATH = lib.makeLibraryPath [
          cudaPackages.cudatoolkit
          config.boot.kernelPackages.nvidiaPackages.latest
          gccRuntime.cc.lib
        ];
        __GLX_VENDOR_LIBRARY_NAME = "nvidia";
        GBM_BACKEND = "nvidia-drm";
      };
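      # Build toolchain on PATH: pip may need to compile native wheels during
      # setup, and the run script invokes nvcc, which needs a host GCC.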
      path = [
        cudaPackages.cudatoolkit
        pkgs.git
        pythonEnv
        gccCompiler # GCC 13 for CUDA compilation
        pkgs.gnumake
        pkgs.patchelf
        pkgs.stdenv.cc.cc.lib
        pkgs.binutils
      ];
      serviceConfig = {
        Type = "simple";
        Restart = "always";
        RestartSec = 10;
        WorkingDirectory = cfg.dataDir;
        TimeoutStartSec = "5min";
        # Setup script
        ExecStartPre = pkgs.writeShellScript "setup-gpu-miner" ''
          set -e
          cd ${cfg.dataDir}
          # Clone or update the repository
          if [ ! -d GPU-Miner ]; then
            echo "Cloning GPU-Miner repository..."
            ${pkgs.git}/bin/git clone https://github.com/Herolias/GPU-Miner.git
          else
            echo "Updating GPU-Miner repository..."
            cd GPU-Miner
            ${pkgs.git}/bin/git pull || true
            cd ..
          fi
          cd GPU-Miner
          # Remove and recreate the venv to ensure a clean state
          if [ -d venv ]; then
            echo "Removing existing virtual environment..."
            rm -rf venv
          fi
          echo "Creating virtual environment with Python 3.12..."
          ${pythonEnv}/bin/python -m venv venv
          echo "Installing dependencies..."
          venv/bin/pip install --upgrade pip
          # Install base dependencies first
          echo "Installing base dependencies..."
          venv/bin/pip install colorama pyyaml portalocker
          # Install from requirements.txt if available
          if [ -f requirements.txt ]; then
            echo "Installing from requirements.txt..."
            venv/bin/pip install -r requirements.txt
          else
            echo "Installing common dependencies..."
            venv/bin/pip install requests numpy pycardano cbor2
          fi
          echo "Dependency installation complete."
          # Patch the GPU binaries for NixOS
          if [ -d gpu_core/bin/linux ]; then
            echo "Patching GPU binaries for NixOS..."
            for so_file in gpu_core/bin/linux/*.so; do
              if [ -f "$so_file" ]; then
                echo "Patching $so_file"
                ${pkgs.patchelf}/bin/patchelf --set-interpreter "$(cat ${pkgs.stdenv.cc}/nix-support/dynamic-linker)" "$so_file" 2>/dev/null || true
                ${pkgs.patchelf}/bin/patchelf --set-rpath "${
                  lib.makeLibraryPath [
                    cudaPackages.cudatoolkit
                    config.boot.kernelPackages.nvidiaPackages.latest
                    gccRuntime.cc.lib
                    pkgs.zlib
                    pkgs.glib
                    pkgs.glibc
                  ]
                }:\$ORIGIN" "$so_file" || echo "Warning: Could not patch $so_file"
                ${pkgs.patchelf}/bin/patchelf --shrink-rpath "$so_file" 2>/dev/null || true
              fi
            done
            echo "Patching complete."
          fi
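          # To inspect a patched binary by hand (outside the service), something
          # like the following should show the Nix store paths listed above:
          #   patchelf --print-rpath gpu_core/bin/linux/<name>.so
          #   ldd gpu_core/bin/linux/<name>.so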
          # Copy the generated config into the checkout (install gives us a
          # writable copy, since the source lives in the read-only Nix store)
          echo "Creating config.yaml..."
          install -m 644 ${configFile} config.yaml
          # Ensure the database directory exists
          mkdir -p "$(dirname ${cfg.dataDir}/GPU-Miner/miner.db)"
          echo "Setup complete."
          echo "Current commit: $(${pkgs.git}/bin/git log -1 --format='%h - %s')"
        '';
        # Main execution script
        ExecStart = pkgs.writeShellScript "run-gpu-miner" ''
          set -e
          cd ${cfg.dataDir}/GPU-Miner
          # Set CUDA environment
          export CUDA_PATH=${cudaPackages.cudatoolkit}
          export CUDA_ROOT=${cudaPackages.cudatoolkit}
          export CUDA_HOME=${cudaPackages.cudatoolkit}
          # Use GCC 13 for CUDA compilation (compatible with CUDA 12.8)
          export CC=${gccCompiler}/bin/gcc
          export CXX=${gccCompiler}/bin/g++
          # CRITICAL: force nvcc to use GCC 13 by creating a wrapper script.
          # The wrapper must pass --compiler-bindir and ensure the CUDA
          # headers are found.
          mkdir -p /tmp/nvcc-wrapper
          cat > /tmp/nvcc-wrapper/nvcc <<WRAPPER_EOF
          #!${pkgs.bash}/bin/bash
          # Add the CUDA include path so nvcc can find cuda_runtime.h
          exec ${cudaPackages.cudatoolkit}/bin/nvcc \\
            --compiler-bindir ${gccCompiler}/bin \\
            -I${cudaPackages.cudatoolkit}/include \\
            "\$@"
          WRAPPER_EOF
          chmod +x /tmp/nvcc-wrapper/nvcc
          # Put our wrapper at the front of PATH
          export PATH="/tmp/nvcc-wrapper:$PATH"
          # Use GCC 14 runtime libraries for CXXABI_1.3.15 at runtime
          export LD_LIBRARY_PATH=${
            lib.makeLibraryPath [
              cudaPackages.cudatoolkit
              config.boot.kernelPackages.nvidiaPackages.latest
              gccRuntime.cc.lib # GCC 14 for runtime
              pkgs.zlib
              pkgs.glib
              pkgs.glibc
            ]
          }
          # Add gpu_core binaries to the library path
          export LD_LIBRARY_PATH="$PWD/gpu_core/bin/linux:$LD_LIBRARY_PATH"
          export __GLX_VENDOR_LIBRARY_NAME=nvidia
          export GBM_BACKEND=nvidia-drm
          # Verify GPU accessibility
          echo "Checking GPU availability..."
          if command -v nvidia-smi &> /dev/null; then
            nvidia-smi -L || echo "Warning: Could not list GPUs"
          fi
          echo "Starting GPU miner..."
          echo "Python version: $(venv/bin/python --version)"
          echo "Compile-time GCC: ${gccCompiler}/bin/gcc ($(${gccCompiler}/bin/gcc --version | head -n1))"
          echo "Runtime GCC library: ${gccRuntime.cc.lib}/lib"
          echo "Working directory: $(pwd)"
          echo "NVCC wrapper: $(type -p nvcc)"
          if strings ${gccRuntime.cc.lib}/lib/libstdc++.so.6 | grep -q CXXABI_1.3.15; then
            echo "OK: CXXABI_1.3.15 found in runtime libstdc++"
          else
            echo "WARNING: CXXABI_1.3.15 not found in runtime libstdc++"
          fi
          # Test nvcc compilation
          echo "Testing nvcc compiler configuration..."
          /tmp/nvcc-wrapper/nvcc --version
          # Run the miner
          exec venv/bin/python main.py --workers ${toString cfg.maxWorkers} 2>&1
        '';
        StandardOutput = "journal";
        StandardError = "journal";
      };
    };
  };
}