fix: opencloud

Nick 2025-12-01 00:15:57 -06:00
parent dc66e0050d
commit b9bc92a168
6 changed files with 314 additions and 37 deletions

View file

@@ -0,0 +1,8 @@
{ pkgs, ... }:
{
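# nvtopPackages.nvidia is the nvtop build with only the NVIDIA backend enabled.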
home.packages = builtins.attrValues {
inherit (pkgs.nvtopPackages)
nvidia
;
};
}

View file

@@ -67,6 +67,7 @@ in
vaultwarden
website
zookeeper
defenseioGpu
;
};
};

View file

@@ -0,0 +1,287 @@
{
flake,
pkgs,
config,
lib,
...
}:
let
cfg = config.services.gpu-miner;
# CUDA-enabled packages
cudaPackages = pkgs.cudaPackages;
# Use GCC 13 for CUDA compilation (GCC 14 is too new for CUDA 12.8)
# But we need GCC 14's runtime libraries for CXXABI_1.3.15
gccCompiler = pkgs.gcc13;
gccRuntime = pkgs.gcc14;
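# The split is applied below: CC/CXX point nvcc at GCC 13 when the CUDA kernels are
# compiled, while LD_LIBRARY_PATH exposes GCC 14's libstdc++ (CXXABI_1.3.15) at runtime.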
pythonEnv = pkgs.python312.withPackages (
ps: with ps; [
pip
virtualenv
]
);
# Generate complete config.yaml file
configFile = pkgs.writeText "config.yaml" ''
miner:
  api_url: https://mine.defensio.io/api
  max_workers: ${toString cfg.maxWorkers}
  update_interval: 30
  retry_delay: 5
  max_retries: 3
wallet:
  consolidate_address: ${cfg.consolidateAddress}
  auto_register: true
  wallet_count: ${toString cfg.maxWorkers}
gpu:
  enabled: true
  batch_size: ${toString cfg.batchSize}
  device_id: ${toString cfg.gpuDeviceId}
  thread_count: 256
logging:
  level: info
  file: ${cfg.dataDir}/GPU-Miner/miner.log
  max_size: 10485760
  backup_count: 5
performance:
  target_hashrate: 0
  monitor_interval: 60
  stats_update_interval: 10
database:
  path: ${cfg.dataDir}/GPU-Miner/miner.db
  backup_enabled: true
  backup_interval: 3600
'';
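# ExecStartPre below copies this store path into the GPU-Miner checkout as config.yaml
# on every start, so configuration changes belong here rather than in the working directory.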
in
{
options.services.gpu-miner = {
enable = lib.mkEnableOption "GPU Miner for Defensio (DFO)";
dataDir = lib.mkOption {
type = lib.types.path;
default = "/var/lib/gpu-miner";
description = "Directory where the miner data and venv will be stored";
};
consolidateAddress = lib.mkOption {
type = lib.types.str;
default = "addr1q87k2jlckh6ujqx4ymkdd4jrhy6gukdtum0p77pdh5gqcw8ctl65fvaw097l32ta6m8hth3xu9cjfz70y34gs2mdfzlsj465th";
description = "Cardano address for consolidating mined tokens";
};
maxWorkers = lib.mkOption {
type = lib.types.int;
default = 1;
description = "Maximum number of worker threads";
};
batchSize = lib.mkOption {
type = lib.types.int;
default = 1000000;
description = "GPU batch size for mining";
};
gpuDeviceId = lib.mkOption {
type = lib.types.int;
default = 0;
description = "GPU device ID to use for mining (use nvidia-smi to see available GPUs)";
};
};
config = lib.mkIf cfg.enable {
# Create the data directory
systemd.tmpfiles.rules = [
"d ${cfg.dataDir} 0755 root root - -"
];
systemd.services.gpu-miner = {
description = "GPU Miner for Defensio (DFO) tokens";
after = [ "network-online.target" ];
wants = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ];
environment = {
CUDA_PATH = "${cudaPackages.cudatoolkit}";
# Use GCC 14 runtime libraries for CXXABI_1.3.15
LD_LIBRARY_PATH = lib.makeLibraryPath [
cudaPackages.cudatoolkit
config.boot.kernelPackages.nvidiaPackages.latest
gccRuntime.cc.lib
];
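# Select the NVIDIA GLX vendor library and GBM backend of the proprietary driver.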
__GLX_VENDOR_LIBRARY_NAME = "nvidia";
GBM_BACKEND = "nvidia-drm";
};
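# Build tools are on PATH because the miner appears to compile its CUDA kernels itself;
# the CC/CXX and include-path exports in ExecStart below assume this.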
path = [
cudaPackages.cudatoolkit
pkgs.git
pythonEnv
gccCompiler # GCC 13 for CUDA compilation
pkgs.gnumake
pkgs.patchelf
pkgs.stdenv.cc.cc.lib
pkgs.binutils
];
serviceConfig = {
Type = "simple";
Restart = "always";
RestartSec = 10;
WorkingDirectory = cfg.dataDir;
TimeoutStartSec = "5min";
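# The five-minute start timeout covers the git clone, venv creation, and pip installs in ExecStartPre.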
# Setup script
ExecStartPre = pkgs.writeShellScript "setup-gpu-miner" ''
set -e
cd ${cfg.dataDir}
# Clone or update the repository
if [ ! -d GPU-Miner ]; then
echo "Cloning GPU-Miner repository..."
${pkgs.git}/bin/git clone https://github.com/Herolias/GPU-Miner.git
else
echo "Updating GPU-Miner repository..."
cd GPU-Miner
${pkgs.git}/bin/git pull || true
cd ..
fi
cd GPU-Miner
# Remove and recreate venv to ensure clean state
if [ -d venv ]; then
echo "Removing existing virtual environment..."
rm -rf venv
fi
echo "Creating virtual environment with Python 3.12..."
${pythonEnv}/bin/python -m venv venv
echo "Installing dependencies..."
venv/bin/pip install --upgrade pip
# Install base dependencies first
echo "Installing base dependencies..."
venv/bin/pip install colorama pyyaml portalocker
# Install from requirements.txt if available
if [ -f requirements.txt ]; then
echo "Installing from requirements.txt..."
venv/bin/pip install -r requirements.txt
else
echo "Installing common dependencies..."
venv/bin/pip install requests numpy pycardano cbor2
fi
echo "Dependency installation complete."
# Patch the GPU binaries for NixOS
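# NixOS keeps libraries in the Nix store rather than /lib or /usr/lib, so the prebuilt
# binaries need their dynamic loader and RPATH rewritten to store paths before they can load.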
if [ -d gpu_core/bin/linux ]; then
echo "Patching GPU binaries for NixOS..."
for so_file in gpu_core/bin/linux/*.so; do
if [ -f "$so_file" ]; then
echo "Patching $so_file"
${pkgs.patchelf}/bin/patchelf --set-interpreter "$(cat ${pkgs.stdenv.cc}/nix-support/dynamic-linker)" "$so_file" 2>/dev/null || true
${pkgs.patchelf}/bin/patchelf --set-rpath "${
lib.makeLibraryPath [
cudaPackages.cudatoolkit
config.boot.kernelPackages.nvidiaPackages.latest
gccRuntime.cc.lib
pkgs.zlib
pkgs.glib
pkgs.glibc
]
}:$ORIGIN" "$so_file" || echo "Warning: Could not patch $so_file"
${pkgs.patchelf}/bin/patchelf --shrink-rpath "$so_file" 2>/dev/null || true
fi
done
echo "Patching complete."
fi
# Copy the config file
echo "Creating config.yaml..."
cp ${configFile} config.yaml
# Ensure database directory exists
mkdir -p "$(dirname ${cfg.dataDir}/GPU-Miner/miner.db)"
echo "Setup complete."
echo "Current commit: $(${pkgs.git}/bin/git log -1 --format='%h - %s')"
'';
# Main execution script
ExecStart = pkgs.writeShellScript "run-gpu-miner" ''
set -e
cd ${cfg.dataDir}/GPU-Miner
# Set CUDA environment
export CUDA_PATH=${cudaPackages.cudatoolkit}
export CUDA_ROOT=${cudaPackages.cudatoolkit}
export CUDA_HOME=${cudaPackages.cudatoolkit}
# Use GCC 13 for CUDA compilation (compatible with CUDA 12.8)
export CC=${gccCompiler}/bin/gcc
export CXX=${gccCompiler}/bin/g++
# Point nvcc to GCC 13 headers for compilation
export CPATH=${gccCompiler}/include/c++/${gccCompiler.version}:${cudaPackages.cudatoolkit}/include:$CPATH
export C_INCLUDE_PATH=${cudaPackages.cudatoolkit}/include:$C_INCLUDE_PATH
export CPLUS_INCLUDE_PATH=${gccCompiler}/include/c++/${gccCompiler.version}:${cudaPackages.cudatoolkit}/include:$CPLUS_INCLUDE_PATH
export LIBRARY_PATH=${cudaPackages.cudatoolkit}/lib:$LIBRARY_PATH
# CRITICAL: Use GCC 14 runtime libraries for CXXABI_1.3.15 at runtime
export LD_LIBRARY_PATH=${
lib.makeLibraryPath [
cudaPackages.cudatoolkit
config.boot.kernelPackages.nvidiaPackages.latest
gccRuntime.cc.lib # GCC 14 for runtime
pkgs.zlib
pkgs.glib
pkgs.glibc
]
}
# Add gpu_core binaries to library path
export LD_LIBRARY_PATH="$PWD/gpu_core/bin/linux:$LD_LIBRARY_PATH"
export __GLX_VENDOR_LIBRARY_NAME=nvidia
export GBM_BACKEND=nvidia-drm
# Verify GPU accessibility
echo "Checking GPU availability..."
if command -v nvidia-smi &> /dev/null; then
nvidia-smi -L || echo "Warning: Could not list GPUs"
fi
echo "Starting GPU miner..."
echo "Python version: $(venv/bin/python --version)"
echo "Compile-time GCC: ${gccCompiler}/bin/gcc ($(${gccCompiler}/bin/gcc --version | head -n1))"
echo "Runtime GCC library: ${gccRuntime.cc.lib}/lib"
echo "Working directory: $(pwd)"
# Verify the critical library is available
echo "Checking for CXXABI_1.3.15..."
if strings ${gccRuntime.cc.lib}/lib/libstdc++.so.6 | grep -q CXXABI_1.3.15; then
echo " CXXABI_1.3.15 found in runtime libstdc++"
else
echo " WARNING: CXXABI_1.3.15 not found"
fi
# Run the miner
exec venv/bin/python main.py --workers ${builtins.toString cfg.maxWorkers} 2>&1
'';
StandardOutput = "journal";
StandardError = "journal";
};
};
};
}

View file

@@ -0,0 +1,14 @@
{
imports = [
./config
];
services.gpu-miner = {
enable = true;
dataDir = "/var/lib/gpu-miner";
consolidateAddress = "addr1q87k2jlckh6ujqx4ymkdd4jrhy6gukdtum0p77pdh5gqcw8ctl65fvaw097l32ta6m8hth3xu9cjfz70y34gs2mdfzlsj465th";
maxWorkers = 12;
batchSize = 5000000;
gpuDeviceId = 0;
};
}

View file

@@ -30,7 +30,7 @@ in
port = serviceCfg.ports.port0;
address = localhost;
stateDir = "/var/lib/${serviceCfg.name}";
environmentFile = "/etc/opencloud-secrets/env";
environmentFile = "/run/secrets/projectenv";
};
openssh = {
@@ -53,38 +53,6 @@ in
opencloud = {
path = [ pkgs.inotify-tools ];
};
opencloud-copy-secrets = {
description = "Copy secrets from virtiofs to local filesystem";
before = [
"opencloud-init-config.service"
"opencloud.service"
];
requiredBy = [ "opencloud.service" ];
after = [ "run-secrets.mount" ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
};
script = ''
set -e
echo "Checking for secrets..."
if [ ! -f /run/secrets/projectenv ]; then
echo "ERROR: /run/secrets/projectenv not found!"
ls -la /run/secrets/ || true
exit 1
fi
echo "Copying secrets..."
mkdir -p /etc/opencloud-secrets
cp /run/secrets/projectenv /etc/opencloud-secrets/env
chmod 755 /etc/opencloud-secrets
chmod 644 /etc/opencloud-secrets/*
echo "Secrets copied successfully"
cat /etc/opencloud-secrets/env
'';
};
};
network = {
enable = true;
@@ -109,7 +77,6 @@ in
tmpfiles.rules = [
"d ${serviceCfg.varPaths.path0} 0755 ${serviceCfg.name} ${serviceCfg.name} -"
"z /etc/opencloud 0700 ${serviceCfg.name} ${serviceCfg.name} -"
# "L+ /etc/opencloud/proxy.yaml - - - - /etc/static/opencloud/proxy.yaml"
];
};