Compare commits

..

1 Commit

Author SHA1 Message Date
ash-bot 5b1d352fbb feat(ci): target ci-build runner label
CI / check (pull_request) Has been cancelled
CI / build-and-cache (pull_request) Has been cancelled
Route check and build-and-cache jobs to ci-build runner tier
for proper resource isolation from lightweight CI workloads.
2026-04-28 19:44:26 +00:00
15 changed files with 15 additions and 6068 deletions
+2 -70
View File
@@ -8,7 +8,7 @@ on:
jobs:
check:
runs-on: ubuntu-latest
runs-on: [ubuntu-latest, ci-build]
steps:
- uses: actions/checkout@v6
@@ -20,7 +20,7 @@ jobs:
NIX_CONFIG: "access-tokens = git.johnogle.info=${{ secrets.GITEA_ACCESS_TOKEN }}"
build-and-cache:
runs-on: ubuntu-latest
runs-on: [ubuntu-latest, ci-build]
needs: check
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
steps:
@@ -48,9 +48,6 @@ jobs:
custom-mcrcon-rbw
custom-tea-rbw
custom-rclone-torbox-setup
custom-opencode
custom-qmd
openclaw-runtime-closure
custom-nextcloud-talk-desktop
qt-pinned-jellyfin-media-player
qt-pinned-stremio
@@ -107,68 +104,3 @@ jobs:
fi
env:
NIX_CONFIG: "access-tokens = git.johnogle.info=${{ secrets.GITEA_ACCESS_TOKEN }}"
build-and-push-openclaw:
name: Build & Push OpenClaw Image
runs-on: ubuntu-latest
needs: check
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
outputs:
image_tag: ${{ steps.meta.outputs.tag }}
steps:
- uses: actions/checkout@v6
- uses: https://git.johnogle.info/johno/gitea-actions/nix-setup@v1
- name: Setup SSH for cache
run: |
mkdir -p ~/.ssh
echo "${{ secrets.CACHE_SSH_KEY }}" > ~/.ssh/cache_key
chmod 600 ~/.ssh/cache_key
ssh-keyscan -H ${{ secrets.CACHE_HOST }} >> ~/.ssh/known_hosts 2>/dev/null || true
- name: Generate image tag
id: meta
run: |
# Read the image tag from the nix definition's tag attribute
# buildLayeredImage sets tag from openclawImageTag in default.nix
IMAGE_TAG=$(nix eval .#packages.x86_64-linux.openclaw-image.imageTag --raw 2>/dev/null || \
nix eval .#openclaw-image.imageTag --raw 2>/dev/null || \
nix eval .#openclaw-image.outPath --raw 2>/dev/null | xargs basename | sed 's/.*-//')
# Fallback to short SHA if tag extraction fails
if [ -z "$IMAGE_TAG" ] || [ "$IMAGE_TAG" = "tar.gz" ]; then
IMAGE_TAG=$(echo "${{ github.sha }}" | cut -c1-7)
fi
echo "tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "Image will be tagged: ${IMAGE_TAG}"
- name: Build Docker image with Nix
run: nix build .#openclaw-image --cores 2
env:
NIX_CONFIG: "access-tokens = git.johnogle.info=${{ secrets.GITEA_ACCESS_TOKEN }}"
- name: Load and tag image
run: |
docker load < result
docker tag openclaw:${{ steps.meta.outputs.tag }} registry.johnogle.info/openclaw:${{ steps.meta.outputs.tag }}
docker tag openclaw:${{ steps.meta.outputs.tag }} registry.johnogle.info/openclaw:latest
- name: Login to registry
run: |
echo "${{ secrets.REGISTRY_PASSWORD }}" | docker login registry.johnogle.info -u ${{ secrets.REGISTRY_USERNAME }} --password-stdin
- name: Push image
run: |
# Push versioned tag with retry (large images can timeout on slow connections)
for i in 1 2 3; do
if docker push registry.johnogle.info/openclaw:${{ steps.meta.outputs.tag }}; then
break
fi
echo "Push attempt $i failed, retrying in 10s..."
sleep 10
done
# Push latest tag — mostly a manifest push since layers already exist
docker push registry.johnogle.info/openclaw:latest || \
echo "::warning::Failed to push :latest tag (versioned tag already pushed)"
-17
View File
@@ -104,11 +104,6 @@
};
};
# Common specialArgs passed to all NixOS systems
nixosSpecialArgs = {
inherit nixpkgs-unstable;
};
# Shared unstable overlays for custom package builds
customUnstableOverlays = [
# Override claude-code in unstable to use our custom GCS-based build
@@ -154,7 +149,6 @@
in
{
nixosConfigurations.nix-book = nixpkgs.lib.nixosSystem rec {
specialArgs = nixosSpecialArgs;
system = "x86_64-linux";
modules = nixosModules ++ [
./machines/nix-book/configuration.nix
@@ -172,7 +166,6 @@
};
nixosConfigurations.boxy = nixpkgs.lib.nixosSystem rec {
specialArgs = nixosSpecialArgs;
system = "x86_64-linux";
modules = nixosModules ++ [
./machines/boxy/configuration.nix
@@ -186,7 +179,6 @@
};
nixosConfigurations.gym-box = nixpkgs.lib.nixosSystem rec {
specialArgs = nixosSpecialArgs;
system = "x86_64-linux";
modules = nixosModules ++ [
./machines/gym-box/configuration.nix
@@ -199,7 +191,6 @@
};
nixosConfigurations.zix790prors = nixpkgs.lib.nixosSystem rec {
specialArgs = nixosSpecialArgs;
system = "x86_64-linux";
modules = nixosModules ++ [
./machines/zix790prors/configuration.nix
@@ -221,7 +212,6 @@
# Live USB ISO configuration
nixosConfigurations.live-usb = nixpkgs.lib.nixosSystem rec {
specialArgs = nixosSpecialArgs;
system = "x86_64-linux";
modules = nixosModules ++ [
./machines/live-usb/configuration.nix
@@ -235,7 +225,6 @@
# Steam Deck configuration (using unstable for better Jovian compatibility)
nixosConfigurations.nix-deck = nixpkgs-unstable.lib.nixosSystem rec {
system = "x86_64-linux";
specialArgs = nixosSpecialArgs;
modules = nixosModulesUnstable ++ [
./machines/nix-deck/configuration.nix
{
@@ -247,7 +236,6 @@
# ZFS/NFS server configuration
nixosConfigurations.john-endesktop = nixpkgs.lib.nixosSystem rec {
specialArgs = nixosSpecialArgs;
system = "x86_64-linux";
modules = nixosModules ++ [
./machines/john-endesktop/configuration.nix
@@ -295,8 +283,6 @@
"custom-tea-rbw" = pkgs.custom.tea-rbw;
"custom-rclone-torbox-setup" = pkgs.custom.rclone-torbox-setup;
"custom-opencode" = pkgs.custom.opencode;
"custom-qmd" = pkgs.custom.qmd;
"qt-pinned-jellyfin-media-player" = pkgsQt.jellyfin-media-player;
"qt-pinned-stremio" = pkgsQt.stremio;
}
@@ -306,9 +292,6 @@
"custom-nextcloud-talk-desktop" = pkgs.custom.nextcloud-talk-desktop;
# nix-deck kernel from Jovian-NixOS (Steam Deck) - expensive to build
"nix-deck-kernel" = self.nixosConfigurations.nix-deck.config.boot.kernelPackages.kernel;
# OpenClaw docker image (pulled + augmented with nix tools)
"openclaw-image" = pkgs.custom.openclaw-image;
"openclaw-runtime-closure" = pkgs.custom.openclaw-runtime-closure;
}
else
{ }
-8
View File
@@ -99,14 +99,6 @@ in
};
};
xdg.configFile."opencode/opencode.json" = {
source = ./opencode-config.json;
};
xdg.configFile."opencode/oh-my-openagent.jsonc" = {
source = ./opencode-omo-config.jsonc;
};
# Note: modules must be imported at top-level home config
};
}
-24
View File
@@ -1,24 +0,0 @@
{
"$schema": "https://opencode.ai/config.json",
"plugin": ["oh-my-openagent"],
"provider": {
"llama-local": {
"name": "Llama.cpp (zix790prors RTX 4070 Ti)",
"npm": "@ai-sdk/openai-compatible",
"options": {
"baseURL": "http://zix790prors.oglehome:8080/v1"
},
"models": {
"Qwen3.6-35B-A3B": {
"name": "Qwen3.6-35B-A3B (UD-Q8_K_XL)",
"reasoning": true,
"tool_call": true,
"limit": {
"context": 131072,
"output": 32768
}
}
}
}
}
}
-136
View File
@@ -1,136 +0,0 @@
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json",
"agents": {
"sisyphus": {
"model": "ollama-cloud/glm-5.1",
"fallback_models": [
"ollama-cloud/kimi-k2.5",
"llama-local/Qwen3.6-35B-A3B",
"ollama-cloud/qwen3-coder-next"
]
},
"prometheus": {
"model": "ollama-cloud/glm-5.1",
"fallback_models": [
"ollama-cloud/kimi-k2.5",
"ollama-cloud/qwen3-coder-next"
]
},
"atlas": {
"model": "ollama-cloud/glm-5.1",
"fallback_models": [
"ollama-cloud/gemma4:31b",
"ollama-cloud/kimi-k2.5"
]
},
"explore": {
"model": "ollama-cloud/gemma4:31b",
"fallback_models": [
"ollama-cloud/ministral-3:14b",
"llama-local/Qwen3.6-35B-A3B"
]
},
"librarian": {
"model": "ollama-cloud/gemma4:31b",
"fallback_models": [
"ollama-cloud/ministral-3:14b"
]
},
"oracle": {
"model": "ollama-cloud/qwen3-coder-next",
"fallback_models": [
"ollama-cloud/deepseek-v3.2",
"ollama-cloud/glm-5.1"
]
},
"multimodal-looker": {
"disable": true
},
"hephaestus": {
"disable": true
},
"momus": {
"model": "ollama-cloud/glm-5.1",
"fallback_models": [
"ollama-cloud/qwen3-coder-next"
]
},
"metis": {
"model": "ollama-cloud/glm-5.1",
"fallback_models": [
"ollama-cloud/kimi-k2.5"
]
}
},
"categories": {
"quick": {
"model": "ollama-cloud/gemma4:31b",
"fallback_models": [
"ollama-cloud/ministral-3:14b"
]
},
"unspecified-low": {
"model": "ollama-cloud/glm-5.1",
"fallback_models": [
"ollama-cloud/kimi-k2.5",
"llama-local/Qwen3.6-35B-A3B"
]
},
"unspecified-high": {
"model": "ollama-cloud/glm-5.1",
"fallback_models": [
"ollama-cloud/kimi-k2.5",
"ollama-cloud/qwen3-coder-next"
]
},
"deep": {
"model": "ollama-cloud/qwen3-coder-next",
"fallback_models": [
"ollama-cloud/deepseek-v3.2",
"ollama-cloud/glm-5.1"
]
},
"ultrabrain": {
"model": "ollama-cloud/qwen3-coder-next",
"fallback_models": [
"ollama-cloud/deepseek-v3.2",
"ollama-cloud/glm-5.1"
]
},
"writing": {
"model": "ollama-cloud/glm-5.1",
"fallback_models": [
"ollama-cloud/kimi-k2.5"
]
},
"visual-engineering": {
"model": "ollama-cloud/glm-5.1",
"fallback_models": [
"ollama-cloud/qwen3-coder-next"
]
}
},
"runtime_fallback": {
"enabled": true,
"retry_on_errors": [400, 429, 503, 529],
"max_fallback_attempts": 3,
"cooldown_seconds": 60,
"notify_on_fallback": true
},
"background_task": {
"defaultConcurrency": 5,
"providerConcurrency": {
"ollama-cloud": 10,
"llama-local": 2
}
},
"disabled_hooks": ["no-sisyphus-gpt"],
"comment_checker": {
"custom_prompt": "Check for AI-generated filler phrases, redundant obvious statements, and excessively verbose explanations. Comments should add value beyond what the code itself expresses. Flag: 'TODO' without ticket references, 'Note that...' when obvious, repeating the function name in the comment, and any form of 'simply' or 'simply just'. Use {{comments}} placeholder."
},
"tmux": { "enabled": false },
"experimental": {
"aggressive_truncation": true,
"task_system": true
}
}
+5 -6
View File
@@ -233,15 +233,14 @@ rbw is unavailable or the entry is not found."
gptel-use-tools t
gptel-confirm-tool-calls 'always
gptel-include-reasoning 'ignore
gptel-model "Qwen3.6-35B-A3B")
gptel-model "qwen3:30b")
;; Set default backend to llama-swap (OpenAI-compatible)
;; Set default backend to be Ollama-Local
(setq! gptel-backend
(gptel-make-openai "llama-swap"
:host "localhost:8080"
:endpoint "/v1/chat/completions"
(gptel-make-ollama "Ollama-Local"
:host "localhost:11434"
:stream t
:models '("Qwen3.6-35B-A3B")))
:models '(deepseek-r1 deepseek-r1-fullctx qwen3:30b qwen3:4b llama3.1 qwen2.5-coder mistral-nemo gpt-oss)))
;; Define custom tools
(gptel-make-tool
+6 -14
View File
@@ -10,7 +10,6 @@ with lib;
imports = [
./hardware-configuration.nix
#./virtual-surround.nix
../../roles/local-inference
];
roles = {
@@ -27,19 +26,6 @@ with lib;
x11 = true;
};
kodi.enable = true;
local-inference = {
enable = true;
host = "zix790prors.oglehome";
openFirewall = true;
globalTTL = 900;
models = {
"Qwen3.6-35B-A3B" = {
hf-model = "unsloth/Qwen3.6-35B-A3B-GGUF:UD-Q8_K_XL";
aliases = [ "Qwen3.6-35B-A3B" ];
cpu-moe = true;
};
};
};
nfs-mounts.enable = true;
nvidia = {
enable = true;
@@ -70,6 +56,12 @@ with lib;
${pkgs.xorg.xrandr}/bin/xrandr --output DP-0 --mode 3440x1440 --rate 164.90 --primary
'';
services.ollama = {
enable = true;
acceleration = "cuda";
loadModels = [ "gpt-oss" "deepseek-r1" "qwen3:30b" ];
};
# This option defines the first version of NixOS you have installed on this particular machine,
# and is used to maintain compatibility with application data (e.g. databases) created on older NixOS versions.
#
+1 -4
View File
@@ -1,5 +1,5 @@
{ pkgs, ... }:
rec {
{
tea-rbw = pkgs.callPackage ./tea-rbw { };
app-launcher-server = pkgs.callPackage ./app-launcher-server { };
claude-code = pkgs.callPackage ./claude-code { };
@@ -8,7 +8,4 @@ rec {
pi-coding-agent = pkgs.callPackage ./pi-coding-agent { };
nextcloud-talk-desktop = pkgs.callPackage ./nextcloud-talk-desktop { };
opencode = pkgs.callPackage ./opencode { };
qmd = pkgs.callPackage ./qmd { };
openclaw-image = pkgs.callPackage ./openclaw-image { };
openclaw-runtime-closure = pkgs.callPackage ./openclaw-image/runtime-closure.nix { inherit qmd; };
}
-232
View File
@@ -1,232 +0,0 @@
{
pkgs,
lib,
}:
let
# renovate: datasource=docker depName=ghcr.io/openclaw/openclaw
openclawImageTag = "2026.4.26";
openclawImageDigest = "sha256:7ea070b04d1e70811fe8ba15feaad5890b1646021b24e00f4795bd4587a594ed";
# Pull the upstream openclaw Docker image (only to extract /app from it)
openclawBase = pkgs.dockerTools.pullImage {
imageName = "ghcr.io/openclaw/openclaw";
imageDigest = openclawImageDigest;
sha256 = "sha256-a05y90gGFhka95t6blkk3Vt/xChKC+BIuoWAeEGoavk=";
finalImageTag = openclawImageTag;
os = "linux";
arch = "amd64";
};
# Extract the openclaw application (/app) from the upstream Docker image.
#
# We don't use fromImage because Nix's copyToRoot creates a /lib/ directory
# that shadows the Debian base image's /lib -> usr/lib symlink, breaking
# the glibc dynamic linker (/lib64/ld-linux-x86-64.so.2 chain).
# Instead, we extract only /app from the upstream image layers, then build
# a pure Nix image where everything runs against Nix's glibc.
#
# Docker image tars use the Image Manifest v2 format: each layer is a
# separate .tar within the outer tar. We extract all layers to find /app.
openclawApp =
pkgs.runCommand "openclaw-app"
{
nativeBuildInputs = [
pkgs.python3
pkgs.gnutar
];
}
''
mkdir -p $out/app
# Extract all layers from the Docker image tarball
mkdir workdir
tar xf ${openclawBase} -C workdir
# Python: parse manifest, generate shell script to extract /app from layers
python3 << 'PYEOF'
import json
with open("workdir/manifest.json") as f:
manifest = json.load(f)
layers = manifest[0]["Layers"]
with open("extract.sh", "w") as script:
script.write("#!/bin/sh\nset -e\n")
for layer in layers:
if layer.endswith(".tar"):
lp = f"workdir/{layer}"
else:
lp = f"workdir/{layer}/layer.tar"
script.write(f"""
if [ -f '{lp}' ]; then
if tar tf '{lp}' 2>/dev/null | grep -q '^app/'; then
echo "Extracting /app from {layer}..." >&2
tar xf '{lp}' -C "$OUT_DIR" --strip-components=0 app/ 2>/dev/null || true
fi
fi
""")
PYEOF
OUT_DIR="$out" sh extract.sh
if [ ! -f "$out/app/openclaw.mjs" ]; then
echo "ERROR: /app/openclaw.mjs not found after extraction"
ls -la $out/app/ 2>/dev/null || true
exit 1
fi
echo "Successfully extracted openclaw app"
'';
# Custom NSS files that include the "node" user (UID 1000, GID 1000).
# fakeNss only creates root/nobody, so we create our own with all three.
openclawNss = pkgs.runCommand "openclaw-nss" { } ''
mkdir -p $out/etc
cat > $out/etc/passwd << 'EOF'
root:x:0:0:root user:/var/empty:/bin/sh
nobody:x:65534:65534:nobody:/var/empty:/bin/sh
node:x:1000:1000::/home/node:/bin/bash
EOF
cat > $out/etc/group << 'EOF'
root:x:0:
nobody:x:65534:
node:x:1000:
EOF
cat > $out/etc/shadow << 'EOF'
root:!x:::::::
nobody:!x:::::::
node:!x:::::::
EOF
'';
# Node user home directory
nodeHome = pkgs.runCommand "node-home" { } ''
mkdir -p $out/home/node
'';
# Docker entrypoint script — equivalent to the upstream docker-entrypoint.sh.
# We replicate it as a Nix derivation to avoid extracting the Debian binary
# layer and to avoid filesystem conflicts in the image customization layer.
dockerEntrypoint = pkgs.writeShellScript "docker-entrypoint.sh" ''
#!/bin/sh
set -e
# Run command with node if the first arg contains a "-" or is not a
# system command. The last part inside the "{}" is a workaround for
# the following bug in ash/dash:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=874264
if [ "''${1#-}" != "$1" ] || [ -z "$(command -v "''${1}")" ] || { [ -f "''${1}" ] && ! [ -x "''${1}" ]; }; then
set -- node "$@"
fi
exec "$@"
'';
# Wrap the entrypoint as a derivation so it can be placed via copyToRoot
# instead of extraCommands (which can't write to paths that already have
# Nix store symlinks from other contents)
entrypointPkg = pkgs.runCommand "docker-entrypoint" { } ''
mkdir -p $out/usr/local/bin
cp ${dockerEntrypoint} $out/usr/local/bin/docker-entrypoint.sh
chmod +x $out/usr/local/bin/docker-entrypoint.sh
'';
in
pkgs.dockerTools.buildLayeredImage {
name = "openclaw";
tag = openclawImageTag;
# Don't use fromImage — see openclawApp derivation comment
maxLayers = 120;
contents = [
# System basics
pkgs.bashInteractive
pkgs.coreutils
pkgs.cacert
# Custom NSS with node user
openclawNss
# Node user home directory
nodeHome
# Docker entrypoint script (in /usr/local/bin)
entrypointPkg
# Runtime package manager (agents can `nix run` arbitrary packages)
# Also needed by init container for `nix copy --from` to populate PVC from Harmonia
pkgs.nix
# HTTP client (needed for init container Harmonia health checks and fallback)
pkgs.curl
];
# NOTE: Runtime packages (nodejs_22, kubectl, jq, git, emacs, tsx, tea,
# pythonEnv, qmd) are NOT in contents. They live in the
# `openclaw-runtime-closure` meta-package, which CI pushes to the
# Harmonia binary cache. The init container pulls them from Harmonia
# into the PVC at pod startup. This keeps the Docker image thin (~1.5GB
# vs the previous ~2.7GB) and makes CI pushes fast.
#
# NOTE: openclawApp is NOT in contents. It would create /app as a symlink
# to /nix/store/..., which breaks OpenClaw's symlink escape security check
# (resolved paths "escape" /app/dist/extensions). Instead, extraCommands
# copies the real files into /app as a proper directory.
extraCommands = ''
# Create /tmp with correct permissions (needed by Node.js and nix)
mkdir -p tmp
chmod 1777 tmp
# Create /run for nix-daemon socket
mkdir -p run
# Create /var/empty (referenced by NSS passwd home dirs)
mkdir -p var/empty
# Copy OpenClaw app as a REAL directory (not a Nix store symlink).
# The app has a symlink escape check: resolved paths must stay within
# /app/dist/extensions/. If /app is a symlink to /nix/store/HASH/app/,
# realpath resolves to /nix/store/... which "escapes" the boundary.
rm -rf app
mkdir -p app
cp -a ${openclawApp}/app/. app/
'';
config = {
Entrypoint = [ "docker-entrypoint.sh" ];
Cmd = [
"node"
"openclaw.mjs"
"gateway"
"--allow-unconfigured"
];
WorkingDir = "/app";
User = "node";
Env = [
# SSL certificates
"SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt"
"NIX_SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt"
# Nix configuration
"NIX_PATH=nixpkgs=flake:nixpkgs"
# PATH: standard dirs + Nix store bin dirs appended by buildLayeredImage
# + runtime closure bin dir (populated from Harmonia by init container into PVC)
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:${pkgs.custom.openclaw-runtime-closure}/bin"
"NODE_ENV=production"
# Home directory (Docker User directive doesn't set HOME from /etc/passwd)
"HOME=/home/node"
# Runtime closure path — init container uses this to `nix copy --from` Harmonia
# This creates a build dependency (Nix resolves the path) but the closure
# is NOT in `contents`, so it won't be in the image layers.
"OPENCLAW_RUNTIME_CLOSURE=${pkgs.custom.openclaw-runtime-closure}"
];
};
}
@@ -1,22 +0,0 @@
{
pkgs,
qmd,
}:
# This package creates a store path that transitively depends on all packages
# that should be available at runtime in the OpenClaw pod.
# CI uses this to push a single closure to Harmonia.
pkgs.symlinkJoin {
name = "openclaw-runtime-closure";
paths = [
pkgs.nodejs_22
pkgs.kubectl
pkgs.jq
pkgs.git
pkgs.emacs
pkgs.tsx
pkgs.tea
(pkgs.python3.withPackages (ps: [ ps.pymupdf ]))
qmd
];
}
-89
View File
@@ -1,89 +0,0 @@
{
lib,
stdenv,
buildNpmPackage,
fetchzip,
nodejs_22,
python3,
sqlite,
}:
let
version = "2.1.0";
in
buildNpmPackage rec {
pname = "qmd";
inherit version;
src = fetchzip {
url = "https://git.johnogle.info/johno/qmd/archive/4fc77609b54102110a42583b78d20580aa8510e3.tar.gz";
sha256 = "sha256-jW2CbntQuDzSDo42bXjfBxSB5BSME6wYDOSkLvEQnb4=";
};
# Vendored package-lock.json generated from QMD's package.json.
# QMD ships bun.lock/pnpm-lock.yaml but not package-lock.json.
# buildNpmPackage requires npm's lockfile format.
postPatch = ''
cp ${./package-lock.json} package-lock.json
'';
# npmDepsHash matches the vendored package-lock.json
npmDepsHash = "sha256-3sis2NIuPDnwAOzWEQBcA+VUsMaxO8Nkuk6wCJ9foBA=";
nodejs = nodejs_22;
nativeBuildInputs = [
nodejs
python3 # for node-gyp (better-sqlite3, sqlite-vec)
];
buildInputs = [
sqlite # for sqlite extension loading at runtime
];
# npm rebuild compiles native addons (better-sqlite3, sqlite-vec) against Node 22's V8
npmRebuildFlags = [ "--build-from-source" ];
# Don't run npm run prepare (it tries to install git hooks)
npmPruneFlags = [ "--omit=dev" ];
buildPhase = ''
runHook preBuild
npm run build
runHook postBuild
'';
installPhase = ''
runHook preInstall
mkdir -p $out/lib/qmd $out/bin
# Copy compiled output, node_modules, and config
cp -r dist node_modules $out/lib/qmd/
cp package.json $out/lib/qmd/
# Create wrapper that runs the compiled CLI with Node.js 22
# Sets LD_LIBRARY_PATH for sqlite-vec extension loading
cat > $out/bin/qmd << EOF
#!/bin/sh
export LD_LIBRARY_PATH="${sqlite.out}/lib''${LD_LIBRARY_PATH:+:}\$LD_LIBRARY_PATH"
exec ${nodejs}/bin/node $out/lib/qmd/dist/cli/qmd.js "\$@"
EOF
chmod +x $out/bin/qmd
runHook postInstall
'';
meta = with lib; {
description = "Query Markup Documents on-device hybrid search for markdown files";
longDescription = ''
QMD combines BM25 full-text search, vector semantic search, and LLM re-ranking.
This build uses Node.js 22 (instead of Bun) to avoid native module ABI issues
with better-sqlite3 and sqlite-vec.
'';
homepage = "https://github.com/tobi/qmd";
license = licenses.mit;
platforms = platforms.linux;
mainProgram = "qmd";
};
}
-5298
View File
File diff suppressed because it is too large Load Diff
-13
View File
@@ -19,19 +19,6 @@
"dependencyDashboard": true,
"dependencyDashboardAutoclose": false,
"dependencyDashboardTitle": "NixOS Configs Dependency Dashboard",
"customManagers": [
{
"customType": "regex",
"description": "Update openclaw Docker image tag and digest in Nix package",
"managerFilePatterns": [
"/^packages/openclaw-image/default\\.nix$/"
],
"matchStrings": [
" # renovate: datasource=(?<datasource>[^\\s]+) depName=(?<depName>[^\\s]+)\\n openclawImageTag = \"(?<currentValue>[^\"]+)\";\\n openclawImageDigest = \"sha256:(?<currentDigest>[^\"]+)\";"
],
"autoReplaceStringTemplate": " # renovate: datasource={{{datasource}}} depName={{{depName}}}\n openclawImageTag = \"{{{newValue}}}\";\n openclawImageDigest = \"{{{newDigest}}}\";"
}
],
"packageRules": [
{
"description": "Group all GitHub Actions updates",
+1 -8
View File
@@ -1,8 +1,4 @@
{
lib,
pkgs,
...
}:
{ lib, pkgs, ... }:
with lib;
@@ -15,9 +11,6 @@ with lib;
./desktop
./k3s-node
./kodi
# local-inference is NOT imported here because its module-level
# disabledModules/imports conflict with nix-deck's unstable-based
# nixpkgs. Import it directly in machine configs that need it.
./nfs-mounts
./plasma-bigscreen
./nvidia
-127
View File
@@ -1,127 +0,0 @@
{
config,
lib,
pkgs,
nixpkgs-unstable,
...
}:
with lib;
let
cfg = config.roles.local-inference;
llama-cpp-cuda = pkgs.unstable.llama-cpp.override { cudaSupport = true; };
llama-server = getExe' llama-cpp-cuda "llama-server";
in
{
# Replace the stable nixpkgs llama-swap module with the unstable version,
# which may have newer features. For systems already built on unstable
# (e.g., nix-deck), this module is excluded from roles/default.nix instead.
imports = [ "${nixpkgs-unstable}/nixos/modules/services/networking/llama-swap.nix" ];
disabledModules = [ "services/networking/llama-swap.nix" ];
options.roles.local-inference = {
enable = mkEnableOption "Enable local LLM inference via llama-swap + llama.cpp";
models = mkOption {
type = types.attrsOf (
types.submodule {
options = {
hf-model = mkOption {
type = types.str;
description = "HuggingFace model shorthand (e.g. unsloth/Qwen3.6-35B-A3B-GGUF:UD-Q8_K_XL)";
};
aliases = mkOption {
type = types.listOf types.str;
default = [ ];
description = "Aliases for the model in the API";
};
n-gpu-layers = mkOption {
type = types.int;
default = 99;
description = "Number of layers to offload to GPU";
};
cpu-moe = mkOption {
type = types.bool;
default = false;
description = "Offload MoE expert layers to CPU";
};
extraArgs = mkOption {
type = types.listOf types.str;
default = [ ];
description = "Extra arguments passed to llama-server";
};
ttl = mkOption {
type = types.int;
default = -1;
description = "Seconds before unloading model (-1 = use global default, 0 = never unload)";
};
};
}
);
default = { };
description = "Models to serve from HuggingFace";
};
host = mkOption {
type = types.str;
default = "127.0.0.1";
description = "IP address llama-swap listens on";
};
port = mkOption {
type = types.port;
default = 8080;
description = "Port llama-swap listens on";
};
openFirewall = mkOption {
type = types.bool;
default = false;
description = "Open the server port in the firewall";
};
healthCheckTimeout = mkOption {
type = types.int;
default = 600;
description = "Seconds to wait for llama-server health check (model download can take a while)";
};
globalTTL = mkOption {
type = types.int;
default = 0;
description = "Default TTL in seconds before unloading an idle model (0 = never unload)";
};
};
config = mkIf cfg.enable {
systemd.services.llama-swap.environment = {
LLAMA_CACHE = "/var/cache/llama-swap";
HOME = "/var/lib/llama-swap";
};
systemd.services.llama-swap.serviceConfig = {
CacheDirectory = "llama-swap";
StateDirectory = "llama-swap";
};
services.llama-swap = {
enable = true;
listenAddress = cfg.host;
port = cfg.port;
openFirewall = cfg.openFirewall;
settings = {
healthCheckTimeout = cfg.healthCheckTimeout;
globalTTL = cfg.globalTTL;
models = mapAttrs (
name: m:
{
cmd = "${llama-server} --port \${PORT} -hf ${m.hf-model} -ngl ${toString m.n-gpu-layers} --no-webui ${optionalString m.cpu-moe "--cpu-moe"} ${concatStringsSep " " m.extraArgs}";
aliases = m.aliases;
}
// optionalAttrs (m.ttl != -1) { ttl = m.ttl; }
) cfg.models;
};
};
};
}