Compare commits

..

1 Commit

Author SHA1 Message Date
Ash
acf5242ca6 ci: add plasma-bigscreen to cached packages
All checks were successful
CI / check (pull_request) Successful in 1m30s
CI / build-and-cache (pull_request) Has been skipped
Add plasma-bigscreen to CI cache and expose it in flake packages.
The package is built from upstream master (not yet in nixpkgs).
2026-04-10 16:52:31 -07:00
15 changed files with 47 additions and 6184 deletions

View File

@@ -48,12 +48,11 @@ jobs:
custom-mcrcon-rbw
custom-tea-rbw
custom-rclone-torbox-setup
custom-opencode
custom-qmd
custom-nextcloud-talk-desktop
qt-pinned-jellyfin-media-player
qt-pinned-stremio
nix-deck-kernel
plasma-bigscreen
)
FAILED=()
@@ -106,94 +105,3 @@ jobs:
fi
env:
NIX_CONFIG: "access-tokens = git.johnogle.info=${{ secrets.GITEA_ACCESS_TOKEN }}"
build-and-push-openclaw:
name: Build & Push OpenClaw Image
runs-on: ubuntu-latest
needs: check
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
outputs:
image_tag: ${{ steps.meta.outputs.tag }}
steps:
- uses: actions/checkout@v6
- uses: https://git.johnogle.info/johno/gitea-actions/nix-setup@v1
- name: Setup SSH for cache
run: |
mkdir -p ~/.ssh
echo "${{ secrets.CACHE_SSH_KEY }}" > ~/.ssh/cache_key
chmod 600 ~/.ssh/cache_key
ssh-keyscan -H ${{ secrets.CACHE_HOST }} >> ~/.ssh/known_hosts 2>/dev/null || true
- name: Generate image tag
id: meta
run: |
# Read the image tag from the nix definition's tag attribute
# buildLayeredImage sets tag from openclawImageTag in default.nix
IMAGE_TAG=$(nix eval .#packages.x86_64-linux.openclaw-image.imageTag --raw 2>/dev/null || \
nix eval .#openclaw-image.imageTag --raw 2>/dev/null || \
nix eval .#openclaw-image.outPath --raw 2>/dev/null | xargs basename | sed 's/.*-//')
# Fallback to short SHA if tag extraction fails
if [ -z "$IMAGE_TAG" ] || [ "$IMAGE_TAG" = "tar.gz" ]; then
IMAGE_TAG=$(echo "${{ github.sha }}" | cut -c1-7)
fi
echo "tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "Image will be tagged: ${IMAGE_TAG}"
- name: Build Docker image with Nix
run: nix build .#openclaw-image --cores 2
env:
NIX_CONFIG: "access-tokens = git.johnogle.info=${{ secrets.GITEA_ACCESS_TOKEN }}"
- name: Load and tag image
run: |
docker load < result
docker tag openclaw:${{ steps.meta.outputs.tag }} registry.johnogle.info/openclaw:${{ steps.meta.outputs.tag }}
docker tag openclaw:${{ steps.meta.outputs.tag }} registry.johnogle.info/openclaw:latest
- name: Login to registry
run: |
echo "${{ secrets.REGISTRY_PASSWORD }}" | docker login registry.johnogle.info -u ${{ secrets.REGISTRY_USERNAME }} --password-stdin
- name: Push image
run: |
# Push versioned tag with retry (large images can timeout on slow connections)
for i in 1 2 3; do
if docker push registry.johnogle.info/openclaw:${{ steps.meta.outputs.tag }}; then
break
fi
echo "Push attempt $i failed, retrying in 10s..."
sleep 10
done
# Push latest tag — mostly a manifest push since layers already exist
docker push registry.johnogle.info/openclaw:latest || \
echo "::warning::Failed to push :latest tag (versioned tag already pushed)"
deploy-openclaw:
name: Deploy OpenClaw to Cluster
runs-on: ubuntu-latest
needs: build-and-push-openclaw
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
steps:
- name: Checkout k3s-cluster-config
uses: actions/checkout@v4
with:
repository: johno/k3s-cluster-config
token: ${{ secrets.CONFIG_REPO_TOKEN }}
path: k3s-cluster-config
- name: Update HelmRelease image tag
run: |
cd k3s-cluster-config
sed -i 's|tag: ".*"|tag: "${{ needs.build-and-push-openclaw.outputs.image_tag }}"|' \
clusters/oglenet/apps/communication/openclaw.yaml
- name: Commit and push
run: |
cd k3s-cluster-config
git config user.name "Gitea CI"
git config user.email "ci@johnogle.info"
git add clusters/oglenet/apps/communication/openclaw.yaml
git diff --cached --quiet || git commit -m "Deploy openclaw:${{ needs.build-and-push-openclaw.outputs.image_tag }}"
git push

View File

@@ -104,11 +104,6 @@
};
};
# Common specialArgs passed to all NixOS systems
nixosSpecialArgs = {
inherit nixpkgs-unstable;
};
# Shared unstable overlays for custom package builds
customUnstableOverlays = [
# Override claude-code in unstable to use our custom GCS-based build
@@ -154,7 +149,6 @@
in
{
nixosConfigurations.nix-book = nixpkgs.lib.nixosSystem rec {
specialArgs = nixosSpecialArgs;
system = "x86_64-linux";
modules = nixosModules ++ [
./machines/nix-book/configuration.nix
@@ -172,7 +166,6 @@
};
nixosConfigurations.boxy = nixpkgs.lib.nixosSystem rec {
specialArgs = nixosSpecialArgs;
system = "x86_64-linux";
modules = nixosModules ++ [
./machines/boxy/configuration.nix
@@ -186,7 +179,6 @@
};
nixosConfigurations.gym-box = nixpkgs.lib.nixosSystem rec {
specialArgs = nixosSpecialArgs;
system = "x86_64-linux";
modules = nixosModules ++ [
./machines/gym-box/configuration.nix
@@ -199,7 +191,6 @@
};
nixosConfigurations.zix790prors = nixpkgs.lib.nixosSystem rec {
specialArgs = nixosSpecialArgs;
system = "x86_64-linux";
modules = nixosModules ++ [
./machines/zix790prors/configuration.nix
@@ -221,7 +212,6 @@
# Live USB ISO configuration
nixosConfigurations.live-usb = nixpkgs.lib.nixosSystem rec {
specialArgs = nixosSpecialArgs;
system = "x86_64-linux";
modules = nixosModules ++ [
./machines/live-usb/configuration.nix
@@ -235,7 +225,6 @@
# Steam Deck configuration (using unstable for better Jovian compatibility)
nixosConfigurations.nix-deck = nixpkgs-unstable.lib.nixosSystem rec {
system = "x86_64-linux";
specialArgs = nixosSpecialArgs;
modules = nixosModulesUnstable ++ [
./machines/nix-deck/configuration.nix
{
@@ -247,7 +236,6 @@
# ZFS/NFS server configuration
nixosConfigurations.john-endesktop = nixpkgs.lib.nixosSystem rec {
specialArgs = nixosSpecialArgs;
system = "x86_64-linux";
modules = nixosModules ++ [
./machines/john-endesktop/configuration.nix
@@ -295,10 +283,10 @@
"custom-tea-rbw" = pkgs.custom.tea-rbw;
"custom-rclone-torbox-setup" = pkgs.custom.rclone-torbox-setup;
"custom-opencode" = pkgs.custom.opencode;
"custom-qmd" = pkgs.custom.qmd;
"qt-pinned-jellyfin-media-player" = pkgsQt.jellyfin-media-player;
"qt-pinned-stremio" = pkgsQt.stremio;
# Plasma Bigscreen — not yet in nixpkgs, built from upstream
"plasma-bigscreen" = pkgs.kdePackages.callPackage ./roles/plasma-bigscreen/package.nix { };
}
// (
if system == "x86_64-linux" then
@@ -306,8 +294,6 @@
"custom-nextcloud-talk-desktop" = pkgs.custom.nextcloud-talk-desktop;
# nix-deck kernel from Jovian-NixOS (Steam Deck) - expensive to build
"nix-deck-kernel" = self.nixosConfigurations.nix-deck.config.boot.kernelPackages.kernel;
# OpenClaw docker image (pulled + augmented with nix tools)
"openclaw-image" = pkgs.custom.openclaw-image;
}
else
{ }

View File

@@ -99,14 +99,6 @@ in
};
};
xdg.configFile."opencode/opencode.json" = {
source = ./opencode-config.json;
};
xdg.configFile."opencode/oh-my-openagent.jsonc" = {
source = ./opencode-omo-config.jsonc;
};
# Note: modules must be imported at top-level home config
};
}

View File

@@ -1,24 +0,0 @@
{
"$schema": "https://opencode.ai/config.json",
"plugin": ["oh-my-openagent"],
"provider": {
"llama-local": {
"name": "Llama.cpp (zix790prors RTX 4070 Ti)",
"npm": "@ai-sdk/openai-compatible",
"options": {
"baseURL": "http://zix790prors.oglehome:8080/v1"
},
"models": {
"Qwen3.6-35B-A3B": {
"name": "Qwen3.6-35B-A3B (UD-Q8_K_XL)",
"reasoning": true,
"tool_call": true,
"limit": {
"context": 131072,
"output": 32768
}
}
}
}
}
}

View File

@@ -1,136 +0,0 @@
{
"$schema": "https://raw.githubusercontent.com/code-yeongyu/oh-my-openagent/dev/assets/oh-my-opencode.schema.json",
"agents": {
"sisyphus": {
"model": "ollama-cloud/glm-5.1",
"fallback_models": [
"ollama-cloud/kimi-k2.5",
"llama-local/Qwen3.6-35B-A3B",
"ollama-cloud/qwen3-coder-next"
]
},
"prometheus": {
"model": "ollama-cloud/glm-5.1",
"fallback_models": [
"ollama-cloud/kimi-k2.5",
"ollama-cloud/qwen3-coder-next"
]
},
"atlas": {
"model": "ollama-cloud/glm-5.1",
"fallback_models": [
"ollama-cloud/gemma4:31b",
"ollama-cloud/kimi-k2.5"
]
},
"explore": {
"model": "ollama-cloud/gemma4:31b",
"fallback_models": [
"ollama-cloud/ministral-3:14b",
"llama-local/Qwen3.6-35B-A3B"
]
},
"librarian": {
"model": "ollama-cloud/gemma4:31b",
"fallback_models": [
"ollama-cloud/ministral-3:14b"
]
},
"oracle": {
"model": "ollama-cloud/qwen3-coder-next",
"fallback_models": [
"ollama-cloud/deepseek-v3.2",
"ollama-cloud/glm-5.1"
]
},
"multimodal-looker": {
"disable": true
},
"hephaestus": {
"disable": true
},
"momus": {
"model": "ollama-cloud/glm-5.1",
"fallback_models": [
"ollama-cloud/qwen3-coder-next"
]
},
"metis": {
"model": "ollama-cloud/glm-5.1",
"fallback_models": [
"ollama-cloud/kimi-k2.5"
]
}
},
"categories": {
"quick": {
"model": "ollama-cloud/gemma4:31b",
"fallback_models": [
"ollama-cloud/ministral-3:14b"
]
},
"unspecified-low": {
"model": "ollama-cloud/glm-5.1",
"fallback_models": [
"ollama-cloud/kimi-k2.5",
"llama-local/Qwen3.6-35B-A3B"
]
},
"unspecified-high": {
"model": "ollama-cloud/glm-5.1",
"fallback_models": [
"ollama-cloud/kimi-k2.5",
"ollama-cloud/qwen3-coder-next"
]
},
"deep": {
"model": "ollama-cloud/qwen3-coder-next",
"fallback_models": [
"ollama-cloud/deepseek-v3.2",
"ollama-cloud/glm-5.1"
]
},
"ultrabrain": {
"model": "ollama-cloud/qwen3-coder-next",
"fallback_models": [
"ollama-cloud/deepseek-v3.2",
"ollama-cloud/glm-5.1"
]
},
"writing": {
"model": "ollama-cloud/glm-5.1",
"fallback_models": [
"ollama-cloud/kimi-k2.5"
]
},
"visual-engineering": {
"model": "ollama-cloud/glm-5.1",
"fallback_models": [
"ollama-cloud/qwen3-coder-next"
]
}
},
"runtime_fallback": {
"enabled": true,
"retry_on_errors": [400, 429, 503, 529],
"max_fallback_attempts": 3,
"cooldown_seconds": 60,
"notify_on_fallback": true
},
"background_task": {
"defaultConcurrency": 5,
"providerConcurrency": {
"ollama-cloud": 10,
"llama-local": 2
}
},
"disabled_hooks": ["no-sisyphus-gpt"],
"comment_checker": {
"custom_prompt": "Check for AI-generated filler phrases, redundant obvious statements, and excessively verbose explanations. Comments should add value beyond what the code itself expresses. Flag: 'TODO' without ticket references, 'Note that...' when obvious, repeating the function name in the comment, and any form of 'simply' or 'simply just'. Use {{comments}} placeholder."
},
"tmux": { "enabled": false },
"experimental": {
"aggressive_truncation": true,
"task_system": true
}
}

View File

@@ -233,15 +233,14 @@ rbw is unavailable or the entry is not found."
gptel-use-tools t
gptel-confirm-tool-calls 'always
gptel-include-reasoning 'ignore
gptel-model "Qwen3.6-35B-A3B")
gptel-model "qwen3:30b")
;; Set default backend to llama-swap (OpenAI-compatible)
;; Set default backend to be Ollama-Local
(setq! gptel-backend
(gptel-make-openai "llama-swap"
:host "localhost:8080"
:endpoint "/v1/chat/completions"
(gptel-make-ollama "Ollama-Local"
:host "localhost:11434"
:stream t
:models '("Qwen3.6-35B-A3B")))
:models '(deepseek-r1 deepseek-r1-fullctx qwen3:30b qwen3:4b llama3.1 qwen2.5-coder mistral-nemo gpt-oss)))
;; Define custom tools
(gptel-make-tool

View File

@@ -1,46 +1,30 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:
{ config, lib, pkgs, modulesPath, ... }:
{
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
];
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [
"nvme"
"xhci_pci"
"thunderbolt"
"usbhid"
"uas"
"sd_mod"
];
boot.initrd.availableKernelModules = [ "nvme" "xhci_pci" "thunderbolt" "uas" "usbhid" "usb_storage" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-amd" ];
boot.extraModulePackages = [ ];
fileSystems."/" = {
device = "/dev/disk/by-uuid/0e75a66e-6c9e-471e-8bd2-fee7b27b74a1";
fsType = "btrfs";
};
fileSystems."/" =
{ device = "/dev/disk/by-uuid/59c0df78-c6fa-415d-8592-13547a3fada6";
fsType = "btrfs";
};
fileSystems."/boot" = {
device = "/dev/disk/by-uuid/9E2C-F187";
fsType = "vfat";
options = [
"fmask=0022"
"dmask=0022"
];
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/DC66-D04C";
fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ];
};
swapDevices = [ { device = "/.swapfile"; } ];
swapDevices = [ ];
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;

View File

@@ -10,7 +10,6 @@ with lib;
imports = [
./hardware-configuration.nix
#./virtual-surround.nix
../../roles/local-inference
];
roles = {
@@ -27,19 +26,6 @@ with lib;
x11 = true;
};
kodi.enable = true;
local-inference = {
enable = true;
host = "zix790prors.oglehome";
openFirewall = true;
globalTTL = 900;
models = {
"Qwen3.6-35B-A3B" = {
hf-model = "unsloth/Qwen3.6-35B-A3B-GGUF:UD-Q8_K_XL";
aliases = [ "Qwen3.6-35B-A3B" ];
cpu-moe = true;
};
};
};
nfs-mounts.enable = true;
nvidia = {
enable = true;
@@ -70,6 +56,12 @@ with lib;
${pkgs.xorg.xrandr}/bin/xrandr --output DP-0 --mode 3440x1440 --rate 164.90 --primary
'';
services.ollama = {
enable = true;
acceleration = "cuda";
loadModels = [ "gpt-oss" "deepseek-r1" "qwen3:30b" ];
};
# This option defines the first version of NixOS you have installed on this particular machine,
# and is used to maintain compatibility with application data (e.g. databases) created on older NixOS versions.
#

View File

@@ -5,7 +5,6 @@ import logging
import os
import subprocess
import sys
from datetime import date
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse
import psutil
@@ -23,9 +22,6 @@ ALLOWED_APPS = {
'kodi': 'kodi'
}
# Workout card base URL
WORKOUT_CARD_BASE_URL = 'https://ogle.fyi/ash/workout'
def is_app_running(app_name):
"""Check if an application is already running, returns (is_running, pid)"""
command = ALLOWED_APPS.get(app_name)
@@ -92,10 +88,7 @@ class AppLauncherHandler(BaseHTTPRequestHandler):
response = {
'status': 'running',
'available_apps': list(ALLOWED_APPS.keys()),
'endpoints': {
'POST /launch/<app_name>': 'Launch an application (optional JSON body: {"args": ["url"]})',
'POST /workout': 'Open today\'s workout card in Firefox'
}
'usage': 'POST /launch/<app_name> to launch an application'
}
self.wfile.write(json.dumps(response, indent=2).encode())
else:
@@ -108,21 +101,8 @@ class AppLauncherHandler(BaseHTTPRequestHandler):
if len(path_parts) == 2 and path_parts[0] == 'launch':
app_name = path_parts[1]
self.launch_app(app_name)
elif len(path_parts) == 1 and path_parts[0] == 'workout':
self.open_workout_card()
else:
self.send_error(404, "Invalid endpoint. Use /launch/<app_name> or /workout")
def read_post_body(self):
"""Read and parse JSON body from POST request, return dict or empty dict."""
content_length = int(self.headers.get('Content-Length', 0))
if content_length > 0:
try:
body = self.rfile.read(content_length)
return json.loads(body.decode('utf-8'))
except (json.JSONDecodeError, UnicodeDecodeError) as e:
logger.warning(f"Failed to parse POST body as JSON: {e}")
return {}
self.send_error(404, "Invalid endpoint. Use /launch/<app_name>")
def launch_app(self, app_name):
if app_name not in ALLOWED_APPS:
@@ -131,44 +111,30 @@ class AppLauncherHandler(BaseHTTPRequestHandler):
command = ALLOWED_APPS[app_name]
# Read optional args from POST body
body = self.read_post_body()
extra_args = body.get('args', [])
# Validate args are strings
if not isinstance(extra_args, list) or not all(isinstance(a, str) for a in extra_args):
self.send_error(400, "'args' must be a list of strings")
return
full_command = [command] + extra_args
# Check if app is already running
is_running, existing_pid = is_app_running(app_name)
if is_running:
# If extra args provided, still launch a new instance (e.g., open a URL)
if extra_args:
logger.info(f"Application {app_name} already running (PID: {existing_pid}), but extra args provided — launching new instance")
else:
logger.info(f"Application {app_name} is already running (PID: {existing_pid}), skipping launch")
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
response = {
'status': 'success',
'message': f'{app_name} is already running',
'pid': existing_pid,
'already_running': True
}
self.wfile.write(json.dumps(response).encode())
return
logger.info(f"Application {app_name} is already running (PID: {existing_pid}), skipping launch")
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
response = {
'status': 'success',
'message': f'{app_name} is already running',
'pid': existing_pid,
'already_running': True
}
self.wfile.write(json.dumps(response).encode())
return
try:
# Launch the application in the background
# Ensure we have the proper environment for GUI apps
env = os.environ.copy()
logger.info(f"Launching application: {' '.join(full_command)}")
logger.info(f"Launching application: {command}")
process = subprocess.Popen(
full_command,
[command],
env=env,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
@@ -193,50 +159,12 @@ class AppLauncherHandler(BaseHTTPRequestHandler):
logger.error(f"Error launching {command}: {e}")
self.send_error(500, f"Failed to launch {app_name}: {str(e)}")
def open_workout_card(self):
"""Open today's workout card in Firefox."""
today = date.today().strftime('%Y-%m-%d')
url = f"{WORKOUT_CARD_BASE_URL}/{today}.html"
logger.info(f"Opening workout card for {today}: {url}")
# Always launch Firefox with the URL, even if already running
command = ALLOWED_APPS['firefox']
env = os.environ.copy()
try:
process = subprocess.Popen(
[command, url],
env=env,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
start_new_session=True
)
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
response = {
'status': 'success',
'message': f'Opened workout card in Firefox',
'url': url,
'pid': process.pid
}
self.wfile.write(json.dumps(response).encode())
except FileNotFoundError:
logger.error(f"Firefox not found: {command}")
self.send_error(500, "Firefox not found on system")
except Exception as e:
logger.error(f"Error launching Firefox with workout card: {e}")
self.send_error(500, f"Failed to open workout card: {str(e)}")
def main():
port = int(sys.argv[1]) if len(sys.argv) > 1 else 8081
server = HTTPServer(('0.0.0.0', port), AppLauncherHandler)
logger.info(f"App launcher server starting on port {port}")
logger.info(f"Available applications: {list(ALLOWED_APPS.keys())}")
logger.info(f"Workout card URL: {WORKOUT_CARD_BASE_URL}/<date>.html")
try:
server.serve_forever()

View File

@@ -1,5 +1,5 @@
{ pkgs, ... }:
rec {
{
tea-rbw = pkgs.callPackage ./tea-rbw { };
app-launcher-server = pkgs.callPackage ./app-launcher-server { };
claude-code = pkgs.callPackage ./claude-code { };
@@ -8,6 +8,4 @@ rec {
pi-coding-agent = pkgs.callPackage ./pi-coding-agent { };
nextcloud-talk-desktop = pkgs.callPackage ./nextcloud-talk-desktop { };
opencode = pkgs.callPackage ./opencode { };
qmd = pkgs.callPackage ./qmd { };
openclaw-image = pkgs.callPackage ./openclaw-image { inherit qmd; };
}

View File

@@ -1,243 +0,0 @@
{
pkgs,
lib,
qmd,
}:
let
# Pin the upstream openclaw image version
# Updated by Renovate when new versions appear in flake.lock
openclawImageTag = "2026.4.14";
openclawImageDigest = "sha256:7ea070b04d1e70811fe8ba15feaad5890b1646021b24e00f4795bd4587a594ed";
# Pull the upstream openclaw Docker image (only to extract /app from it)
openclawBase = pkgs.dockerTools.pullImage {
imageName = "ghcr.io/openclaw/openclaw";
imageDigest = openclawImageDigest;
sha256 = "sha256-mSAa7lmciD6OXd3KHr8pf2VJ0aHPGnjdGbeu+oFhNo8=";
finalImageTag = openclawImageTag;
os = "linux";
arch = "amd64";
};
# Extract the openclaw application (/app) from the upstream Docker image.
#
# We don't use fromImage because Nix's copyToRoot creates a /lib/ directory
# that shadows the Debian base image's /lib -> usr/lib symlink, breaking
# the glibc dynamic linker (/lib64/ld-linux-x86-64.so.2 chain).
# Instead, we extract only /app from the upstream image layers, then build
# a pure Nix image where everything runs against Nix's glibc.
#
# Docker image tars use the Image Manifest v2 format: each layer is a
# separate .tar within the outer tar. We extract all layers to find /app.
openclawApp =
pkgs.runCommand "openclaw-app"
{
nativeBuildInputs = [
pkgs.python3
pkgs.gnutar
];
}
''
mkdir -p $out/app
# Extract all layers from the Docker image tarball
mkdir workdir
tar xf ${openclawBase} -C workdir
# Python: parse manifest, generate shell script to extract /app from layers
python3 << 'PYEOF'
import json
with open("workdir/manifest.json") as f:
manifest = json.load(f)
layers = manifest[0]["Layers"]
with open("extract.sh", "w") as script:
script.write("#!/bin/sh\nset -e\n")
for layer in layers:
if layer.endswith(".tar"):
lp = f"workdir/{layer}"
else:
lp = f"workdir/{layer}/layer.tar"
script.write(f"""
if [ -f '{lp}' ]; then
if tar tf '{lp}' 2>/dev/null | grep -q '^app/'; then
echo "Extracting /app from {layer}..." >&2
tar xf '{lp}' -C "$OUT_DIR" --strip-components=0 app/ 2>/dev/null || true
fi
fi
""")
PYEOF
OUT_DIR="$out" sh extract.sh
if [ ! -f "$out/app/openclaw.mjs" ]; then
echo "ERROR: /app/openclaw.mjs not found after extraction"
ls -la $out/app/ 2>/dev/null || true
exit 1
fi
echo "Successfully extracted openclaw app"
'';
# Python environment with pymupdf
pythonEnv = pkgs.python3.withPackages (ps: [ ps.pymupdf ]);
# Custom NSS files that include the "node" user (UID 1000, GID 1000).
# fakeNss only creates root/nobody, so we create our own with all three.
openclawNss = pkgs.runCommand "openclaw-nss" { } ''
mkdir -p $out/etc
cat > $out/etc/passwd << 'EOF'
root:x:0:0:root user:/var/empty:/bin/sh
nobody:x:65534:65534:nobody:/var/empty:/bin/sh
node:x:1000:1000::/home/node:/bin/bash
EOF
cat > $out/etc/group << 'EOF'
root:x:0:
nobody:x:65534:
node:x:1000:
EOF
cat > $out/etc/shadow << 'EOF'
root:!x:::::::
nobody:!x:::::::
node:!x:::::::
EOF
'';
# Node user home directory
nodeHome = pkgs.runCommand "node-home" { } ''
mkdir -p $out/home/node
'';
# Docker entrypoint script — equivalent to the upstream docker-entrypoint.sh.
# We replicate it as a Nix derivation to avoid extracting the Debian binary
# layer and to avoid filesystem conflicts in the image customization layer.
dockerEntrypoint = pkgs.writeShellScript "docker-entrypoint.sh" ''
#!/bin/sh
set -e
# Run command with node if the first arg contains a "-" or is not a
# system command. The last part inside the "{}" is a workaround for
# the following bug in ash/dash:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=874264
if [ "''${1#-}" != "$1" ] || [ -z "$(command -v "''${1}")" ] || { [ -f "''${1}" ] && ! [ -x "''${1}" ]; }; then
set -- node "$@"
fi
exec "$@"
'';
# Wrap the entrypoint as a derivation so it can be placed via copyToRoot
# instead of extraCommands (which can't write to paths that already have
# Nix store symlinks from other contents)
entrypointPkg = pkgs.runCommand "docker-entrypoint" { } ''
mkdir -p $out/usr/local/bin
cp ${dockerEntrypoint} $out/usr/local/bin/docker-entrypoint.sh
chmod +x $out/usr/local/bin/docker-entrypoint.sh
'';
in
pkgs.dockerTools.buildLayeredImage {
name = "openclaw";
tag = openclawImageTag;
# Don't use fromImage — see openclawApp derivation comment
maxLayers = 120;
contents = [
# System basics
pkgs.bashInteractive
pkgs.coreutils
pkgs.cacert
# Custom NSS with node user
openclawNss
# Node user home directory
nodeHome
# Docker entrypoint script (in /usr/local/bin)
entrypointPkg
# Runtime package manager (agents can `nix run` arbitrary packages)
pkgs.nix
# Tools baked into the image
pkgs.kubectl
pkgs.jq
pkgs.curl
pkgs.git
pkgs.emacs
# Node.js 22+ (for openclaw runtime, QMD, matrix-bot-sdk)
pkgs.nodejs_22
# TypeScript runtime (for running TypeScript files directly, e.g. via nix run)
pkgs.tsx
# Python with pymupdf (PDF-to-image for Claude vision)
pythonEnv
# Gitea CLI (PR workflow)
pkgs.tea
# QMD — on-device hybrid search for markdown (built with Node.js 22, not Bun)
qmd
];
# NOTE: openclawApp is NOT in contents. It would create /app as a symlink
# to /nix/store/..., which breaks OpenClaw's symlink escape security check
# (resolved paths "escape" /app/dist/extensions). Instead, extraCommands
# copies the real files into /app as a proper directory.
extraCommands = ''
# Create /tmp with correct permissions (needed by Node.js and nix)
mkdir -p tmp
chmod 1777 tmp
# Create /run for nix-daemon socket
mkdir -p run
# Create /var/empty (referenced by NSS passwd home dirs)
mkdir -p var/empty
# Copy OpenClaw app as a REAL directory (not a Nix store symlink).
# The app has a symlink escape check: resolved paths must stay within
# /app/dist/extensions/. If /app is a symlink to /nix/store/HASH/app/,
# realpath resolves to /nix/store/... which "escapes" the boundary.
rm -rf app
mkdir -p app
cp -a ${openclawApp}/app/. app/
'';
config = {
Entrypoint = [ "docker-entrypoint.sh" ];
Cmd = [
"node"
"openclaw.mjs"
"gateway"
"--allow-unconfigured"
];
WorkingDir = "/app";
User = "node";
Env = [
# SSL certificates
"SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt"
"NIX_SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt"
# Nix configuration
"NIX_PATH=nixpkgs=flake:nixpkgs"
# PATH: standard dirs + Nix store bin dirs are appended by buildLayeredImage
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
"NODE_ENV=production"
# Home directory (Docker User directive doesn't set HOME from /etc/passwd)
"HOME=/home/node"
];
};
}

View File

@@ -1,89 +0,0 @@
{
lib,
stdenv,
buildNpmPackage,
fetchzip,
nodejs_22,
python3,
sqlite,
}:
let
version = "2.1.0";
in
buildNpmPackage rec {
pname = "qmd";
inherit version;
src = fetchzip {
url = "https://git.johnogle.info/johno/qmd/archive/220e46e09c90e59bdcb0b4b81753ebc32c45ad80.tar.gz";
sha256 = "sha256-hPTBsJFsCEuPOk2oZqDy5XcbvP4T+aHlgCL3EHO2vb8=";
};
# Vendored package-lock.json generated from QMD's package.json.
# QMD ships bun.lock/pnpm-lock.yaml but not package-lock.json.
# buildNpmPackage requires npm's lockfile format.
postPatch = ''
cp ${./package-lock.json} package-lock.json
'';
# npmDepsHash matches the vendored package-lock.json
npmDepsHash = "sha256-3sis2NIuPDnwAOzWEQBcA+VUsMaxO8Nkuk6wCJ9foBA=";
nodejs = nodejs_22;
nativeBuildInputs = [
nodejs
python3 # for node-gyp (better-sqlite3, sqlite-vec)
];
buildInputs = [
sqlite # for sqlite extension loading at runtime
];
# npm rebuild compiles native addons (better-sqlite3, sqlite-vec) against Node 22's V8
npmRebuildFlags = [ "--build-from-source" ];
# Don't run npm run prepare (it tries to install git hooks)
npmPruneFlags = [ "--omit=dev" ];
buildPhase = ''
runHook preBuild
npm run build
runHook postBuild
'';
installPhase = ''
runHook preInstall
mkdir -p $out/lib/qmd $out/bin
# Copy compiled output, node_modules, and config
cp -r dist node_modules $out/lib/qmd/
cp package.json $out/lib/qmd/
# Create wrapper that runs the compiled CLI with Node.js 22
# Sets LD_LIBRARY_PATH for sqlite-vec extension loading
cat > $out/bin/qmd << EOF
#!/bin/sh
export LD_LIBRARY_PATH="${sqlite.out}/lib''${LD_LIBRARY_PATH:+:}\$LD_LIBRARY_PATH"
exec ${nodejs}/bin/node $out/lib/qmd/dist/cli/qmd.js "\$@"
EOF
chmod +x $out/bin/qmd
runHook postInstall
'';
meta = with lib; {
description = "Query Markup Documents on-device hybrid search for markdown files";
longDescription = ''
QMD combines BM25 full-text search, vector semantic search, and LLM re-ranking.
This build uses Node.js 22 (instead of Bun) to avoid native module ABI issues
with better-sqlite3 and sqlite-vec.
'';
homepage = "https://github.com/tobi/qmd";
license = licenses.mit;
platforms = platforms.linux;
mainProgram = "qmd";
};
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,8 +1,4 @@
{
lib,
pkgs,
...
}:
{ lib, pkgs, ... }:
with lib;
@@ -15,9 +11,6 @@ with lib;
./desktop
./k3s-node
./kodi
# local-inference is NOT imported here because its module-level
# disabledModules/imports conflict with nix-deck's unstable-based
# nixpkgs. Import it directly in machine configs that need it.
./nfs-mounts
./plasma-bigscreen
./nvidia

View File

@@ -1,127 +0,0 @@
{
config,
lib,
pkgs,
nixpkgs-unstable,
...
}:
with lib;
let
cfg = config.roles.local-inference;
llama-cpp-cuda = pkgs.unstable.llama-cpp.override { cudaSupport = true; };
llama-server = getExe' llama-cpp-cuda "llama-server";
in
{
# Replace the stable nixpkgs llama-swap module with the unstable version,
# which may have newer features. For systems already built on unstable
# (e.g., nix-deck), this module is excluded from roles/default.nix instead.
imports = [ "${nixpkgs-unstable}/nixos/modules/services/networking/llama-swap.nix" ];
disabledModules = [ "services/networking/llama-swap.nix" ];
options.roles.local-inference = {
enable = mkEnableOption "Enable local LLM inference via llama-swap + llama.cpp";
models = mkOption {
type = types.attrsOf (
types.submodule {
options = {
hf-model = mkOption {
type = types.str;
description = "HuggingFace model shorthand (e.g. unsloth/Qwen3.6-35B-A3B-GGUF:UD-Q8_K_XL)";
};
aliases = mkOption {
type = types.listOf types.str;
default = [ ];
description = "Aliases for the model in the API";
};
n-gpu-layers = mkOption {
type = types.int;
default = 99;
description = "Number of layers to offload to GPU";
};
cpu-moe = mkOption {
type = types.bool;
default = false;
description = "Offload MoE expert layers to CPU";
};
extraArgs = mkOption {
type = types.listOf types.str;
default = [ ];
description = "Extra arguments passed to llama-server";
};
ttl = mkOption {
type = types.int;
default = -1;
description = "Seconds before unloading model (-1 = use global default, 0 = never unload)";
};
};
}
);
default = { };
description = "Models to serve from HuggingFace";
};
host = mkOption {
type = types.str;
default = "127.0.0.1";
description = "IP address llama-swap listens on";
};
port = mkOption {
type = types.port;
default = 8080;
description = "Port llama-swap listens on";
};
openFirewall = mkOption {
type = types.bool;
default = false;
description = "Open the server port in the firewall";
};
healthCheckTimeout = mkOption {
type = types.int;
default = 600;
description = "Seconds to wait for llama-server health check (model download can take a while)";
};
globalTTL = mkOption {
type = types.int;
default = 0;
description = "Default TTL in seconds before unloading an idle model (0 = never unload)";
};
};
config = mkIf cfg.enable {
systemd.services.llama-swap.environment = {
LLAMA_CACHE = "/var/cache/llama-swap";
HOME = "/var/lib/llama-swap";
};
systemd.services.llama-swap.serviceConfig = {
CacheDirectory = "llama-swap";
StateDirectory = "llama-swap";
};
services.llama-swap = {
enable = true;
listenAddress = cfg.host;
port = cfg.port;
openFirewall = cfg.openFirewall;
settings = {
healthCheckTimeout = cfg.healthCheckTimeout;
globalTTL = cfg.globalTTL;
models = mapAttrs (
name: m:
{
cmd = "${llama-server} --port \${PORT} -hf ${m.hf-model} -ngl ${toString m.n-gpu-layers} --no-webui ${optionalString m.cpu-moe "--cpu-moe"} ${concatStringsSep " " m.extraArgs}";
aliases = m.aliases;
}
// optionalAttrs (m.ttl != -1) { ttl = m.ttl; }
) cfg.models;
};
};
};
}