Compare commits
75 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1701474b3d | ||
|
|
a7e9fbf699 | ||
|
|
358fcaf935 | ||
|
|
f19ddc5400 | ||
|
|
64b58b31ab | ||
|
|
afff85cdff | ||
|
|
a91e6cd643 | ||
|
|
9b2f4a7652 | ||
|
|
c8c97fdf64 | ||
|
|
43272f6fbb | ||
|
|
65c3e90374 | ||
|
|
0eacdd367b | ||
|
|
9fe9323b9c | ||
|
|
bfafb9c179 | ||
|
|
677a6ed84f | ||
|
|
da2d71c3fe | ||
|
|
e124402b7b | ||
|
|
705a7c2137 | ||
|
|
c2c6ddeaf9 | ||
|
|
b509107100 | ||
|
|
34cb28e0b9 | ||
|
|
1da3e18e60 | ||
|
|
5adb096d9d | ||
|
|
81bfe48ed3 | ||
|
|
41a758d6d8 | ||
|
|
5250e9e12a | ||
|
|
b3407759d2 | ||
|
|
c8c765a239 | ||
|
|
775af2973d | ||
|
|
da906847dd | ||
|
|
0a649e6faa | ||
|
|
fb40fa1405 | ||
|
|
7bfc2fcb76 | ||
|
|
376305e9d9 | ||
|
|
73f5b4025b | ||
|
|
c756f12d00 | ||
|
|
8d5611f14e | ||
|
|
98e154b18e | ||
|
|
38adfa4d8b | ||
|
|
03b0f7ff52 | ||
|
|
3b628150c2 | ||
|
|
1afe3fb823 | ||
|
|
caa88d96c5 | ||
|
|
4c9e8b8b99 | ||
|
|
c699e3e2ed | ||
|
|
65ecb6cafd | ||
|
|
540e33dbe9 | ||
|
|
85dd150d75 | ||
|
|
45634059dd | ||
|
|
d4da2b325d | ||
|
|
4985bdfbcc | ||
|
|
f4cbcb4ce9 | ||
|
|
c4d956ebe7 | ||
|
|
7f6fe53c6f | ||
|
|
19f4fa3ddb | ||
|
|
e648edce8c | ||
|
|
8a8b56e9e6 | ||
|
|
c91ab85457 | ||
|
|
00a59dec44 | ||
|
|
2de2d6b7e4 | ||
|
|
f30178265c | ||
|
|
5141facb21 | ||
|
|
15caf62b9f | ||
|
|
28a9de64d5 | ||
|
|
a9ed342be6 | ||
|
|
f9e788ccfb | ||
|
|
c220678162 | ||
|
|
b649635f48 | ||
|
|
117b91b87f | ||
|
|
ffa8dd56cb | ||
|
|
92042d679c | ||
|
|
585c204648 | ||
|
|
6209a49d54 | ||
|
|
ffeff97d9f | ||
|
|
9b5c889795 |
@@ -27,7 +27,7 @@ Observe the current system state to inform triage decisions.
|
||||
**Step 1: Check Deacon state**
|
||||
```bash
|
||||
# Is Deacon session alive?
|
||||
tmux has-session -t gt-deacon 2>/dev/null && echo "alive" || echo "dead"
|
||||
tmux has-session -t hq-deacon 2>/dev/null && echo "alive" || echo "dead"
|
||||
|
||||
# If alive, what's the pane output showing?
|
||||
gt peek deacon --lines 20
|
||||
@@ -125,7 +125,7 @@ gt nudge deacon "Boot check-in: you have pending work"
|
||||
**WAKE**
|
||||
```bash
|
||||
# Send escape to break any tool waiting
|
||||
tmux send-keys -t gt-deacon Escape
|
||||
tmux send-keys -t hq-deacon Escape
|
||||
|
||||
# Brief pause
|
||||
sleep 1
|
||||
|
||||
@@ -23,7 +23,7 @@ Witnesses detect it and escalate to the Mayor.
|
||||
The Deacon's agent bead last_activity timestamp is updated during each patrol
|
||||
cycle. Witnesses check this timestamp to verify health."""
|
||||
formula = "mol-deacon-patrol"
|
||||
version = 4
|
||||
version = 6
|
||||
|
||||
[[steps]]
|
||||
id = "inbox-check"
|
||||
@@ -148,6 +148,49 @@ bd gate list --json
|
||||
After closing a gate, the Waiters field contains mail addresses to notify.
|
||||
Send a brief notification to each waiter that the gate has cleared."""
|
||||
|
||||
[[steps]]
|
||||
id = "dispatch-gated-molecules"
|
||||
title = "Dispatch molecules with resolved gates"
|
||||
needs = ["gate-evaluation"]
|
||||
description = """
|
||||
Find molecules blocked on gates that have now closed and dispatch them.
|
||||
|
||||
This completes the async resume cycle without explicit waiter tracking.
|
||||
The molecule state IS the waiter - patrol discovers reality each cycle.
|
||||
|
||||
**Step 1: Find gate-ready molecules**
|
||||
```bash
|
||||
bd mol ready --gated --json
|
||||
```
|
||||
|
||||
This returns molecules where:
|
||||
- Status is in_progress
|
||||
- Current step has a gate dependency
|
||||
- The gate bead is now closed
|
||||
- No polecat currently has it hooked
|
||||
|
||||
**Step 2: For each ready molecule, dispatch to the appropriate rig**
|
||||
```bash
|
||||
# Determine target rig from molecule metadata
|
||||
bd mol show <mol-id> --json
|
||||
# Look for rig field or infer from prefix
|
||||
|
||||
# Dispatch to that rig's polecat pool
|
||||
gt sling <mol-id> <rig>/polecats
|
||||
```
|
||||
|
||||
**Step 3: Log dispatch**
|
||||
Note which molecules were dispatched for observability:
|
||||
```bash
|
||||
# Molecule <mol-id> dispatched to <rig>/polecats (gate <gate-id> cleared)
|
||||
```
|
||||
|
||||
**If no gate-ready molecules:**
|
||||
Skip - nothing to dispatch. Gates haven't closed yet or molecules
|
||||
already have active polecats working on them.
|
||||
|
||||
**Exit criteria:** All gate-ready molecules dispatched to polecats."""
|
||||
|
||||
[[steps]]
|
||||
id = "check-convoy-completion"
|
||||
title = "Check convoy completion"
|
||||
@@ -258,7 +301,7 @@ Keep notifications brief and actionable. The recipient can run bd show for detai
|
||||
[[steps]]
|
||||
id = "health-scan"
|
||||
title = "Check Witness and Refinery health"
|
||||
needs = ["trigger-pending-spawns", "gate-evaluation", "fire-notifications"]
|
||||
needs = ["trigger-pending-spawns", "dispatch-gated-molecules", "fire-notifications"]
|
||||
description = """
|
||||
Check Witness and Refinery health for each rig.
|
||||
|
||||
@@ -342,14 +385,21 @@ Reset unresponsive_cycles to 0 when component responds normally."""
|
||||
|
||||
[[steps]]
|
||||
id = "zombie-scan"
|
||||
title = "Backup check for zombie polecats"
|
||||
title = "Detect zombie polecats (NO KILL AUTHORITY)"
|
||||
needs = ["health-scan"]
|
||||
description = """
|
||||
Defense-in-depth check for zombie polecats that Witness should have cleaned.
|
||||
Defense-in-depth DETECTION of zombie polecats that Witness should have cleaned.
|
||||
|
||||
**⚠️ CRITICAL: The Deacon has NO kill authority.**
|
||||
|
||||
These are workers with context, mid-task progress, unsaved state. Every kill
|
||||
destroys work. File the warrant and let Boot handle interrogation and execution.
|
||||
You do NOT have kill authority.
|
||||
|
||||
**Why this exists:**
|
||||
The Witness is responsible for nuking polecats after they complete work (via POLECAT_DONE).
|
||||
This step provides backup detection in case the Witness fails to clean up.
|
||||
The Witness is responsible for cleaning up polecats after they complete work.
|
||||
This step provides backup DETECTION in case the Witness fails to clean up.
|
||||
Detection only - Boot handles termination.
|
||||
|
||||
**Zombie criteria:**
|
||||
- State: idle or done (no active work assigned)
|
||||
@@ -357,26 +407,34 @@ This step provides backup detection in case the Witness fails to clean up.
|
||||
- No hooked work (nothing pending for this polecat)
|
||||
- Last activity: older than 10 minutes
|
||||
|
||||
**Run the zombie scan:**
|
||||
**Run the zombie scan (DRY RUN ONLY):**
|
||||
```bash
|
||||
gt deacon zombie-scan --dry-run
|
||||
```
|
||||
|
||||
**NEVER run:**
|
||||
- `gt deacon zombie-scan` (without --dry-run)
|
||||
- `tmux kill-session`
|
||||
- `gt polecat nuke`
|
||||
- Any command that terminates a session
|
||||
|
||||
**If zombies detected:**
|
||||
1. Review the output to confirm they are truly abandoned
|
||||
2. Run without --dry-run to nuke them:
|
||||
2. File a death warrant for each detected zombie:
|
||||
```bash
|
||||
gt deacon zombie-scan
|
||||
gt warrant file <polecat> --reason "Zombie detected: no session, no hook, idle >10m"
|
||||
```
|
||||
3. Boot will handle interrogation and execution
|
||||
4. Notify the Mayor about Witness failure:
|
||||
```bash
|
||||
gt mail send mayor/ -s "Witness cleanup failure" \
|
||||
-m "Filed death warrant for <polecat>. Witness failed to clean up."
|
||||
```
|
||||
3. This will:
|
||||
- Nuke each zombie polecat
|
||||
- Notify the Mayor about Witness failure
|
||||
- Log the cleanup action
|
||||
|
||||
**If no zombies:**
|
||||
No action needed - Witness is doing its job.
|
||||
|
||||
**Note:** This is a backup mechanism. If you frequently find zombies,
|
||||
**Note:** This is a backup mechanism. If you frequently detect zombies,
|
||||
investigate why the Witness isn't cleaning up properly."""
|
||||
|
||||
[[steps]]
|
||||
@@ -505,10 +563,48 @@ Skip dispatch - system is healthy.
|
||||
|
||||
**Exit criteria:** Session GC dispatched to dog (if needed)."""
|
||||
|
||||
[[steps]]
|
||||
id = "costs-digest"
|
||||
title = "Aggregate daily costs"
|
||||
needs = ["session-gc"]
|
||||
description = """
|
||||
**DAILY DIGEST** - Aggregate yesterday's session cost wisps.
|
||||
|
||||
Session costs are recorded as ephemeral wisps (not exported to JSONL) to avoid
|
||||
log-in-database pollution. This step aggregates them into a permanent daily
|
||||
"Cost Report YYYY-MM-DD" bead for audit purposes.
|
||||
|
||||
**Step 1: Check if digest is needed**
|
||||
```bash
|
||||
# Preview yesterday's costs (dry run)
|
||||
gt costs digest --yesterday --dry-run
|
||||
```
|
||||
|
||||
If output shows "No session cost wisps found", skip to Step 3.
|
||||
|
||||
**Step 2: Create the digest**
|
||||
```bash
|
||||
gt costs digest --yesterday
|
||||
```
|
||||
|
||||
This:
|
||||
- Queries all session.ended wisps from yesterday
|
||||
- Creates a single "Cost Report YYYY-MM-DD" bead with aggregated data
|
||||
- Deletes the source wisps
|
||||
|
||||
**Step 3: Verify**
|
||||
The digest appears in `gt costs --week` queries.
|
||||
Daily digests preserve audit trail without per-session pollution.
|
||||
|
||||
**Timing**: Run once per morning patrol cycle. The --yesterday flag ensures
|
||||
we don't try to digest today's incomplete data.
|
||||
|
||||
**Exit criteria:** Yesterday's costs digested (or no wisps to digest)."""
|
||||
|
||||
[[steps]]
|
||||
id = "log-maintenance"
|
||||
title = "Rotate logs and prune state"
|
||||
needs = ["session-gc"]
|
||||
needs = ["costs-digest"]
|
||||
description = """
|
||||
Maintain daemon logs and state files.
|
||||
|
||||
|
||||
@@ -1,62 +0,0 @@
|
||||
{
|
||||
"formula": "mol-gastown-boot",
|
||||
"description": "Mayor bootstraps Gas Town via a verification-gated lifecycle molecule.\n\n## Purpose\nWhen Mayor executes \"boot up gas town\", this proto provides the workflow.\nEach step has action + verification - steps stay open until outcome is confirmed.\n\n## Key Principles\n1. **Verification-gated steps** - Not \"command ran\" but \"outcome confirmed\"\n2. **gt peek for verification** - Capture session output to detect stalls\n3. **gt nudge for recovery** - Reliable message delivery to unstick agents\n4. **Parallel where possible** - Witnesses and refineries can start in parallel\n5. **Ephemeral execution** - Boot is a wisp, squashed to digest after completion\n\n## Execution\n```bash\nbd mol wisp mol-gastown-boot # Create wisp\n```",
|
||||
"version": 1,
|
||||
"steps": [
|
||||
{
|
||||
"id": "ensure-daemon",
|
||||
"title": "Ensure daemon",
|
||||
"description": "Verify the Gas Town daemon is running.\n\n## Action\n```bash\ngt daemon status || gt daemon start\n```\n\n## Verify\n1. Daemon PID file exists: `~/.gt/daemon.pid`\n2. Process is alive: `kill -0 $(cat ~/.gt/daemon.pid)`\n3. Daemon responds: `gt daemon status` returns success\n\n## OnFail\nCannot start daemon. Log error and continue - some commands work without daemon."
|
||||
},
|
||||
{
|
||||
"id": "ensure-deacon",
|
||||
"title": "Ensure deacon",
|
||||
"needs": ["ensure-daemon"],
|
||||
"description": "Start the Deacon and verify patrol mode is active.\n\n## Action\n```bash\ngt deacon start\n```\n\n## Verify\n1. Session exists: `tmux has-session -t gt-deacon 2>/dev/null`\n2. Not stalled: `gt peek deacon/` does NOT show \"> Try\" prompt\n3. Heartbeat fresh: `deacon/heartbeat.json` modified < 2 min ago\n\n## OnStall\n```bash\ngt nudge deacon/ \"Start patrol.\"\nsleep 30\n# Re-verify\n```"
|
||||
},
|
||||
{
|
||||
"id": "ensure-witnesses",
|
||||
"title": "Ensure witnesses",
|
||||
"needs": ["ensure-deacon"],
|
||||
"type": "parallel",
|
||||
"description": "Parallel container: Start all rig witnesses.\n\nChildren execute in parallel. Container completes when all children complete.",
|
||||
"children": [
|
||||
{
|
||||
"id": "ensure-gastown-witness",
|
||||
"title": "Ensure gastown witness",
|
||||
"description": "Start the gastown rig Witness.\n\n## Action\n```bash\ngt witness start gastown\n```\n\n## Verify\n1. Session exists: `tmux has-session -t gastown-witness 2>/dev/null`\n2. Not stalled: `gt peek gastown/witness` does NOT show \"> Try\" prompt\n3. Heartbeat fresh: Last patrol cycle < 5 min ago"
|
||||
},
|
||||
{
|
||||
"id": "ensure-beads-witness",
|
||||
"title": "Ensure beads witness",
|
||||
"description": "Start the beads rig Witness.\n\n## Action\n```bash\ngt witness start beads\n```\n\n## Verify\n1. Session exists: `tmux has-session -t beads-witness 2>/dev/null`\n2. Not stalled: `gt peek beads/witness` does NOT show \"> Try\" prompt\n3. Heartbeat fresh: Last patrol cycle < 5 min ago"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "ensure-refineries",
|
||||
"title": "Ensure refineries",
|
||||
"needs": ["ensure-deacon"],
|
||||
"type": "parallel",
|
||||
"description": "Parallel container: Start all rig refineries.\n\nChildren execute in parallel. Container completes when all children complete.",
|
||||
"children": [
|
||||
{
|
||||
"id": "ensure-gastown-refinery",
|
||||
"title": "Ensure gastown refinery",
|
||||
"description": "Start the gastown rig Refinery.\n\n## Action\n```bash\ngt refinery start gastown\n```\n\n## Verify\n1. Session exists: `tmux has-session -t gastown-refinery 2>/dev/null`\n2. Not stalled: `gt peek gastown/refinery` does NOT show \"> Try\" prompt\n3. Queue processing: Refinery can receive merge requests"
|
||||
},
|
||||
{
|
||||
"id": "ensure-beads-refinery",
|
||||
"title": "Ensure beads refinery",
|
||||
"description": "Start the beads rig Refinery.\n\n## Action\n```bash\ngt refinery start beads\n```\n\n## Verify\n1. Session exists: `tmux has-session -t beads-refinery 2>/dev/null`\n2. Not stalled: `gt peek beads/refinery` does NOT show \"> Try\" prompt\n3. Queue processing: Refinery can receive merge requests"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "verify-town-health",
|
||||
"title": "Verify town health",
|
||||
"needs": ["ensure-witnesses", "ensure-refineries"],
|
||||
"description": "Final verification that Gas Town is healthy.\n\n## Action\n```bash\ngt status\n```\n\n## Verify\n1. Daemon running: Shows daemon status OK\n2. Deacon active: Shows deacon in patrol mode\n3. All witnesses: Each rig witness shows active\n4. All refineries: Each rig refinery shows active\n\n## OnFail\nLog degraded state but consider boot complete. Some agents may need manual recovery.\nRun `gt doctor` for detailed diagnostics."
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -48,7 +48,7 @@ gt deacon start
|
||||
```
|
||||
|
||||
## Verify
|
||||
1. Session exists: `tmux has-session -t gt-deacon 2>/dev/null`
|
||||
1. Session exists: `tmux has-session -t hq-deacon 2>/dev/null`
|
||||
2. Not stalled: `gt peek deacon/` does NOT show \"> Try\" prompt
|
||||
3. Heartbeat fresh: `deacon/heartbeat.json` modified < 2 min ago
|
||||
|
||||
|
||||
519
.beads/formulas/mol-shutdown-dance.formula.toml
Normal file
519
.beads/formulas/mol-shutdown-dance.formula.toml
Normal file
@@ -0,0 +1,519 @@
|
||||
description = """
|
||||
Death warrant execution state machine for Dogs.
|
||||
|
||||
Dogs execute this molecule to process death warrants. Each Dog is a lightweight
|
||||
goroutine (NOT a Claude session) that runs the interrogation state machine.
|
||||
|
||||
## Architecture Context
|
||||
|
||||
Dogs are lightweight workers in Boot's pool (see dog-pool-architecture.md):
|
||||
- Fixed pool of 5 goroutines (configurable via GT_DOG_POOL_SIZE)
|
||||
- State persisted to ~/gt/deacon/dogs/active/<id>.json
|
||||
- Recovery on Boot restart via orphan state files
|
||||
|
||||
## State Machine
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────┐
|
||||
│ │
|
||||
▼ │
|
||||
┌───────────────────────────┐ │
|
||||
│ INTERROGATING │ │
|
||||
│ │ │
|
||||
│ 1. Send health check │ │
|
||||
│ 2. Open timeout gate │ │
|
||||
└───────────┬───────────────┘ │
|
||||
│ │
|
||||
│ gate closes (timeout or response) │
|
||||
▼ │
|
||||
┌───────────────────────────┐ │
|
||||
│ EVALUATING │ │
|
||||
│ │ │
|
||||
│ Check tmux output for │ │
|
||||
│ ALIVE keyword │ │
|
||||
└───────────┬───────────────┘ │
|
||||
│ │
|
||||
┌───────┴───────┐ │
|
||||
│ │ │
|
||||
▼ ▼ │
|
||||
[ALIVE found] [No ALIVE] │
|
||||
│ │ │
|
||||
│ │ attempt < 3? │
|
||||
│ ├──────────────────────────────────→─┘
|
||||
│ │ yes: attempt++, longer timeout
|
||||
│ │
|
||||
│ │ no: attempt == 3
|
||||
▼ ▼
|
||||
┌─────────┐ ┌─────────────┐
|
||||
│ PARDONED│ │ EXECUTING │
|
||||
│ │ │ │
|
||||
│ Cancel │ │ Kill tmux │
|
||||
│ warrant │ │ session │
|
||||
└────┬────┘ └──────┬──────┘
|
||||
│ │
|
||||
└────────┬───────┘
|
||||
│
|
||||
▼
|
||||
┌────────────────┐
|
||||
│ EPITAPH │
|
||||
│ │
|
||||
│ Log outcome │
|
||||
│ Release dog │
|
||||
└────────────────┘
|
||||
```
|
||||
|
||||
## Timeout Gates
|
||||
|
||||
| Attempt | Timeout | Cumulative Wait |
|
||||
|---------|---------|-----------------|
|
||||
| 1 | 60s | 60s |
|
||||
| 2 | 120s | 180s (3 min) |
|
||||
| 3 | 240s | 420s (7 min) |
|
||||
|
||||
Timeout gates work like this:
|
||||
- Gate opens when interrogation message is sent
|
||||
- Gate closes when EITHER:
|
||||
a) Timeout expires (proceed to evaluate)
|
||||
b) Response detected (early close, proceed to evaluate)
|
||||
- The gate state determines the evaluation outcome
|
||||
|
||||
## Interrogation Message Format
|
||||
|
||||
```
|
||||
[DOG] HEALTH CHECK: Session {target}, respond ALIVE within {timeout}s or face termination.
|
||||
Warrant reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempt: {attempt}/3
|
||||
```
|
||||
|
||||
## Response Detection
|
||||
|
||||
The Dog checks tmux output for:
|
||||
1. The ALIVE keyword (explicit response)
|
||||
2. Any Claude output after the health check (implicit activity)
|
||||
|
||||
```go
|
||||
func (d *Dog) CheckForResponse() bool {
|
||||
output := tmux.CapturePane(d.Warrant.Target, 50) // Last 50 lines
|
||||
return strings.Contains(output, "ALIVE")
|
||||
}
|
||||
```
|
||||
|
||||
## Variables
|
||||
|
||||
| Variable | Source | Description |
|
||||
|-------------|-------------|-----------------------------------------------|
|
||||
| warrant_id | hook_bead | Bead ID of the death warrant |
|
||||
| target | warrant | Session name to interrogate |
|
||||
| reason | warrant | Why warrant was issued |
|
||||
| requester | warrant | Who filed the warrant (e.g., deacon, witness) |
|
||||
|
||||
## Integration
|
||||
|
||||
Dogs are NOT Claude sessions. This molecule is:
|
||||
1. A specification document (defines the state machine)
|
||||
2. A reference for Go implementation in internal/shutdown/
|
||||
3. A template for creating warrant-tracking beads
|
||||
|
||||
The Go implementation follows this spec exactly."""
|
||||
formula = "mol-shutdown-dance"
|
||||
version = 1
|
||||
|
||||
[squash]
|
||||
trigger = "on_complete"
|
||||
template_type = "operational"
|
||||
include_metrics = true
|
||||
|
||||
# ============================================================================
|
||||
# STEP 1: WARRANT_RECEIVED
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "warrant-received"
|
||||
title = "Receive and validate death warrant"
|
||||
description = """
|
||||
Entry point when Dog is allocated from pool.
|
||||
|
||||
**1. Read warrant from allocation:**
|
||||
The Dog receives a Warrant struct containing:
|
||||
- ID: Bead ID of the warrant
|
||||
- Target: Session name (e.g., "gt-gastown-Toast")
|
||||
- Reason: Why termination requested
|
||||
- Requester: Who filed (deacon, witness, mayor)
|
||||
- FiledAt: Timestamp
|
||||
|
||||
**2. Validate target exists:**
|
||||
```bash
|
||||
tmux has-session -t {target} 2>/dev/null
|
||||
```
|
||||
|
||||
If target doesn't exist:
|
||||
- Warrant is stale (already dead)
|
||||
- Skip to EPITAPH with outcome=already_dead
|
||||
|
||||
**3. Initialize state file:**
|
||||
Write initial state to ~/gt/deacon/dogs/active/{dog-id}.json
|
||||
|
||||
**4. Set initial attempt counter:**
|
||||
attempt = 1
|
||||
|
||||
**Exit criteria:** Warrant validated, target confirmed alive, state initialized."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 2: INTERROGATION_1 (60s timeout)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "interrogation-1"
|
||||
title = "First interrogation (60s timeout)"
|
||||
needs = ["warrant-received"]
|
||||
description = """
|
||||
First attempt to contact the session.
|
||||
|
||||
**1. Compose health check message:**
|
||||
```
|
||||
[DOG] HEALTH CHECK: Session {target}, respond ALIVE within 60s or face termination.
|
||||
Warrant reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempt: 1/3
|
||||
```
|
||||
|
||||
**2. Send via tmux:**
|
||||
```bash
|
||||
tmux send-keys -t {target} "{message}" Enter
|
||||
```
|
||||
|
||||
**3. Open timeout gate:**
|
||||
Gate configuration:
|
||||
- Type: timer
|
||||
- Timeout: 60 seconds
|
||||
- Close conditions:
|
||||
a) Timer expires
|
||||
b) ALIVE keyword detected in output
|
||||
|
||||
**4. Wait for gate to close:**
|
||||
The Dog waits (select on timer channel or early close signal).
|
||||
|
||||
**5. Record interrogation timestamp:**
|
||||
Update state file with last_message_at.
|
||||
|
||||
**Exit criteria:** Message sent, waiting for gate to close."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 3: EVALUATE_1
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "evaluate-1"
|
||||
title = "Evaluate first interrogation response"
|
||||
needs = ["interrogation-1"]
|
||||
description = """
|
||||
Check if session responded to first interrogation.
|
||||
|
||||
**1. Capture tmux output:**
|
||||
```bash
|
||||
tmux capture-pane -t {target} -p | tail -50
|
||||
```
|
||||
|
||||
**2. Check for ALIVE keyword:**
|
||||
```go
|
||||
if strings.Contains(output, "ALIVE") {
|
||||
return PARDONED
|
||||
}
|
||||
```
|
||||
|
||||
**3. Decision:**
|
||||
- ALIVE found → Proceed to PARDON
|
||||
- No ALIVE → Proceed to INTERROGATION_2
|
||||
|
||||
**Exit criteria:** Response evaluated, next step determined."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 4: INTERROGATION_2 (120s timeout)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "interrogation-2"
|
||||
title = "Second interrogation (120s timeout)"
|
||||
needs = ["evaluate-1"]
|
||||
gate = { type = "conditional", condition = "no_response_1" }
|
||||
description = """
|
||||
Second attempt with longer timeout.
|
||||
|
||||
Only executed if evaluate-1 found no response.
|
||||
|
||||
**1. Increment attempt:**
|
||||
attempt = 2
|
||||
|
||||
**2. Compose health check message:**
|
||||
```
|
||||
[DOG] HEALTH CHECK: Session {target}, respond ALIVE within 120s or face termination.
|
||||
Warrant reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempt: 2/3
|
||||
```
|
||||
|
||||
**3. Send via tmux:**
|
||||
```bash
|
||||
tmux send-keys -t {target} "{message}" Enter
|
||||
```
|
||||
|
||||
**4. Open timeout gate:**
|
||||
- Type: timer
|
||||
- Timeout: 120 seconds
|
||||
|
||||
**5. Wait for gate to close.**
|
||||
|
||||
**Exit criteria:** Second message sent, waiting for gate."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 5: EVALUATE_2
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "evaluate-2"
|
||||
title = "Evaluate second interrogation response"
|
||||
needs = ["interrogation-2"]
|
||||
description = """
|
||||
Check if session responded to second interrogation.
|
||||
|
||||
**1. Capture tmux output:**
|
||||
```bash
|
||||
tmux capture-pane -t {target} -p | tail -50
|
||||
```
|
||||
|
||||
**2. Check for ALIVE keyword.**
|
||||
|
||||
**3. Decision:**
|
||||
- ALIVE found → Proceed to PARDON
|
||||
- No ALIVE → Proceed to INTERROGATION_3
|
||||
|
||||
**Exit criteria:** Response evaluated, next step determined."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 6: INTERROGATION_3 (240s timeout)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "interrogation-3"
|
||||
title = "Final interrogation (240s timeout)"
|
||||
needs = ["evaluate-2"]
|
||||
gate = { type = "conditional", condition = "no_response_2" }
|
||||
description = """
|
||||
Final attempt before execution.
|
||||
|
||||
Only executed if evaluate-2 found no response.
|
||||
|
||||
**1. Increment attempt:**
|
||||
attempt = 3
|
||||
|
||||
**2. Compose health check message:**
|
||||
```
|
||||
[DOG] HEALTH CHECK: Session {target}, respond ALIVE within 240s or face termination.
|
||||
Warrant reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempt: 3/3
|
||||
```
|
||||
|
||||
**3. Send via tmux:**
|
||||
```bash
|
||||
tmux send-keys -t {target} "{message}" Enter
|
||||
```
|
||||
|
||||
**4. Open timeout gate:**
|
||||
- Type: timer
|
||||
- Timeout: 240 seconds
|
||||
- This is the FINAL chance
|
||||
|
||||
**5. Wait for gate to close.**
|
||||
|
||||
**Exit criteria:** Final message sent, waiting for gate."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 7: EVALUATE_3
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "evaluate-3"
|
||||
title = "Evaluate final interrogation response"
|
||||
needs = ["interrogation-3"]
|
||||
description = """
|
||||
Final evaluation before execution.
|
||||
|
||||
**1. Capture tmux output:**
|
||||
```bash
|
||||
tmux capture-pane -t {target} -p | tail -50
|
||||
```
|
||||
|
||||
**2. Check for ALIVE keyword.**
|
||||
|
||||
**3. Decision:**
|
||||
- ALIVE found → Proceed to PARDON
|
||||
- No ALIVE → Proceed to EXECUTE
|
||||
|
||||
**Exit criteria:** Final decision made."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 8: PARDON (success path)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "pardon"
|
||||
title = "Pardon session - cancel warrant"
|
||||
needs = ["evaluate-1", "evaluate-2", "evaluate-3"]
|
||||
gate = { type = "conditional", condition = "alive_detected" }
|
||||
description = """
|
||||
Session responded - cancel the death warrant.
|
||||
|
||||
**1. Update state:**
|
||||
state = PARDONED
|
||||
|
||||
**2. Record pardon details:**
|
||||
```json
|
||||
{
|
||||
"outcome": "pardoned",
|
||||
"attempt": {attempt},
|
||||
"response_time": "{time_since_last_interrogation}s",
|
||||
"pardoned_at": "{timestamp}"
|
||||
}
|
||||
```
|
||||
|
||||
**3. Cancel warrant bead:**
|
||||
```bash
|
||||
bd close {warrant_id} --reason "Session responded at attempt {attempt}"
|
||||
```
|
||||
|
||||
**4. Notify requester:**
|
||||
```bash
|
||||
gt mail send {requester}/ -s "PARDON: {target}" -m "Death warrant cancelled.
|
||||
Session responded after attempt {attempt}.
|
||||
Warrant: {warrant_id}
|
||||
Response detected: {timestamp}"
|
||||
```
|
||||
|
||||
**Exit criteria:** Warrant cancelled, requester notified."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 9: EXECUTE (termination path)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "execute"
|
||||
title = "Execute warrant - kill session"
|
||||
needs = ["evaluate-3"]
|
||||
gate = { type = "conditional", condition = "no_response_final" }
|
||||
description = """
|
||||
Session unresponsive after 3 attempts - execute the warrant.
|
||||
|
||||
**1. Update state:**
|
||||
state = EXECUTING
|
||||
|
||||
**2. Kill the tmux session:**
|
||||
```bash
|
||||
tmux kill-session -t {target}
|
||||
```
|
||||
|
||||
**3. Verify session is dead:**
|
||||
```bash
|
||||
tmux has-session -t {target} 2>/dev/null
|
||||
# Should fail (session gone)
|
||||
```
|
||||
|
||||
**4. If session still exists (kill failed):**
|
||||
- Force kill with tmux kill-server if isolated
|
||||
- Or escalate to Boot for manual intervention
|
||||
|
||||
**5. Record execution details:**
|
||||
```json
|
||||
{
|
||||
"outcome": "executed",
|
||||
"attempts": 3,
|
||||
"total_wait": "420s",
|
||||
"executed_at": "{timestamp}"
|
||||
}
|
||||
```
|
||||
|
||||
**Exit criteria:** Session terminated."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 10: EPITAPH (completion)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "epitaph"
|
||||
title = "Log cause of death and close warrant"
|
||||
needs = ["pardon", "execute"]
|
||||
description = """
|
||||
Final step - create audit record and release Dog back to pool.
|
||||
|
||||
**1. Compose epitaph based on outcome:**
|
||||
|
||||
For PARDONED:
|
||||
```
|
||||
EPITAPH: {target}
|
||||
Verdict: PARDONED
|
||||
Warrant: {warrant_id}
|
||||
Reason: {reason}
|
||||
Filed by: {requester}
|
||||
Response: Attempt {attempt}, after {wait_time}s
|
||||
Pardoned at: {timestamp}
|
||||
```
|
||||
|
||||
For EXECUTED:
|
||||
```
|
||||
EPITAPH: {target}
|
||||
Verdict: EXECUTED
|
||||
Warrant: {warrant_id}
|
||||
Reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempts: 3 (60s + 120s + 240s = 420s total)
|
||||
Executed at: {timestamp}
|
||||
```
|
||||
|
||||
For ALREADY_DEAD (target gone before interrogation):
|
||||
```
|
||||
EPITAPH: {target}
|
||||
Verdict: ALREADY_DEAD
|
||||
Warrant: {warrant_id}
|
||||
Reason: {reason}
|
||||
Filed by: {requester}
|
||||
Note: Target session not found at warrant processing
|
||||
```
|
||||
|
||||
**2. Close warrant bead:**
|
||||
```bash
|
||||
bd close {warrant_id} --reason "{epitaph_summary}"
|
||||
```
|
||||
|
||||
**3. Move state file to completed:**
|
||||
```bash
|
||||
mv ~/gt/deacon/dogs/active/{dog-id}.json ~/gt/deacon/dogs/completed/
|
||||
```
|
||||
|
||||
**4. Report to Boot:**
|
||||
Write completion file: ~/gt/deacon/dogs/active/{dog-id}.done
|
||||
```json
|
||||
{
|
||||
"dog_id": "{dog-id}",
|
||||
"warrant_id": "{warrant_id}",
|
||||
"target": "{target}",
|
||||
"outcome": "{pardoned|executed|already_dead}",
|
||||
"duration": "{total_duration}s"
|
||||
}
|
||||
```
|
||||
|
||||
**5. Release Dog to pool:**
|
||||
Dog resets state and returns to idle channel.
|
||||
|
||||
**Exit criteria:** Warrant closed, Dog released, audit complete."""
|
||||
|
||||
# ============================================================================
|
||||
# VARIABLES
|
||||
# ============================================================================
|
||||
[vars]
|
||||
[vars.warrant_id]
|
||||
description = "Bead ID of the death warrant being processed"
|
||||
required = true
|
||||
|
||||
[vars.target]
|
||||
description = "Session name to interrogate (e.g., gt-gastown-Toast)"
|
||||
required = true
|
||||
|
||||
[vars.reason]
|
||||
description = "Why the warrant was issued"
|
||||
required = true
|
||||
|
||||
[vars.requester]
|
||||
description = "Who filed the warrant (deacon, witness, mayor)"
|
||||
required = true
|
||||
default = "deacon"
|
||||
120
.github/workflows/ci.yml
vendored
120
.github/workflows/ci.yml
vendored
@@ -68,6 +68,8 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
@@ -82,8 +84,122 @@ jobs:
|
||||
- name: Build
|
||||
run: go build -v ./cmd/gt
|
||||
|
||||
- name: Test
|
||||
run: go test -v -race -short ./...
|
||||
- name: Test with Coverage
|
||||
run: |
|
||||
go test -race -short -coverprofile=coverage.out ./... 2>&1 | tee test-output.txt
|
||||
|
||||
- name: Upload Coverage Data
|
||||
if: github.event_name == 'pull_request'
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: coverage-data
|
||||
path: |
|
||||
coverage.out
|
||||
test-output.txt
|
||||
|
||||
# Separate job to process coverage after ALL tests complete
|
||||
coverage:
|
||||
name: Coverage Report
|
||||
runs-on: ubuntu-latest
|
||||
needs: [test, integration]
|
||||
if: github.event_name == 'pull_request'
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.24'
|
||||
|
||||
- name: Download Coverage Data
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: coverage-data
|
||||
|
||||
- name: Generate Coverage Report
|
||||
run: |
|
||||
# Parse per-package coverage from test output
|
||||
echo "## Code Coverage Report" > coverage-report.md
|
||||
echo "" >> coverage-report.md
|
||||
|
||||
# Get overall coverage
|
||||
TOTAL=$(go tool cover -func=coverage.out | grep total | awk '{print $3}')
|
||||
echo "**Overall Coverage: ${TOTAL}**" >> coverage-report.md
|
||||
echo "" >> coverage-report.md
|
||||
|
||||
# Create per-package table
|
||||
echo "| Package | Coverage |" >> coverage-report.md
|
||||
echo "|---------|----------|" >> coverage-report.md
|
||||
|
||||
# Extract package coverage from all test output lines
|
||||
grep -E "github.com/steveyegge/gastown.*coverage:" test-output.txt | \
|
||||
sed 's/.*github.com\/steveyegge\/gastown\///' | \
|
||||
awk '{
|
||||
pkg = $1
|
||||
for (i=2; i<=NF; i++) {
|
||||
if ($i == "coverage:") {
|
||||
cov = $(i+1)
|
||||
break
|
||||
}
|
||||
}
|
||||
printf "| %s | %s |\n", pkg, cov
|
||||
}' | sort -u >> coverage-report.md
|
||||
|
||||
echo "" >> coverage-report.md
|
||||
echo "---" >> coverage-report.md
|
||||
echo "_Generated by CI_" >> coverage-report.md
|
||||
|
||||
# Show in logs
|
||||
cat coverage-report.md
|
||||
|
||||
- name: Upload Coverage Report
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: coverage-report
|
||||
path: coverage-report.md
|
||||
retention-days: 30
|
||||
|
||||
- name: Comment Coverage on PR
|
||||
# Only for internal PRs - fork PRs can't write comments
|
||||
if: github.event.pull_request.head.repo.full_name == github.repository
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const fs = require('fs');
|
||||
const report = fs.readFileSync('coverage-report.md', 'utf8');
|
||||
|
||||
// Find existing coverage comment
|
||||
const { data: comments } = await github.rest.issues.listComments({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
});
|
||||
|
||||
const botComment = comments.find(comment =>
|
||||
comment.user.type === 'Bot' &&
|
||||
comment.body.includes('## Code Coverage Report')
|
||||
);
|
||||
|
||||
if (botComment) {
|
||||
await github.rest.issues.updateComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
comment_id: botComment.id,
|
||||
body: report
|
||||
});
|
||||
} else {
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: report
|
||||
});
|
||||
}
|
||||
|
||||
- name: Coverage Note for Fork PRs
|
||||
if: github.event.pull_request.head.repo.full_name != github.repository
|
||||
run: |
|
||||
echo "::notice::Coverage report uploaded as artifact (fork PRs cannot post comments). Download from Actions tab."
|
||||
|
||||
lint:
|
||||
name: Lint
|
||||
|
||||
54
CHANGELOG.md
54
CHANGELOG.md
@@ -7,6 +7,60 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
## [0.2.3] - 2026-01-09
|
||||
|
||||
Worker safety release - prevents accidental termination of active agents.
|
||||
|
||||
> **Note**: The Deacon safety improvements are believed to be correct but have not
|
||||
> yet been extensively tested in production. We recommend running with
|
||||
> `gt deacon pause` initially and monitoring behavior before enabling full patrol.
|
||||
> Please report any issues. A 0.3.0 release will follow once these changes are
|
||||
> battle-tested.
|
||||
|
||||
### Critical Safety Improvements
|
||||
|
||||
- **Kill authority removed from Deacon** - Deacon patrol now only detects zombies via `--dry-run`, never kills directly. Death warrants are filed for Boot to handle interrogation/execution. This prevents destruction of worker context, mid-task progress, and unsaved state (#gt-vhaej)
|
||||
- **Bulletproof pause mechanism** - Multi-layer pause for Deacon with file-based state, `gt deacon pause/resume` commands, and guards in `gt prime` and heartbeat (#265)
|
||||
- **Doctor warns instead of killing** - `gt doctor` now warns about stale town-root settings rather than killing sessions (#243)
|
||||
- **Orphan process check informational** - Doctor's orphan process detection is now informational only, not actionable (#272)
|
||||
|
||||
### Added
|
||||
|
||||
- **`gt account switch` command** - Switch between Claude Code accounts with `gt account switch <handle>`. Manages `~/.claude` symlinks and updates default account
|
||||
- **`gt crew list --all`** - Show all crew members across all rigs (#276)
|
||||
- **Rig-level custom agent support** - Configure different agents per-rig (#12)
|
||||
- **Rig identity beads check** - Doctor validates rig identity beads exist
|
||||
- **GT_ROOT env var** - Set for all agent sessions for consistent environment
|
||||
- **New agent presets** - Added Cursor, Auggie (Augment Code), and Sourcegraph AMP as built-in agent presets (#247)
|
||||
- **Context Management docs** - Added to Witness template for better context handling (gt-jjama)
|
||||
|
||||
### Fixed
|
||||
|
||||
- **`gt prime --hook` recognized** - Doctor now recognizes `gt prime --hook` as valid session hook config (#14)
|
||||
- **Integration test reliability** - Improved test stability (#13)
|
||||
- **IsClaudeRunning detection** - Now detects 'claude' and version patterns correctly (#273)
|
||||
- **Deacon heartbeat restored** - `ensureDeaconRunning` restored to heartbeat using Manager pattern (#271)
|
||||
- **Deacon session names** - Correct session name references in formulas (#270)
|
||||
- **Hidden directory scanning** - Ignore `.claude` and other dot directories when enumerating polecats (#258, #279)
|
||||
- **SetupRedirect tracked beads** - Works correctly with tracked beads architecture where canonical location is `mayor/rig/.beads`
|
||||
- **Tmux shell ready** - Wait for shell ready before sending keys (#264)
|
||||
- **Gastown prefix derivation** - Correctly derive `gt-` prefix for gastown compound words (gt-m46bb)
|
||||
- **Custom beads types** - Register custom beads types during install (#250)
|
||||
|
||||
### Changed
|
||||
|
||||
- **Refinery Manager pattern** - Replaced `ensureRefinerySession` with `refinery.Manager.Start()` for consistency
|
||||
|
||||
### Removed
|
||||
|
||||
- **Unused formula JSON** - Removed unused JSON formula file (cleanup)
|
||||
|
||||
### Contributors
|
||||
|
||||
Thanks to all contributors for this release:
|
||||
- @julianknutsen - Doctor fixes (#14, #271, #272, #273), formula fixes (#270), GT_ROOT env (#268)
|
||||
- @joshuavial - Hidden directory scanning (#258, #279), crew list --all (#276)
|
||||
|
||||
## [0.2.2] - 2026-01-07
|
||||
|
||||
Rig operational state management, unified agent startup, and extensive stability fixes.
|
||||
|
||||
51
README.md
51
README.md
@@ -77,6 +77,8 @@ Work tracking units. Bundle multiple issues/tasks that get assigned to agents.
|
||||
|
||||
Git-backed issue tracking system that stores work state as structured data.
|
||||
|
||||
> **New to Gas Town?** See the [Glossary](docs/glossary.md) for a complete guide to terminology and concepts.
|
||||
|
||||
## Installation
|
||||
|
||||
### Prerequisites
|
||||
@@ -85,7 +87,8 @@ Git-backed issue tracking system that stores work state as structured data.
|
||||
- **Git 2.25+** - for worktree support
|
||||
- **beads (bd) 0.44.0+** - [github.com/steveyegge/beads](https://github.com/steveyegge/beads) (required for custom type support)
|
||||
- **tmux 3.0+** - recommended for full experience
|
||||
- **Claude Code CLI** - [claude.ai/code](https://claude.ai/code)
|
||||
- **Claude Code CLI** (default runtime) - [claude.ai/code](https://claude.ai/code)
|
||||
- **Codex CLI** (optional runtime) - [developers.openai.com/codex/cli](https://developers.openai.com/codex/cli)
|
||||
|
||||
### Setup
|
||||
|
||||
@@ -180,6 +183,18 @@ gt convoy create "Auth System" issue-101 issue-102 --notify
|
||||
gt convoy list
|
||||
```
|
||||
|
||||
### Minimal Mode (No Tmux)
|
||||
|
||||
Run individual runtime instances manually. Gas Town just tracks state.
|
||||
|
||||
```bash
|
||||
gt convoy create "Fix bugs" issue-123 # Create convoy (sling auto-creates if skipped)
|
||||
gt sling issue-123 myproject # Assign to worker
|
||||
claude --resume # Agent reads mail, runs work (Claude)
|
||||
# or: codex # Start Codex in the workspace
|
||||
gt convoy list # Check progress
|
||||
```
|
||||
|
||||
### Beads Formula Workflow
|
||||
|
||||
**Best for:** Predefined, repeatable processes
|
||||
@@ -258,6 +273,30 @@ gt sling bug-101 myproject/my-agent
|
||||
gt convoy show
|
||||
```
|
||||
|
||||
## Runtime Configuration
|
||||
|
||||
Gas Town supports multiple AI coding runtimes. Per-rig runtime settings are in `settings/config.json`.
|
||||
|
||||
```json
|
||||
{
|
||||
"runtime": {
|
||||
"provider": "codex",
|
||||
"command": "codex",
|
||||
"args": [],
|
||||
"prompt_mode": "none"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Notes:**
|
||||
|
||||
- Claude uses hooks in `.claude/settings.json` for mail injection and startup.
|
||||
- For Codex, set `project_doc_fallback_filenames = ["CLAUDE.md"]` in
|
||||
`~/.codex/config.toml` so role instructions are picked up.
|
||||
- For runtimes without hooks (e.g., Codex), Gas Town sends a startup fallback
|
||||
after the session is ready: `gt prime`, optional `gt mail check --inject`
|
||||
for autonomous roles, and `gt nudge deacon session-started`.
|
||||
|
||||
## Key Commands
|
||||
|
||||
### Workspace Management
|
||||
@@ -274,12 +313,14 @@ gt crew add <name> --rig <rig> # Create crew workspace
|
||||
```bash
|
||||
gt agents # List active agents
|
||||
gt sling <issue> <rig> # Assign work to agent
|
||||
gt sling <issue> <rig> --agent codex # Override runtime for this sling/spawn
|
||||
gt sling <issue> <rig> --agent cursor # Override runtime for this sling/spawn
|
||||
gt mayor attach # Start Mayor session
|
||||
gt mayor start --agent gemini # Run Mayor with a specific agent alias
|
||||
gt mayor start --agent auggie # Run Mayor with a specific agent alias
|
||||
gt prime # Alternative to mayor attach
|
||||
```
|
||||
|
||||
**Built-in agent presets**: `claude`, `gemini`, `codex`, `cursor`, `auggie`, `amp`
|
||||
|
||||
### Convoy (Work Tracking)
|
||||
|
||||
```bash
|
||||
@@ -312,6 +353,10 @@ bd mol pour <formula> # Create trackable instance
|
||||
bd mol list # List active instances
|
||||
```
|
||||
|
||||
## Cooking Formulas
|
||||
|
||||
Gas Town includes built-in formulas for common workflows. See `.beads/formulas/` for available recipes.
|
||||
|
||||
## Dashboard
|
||||
|
||||
Gas Town includes a web dashboard for monitoring:
|
||||
|
||||
@@ -17,7 +17,9 @@ Complete setup guide for Gas Town multi-agent orchestrator.
|
||||
| Tool | Version | Check | Install |
|
||||
|------|---------|-------|---------|
|
||||
| **tmux** | 3.0+ | `tmux -V` | See below |
|
||||
| **Claude Code** | latest | `claude --version` | See [claude.ai/claude-code](https://claude.ai/claude-code) |
|
||||
| **Claude Code** (default) | latest | `claude --version` | See [claude.ai/claude-code](https://claude.ai/claude-code) |
|
||||
| **Codex CLI** (optional) | latest | `codex --version` | See [developers.openai.com/codex/cli](https://developers.openai.com/codex/cli) |
|
||||
| **OpenCode CLI** (optional) | latest | `opencode --version` | See [opencode.ai](https://opencode.ai) |
|
||||
|
||||
## Installing Prerequisites
|
||||
|
||||
@@ -159,16 +161,17 @@ Gas Town supports two operational modes:
|
||||
|
||||
### Minimal Mode (No Daemon)
|
||||
|
||||
Run individual Claude Code instances manually. Gas Town only tracks state.
|
||||
Run individual runtime instances manually. Gas Town only tracks state.
|
||||
|
||||
```bash
|
||||
# Create and assign work
|
||||
gt convoy create "Fix bugs" issue-123
|
||||
gt sling issue-123 myproject
|
||||
|
||||
# Run Claude manually
|
||||
# Run runtime manually
|
||||
cd ~/gt/myproject/polecats/<worker>
|
||||
claude --resume
|
||||
claude --resume # Claude Code
|
||||
# or: codex # Codex CLI
|
||||
|
||||
# Check progress
|
||||
gt convoy list
|
||||
|
||||
94
docs/glossary.md
Normal file
94
docs/glossary.md
Normal file
@@ -0,0 +1,94 @@
|
||||
# Gas Town Glossary
|
||||
|
||||
Gas Town is an agentic development environment for managing multiple Claude Code instances simultaneously using the `gt` and `bd` (Beads) binaries, coordinated with tmux in git-managed directories.
|
||||
|
||||
## Core Principles
|
||||
|
||||
### MEOW (Molecular Expression of Work)
|
||||
Breaking large goals into detailed instructions for agents. Supported by Beads, Epics, Formulas, and Molecules. MEOW ensures work is decomposed into trackable, atomic units that agents can execute autonomously.
|
||||
|
||||
### GUPP (Gas Town Universal Propulsion Principle)
|
||||
"If there is work on your Hook, YOU MUST RUN IT." This principle ensures agents autonomously proceed with available work without waiting for external input. GUPP is the heartbeat of autonomous operation.
|
||||
|
||||
### NDI (Nondeterministic Idempotence)
|
||||
The overarching goal ensuring useful outcomes through orchestration of potentially unreliable processes. Persistent Beads and oversight agents (Witness, Deacon) guarantee eventual workflow completion even when individual operations may fail or produce varying results.
|
||||
|
||||
## Environments
|
||||
|
||||
### Town
|
||||
The management headquarters (e.g., `~/gt/`). The Town coordinates all workers across multiple Rigs and houses town-level agents like Mayor and Deacon.
|
||||
|
||||
### Rig
|
||||
A project-specific Git repository under Gas Town management. Each Rig has its own Polecats, Refinery, Witness, and Crew members. Rigs are where actual development work happens.
|
||||
|
||||
## Town-Level Roles
|
||||
|
||||
### Mayor
|
||||
Chief-of-staff agent responsible for initiating Convoys, coordinating work distribution, and notifying users of important events. The Mayor operates from the town level and has visibility across all Rigs.
|
||||
|
||||
### Deacon
|
||||
Daemon beacon running continuous Patrol cycles. The Deacon ensures worker activity, monitors system health, and triggers recovery when agents become unresponsive. Think of the Deacon as the system's watchdog.
|
||||
|
||||
### Dogs
|
||||
The Deacon's crew of maintenance agents handling background tasks like cleanup, health checks, and system maintenance.
|
||||
|
||||
### Boot (the Dog)
|
||||
A special Dog that checks the Deacon every 5 minutes, ensuring the watchdog itself is still watching. This creates a chain of accountability.
|
||||
|
||||
## Rig-Level Roles
|
||||
|
||||
### Polecat
|
||||
Ephemeral worker agents that produce Merge Requests. Polecats are spawned for specific tasks, complete their work, and are then cleaned up. They work in isolated git worktrees to avoid conflicts.
|
||||
|
||||
### Refinery
|
||||
Manages the Merge Queue for a Rig. The Refinery intelligently merges changes from Polecats, handling conflicts and ensuring code quality before changes reach the main branch.
|
||||
|
||||
### Witness
|
||||
Patrol agent that oversees Polecats and the Refinery within a Rig. The Witness monitors progress, detects stuck agents, and can trigger recovery actions.
|
||||
|
||||
### Crew
|
||||
Long-lived, named agents for persistent collaboration. Unlike ephemeral Polecats, Crew members maintain context across sessions and are ideal for ongoing work relationships.
|
||||
|
||||
## Work Units
|
||||
|
||||
### Bead
|
||||
Git-backed atomic work unit stored in JSONL format. Beads are the fundamental unit of work tracking in Gas Town. They can represent issues, tasks, epics, or any trackable work item.
|
||||
|
||||
### Formula
|
||||
TOML-based workflow source template. Formulas define reusable patterns for common operations like patrol cycles, code review, or deployment.
|
||||
|
||||
### Protomolecule
|
||||
A template class for instantiating Molecules. Protomolecules define the structure and steps of a workflow without being tied to specific work items.
|
||||
|
||||
### Molecule
|
||||
Durable chained Bead workflows. Molecules represent multi-step processes where each step is tracked as a Bead. They survive agent restarts and ensure complex workflows complete.
|
||||
|
||||
### Wisp
|
||||
Ephemeral Beads destroyed after runs. Wisps are lightweight work items used for transient operations that don't need permanent tracking.
|
||||
|
||||
### Hook
|
||||
A special pinned Bead for each agent. The Hook is an agent's primary work queue - when work appears on your Hook, GUPP dictates you must run it.
|
||||
|
||||
## Workflow Commands
|
||||
|
||||
### Convoy
|
||||
Primary work-order wrapping related Beads. Convoys group related tasks together and can be assigned to multiple workers. Created with `gt convoy create`.
|
||||
|
||||
### Slinging
|
||||
Assigning work to agents via `gt sling`. When you sling work to a Polecat or Crew member, you're putting it on their Hook for execution.
|
||||
|
||||
### Nudging
|
||||
Real-time messaging between agents with `gt nudge`. Nudges allow immediate communication without going through the mail system.
|
||||
|
||||
### Handoff
|
||||
Agent session refresh via `/handoff`. When context gets full or an agent needs a fresh start, handoff transfers work state to a new session.
|
||||
|
||||
### Seance
|
||||
Communicating with previous sessions via `gt seance`. Allows agents to query their predecessors for context and decisions from earlier work.
|
||||
|
||||
### Patrol
|
||||
Ephemeral loop maintaining system heartbeat. Patrol agents (Deacon, Witness) continuously cycle through health checks and trigger actions as needed.
|
||||
|
||||
---
|
||||
|
||||
*This glossary was contributed by [Clay Shirky](https://github.com/cshirky) in [Issue #80](https://github.com/steveyegge/gastown/issues/80).*
|
||||
@@ -359,15 +359,56 @@ gt config agent remove <name> # Remove custom agent (built-ins protected)
|
||||
gt config default-agent [name] # Get or set town default agent
|
||||
```
|
||||
|
||||
**Built-in agents**: `claude`, `gemini`, `codex`
|
||||
**Built-in agents**: `claude`, `gemini`, `codex`, `cursor`, `auggie`, `amp`
|
||||
|
||||
**Custom agents**: Define per-town in `mayor/town.json`:
|
||||
**Custom agents**: Define per-town via CLI or JSON:
|
||||
```bash
|
||||
gt config agent set claude-glm "claude-glm --model glm-4"
|
||||
gt config agent set claude "claude-opus" # Override built-in
|
||||
gt config default-agent claude-glm # Set default
|
||||
```
|
||||
|
||||
**Advanced agent config** (`settings/agents.json`):
|
||||
```json
|
||||
{
|
||||
"version": 1,
|
||||
"agents": {
|
||||
"opencode": {
|
||||
"command": "opencode",
|
||||
"args": [],
|
||||
"resume_flag": "--session",
|
||||
"resume_style": "flag",
|
||||
"non_interactive": {
|
||||
"subcommand": "run",
|
||||
"output_flag": "--format json"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Rig-level agents** (`<rig>/settings/config.json`):
|
||||
```json
|
||||
{
|
||||
"type": "rig-settings",
|
||||
"version": 1,
|
||||
"agent": "opencode",
|
||||
"agents": {
|
||||
"opencode": {
|
||||
"command": "opencode",
|
||||
"args": ["--session"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Agent resolution order**: rig-level → town-level → built-in presets.
|
||||
|
||||
For OpenCode autonomous mode, set env var in your shell profile:
|
||||
```bash
|
||||
export OPENCODE_PERMISSION='{"*":"allow"}'
|
||||
```
|
||||
|
||||
### Rig Management
|
||||
|
||||
```bash
|
||||
|
||||
495
dog-pool-architecture.md
Normal file
495
dog-pool-architecture.md
Normal file
@@ -0,0 +1,495 @@
|
||||
# Dog Pool Architecture for Concurrent Shutdown Dances
|
||||
|
||||
> Design document for gt-fsld8
|
||||
|
||||
## Problem Statement
|
||||
|
||||
Boot needs to run multiple shutdown-dance molecules concurrently when multiple death
|
||||
warrants are issued. The current hook design only allows one molecule per agent.
|
||||
|
||||
Example scenario:
|
||||
- Warrant 1: Kill stuck polecat Toast (60s into interrogation)
|
||||
- Warrant 2: Kill stuck polecat Shadow (just started)
|
||||
- Warrant 3: Kill stuck witness (120s into interrogation)
|
||||
|
||||
All three need concurrent tracking, independent timeouts, and separate outcomes.
|
||||
|
||||
## Design Decision: Lightweight State Machines
|
||||
|
||||
After analyzing the options, the shutdown-dance does NOT need Claude sessions.
|
||||
The dance is a deterministic state machine:
|
||||
|
||||
```
|
||||
WARRANT -> INTERROGATE -> EVALUATE -> PARDON|EXECUTE
|
||||
```
|
||||
|
||||
Each step is mechanical:
|
||||
1. Send a tmux message (no LLM needed)
|
||||
2. Wait for timeout or response (timer)
|
||||
3. Check tmux output for ALIVE keyword (string match)
|
||||
4. Repeat or terminate
|
||||
|
||||
**Decision**: Dogs are lightweight Go routines, not Claude sessions.
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
```
|
||||
┌────────────────────────────────────────────────────────────────────┐
|
||||
│ BOOT │
|
||||
│ (Claude session in tmux) │
|
||||
│ │
|
||||
│ ┌──────────────────────────────────────────────────────────────┐ │
|
||||
│ │ Dog Manager │ │
|
||||
│ │ │ │
|
||||
│ │ Pool: [Dog1, Dog2, Dog3, ...] (goroutines + state files) │ │
|
||||
│ │ │ │
|
||||
│ │ allocate() → Dog │ │
|
||||
│ │ release(Dog) │ │
|
||||
│ │ status() → []DogStatus │ │
|
||||
│ └──────────────────────────────────────────────────────────────┘ │
|
||||
│ │
|
||||
│ Boot's job: │
|
||||
│ - Watch for warrants (file or event) │
|
||||
│ - Allocate dog from pool │
|
||||
│ - Monitor dog progress │
|
||||
│ - Handle dog completion/failure │
|
||||
│ - Report results │
|
||||
└────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Dog Structure
|
||||
|
||||
```go
|
||||
// Dog represents a shutdown-dance executor
|
||||
type Dog struct {
|
||||
ID string // Unique ID (e.g., "dog-1704567890123")
|
||||
Warrant *Warrant // The death warrant being processed
|
||||
State ShutdownDanceState
|
||||
Attempt int // Current interrogation attempt (1-3)
|
||||
StartedAt time.Time
|
||||
StateFile string // Persistent state: ~/gt/deacon/dogs/active/<id>.json
|
||||
}
|
||||
|
||||
type ShutdownDanceState string
|
||||
|
||||
const (
|
||||
StateIdle ShutdownDanceState = "idle"
|
||||
StateInterrogating ShutdownDanceState = "interrogating" // Sent message, waiting
|
||||
StateEvaluating ShutdownDanceState = "evaluating" // Checking response
|
||||
StatePardoned ShutdownDanceState = "pardoned" // Session responded
|
||||
StateExecuting ShutdownDanceState = "executing" // Killing session
|
||||
StateComplete ShutdownDanceState = "complete" // Done, ready for cleanup
|
||||
StateFailed ShutdownDanceState = "failed" // Dog crashed/errored
|
||||
)
|
||||
|
||||
type Warrant struct {
|
||||
ID string // Bead ID for the warrant
|
||||
Target string // Session to interrogate (e.g., "gt-gastown-Toast")
|
||||
Reason string // Why warrant was issued
|
||||
Requester string // Who filed the warrant
|
||||
FiledAt time.Time
|
||||
}
|
||||
```
|
||||
|
||||
## Pool Design
|
||||
|
||||
### Fixed Pool Size
|
||||
|
||||
**Decision**: Fixed pool of 5 dogs, configurable via environment.
|
||||
|
||||
Rationale:
|
||||
- Dynamic sizing adds complexity without clear benefit
|
||||
- 5 concurrent shutdown dances handles worst-case scenarios
|
||||
- If pool exhausted, warrants queue (better than infinite dog spawning)
|
||||
- Memory footprint is negligible (goroutines + small state files)
|
||||
|
||||
```go
|
||||
const (
|
||||
DefaultPoolSize = 5
|
||||
MaxPoolSize = 20
|
||||
)
|
||||
|
||||
type DogPool struct {
|
||||
mu sync.Mutex
|
||||
dogs []*Dog // All dogs in pool
|
||||
idle chan *Dog // Channel of available dogs
|
||||
active map[string]*Dog // ID -> Dog for active dogs
|
||||
stateDir string // ~/gt/deacon/dogs/active/
|
||||
}
|
||||
|
||||
func (p *DogPool) Allocate(warrant *Warrant) (*Dog, error) {
|
||||
select {
|
||||
case dog := <-p.idle:
|
||||
dog.Warrant = warrant
|
||||
dog.State = StateInterrogating
|
||||
dog.Attempt = 1
|
||||
dog.StartedAt = time.Now()
|
||||
p.active[dog.ID] = dog
|
||||
return dog, nil
|
||||
default:
|
||||
return nil, ErrPoolExhausted
|
||||
}
|
||||
}
|
||||
|
||||
func (p *DogPool) Release(dog *Dog) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
delete(p.active, dog.ID)
|
||||
dog.Reset()
|
||||
p.idle <- dog
|
||||
}
|
||||
```
|
||||
|
||||
### Why Not Dynamic Pool?
|
||||
|
||||
Considered but rejected:
|
||||
- Adding dogs on demand increases complexity
|
||||
- No clear benefit - warrants rarely exceed 5 concurrent
|
||||
- If needed, raise DefaultPoolSize
|
||||
- Simpler to reason about fixed resources
|
||||
|
||||
## Communication: State Files + Events
|
||||
|
||||
### State Persistence
|
||||
|
||||
Each active dog writes state to `~/gt/deacon/dogs/active/<id>.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "dog-1704567890123",
|
||||
"warrant": {
|
||||
"id": "gt-abc123",
|
||||
"target": "gt-gastown-Toast",
|
||||
"reason": "no_response_health_check",
|
||||
"requester": "deacon",
|
||||
"filed_at": "2026-01-07T20:15:00Z"
|
||||
},
|
||||
"state": "interrogating",
|
||||
"attempt": 2,
|
||||
"started_at": "2026-01-07T20:15:00Z",
|
||||
"last_message_at": "2026-01-07T20:16:00Z",
|
||||
"next_timeout": "2026-01-07T20:18:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
### Boot Monitoring
|
||||
|
||||
Boot monitors dogs via:
|
||||
1. **Polling**: `gt dog status --active` every tick
|
||||
2. **Completion files**: Dogs write `<id>.done` when complete
|
||||
|
||||
```go
|
||||
type DogResult struct {
|
||||
DogID string
|
||||
Warrant *Warrant
|
||||
Outcome DogOutcome // pardoned | executed | failed
|
||||
Duration time.Duration
|
||||
Details string
|
||||
}
|
||||
|
||||
type DogOutcome string
|
||||
|
||||
const (
|
||||
OutcomePardoned DogOutcome = "pardoned" // Session responded
|
||||
OutcomeExecuted DogOutcome = "executed" // Session killed
|
||||
OutcomeFailed DogOutcome = "failed" // Dog crashed
|
||||
)
|
||||
```
|
||||
|
||||
### Why Not Mail?
|
||||
|
||||
Considered but rejected for dog<->boot communication:
|
||||
- Mail is async, poll-based - adds latency
|
||||
- State files are simpler for local coordination
|
||||
- Dogs don't need complex inter-agent communication
|
||||
- Keep mail for external coordination (Witness, Mayor)
|
||||
|
||||
## Shutdown Dance State Machine
|
||||
|
||||
Each dog executes this state machine:
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────┐
|
||||
│ │
|
||||
▼ │
|
||||
┌───────────────────────────┐ │
|
||||
│ INTERROGATING │ │
|
||||
│ │ │
|
||||
│ 1. Send health check │ │
|
||||
│ 2. Start timeout timer │ │
|
||||
└───────────┬───────────────┘ │
|
||||
│ │
|
||||
│ timeout or response │
|
||||
▼ │
|
||||
┌───────────────────────────┐ │
|
||||
│ EVALUATING │ │
|
||||
│ │ │
|
||||
│ Check tmux output for │ │
|
||||
│ ALIVE keyword │ │
|
||||
└───────────┬───────────────┘ │
|
||||
│ │
|
||||
┌───────┴───────┐ │
|
||||
│ │ │
|
||||
▼ ▼ │
|
||||
[ALIVE found] [No ALIVE] │
|
||||
│ │ │
|
||||
│ │ attempt < 3? │
|
||||
│ ├──────────────────────────────────→─┘
|
||||
│ │ yes: attempt++, longer timeout
|
||||
│ │
|
||||
│ │ no: attempt == 3
|
||||
▼ ▼
|
||||
┌─────────┐ ┌─────────────┐
|
||||
│ PARDONED│ │ EXECUTING │
|
||||
│ │ │ │
|
||||
│ Cancel │ │ Kill tmux │
|
||||
│ warrant │ │ session │
|
||||
└────┬────┘ └──────┬──────┘
|
||||
│ │
|
||||
└────────┬───────┘
|
||||
│
|
||||
▼
|
||||
┌────────────────┐
|
||||
│ COMPLETE │
|
||||
│ │
|
||||
│ Write result │
|
||||
│ Release dog │
|
||||
└────────────────┘
|
||||
```
|
||||
|
||||
### Timeout Gates
|
||||
|
||||
| Attempt | Timeout | Cumulative Wait |
|
||||
|---------|---------|-----------------|
|
||||
| 1 | 60s | 60s |
|
||||
| 2 | 120s | 180s (3 min) |
|
||||
| 3 | 240s | 420s (7 min) |
|
||||
|
||||
### Health Check Message
|
||||
|
||||
```
|
||||
[DOG] HEALTH CHECK: Session {target}, respond ALIVE within {timeout}s or face termination.
|
||||
Warrant reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempt: {attempt}/3
|
||||
```
|
||||
|
||||
### Response Detection
|
||||
|
||||
```go
|
||||
func (d *Dog) CheckForResponse() bool {
|
||||
tm := tmux.NewTmux()
|
||||
output, err := tm.CapturePane(d.Warrant.Target, 50) // Last 50 lines
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Any output after our health check counts as alive
|
||||
// Specifically look for ALIVE keyword for explicit response
|
||||
return strings.Contains(output, "ALIVE")
|
||||
}
|
||||
```
|
||||
|
||||
## Dog Implementation
|
||||
|
||||
### Not Reusing Polecat Infrastructure
|
||||
|
||||
**Decision**: Dogs do NOT reuse polecat infrastructure.
|
||||
|
||||
Rationale:
|
||||
- Polecats are Claude sessions with molecules, hooks, sandboxes
|
||||
- Dogs are simple state machine executors
|
||||
- Polecats have 3-layer lifecycle (session/sandbox/slot)
|
||||
- Dogs have single-layer lifecycle (just state)
|
||||
- Different resource profiles, different management
|
||||
|
||||
What dogs DO share:
|
||||
- tmux utilities for message sending/capture
|
||||
- State file patterns
|
||||
- Pool allocation pattern
|
||||
|
||||
### Dog Execution Loop
|
||||
|
||||
```go
|
||||
func (d *Dog) Run(ctx context.Context) DogResult {
|
||||
d.State = StateInterrogating
|
||||
d.saveState()
|
||||
|
||||
for d.Attempt <= 3 {
|
||||
// Send interrogation message
|
||||
if err := d.sendHealthCheck(); err != nil {
|
||||
return d.fail(err)
|
||||
}
|
||||
|
||||
// Wait for timeout or context cancellation
|
||||
timeout := d.timeoutForAttempt(d.Attempt)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return d.fail(ctx.Err())
|
||||
case <-time.After(timeout):
|
||||
// Timeout reached
|
||||
}
|
||||
|
||||
// Evaluate response
|
||||
d.State = StateEvaluating
|
||||
d.saveState()
|
||||
|
||||
if d.CheckForResponse() {
|
||||
// Session is alive
|
||||
return d.pardon()
|
||||
}
|
||||
|
||||
// No response - try again or execute
|
||||
d.Attempt++
|
||||
if d.Attempt <= 3 {
|
||||
d.State = StateInterrogating
|
||||
d.saveState()
|
||||
}
|
||||
}
|
||||
|
||||
// All attempts exhausted - execute warrant
|
||||
return d.execute()
|
||||
}
|
||||
```
|
||||
|
||||
## Failure Handling
|
||||
|
||||
### Dog Crashes Mid-Dance
|
||||
|
||||
If a dog crashes (Boot process restarts, system crash):
|
||||
|
||||
1. State files persist in `~/gt/deacon/dogs/active/`
|
||||
2. On Boot restart, scan for orphaned state files
|
||||
3. Resume or restart based on state:
|
||||
|
||||
| State | Recovery Action |
|
||||
|------------------|------------------------------------|
|
||||
| interrogating | Restart from current attempt |
|
||||
| evaluating | Check response, continue |
|
||||
| executing | Verify kill, mark complete |
|
||||
| pardoned/complete| Already done, clean up |
|
||||
|
||||
```go
|
||||
func (p *DogPool) RecoverOrphans() error {
|
||||
files, _ := filepath.Glob(p.stateDir + "/*.json")
|
||||
for _, f := range files {
|
||||
state := loadDogState(f)
|
||||
if state.State != StateComplete && state.State != StatePardoned {
|
||||
dog := p.allocateForRecovery(state)
|
||||
go dog.Resume()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
### Handling Pool Exhaustion
|
||||
|
||||
If all dogs are busy when new warrant arrives:
|
||||
|
||||
```go
|
||||
func (b *Boot) HandleWarrant(warrant *Warrant) error {
|
||||
dog, err := b.pool.Allocate(warrant)
|
||||
if err == ErrPoolExhausted {
|
||||
// Queue the warrant for later processing
|
||||
b.warrantQueue.Push(warrant)
|
||||
b.log("Warrant %s queued (pool exhausted)", warrant.ID)
|
||||
return nil
|
||||
}
|
||||
|
||||
go func() {
|
||||
result := dog.Run(b.ctx)
|
||||
b.handleResult(result)
|
||||
b.pool.Release(dog)
|
||||
|
||||
// Check queue for pending warrants
|
||||
if next := b.warrantQueue.Pop(); next != nil {
|
||||
b.HandleWarrant(next)
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
## Directory Structure
|
||||
|
||||
```
|
||||
~/gt/deacon/dogs/
|
||||
├── boot/ # Boot's working directory
|
||||
│ ├── CLAUDE.md # Boot context
|
||||
│ └── .boot-status.json # Boot execution status
|
||||
├── active/ # Active dog state files
|
||||
│ ├── dog-123.json # Dog 1 state
|
||||
│ ├── dog-456.json # Dog 2 state
|
||||
│ └── ...
|
||||
├── completed/ # Completed dance records (for audit)
|
||||
│ ├── dog-789.json # Historical record
|
||||
│ └── ...
|
||||
└── warrants/ # Pending warrant queue
|
||||
├── warrant-abc.json
|
||||
└── ...
|
||||
```
|
||||
|
||||
## Command Interface
|
||||
|
||||
```bash
|
||||
# Pool status
|
||||
gt dog pool status
|
||||
# Output:
|
||||
# Dog Pool: 3/5 active
|
||||
# dog-123: interrogating Toast (attempt 2, 45s remaining)
|
||||
# dog-456: executing Shadow
|
||||
# dog-789: idle
|
||||
|
||||
# Manual dog operations (for debugging)
|
||||
gt dog pool allocate <warrant-id>
|
||||
gt dog pool release <dog-id>
|
||||
|
||||
# View active dances
|
||||
gt dog dances
|
||||
# Output:
|
||||
# Active Shutdown Dances:
|
||||
# dog-123 → Toast: Interrogating (2/3), timeout in 45s
|
||||
# dog-456 → Shadow: Executing warrant
|
||||
|
||||
# View warrant queue
|
||||
gt dog warrants
|
||||
# Output:
|
||||
# Pending Warrants: 2
|
||||
# 1. gt-abc: witness-gastown (stuck_no_progress)
|
||||
# 2. gt-def: polecat-Copper (crash_loop)
|
||||
```
|
||||
|
||||
## Integration with Existing Dogs
|
||||
|
||||
The existing `dog` package (`internal/dog/`) manages Deacon's multi-rig helper dogs.
|
||||
Those are different from shutdown-dance dogs:
|
||||
|
||||
| Aspect | Helper Dogs (existing) | Dance Dogs (new) |
|
||||
|-----------------|-----------------------------|-----------------------------|
|
||||
| Purpose | Cross-rig infrastructure | Shutdown dance execution |
|
||||
| Sessions | Claude sessions | Goroutines (no Claude) |
|
||||
| Worktrees | One per rig | None |
|
||||
| Lifecycle | Long-lived, reusable | Ephemeral per warrant |
|
||||
| State | idle/working | Dance state machine |
|
||||
|
||||
**Recommendation**: Use different package to avoid confusion:
|
||||
- `internal/dog/` - existing helper dogs
|
||||
- `internal/shutdown/` - shutdown dance pool
|
||||
|
||||
## Summary: Answers to Design Questions
|
||||
|
||||
| Question | Answer |
|
||||
|----------|--------|
|
||||
| How many Dogs in pool? | Fixed: 5 (configurable via GT_DOG_POOL_SIZE) |
|
||||
| How do Dogs communicate with Boot? | State files + completion markers |
|
||||
| Are Dogs tmux sessions? | No - goroutines with state machine |
|
||||
| Reuse polecat infrastructure? | No - too heavyweight, different model |
|
||||
| What if Dog dies mid-dance? | State file recovery on Boot restart |
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [x] Architecture document for Dog pool
|
||||
- [x] Clear allocation/deallocation protocol
|
||||
- [x] Failure handling for Dog crashes
|
||||
@@ -10,6 +10,8 @@ import (
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
)
|
||||
|
||||
// Common errors
|
||||
@@ -138,17 +140,23 @@ func cleanBeadsRuntimeFiles(beadsDir string) error {
|
||||
"mq",
|
||||
}
|
||||
|
||||
var firstErr error
|
||||
for _, pattern := range runtimePatterns {
|
||||
matches, err := filepath.Glob(filepath.Join(beadsDir, pattern))
|
||||
if err != nil {
|
||||
continue // Invalid pattern, skip
|
||||
if firstErr == nil {
|
||||
firstErr = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
for _, match := range matches {
|
||||
os.RemoveAll(match) // Best effort, ignore errors
|
||||
if err := os.RemoveAll(match); err != nil && firstErr == nil {
|
||||
firstErr = err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return firstErr
|
||||
}
|
||||
|
||||
// SetupRedirect creates a .beads/redirect file for a worktree to point to the rig's shared beads.
|
||||
@@ -185,9 +193,19 @@ func SetupRedirect(townRoot, worktreePath string) error {
|
||||
|
||||
rigRoot := filepath.Join(townRoot, parts[0])
|
||||
rigBeadsPath := filepath.Join(rigRoot, ".beads")
|
||||
mayorBeadsPath := filepath.Join(rigRoot, "mayor", "rig", ".beads")
|
||||
|
||||
// Check rig-level .beads first, fall back to mayor/rig/.beads (tracked beads architecture)
|
||||
usesMayorFallback := false
|
||||
if _, err := os.Stat(rigBeadsPath); os.IsNotExist(err) {
|
||||
return fmt.Errorf("no rig .beads found at %s", rigBeadsPath)
|
||||
// No rig/.beads - check for mayor/rig/.beads (tracked beads architecture)
|
||||
if _, err := os.Stat(mayorBeadsPath); os.IsNotExist(err) {
|
||||
return fmt.Errorf("no beads found at %s or %s", rigBeadsPath, mayorBeadsPath)
|
||||
}
|
||||
// Using mayor fallback - warn user to run bd doctor
|
||||
fmt.Fprintf(os.Stderr, "Warning: rig .beads not found at %s, using %s\n", rigBeadsPath, mayorBeadsPath)
|
||||
fmt.Fprintf(os.Stderr, " Run 'bd doctor' to fix rig beads configuration\n")
|
||||
usesMayorFallback = true
|
||||
}
|
||||
|
||||
// Clean up runtime files in .beads/ but preserve tracked files (formulas/, README.md, etc.)
|
||||
@@ -205,18 +223,26 @@ func SetupRedirect(townRoot, worktreePath string) error {
|
||||
// e.g., crew/<name> (depth 2) -> ../../.beads
|
||||
// refinery/rig (depth 2) -> ../../.beads
|
||||
depth := len(parts) - 1 // subtract 1 for rig name itself
|
||||
redirectPath := strings.Repeat("../", depth) + ".beads"
|
||||
upPath := strings.Repeat("../", depth)
|
||||
|
||||
// Check if rig-level beads has a redirect (tracked beads case).
|
||||
// If so, redirect directly to the final destination to avoid chains.
|
||||
// The bd CLI doesn't support redirect chains, so we must skip intermediate hops.
|
||||
rigRedirectPath := filepath.Join(rigBeadsPath, "redirect")
|
||||
if data, err := os.ReadFile(rigRedirectPath); err == nil {
|
||||
rigRedirectTarget := strings.TrimSpace(string(data))
|
||||
if rigRedirectTarget != "" {
|
||||
// Rig has redirect (e.g., "mayor/rig/.beads" for tracked beads).
|
||||
// Redirect worktree directly to the final destination.
|
||||
redirectPath = strings.Repeat("../", depth) + rigRedirectTarget
|
||||
var redirectPath string
|
||||
if usesMayorFallback {
|
||||
// Direct redirect to mayor/rig/.beads since rig/.beads doesn't exist
|
||||
redirectPath = upPath + "mayor/rig/.beads"
|
||||
} else {
|
||||
redirectPath = upPath + ".beads"
|
||||
|
||||
// Check if rig-level beads has a redirect (tracked beads case).
|
||||
// If so, redirect directly to the final destination to avoid chains.
|
||||
// The bd CLI doesn't support redirect chains, so we must skip intermediate hops.
|
||||
rigRedirectPath := filepath.Join(rigBeadsPath, "redirect")
|
||||
if data, err := os.ReadFile(rigRedirectPath); err == nil {
|
||||
rigRedirectTarget := strings.TrimSpace(string(data))
|
||||
if rigRedirectTarget != "" {
|
||||
// Rig has redirect (e.g., "mayor/rig/.beads" for tracked beads).
|
||||
// Redirect worktree directly to the final destination.
|
||||
redirectPath = upPath + rigRedirectTarget
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -747,7 +773,7 @@ func (b *Beads) Update(id string, opts UpdateOptions) error {
|
||||
}
|
||||
|
||||
// Close closes one or more issues.
|
||||
// If CLAUDE_SESSION_ID is set in the environment, it is passed to bd close
|
||||
// If a runtime session ID is set in the environment, it is passed to bd close
|
||||
// for work attribution tracking (see decision 009-session-events-architecture.md).
|
||||
func (b *Beads) Close(ids ...string) error {
|
||||
if len(ids) == 0 {
|
||||
@@ -757,7 +783,7 @@ func (b *Beads) Close(ids ...string) error {
|
||||
args := append([]string{"close"}, ids...)
|
||||
|
||||
// Pass session ID for work attribution if available
|
||||
if sessionID := os.Getenv("CLAUDE_SESSION_ID"); sessionID != "" {
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
args = append(args, "--session="+sessionID)
|
||||
}
|
||||
|
||||
@@ -766,7 +792,7 @@ func (b *Beads) Close(ids ...string) error {
|
||||
}
|
||||
|
||||
// CloseWithReason closes one or more issues with a reason.
|
||||
// If CLAUDE_SESSION_ID is set in the environment, it is passed to bd close
|
||||
// If a runtime session ID is set in the environment, it is passed to bd close
|
||||
// for work attribution tracking (see decision 009-session-events-architecture.md).
|
||||
func (b *Beads) CloseWithReason(reason string, ids ...string) error {
|
||||
if len(ids) == 0 {
|
||||
@@ -777,7 +803,7 @@ func (b *Beads) CloseWithReason(reason string, ids ...string) error {
|
||||
args = append(args, "--reason="+reason)
|
||||
|
||||
// Pass session ID for work attribution if available
|
||||
if sessionID := os.Getenv("CLAUDE_SESSION_ID"); sessionID != "" {
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
args = append(args, "--session="+sessionID)
|
||||
}
|
||||
|
||||
|
||||
@@ -88,9 +88,9 @@ func TestWrapError(t *testing.T) {
|
||||
b := New("/test")
|
||||
|
||||
tests := []struct {
|
||||
stderr string
|
||||
wantErr error
|
||||
wantNil bool
|
||||
stderr string
|
||||
wantErr error
|
||||
wantNil bool
|
||||
}{
|
||||
{"not a beads repository", ErrNotARepo, false},
|
||||
{"No .beads directory found", ErrNotARepo, false},
|
||||
@@ -127,7 +127,6 @@ func TestIntegration(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Walk up to find .beads
|
||||
dir := cwd
|
||||
for {
|
||||
if _, err := os.Stat(filepath.Join(dir, ".beads")); err == nil {
|
||||
@@ -140,6 +139,11 @@ func TestIntegration(t *testing.T) {
|
||||
dir = parent
|
||||
}
|
||||
|
||||
dbPath := filepath.Join(dir, ".beads", "beads.db")
|
||||
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
|
||||
t.Skip("no beads.db found (JSONL-only repo)")
|
||||
}
|
||||
|
||||
b := New(dir)
|
||||
|
||||
// Sync database with JSONL before testing to avoid "Database out of sync" errors.
|
||||
@@ -201,10 +205,10 @@ func TestIntegration(t *testing.T) {
|
||||
// TestParseMRFields tests parsing MR fields from issue descriptions.
|
||||
func TestParseMRFields(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
issue *Issue
|
||||
wantNil bool
|
||||
wantFields *MRFields
|
||||
name string
|
||||
issue *Issue
|
||||
wantNil bool
|
||||
wantFields *MRFields
|
||||
}{
|
||||
{
|
||||
name: "nil issue",
|
||||
@@ -521,8 +525,8 @@ author: someone
|
||||
target: main`,
|
||||
},
|
||||
fields: &MRFields{
|
||||
Branch: "polecat/Capable/gt-ghi",
|
||||
Target: "integration/epic",
|
||||
Branch: "polecat/Capable/gt-ghi",
|
||||
Target: "integration/epic",
|
||||
CloseReason: "merged",
|
||||
},
|
||||
want: `branch: polecat/Capable/gt-ghi
|
||||
@@ -1032,10 +1036,10 @@ func TestParseAgentBeadID(t *testing.T) {
|
||||
// Parseable but not valid agent roles (IsAgentSessionBead will reject)
|
||||
{"gt-abc123", "", "abc123", "", true}, // Parses as town-level but not valid role
|
||||
// Other prefixes (bd-, hq-)
|
||||
{"bd-mayor", "", "mayor", "", true}, // bd prefix town-level
|
||||
{"bd-beads-witness", "beads", "witness", "", true}, // bd prefix rig-level singleton
|
||||
{"bd-beads-polecat-pearl", "beads", "polecat", "pearl", true}, // bd prefix rig-level named
|
||||
{"hq-mayor", "", "mayor", "", true}, // hq prefix town-level
|
||||
{"bd-mayor", "", "mayor", "", true}, // bd prefix town-level
|
||||
{"bd-beads-witness", "beads", "witness", "", true}, // bd prefix rig-level singleton
|
||||
{"bd-beads-polecat-pearl", "beads", "polecat", "pearl", true}, // bd prefix rig-level named
|
||||
{"hq-mayor", "", "mayor", "", true}, // hq prefix town-level
|
||||
// Truly invalid patterns
|
||||
{"x-mayor", "", "", "", false}, // Prefix too short (1 char)
|
||||
{"abcd-mayor", "", "", "", false}, // Prefix too long (4 chars)
|
||||
@@ -1741,7 +1745,7 @@ func TestSetupRedirect(t *testing.T) {
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
crewPath := filepath.Join(rigRoot, "crew", "max")
|
||||
|
||||
// No rig/.beads created
|
||||
// No rig/.beads or mayor/rig/.beads created
|
||||
if err := os.MkdirAll(crewPath, 0755); err != nil {
|
||||
t.Fatalf("mkdir crew: %v", err)
|
||||
}
|
||||
@@ -1751,4 +1755,44 @@ func TestSetupRedirect(t *testing.T) {
|
||||
t.Error("SetupRedirect should fail if rig .beads missing")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("crew worktree with mayor/rig beads only", func(t *testing.T) {
|
||||
// Setup: no rig/.beads, only mayor/rig/.beads exists
|
||||
// This is the tracked beads architecture where rig root has no .beads directory
|
||||
townRoot := t.TempDir()
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
mayorRigBeads := filepath.Join(rigRoot, "mayor", "rig", ".beads")
|
||||
crewPath := filepath.Join(rigRoot, "crew", "max")
|
||||
|
||||
// Create only mayor/rig/.beads (no rig/.beads)
|
||||
if err := os.MkdirAll(mayorRigBeads, 0755); err != nil {
|
||||
t.Fatalf("mkdir mayor/rig beads: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(crewPath, 0755); err != nil {
|
||||
t.Fatalf("mkdir crew: %v", err)
|
||||
}
|
||||
|
||||
// Run SetupRedirect - should succeed and point to mayor/rig/.beads
|
||||
if err := SetupRedirect(townRoot, crewPath); err != nil {
|
||||
t.Fatalf("SetupRedirect failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify redirect points to mayor/rig/.beads
|
||||
redirectPath := filepath.Join(crewPath, ".beads", "redirect")
|
||||
content, err := os.ReadFile(redirectPath)
|
||||
if err != nil {
|
||||
t.Fatalf("read redirect: %v", err)
|
||||
}
|
||||
|
||||
want := "../../mayor/rig/.beads\n"
|
||||
if string(content) != want {
|
||||
t.Errorf("redirect content = %q, want %q", string(content), want)
|
||||
}
|
||||
|
||||
// Verify redirect resolves correctly
|
||||
resolved := ResolveBeadsDir(crewPath)
|
||||
if resolved != mayorRigBeads {
|
||||
t.Errorf("resolved = %q, want %q", resolved, mayorRigBeads)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -197,6 +197,11 @@ func (b *Boot) spawnTmux() error {
|
||||
// Launch Claude with environment exported inline and initial triage prompt
|
||||
// The "gt boot triage" prompt tells Boot to immediately start triage (GUPP principle)
|
||||
startCmd := config.BuildAgentStartupCommand("boot", "deacon-boot", "", "gt boot triage")
|
||||
// Wait for shell to be ready before sending keys (prevents "can't find pane" under load)
|
||||
if err := b.tmux.WaitForShellReady(SessionName, 5*time.Second); err != nil {
|
||||
_ = b.tmux.KillSession(SessionName)
|
||||
return fmt.Errorf("waiting for shell: %w", err)
|
||||
}
|
||||
if err := b.tmux.SendKeys(SessionName, startCmd); err != nil {
|
||||
return fmt.Errorf("sending startup command: %w", err)
|
||||
}
|
||||
|
||||
@@ -11,6 +11,8 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
)
|
||||
|
||||
// Filename is the checkpoint file name within the polecat directory.
|
||||
@@ -84,7 +86,7 @@ func Write(polecatDir string, cp *Checkpoint) error {
|
||||
|
||||
// Set session ID from environment if available
|
||||
if cp.SessionID == "" {
|
||||
cp.SessionID = os.Getenv("CLAUDE_SESSION_ID")
|
||||
cp.SessionID = runtime.SessionIDFromEnv()
|
||||
if cp.SessionID == "" {
|
||||
cp.SessionID = fmt.Sprintf("pid-%d", os.Getpid())
|
||||
}
|
||||
|
||||
@@ -38,17 +38,24 @@ func RoleTypeFor(role string) RoleType {
|
||||
// For worktrees, we use sparse checkout to exclude source repo's .claude/ directory,
|
||||
// so our settings.json is the only one Claude Code sees.
|
||||
func EnsureSettings(workDir string, roleType RoleType) error {
|
||||
claudeDir := filepath.Join(workDir, ".claude")
|
||||
settingsPath := filepath.Join(claudeDir, "settings.json")
|
||||
return EnsureSettingsAt(workDir, roleType, ".claude", "settings.json")
|
||||
}
|
||||
|
||||
// EnsureSettingsAt ensures a settings file exists at a custom directory/file.
|
||||
// If the file doesn't exist, it copies the appropriate template based on role type.
|
||||
// If the file already exists, it's left unchanged.
|
||||
func EnsureSettingsAt(workDir string, roleType RoleType, settingsDir, settingsFile string) error {
|
||||
claudeDir := filepath.Join(workDir, settingsDir)
|
||||
settingsPath := filepath.Join(claudeDir, settingsFile)
|
||||
|
||||
// If settings already exist, don't overwrite
|
||||
if _, err := os.Stat(settingsPath); err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create .claude directory if needed
|
||||
// Create settings directory if needed
|
||||
if err := os.MkdirAll(claudeDir, 0755); err != nil {
|
||||
return fmt.Errorf("creating .claude directory: %w", err)
|
||||
return fmt.Errorf("creating settings directory: %w", err)
|
||||
}
|
||||
|
||||
// Select template based on role type
|
||||
@@ -78,3 +85,8 @@ func EnsureSettings(workDir string, roleType RoleType) error {
|
||||
func EnsureSettingsForRole(workDir, role string) error {
|
||||
return EnsureSettings(workDir, RoleTypeFor(role))
|
||||
}
|
||||
|
||||
// EnsureSettingsForRoleAt is a convenience function that combines RoleTypeFor and EnsureSettingsAt.
|
||||
func EnsureSettingsForRoleAt(workDir, role, settingsDir, settingsFile string) error {
|
||||
return EnsureSettingsAt(workDir, RoleTypeFor(role), settingsDir, settingsFile)
|
||||
}
|
||||
|
||||
@@ -264,6 +264,25 @@ Examples:
|
||||
RunE: runAccountStatus,
|
||||
}
|
||||
|
||||
var accountSwitchCmd = &cobra.Command{
|
||||
Use: "switch <handle>",
|
||||
Short: "Switch to a different account",
|
||||
Long: `Switch the active Claude Code account.
|
||||
|
||||
This command:
|
||||
1. Backs up ~/.claude to the current account's config_dir (if needed)
|
||||
2. Creates a symlink from ~/.claude to the target account's config_dir
|
||||
3. Updates the default account in accounts.json
|
||||
|
||||
After switching, you must restart Claude Code for the change to take effect.
|
||||
|
||||
Examples:
|
||||
gt account switch work # Switch to work account
|
||||
gt account switch personal # Switch to personal account`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: runAccountSwitch,
|
||||
}
|
||||
|
||||
func runAccountStatus(cmd *cobra.Command, args []string) error {
|
||||
townRoot, err := workspace.FindFromCwd()
|
||||
if err != nil {
|
||||
@@ -318,6 +337,122 @@ func runAccountStatus(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func runAccountSwitch(cmd *cobra.Command, args []string) error {
|
||||
targetHandle := args[0]
|
||||
|
||||
townRoot, err := workspace.FindFromCwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("finding town root: %w", err)
|
||||
}
|
||||
|
||||
accountsPath := constants.MayorAccountsPath(townRoot)
|
||||
cfg, err := config.LoadAccountsConfig(accountsPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading accounts config: %w", err)
|
||||
}
|
||||
|
||||
// Check if target account exists
|
||||
targetAcct := cfg.GetAccount(targetHandle)
|
||||
if targetAcct == nil {
|
||||
// List available accounts
|
||||
var handles []string
|
||||
for h := range cfg.Accounts {
|
||||
handles = append(handles, h)
|
||||
}
|
||||
sort.Strings(handles)
|
||||
return fmt.Errorf("account '%s' not found. Available accounts: %v", targetHandle, handles)
|
||||
}
|
||||
|
||||
// Get ~/.claude path
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting home directory: %w", err)
|
||||
}
|
||||
claudeDir := home + "/.claude"
|
||||
|
||||
// Check current state of ~/.claude
|
||||
fileInfo, err := os.Lstat(claudeDir)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return fmt.Errorf("checking ~/.claude: %w", err)
|
||||
}
|
||||
|
||||
// Determine current account (if any) by checking symlink target
|
||||
var currentHandle string
|
||||
if err == nil && fileInfo.Mode()&os.ModeSymlink != 0 {
|
||||
// It's a symlink - find which account it points to
|
||||
linkTarget, err := os.Readlink(claudeDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("reading symlink: %w", err)
|
||||
}
|
||||
for h, acct := range cfg.Accounts {
|
||||
if acct.ConfigDir == linkTarget {
|
||||
currentHandle = h
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check if already on target account
|
||||
if currentHandle == targetHandle {
|
||||
fmt.Printf("Already on account '%s'\n", targetHandle)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handle the case where ~/.claude is a real directory (not a symlink)
|
||||
if err == nil && fileInfo.Mode()&os.ModeSymlink == 0 && fileInfo.IsDir() {
|
||||
// It's a real directory - need to move it
|
||||
// Try to find which account it belongs to based on default
|
||||
if currentHandle == "" && cfg.Default != "" {
|
||||
currentHandle = cfg.Default
|
||||
}
|
||||
|
||||
if currentHandle != "" {
|
||||
currentAcct := cfg.GetAccount(currentHandle)
|
||||
if currentAcct != nil {
|
||||
// Move ~/.claude to the current account's config_dir
|
||||
fmt.Printf("Moving ~/.claude to %s...\n", currentAcct.ConfigDir)
|
||||
|
||||
// Remove the target config dir if it exists (it might be empty from account add)
|
||||
if _, err := os.Stat(currentAcct.ConfigDir); err == nil {
|
||||
if err := os.RemoveAll(currentAcct.ConfigDir); err != nil {
|
||||
return fmt.Errorf("removing existing config dir: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := os.Rename(claudeDir, currentAcct.ConfigDir); err != nil {
|
||||
return fmt.Errorf("moving ~/.claude to %s: %w", currentAcct.ConfigDir, err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("~/.claude is a directory but no default account is set. Please set a default account first with 'gt account default <handle>'")
|
||||
}
|
||||
} else if err == nil && fileInfo.Mode()&os.ModeSymlink != 0 {
|
||||
// It's a symlink - remove it so we can create a new one
|
||||
if err := os.Remove(claudeDir); err != nil {
|
||||
return fmt.Errorf("removing existing symlink: %w", err)
|
||||
}
|
||||
}
|
||||
// If ~/.claude doesn't exist, that's fine - we'll create the symlink
|
||||
|
||||
// Create symlink to target account
|
||||
if err := os.Symlink(targetAcct.ConfigDir, claudeDir); err != nil {
|
||||
return fmt.Errorf("creating symlink to %s: %w", targetAcct.ConfigDir, err)
|
||||
}
|
||||
|
||||
// Update default account
|
||||
cfg.Default = targetHandle
|
||||
if err := config.SaveAccountsConfig(accountsPath, cfg); err != nil {
|
||||
return fmt.Errorf("saving accounts config: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Switched to account '%s'\n", targetHandle)
|
||||
fmt.Printf("~/.claude -> %s\n", targetAcct.ConfigDir)
|
||||
fmt.Println()
|
||||
fmt.Println(style.Warning.Render("⚠️ Restart Claude Code for the change to take effect"))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Add flags
|
||||
accountListCmd.Flags().BoolVar(&accountJSON, "json", false, "Output as JSON")
|
||||
@@ -330,6 +465,7 @@ func init() {
|
||||
accountCmd.AddCommand(accountAddCmd)
|
||||
accountCmd.AddCommand(accountDefaultCmd)
|
||||
accountCmd.AddCommand(accountStatusCmd)
|
||||
accountCmd.AddCommand(accountSwitchCmd)
|
||||
|
||||
rootCmd.AddCommand(accountCmd)
|
||||
}
|
||||
|
||||
299
internal/cmd/account_test.go
Normal file
299
internal/cmd/account_test.go
Normal file
@@ -0,0 +1,299 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
)
|
||||
|
||||
// setupTestTownForAccount creates a minimal Gas Town workspace with accounts.
|
||||
func setupTestTownForAccount(t *testing.T) (townRoot string, accountsDir string) {
|
||||
t.Helper()
|
||||
|
||||
townRoot = t.TempDir()
|
||||
|
||||
// Create mayor directory with required files
|
||||
mayorDir := filepath.Join(townRoot, "mayor")
|
||||
if err := os.MkdirAll(mayorDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir mayor: %v", err)
|
||||
}
|
||||
|
||||
// Create town.json
|
||||
townConfig := &config.TownConfig{
|
||||
Type: "town",
|
||||
Version: config.CurrentTownVersion,
|
||||
Name: "test-town",
|
||||
PublicName: "Test Town",
|
||||
CreatedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
}
|
||||
townConfigPath := filepath.Join(mayorDir, "town.json")
|
||||
if err := config.SaveTownConfig(townConfigPath, townConfig); err != nil {
|
||||
t.Fatalf("save town.json: %v", err)
|
||||
}
|
||||
|
||||
// Create empty rigs.json
|
||||
rigsConfig := &config.RigsConfig{
|
||||
Version: 1,
|
||||
Rigs: make(map[string]config.RigEntry),
|
||||
}
|
||||
rigsPath := filepath.Join(mayorDir, "rigs.json")
|
||||
if err := config.SaveRigsConfig(rigsPath, rigsConfig); err != nil {
|
||||
t.Fatalf("save rigs.json: %v", err)
|
||||
}
|
||||
|
||||
// Create accounts directory
|
||||
accountsDir = filepath.Join(t.TempDir(), "claude-accounts")
|
||||
if err := os.MkdirAll(accountsDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir accounts: %v", err)
|
||||
}
|
||||
|
||||
return townRoot, accountsDir
|
||||
}
|
||||
|
||||
func TestAccountSwitch(t *testing.T) {
|
||||
t.Run("switch between accounts", func(t *testing.T) {
|
||||
townRoot, accountsDir := setupTestTownForAccount(t)
|
||||
|
||||
// Create fake home directory for ~/.claude
|
||||
fakeHome := t.TempDir()
|
||||
originalHome := os.Getenv("HOME")
|
||||
os.Setenv("HOME", fakeHome)
|
||||
defer os.Setenv("HOME", originalHome)
|
||||
|
||||
// Create account config directories
|
||||
workConfigDir := filepath.Join(accountsDir, "work")
|
||||
personalConfigDir := filepath.Join(accountsDir, "personal")
|
||||
if err := os.MkdirAll(workConfigDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir work config: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(personalConfigDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir personal config: %v", err)
|
||||
}
|
||||
|
||||
// Create accounts.json with two accounts
|
||||
accountsPath := filepath.Join(townRoot, "mayor", "accounts.json")
|
||||
accountsCfg := config.NewAccountsConfig()
|
||||
accountsCfg.Accounts["work"] = config.Account{
|
||||
Email: "steve@work.com",
|
||||
ConfigDir: workConfigDir,
|
||||
}
|
||||
accountsCfg.Accounts["personal"] = config.Account{
|
||||
Email: "steve@personal.com",
|
||||
ConfigDir: personalConfigDir,
|
||||
}
|
||||
accountsCfg.Default = "work"
|
||||
if err := config.SaveAccountsConfig(accountsPath, accountsCfg); err != nil {
|
||||
t.Fatalf("save accounts.json: %v", err)
|
||||
}
|
||||
|
||||
// Create initial symlink to work account
|
||||
claudeDir := filepath.Join(fakeHome, ".claude")
|
||||
if err := os.Symlink(workConfigDir, claudeDir); err != nil {
|
||||
t.Fatalf("create symlink: %v", err)
|
||||
}
|
||||
|
||||
// Change to town root
|
||||
originalWd, _ := os.Getwd()
|
||||
defer os.Chdir(originalWd)
|
||||
if err := os.Chdir(townRoot); err != nil {
|
||||
t.Fatalf("chdir: %v", err)
|
||||
}
|
||||
|
||||
// Run switch to personal
|
||||
cmd := &cobra.Command{}
|
||||
err := runAccountSwitch(cmd, []string{"personal"})
|
||||
if err != nil {
|
||||
t.Fatalf("runAccountSwitch failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify symlink points to personal
|
||||
target, err := os.Readlink(claudeDir)
|
||||
if err != nil {
|
||||
t.Fatalf("readlink: %v", err)
|
||||
}
|
||||
if target != personalConfigDir {
|
||||
t.Errorf("symlink target = %q, want %q", target, personalConfigDir)
|
||||
}
|
||||
|
||||
// Verify default was updated
|
||||
loadedCfg, err := config.LoadAccountsConfig(accountsPath)
|
||||
if err != nil {
|
||||
t.Fatalf("load accounts: %v", err)
|
||||
}
|
||||
if loadedCfg.Default != "personal" {
|
||||
t.Errorf("default = %q, want 'personal'", loadedCfg.Default)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("already on target account", func(t *testing.T) {
|
||||
townRoot, accountsDir := setupTestTownForAccount(t)
|
||||
|
||||
fakeHome := t.TempDir()
|
||||
originalHome := os.Getenv("HOME")
|
||||
os.Setenv("HOME", fakeHome)
|
||||
defer os.Setenv("HOME", originalHome)
|
||||
|
||||
workConfigDir := filepath.Join(accountsDir, "work")
|
||||
if err := os.MkdirAll(workConfigDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir work config: %v", err)
|
||||
}
|
||||
|
||||
accountsPath := filepath.Join(townRoot, "mayor", "accounts.json")
|
||||
accountsCfg := config.NewAccountsConfig()
|
||||
accountsCfg.Accounts["work"] = config.Account{
|
||||
Email: "steve@work.com",
|
||||
ConfigDir: workConfigDir,
|
||||
}
|
||||
accountsCfg.Default = "work"
|
||||
if err := config.SaveAccountsConfig(accountsPath, accountsCfg); err != nil {
|
||||
t.Fatalf("save accounts.json: %v", err)
|
||||
}
|
||||
|
||||
// Create symlink already pointing to work
|
||||
claudeDir := filepath.Join(fakeHome, ".claude")
|
||||
if err := os.Symlink(workConfigDir, claudeDir); err != nil {
|
||||
t.Fatalf("create symlink: %v", err)
|
||||
}
|
||||
|
||||
originalWd, _ := os.Getwd()
|
||||
defer os.Chdir(originalWd)
|
||||
if err := os.Chdir(townRoot); err != nil {
|
||||
t.Fatalf("chdir: %v", err)
|
||||
}
|
||||
|
||||
// Switch to work (should be no-op)
|
||||
cmd := &cobra.Command{}
|
||||
err := runAccountSwitch(cmd, []string{"work"})
|
||||
if err != nil {
|
||||
t.Fatalf("runAccountSwitch failed: %v", err)
|
||||
}
|
||||
|
||||
// Symlink should still point to work
|
||||
target, err := os.Readlink(claudeDir)
|
||||
if err != nil {
|
||||
t.Fatalf("readlink: %v", err)
|
||||
}
|
||||
if target != workConfigDir {
|
||||
t.Errorf("symlink target = %q, want %q", target, workConfigDir)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("nonexistent account", func(t *testing.T) {
|
||||
townRoot, accountsDir := setupTestTownForAccount(t)
|
||||
|
||||
fakeHome := t.TempDir()
|
||||
originalHome := os.Getenv("HOME")
|
||||
os.Setenv("HOME", fakeHome)
|
||||
defer os.Setenv("HOME", originalHome)
|
||||
|
||||
workConfigDir := filepath.Join(accountsDir, "work")
|
||||
if err := os.MkdirAll(workConfigDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir work config: %v", err)
|
||||
}
|
||||
|
||||
accountsPath := filepath.Join(townRoot, "mayor", "accounts.json")
|
||||
accountsCfg := config.NewAccountsConfig()
|
||||
accountsCfg.Accounts["work"] = config.Account{
|
||||
Email: "steve@work.com",
|
||||
ConfigDir: workConfigDir,
|
||||
}
|
||||
accountsCfg.Default = "work"
|
||||
if err := config.SaveAccountsConfig(accountsPath, accountsCfg); err != nil {
|
||||
t.Fatalf("save accounts.json: %v", err)
|
||||
}
|
||||
|
||||
originalWd, _ := os.Getwd()
|
||||
defer os.Chdir(originalWd)
|
||||
if err := os.Chdir(townRoot); err != nil {
|
||||
t.Fatalf("chdir: %v", err)
|
||||
}
|
||||
|
||||
// Switch to nonexistent account
|
||||
cmd := &cobra.Command{}
|
||||
err := runAccountSwitch(cmd, []string{"nonexistent"})
|
||||
if err == nil {
|
||||
t.Fatal("expected error for nonexistent account")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("real directory gets moved", func(t *testing.T) {
|
||||
townRoot, accountsDir := setupTestTownForAccount(t)
|
||||
|
||||
fakeHome := t.TempDir()
|
||||
originalHome := os.Getenv("HOME")
|
||||
os.Setenv("HOME", fakeHome)
|
||||
defer os.Setenv("HOME", originalHome)
|
||||
|
||||
workConfigDir := filepath.Join(accountsDir, "work")
|
||||
personalConfigDir := filepath.Join(accountsDir, "personal")
|
||||
// Don't create workConfigDir - it will be created by moving ~/.claude
|
||||
if err := os.MkdirAll(personalConfigDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir personal config: %v", err)
|
||||
}
|
||||
|
||||
accountsPath := filepath.Join(townRoot, "mayor", "accounts.json")
|
||||
accountsCfg := config.NewAccountsConfig()
|
||||
accountsCfg.Accounts["work"] = config.Account{
|
||||
Email: "steve@work.com",
|
||||
ConfigDir: workConfigDir,
|
||||
}
|
||||
accountsCfg.Accounts["personal"] = config.Account{
|
||||
Email: "steve@personal.com",
|
||||
ConfigDir: personalConfigDir,
|
||||
}
|
||||
accountsCfg.Default = "work"
|
||||
if err := config.SaveAccountsConfig(accountsPath, accountsCfg); err != nil {
|
||||
t.Fatalf("save accounts.json: %v", err)
|
||||
}
|
||||
|
||||
// Create ~/.claude as a real directory with a marker file
|
||||
claudeDir := filepath.Join(fakeHome, ".claude")
|
||||
if err := os.MkdirAll(claudeDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir .claude: %v", err)
|
||||
}
|
||||
markerFile := filepath.Join(claudeDir, "marker.txt")
|
||||
if err := os.WriteFile(markerFile, []byte("test"), 0644); err != nil {
|
||||
t.Fatalf("write marker: %v", err)
|
||||
}
|
||||
|
||||
originalWd, _ := os.Getwd()
|
||||
defer os.Chdir(originalWd)
|
||||
if err := os.Chdir(townRoot); err != nil {
|
||||
t.Fatalf("chdir: %v", err)
|
||||
}
|
||||
|
||||
// Switch to personal
|
||||
cmd := &cobra.Command{}
|
||||
err := runAccountSwitch(cmd, []string{"personal"})
|
||||
if err != nil {
|
||||
t.Fatalf("runAccountSwitch failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify ~/.claude is now a symlink to personal
|
||||
fileInfo, err := os.Lstat(claudeDir)
|
||||
if err != nil {
|
||||
t.Fatalf("lstat .claude: %v", err)
|
||||
}
|
||||
if fileInfo.Mode()&os.ModeSymlink == 0 {
|
||||
t.Error("~/.claude is not a symlink")
|
||||
}
|
||||
|
||||
target, err := os.Readlink(claudeDir)
|
||||
if err != nil {
|
||||
t.Fatalf("readlink: %v", err)
|
||||
}
|
||||
if target != personalConfigDir {
|
||||
t.Errorf("symlink target = %q, want %q", target, personalConfigDir)
|
||||
}
|
||||
|
||||
// Verify original content was moved to work config dir
|
||||
movedMarker := filepath.Join(workConfigDir, "marker.txt")
|
||||
if _, err := os.Stat(movedMarker); err != nil {
|
||||
t.Errorf("marker file not moved to work config dir: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -84,14 +84,12 @@ func (v beadsVersion) compare(other beadsVersion) int {
|
||||
return 0
|
||||
}
|
||||
|
||||
// getBeadsVersion executes `bd --version` and parses the output.
|
||||
// Returns the version string (e.g., "0.44.0") or error.
|
||||
func getBeadsVersion() (string, error) {
|
||||
cmd := exec.Command("bd", "--version")
|
||||
cmd := exec.Command("bd", "version")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
return "", fmt.Errorf("bd --version failed: %s", string(exitErr.Stderr))
|
||||
return "", fmt.Errorf("bd version failed: %s", string(exitErr.Stderr))
|
||||
}
|
||||
return "", fmt.Errorf("failed to run bd: %w (is beads installed?)", err)
|
||||
}
|
||||
|
||||
@@ -153,7 +153,7 @@ func runConfigAgentList(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// Collect all agents
|
||||
builtInAgents := []string{"claude", "gemini", "codex"}
|
||||
builtInAgents := config.ListAgentPresets()
|
||||
customAgents := make(map[string]*config.RuntimeConfig)
|
||||
if townSettings.Agents != nil {
|
||||
for name, runtime := range townSettings.Agents {
|
||||
@@ -330,7 +330,7 @@ func runConfigAgentSet(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf("Agent '%s' set to: %s\n", style.Bold.Render(name), commandLine)
|
||||
|
||||
// Check if this overrides a built-in
|
||||
builtInAgents := []string{"claude", "gemini", "codex"}
|
||||
builtInAgents := config.ListAgentPresets()
|
||||
for _, builtin := range builtInAgents {
|
||||
if name == builtin {
|
||||
fmt.Printf("\n%s\n", style.Dim.Render("(overriding built-in '"+builtin+"' preset)"))
|
||||
@@ -350,7 +350,7 @@ func runConfigAgentRemove(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// Check if trying to remove built-in
|
||||
builtInAgents := []string{"claude", "gemini", "codex"}
|
||||
builtInAgents := config.ListAgentPresets()
|
||||
for _, builtin := range builtInAgents {
|
||||
if name == builtin {
|
||||
return fmt.Errorf("cannot remove built-in agent '%s' (use 'gt config agent set' to override it)", name)
|
||||
@@ -415,7 +415,7 @@ func runConfigDefaultAgent(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Verify agent exists
|
||||
isValid := false
|
||||
builtInAgents := []string{"claude", "gemini", "codex"}
|
||||
builtInAgents := config.ListAgentPresets()
|
||||
for _, builtin := range builtInAgents {
|
||||
if name == builtin {
|
||||
isValid = true
|
||||
|
||||
@@ -18,15 +18,24 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
costsJSON bool
|
||||
costsToday bool
|
||||
costsWeek bool
|
||||
costsByRole bool
|
||||
costsByRig bool
|
||||
costsJSON bool
|
||||
costsToday bool
|
||||
costsWeek bool
|
||||
costsByRole bool
|
||||
costsByRig bool
|
||||
costsVerbose bool
|
||||
|
||||
// Record subcommand flags
|
||||
recordSession string
|
||||
recordWorkItem string
|
||||
|
||||
// Digest subcommand flags
|
||||
digestYesterday bool
|
||||
digestDate string
|
||||
digestDryRun bool
|
||||
|
||||
// Migrate subcommand flags
|
||||
migrateDryRun bool
|
||||
)
|
||||
|
||||
var costsCmd = &cobra.Command{
|
||||
@@ -37,24 +46,34 @@ var costsCmd = &cobra.Command{
|
||||
|
||||
By default, shows live costs scraped from running tmux sessions.
|
||||
|
||||
Cost tracking uses ephemeral wisps for individual sessions that are
|
||||
aggregated into daily "Cost Report" digest beads for audit purposes.
|
||||
|
||||
Examples:
|
||||
gt costs # Live costs from running sessions
|
||||
gt costs --today # Today's total from session events
|
||||
gt costs --week # This week's total
|
||||
gt costs --today # Today's costs from wisps (not yet digested)
|
||||
gt costs --week # This week's costs from digest beads + today's wisps
|
||||
gt costs --by-role # Breakdown by role (polecat, witness, etc.)
|
||||
gt costs --by-rig # Breakdown by rig
|
||||
gt costs --json # Output as JSON`,
|
||||
gt costs --json # Output as JSON
|
||||
|
||||
Subcommands:
|
||||
gt costs record # Record session cost as ephemeral wisp (Stop hook)
|
||||
gt costs digest # Aggregate wisps into daily digest bead (Deacon patrol)`,
|
||||
RunE: runCosts,
|
||||
}
|
||||
|
||||
var costsRecordCmd = &cobra.Command{
|
||||
Use: "record",
|
||||
Short: "Record session cost as a bead event (called by Stop hook)",
|
||||
Long: `Record the final cost of a session as a session.ended event in beads.
|
||||
Short: "Record session cost as an ephemeral wisp (called by Stop hook)",
|
||||
Long: `Record the final cost of a session as an ephemeral wisp.
|
||||
|
||||
This command is intended to be called from a Claude Code Stop hook.
|
||||
It captures the final cost from the tmux session and creates an event
|
||||
bead with the cost data.
|
||||
It captures the final cost from the tmux session and creates an ephemeral
|
||||
event that is NOT exported to JSONL (avoiding log-in-database pollution).
|
||||
|
||||
Session cost wisps are aggregated daily by 'gt costs digest' into a single
|
||||
permanent "Cost Report YYYY-MM-DD" bead for audit purposes.
|
||||
|
||||
Examples:
|
||||
gt costs record --session gt-gastown-toast
|
||||
@@ -62,6 +81,46 @@ Examples:
|
||||
RunE: runCostsRecord,
|
||||
}
|
||||
|
||||
var costsDigestCmd = &cobra.Command{
|
||||
Use: "digest",
|
||||
Short: "Aggregate session cost wisps into a daily digest bead",
|
||||
Long: `Aggregate ephemeral session cost wisps into a permanent daily digest.
|
||||
|
||||
This command is intended to be run by Deacon patrol (daily) or manually.
|
||||
It queries session.ended wisps for a target date, creates a single aggregate
|
||||
"Cost Report YYYY-MM-DD" bead, then deletes the source wisps.
|
||||
|
||||
The resulting digest bead is permanent (exported to JSONL, synced via git)
|
||||
and provides an audit trail without log-in-database pollution.
|
||||
|
||||
Examples:
|
||||
gt costs digest --yesterday # Digest yesterday's costs (default for patrol)
|
||||
gt costs digest --date 2026-01-07 # Digest a specific date
|
||||
gt costs digest --yesterday --dry-run # Preview without changes`,
|
||||
RunE: runCostsDigest,
|
||||
}
|
||||
|
||||
var costsMigrateCmd = &cobra.Command{
|
||||
Use: "migrate",
|
||||
Short: "Migrate legacy session.ended beads to the new wisp architecture",
|
||||
Long: `Migrate legacy session.ended event beads to the new cost tracking system.
|
||||
|
||||
This command handles the transition from the old architecture (where each
|
||||
session.ended event was a permanent bead) to the new wisp-based system.
|
||||
|
||||
The migration:
|
||||
1. Finds all open session.ended event beads (should be none if auto-close worked)
|
||||
2. Closes them with reason "migrated to wisp architecture"
|
||||
|
||||
Legacy beads remain in the database for historical queries but won't interfere
|
||||
with the new wisp-based cost tracking.
|
||||
|
||||
Examples:
|
||||
gt costs migrate # Migrate legacy beads
|
||||
gt costs migrate --dry-run # Preview what would be migrated`,
|
||||
RunE: runCostsMigrate,
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(costsCmd)
|
||||
costsCmd.Flags().BoolVar(&costsJSON, "json", false, "Output as JSON")
|
||||
@@ -69,11 +128,22 @@ func init() {
|
||||
costsCmd.Flags().BoolVar(&costsWeek, "week", false, "Show this week's total from session events")
|
||||
costsCmd.Flags().BoolVar(&costsByRole, "by-role", false, "Show breakdown by role")
|
||||
costsCmd.Flags().BoolVar(&costsByRig, "by-rig", false, "Show breakdown by rig")
|
||||
costsCmd.Flags().BoolVarP(&costsVerbose, "verbose", "v", false, "Show debug output for failures")
|
||||
|
||||
// Add record subcommand
|
||||
costsCmd.AddCommand(costsRecordCmd)
|
||||
costsRecordCmd.Flags().StringVar(&recordSession, "session", "", "Tmux session name to record")
|
||||
costsRecordCmd.Flags().StringVar(&recordWorkItem, "work-item", "", "Work item ID (bead) for attribution")
|
||||
|
||||
// Add digest subcommand
|
||||
costsCmd.AddCommand(costsDigestCmd)
|
||||
costsDigestCmd.Flags().BoolVar(&digestYesterday, "yesterday", false, "Digest yesterday's costs (default for patrol)")
|
||||
costsDigestCmd.Flags().StringVar(&digestDate, "date", "", "Digest a specific date (YYYY-MM-DD)")
|
||||
costsDigestCmd.Flags().BoolVar(&digestDryRun, "dry-run", false, "Preview what would be done without making changes")
|
||||
|
||||
// Add migrate subcommand
|
||||
costsCmd.AddCommand(costsMigrateCmd)
|
||||
costsMigrateCmd.Flags().BoolVar(&migrateDryRun, "dry-run", false, "Preview what would be migrated without making changes")
|
||||
}
|
||||
|
||||
// SessionCost represents cost info for a single session.
|
||||
@@ -180,46 +250,48 @@ func runLiveCosts() error {
|
||||
}
|
||||
|
||||
func runCostsFromLedger() error {
|
||||
// Query session events from beads
|
||||
entries, err := querySessionEvents()
|
||||
if err != nil {
|
||||
return fmt.Errorf("querying session events: %w", err)
|
||||
now := time.Now()
|
||||
var entries []CostEntry
|
||||
var err error
|
||||
|
||||
if costsToday {
|
||||
// For today: query ephemeral wisps (not yet digested)
|
||||
// This gives real-time view of today's costs
|
||||
entries, err = querySessionCostWisps(now)
|
||||
if err != nil {
|
||||
return fmt.Errorf("querying session cost wisps: %w", err)
|
||||
}
|
||||
} else if costsWeek {
|
||||
// For week: query digest beads (costs.digest events)
|
||||
// These are the aggregated daily reports
|
||||
entries, err = queryDigestBeads(7)
|
||||
if err != nil {
|
||||
return fmt.Errorf("querying digest beads: %w", err)
|
||||
}
|
||||
|
||||
// Also include today's wisps (not yet digested)
|
||||
todayWisps, _ := querySessionCostWisps(now)
|
||||
entries = append(entries, todayWisps...)
|
||||
} else {
|
||||
// No time filter: query both digests and legacy session.ended events
|
||||
// (for backwards compatibility during migration)
|
||||
entries, err = querySessionEvents()
|
||||
if err != nil {
|
||||
return fmt.Errorf("querying session events: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(entries) == 0 {
|
||||
fmt.Println(style.Dim.Render("No session events found. Costs are recorded when sessions end."))
|
||||
fmt.Println(style.Dim.Render("No cost data found. Costs are recorded when sessions end."))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Filter entries by time period
|
||||
var filtered []CostEntry
|
||||
now := time.Now()
|
||||
|
||||
for _, entry := range entries {
|
||||
if costsToday {
|
||||
// Today: same day
|
||||
if entry.EndedAt.Year() == now.Year() &&
|
||||
entry.EndedAt.YearDay() == now.YearDay() {
|
||||
filtered = append(filtered, entry)
|
||||
}
|
||||
} else if costsWeek {
|
||||
// This week: within 7 days
|
||||
weekAgo := now.AddDate(0, 0, -7)
|
||||
if entry.EndedAt.After(weekAgo) {
|
||||
filtered = append(filtered, entry)
|
||||
}
|
||||
} else {
|
||||
// No time filter
|
||||
filtered = append(filtered, entry)
|
||||
}
|
||||
}
|
||||
|
||||
// Calculate totals
|
||||
var total float64
|
||||
byRole := make(map[string]float64)
|
||||
byRig := make(map[string]float64)
|
||||
|
||||
for _, entry := range filtered {
|
||||
for _, entry := range entries {
|
||||
total += entry.CostUSD
|
||||
byRole[entry.Role] += entry.CostUSD
|
||||
if entry.Rig != "" {
|
||||
@@ -250,7 +322,7 @@ func runCostsFromLedger() error {
|
||||
return outputCostsJSON(output)
|
||||
}
|
||||
|
||||
return outputLedgerHuman(output, filtered)
|
||||
return outputLedgerHuman(output, entries)
|
||||
}
|
||||
|
||||
// SessionEvent represents a session.ended event from beads.
|
||||
@@ -362,6 +434,84 @@ func querySessionEvents() ([]CostEntry, error) {
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// queryDigestBeads queries costs.digest events from the past N days and extracts session entries.
|
||||
func queryDigestBeads(days int) ([]CostEntry, error) {
|
||||
// Get list of event IDs
|
||||
listArgs := []string{
|
||||
"list",
|
||||
"--type=event",
|
||||
"--all",
|
||||
"--limit=0",
|
||||
"--json",
|
||||
}
|
||||
|
||||
listCmd := exec.Command("bd", listArgs...)
|
||||
listOutput, err := listCmd.Output()
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var listItems []EventListItem
|
||||
if err := json.Unmarshal(listOutput, &listItems); err != nil {
|
||||
return nil, fmt.Errorf("parsing event list: %w", err)
|
||||
}
|
||||
|
||||
if len(listItems) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get full details for all events
|
||||
showArgs := []string{"show", "--json"}
|
||||
for _, item := range listItems {
|
||||
showArgs = append(showArgs, item.ID)
|
||||
}
|
||||
|
||||
showCmd := exec.Command("bd", showArgs...)
|
||||
showOutput, err := showCmd.Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("showing events: %w", err)
|
||||
}
|
||||
|
||||
var events []SessionEvent
|
||||
if err := json.Unmarshal(showOutput, &events); err != nil {
|
||||
return nil, fmt.Errorf("parsing event details: %w", err)
|
||||
}
|
||||
|
||||
// Calculate date range
|
||||
now := time.Now()
|
||||
cutoff := now.AddDate(0, 0, -days)
|
||||
|
||||
var entries []CostEntry
|
||||
for _, event := range events {
|
||||
// Filter for costs.digest events only
|
||||
if event.EventKind != "costs.digest" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse the digest payload
|
||||
var digest CostDigest
|
||||
if event.Payload != "" {
|
||||
if err := json.Unmarshal([]byte(event.Payload), &digest); err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Check date is within range
|
||||
digestDate, err := time.Parse("2006-01-02", digest.Date)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if digestDate.Before(cutoff) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract individual session entries from the digest
|
||||
entries = append(entries, digest.Sessions...)
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// parseSessionName extracts role, rig, and worker from a session name.
|
||||
// Session names follow the pattern: gt-<rig>-<worker> or gt-<global-agent>
|
||||
// Examples:
|
||||
@@ -574,9 +724,14 @@ func runCostsRecord(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("marshaling payload: %w", err)
|
||||
}
|
||||
|
||||
// Build bd create command
|
||||
// Build bd create command for ephemeral wisp
|
||||
// Using --ephemeral creates a wisp that:
|
||||
// - Is stored locally only (not exported to JSONL)
|
||||
// - Won't pollute git history with O(sessions/day) events
|
||||
// - Will be aggregated into daily digests by 'gt costs digest'
|
||||
bdArgs := []string{
|
||||
"create",
|
||||
"--ephemeral",
|
||||
"--type=event",
|
||||
"--title=" + title,
|
||||
"--event-category=session.ended",
|
||||
@@ -593,30 +748,28 @@ func runCostsRecord(cmd *cobra.Command, args []string) error {
|
||||
// NOTE: We intentionally don't use --rig flag here because it causes
|
||||
// event fields (event_kind, actor, payload) to not be stored properly.
|
||||
// The bd command will auto-detect the correct rig from cwd.
|
||||
// TODO: File beads bug about --rig flag losing event fields.
|
||||
|
||||
// Execute bd create
|
||||
bdCmd := exec.Command("bd", bdArgs...)
|
||||
output, err := bdCmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating session event: %w\nOutput: %s", err, string(output))
|
||||
return fmt.Errorf("creating session cost wisp: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
|
||||
eventID := strings.TrimSpace(string(output))
|
||||
wispID := strings.TrimSpace(string(output))
|
||||
|
||||
// Auto-close session events immediately after creation.
|
||||
// These are informational audit events that don't need to stay open.
|
||||
// The event data is preserved in the closed bead and remains queryable.
|
||||
closeCmd := exec.Command("bd", "close", eventID, "--reason=auto-closed session event")
|
||||
// Auto-close session cost wisps immediately after creation.
|
||||
// These are informational records that don't need to stay open.
|
||||
// The wisp data is preserved and queryable until digested.
|
||||
closeCmd := exec.Command("bd", "close", wispID, "--reason=auto-closed session cost wisp")
|
||||
if closeErr := closeCmd.Run(); closeErr != nil {
|
||||
// Non-fatal: event was created, just couldn't auto-close
|
||||
// The witness patrol can clean these up if needed
|
||||
fmt.Fprintf(os.Stderr, "warning: could not auto-close session event %s: %v\n", eventID, closeErr)
|
||||
// Non-fatal: wisp was created, just couldn't auto-close
|
||||
fmt.Fprintf(os.Stderr, "warning: could not auto-close session cost wisp %s: %v\n", wispID, closeErr)
|
||||
}
|
||||
|
||||
// Output confirmation (silent if cost is zero and no work item)
|
||||
if cost > 0 || recordWorkItem != "" {
|
||||
fmt.Printf("%s Recorded $%.2f for %s (event: %s)", style.Success.Render("✓"), cost, session, eventID)
|
||||
fmt.Printf("%s Recorded $%.2f for %s (wisp: %s)", style.Success.Render("✓"), cost, session, wispID)
|
||||
if recordWorkItem != "" {
|
||||
fmt.Printf(" (work: %s)", recordWorkItem)
|
||||
}
|
||||
@@ -649,9 +802,13 @@ func deriveSessionName() string {
|
||||
return fmt.Sprintf("gt-%s-crew-%s", rig, crew)
|
||||
}
|
||||
|
||||
// Town-level roles (mayor, deacon): gt-{town}-{role}
|
||||
if (role == "mayor" || role == "deacon") && town != "" {
|
||||
return fmt.Sprintf("gt-%s-%s", town, role)
|
||||
// Town-level roles (mayor, deacon): gt-{town}-{role} or gt-{role}
|
||||
if role == "mayor" || role == "deacon" {
|
||||
if town != "" {
|
||||
return fmt.Sprintf("gt-%s-%s", town, role)
|
||||
}
|
||||
// No town set - use simple gt-{role} pattern
|
||||
return fmt.Sprintf("gt-%s", role)
|
||||
}
|
||||
|
||||
// Rig-based roles (witness, refinery): gt-{rig}-{role}
|
||||
@@ -664,12 +821,9 @@ func deriveSessionName() string {
|
||||
|
||||
// detectCurrentTmuxSession returns the current tmux session name if running inside tmux.
|
||||
// Uses `tmux display-message -p '#S'` which prints the session name.
|
||||
// Note: We don't check TMUX env var because it may not be inherited when Claude Code
|
||||
// runs bash commands, even though we are inside a tmux session.
|
||||
func detectCurrentTmuxSession() string {
|
||||
// Check if we're inside tmux
|
||||
if os.Getenv("TMUX") == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
cmd := exec.Command("tmux", "display-message", "-p", "#S")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
@@ -722,3 +876,451 @@ func buildAgentPath(role, rig, worker string) string {
|
||||
return worker
|
||||
}
|
||||
}
|
||||
|
||||
// CostDigest represents the aggregated daily cost report.
|
||||
type CostDigest struct {
|
||||
Date string `json:"date"`
|
||||
TotalUSD float64 `json:"total_usd"`
|
||||
SessionCount int `json:"session_count"`
|
||||
Sessions []CostEntry `json:"sessions"`
|
||||
ByRole map[string]float64 `json:"by_role"`
|
||||
ByRig map[string]float64 `json:"by_rig,omitempty"`
|
||||
}
|
||||
|
||||
// WispListOutput represents the JSON output from bd mol wisp list.
|
||||
type WispListOutput struct {
|
||||
Wisps []WispItem `json:"wisps"`
|
||||
Count int `json:"count"`
|
||||
}
|
||||
|
||||
// WispItem represents a single wisp from bd mol wisp list.
|
||||
type WispItem struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Status string `json:"status"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
}
|
||||
|
||||
// runCostsDigest aggregates session cost wisps into a daily digest bead.
|
||||
func runCostsDigest(cmd *cobra.Command, args []string) error {
|
||||
// Determine target date
|
||||
var targetDate time.Time
|
||||
|
||||
if digestDate != "" {
|
||||
parsed, err := time.Parse("2006-01-02", digestDate)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid date format (use YYYY-MM-DD): %w", err)
|
||||
}
|
||||
targetDate = parsed
|
||||
} else if digestYesterday {
|
||||
targetDate = time.Now().AddDate(0, 0, -1)
|
||||
} else {
|
||||
return fmt.Errorf("specify --yesterday or --date YYYY-MM-DD")
|
||||
}
|
||||
|
||||
dateStr := targetDate.Format("2006-01-02")
|
||||
|
||||
// Query ephemeral session.ended wisps for target date
|
||||
wisps, err := querySessionCostWisps(targetDate)
|
||||
if err != nil {
|
||||
return fmt.Errorf("querying session cost wisps: %w", err)
|
||||
}
|
||||
|
||||
if len(wisps) == 0 {
|
||||
fmt.Printf("%s No session cost wisps found for %s\n", style.Dim.Render("○"), dateStr)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Build digest
|
||||
digest := CostDigest{
|
||||
Date: dateStr,
|
||||
Sessions: wisps,
|
||||
ByRole: make(map[string]float64),
|
||||
ByRig: make(map[string]float64),
|
||||
}
|
||||
|
||||
for _, w := range wisps {
|
||||
digest.TotalUSD += w.CostUSD
|
||||
digest.SessionCount++
|
||||
digest.ByRole[w.Role] += w.CostUSD
|
||||
if w.Rig != "" {
|
||||
digest.ByRig[w.Rig] += w.CostUSD
|
||||
}
|
||||
}
|
||||
|
||||
if digestDryRun {
|
||||
fmt.Printf("%s [DRY RUN] Would create Cost Report %s:\n", style.Bold.Render("📊"), dateStr)
|
||||
fmt.Printf(" Total: $%.2f\n", digest.TotalUSD)
|
||||
fmt.Printf(" Sessions: %d\n", digest.SessionCount)
|
||||
fmt.Printf(" By Role:\n")
|
||||
for role, cost := range digest.ByRole {
|
||||
fmt.Printf(" %s: $%.2f\n", role, cost)
|
||||
}
|
||||
if len(digest.ByRig) > 0 {
|
||||
fmt.Printf(" By Rig:\n")
|
||||
for rig, cost := range digest.ByRig {
|
||||
fmt.Printf(" %s: $%.2f\n", rig, cost)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create permanent digest bead
|
||||
digestID, err := createCostDigestBead(digest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating digest bead: %w", err)
|
||||
}
|
||||
|
||||
// Delete source wisps (they're ephemeral, use bd mol burn)
|
||||
deletedCount, deleteErr := deleteSessionCostWisps(targetDate)
|
||||
if deleteErr != nil {
|
||||
fmt.Fprintf(os.Stderr, "warning: failed to delete some source wisps: %v\n", deleteErr)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Created Cost Report %s (bead: %s)\n", style.Success.Render("✓"), dateStr, digestID)
|
||||
fmt.Printf(" Total: $%.2f from %d sessions\n", digest.TotalUSD, digest.SessionCount)
|
||||
if deletedCount > 0 {
|
||||
fmt.Printf(" Deleted %d source wisps\n", deletedCount)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// querySessionCostWisps queries ephemeral session.ended events for a target date.
|
||||
func querySessionCostWisps(targetDate time.Time) ([]CostEntry, error) {
|
||||
// List all wisps including closed ones
|
||||
listCmd := exec.Command("bd", "mol", "wisp", "list", "--all", "--json")
|
||||
listOutput, err := listCmd.Output()
|
||||
if err != nil {
|
||||
// No wisps database or command failed
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] wisp list failed: %v\n", err)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var wispList WispListOutput
|
||||
if err := json.Unmarshal(listOutput, &wispList); err != nil {
|
||||
return nil, fmt.Errorf("parsing wisp list: %w", err)
|
||||
}
|
||||
|
||||
if wispList.Count == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Batch all wisp IDs into a single bd show call to avoid N+1 queries
|
||||
showArgs := []string{"show", "--json"}
|
||||
for _, wisp := range wispList.Wisps {
|
||||
showArgs = append(showArgs, wisp.ID)
|
||||
}
|
||||
|
||||
showCmd := exec.Command("bd", showArgs...)
|
||||
showOutput, err := showCmd.Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("showing wisps: %w", err)
|
||||
}
|
||||
|
||||
var events []SessionEvent
|
||||
if err := json.Unmarshal(showOutput, &events); err != nil {
|
||||
return nil, fmt.Errorf("parsing wisp details: %w", err)
|
||||
}
|
||||
|
||||
var sessionCostWisps []CostEntry
|
||||
targetDay := targetDate.Format("2006-01-02")
|
||||
|
||||
for _, event := range events {
|
||||
// Filter for session.ended events only
|
||||
if event.EventKind != "session.ended" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse payload
|
||||
var payload SessionPayload
|
||||
if event.Payload != "" {
|
||||
if err := json.Unmarshal([]byte(event.Payload), &payload); err != nil {
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] payload unmarshal failed for event %s: %v\n", event.ID, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Parse ended_at and filter by target date
|
||||
endedAt := event.CreatedAt
|
||||
if payload.EndedAt != "" {
|
||||
if parsed, err := time.Parse(time.RFC3339, payload.EndedAt); err == nil {
|
||||
endedAt = parsed
|
||||
}
|
||||
}
|
||||
|
||||
// Check if this event is from the target date
|
||||
if endedAt.Format("2006-01-02") != targetDay {
|
||||
continue
|
||||
}
|
||||
|
||||
sessionCostWisps = append(sessionCostWisps, CostEntry{
|
||||
SessionID: payload.SessionID,
|
||||
Role: payload.Role,
|
||||
Rig: payload.Rig,
|
||||
Worker: payload.Worker,
|
||||
CostUSD: payload.CostUSD,
|
||||
EndedAt: endedAt,
|
||||
WorkItem: event.Target,
|
||||
})
|
||||
}
|
||||
|
||||
return sessionCostWisps, nil
|
||||
}
|
||||
|
||||
// createCostDigestBead creates a permanent bead for the daily cost digest.
|
||||
func createCostDigestBead(digest CostDigest) (string, error) {
|
||||
// Build description with aggregate data
|
||||
var desc strings.Builder
|
||||
desc.WriteString(fmt.Sprintf("Daily cost aggregate for %s.\n\n", digest.Date))
|
||||
desc.WriteString(fmt.Sprintf("**Total:** $%.2f from %d sessions\n\n", digest.TotalUSD, digest.SessionCount))
|
||||
|
||||
if len(digest.ByRole) > 0 {
|
||||
desc.WriteString("## By Role\n")
|
||||
roles := make([]string, 0, len(digest.ByRole))
|
||||
for role := range digest.ByRole {
|
||||
roles = append(roles, role)
|
||||
}
|
||||
sort.Strings(roles)
|
||||
for _, role := range roles {
|
||||
icon := constants.RoleEmoji(role)
|
||||
desc.WriteString(fmt.Sprintf("- %s %s: $%.2f\n", icon, role, digest.ByRole[role]))
|
||||
}
|
||||
desc.WriteString("\n")
|
||||
}
|
||||
|
||||
if len(digest.ByRig) > 0 {
|
||||
desc.WriteString("## By Rig\n")
|
||||
rigs := make([]string, 0, len(digest.ByRig))
|
||||
for rig := range digest.ByRig {
|
||||
rigs = append(rigs, rig)
|
||||
}
|
||||
sort.Strings(rigs)
|
||||
for _, rig := range rigs {
|
||||
desc.WriteString(fmt.Sprintf("- %s: $%.2f\n", rig, digest.ByRig[rig]))
|
||||
}
|
||||
desc.WriteString("\n")
|
||||
}
|
||||
|
||||
// Build payload JSON with full session details
|
||||
payloadJSON, err := json.Marshal(digest)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("marshaling digest payload: %w", err)
|
||||
}
|
||||
|
||||
// Create the digest bead (NOT ephemeral - this is permanent)
|
||||
title := fmt.Sprintf("Cost Report %s", digest.Date)
|
||||
bdArgs := []string{
|
||||
"create",
|
||||
"--type=event",
|
||||
"--title=" + title,
|
||||
"--event-category=costs.digest",
|
||||
"--event-payload=" + string(payloadJSON),
|
||||
"--description=" + desc.String(),
|
||||
"--silent",
|
||||
}
|
||||
|
||||
bdCmd := exec.Command("bd", bdArgs...)
|
||||
output, err := bdCmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("creating digest bead: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
|
||||
digestID := strings.TrimSpace(string(output))
|
||||
|
||||
// Auto-close the digest (it's an audit record, not work)
|
||||
closeCmd := exec.Command("bd", "close", digestID, "--reason=daily cost digest")
|
||||
_ = closeCmd.Run() // Best effort
|
||||
|
||||
return digestID, nil
|
||||
}
|
||||
|
||||
// deleteSessionCostWisps deletes ephemeral session.ended wisps for a target date.
|
||||
func deleteSessionCostWisps(targetDate time.Time) (int, error) {
|
||||
// List all wisps
|
||||
listCmd := exec.Command("bd", "mol", "wisp", "list", "--all", "--json")
|
||||
listOutput, err := listCmd.Output()
|
||||
if err != nil {
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] wisp list failed in deletion: %v\n", err)
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
var wispList WispListOutput
|
||||
if err := json.Unmarshal(listOutput, &wispList); err != nil {
|
||||
return 0, fmt.Errorf("parsing wisp list: %w", err)
|
||||
}
|
||||
|
||||
targetDay := targetDate.Format("2006-01-02")
|
||||
|
||||
// Collect all wisp IDs that match our criteria
|
||||
var wispIDsToDelete []string
|
||||
|
||||
for _, wisp := range wispList.Wisps {
|
||||
// Get full wisp details to check if it's a session.ended event
|
||||
showCmd := exec.Command("bd", "show", wisp.ID, "--json")
|
||||
showOutput, err := showCmd.Output()
|
||||
if err != nil {
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] bd show failed for wisp %s: %v\n", wisp.ID, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
var events []SessionEvent
|
||||
if err := json.Unmarshal(showOutput, &events); err != nil {
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] JSON unmarshal failed for wisp %s: %v\n", wisp.ID, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if len(events) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
event := events[0]
|
||||
|
||||
// Only delete session.ended wisps
|
||||
if event.EventKind != "session.ended" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse payload to get ended_at for date filtering
|
||||
var payload SessionPayload
|
||||
if event.Payload != "" {
|
||||
if err := json.Unmarshal([]byte(event.Payload), &payload); err != nil {
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] payload unmarshal failed for wisp %s: %v\n", wisp.ID, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
endedAt := event.CreatedAt
|
||||
if payload.EndedAt != "" {
|
||||
if parsed, err := time.Parse(time.RFC3339, payload.EndedAt); err == nil {
|
||||
endedAt = parsed
|
||||
}
|
||||
}
|
||||
|
||||
// Only delete wisps from the target date
|
||||
if endedAt.Format("2006-01-02") != targetDay {
|
||||
continue
|
||||
}
|
||||
|
||||
wispIDsToDelete = append(wispIDsToDelete, wisp.ID)
|
||||
}
|
||||
|
||||
if len(wispIDsToDelete) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Batch delete all wisps in a single subprocess call
|
||||
burnArgs := append([]string{"mol", "burn", "--force"}, wispIDsToDelete...)
|
||||
burnCmd := exec.Command("bd", burnArgs...)
|
||||
if burnErr := burnCmd.Run(); burnErr != nil {
|
||||
return 0, fmt.Errorf("batch burn failed: %w", burnErr)
|
||||
}
|
||||
|
||||
return len(wispIDsToDelete), nil
|
||||
}
|
||||
|
||||
// runCostsMigrate migrates legacy session.ended beads to the new architecture.
|
||||
func runCostsMigrate(cmd *cobra.Command, args []string) error {
|
||||
// Query all session.ended events (both open and closed)
|
||||
listArgs := []string{
|
||||
"list",
|
||||
"--type=event",
|
||||
"--all",
|
||||
"--limit=0",
|
||||
"--json",
|
||||
}
|
||||
|
||||
listCmd := exec.Command("bd", listArgs...)
|
||||
listOutput, err := listCmd.Output()
|
||||
if err != nil {
|
||||
fmt.Println(style.Dim.Render("No events found or bd command failed"))
|
||||
return nil
|
||||
}
|
||||
|
||||
var listItems []EventListItem
|
||||
if err := json.Unmarshal(listOutput, &listItems); err != nil {
|
||||
return fmt.Errorf("parsing event list: %w", err)
|
||||
}
|
||||
|
||||
if len(listItems) == 0 {
|
||||
fmt.Println(style.Dim.Render("No events found"))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get full details for all events
|
||||
showArgs := []string{"show", "--json"}
|
||||
for _, item := range listItems {
|
||||
showArgs = append(showArgs, item.ID)
|
||||
}
|
||||
|
||||
showCmd := exec.Command("bd", showArgs...)
|
||||
showOutput, err := showCmd.Output()
|
||||
if err != nil {
|
||||
return fmt.Errorf("showing events: %w", err)
|
||||
}
|
||||
|
||||
var events []SessionEvent
|
||||
if err := json.Unmarshal(showOutput, &events); err != nil {
|
||||
return fmt.Errorf("parsing event details: %w", err)
|
||||
}
|
||||
|
||||
// Find open session.ended events
|
||||
var openEvents []SessionEvent
|
||||
var closedCount int
|
||||
for _, event := range events {
|
||||
if event.EventKind != "session.ended" {
|
||||
continue
|
||||
}
|
||||
if event.Status == "closed" {
|
||||
closedCount++
|
||||
continue
|
||||
}
|
||||
openEvents = append(openEvents, event)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Legacy session.ended beads:\n", style.Bold.Render("📊"))
|
||||
fmt.Printf(" Closed: %d (no action needed)\n", closedCount)
|
||||
fmt.Printf(" Open: %d (will be closed)\n", len(openEvents))
|
||||
|
||||
if len(openEvents) == 0 {
|
||||
fmt.Println(style.Success.Render("\n✓ No migration needed - all session.ended events are already closed"))
|
||||
return nil
|
||||
}
|
||||
|
||||
if migrateDryRun {
|
||||
fmt.Printf("\n%s Would close %d open session.ended events\n", style.Bold.Render("[DRY RUN]"), len(openEvents))
|
||||
for _, event := range openEvents {
|
||||
fmt.Printf(" - %s: %s\n", event.ID, event.Title)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close all open session.ended events
|
||||
closedMigrated := 0
|
||||
for _, event := range openEvents {
|
||||
closeCmd := exec.Command("bd", "close", event.ID, "--reason=migrated to wisp architecture")
|
||||
if err := closeCmd.Run(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "warning: could not close %s: %v\n", event.ID, err)
|
||||
continue
|
||||
}
|
||||
closedMigrated++
|
||||
}
|
||||
|
||||
fmt.Printf("\n%s Migrated %d session.ended events (closed)\n", style.Success.Render("✓"), closedMigrated)
|
||||
fmt.Println(style.Dim.Render("Legacy beads preserved for historical queries."))
|
||||
fmt.Println(style.Dim.Render("New session costs will use ephemeral wisps + daily digests."))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -61,6 +61,20 @@ func TestDeriveSessionName(t *testing.T) {
|
||||
},
|
||||
expected: "gt-ai-deacon",
|
||||
},
|
||||
{
|
||||
name: "mayor session without GT_TOWN",
|
||||
envVars: map[string]string{
|
||||
"GT_ROLE": "mayor",
|
||||
},
|
||||
expected: "gt-mayor",
|
||||
},
|
||||
{
|
||||
name: "deacon session without GT_TOWN",
|
||||
envVars: map[string]string{
|
||||
"GT_ROLE": "deacon",
|
||||
},
|
||||
expected: "gt-deacon",
|
||||
},
|
||||
{
|
||||
name: "no env vars",
|
||||
envVars: map[string]string{},
|
||||
|
||||
@@ -19,6 +19,7 @@ var (
|
||||
crewAccount string
|
||||
crewAgentOverride string
|
||||
crewAll bool
|
||||
crewListAll bool
|
||||
crewDryRun bool
|
||||
)
|
||||
|
||||
@@ -77,7 +78,8 @@ Shows git branch, session state, and git status for each workspace.
|
||||
|
||||
Examples:
|
||||
gt crew list # List in current rig
|
||||
gt crew list --rig greenplace # List in specific rig
|
||||
gt crew list --rig greenplace # List in specific rig
|
||||
gt crew list --all # List in all rigs
|
||||
gt crew list --json # JSON output`,
|
||||
RunE: runCrewList,
|
||||
}
|
||||
@@ -323,6 +325,7 @@ func init() {
|
||||
crewAddCmd.Flags().BoolVar(&crewBranch, "branch", false, "Create a feature branch (crew/<name>)")
|
||||
|
||||
crewListCmd.Flags().StringVar(&crewRig, "rig", "", "Filter by rig name")
|
||||
crewListCmd.Flags().BoolVar(&crewListAll, "all", false, "List crew workspaces in all rigs")
|
||||
crewListCmd.Flags().BoolVar(&crewJSON, "json", false, "Output as JSON")
|
||||
|
||||
crewAtCmd.Flags().StringVar(&crewRig, "rig", "", "Rig to use")
|
||||
|
||||
@@ -56,9 +56,7 @@ func runCrewAdd(cmd *cobra.Command, args []string) error {
|
||||
crewGit := git.NewGit(r.Path)
|
||||
crewMgr := crew.NewManager(r, crewGit)
|
||||
|
||||
// Beads for agent bead creation (use mayor/rig where beads.db lives)
|
||||
// The rig root .beads/ only has config.yaml, no database.
|
||||
bd := beads.New(filepath.Join(r.Path, "mayor", "rig"))
|
||||
bd := beads.New(beads.ResolveBeadsDir(r.Path))
|
||||
|
||||
// Track results
|
||||
var created []string
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
"github.com/steveyegge/gastown/internal/crew"
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
@@ -29,7 +30,19 @@ func runCrewAt(cmd *cobra.Command, args []string) error {
|
||||
// Try to detect from current directory
|
||||
detected, err := detectCrewFromCwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not detect crew workspace from current directory: %w\n\nUsage: gt crew at <name>", err)
|
||||
// Try to show available crew members if we can detect the rig
|
||||
hint := "\n\nUsage: gt crew at <name>"
|
||||
if crewRig != "" {
|
||||
if mgr, _, mgrErr := getCrewManager(crewRig); mgrErr == nil {
|
||||
if members, listErr := mgr.List(); listErr == nil && len(members) > 0 {
|
||||
hint = fmt.Sprintf("\n\nAvailable crew in %s:", crewRig)
|
||||
for _, m := range members {
|
||||
hint += fmt.Sprintf("\n %s", m.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("could not detect crew workspace from current directory: %w%s", err, hint)
|
||||
}
|
||||
name = detected.crewName
|
||||
if crewRig == "" {
|
||||
@@ -61,7 +74,7 @@ func runCrewAt(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resolve account for Claude config
|
||||
// Resolve account for runtime config
|
||||
townRoot, err := workspace.FindFromCwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("finding town root: %w", err)
|
||||
@@ -75,6 +88,9 @@ func runCrewAt(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf("Using account: %s\n", accountHandle)
|
||||
}
|
||||
|
||||
runtimeConfig := config.LoadRuntimeConfig(r.Path)
|
||||
_ = runtime.EnsureSettingsForRole(worker.ClonePath, "crew", runtimeConfig)
|
||||
|
||||
// Check if session exists
|
||||
t := tmux.NewTmux()
|
||||
sessionID := crewSessionName(r.Name, name)
|
||||
@@ -83,15 +99,15 @@ func runCrewAt(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("checking session: %w", err)
|
||||
}
|
||||
|
||||
// Before creating a new session, check if there's already a Claude session
|
||||
// Before creating a new session, check if there's already a runtime session
|
||||
// running in this crew's directory (might have been started manually or via
|
||||
// a different mechanism)
|
||||
if !hasSession {
|
||||
existingSessions, err := t.FindSessionByWorkDir(worker.ClonePath, true)
|
||||
existingSessions, err := t.FindSessionByWorkDir(worker.ClonePath, runtimeConfig.Tmux.ProcessNames)
|
||||
if err == nil && len(existingSessions) > 0 {
|
||||
// Found an existing session with an agent running in this directory
|
||||
// Found an existing session with runtime running in this directory
|
||||
existingSession := existingSessions[0]
|
||||
fmt.Printf("%s Found existing agent session '%s' in crew directory\n",
|
||||
fmt.Printf("%s Found existing runtime session '%s' in crew directory\n",
|
||||
style.Warning.Render("⚠"),
|
||||
existingSession)
|
||||
fmt.Printf(" Attaching to existing session instead of creating a new one\n")
|
||||
@@ -125,9 +141,9 @@ func runCrewAt(cmd *cobra.Command, args []string) error {
|
||||
_ = t.SetEnvironment(sessionID, "GT_RIG", r.Name)
|
||||
_ = t.SetEnvironment(sessionID, "GT_CREW", name)
|
||||
|
||||
// Set CLAUDE_CONFIG_DIR for account selection (non-fatal)
|
||||
if claudeConfigDir != "" {
|
||||
_ = t.SetEnvironment(sessionID, "CLAUDE_CONFIG_DIR", claudeConfigDir)
|
||||
// Set runtime config dir for account selection (non-fatal)
|
||||
if runtimeConfig.Session != nil && runtimeConfig.Session.ConfigDirEnv != "" && claudeConfigDir != "" {
|
||||
_ = t.SetEnvironment(sessionID, runtimeConfig.Session.ConfigDirEnv, claudeConfigDir)
|
||||
}
|
||||
|
||||
// Apply rig-based theming (non-fatal: theming failure doesn't affect operation)
|
||||
@@ -146,31 +162,35 @@ func runCrewAt(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("getting pane ID: %w", err)
|
||||
}
|
||||
|
||||
// Use respawn-pane to replace shell with Claude directly
|
||||
// This gives cleaner lifecycle: Claude exits → session ends (no intermediate shell)
|
||||
// Pass "gt prime" as initial prompt so Claude loads context immediately
|
||||
// Use respawn-pane to replace shell with runtime directly
|
||||
// This gives cleaner lifecycle: runtime exits → session ends (no intermediate shell)
|
||||
// Pass "gt prime" as initial prompt if supported
|
||||
// Export GT_ROLE and BD_ACTOR since tmux SetEnvironment only affects new panes
|
||||
startupCmd, err := config.BuildCrewStartupCommandWithAgentOverride(r.Name, name, r.Path, "gt prime", crewAgentOverride)
|
||||
if err != nil {
|
||||
return fmt.Errorf("building startup command: %w", err)
|
||||
}
|
||||
// Prepend config dir env if available
|
||||
if runtimeConfig.Session != nil && runtimeConfig.Session.ConfigDirEnv != "" && claudeConfigDir != "" {
|
||||
startupCmd = config.PrependEnv(startupCmd, map[string]string{runtimeConfig.Session.ConfigDirEnv: claudeConfigDir})
|
||||
}
|
||||
if err := t.RespawnPane(paneID, startupCmd); err != nil {
|
||||
return fmt.Errorf("starting claude: %w", err)
|
||||
return fmt.Errorf("starting runtime: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Created session for %s/%s\n",
|
||||
style.Bold.Render("✓"), r.Name, name)
|
||||
} else {
|
||||
// Session exists - check if Claude is still running
|
||||
// Session exists - check if runtime is still running
|
||||
// Uses both pane command check and UI marker detection to avoid
|
||||
// restarting when user is in a subshell spawned from Claude
|
||||
// restarting when user is in a subshell spawned from the runtime
|
||||
agentCfg, _, err := config.ResolveAgentConfigWithOverride(townRoot, r.Path, crewAgentOverride)
|
||||
if err != nil {
|
||||
return fmt.Errorf("resolving agent: %w", err)
|
||||
}
|
||||
if !t.IsAgentRunning(sessionID, config.ExpectedPaneCommands(agentCfg)...) {
|
||||
// Claude has exited, restart it using respawn-pane
|
||||
fmt.Printf("Claude exited, restarting...\n")
|
||||
// Runtime has exited, restart it using respawn-pane
|
||||
fmt.Printf("Runtime exited, restarting...\n")
|
||||
|
||||
// Get pane ID for respawn
|
||||
paneID, err := t.GetPaneID(sessionID)
|
||||
@@ -178,15 +198,19 @@ func runCrewAt(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("getting pane ID: %w", err)
|
||||
}
|
||||
|
||||
// Use respawn-pane to replace shell with Claude directly
|
||||
// Pass "gt prime" as initial prompt so Claude loads context immediately
|
||||
// Use respawn-pane to replace shell with runtime directly
|
||||
// Pass "gt prime" as initial prompt if supported
|
||||
// Export GT_ROLE and BD_ACTOR since tmux SetEnvironment only affects new panes
|
||||
startupCmd, err := config.BuildCrewStartupCommandWithAgentOverride(r.Name, name, r.Path, "gt prime", crewAgentOverride)
|
||||
if err != nil {
|
||||
return fmt.Errorf("building startup command: %w", err)
|
||||
}
|
||||
// Prepend config dir env if available
|
||||
if runtimeConfig.Session != nil && runtimeConfig.Session.ConfigDirEnv != "" && claudeConfigDir != "" {
|
||||
startupCmd = config.PrependEnv(startupCmd, map[string]string{runtimeConfig.Session.ConfigDirEnv: claudeConfigDir})
|
||||
}
|
||||
if err := t.RespawnPane(paneID, startupCmd); err != nil {
|
||||
return fmt.Errorf("restarting claude: %w", err)
|
||||
return fmt.Errorf("restarting runtime: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -122,7 +122,7 @@ func detectCrewFromCwd() (*crewDetection, error) {
|
||||
// Look for pattern: <rig>/crew/<name>/...
|
||||
// Minimum: rig, crew, name = 3 parts
|
||||
if len(parts) < 3 {
|
||||
return nil, fmt.Errorf("not in a crew workspace (path too short)")
|
||||
return nil, fmt.Errorf("not inside a crew workspace - specify the crew name or cd into a crew directory (e.g., gastown/crew/max)")
|
||||
}
|
||||
|
||||
rigName := parts[0]
|
||||
@@ -137,7 +137,7 @@ func detectCrewFromCwd() (*crewDetection, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// isShellCommand checks if the command is a shell (meaning Claude has exited).
|
||||
// isShellCommand checks if the command is a shell (meaning the runtime has exited).
|
||||
func isShellCommand(cmd string) bool {
|
||||
shells := constants.SupportedShells
|
||||
for _, shell := range shells {
|
||||
@@ -170,6 +170,29 @@ func execAgent(cfg *config.RuntimeConfig, prompt string) error {
|
||||
return syscall.Exec(agentPath, args, os.Environ())
|
||||
}
|
||||
|
||||
// execRuntime execs the runtime CLI, replacing the current process.
|
||||
// Used when we're already in the target session and just need to start the runtime.
|
||||
// If prompt is provided, it's passed according to the runtime's prompt mode.
|
||||
func execRuntime(prompt, rigPath, configDir string) error {
|
||||
runtimeConfig := config.LoadRuntimeConfig(rigPath)
|
||||
args := runtimeConfig.BuildArgsWithPrompt(prompt)
|
||||
if len(args) == 0 {
|
||||
return fmt.Errorf("runtime command not configured")
|
||||
}
|
||||
|
||||
binPath, err := exec.LookPath(args[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("runtime command not found: %w", err)
|
||||
}
|
||||
|
||||
env := os.Environ()
|
||||
if runtimeConfig.Session != nil && runtimeConfig.Session.ConfigDirEnv != "" && configDir != "" {
|
||||
env = append(env, fmt.Sprintf("%s=%s", runtimeConfig.Session.ConfigDirEnv, configDir))
|
||||
}
|
||||
|
||||
return syscall.Exec(binPath, args, env)
|
||||
}
|
||||
|
||||
// isInTmuxSession checks if we're currently inside the target tmux session.
|
||||
func isInTmuxSession(targetSession string) bool {
|
||||
// TMUX env var format: /tmp/tmux-501/default,12345,0
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
"github.com/steveyegge/gastown/internal/crew"
|
||||
"github.com/steveyegge/gastown/internal/mail"
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
"github.com/steveyegge/gastown/internal/townlog"
|
||||
@@ -163,7 +164,7 @@ func runCrewRemove(cmd *cobra.Command, args []string) error {
|
||||
} else {
|
||||
// Default: CLOSE the agent bead (preserves CV history)
|
||||
closeArgs := []string{"close", agentBeadID, "--reason=Crew workspace removed"}
|
||||
if sessionID := os.Getenv("CLAUDE_SESSION_ID"); sessionID != "" {
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
closeArgs = append(closeArgs, "--session="+sessionID)
|
||||
}
|
||||
closeCmd := exec.Command("bd", closeArgs...)
|
||||
@@ -236,9 +237,10 @@ func runCrewRefresh(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Use manager's Start() with refresh options
|
||||
err = crewMgr.Start(name, crew.StartOptions{
|
||||
KillExisting: true, // Kill old session if running
|
||||
Topic: "refresh", // Startup nudge topic
|
||||
Interactive: true, // No --dangerously-skip-permissions
|
||||
KillExisting: true, // Kill old session if running
|
||||
Topic: "refresh", // Startup nudge topic
|
||||
Interactive: true, // No --dangerously-skip-permissions
|
||||
AgentOverride: crewAgentOverride,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("starting crew session: %w", err)
|
||||
@@ -252,8 +254,9 @@ func runCrewRefresh(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// runCrewStart starts crew workers in a rig.
|
||||
// args[0] is the rig name (optional if inferrable from cwd)
|
||||
// args[1:] are crew member names (optional - defaults to all if not specified)
|
||||
// If first arg is a valid rig name, it's used as the rig; otherwise rig is inferred from cwd.
|
||||
// Remaining args (or all args if rig is inferred) are crew member names.
|
||||
// Defaults to all crew members if no names specified.
|
||||
func runCrewStart(cmd *cobra.Command, args []string) error {
|
||||
var rigName string
|
||||
var crewNames []string
|
||||
@@ -262,8 +265,16 @@ func runCrewStart(cmd *cobra.Command, args []string) error {
|
||||
// No args - infer rig from cwd
|
||||
rigName = "" // getCrewManager will infer from cwd
|
||||
} else {
|
||||
rigName = args[0]
|
||||
crewNames = args[1:]
|
||||
// Check if first arg is a valid rig name
|
||||
if _, _, err := getRig(args[0]); err == nil {
|
||||
// First arg is a rig name
|
||||
rigName = args[0]
|
||||
crewNames = args[1:]
|
||||
} else {
|
||||
// First arg is not a rig - infer rig from cwd and treat all args as crew names
|
||||
rigName = "" // getCrewManager will infer from cwd
|
||||
crewNames = args
|
||||
}
|
||||
}
|
||||
|
||||
// Get the rig manager and rig (infers from cwd if rigName is empty)
|
||||
@@ -346,8 +357,9 @@ func runCrewRestart(cmd *cobra.Command, args []string) error {
|
||||
// Use manager's Start() with restart options
|
||||
// Start() will create workspace if needed (idempotent)
|
||||
err = crewMgr.Start(name, crew.StartOptions{
|
||||
KillExisting: true, // Kill old session if running
|
||||
Topic: "restart", // Startup nudge topic
|
||||
KillExisting: true, // Kill old session if running
|
||||
Topic: "restart", // Startup nudge topic
|
||||
AgentOverride: crewAgentOverride,
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Printf("Error restarting %s: %v\n", arg, err)
|
||||
@@ -425,8 +437,9 @@ func runCrewRestartAll() error {
|
||||
|
||||
// Use manager's Start() with restart options
|
||||
err = crewMgr.Start(agent.AgentName, crew.StartOptions{
|
||||
KillExisting: true, // Kill old session if running
|
||||
Topic: "restart", // Startup nudge topic
|
||||
KillExisting: true, // Kill old session if running
|
||||
Topic: "restart", // Startup nudge topic
|
||||
AgentOverride: crewAgentOverride,
|
||||
})
|
||||
if err != nil {
|
||||
failed++
|
||||
|
||||
@@ -6,7 +6,9 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/crew"
|
||||
"github.com/steveyegge/gastown/internal/git"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
)
|
||||
@@ -22,43 +24,63 @@ type CrewListItem struct {
|
||||
}
|
||||
|
||||
func runCrewList(cmd *cobra.Command, args []string) error {
|
||||
crewMgr, r, err := getCrewManager(crewRig)
|
||||
if err != nil {
|
||||
return err
|
||||
if crewListAll && crewRig != "" {
|
||||
return fmt.Errorf("cannot use --all with --rig")
|
||||
}
|
||||
|
||||
workers, err := crewMgr.List()
|
||||
if err != nil {
|
||||
return fmt.Errorf("listing crew workers: %w", err)
|
||||
}
|
||||
|
||||
if len(workers) == 0 {
|
||||
fmt.Println("No crew workspaces found.")
|
||||
return nil
|
||||
var rigs []*rig.Rig
|
||||
if crewListAll {
|
||||
allRigs, _, err := getAllRigs()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rigs = allRigs
|
||||
} else {
|
||||
_, r, err := getCrewManager(crewRig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rigs = []*rig.Rig{r}
|
||||
}
|
||||
|
||||
// Check session and git status for each worker
|
||||
t := tmux.NewTmux()
|
||||
var items []CrewListItem
|
||||
|
||||
for _, w := range workers {
|
||||
sessionID := crewSessionName(r.Name, w.Name)
|
||||
hasSession, _ := t.HasSession(sessionID)
|
||||
for _, r := range rigs {
|
||||
crewGit := git.NewGit(r.Path)
|
||||
crewMgr := crew.NewManager(r, crewGit)
|
||||
|
||||
crewGit := git.NewGit(w.ClonePath)
|
||||
gitClean := true
|
||||
if status, err := crewGit.Status(); err == nil {
|
||||
gitClean = status.Clean
|
||||
workers, err := crewMgr.List()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "warning: failed to list crew workers in %s: %v\n", r.Name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
items = append(items, CrewListItem{
|
||||
Name: w.Name,
|
||||
Rig: r.Name,
|
||||
Branch: w.Branch,
|
||||
Path: w.ClonePath,
|
||||
HasSession: hasSession,
|
||||
GitClean: gitClean,
|
||||
})
|
||||
for _, w := range workers {
|
||||
sessionID := crewSessionName(r.Name, w.Name)
|
||||
hasSession, _ := t.HasSession(sessionID)
|
||||
|
||||
workerGit := git.NewGit(w.ClonePath)
|
||||
gitClean := true
|
||||
if status, err := workerGit.Status(); err == nil {
|
||||
gitClean = status.Clean
|
||||
}
|
||||
|
||||
items = append(items, CrewListItem{
|
||||
Name: w.Name,
|
||||
Rig: r.Name,
|
||||
Branch: w.Branch,
|
||||
Path: w.ClonePath,
|
||||
HasSession: hasSession,
|
||||
GitClean: gitClean,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(items) == 0 {
|
||||
fmt.Println("No crew workspaces found.")
|
||||
return nil
|
||||
}
|
||||
|
||||
if crewJSON {
|
||||
|
||||
127
internal/cmd/crew_list_test.go
Normal file
127
internal/cmd/crew_list_test.go
Normal file
@@ -0,0 +1,127 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
)
|
||||
|
||||
func setupTestTownForCrewList(t *testing.T, rigs map[string][]string) string {
|
||||
t.Helper()
|
||||
|
||||
townRoot := t.TempDir()
|
||||
mayorDir := filepath.Join(townRoot, "mayor")
|
||||
if err := os.MkdirAll(mayorDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir mayor: %v", err)
|
||||
}
|
||||
|
||||
townConfig := &config.TownConfig{
|
||||
Type: "town",
|
||||
Version: config.CurrentTownVersion,
|
||||
Name: "test-town",
|
||||
PublicName: "Test Town",
|
||||
CreatedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
}
|
||||
if err := config.SaveTownConfig(filepath.Join(mayorDir, "town.json"), townConfig); err != nil {
|
||||
t.Fatalf("save town.json: %v", err)
|
||||
}
|
||||
|
||||
rigsConfig := &config.RigsConfig{
|
||||
Version: config.CurrentRigsVersion,
|
||||
Rigs: make(map[string]config.RigEntry),
|
||||
}
|
||||
|
||||
for rigName, crewNames := range rigs {
|
||||
rigsConfig.Rigs[rigName] = config.RigEntry{
|
||||
GitURL: "https://example.com/" + rigName + ".git",
|
||||
AddedAt: time.Now(),
|
||||
}
|
||||
|
||||
rigPath := filepath.Join(townRoot, rigName)
|
||||
crewDir := filepath.Join(rigPath, "crew")
|
||||
if err := os.MkdirAll(crewDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir crew dir: %v", err)
|
||||
}
|
||||
for _, crewName := range crewNames {
|
||||
if err := os.MkdirAll(filepath.Join(crewDir, crewName), 0755); err != nil {
|
||||
t.Fatalf("mkdir crew worker: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := config.SaveRigsConfig(filepath.Join(mayorDir, "rigs.json"), rigsConfig); err != nil {
|
||||
t.Fatalf("save rigs.json: %v", err)
|
||||
}
|
||||
|
||||
return townRoot
|
||||
}
|
||||
|
||||
func TestRunCrewList_AllWithRigErrors(t *testing.T) {
|
||||
townRoot := setupTestTownForCrewList(t, map[string][]string{"rig-a": {"alice"}})
|
||||
|
||||
originalWd, _ := os.Getwd()
|
||||
defer os.Chdir(originalWd)
|
||||
if err := os.Chdir(townRoot); err != nil {
|
||||
t.Fatalf("chdir: %v", err)
|
||||
}
|
||||
|
||||
crewListAll = true
|
||||
crewRig = "rig-a"
|
||||
defer func() {
|
||||
crewListAll = false
|
||||
crewRig = ""
|
||||
}()
|
||||
|
||||
err := runCrewList(&cobra.Command{}, nil)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for --all with --rig, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunCrewList_AllAggregatesJSON(t *testing.T) {
|
||||
townRoot := setupTestTownForCrewList(t, map[string][]string{
|
||||
"rig-a": {"alice"},
|
||||
"rig-b": {"bob"},
|
||||
})
|
||||
|
||||
originalWd, _ := os.Getwd()
|
||||
defer os.Chdir(originalWd)
|
||||
if err := os.Chdir(townRoot); err != nil {
|
||||
t.Fatalf("chdir: %v", err)
|
||||
}
|
||||
|
||||
crewListAll = true
|
||||
crewJSON = true
|
||||
crewRig = ""
|
||||
defer func() {
|
||||
crewListAll = false
|
||||
crewJSON = false
|
||||
}()
|
||||
|
||||
output := captureStdout(t, func() {
|
||||
if err := runCrewList(&cobra.Command{}, nil); err != nil {
|
||||
t.Fatalf("runCrewList failed: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
var items []CrewListItem
|
||||
if err := json.Unmarshal([]byte(output), &items); err != nil {
|
||||
t.Fatalf("unmarshal output: %v", err)
|
||||
}
|
||||
if len(items) != 2 {
|
||||
t.Fatalf("expected 2 crew workers, got %d", len(items))
|
||||
}
|
||||
|
||||
rigs := map[string]bool{}
|
||||
for _, item := range items {
|
||||
rigs[item.Rig] = true
|
||||
}
|
||||
if !rigs["rig-a"] || !rigs["rig-b"] {
|
||||
t.Fatalf("expected crew from rig-a and rig-b, got: %#v", rigs)
|
||||
}
|
||||
}
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
"github.com/steveyegge/gastown/internal/deacon"
|
||||
"github.com/steveyegge/gastown/internal/polecat"
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
@@ -112,7 +113,7 @@ var deaconTriggerPendingCmd = &cobra.Command{
|
||||
|
||||
⚠️ BOOTSTRAP MODE ONLY - Uses regex detection (ZFC violation acceptable).
|
||||
|
||||
This command uses WaitForClaudeReady (regex) to detect when Claude is ready.
|
||||
This command uses WaitForRuntimeReady (regex) to detect when the runtime is ready.
|
||||
This is appropriate for daemon bootstrap when no AI is available.
|
||||
|
||||
In steady-state, the Deacon should use AI-based observation instead:
|
||||
@@ -205,6 +206,35 @@ Examples:
|
||||
RunE: runDeaconStaleHooks,
|
||||
}
|
||||
|
||||
// deaconPauseCmd implements `gt deacon pause`. Per its Long text, pausing
// records a state that stops autonomous patrol actions and persists across
// session restarts (the persistence mechanism lives in the deacon package;
// not visible here). Handled by runDeaconPause.
var deaconPauseCmd = &cobra.Command{
	Use:   "pause",
	Short: "Pause the Deacon to prevent patrol actions",
	Long: `Pause the Deacon to prevent it from performing any patrol actions.

When paused, the Deacon:
- Will not create patrol molecules
- Will not run health checks
- Will not take any autonomous actions
- Will display a PAUSED message on startup

The pause state persists across session restarts. Use 'gt deacon resume'
to allow the Deacon to work again.

Examples:
  gt deacon pause # Pause with no reason
  gt deacon pause --reason="testing" # Pause with a reason`,
	RunE: runDeaconPause,
}

// deaconResumeCmd implements `gt deacon resume`, the inverse of pause:
// it removes the pause state so patrol actions may run again. Handled by
// runDeaconResume.
var deaconResumeCmd = &cobra.Command{
	Use:   "resume",
	Short: "Resume the Deacon to allow patrol actions",
	Long: `Resume the Deacon so it can perform patrol actions again.

This removes the pause file and allows the Deacon to work normally.`,
	RunE: runDeaconResume,
}
||||
|
||||
var (
|
||||
triggerTimeout time.Duration
|
||||
|
||||
@@ -220,6 +250,9 @@ var (
|
||||
// Stale hooks flags
|
||||
staleHooksMaxAge time.Duration
|
||||
staleHooksDryRun bool
|
||||
|
||||
// Pause flags
|
||||
pauseReason string
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -234,6 +267,8 @@ func init() {
|
||||
deaconCmd.AddCommand(deaconForceKillCmd)
|
||||
deaconCmd.AddCommand(deaconHealthStateCmd)
|
||||
deaconCmd.AddCommand(deaconStaleHooksCmd)
|
||||
deaconCmd.AddCommand(deaconPauseCmd)
|
||||
deaconCmd.AddCommand(deaconResumeCmd)
|
||||
|
||||
// Flags for trigger-pending
|
||||
deaconTriggerPendingCmd.Flags().DurationVar(&triggerTimeout, "timeout", 2*time.Second,
|
||||
@@ -259,6 +294,10 @@ func init() {
|
||||
deaconStaleHooksCmd.Flags().BoolVar(&staleHooksDryRun, "dry-run", false,
|
||||
"Preview what would be unhooked without making changes")
|
||||
|
||||
// Flags for pause
|
||||
deaconPauseCmd.Flags().StringVar(&pauseReason, "reason", "",
|
||||
"Reason for pausing the Deacon")
|
||||
|
||||
deaconStartCmd.Flags().StringVar(&deaconAgentOverride, "agent", "", "Agent alias to run the Deacon with (overrides town default)")
|
||||
deaconAttachCmd.Flags().StringVar(&deaconAgentOverride, "agent", "", "Agent alias to run the Deacon with (overrides town default)")
|
||||
deaconRestartCmd.Flags().StringVar(&deaconAgentOverride, "agent", "", "Agent alias to run the Deacon with (overrides town default)")
|
||||
@@ -345,6 +384,9 @@ func startDeaconSession(t *tmux.Tmux, sessionName, agentOverride string) error {
|
||||
}
|
||||
time.Sleep(constants.ShutdownNotifyDelay)
|
||||
|
||||
runtimeConfig := config.LoadRuntimeConfig("")
|
||||
_ = runtime.RunStartupFallback(t, sessionName, "deacon", runtimeConfig)
|
||||
|
||||
// Inject startup nudge for predecessor discovery via /resume
|
||||
_ = session.StartupNudge(t, sessionName, session.StartupNudgeConfig{
|
||||
Recipient: "deacon",
|
||||
@@ -418,6 +460,23 @@ func runDeaconStatus(cmd *cobra.Command, args []string) error {
|
||||
|
||||
sessionName := getDeaconSessionName()
|
||||
|
||||
// Check pause state first (most important)
|
||||
townRoot, _ := workspace.FindFromCwdOrError()
|
||||
if townRoot != "" {
|
||||
paused, state, err := deacon.IsPaused(townRoot)
|
||||
if err == nil && paused {
|
||||
fmt.Printf("%s DEACON PAUSED\n", style.Bold.Render("⏸️"))
|
||||
if state.Reason != "" {
|
||||
fmt.Printf(" Reason: %s\n", state.Reason)
|
||||
}
|
||||
fmt.Printf(" Paused at: %s\n", state.PausedAt.Format(time.RFC3339))
|
||||
fmt.Printf(" Paused by: %s\n", state.PausedBy)
|
||||
fmt.Println()
|
||||
fmt.Printf("Resume with: %s\n", style.Dim.Render("gt deacon resume"))
|
||||
fmt.Println()
|
||||
}
|
||||
}
|
||||
|
||||
running, err := t.HasSession(sessionName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking session: %w", err)
|
||||
@@ -487,6 +546,19 @@ func runDeaconHeartbeat(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
// Check if Deacon is paused - if so, refuse to update heartbeat
|
||||
paused, state, err := deacon.IsPaused(townRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking pause state: %w", err)
|
||||
}
|
||||
if paused {
|
||||
fmt.Printf("%s Deacon is paused. Use 'gt deacon resume' to unpause.\n", style.Bold.Render("⏸️"))
|
||||
if state.Reason != "" {
|
||||
fmt.Printf(" Reason: %s\n", state.Reason)
|
||||
}
|
||||
return errors.New("Deacon is paused")
|
||||
}
|
||||
|
||||
action := ""
|
||||
if len(args) > 0 {
|
||||
action = strings.Join(args, " ")
|
||||
@@ -951,3 +1023,68 @@ func runDeaconStaleHooks(cmd *cobra.Command, args []string) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// runDeaconPause pauses the Deacon to prevent patrol actions.
|
||||
func runDeaconPause(cmd *cobra.Command, args []string) error {
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
// Check if already paused
|
||||
paused, state, err := deacon.IsPaused(townRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking pause state: %w", err)
|
||||
}
|
||||
if paused {
|
||||
fmt.Printf("%s Deacon is already paused\n", style.Dim.Render("○"))
|
||||
fmt.Printf(" Reason: %s\n", state.Reason)
|
||||
fmt.Printf(" Paused at: %s\n", state.PausedAt.Format(time.RFC3339))
|
||||
fmt.Printf(" Paused by: %s\n", state.PausedBy)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Pause the Deacon
|
||||
if err := deacon.Pause(townRoot, pauseReason, "human"); err != nil {
|
||||
return fmt.Errorf("pausing Deacon: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Deacon paused\n", style.Bold.Render("⏸️"))
|
||||
if pauseReason != "" {
|
||||
fmt.Printf(" Reason: %s\n", pauseReason)
|
||||
}
|
||||
fmt.Printf(" Pause file: %s\n", deacon.GetPauseFile(townRoot))
|
||||
fmt.Println()
|
||||
fmt.Printf("The Deacon will not perform any patrol actions until resumed.\n")
|
||||
fmt.Printf("Resume with: %s\n", style.Dim.Render("gt deacon resume"))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// runDeaconResume resumes the Deacon to allow patrol actions.
|
||||
func runDeaconResume(cmd *cobra.Command, args []string) error {
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
// Check if paused
|
||||
paused, _, err := deacon.IsPaused(townRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking pause state: %w", err)
|
||||
}
|
||||
if !paused {
|
||||
fmt.Printf("%s Deacon is not paused\n", style.Dim.Render("○"))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resume the Deacon
|
||||
if err := deacon.Resume(townRoot); err != nil {
|
||||
return fmt.Errorf("resuming Deacon: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Deacon resumed\n", style.Bold.Render("▶️"))
|
||||
fmt.Println("The Deacon can now perform patrol actions.")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
72
internal/cmd/disable.go
Normal file
72
internal/cmd/disable.go
Normal file
@@ -0,0 +1,72 @@
|
||||
// ABOUTME: Command to disable Gas Town system-wide.
|
||||
// ABOUTME: Sets the global state to disabled so tools work vanilla.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/shell"
|
||||
"github.com/steveyegge/gastown/internal/state"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
)
|
||||
|
||||
var disableClean bool
|
||||
|
||||
var disableCmd = &cobra.Command{
|
||||
Use: "disable",
|
||||
GroupID: GroupConfig,
|
||||
Short: "Disable Gas Town system-wide",
|
||||
Long: `Disable Gas Town for all agentic coding tools.
|
||||
|
||||
When disabled:
|
||||
- Shell hooks become no-ops
|
||||
- Claude Code SessionStart hooks skip 'gt prime'
|
||||
- Tools work 100% vanilla (no Gas Town behavior)
|
||||
|
||||
The workspace (~/gt) is preserved. Use 'gt enable' to re-enable.
|
||||
|
||||
Flags:
|
||||
--clean Also remove shell integration from ~/.zshrc/~/.bashrc
|
||||
|
||||
Environment overrides still work:
|
||||
GASTOWN_ENABLED=1 - Enable for current session only`,
|
||||
RunE: runDisable,
|
||||
}
|
||||
|
||||
func init() {
|
||||
disableCmd.Flags().BoolVar(&disableClean, "clean", false,
|
||||
"Remove shell integration from RC files")
|
||||
rootCmd.AddCommand(disableCmd)
|
||||
}
|
||||
|
||||
func runDisable(cmd *cobra.Command, args []string) error {
|
||||
if err := state.Disable(); err != nil {
|
||||
return fmt.Errorf("disabling Gas Town: %w", err)
|
||||
}
|
||||
|
||||
if disableClean {
|
||||
if err := removeShellIntegration(); err != nil {
|
||||
fmt.Printf("%s Could not clean shell integration: %v\n",
|
||||
style.Warning.Render("!"), err)
|
||||
} else {
|
||||
fmt.Println(" Removed shell integration from RC files")
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("%s Gas Town disabled\n", style.Success.Render("✓"))
|
||||
fmt.Println()
|
||||
fmt.Println("All agentic coding tools now work vanilla.")
|
||||
if !disableClean {
|
||||
fmt.Printf("Use %s to also remove shell hooks\n",
|
||||
style.Dim.Render("gt disable --clean"))
|
||||
}
|
||||
fmt.Printf("Use %s to re-enable\n", style.Dim.Render("gt enable"))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeShellIntegration() error {
|
||||
return shell.Remove()
|
||||
}
|
||||
@@ -108,18 +108,22 @@ func runDoctor(cmd *cobra.Command, args []string) error {
|
||||
// Register workspace-level checks first (fundamental)
|
||||
d.RegisterAll(doctor.WorkspaceChecks()...)
|
||||
|
||||
d.Register(doctor.NewGlobalStateCheck())
|
||||
|
||||
// Register built-in checks
|
||||
d.Register(doctor.NewTownGitCheck())
|
||||
d.Register(doctor.NewDaemonCheck())
|
||||
d.Register(doctor.NewRepoFingerprintCheck())
|
||||
d.Register(doctor.NewBootHealthCheck())
|
||||
d.Register(doctor.NewBeadsDatabaseCheck())
|
||||
d.Register(doctor.NewFormulaCheck())
|
||||
d.Register(doctor.NewBdDaemonCheck())
|
||||
d.Register(doctor.NewPrefixConflictCheck())
|
||||
d.Register(doctor.NewPrefixMismatchCheck())
|
||||
d.Register(doctor.NewRoutesCheck())
|
||||
d.Register(doctor.NewOrphanSessionCheck())
|
||||
d.Register(doctor.NewOrphanProcessCheck())
|
||||
d.Register(doctor.NewGTRootCheck())
|
||||
d.Register(doctor.NewWispGCCheck())
|
||||
d.Register(doctor.NewBranchCheck())
|
||||
d.Register(doctor.NewBeadsSyncOrphanCheck())
|
||||
@@ -135,6 +139,7 @@ func runDoctor(cmd *cobra.Command, args []string) error {
|
||||
d.Register(doctor.NewPatrolPluginsAccessibleCheck())
|
||||
d.Register(doctor.NewPatrolRolesHavePromptsCheck())
|
||||
d.Register(doctor.NewAgentBeadsCheck())
|
||||
d.Register(doctor.NewRigBeadsCheck())
|
||||
|
||||
// NOTE: StaleAttachmentsCheck removed - staleness detection belongs in Deacon molecule
|
||||
|
||||
|
||||
54
internal/cmd/enable.go
Normal file
54
internal/cmd/enable.go
Normal file
@@ -0,0 +1,54 @@
|
||||
// ABOUTME: Command to enable Gas Town system-wide.
|
||||
// ABOUTME: Sets the global state to enabled for all agentic coding tools.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/state"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
)
|
||||
|
||||
var enableCmd = &cobra.Command{
|
||||
Use: "enable",
|
||||
GroupID: GroupConfig,
|
||||
Short: "Enable Gas Town system-wide",
|
||||
Long: `Enable Gas Town for all agentic coding tools.
|
||||
|
||||
When enabled:
|
||||
- Shell hooks set GT_TOWN_ROOT and GT_RIG environment variables
|
||||
- Claude Code SessionStart hooks run 'gt prime' for context
|
||||
- Git repos are auto-registered as rigs (configurable)
|
||||
|
||||
Use 'gt disable' to turn off. Use 'gt status --global' to check state.
|
||||
|
||||
Environment overrides:
|
||||
GASTOWN_DISABLED=1 - Disable for current session only
|
||||
GASTOWN_ENABLED=1 - Enable for current session only`,
|
||||
RunE: runEnable,
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(enableCmd)
|
||||
}
|
||||
|
||||
func runEnable(cmd *cobra.Command, args []string) error {
|
||||
if err := state.Enable(Version); err != nil {
|
||||
return fmt.Errorf("enabling Gas Town: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Gas Town enabled\n", style.Success.Render("✓"))
|
||||
fmt.Println()
|
||||
fmt.Println("Gas Town will now:")
|
||||
fmt.Println(" • Inject context into Claude Code sessions")
|
||||
fmt.Println(" • Set GT_TOWN_ROOT and GT_RIG environment variables")
|
||||
fmt.Println(" • Auto-register git repos as rigs (if configured)")
|
||||
fmt.Println()
|
||||
fmt.Printf("Use %s to disable, %s to check status\n",
|
||||
style.Dim.Render("gt disable"),
|
||||
style.Dim.Render("gt status --global"))
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -357,9 +357,13 @@ func buildRestartCommand(sessionName string) (string, error) {
|
||||
// Build environment exports - role vars first, then Claude vars
|
||||
var exports []string
|
||||
if gtRole != "" {
|
||||
exports = append(exports, fmt.Sprintf("GT_ROLE=%s", gtRole))
|
||||
exports = append(exports, fmt.Sprintf("BD_ACTOR=%s", gtRole))
|
||||
exports = append(exports, fmt.Sprintf("GIT_AUTHOR_NAME=%s", gtRole))
|
||||
runtimeConfig := config.LoadRuntimeConfig("")
|
||||
exports = append(exports, "GT_ROLE="+gtRole)
|
||||
exports = append(exports, "BD_ACTOR="+gtRole)
|
||||
exports = append(exports, "GIT_AUTHOR_NAME="+gtRole)
|
||||
if runtimeConfig.Session != nil && runtimeConfig.Session.SessionIDEnv != "" {
|
||||
exports = append(exports, "GT_SESSION_ID_ENV="+runtimeConfig.Session.SessionIDEnv)
|
||||
}
|
||||
}
|
||||
|
||||
// Add Claude-related env vars from current environment
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/events"
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
)
|
||||
|
||||
@@ -172,7 +173,7 @@ func runHook(_ *cobra.Command, args []string) error {
|
||||
// Close completed molecule bead (use bd close --force for pinned)
|
||||
closeArgs := []string{"close", existing.ID, "--force",
|
||||
"--reason=Auto-replaced by gt hook (molecule complete)"}
|
||||
if sessionID := os.Getenv("CLAUDE_SESSION_ID"); sessionID != "" {
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
closeArgs = append(closeArgs, "--session="+sessionID)
|
||||
}
|
||||
closeCmd := exec.Command("bd", closeArgs...)
|
||||
|
||||
@@ -139,7 +139,7 @@ func discoverHooks(townRoot string) ([]HookInfo, error) {
|
||||
polecatsDir := filepath.Join(rigPath, "polecats")
|
||||
if polecats, err := os.ReadDir(polecatsDir); err == nil {
|
||||
for _, p := range polecats {
|
||||
if p.IsDir() {
|
||||
if p.IsDir() && !strings.HasPrefix(p.Name(), ".") {
|
||||
locations = append(locations, struct {
|
||||
path string
|
||||
agent string
|
||||
|
||||
@@ -16,9 +16,12 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/deps"
|
||||
"github.com/steveyegge/gastown/internal/formula"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/shell"
|
||||
"github.com/steveyegge/gastown/internal/state"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/templates"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
"github.com/steveyegge/gastown/internal/wrappers"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -30,6 +33,8 @@ var (
|
||||
installGit bool
|
||||
installGitHub string
|
||||
installPublic bool
|
||||
installShell bool
|
||||
installWrappers bool
|
||||
)
|
||||
|
||||
var installCmd = &cobra.Command{
|
||||
@@ -55,7 +60,8 @@ Examples:
|
||||
gt install ~/gt --no-beads # Skip .beads/ initialization
|
||||
gt install ~/gt --git # Also init git with .gitignore
|
||||
gt install ~/gt --github=user/repo # Create private GitHub repo (default)
|
||||
gt install ~/gt --github=user/repo --public # Create public GitHub repo`,
|
||||
gt install ~/gt --github=user/repo --public # Create public GitHub repo
|
||||
gt install ~/gt --shell # Install shell integration (sets GT_TOWN_ROOT/GT_RIG)`,
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: runInstall,
|
||||
}
|
||||
@@ -69,6 +75,8 @@ func init() {
|
||||
installCmd.Flags().BoolVar(&installGit, "git", false, "Initialize git with .gitignore")
|
||||
installCmd.Flags().StringVar(&installGitHub, "github", "", "Create GitHub repo (format: owner/repo, private by default)")
|
||||
installCmd.Flags().BoolVar(&installPublic, "public", false, "Make GitHub repo public (use with --github)")
|
||||
installCmd.Flags().BoolVar(&installShell, "shell", false, "Install shell integration (sets GT_TOWN_ROOT/GT_RIG env vars)")
|
||||
installCmd.Flags().BoolVar(&installWrappers, "wrappers", false, "Install gt-codex/gt-opencode wrapper scripts to ~/bin/")
|
||||
rootCmd.AddCommand(installCmd)
|
||||
}
|
||||
|
||||
@@ -260,6 +268,29 @@ func runInstall(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf(" ✓ Created .claude/commands/ (slash commands for all agents)\n")
|
||||
}
|
||||
|
||||
if installShell {
|
||||
fmt.Println()
|
||||
if err := shell.Install(); err != nil {
|
||||
fmt.Printf(" %s Could not install shell integration: %v\n", style.Dim.Render("⚠"), err)
|
||||
} else {
|
||||
fmt.Printf(" ✓ Installed shell integration (%s)\n", shell.RCFilePath(shell.DetectShell()))
|
||||
}
|
||||
if err := state.Enable(Version); err != nil {
|
||||
fmt.Printf(" %s Could not enable Gas Town: %v\n", style.Dim.Render("⚠"), err)
|
||||
} else {
|
||||
fmt.Printf(" ✓ Enabled Gas Town globally\n")
|
||||
}
|
||||
}
|
||||
|
||||
if installWrappers {
|
||||
fmt.Println()
|
||||
if err := wrappers.Install(); err != nil {
|
||||
fmt.Printf(" %s Could not install wrapper scripts: %v\n", style.Dim.Render("⚠"), err)
|
||||
} else {
|
||||
fmt.Printf(" ✓ Installed gt-codex and gt-opencode to %s\n", wrappers.BinDir())
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("\n%s HQ created successfully!\n", style.Bold.Render("✓"))
|
||||
fmt.Println()
|
||||
fmt.Println("Next steps:")
|
||||
@@ -313,6 +344,16 @@ func initTownBeads(townPath string) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Configure custom types for Gas Town (agent, role, rig, convoy).
|
||||
// These were extracted from beads core in v0.46.0 and now require explicit config.
|
||||
customTypes := "agent,role,rig,convoy,event"
|
||||
configCmd := exec.Command("bd", "config", "set", "types.custom", customTypes)
|
||||
configCmd.Dir = townPath
|
||||
if configOutput, configErr := configCmd.CombinedOutput(); configErr != nil {
|
||||
// Non-fatal: older beads versions don't need this, newer ones do
|
||||
fmt.Printf(" %s Could not set custom types: %s\n", style.Dim.Render("⚠"), strings.TrimSpace(string(configOutput)))
|
||||
}
|
||||
|
||||
// Ensure database has repository fingerprint (GH #25).
|
||||
// This is idempotent - safe on both new and legacy (pre-0.17.5) databases.
|
||||
// Without fingerprint, the bd daemon fails to start silently.
|
||||
@@ -321,6 +362,13 @@ func initTownBeads(townPath string) error {
|
||||
fmt.Printf(" %s Could not verify repo fingerprint: %v\n", style.Dim.Render("⚠"), err)
|
||||
}
|
||||
|
||||
// Ensure routes.jsonl has an explicit town-level mapping for hq-* beads.
|
||||
// This keeps hq-* operations stable even when invoked from rig worktrees.
|
||||
if err := beads.AppendRoute(townPath, beads.Route{Prefix: "hq-", Path: "."}); err != nil {
|
||||
// Non-fatal: routing still works in many contexts, but explicit mapping is preferred.
|
||||
fmt.Printf(" %s Could not update routes.jsonl: %v\n", style.Dim.Render("⚠"), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -337,6 +385,20 @@ func ensureRepoFingerprint(beadsPath string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureCustomTypes registers Gas Town custom issue types with beads.
|
||||
// Beads core only supports built-in types (bug, feature, task, etc.).
|
||||
// Gas Town needs custom types: agent, role, rig, convoy, slot.
|
||||
// This is idempotent - safe to call multiple times.
|
||||
func ensureCustomTypes(beadsPath string) error {
|
||||
cmd := exec.Command("bd", "config", "set", "types.custom", "agent,role,rig,convoy,slot")
|
||||
cmd.Dir = beadsPath
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("bd config set types.custom: %s", strings.TrimSpace(string(output)))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// initTownAgentBeads creates town-level agent and role beads using hq- prefix.
|
||||
// This creates:
|
||||
// - hq-mayor, hq-deacon (agent beads for town-level agents)
|
||||
@@ -358,6 +420,13 @@ func ensureRepoFingerprint(beadsPath string) error {
|
||||
func initTownAgentBeads(townPath string) error {
|
||||
bd := beads.New(townPath)
|
||||
|
||||
// bd init doesn't enable "custom" issue types by default, but Gas Town uses
|
||||
// agent/role beads during install and runtime. Ensure these types are enabled
|
||||
// before attempting to create any town-level system beads.
|
||||
if err := ensureBeadsCustomTypes(townPath, []string{"agent", "role", "rig", "convoy", "slot"}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Role beads (global templates)
|
||||
roleDefs := []struct {
|
||||
id string
|
||||
@@ -476,3 +545,17 @@ func initTownAgentBeads(townPath string) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func ensureBeadsCustomTypes(workDir string, types []string) error {
|
||||
if len(types) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
cmd := exec.Command("bd", "config", "set", "types.custom", strings.Join(types, ","))
|
||||
cmd.Dir = workDir
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("bd config set types.custom failed: %s", strings.TrimSpace(string(output)))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -143,6 +143,21 @@ func TestInstallTownRoleSlots(t *testing.T) {
|
||||
t.Fatalf("gt install failed: %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
// Log install output for CI debugging
|
||||
t.Logf("gt install output:\n%s", output)
|
||||
|
||||
// Verify beads directory was created
|
||||
beadsDir := filepath.Join(hqPath, ".beads")
|
||||
if _, err := os.Stat(beadsDir); os.IsNotExist(err) {
|
||||
t.Fatalf("beads directory not created at %s", beadsDir)
|
||||
}
|
||||
|
||||
// List beads for debugging
|
||||
listCmd := exec.Command("bd", "--no-daemon", "list", "--type=agent")
|
||||
listCmd.Dir = hqPath
|
||||
listOutput, _ := listCmd.CombinedOutput()
|
||||
t.Logf("bd list --type=agent output:\n%s", listOutput)
|
||||
|
||||
assertSlotValue(t, hqPath, "hq-mayor", "role", "hq-mayor-role")
|
||||
assertSlotValue(t, hqPath, "hq-deacon", "role", "hq-deacon-role")
|
||||
}
|
||||
|
||||
@@ -47,6 +47,9 @@ var (
|
||||
|
||||
// Integration status flags
|
||||
mqIntegrationStatusJSON bool
|
||||
|
||||
// Integration create flags
|
||||
mqIntegrationCreateBranch string
|
||||
)
|
||||
|
||||
var mqCmd = &cobra.Command{
|
||||
@@ -190,18 +193,31 @@ var mqIntegrationCreateCmd = &cobra.Command{
|
||||
Short: "Create an integration branch for an epic",
|
||||
Long: `Create an integration branch for batch work on an epic.
|
||||
|
||||
Creates a branch named integration/<epic-id> from main and pushes it
|
||||
to origin. Future MRs for this epic's children can target this branch.
|
||||
Creates a branch from main and pushes it to origin. Future MRs for this
|
||||
epic's children can target this branch.
|
||||
|
||||
Branch naming:
|
||||
Default: integration/<epic-id>
|
||||
Config: Set merge_queue.integration_branch_template in rig settings
|
||||
Override: Use --branch flag for one-off customization
|
||||
|
||||
Template variables:
|
||||
{epic} - Full epic ID (e.g., "RA-123")
|
||||
{prefix} - Epic prefix before first hyphen (e.g., "RA")
|
||||
{user} - Git user.name (e.g., "klauern")
|
||||
|
||||
Actions:
|
||||
1. Verify epic exists
|
||||
2. Create branch integration/<epic-id> from main
|
||||
2. Create branch from main (using template or --branch)
|
||||
3. Push to origin
|
||||
4. Store integration branch info in epic metadata
|
||||
4. Store actual branch name in epic metadata
|
||||
|
||||
Example:
|
||||
Examples:
|
||||
gt mq integration create gt-auth-epic
|
||||
# Creates integration/gt-auth-epic from main`,
|
||||
# Creates integration/gt-auth-epic (default)
|
||||
|
||||
gt mq integration create RA-123 --branch "klauern/PROJ-1234/{epic}"
|
||||
# Creates klauern/PROJ-1234/RA-123`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: runMqIntegrationCreate,
|
||||
}
|
||||
@@ -287,6 +303,7 @@ func init() {
|
||||
mqCmd.AddCommand(mqStatusCmd)
|
||||
|
||||
// Integration branch subcommands
|
||||
mqIntegrationCreateCmd.Flags().StringVar(&mqIntegrationCreateBranch, "branch", "", "Override branch name template (supports {epic}, {prefix}, {user})")
|
||||
mqIntegrationCmd.AddCommand(mqIntegrationCreateCmd)
|
||||
|
||||
// Integration land flags
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
@@ -16,6 +17,141 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
|
||||
// Integration branch template constants
|
||||
const defaultIntegrationBranchTemplate = "integration/{epic}"
|
||||
|
||||
// invalidBranchCharsRegex matches characters that are invalid in git branch names.
|
||||
// Git branch names cannot contain: ~ ^ : \ space, .., @{, or end with .lock
|
||||
var invalidBranchCharsRegex = regexp.MustCompile(`[~^:\s\\]|\.\.|\.\.|@\{`)
|
||||
|
||||
// buildIntegrationBranchName expands an integration branch template with variables.
|
||||
// Variables supported:
|
||||
// - {epic}: Full epic ID (e.g., "RA-123")
|
||||
// - {prefix}: Epic prefix before first hyphen (e.g., "RA")
|
||||
// - {user}: Git user.name (e.g., "klauern")
|
||||
//
|
||||
// If template is empty, uses defaultIntegrationBranchTemplate.
|
||||
func buildIntegrationBranchName(template, epicID string) string {
|
||||
if template == "" {
|
||||
template = defaultIntegrationBranchTemplate
|
||||
}
|
||||
|
||||
result := template
|
||||
result = strings.ReplaceAll(result, "{epic}", epicID)
|
||||
result = strings.ReplaceAll(result, "{prefix}", extractEpicPrefix(epicID))
|
||||
|
||||
// Git user (optional - leaves placeholder if not available)
|
||||
if user := getGitUserName(); user != "" {
|
||||
result = strings.ReplaceAll(result, "{user}", user)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// extractEpicPrefix extracts the prefix from an epic ID (before the first hyphen).
|
||||
// Examples: "RA-123" -> "RA", "PROJ-456" -> "PROJ", "abc" -> "abc"
|
||||
func extractEpicPrefix(epicID string) string {
|
||||
if idx := strings.Index(epicID, "-"); idx > 0 {
|
||||
return epicID[:idx]
|
||||
}
|
||||
return epicID
|
||||
}
|
||||
|
||||
// getGitUserName returns the git user.name config value, or empty if not set.
|
||||
func getGitUserName() string {
|
||||
cmd := exec.Command("git", "config", "user.name")
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return strings.TrimSpace(string(out))
|
||||
}
|
||||
|
||||
// validateBranchName checks if a branch name is valid for git.
|
||||
// Returns an error if the branch name contains invalid characters.
|
||||
func validateBranchName(branchName string) error {
|
||||
if branchName == "" {
|
||||
return fmt.Errorf("branch name cannot be empty")
|
||||
}
|
||||
|
||||
// Check for invalid characters
|
||||
if invalidBranchCharsRegex.MatchString(branchName) {
|
||||
return fmt.Errorf("branch name %q contains invalid characters (~ ^ : \\ space, .., or @{)", branchName)
|
||||
}
|
||||
|
||||
// Check for .lock suffix
|
||||
if strings.HasSuffix(branchName, ".lock") {
|
||||
return fmt.Errorf("branch name %q cannot end with .lock", branchName)
|
||||
}
|
||||
|
||||
// Check for leading/trailing slashes or dots
|
||||
if strings.HasPrefix(branchName, "/") || strings.HasSuffix(branchName, "/") {
|
||||
return fmt.Errorf("branch name %q cannot start or end with /", branchName)
|
||||
}
|
||||
if strings.HasPrefix(branchName, ".") || strings.HasSuffix(branchName, ".") {
|
||||
return fmt.Errorf("branch name %q cannot start or end with .", branchName)
|
||||
}
|
||||
|
||||
// Check for consecutive slashes
|
||||
if strings.Contains(branchName, "//") {
|
||||
return fmt.Errorf("branch name %q cannot contain consecutive slashes", branchName)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getIntegrationBranchField extracts the integration_branch field from an epic's description.
|
||||
// Returns empty string if the field is not found.
|
||||
func getIntegrationBranchField(description string) string {
|
||||
if description == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
lines := strings.Split(description, "\n")
|
||||
for _, line := range lines {
|
||||
trimmed := strings.TrimSpace(line)
|
||||
if strings.HasPrefix(strings.ToLower(trimmed), "integration_branch:") {
|
||||
value := strings.TrimPrefix(trimmed, "integration_branch:")
|
||||
value = strings.TrimPrefix(value, "Integration_branch:")
|
||||
value = strings.TrimPrefix(value, "INTEGRATION_BRANCH:")
|
||||
// Handle case variations
|
||||
for _, prefix := range []string{"integration_branch:", "Integration_branch:", "INTEGRATION_BRANCH:"} {
|
||||
if strings.HasPrefix(trimmed, prefix) {
|
||||
value = strings.TrimPrefix(trimmed, prefix)
|
||||
break
|
||||
}
|
||||
}
|
||||
// Re-parse properly - the prefix removal above is messy
|
||||
parts := strings.SplitN(trimmed, ":", 2)
|
||||
if len(parts) == 2 {
|
||||
return strings.TrimSpace(parts[1])
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// getIntegrationBranchTemplate returns the integration branch template to use.
|
||||
// Priority: CLI flag > rig config > default
|
||||
func getIntegrationBranchTemplate(rigPath, cliOverride string) string {
|
||||
if cliOverride != "" {
|
||||
return cliOverride
|
||||
}
|
||||
|
||||
// Try to load rig settings
|
||||
settingsPath := filepath.Join(rigPath, "settings", "config.json")
|
||||
settings, err := config.LoadRigSettings(settingsPath)
|
||||
if err != nil {
|
||||
return defaultIntegrationBranchTemplate
|
||||
}
|
||||
|
||||
if settings.MergeQueue != nil && settings.MergeQueue.IntegrationBranchTemplate != "" {
|
||||
return settings.MergeQueue.IntegrationBranchTemplate
|
||||
}
|
||||
|
||||
return defaultIntegrationBranchTemplate
|
||||
}
|
||||
|
||||
// IntegrationStatusOutput is the JSON output structure for integration status.
|
||||
type IntegrationStatusOutput struct {
|
||||
Epic string `json:"epic"`
|
||||
@@ -66,8 +202,14 @@ func runMqIntegrationCreate(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("'%s' is a %s, not an epic", epicID, epic.Type)
|
||||
}
|
||||
|
||||
// Build integration branch name
|
||||
branchName := "integration/" + epicID
|
||||
// Build integration branch name from template
|
||||
template := getIntegrationBranchTemplate(r.Path, mqIntegrationCreateBranch)
|
||||
branchName := buildIntegrationBranchName(template, epicID)
|
||||
|
||||
// Validate the branch name
|
||||
if err := validateBranchName(branchName); err != nil {
|
||||
return fmt.Errorf("invalid branch name: %w", err)
|
||||
}
|
||||
|
||||
// Initialize git for the rig
|
||||
g := git.NewGit(r.Path)
|
||||
@@ -185,9 +327,6 @@ func runMqIntegrationLand(cmd *cobra.Command, args []string) error {
|
||||
bd := beads.New(r.Path)
|
||||
g := git.NewGit(r.Path)
|
||||
|
||||
// Build integration branch name
|
||||
branchName := "integration/" + epicID
|
||||
|
||||
// Show what we're about to do
|
||||
if mqIntegrationLandDryRun {
|
||||
fmt.Printf("%s Dry run - no changes will be made\n\n", style.Bold.Render("🔍"))
|
||||
@@ -206,6 +345,13 @@ func runMqIntegrationLand(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("'%s' is a %s, not an epic", epicID, epic.Type)
|
||||
}
|
||||
|
||||
// Get integration branch name from epic metadata (stored at create time)
|
||||
// Fall back to default template for backward compatibility with old epics
|
||||
branchName := getIntegrationBranchField(epic.Description)
|
||||
if branchName == "" {
|
||||
branchName = buildIntegrationBranchName(defaultIntegrationBranchTemplate, epicID)
|
||||
}
|
||||
|
||||
fmt.Printf("Landing integration branch for epic: %s\n", epicID)
|
||||
fmt.Printf(" Title: %s\n\n", epic.Title)
|
||||
|
||||
@@ -455,8 +601,21 @@ func runMqIntegrationStatus(cmd *cobra.Command, args []string) error {
|
||||
// Initialize beads for the rig
|
||||
bd := beads.New(r.Path)
|
||||
|
||||
// Build integration branch name
|
||||
branchName := "integration/" + epicID
|
||||
// Fetch epic to get stored branch name
|
||||
epic, err := bd.Show(epicID)
|
||||
if err != nil {
|
||||
if err == beads.ErrNotFound {
|
||||
return fmt.Errorf("epic '%s' not found", epicID)
|
||||
}
|
||||
return fmt.Errorf("fetching epic: %w", err)
|
||||
}
|
||||
|
||||
// Get integration branch name from epic metadata (stored at create time)
|
||||
// Fall back to default template for backward compatibility with old epics
|
||||
branchName := getIntegrationBranchField(epic.Description)
|
||||
if branchName == "" {
|
||||
branchName = buildIntegrationBranchName(defaultIntegrationBranchTemplate, epicID)
|
||||
}
|
||||
|
||||
// Initialize git for the rig
|
||||
g := git.NewGit(r.Path)
|
||||
@@ -492,8 +651,8 @@ func runMqIntegrationStatus(cmd *cobra.Command, args []string) error {
|
||||
aheadCount = 0 // Non-fatal
|
||||
}
|
||||
|
||||
// Query for MRs targeting this integration branch
|
||||
targetBranch := "integration/" + epicID
|
||||
// Query for MRs targeting this integration branch (use resolved name)
|
||||
targetBranch := branchName
|
||||
|
||||
// Get all merge-request issues
|
||||
allMRs, err := bd.List(beads.ListOptions{
|
||||
|
||||
@@ -434,3 +434,247 @@ func TestFilterMRsByTarget_NoMRFields(t *testing.T) {
|
||||
t.Errorf("filterMRsByTarget() should filter out issues without MR fields, got %d", len(got))
|
||||
}
|
||||
}
|
||||
|
||||
// Tests for configurable integration branch naming (Issue #104)
|
||||
|
||||
// TestBuildIntegrationBranchName verifies template expansion for integration
// branch names: {epic} expands to the epic ID, {prefix} to the portion of the
// epic ID before the first hyphen (per TestExtractEpicPrefix), and an empty
// template falls back to the default "integration/{epic}" form.
func TestBuildIntegrationBranchName(t *testing.T) {
	tests := []struct {
		name     string
		template string
		epicID   string
		want     string
	}{
		{
			name:     "default template",
			template: "",
			epicID:   "RA-123",
			want:     "integration/RA-123",
		},
		{
			name:     "explicit default template",
			template: "integration/{epic}",
			epicID:   "PROJ-456",
			want:     "integration/PROJ-456",
		},
		{
			name:     "custom template with prefix",
			template: "{prefix}/{epic}",
			epicID:   "RA-123",
			want:     "RA/RA-123",
		},
		{
			name:     "complex template",
			template: "feature/{prefix}/work/{epic}",
			epicID:   "PROJ-789",
			want:     "feature/PROJ/work/PROJ-789",
		},
		{
			// With no hyphen, the whole epic ID doubles as its own prefix.
			name:     "epic without hyphen",
			template: "{prefix}/{epic}",
			epicID:   "epicname",
			want:     "epicname/epicname",
		},
		{
			name:     "user variable left as-is without git config",
			template: "{user}/{epic}",
			epicID:   "RA-123",
			// Note: {user} is replaced with git user.name if available,
			// otherwise left as placeholder. In tests, it depends on git config.
			want: "", // We'll check pattern instead
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := buildIntegrationBranchName(tt.template, tt.epicID)
			if tt.want == "" {
				// For user variable test, just check {epic} was replaced;
				// {user} is environment-dependent so no exact value is pinned.
				if stringContains(got, "{epic}") {
					t.Errorf("buildIntegrationBranchName() = %q, should have replaced {epic}", got)
				}
			} else if got != tt.want {
				t.Errorf("buildIntegrationBranchName() = %q, want %q", got, tt.want)
			}
		})
	}
}
|
||||
|
||||
func TestExtractEpicPrefix(t *testing.T) {
|
||||
tests := []struct {
|
||||
epicID string
|
||||
want string
|
||||
}{
|
||||
{"RA-123", "RA"},
|
||||
{"PROJ-456", "PROJ"},
|
||||
{"gt-auth-epic", "gt"},
|
||||
{"epicname", "epicname"},
|
||||
{"X-1", "X"},
|
||||
{"-123", "-123"}, // No prefix before hyphen, return full string
|
||||
{"", ""},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.epicID, func(t *testing.T) {
|
||||
got := extractEpicPrefix(tt.epicID)
|
||||
if got != tt.want {
|
||||
t.Errorf("extractEpicPrefix(%q) = %q, want %q", tt.epicID, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateBranchName(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
branchName string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "valid simple branch",
|
||||
branchName: "integration/gt-epic",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid nested branch",
|
||||
branchName: "user/project/feature",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid with hyphens and underscores",
|
||||
branchName: "user-name/feature_branch",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "empty branch name",
|
||||
branchName: "",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "contains tilde",
|
||||
branchName: "branch~1",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "contains caret",
|
||||
branchName: "branch^2",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "contains colon",
|
||||
branchName: "branch:ref",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "contains space",
|
||||
branchName: "branch name",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "contains backslash",
|
||||
branchName: "branch\\name",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "contains double dot",
|
||||
branchName: "branch..name",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "contains at-brace",
|
||||
branchName: "branch@{name}",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "ends with .lock",
|
||||
branchName: "branch.lock",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "starts with slash",
|
||||
branchName: "/branch",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "ends with slash",
|
||||
branchName: "branch/",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "starts with dot",
|
||||
branchName: ".branch",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "ends with dot",
|
||||
branchName: "branch.",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "consecutive slashes",
|
||||
branchName: "branch//name",
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := validateBranchName(tt.branchName)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("validateBranchName(%q) error = %v, wantErr %v", tt.branchName, err, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetIntegrationBranchField(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
description string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "empty description",
|
||||
description: "",
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
name: "field at beginning",
|
||||
description: "integration_branch: klauern/PROJ-123/RA-epic\nSome description",
|
||||
want: "klauern/PROJ-123/RA-epic",
|
||||
},
|
||||
{
|
||||
name: "field in middle",
|
||||
description: "Some text\nintegration_branch: custom/branch\nMore text",
|
||||
want: "custom/branch",
|
||||
},
|
||||
{
|
||||
name: "field with extra whitespace",
|
||||
description: " integration_branch: spaced/branch \nOther content",
|
||||
want: "spaced/branch",
|
||||
},
|
||||
{
|
||||
name: "no integration_branch field",
|
||||
description: "Just a plain description\nWith multiple lines",
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
name: "mixed case field name",
|
||||
description: "Integration_branch: CamelCase/branch",
|
||||
want: "CamelCase/branch",
|
||||
},
|
||||
{
|
||||
name: "default format",
|
||||
description: "integration_branch: integration/gt-epic\nEpic for auth work",
|
||||
want: "integration/gt-epic",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := getIntegrationBranchField(tt.description)
|
||||
if got != tt.want {
|
||||
t.Errorf("getIntegrationBranchField() = %q, want %q", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/git"
|
||||
"github.com/steveyegge/gastown/internal/polecat"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
)
|
||||
@@ -89,7 +90,6 @@ Examples:
|
||||
RunE: runPolecatRemove,
|
||||
}
|
||||
|
||||
|
||||
var polecatSyncCmd = &cobra.Command{
|
||||
Use: "sync <rig>/<polecat>",
|
||||
Short: "Sync beads for a polecat",
|
||||
@@ -129,15 +129,15 @@ Examples:
|
||||
}
|
||||
|
||||
var (
|
||||
polecatSyncAll bool
|
||||
polecatSyncFromMain bool
|
||||
polecatStatusJSON bool
|
||||
polecatGitStateJSON bool
|
||||
polecatGCDryRun bool
|
||||
polecatNukeAll bool
|
||||
polecatNukeDryRun bool
|
||||
polecatNukeForce bool
|
||||
polecatCheckRecoveryJSON bool
|
||||
polecatSyncAll bool
|
||||
polecatSyncFromMain bool
|
||||
polecatStatusJSON bool
|
||||
polecatGitStateJSON bool
|
||||
polecatGCDryRun bool
|
||||
polecatNukeAll bool
|
||||
polecatNukeDryRun bool
|
||||
polecatNukeForce bool
|
||||
polecatCheckRecoveryJSON bool
|
||||
)
|
||||
|
||||
var polecatGCCmd = &cobra.Command{
|
||||
@@ -579,7 +579,7 @@ func runPolecatSync(cmd *cobra.Command, args []string) error {
|
||||
polecatName = ""
|
||||
}
|
||||
|
||||
mgr, r, err := getPolecatManager(rigName)
|
||||
mgr, _, err := getPolecatManager(rigName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -606,10 +606,15 @@ func runPolecatSync(cmd *cobra.Command, args []string) error {
|
||||
// Sync each polecat
|
||||
var syncErrors []string
|
||||
for _, name := range polecatsToSync {
|
||||
polecatDir := filepath.Join(r.Path, "polecats", name)
|
||||
// Get polecat to get correct clone path (handles old vs new structure)
|
||||
p, err := mgr.Get(name)
|
||||
if err != nil {
|
||||
syncErrors = append(syncErrors, fmt.Sprintf("%s: %v", name, err))
|
||||
continue
|
||||
}
|
||||
|
||||
// Check directory exists
|
||||
if _, err := os.Stat(polecatDir); os.IsNotExist(err) {
|
||||
if _, err := os.Stat(p.ClonePath); os.IsNotExist(err) {
|
||||
syncErrors = append(syncErrors, fmt.Sprintf("%s: directory not found", name))
|
||||
continue
|
||||
}
|
||||
@@ -623,7 +628,7 @@ func runPolecatSync(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf("Syncing %s/%s...\n", rigName, name)
|
||||
|
||||
syncCmd := exec.Command("bd", syncArgs...)
|
||||
syncCmd.Dir = polecatDir
|
||||
syncCmd.Dir = p.ClonePath
|
||||
output, err := syncCmd.CombinedOutput()
|
||||
if err != nil {
|
||||
syncErrors = append(syncErrors, fmt.Sprintf("%s: %v", name, err))
|
||||
@@ -975,7 +980,7 @@ type RecoveryStatus struct {
|
||||
NeedsRecovery bool `json:"needs_recovery"`
|
||||
Verdict string `json:"verdict"` // SAFE_TO_NUKE or NEEDS_RECOVERY
|
||||
Branch string `json:"branch,omitempty"`
|
||||
Issue string `json:"issue,omitempty"`
|
||||
Issue string `json:"issue,omitempty"`
|
||||
}
|
||||
|
||||
func runPolecatCheckRecovery(cmd *cobra.Command, args []string) error {
|
||||
@@ -1477,7 +1482,7 @@ func runPolecatNuke(cmd *cobra.Command, args []string) error {
|
||||
// Step 5: Close agent bead (if exists)
|
||||
agentBeadID := beads.PolecatBeadID(p.rigName, p.polecatName)
|
||||
closeArgs := []string{"close", agentBeadID, "--reason=nuked"}
|
||||
if sessionID := os.Getenv("CLAUDE_SESSION_ID"); sessionID != "" {
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
closeArgs = append(closeArgs, "--session="+sessionID)
|
||||
}
|
||||
closeCmd := exec.Command("bd", closeArgs...)
|
||||
|
||||
201
internal/cmd/polecat_dotdir_test.go
Normal file
201
internal/cmd/polecat_dotdir_test.go
Normal file
@@ -0,0 +1,201 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
)
|
||||
|
||||
// TestDiscoverHooksSkipsPolecatDotDirs verifies that discoverHooks ignores
// settings.json files living under a dot-named entry of a rig's polecats/
// directory (here polecats/.claude/...), so stray runtime-config directories
// do not contribute hooks.
func TestDiscoverHooksSkipsPolecatDotDirs(t *testing.T) {
	townRoot := setupTestTownForDotDir(t)
	rigPath := filepath.Join(townRoot, "gastown")

	// Plant a settings file inside a dot-named polecat dir; it must be skipped.
	settingsPath := filepath.Join(rigPath, "polecats", ".claude", ".claude", "settings.json")
	if err := os.MkdirAll(filepath.Dir(settingsPath), 0755); err != nil {
		t.Fatalf("mkdir settings dir: %v", err)
	}

	settings := `{"hooks":{"SessionStart":[{"matcher":"*","hooks":[{"type":"Stop","command":"echo hi"}]}]}}`
	if err := os.WriteFile(settingsPath, []byte(settings), 0644); err != nil {
		t.Fatalf("write settings: %v", err)
	}

	hooks, err := discoverHooks(townRoot)
	if err != nil {
		t.Fatalf("discoverHooks: %v", err)
	}

	// The only settings file in the town sits under a dot dir, so nothing
	// should have been discovered.
	if len(hooks) != 0 {
		t.Fatalf("expected no hooks, got %d", len(hooks))
	}
}
|
||||
|
||||
// TestStartPolecatsWithWorkSkipsDotDirs verifies that startPolecatsWithWork
// never starts a polecat for a dot-named directory under polecats/. A stub
// `bd` reports ready work ONLY when invoked from inside the .claude dir, so
// any polecat being started proves the dot dir was (incorrectly) scanned.
func TestStartPolecatsWithWorkSkipsDotDirs(t *testing.T) {
	townRoot := setupTestTownForDotDir(t)
	rigName := "gastown"
	rigPath := filepath.Join(townRoot, rigName)

	addRigEntry(t, townRoot, rigName)

	// One dot-named entry (must be skipped) and one normal polecat with no work.
	if err := os.MkdirAll(filepath.Join(rigPath, "polecats", ".claude"), 0755); err != nil {
		t.Fatalf("mkdir .claude polecat: %v", err)
	}
	if err := os.MkdirAll(filepath.Join(rigPath, "polecats", "toast"), 0755); err != nil {
		t.Fatalf("mkdir polecat: %v", err)
	}

	// Stub bd: reports a ready issue only when run from a .claude directory.
	binDir := t.TempDir()
	bdScript := `#!/bin/sh
if [ "$1" = "--no-daemon" ]; then
shift
fi
cmd="$1"
case "$cmd" in
list)
if [ "$(basename "$PWD")" = ".claude" ]; then
echo '[{"id":"gt-1"}]'
else
echo '[]'
fi
exit 0
;;
*)
exit 0
;;
esac
`
	writeScript(t, binDir, "bd", bdScript)

	// Stub tmux: has-session always fails, so no session appears running.
	tmuxScript := `#!/bin/sh
if [ "$1" = "has-session" ]; then
echo "tmux error" 1>&2
exit 1
fi
exit 0
`
	writeScript(t, binDir, "tmux", tmuxScript)

	// Put the stubs first on PATH so the code under test picks them up.
	t.Setenv("PATH", fmt.Sprintf("%s:%s", binDir, os.Getenv("PATH")))

	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("getwd: %v", err)
	}
	t.Cleanup(func() { _ = os.Chdir(cwd) })
	if err := os.Chdir(townRoot); err != nil {
		t.Fatalf("chdir town root: %v", err)
	}

	started, errs := startPolecatsWithWork(townRoot, rigName)

	// .claude must be skipped (despite "having work") and toast has none.
	if len(started) != 0 {
		t.Fatalf("expected no polecats started, got %v", started)
	}
	if len(errs) != 0 {
		t.Fatalf("expected no errors, got %v", errs)
	}
}
|
||||
|
||||
// TestRunSessionCheckSkipsDotDirs verifies that `gt session check` does not
// inspect dot-named entries under polecats/ (e.g. a stray .claude config
// dir). The stub tmux fails every has-session call, so if the dot dir were
// checked it would be reported as dead and ".claude" would appear in output.
func TestRunSessionCheckSkipsDotDirs(t *testing.T) {
	townRoot := setupTestTownForDotDir(t)
	rigName := "gastown"
	rigPath := filepath.Join(townRoot, rigName)

	addRigEntry(t, townRoot, rigName)

	// The only polecats/ entry is dot-named and must be ignored.
	if err := os.MkdirAll(filepath.Join(rigPath, "polecats", ".claude"), 0755); err != nil {
		t.Fatalf("mkdir .claude polecat: %v", err)
	}

	binDir := t.TempDir()
	tmuxScript := `#!/bin/sh
if [ "$1" = "has-session" ]; then
echo "can't find session" 1>&2
exit 1
fi
exit 0
`
	writeScript(t, binDir, "tmux", tmuxScript)
	t.Setenv("PATH", fmt.Sprintf("%s:%s", binDir, os.Getenv("PATH")))

	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("getwd: %v", err)
	}
	t.Cleanup(func() { _ = os.Chdir(cwd) })
	if err := os.Chdir(townRoot); err != nil {
		t.Fatalf("chdir town root: %v", err)
	}

	// Capture stdout so we can assert on what session check reported.
	output := captureStdout(t, func() {
		if err := runSessionCheck(&cobra.Command{}, []string{rigName}); err != nil {
			t.Fatalf("runSessionCheck: %v", err)
		}
	})

	if strings.Contains(output, ".claude") {
		t.Fatalf("expected .claude to be ignored, output:\n%s", output)
	}
}
|
||||
|
||||
func addRigEntry(t *testing.T, townRoot, rigName string) {
|
||||
t.Helper()
|
||||
|
||||
rigsPath := filepath.Join(townRoot, "mayor", "rigs.json")
|
||||
rigsConfig, err := config.LoadRigsConfig(rigsPath)
|
||||
if err != nil {
|
||||
t.Fatalf("load rigs.json: %v", err)
|
||||
}
|
||||
if rigsConfig.Rigs == nil {
|
||||
rigsConfig.Rigs = make(map[string]config.RigEntry)
|
||||
}
|
||||
rigsConfig.Rigs[rigName] = config.RigEntry{
|
||||
GitURL: "file:///dev/null",
|
||||
AddedAt: time.Now(),
|
||||
}
|
||||
if err := config.SaveRigsConfig(rigsPath, rigsConfig); err != nil {
|
||||
t.Fatalf("save rigs.json: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func setupTestTownForDotDir(t *testing.T) string {
|
||||
t.Helper()
|
||||
|
||||
townRoot := t.TempDir()
|
||||
|
||||
mayorDir := filepath.Join(townRoot, "mayor")
|
||||
if err := os.MkdirAll(mayorDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir mayor: %v", err)
|
||||
}
|
||||
|
||||
rigsPath := filepath.Join(mayorDir, "rigs.json")
|
||||
rigsConfig := &config.RigsConfig{
|
||||
Version: 1,
|
||||
Rigs: make(map[string]config.RigEntry),
|
||||
}
|
||||
if err := config.SaveRigsConfig(rigsPath, rigsConfig); err != nil {
|
||||
t.Fatalf("save rigs.json: %v", err)
|
||||
}
|
||||
|
||||
beadsDir := filepath.Join(townRoot, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir .beads: %v", err)
|
||||
}
|
||||
|
||||
return townRoot
|
||||
}
|
||||
|
||||
func writeScript(t *testing.T, dir, name, content string) {
|
||||
t.Helper()
|
||||
|
||||
path := filepath.Join(dir, name)
|
||||
if err := os.WriteFile(path, []byte(content), 0755); err != nil {
|
||||
t.Fatalf("write %s: %v", name, err)
|
||||
}
|
||||
}
|
||||
@@ -139,7 +139,7 @@ func SpawnPolecatForSling(rigName string, opts SlingSpawnOptions) (*SpawnedPolec
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Resolve account for Claude config
|
||||
// Resolve account for runtime config
|
||||
accountsPath := constants.MayorAccountsPath(townRoot)
|
||||
claudeConfigDir, accountHandle, err := config.ResolveAccountConfigDir(accountsPath, opts.Account)
|
||||
if err != nil {
|
||||
@@ -158,7 +158,7 @@ func SpawnPolecatForSling(rigName string, opts SlingSpawnOptions) (*SpawnedPolec
|
||||
if !running {
|
||||
fmt.Printf("Starting session for %s/%s...\n", rigName, polecatName)
|
||||
startOpts := polecat.SessionStartOptions{
|
||||
ClaudeConfigDir: claudeConfigDir,
|
||||
RuntimeConfigDir: claudeConfigDir,
|
||||
}
|
||||
if opts.Agent != "" {
|
||||
cmd, err := config.BuildPolecatStartupCommandWithAgentOverride(rigName, polecatName, r.Path, "", opts.Agent)
|
||||
|
||||
@@ -17,10 +17,13 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/checkpoint"
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
"github.com/steveyegge/gastown/internal/deacon"
|
||||
"github.com/steveyegge/gastown/internal/events"
|
||||
"github.com/steveyegge/gastown/internal/lock"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/state"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/templates"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
@@ -80,12 +83,15 @@ func init() {
|
||||
type RoleContext = RoleInfo
|
||||
|
||||
func runPrime(cmd *cobra.Command, args []string) error {
|
||||
if !state.IsEnabled() {
|
||||
return nil
|
||||
}
|
||||
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting current directory: %w", err)
|
||||
}
|
||||
|
||||
// Find town root
|
||||
townRoot, err := workspace.FindFromCwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("finding workspace: %w", err)
|
||||
@@ -601,6 +607,11 @@ func outputStartupDirective(ctx RoleContext) {
|
||||
fmt.Println(" - If attachment found → **RUN IT** (no human input needed)")
|
||||
fmt.Println(" - If no attachment → await user instruction")
|
||||
case RoleDeacon:
|
||||
// Skip startup protocol if paused - the pause message was already shown
|
||||
paused, _, _ := deacon.IsPaused(ctx.TownRoot)
|
||||
if paused {
|
||||
return
|
||||
}
|
||||
fmt.Println()
|
||||
fmt.Println("---")
|
||||
fmt.Println()
|
||||
@@ -911,6 +922,13 @@ func showMoleculeProgress(b *beads.Beads, rootID string) {
|
||||
// Deacon uses wisps (Wisp:true issues in main .beads/) for patrol cycles.
|
||||
// Deacon is a town-level role, so it uses town root beads (not rig beads).
|
||||
func outputDeaconPatrolContext(ctx RoleContext) {
|
||||
// Check if Deacon is paused - if so, output PAUSED message and skip patrol context
|
||||
paused, state, err := deacon.IsPaused(ctx.TownRoot)
|
||||
if err == nil && paused {
|
||||
outputDeaconPausedMessage(state)
|
||||
return
|
||||
}
|
||||
|
||||
cfg := PatrolConfig{
|
||||
RoleName: "deacon",
|
||||
PatrolMolName: "mol-deacon-patrol",
|
||||
@@ -930,6 +948,32 @@ func outputDeaconPatrolContext(ctx RoleContext) {
|
||||
outputPatrolContext(cfg)
|
||||
}
|
||||
|
||||
// outputDeaconPausedMessage outputs a prominent PAUSED message for the Deacon.
// When paused, the Deacon must not perform any patrol actions.
//
// Prints the pause reason, timestamp, and author from the persisted
// PauseState, followed by an explicit list of forbidden autonomous actions.
// Only the paused-at time is unconditional; reason and author are printed
// when non-empty.
func outputDeaconPausedMessage(state *deacon.PauseState) {
	fmt.Println()
	fmt.Printf("%s\n\n", style.Bold.Render("## ⏸️ DEACON PAUSED"))
	fmt.Println("You are paused and must NOT perform any patrol actions.")
	fmt.Println()
	if state.Reason != "" {
		fmt.Printf("Reason: %s\n", state.Reason)
	}
	fmt.Printf("Paused at: %s\n", state.PausedAt.Format(time.RFC3339))
	if state.PausedBy != "" {
		fmt.Printf("Paused by: %s\n", state.PausedBy)
	}
	fmt.Println()
	fmt.Println("Wait for human to run `gt deacon resume` before working.")
	fmt.Println()
	fmt.Println("**DO NOT:**")
	fmt.Println("- Create patrol molecules")
	fmt.Println("- Run heartbeats")
	fmt.Println("- Check agent health")
	fmt.Println("- Take any autonomous actions")
	fmt.Println()
	fmt.Println("You may respond to direct human questions.")
}
|
||||
|
||||
// outputWitnessPatrolContext shows patrol molecule status for the Witness.
|
||||
// Witness AUTO-BONDS its patrol molecule on startup if one isn't already running.
|
||||
func outputWitnessPatrolContext(ctx RoleContext) {
|
||||
@@ -1460,22 +1504,17 @@ func outputSessionMetadata(ctx RoleContext) {
|
||||
// resolveSessionIDForPrime finds the session ID from available sources.
|
||||
// Priority: GT_SESSION_ID env, CLAUDE_SESSION_ID env, persisted file, fallback.
|
||||
func resolveSessionIDForPrime(actor string) string {
|
||||
// 1. GT_SESSION_ID (new canonical)
|
||||
if id := os.Getenv("GT_SESSION_ID"); id != "" {
|
||||
// 1. Try runtime's session ID lookup (checks GT_SESSION_ID_ENV, then CLAUDE_SESSION_ID)
|
||||
if id := runtime.SessionIDFromEnv(); id != "" {
|
||||
return id
|
||||
}
|
||||
|
||||
// 2. CLAUDE_SESSION_ID (legacy/Claude Code)
|
||||
if id := os.Getenv("CLAUDE_SESSION_ID"); id != "" {
|
||||
return id
|
||||
}
|
||||
|
||||
// 3. Persisted session file (from gt prime --hook)
|
||||
// 2. Persisted session file (from gt prime --hook)
|
||||
if id := ReadPersistedSessionID(); id != "" {
|
||||
return id
|
||||
}
|
||||
|
||||
// 4. Fallback to generated identifier
|
||||
// 3. Fallback to generated identifier
|
||||
return fmt.Sprintf("%s-%d", actor, os.Getpid())
|
||||
}
|
||||
|
||||
|
||||
147
internal/cmd/rig_detect.go
Normal file
147
internal/cmd/rig_detect.go
Normal file
@@ -0,0 +1,147 @@
|
||||
// ABOUTME: Hidden command for shell hook to detect rigs and update cache.
|
||||
// ABOUTME: Called by shell integration to set GT_TOWN_ROOT and GT_RIG env vars.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"strings"

	"github.com/spf13/cobra"
	"github.com/steveyegge/gastown/internal/state"
	"github.com/steveyegge/gastown/internal/workspace"
)
|
||||
|
||||
// rigDetectCache, when non-empty (--cache flag), is the repository path to
// record the detection result for in the shell hook's cache file.
var rigDetectCache string

// rigDetectCmd is the hidden `gt rig detect` command used by the shell
// integration to resolve GT_TOWN_ROOT/GT_RIG for a given path.
var rigDetectCmd = &cobra.Command{
	Use:    "detect [path]",
	Short:  "Detect rig from repository path (internal use)",
	Hidden: true,
	Long: `Detect rig from a repository path and optionally cache the result.

This is an internal command used by shell integration. It checks if the given
path is inside a Gas Town rig and outputs shell variable assignments.

When --cache is specified, the result is written to ~/.cache/gastown/rigs.cache
for fast lookups by the shell hook.

Output format (to stdout):
export GT_TOWN_ROOT=/path/to/town
export GT_RIG=rigname

Or if not in a rig:
unset GT_TOWN_ROOT GT_RIG`,
	Args: cobra.MaximumNArgs(1),
	RunE: runRigDetect,
}
|
||||
|
||||
// init wires the hidden detect subcommand under `gt rig` and registers its
// --cache flag.
func init() {
	rigCmd.AddCommand(rigDetectCmd)
	rigDetectCmd.Flags().StringVar(&rigDetectCache, "cache", "", "Repository path to cache detection result for")
}
|
||||
|
||||
func runRigDetect(cmd *cobra.Command, args []string) error {
|
||||
checkPath := "."
|
||||
if len(args) > 0 {
|
||||
checkPath = args[0]
|
||||
}
|
||||
|
||||
absPath, err := filepath.Abs(checkPath)
|
||||
if err != nil {
|
||||
return outputNotInRig()
|
||||
}
|
||||
|
||||
townRoot, err := workspace.Find(absPath)
|
||||
if err != nil || townRoot == "" {
|
||||
return outputNotInRig()
|
||||
}
|
||||
|
||||
rigName := detectRigFromPath(townRoot, absPath)
|
||||
|
||||
if rigName != "" {
|
||||
fmt.Printf("export GT_TOWN_ROOT=%q\n", townRoot)
|
||||
fmt.Printf("export GT_RIG=%q\n", rigName)
|
||||
} else {
|
||||
fmt.Printf("export GT_TOWN_ROOT=%q\n", townRoot)
|
||||
fmt.Println("unset GT_RIG")
|
||||
}
|
||||
|
||||
if rigDetectCache != "" {
|
||||
if err := updateRigCache(rigDetectCache, townRoot, rigName); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "warning: could not update cache: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func detectRigFromPath(townRoot, absPath string) string {
|
||||
rel, err := filepath.Rel(townRoot, absPath)
|
||||
if err != nil || strings.HasPrefix(rel, "..") {
|
||||
return ""
|
||||
}
|
||||
|
||||
parts := strings.Split(rel, string(filepath.Separator))
|
||||
if len(parts) == 0 || parts[0] == "." {
|
||||
return ""
|
||||
}
|
||||
|
||||
candidateRig := parts[0]
|
||||
|
||||
switch candidateRig {
|
||||
case "mayor", "deacon", ".beads", ".claude", ".git", "plugins":
|
||||
return ""
|
||||
}
|
||||
|
||||
rigConfigPath := filepath.Join(townRoot, candidateRig, "config.json")
|
||||
if _, err := os.Stat(rigConfigPath); err == nil {
|
||||
return candidateRig
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// outputNotInRig emits the shell fragment for "not inside a Gas Town":
// both environment variables are unset. Always reports success so the shell
// hook never sees an error from a mere miss.
func outputNotInRig() error {
	const clearVars = "unset GT_TOWN_ROOT GT_RIG"
	fmt.Println(clearVars)
	return nil
}
|
||||
|
||||
func updateRigCache(repoRoot, townRoot, rigName string) error {
|
||||
cacheDir := state.CacheDir()
|
||||
if err := os.MkdirAll(cacheDir, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cachePath := filepath.Join(cacheDir, "rigs.cache")
|
||||
|
||||
existing := make(map[string]string)
|
||||
if data, err := os.ReadFile(cachePath); err == nil {
|
||||
lines := strings.Split(string(data), "\n")
|
||||
for _, line := range lines {
|
||||
if idx := strings.Index(line, ":"); idx > 0 {
|
||||
existing[line[:idx]] = line[idx+1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var value string
|
||||
if rigName != "" {
|
||||
value = fmt.Sprintf("export GT_TOWN_ROOT=%q; export GT_RIG=%q", townRoot, rigName)
|
||||
} else if townRoot != "" {
|
||||
value = fmt.Sprintf("export GT_TOWN_ROOT=%q; unset GT_RIG", townRoot)
|
||||
} else {
|
||||
value = "unset GT_TOWN_ROOT GT_RIG"
|
||||
}
|
||||
|
||||
existing[repoRoot] = value
|
||||
|
||||
var lines []string
|
||||
for k, v := range existing {
|
||||
lines = append(lines, k+":"+v)
|
||||
}
|
||||
|
||||
return os.WriteFile(cachePath, []byte(strings.Join(lines, "\n")+"\n"), 0644)
|
||||
}
|
||||
186
internal/cmd/rig_quick_add.go
Normal file
186
internal/cmd/rig_quick_add.go
Normal file
@@ -0,0 +1,186 @@
|
||||
// ABOUTME: Quick-add command for adding a repo to Gas Town with minimal friction.
|
||||
// ABOUTME: Used by shell hook for automatic "add to Gas Town?" prompts.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
|
||||
// Flags for `gt rig quick-add`.
var (
	quickAddUser  string // crew workspace name; defaults to $USER at runtime
	quickAddYes   bool   // NOTE(review): declared and registered but not consulted in the visible runRigQuickAdd — confirm intent
	quickAddQuiet bool   // suppress progress output (the GT_CREW_PATH line is still printed)
)

// rigQuickAddCmd is the hidden `gt rig quick-add` command backing the shell
// hook's "Add to Gas Town?" prompt.
var rigQuickAddCmd = &cobra.Command{
	Use:    "quick-add [path]",
	Short:  "Quickly add current repo to Gas Town",
	Hidden: true,
	Long: `Quickly add a git repository to Gas Town with minimal interaction.

This command is designed for the shell hook's "Add to Gas Town?" prompt.
It infers the rig name from the directory and git URL from the remote.

Examples:
gt rig quick-add # Add current directory
gt rig quick-add ~/Repos/myproject # Add specific path
gt rig quick-add --yes # Non-interactive`,
	Args: cobra.MaximumNArgs(1),
	RunE: runRigQuickAdd,
}

// init registers quick-add under `gt rig` along with its flags.
func init() {
	rigCmd.AddCommand(rigQuickAddCmd)
	rigQuickAddCmd.Flags().StringVar(&quickAddUser, "user", "", "Crew workspace name (default: $USER)")
	rigQuickAddCmd.Flags().BoolVar(&quickAddYes, "yes", false, "Non-interactive, assume yes")
	rigQuickAddCmd.Flags().BoolVar(&quickAddQuiet, "quiet", false, "Minimal output")
}
|
||||
|
||||
// runRigQuickAdd adds the git repository at the given path (default ".") to
// an existing Gas Town: it locates the repo root and origin URL, derives a
// sanitized rig name, shells out to `gt rig add`, then best-effort creates a
// crew workspace for the current user. The final "GT_CREW_PATH=..." line is
// machine-readable output for the shell hook and is printed even in --quiet
// mode.
func runRigQuickAdd(cmd *cobra.Command, args []string) error {
	targetPath := "."
	if len(args) > 0 {
		targetPath = args[0]
	}

	absPath, err := filepath.Abs(targetPath)
	if err != nil {
		return fmt.Errorf("resolving path: %w", err)
	}

	// Refuse to double-add: a path already inside a town is already managed.
	if townRoot, err := workspace.Find(absPath); err == nil && townRoot != "" {
		return fmt.Errorf("already part of a Gas Town workspace: %s", townRoot)
	}

	gitRoot, err := findGitRoot(absPath)
	if err != nil {
		return fmt.Errorf("not a git repository: %w", err)
	}

	gitURL, err := findGitRemoteURL(gitRoot)
	if err != nil {
		return fmt.Errorf("no git remote found: %w", err)
	}

	// Rig name comes from the repo directory name, sanitized to underscores.
	rigName := sanitizeRigName(filepath.Base(gitRoot))

	townRoot, err := findOrCreateTown()
	if err != nil {
		return fmt.Errorf("finding Gas Town: %w", err)
	}

	rigPath := filepath.Join(townRoot, rigName)
	if _, err := os.Stat(rigPath); err == nil {
		return fmt.Errorf("rig %q already exists in %s", rigName, townRoot)
	}

	// Tell the user when sanitization changed the name they'd expect.
	originalName := filepath.Base(gitRoot)
	if rigName != originalName && !quickAddQuiet {
		fmt.Printf("Note: Using %q as rig name (sanitized from %q)\n", rigName, originalName)
	}

	if !quickAddQuiet {
		fmt.Printf("Adding %s to Gas Town...\n", style.Bold.Render(rigName))
		fmt.Printf(" Repository: %s\n", gitURL)
		fmt.Printf(" Town: %s\n", townRoot)
	}

	// Delegate the heavy lifting to the full `gt rig add`, streaming output.
	addArgs := []string{"rig", "add", rigName, gitURL}
	addCmd := exec.Command("gt", addArgs...)
	addCmd.Dir = townRoot
	addCmd.Stdout = os.Stdout
	addCmd.Stderr = os.Stderr
	if err := addCmd.Run(); err != nil {
		fmt.Printf("\n%s Failed to add rig. You can try manually:\n", style.Warning.Render("⚠"))
		fmt.Printf(" cd %s && gt rig add %s %s\n", townRoot, rigName, gitURL)
		return fmt.Errorf("gt rig add failed: %w", err)
	}

	// Crew workspace owner: --user flag, then $USER, then "default".
	user := quickAddUser
	if user == "" {
		user = os.Getenv("USER")
	}
	if user == "" {
		user = "default"
	}

	if !quickAddQuiet {
		fmt.Printf("\nCreating crew workspace for %s...\n", user)
	}

	// Crew creation is best-effort: on failure we print recovery steps but
	// the rig add above still stands, so do not return an error.
	crewArgs := []string{"crew", "add", user, "--rig", rigName}
	crewCmd := exec.Command("gt", crewArgs...)
	crewCmd.Dir = filepath.Join(townRoot, rigName)
	crewCmd.Stdout = os.Stdout
	crewCmd.Stderr = os.Stderr
	if err := crewCmd.Run(); err != nil {
		fmt.Printf(" %s Could not create crew workspace: %v\n", style.Dim.Render("⚠"), err)
		fmt.Printf(" Run manually: cd %s && gt crew add %s --rig %s\n", filepath.Join(townRoot, rigName), user, rigName)
	}

	crewPath := filepath.Join(townRoot, rigName, "crew", user)
	if !quickAddQuiet {
		fmt.Printf("\n%s Added to Gas Town!\n", style.Success.Render("✓"))
		fmt.Printf("\nYour workspace: %s\n", style.Bold.Render(crewPath))
	}

	// Machine-readable line consumed by the shell hook.
	fmt.Printf("GT_CREW_PATH=%s\n", crewPath)

	return nil
}
|
||||
|
||||
// findGitRoot returns the top-level directory of the git repository that
// contains path, as reported by `git rev-parse --show-toplevel`.
func findGitRoot(path string) (string, error) {
	revParse := exec.Command("git", "rev-parse", "--show-toplevel")
	revParse.Dir = path

	raw, err := revParse.Output()
	if err != nil {
		return "", err
	}

	return strings.TrimSpace(string(raw)), nil
}
|
||||
|
||||
// findGitRemoteURL returns the URL of the "origin" remote for the
// repository rooted at gitRoot, via `git remote get-url origin`.
func findGitRemoteURL(gitRoot string) (string, error) {
	getURL := exec.Command("git", "remote", "get-url", "origin")
	getURL.Dir = gitRoot

	raw, err := getURL.Output()
	if err != nil {
		return "", err
	}

	return strings.TrimSpace(string(raw)), nil
}
|
||||
|
||||
// sanitizeRigName turns a repository directory name into a safe rig name by
// replacing hyphens, dots, and spaces with underscores.
func sanitizeRigName(name string) string {
	replacer := strings.NewReplacer(
		"-", "_",
		".", "_",
		" ", "_",
	)
	return replacer.Replace(name)
}
|
||||
|
||||
// findOrCreateTown locates an existing Gas Town workspace under the user's
// home directory. Despite the name it does not create one: it probes the
// known candidate roots (~/gt, then ~/gastown) for a mayor/ subdirectory and
// returns an error if neither exists.
func findOrCreateTown() (string, error) {
	home, err := os.UserHomeDir()
	if err != nil {
		return "", err
	}

	// Probe candidates in priority order; the presence of mayor/ is the
	// marker that a directory is a Gas Town root.
	for _, dir := range []string{"gt", "gastown"} {
		root := filepath.Join(home, dir)
		if _, statErr := os.Stat(filepath.Join(root, "mayor")); statErr == nil {
			return root, nil
		}
	}

	return "", fmt.Errorf("no Gas Town found - run 'gt install ~/gt' first")
}
|
||||
@@ -649,6 +649,9 @@ func runSessionCheck(cmd *cobra.Command, args []string) error {
|
||||
if !entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(entry.Name(), ".") {
|
||||
continue
|
||||
}
|
||||
polecatName := entry.Name()
|
||||
sessionName := fmt.Sprintf("gt-%s-%s", r.Name, polecatName)
|
||||
totalChecked++
|
||||
|
||||
99
internal/cmd/shell.go
Normal file
99
internal/cmd/shell.go
Normal file
@@ -0,0 +1,99 @@
|
||||
// ABOUTME: Shell integration management commands.
|
||||
// ABOUTME: Install/remove shell hooks without full HQ setup.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/shell"
|
||||
"github.com/steveyegge/gastown/internal/state"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
)
|
||||
|
||||
// shellCmd is the parent `gt shell` command. It performs no work itself and
// only dispatches to the install/remove/status subcommands below.
var shellCmd = &cobra.Command{
	Use:     "shell",
	GroupID: GroupConfig,
	Short:   "Manage shell integration",
	RunE:    requireSubcommand,
}

// shellInstallCmd writes (or refreshes) the Gas Town hook in the user's
// shell RC file.
var shellInstallCmd = &cobra.Command{
	Use:   "install",
	Short: "Install or update shell integration",
	Long: `Install or update the Gas Town shell integration.

This adds a hook to your shell RC file that:
- Sets GT_TOWN_ROOT and GT_RIG when you cd into a Gas Town rig
- Offers to add new git repos to Gas Town on first visit

Run this after upgrading gt to get the latest shell hook features.`,
	RunE: runShellInstall,
}

// shellRemoveCmd strips the Gas Town hook from the user's shell RC file.
var shellRemoveCmd = &cobra.Command{
	Use:   "remove",
	Short: "Remove shell integration",
	RunE:  runShellRemove,
}

// shellStatusCmd reports whether Gas Town is enabled and whether the hook
// is installed.
var shellStatusCmd = &cobra.Command{
	Use:   "status",
	Short: "Show shell integration status",
	RunE:  runShellStatus,
}

// init wires the shell subcommands into the root command tree.
func init() {
	shellCmd.AddCommand(shellInstallCmd)
	shellCmd.AddCommand(shellRemoveCmd)
	shellCmd.AddCommand(shellStatusCmd)
	rootCmd.AddCommand(shellCmd)
}
|
||||
|
||||
func runShellInstall(cmd *cobra.Command, args []string) error {
|
||||
if err := shell.Install(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := state.Enable(Version); err != nil {
|
||||
fmt.Printf("%s Could not enable Gas Town: %v\n", style.Dim.Render("⚠"), err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Shell integration installed (%s)\n", style.Success.Render("✓"), shell.RCFilePath(shell.DetectShell()))
|
||||
fmt.Println()
|
||||
fmt.Println("Run 'source ~/.zshrc' or open a new terminal to activate.")
|
||||
return nil
|
||||
}
|
||||
|
||||
func runShellRemove(cmd *cobra.Command, args []string) error {
|
||||
if err := shell.Remove(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("%s Shell integration removed\n", style.Success.Render("✓"))
|
||||
return nil
|
||||
}
|
||||
|
||||
func runShellStatus(cmd *cobra.Command, args []string) error {
|
||||
s, err := state.Load()
|
||||
if err != nil {
|
||||
fmt.Println("Gas Town: not configured")
|
||||
fmt.Println("Shell integration: not installed")
|
||||
return nil
|
||||
}
|
||||
|
||||
if s.Enabled {
|
||||
fmt.Println("Gas Town: enabled")
|
||||
} else {
|
||||
fmt.Println("Gas Town: disabled")
|
||||
}
|
||||
|
||||
if s.ShellIntegration != "" {
|
||||
fmt.Printf("Shell integration: %s (%s)\n", s.ShellIntegration, shell.RCFilePath(s.ShellIntegration))
|
||||
} else {
|
||||
fmt.Println("Shell integration: not installed")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -387,8 +387,14 @@ func runSling(cmd *cobra.Command, args []string) error {
|
||||
if formulaName != "" {
|
||||
fmt.Printf(" Instantiating formula %s...\n", formulaName)
|
||||
|
||||
// Route bd mutations (cook/wisp/bond) to the correct beads context for the target bead.
|
||||
// Some bd mol commands don't support prefix routing, so we must run them from the
|
||||
// rig directory that owns the bead's database.
|
||||
formulaWorkDir := beads.ResolveHookDir(townRoot, beadID, hookWorkDir)
|
||||
|
||||
// Step 1: Cook the formula (ensures proto exists)
|
||||
cookCmd := exec.Command("bd", "--no-daemon", "cook", formulaName)
|
||||
cookCmd.Dir = formulaWorkDir
|
||||
cookCmd.Stderr = os.Stderr
|
||||
if err := cookCmd.Run(); err != nil {
|
||||
return fmt.Errorf("cooking formula %s: %w", formulaName, err)
|
||||
@@ -398,6 +404,7 @@ func runSling(cmd *cobra.Command, args []string) error {
|
||||
featureVar := fmt.Sprintf("feature=%s", info.Title)
|
||||
wispArgs := []string{"--no-daemon", "mol", "wisp", formulaName, "--var", featureVar, "--json"}
|
||||
wispCmd := exec.Command("bd", wispArgs...)
|
||||
wispCmd.Dir = formulaWorkDir
|
||||
wispCmd.Stderr = os.Stderr
|
||||
wispOut, err := wispCmd.Output()
|
||||
if err != nil {
|
||||
@@ -415,6 +422,7 @@ func runSling(cmd *cobra.Command, args []string) error {
|
||||
// Use --no-daemon for mol bond (requires direct database access)
|
||||
bondArgs := []string{"--no-daemon", "mol", "bond", wispRootID, beadID, "--json"}
|
||||
bondCmd := exec.Command("bd", bondArgs...)
|
||||
bondCmd.Dir = formulaWorkDir
|
||||
bondCmd.Stderr = os.Stderr
|
||||
bondOut, err := bondCmd.Output()
|
||||
if err != nil {
|
||||
@@ -503,7 +511,7 @@ func runSling(cmd *cobra.Command, args []string) error {
|
||||
// This enables no-tmux mode where agents discover args via gt prime / bd show.
|
||||
func storeArgsInBead(beadID, args string) error {
|
||||
// Get the bead to preserve existing description content
|
||||
showCmd := exec.Command("bd", "--no-daemon", "show", beadID, "--json")
|
||||
showCmd := exec.Command("bd", "--no-daemon", "show", beadID, "--json", "--allow-stale")
|
||||
out, err := showCmd.Output()
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetching bead: %w", err)
|
||||
@@ -712,8 +720,12 @@ func sessionToAgentID(sessionName string) string {
|
||||
// verifyBeadExists checks that the bead exists using bd show.
|
||||
// Uses bd's native prefix-based routing via routes.jsonl - do NOT set BEADS_DIR
|
||||
// as that overrides routing and breaks resolution of rig-level beads.
|
||||
//
|
||||
// Uses --no-daemon with --allow-stale to avoid daemon socket timing issues
|
||||
// while still finding beads when database is out of sync with JSONL.
|
||||
// For existence checks, stale data is acceptable - we just need to know it exists.
|
||||
func verifyBeadExists(beadID string) error {
|
||||
cmd := exec.Command("bd", "--no-daemon", "show", beadID, "--json")
|
||||
cmd := exec.Command("bd", "--no-daemon", "show", beadID, "--json", "--allow-stale")
|
||||
// Run from town root so bd can find routes.jsonl for prefix-based routing.
|
||||
// Do NOT set BEADS_DIR - that overrides routing and breaks rig bead resolution.
|
||||
if townRoot, err := workspace.FindFromCwd(); err == nil {
|
||||
@@ -734,8 +746,9 @@ type beadInfo struct {
|
||||
|
||||
// getBeadInfo returns status and assignee for a bead.
|
||||
// Uses bd's native prefix-based routing via routes.jsonl.
|
||||
// Uses --no-daemon with --allow-stale for consistency with verifyBeadExists.
|
||||
func getBeadInfo(beadID string) (*beadInfo, error) {
|
||||
cmd := exec.Command("bd", "--no-daemon", "show", beadID, "--json")
|
||||
cmd := exec.Command("bd", "--no-daemon", "show", beadID, "--json", "--allow-stale")
|
||||
// Run from town root so bd can find routes.jsonl for prefix-based routing.
|
||||
if townRoot, err := workspace.FindFromCwd(); err == nil {
|
||||
cmd.Dir = townRoot
|
||||
@@ -806,15 +819,16 @@ func resolveSelfTarget() (agentID string, pane string, hookRoot string, err erro
|
||||
|
||||
// verifyFormulaExists checks that the formula exists using bd formula show.
|
||||
// Formulas are TOML files (.formula.toml).
|
||||
// Uses --no-daemon with --allow-stale for consistency with verifyBeadExists.
|
||||
func verifyFormulaExists(formulaName string) error {
|
||||
// Try bd formula show (handles all formula file formats)
|
||||
cmd := exec.Command("bd", "--no-daemon", "formula", "show", formulaName)
|
||||
cmd := exec.Command("bd", "--no-daemon", "formula", "show", formulaName, "--allow-stale")
|
||||
if err := cmd.Run(); err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Try with mol- prefix
|
||||
cmd = exec.Command("bd", "--no-daemon", "formula", "show", "mol-"+formulaName)
|
||||
cmd = exec.Command("bd", "--no-daemon", "formula", "show", "mol-"+formulaName, "--allow-stale")
|
||||
if err := cmd.Run(); err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
package cmd
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseWispIDFromJSON(t *testing.T) {
|
||||
tests := []struct {
|
||||
@@ -183,3 +188,318 @@ func TestFormatTrackBeadIDConsumerCompatibility(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestSlingFormulaOnBeadRoutesBDCommandsToTargetRig verifies that when a
// formula is slung onto a bead, the bd cook/wisp/bond mutations run from the
// rig directory that owns the bead's database (per routes.jsonl), not from
// the caller's cwd. It stubs bd with a shell script that logs its working
// directory and arguments to a file.
func TestSlingFormulaOnBeadRoutesBDCommandsToTargetRig(t *testing.T) {
	townRoot := t.TempDir()

	// Minimal workspace marker so workspace.FindFromCwd() succeeds.
	if err := os.MkdirAll(filepath.Join(townRoot, "mayor", "rig"), 0755); err != nil {
		t.Fatalf("mkdir mayor/rig: %v", err)
	}

	// Create a rig path that owns gt-* beads, and a routes.jsonl pointing to it.
	rigDir := filepath.Join(townRoot, "gastown", "mayor", "rig")
	if err := os.MkdirAll(filepath.Join(townRoot, ".beads"), 0755); err != nil {
		t.Fatalf("mkdir .beads: %v", err)
	}
	if err := os.MkdirAll(rigDir, 0755); err != nil {
		t.Fatalf("mkdir rigDir: %v", err)
	}
	routes := strings.Join([]string{
		`{"prefix":"gt-","path":"gastown/mayor/rig"}`,
		`{"prefix":"hq-","path":"."}`,
		"",
	}, "\n")
	if err := os.WriteFile(filepath.Join(townRoot, ".beads", "routes.jsonl"), []byte(routes), 0644); err != nil {
		t.Fatalf("write routes.jsonl: %v", err)
	}

	// Stub bd so we can observe the working directory for cook/wisp/bond.
	// Each invocation appends "<pwd>|<args>" to the BD_LOG file.
	binDir := filepath.Join(townRoot, "bin")
	if err := os.MkdirAll(binDir, 0755); err != nil {
		t.Fatalf("mkdir binDir: %v", err)
	}
	logPath := filepath.Join(townRoot, "bd.log")
	bdPath := filepath.Join(binDir, "bd")
	bdScript := `#!/bin/sh
set -e
echo "$(pwd)|$*" >> "${BD_LOG}"
if [ "$1" = "--no-daemon" ]; then
  shift
fi
cmd="$1"
shift || true
case "$cmd" in
  show)
    echo '[{"title":"Test issue","status":"open","assignee":"","description":""}]'
    ;;
  formula)
    # formula show <name>
    exit 0
    ;;
  cook)
    exit 0
    ;;
  mol)
    sub="$1"
    shift || true
    case "$sub" in
      wisp)
        echo '{"new_epic_id":"gt-wisp-xyz"}'
        ;;
      bond)
        echo '{"root_id":"gt-wisp-xyz"}'
        ;;
    esac
    ;;
esac
exit 0
`
	if err := os.WriteFile(bdPath, []byte(bdScript), 0755); err != nil {
		t.Fatalf("write bd stub: %v", err)
	}

	// Put the stub first on PATH and pin the role env vars the sling
	// resolution logic reads.
	t.Setenv("BD_LOG", logPath)
	t.Setenv("PATH", binDir+string(os.PathListSeparator)+os.Getenv("PATH"))
	t.Setenv(EnvGTRole, "mayor")
	t.Setenv("GT_POLECAT", "")
	t.Setenv("GT_CREW", "")

	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("getwd: %v", err)
	}
	t.Cleanup(func() { _ = os.Chdir(cwd) })
	if err := os.Chdir(filepath.Join(townRoot, "mayor", "rig")); err != nil {
		t.Fatalf("chdir: %v", err)
	}

	// Ensure we don't leak global flag state across tests.
	prevOn := slingOnTarget
	prevVars := slingVars
	prevDryRun := slingDryRun
	prevNoConvoy := slingNoConvoy
	t.Cleanup(func() {
		slingOnTarget = prevOn
		slingVars = prevVars
		slingDryRun = prevDryRun
		slingNoConvoy = prevNoConvoy
	})

	slingDryRun = false
	slingNoConvoy = true
	slingVars = nil
	slingOnTarget = "gt-abc123"

	if err := runSling(nil, []string{"mol-review"}); err != nil {
		t.Fatalf("runSling: %v", err)
	}

	logBytes, err := os.ReadFile(logPath)
	if err != nil {
		t.Fatalf("read bd log: %v", err)
	}
	logLines := strings.Split(strings.TrimSpace(string(logBytes)), "\n")

	// EvalSymlinks both sides: on macOS t.TempDir() lives under /var which
	// is a symlink to /private/var, so raw string compare would fail.
	wantDir := rigDir
	if resolved, err := filepath.EvalSymlinks(wantDir); err == nil {
		wantDir = resolved
	}
	gotCook := false
	gotWisp := false
	gotBond := false

	for _, line := range logLines {
		parts := strings.SplitN(line, "|", 2)
		if len(parts) != 2 {
			continue
		}
		dir := parts[0]
		if resolved, err := filepath.EvalSymlinks(dir); err == nil {
			dir = resolved
		}
		args := parts[1]

		switch {
		case strings.Contains(args, " cook "):
			gotCook = true
			if dir != wantDir {
				t.Fatalf("bd cook ran in %q, want %q (args: %q)", dir, wantDir, args)
			}
		case strings.Contains(args, " mol wisp "):
			gotWisp = true
			if dir != wantDir {
				t.Fatalf("bd mol wisp ran in %q, want %q (args: %q)", dir, wantDir, args)
			}
		case strings.Contains(args, " mol bond "):
			gotBond = true
			if dir != wantDir {
				t.Fatalf("bd mol bond ran in %q, want %q (args: %q)", dir, wantDir, args)
			}
		}
	}

	if !gotCook || !gotWisp || !gotBond {
		t.Fatalf("missing expected bd commands: cook=%v wisp=%v bond=%v (log: %q)", gotCook, gotWisp, gotBond, string(logBytes))
	}
}
|
||||
|
||||
// TestVerifyBeadExistsAllowStale reproduces the bug in gtl-ncq where beads
// visible via regular bd show fail with --no-daemon due to database sync issues.
// The fix uses --allow-stale to skip the sync check for existence verification.
func TestVerifyBeadExistsAllowStale(t *testing.T) {
	townRoot := t.TempDir()

	// Create minimal workspace structure
	if err := os.MkdirAll(filepath.Join(townRoot, "mayor", "rig"), 0755); err != nil {
		t.Fatalf("mkdir mayor/rig: %v", err)
	}

	// Create a stub bd that simulates the sync issue:
	// - --no-daemon without --allow-stale fails (database out of sync)
	// - --no-daemon with --allow-stale succeeds (skips sync check)
	binDir := filepath.Join(townRoot, "bin")
	if err := os.MkdirAll(binDir, 0755); err != nil {
		t.Fatalf("mkdir binDir: %v", err)
	}
	bdPath := filepath.Join(binDir, "bd")
	bdScript := `#!/bin/sh
# Check for --allow-stale flag
allow_stale=false
for arg in "$@"; do
  if [ "$arg" = "--allow-stale" ]; then
    allow_stale=true
  fi
done

if [ "$1" = "--no-daemon" ]; then
  if [ "$allow_stale" = "true" ]; then
    # --allow-stale skips sync check, succeeds
    echo '[{"title":"Test bead","status":"open","assignee":""}]'
    exit 0
  else
    # Without --allow-stale, fails with sync error
    echo '{"error":"Database out of sync with JSONL."}'
    exit 1
  fi
fi
# Daemon mode works
echo '[{"title":"Test bead","status":"open","assignee":""}]'
exit 0
`
	if err := os.WriteFile(bdPath, []byte(bdScript), 0755); err != nil {
		t.Fatalf("write bd stub: %v", err)
	}

	// Put the stub bd first on PATH so verifyBeadExists invokes it.
	t.Setenv("PATH", binDir+string(os.PathListSeparator)+os.Getenv("PATH"))

	// Run from the town root so bd routing resolution can find routes.jsonl.
	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("getwd: %v", err)
	}
	t.Cleanup(func() { _ = os.Chdir(cwd) })
	if err := os.Chdir(townRoot); err != nil {
		t.Fatalf("chdir: %v", err)
	}

	// EXPECTED: verifyBeadExists should use --no-daemon --allow-stale and succeed
	beadID := "jv-v599"
	err = verifyBeadExists(beadID)
	if err != nil {
		t.Errorf("verifyBeadExists(%q) failed: %v\nExpected --allow-stale to skip sync check", beadID, err)
	}
}
|
||||
|
||||
// TestSlingWithAllowStale tests the full gt sling flow with --allow-stale fix.
// This is an integration test for the gtl-ncq bug.
func TestSlingWithAllowStale(t *testing.T) {
	townRoot := t.TempDir()

	// Create minimal workspace structure
	if err := os.MkdirAll(filepath.Join(townRoot, "mayor", "rig"), 0755); err != nil {
		t.Fatalf("mkdir mayor/rig: %v", err)
	}

	// Create stub bd that respects --allow-stale: `show` under --no-daemon
	// only succeeds when --allow-stale is present, mimicking a database that
	// is out of sync with its JSONL.
	binDir := filepath.Join(townRoot, "bin")
	if err := os.MkdirAll(binDir, 0755); err != nil {
		t.Fatalf("mkdir binDir: %v", err)
	}
	bdPath := filepath.Join(binDir, "bd")
	bdScript := `#!/bin/sh
# Check for --allow-stale flag
allow_stale=false
for arg in "$@"; do
  if [ "$arg" = "--allow-stale" ]; then
    allow_stale=true
  fi
done

if [ "$1" = "--no-daemon" ]; then
  shift
  cmd="$1"
  if [ "$cmd" = "show" ]; then
    if [ "$allow_stale" = "true" ]; then
      echo '[{"title":"Synced bead","status":"open","assignee":""}]'
      exit 0
    fi
    echo '{"error":"Database out of sync"}'
    exit 1
  fi
  exit 0
fi
cmd="$1"
shift || true
case "$cmd" in
  show)
    echo '[{"title":"Synced bead","status":"open","assignee":""}]'
    ;;
  update)
    exit 0
    ;;
esac
exit 0
`
	if err := os.WriteFile(bdPath, []byte(bdScript), 0755); err != nil {
		t.Fatalf("write bd stub: %v", err)
	}

	// Stub bd on PATH plus the role env vars sling resolution reads.
	t.Setenv("PATH", binDir+string(os.PathListSeparator)+os.Getenv("PATH"))
	t.Setenv(EnvGTRole, "crew")
	t.Setenv("GT_CREW", "jv")
	t.Setenv("GT_POLECAT", "")

	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("getwd: %v", err)
	}
	t.Cleanup(func() { _ = os.Chdir(cwd) })
	if err := os.Chdir(townRoot); err != nil {
		t.Fatalf("chdir: %v", err)
	}

	// Save and restore global flags
	prevDryRun := slingDryRun
	prevNoConvoy := slingNoConvoy
	t.Cleanup(func() {
		slingDryRun = prevDryRun
		slingNoConvoy = prevNoConvoy
	})

	slingDryRun = true
	slingNoConvoy = true

	// EXPECTED: gt sling should use daemon mode and succeed
	// ACTUAL: verifyBeadExists uses --no-daemon and fails with sync error
	beadID := "jv-v599"
	err = runSling(nil, []string{beadID})
	if err != nil {
		// Check if it's the specific error we're testing for
		if strings.Contains(err.Error(), "is not a valid bead or formula") {
			t.Errorf("gt sling failed to recognize bead %q: %v\nExpected to use daemon mode, but used --no-daemon which fails when DB out of sync", beadID, err)
		} else {
			// Some other error - might be expected in dry-run mode
			t.Logf("gt sling returned error (may be expected in test): %v", err)
		}
	}
}
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/claude"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
"github.com/steveyegge/gastown/internal/crew"
|
||||
@@ -18,8 +17,8 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/git"
|
||||
"github.com/steveyegge/gastown/internal/mayor"
|
||||
"github.com/steveyegge/gastown/internal/polecat"
|
||||
"github.com/steveyegge/gastown/internal/refinery"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
"github.com/steveyegge/gastown/internal/witness"
|
||||
@@ -204,7 +203,7 @@ func startCoreAgents(townRoot string, agentOverride string) error {
|
||||
|
||||
// Start Deacon (health monitor)
|
||||
deaconMgr := deacon.NewManager(townRoot)
|
||||
if err := deaconMgr.Start(); err != nil {
|
||||
if err := deaconMgr.Start(agentOverride); err != nil {
|
||||
if err == deacon.ErrAlreadyRunning {
|
||||
fmt.Printf(" %s Deacon already running\n", style.Dim.Render("○"))
|
||||
} else {
|
||||
@@ -246,17 +245,15 @@ func startRigAgents(t *tmux.Tmux, townRoot string) {
|
||||
}
|
||||
|
||||
// Start Refinery
|
||||
refinerySession := fmt.Sprintf("gt-%s-refinery", r.Name)
|
||||
refineryRunning, _ := t.HasSession(refinerySession)
|
||||
if refineryRunning {
|
||||
fmt.Printf(" %s %s refinery already running\n", style.Dim.Render("○"), r.Name)
|
||||
} else {
|
||||
created, err := ensureRefinerySession(r.Name, r)
|
||||
if err != nil {
|
||||
refineryMgr := refinery.NewManager(r)
|
||||
if err := refineryMgr.Start(false); err != nil {
|
||||
if errors.Is(err, refinery.ErrAlreadyRunning) {
|
||||
fmt.Printf(" %s %s refinery already running\n", style.Dim.Render("○"), r.Name)
|
||||
} else {
|
||||
fmt.Printf(" %s %s refinery failed: %v\n", style.Dim.Render("○"), r.Name, err)
|
||||
} else if created {
|
||||
fmt.Printf(" %s %s refinery started\n", style.Bold.Render("✓"), r.Name)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf(" %s %s refinery started\n", style.Bold.Render("✓"), r.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -320,86 +317,6 @@ func discoverAllRigs(townRoot string) ([]*rig.Rig, error) {
|
||||
return rigMgr.DiscoverRigs()
|
||||
}
|
||||
|
||||
// ensureRefinerySession creates a refinery tmux session if it doesn't exist.
|
||||
// Returns true if a new session was created, false if it already existed.
|
||||
func ensureRefinerySession(rigName string, r *rig.Rig) (bool, error) {
|
||||
t := tmux.NewTmux()
|
||||
sessionName := fmt.Sprintf("gt-%s-refinery", rigName)
|
||||
|
||||
// Check if session already exists
|
||||
running, err := t.HasSession(sessionName)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("checking session: %w", err)
|
||||
}
|
||||
|
||||
if running {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Working directory is the refinery's rig clone
|
||||
refineryRigDir := filepath.Join(r.Path, "refinery", "rig")
|
||||
if _, err := os.Stat(refineryRigDir); os.IsNotExist(err) {
|
||||
// Fall back to rig path if refinery/rig doesn't exist
|
||||
refineryRigDir = r.Path
|
||||
}
|
||||
|
||||
// Ensure Claude settings exist in refinery/ (not refinery/rig/) so we don't
|
||||
// write into the source repo. Claude walks up the tree to find settings.
|
||||
refineryParentDir := filepath.Join(r.Path, "refinery")
|
||||
if err := claude.EnsureSettingsForRole(refineryParentDir, "refinery"); err != nil {
|
||||
return false, fmt.Errorf("ensuring Claude settings: %w", err)
|
||||
}
|
||||
|
||||
// Create new tmux session
|
||||
if err := t.NewSession(sessionName, refineryRigDir); err != nil {
|
||||
return false, fmt.Errorf("creating session: %w", err)
|
||||
}
|
||||
|
||||
// Set environment
|
||||
bdActor := fmt.Sprintf("%s/refinery", rigName)
|
||||
_ = t.SetEnvironment(sessionName, "GT_ROLE", "refinery")
|
||||
_ = t.SetEnvironment(sessionName, "GT_RIG", rigName)
|
||||
_ = t.SetEnvironment(sessionName, "BD_ACTOR", bdActor)
|
||||
|
||||
// Set beads environment
|
||||
beadsDir := filepath.Join(r.Path, "mayor", "rig", ".beads")
|
||||
_ = t.SetEnvironment(sessionName, "BEADS_DIR", beadsDir)
|
||||
_ = t.SetEnvironment(sessionName, "BEADS_NO_DAEMON", "1")
|
||||
_ = t.SetEnvironment(sessionName, "BEADS_AGENT_NAME", fmt.Sprintf("%s/refinery", rigName))
|
||||
|
||||
// Apply Gas Town theming (non-fatal: theming failure doesn't affect operation)
|
||||
theme := tmux.AssignTheme(rigName)
|
||||
_ = t.ConfigureGasTownSession(sessionName, theme, rigName, "refinery", "refinery")
|
||||
|
||||
// Launch Claude directly (no respawn loop - daemon handles restart)
|
||||
// Export GT_ROLE and BD_ACTOR in the command since tmux SetEnvironment only affects new panes
|
||||
if err := t.SendKeys(sessionName, config.BuildAgentStartupCommand("refinery", bdActor, r.Path, "")); err != nil {
|
||||
return false, fmt.Errorf("sending command: %w", err)
|
||||
}
|
||||
|
||||
// Wait for Claude to start (non-fatal)
|
||||
if err := t.WaitForCommand(sessionName, constants.SupportedShells, constants.ClaudeStartTimeout); err != nil {
|
||||
// Non-fatal
|
||||
}
|
||||
time.Sleep(constants.ShutdownNotifyDelay)
|
||||
|
||||
// Inject startup nudge for predecessor discovery via /resume
|
||||
address := fmt.Sprintf("%s/refinery", rigName)
|
||||
_ = session.StartupNudge(t, sessionName, session.StartupNudgeConfig{
|
||||
Recipient: address,
|
||||
Sender: "deacon",
|
||||
Topic: "patrol",
|
||||
}) // Non-fatal
|
||||
|
||||
// GUPP: Gas Town Universal Propulsion Principle
|
||||
// Send the propulsion nudge to trigger autonomous patrol execution.
|
||||
// Wait for beacon to be fully processed (needs to be separate prompt)
|
||||
time.Sleep(2 * time.Second)
|
||||
_ = t.NudgeSession(sessionName, session.PropulsionNudgeForRole("refinery", refineryRigDir)) // Non-fatal
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func runShutdown(cmd *cobra.Command, args []string) error {
|
||||
t := tmux.NewTmux()
|
||||
|
||||
@@ -777,6 +694,7 @@ func runStartCrew(cmd *cobra.Command, args []string) error {
|
||||
err = crewMgr.Start(name, crew.StartOptions{
|
||||
Account: startCrewAccount,
|
||||
ClaudeConfigDir: claudeConfigDir,
|
||||
AgentOverride: startCrewAgentOverride,
|
||||
})
|
||||
if err != nil {
|
||||
if errors.Is(err, crew.ErrSessionRunning) {
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/git"
|
||||
"github.com/steveyegge/gastown/internal/polecat"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/swarm"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
@@ -808,7 +809,7 @@ func runSwarmLand(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Close the swarm epic in beads
|
||||
closeArgs := []string{"close", swarmID, "--reason", "Swarm landed to main"}
|
||||
if sessionID := os.Getenv("CLAUDE_SESSION_ID"); sessionID != "" {
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
closeArgs = append(closeArgs, "--session="+sessionID)
|
||||
}
|
||||
closeCmd := exec.Command("bd", closeArgs...)
|
||||
@@ -867,7 +868,7 @@ func runSwarmCancel(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Close the swarm epic in beads with canceled reason
|
||||
closeArgs := []string{"close", swarmID, "--reason", "Swarm canceled"}
|
||||
if sessionID := os.Getenv("CLAUDE_SESSION_ID"); sessionID != "" {
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
closeArgs = append(closeArgs, "--session="+sessionID)
|
||||
}
|
||||
closeCmd := exec.Command("bd", closeArgs...)
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/formula"
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
@@ -322,7 +323,7 @@ func runSynthesisClose(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Close the convoy
|
||||
closeArgs := []string{"close", convoyID, "--reason=synthesis complete"}
|
||||
if sessionID := os.Getenv("CLAUDE_SESSION_ID"); sessionID != "" {
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
closeArgs = append(closeArgs, "--session="+sessionID)
|
||||
}
|
||||
closeCmd := exec.Command("bd", closeArgs...)
|
||||
|
||||
171
internal/cmd/uninstall.go
Normal file
171
internal/cmd/uninstall.go
Normal file
@@ -0,0 +1,171 @@
|
||||
// ABOUTME: Command to completely uninstall Gas Town from the system.
|
||||
// ABOUTME: Removes shell integration, wrappers, state, and optionally workspace.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/shell"
|
||||
"github.com/steveyegge/gastown/internal/state"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/wrappers"
|
||||
)
|
||||
|
||||
// Flags for `gt uninstall`.
var (
	uninstallWorkspace bool // --workspace: also delete the workspace directory
	uninstallForce     bool // --force/-f: skip the interactive confirmation
)

// uninstallCmd removes Gas Town's shell hook, wrapper scripts, and on-disk
// state/config/cache. The workspace itself is preserved unless --workspace
// is given.
var uninstallCmd = &cobra.Command{
	Use:     "uninstall",
	GroupID: GroupConfig,
	Short:   "Remove Gas Town from the system",
	Long: `Completely remove Gas Town from the system.

By default, removes:
- Shell integration (~/.zshrc or ~/.bashrc)
- Wrapper scripts (~/bin/gt-codex, ~/bin/gt-opencode)
- State directory (~/.local/state/gastown/)
- Config directory (~/.config/gastown/)
- Cache directory (~/.cache/gastown/)

The workspace (e.g., ~/gt) is NOT removed unless --workspace is specified.

Use --force to skip confirmation prompts.

Examples:
  gt uninstall              # Remove Gas Town, keep workspace
  gt uninstall --workspace  # Also remove workspace directory
  gt uninstall --force      # Skip confirmation`,
	RunE: runUninstall,
}

// init registers the uninstall command and its flags on the root command.
func init() {
	uninstallCmd.Flags().BoolVar(&uninstallWorkspace, "workspace", false,
		"Also remove the workspace directory (DESTRUCTIVE)")
	uninstallCmd.Flags().BoolVarP(&uninstallForce, "force", "f", false,
		"Skip confirmation prompts")
	rootCmd.AddCommand(uninstallCmd)
}
|
||||
|
||||
func runUninstall(cmd *cobra.Command, args []string) error {
|
||||
if !uninstallForce {
|
||||
fmt.Println("This will remove Gas Town from your system.")
|
||||
fmt.Println()
|
||||
fmt.Println("The following will be removed:")
|
||||
fmt.Printf(" • Shell integration (%s)\n", shell.RCFilePath(shell.DetectShell()))
|
||||
fmt.Printf(" • Wrapper scripts (%s)\n", wrappers.BinDir())
|
||||
fmt.Printf(" • State directory (%s)\n", state.StateDir())
|
||||
fmt.Printf(" • Config directory (%s)\n", state.ConfigDir())
|
||||
fmt.Printf(" • Cache directory (%s)\n", state.CacheDir())
|
||||
|
||||
if uninstallWorkspace {
|
||||
fmt.Println()
|
||||
fmt.Printf(" %s WORKSPACE WILL BE DELETED\n", style.Warning.Render("⚠"))
|
||||
fmt.Println(" This cannot be undone!")
|
||||
}
|
||||
|
||||
fmt.Println()
|
||||
fmt.Print("Continue? [y/N] ")
|
||||
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
response, _ := reader.ReadString('\n')
|
||||
response = strings.TrimSpace(strings.ToLower(response))
|
||||
|
||||
if response != "y" && response != "yes" {
|
||||
fmt.Println("Aborted.")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
var errors []string
|
||||
|
||||
fmt.Println()
|
||||
fmt.Println("Removing Gas Town...")
|
||||
|
||||
if err := shell.Remove(); err != nil {
|
||||
errors = append(errors, fmt.Sprintf("shell integration: %v", err))
|
||||
} else {
|
||||
fmt.Printf(" %s Removed shell integration\n", style.Success.Render("✓"))
|
||||
}
|
||||
|
||||
if err := wrappers.Remove(); err != nil {
|
||||
errors = append(errors, fmt.Sprintf("wrapper scripts: %v", err))
|
||||
} else {
|
||||
fmt.Printf(" %s Removed wrapper scripts\n", style.Success.Render("✓"))
|
||||
}
|
||||
|
||||
if err := os.RemoveAll(state.StateDir()); err != nil && !os.IsNotExist(err) {
|
||||
errors = append(errors, fmt.Sprintf("state directory: %v", err))
|
||||
} else {
|
||||
fmt.Printf(" %s Removed state directory\n", style.Success.Render("✓"))
|
||||
}
|
||||
|
||||
if err := os.RemoveAll(state.ConfigDir()); err != nil && !os.IsNotExist(err) {
|
||||
errors = append(errors, fmt.Sprintf("config directory: %v", err))
|
||||
} else {
|
||||
fmt.Printf(" %s Removed config directory\n", style.Success.Render("✓"))
|
||||
}
|
||||
|
||||
if err := os.RemoveAll(state.CacheDir()); err != nil && !os.IsNotExist(err) {
|
||||
errors = append(errors, fmt.Sprintf("cache directory: %v", err))
|
||||
} else {
|
||||
fmt.Printf(" %s Removed cache directory\n", style.Success.Render("✓"))
|
||||
}
|
||||
|
||||
if uninstallWorkspace {
|
||||
workspaceDir := findWorkspaceForUninstall()
|
||||
if workspaceDir != "" {
|
||||
if err := os.RemoveAll(workspaceDir); err != nil {
|
||||
errors = append(errors, fmt.Sprintf("workspace: %v", err))
|
||||
} else {
|
||||
fmt.Printf(" %s Removed workspace: %s\n", style.Success.Render("✓"), workspaceDir)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
fmt.Println()
|
||||
fmt.Printf("%s Some components could not be removed:\n", style.Warning.Render("⚠"))
|
||||
for _, e := range errors {
|
||||
fmt.Printf(" • %s\n", e)
|
||||
}
|
||||
return fmt.Errorf("uninstall incomplete")
|
||||
}
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("%s Gas Town has been uninstalled\n", style.Success.Render("✓"))
|
||||
fmt.Println()
|
||||
fmt.Println("To reinstall, run:")
|
||||
fmt.Printf(" %s\n", style.Dim.Render("go install github.com/steveyegge/gastown/cmd/gt@latest"))
|
||||
fmt.Printf(" %s\n", style.Dim.Render("gt install ~/gt --shell"))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// findWorkspaceForUninstall probes well-known directories under the user's
// home for a Gas Town workspace, recognized by the presence of a "mayor"
// subdirectory. Returns the first match, or "" when none is found (or the
// home directory cannot be determined).
func findWorkspaceForUninstall() string {
	home, err := os.UserHomeDir()
	if err != nil {
		return ""
	}

	for _, name := range []string{"gt", "gastown"} {
		candidate := filepath.Join(home, name)
		if _, statErr := os.Stat(filepath.Join(candidate, "mayor")); statErr == nil {
			return candidate
		}
	}
	return ""
}
|
||||
@@ -83,7 +83,7 @@ func runUp(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// 2. Deacon (Claude agent)
|
||||
deaconMgr := deacon.NewManager(townRoot)
|
||||
if err := deaconMgr.Start(); err != nil {
|
||||
if err := deaconMgr.Start(""); err != nil {
|
||||
if err == deacon.ErrAlreadyRunning {
|
||||
printStatus("Deacon", true, deaconMgr.SessionName())
|
||||
} else {
|
||||
@@ -451,6 +451,9 @@ func startPolecatsWithWork(townRoot, rigName string) ([]string, map[string]error
|
||||
if !entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(entry.Name(), ".") {
|
||||
continue
|
||||
}
|
||||
|
||||
polecatName := entry.Name()
|
||||
polecatPath := filepath.Join(polecatsDir, polecatName)
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
|
||||
// Version information - set at build time via ldflags
|
||||
var (
|
||||
Version = "0.2.2"
|
||||
Version = "0.2.3"
|
||||
// Build can be set via ldflags at compile time
|
||||
Build = "dev"
|
||||
// Commit and Branch - the git revision the binary was built from (optional ldflag)
|
||||
|
||||
@@ -21,12 +21,18 @@ const (
|
||||
AgentGemini AgentPreset = "gemini"
|
||||
// AgentCodex is OpenAI Codex.
|
||||
AgentCodex AgentPreset = "codex"
|
||||
// AgentCursor is Cursor Agent.
|
||||
AgentCursor AgentPreset = "cursor"
|
||||
// AgentAuggie is Auggie CLI.
|
||||
AgentAuggie AgentPreset = "auggie"
|
||||
// AgentAmp is Sourcegraph AMP.
|
||||
AgentAmp AgentPreset = "amp"
|
||||
)
|
||||
|
||||
// AgentPresetInfo contains the configuration details for an agent preset.
|
||||
// This extends the basic RuntimeConfig with agent-specific metadata.
|
||||
type AgentPresetInfo struct {
|
||||
// Name is the preset identifier (e.g., "claude", "gemini", "codex").
|
||||
// Name is the preset identifier (e.g., "claude", "gemini", "codex", "cursor", "auggie", "amp").
|
||||
Name AgentPreset `json:"name"`
|
||||
|
||||
// Command is the CLI binary to invoke.
|
||||
@@ -35,6 +41,11 @@ type AgentPresetInfo struct {
|
||||
// Args are the default command-line arguments for autonomous mode.
|
||||
Args []string `json:"args"`
|
||||
|
||||
// ProcessNames are the process names to look for when detecting if the agent is running.
|
||||
// Used by tmux.IsAgentRunning to check pane_current_command.
|
||||
// E.g., ["node"] for Claude, ["cursor-agent"] for Cursor.
|
||||
ProcessNames []string `json:"process_names,omitempty"`
|
||||
|
||||
// SessionIDEnv is the environment variable for session ID.
|
||||
// Used for resuming sessions across restarts.
|
||||
SessionIDEnv string `json:"session_id_env,omitempty"`
|
||||
@@ -91,6 +102,7 @@ var builtinPresets = map[AgentPreset]*AgentPresetInfo{
|
||||
Name: AgentClaude,
|
||||
Command: "claude",
|
||||
Args: []string{"--dangerously-skip-permissions"},
|
||||
ProcessNames: []string{"node"}, // Claude runs as Node.js
|
||||
SessionIDEnv: "CLAUDE_SESSION_ID",
|
||||
ResumeFlag: "--resume",
|
||||
ResumeStyle: "flag",
|
||||
@@ -102,6 +114,7 @@ var builtinPresets = map[AgentPreset]*AgentPresetInfo{
|
||||
Name: AgentGemini,
|
||||
Command: "gemini",
|
||||
Args: []string{"--approval-mode", "yolo"},
|
||||
ProcessNames: []string{"gemini"}, // Gemini CLI binary
|
||||
SessionIDEnv: "GEMINI_SESSION_ID",
|
||||
ResumeFlag: "--resume",
|
||||
ResumeStyle: "flag",
|
||||
@@ -116,6 +129,7 @@ var builtinPresets = map[AgentPreset]*AgentPresetInfo{
|
||||
Name: AgentCodex,
|
||||
Command: "codex",
|
||||
Args: []string{"--yolo"},
|
||||
ProcessNames: []string{"codex"}, // Codex CLI binary
|
||||
SessionIDEnv: "", // Codex captures from JSONL output
|
||||
ResumeFlag: "resume",
|
||||
ResumeStyle: "subcommand",
|
||||
@@ -126,6 +140,43 @@ var builtinPresets = map[AgentPreset]*AgentPresetInfo{
|
||||
OutputFlag: "--json",
|
||||
},
|
||||
},
|
||||
AgentCursor: {
|
||||
Name: AgentCursor,
|
||||
Command: "cursor-agent",
|
||||
Args: []string{"-f"}, // Force mode (YOLO equivalent), -p requires prompt
|
||||
ProcessNames: []string{"cursor-agent"},
|
||||
SessionIDEnv: "", // Uses --resume with chatId directly
|
||||
ResumeFlag: "--resume",
|
||||
ResumeStyle: "flag",
|
||||
SupportsHooks: false, // TODO: verify hooks support
|
||||
SupportsForkSession: false,
|
||||
NonInteractive: &NonInteractiveConfig{
|
||||
PromptFlag: "-p",
|
||||
OutputFlag: "--output-format json",
|
||||
},
|
||||
},
|
||||
AgentAuggie: {
|
||||
Name: AgentAuggie,
|
||||
Command: "auggie",
|
||||
Args: []string{"--allow-indexing"},
|
||||
ProcessNames: []string{"auggie"},
|
||||
SessionIDEnv: "",
|
||||
ResumeFlag: "--resume",
|
||||
ResumeStyle: "flag",
|
||||
SupportsHooks: false,
|
||||
SupportsForkSession: false,
|
||||
},
|
||||
AgentAmp: {
|
||||
Name: AgentAmp,
|
||||
Command: "amp",
|
||||
Args: []string{"--dangerously-allow-all", "--no-ide"},
|
||||
ProcessNames: []string{"amp"},
|
||||
SessionIDEnv: "",
|
||||
ResumeFlag: "threads continue",
|
||||
ResumeStyle: "subcommand", // 'amp threads continue <threadId>'
|
||||
SupportsHooks: false,
|
||||
SupportsForkSession: false,
|
||||
},
|
||||
}
|
||||
|
||||
// Registry state with proper synchronization.
|
||||
@@ -164,16 +215,11 @@ func ensureRegistry() {
|
||||
initRegistryLocked()
|
||||
}
|
||||
|
||||
// LoadAgentRegistry loads agent definitions from a JSON file and merges with built-ins.
|
||||
// User-defined agents override built-in presets with the same name.
|
||||
// This function caches loaded paths to avoid redundant file reads.
|
||||
func LoadAgentRegistry(path string) error {
|
||||
registryMu.Lock()
|
||||
defer registryMu.Unlock()
|
||||
|
||||
// loadAgentRegistryFromPath loads agent definitions from a JSON file and merges with built-ins.
|
||||
// Caller must hold registryMu write lock.
|
||||
func loadAgentRegistryFromPathLocked(path string) error {
|
||||
initRegistryLocked()
|
||||
|
||||
// Check if already loaded from this path
|
||||
if loadedPaths[path] {
|
||||
return nil
|
||||
}
|
||||
@@ -181,8 +227,8 @@ func LoadAgentRegistry(path string) error {
|
||||
data, err := os.ReadFile(path) //nolint:gosec // G304: path is from config
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
loadedPaths[path] = true // Mark as "loaded" (no file)
|
||||
return nil // No custom config, use built-ins only
|
||||
loadedPaths[path] = true
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -192,7 +238,6 @@ func LoadAgentRegistry(path string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Merge user-defined agents (override built-ins)
|
||||
for name, preset := range userRegistry.Agents {
|
||||
preset.Name = AgentPreset(name)
|
||||
globalRegistry.Agents[name] = preset
|
||||
@@ -202,12 +247,41 @@ func LoadAgentRegistry(path string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadAgentRegistry loads agent definitions from a JSON file and merges with built-ins.
|
||||
// User-defined agents override built-in presets with the same name.
|
||||
// This function caches loaded paths to avoid redundant file reads.
|
||||
func LoadAgentRegistry(path string) error {
|
||||
registryMu.Lock()
|
||||
defer registryMu.Unlock()
|
||||
return loadAgentRegistryFromPathLocked(path)
|
||||
}
|
||||
|
||||
// DefaultAgentRegistryPath returns the default location of the town-level
// agent registry file, which lives alongside the other town settings.
func DefaultAgentRegistryPath(townRoot string) string {
	const registryFile = "agents.json"
	return filepath.Join(townRoot, "settings", registryFile)
}
|
||||
|
||||
// DefaultRigAgentRegistryPath returns the default location of the rig-level
// agent registry file: <rig>/settings/agents.json.
func DefaultRigAgentRegistryPath(rigPath string) string {
	const registryFile = "agents.json"
	return filepath.Join(rigPath, "settings", registryFile)
}
|
||||
|
||||
// RigAgentRegistryPath returns the path for rig-level agent registry.
|
||||
// Alias for DefaultRigAgentRegistryPath for consistency with other path functions.
|
||||
func RigAgentRegistryPath(rigPath string) string {
|
||||
return DefaultRigAgentRegistryPath(rigPath)
|
||||
}
|
||||
|
||||
// LoadRigAgentRegistry loads agent definitions from a rig-level JSON file and merges with built-ins.
|
||||
// This function works similarly to LoadAgentRegistry but for rig-level configurations.
|
||||
func LoadRigAgentRegistry(path string) error {
|
||||
registryMu.Lock()
|
||||
defer registryMu.Unlock()
|
||||
return loadAgentRegistryFromPathLocked(path)
|
||||
}
|
||||
|
||||
// GetAgentPreset returns the preset info for a given agent name.
|
||||
// Returns nil if the preset is not found.
|
||||
func GetAgentPreset(name AgentPreset) *AgentPresetInfo {
|
||||
@@ -305,6 +379,18 @@ func GetSessionIDEnvVar(agentName string) string {
|
||||
return info.SessionIDEnv
|
||||
}
|
||||
|
||||
// GetProcessNames returns the process names used to detect if an agent is running.
|
||||
// Used by tmux.IsAgentRunning to check pane_current_command.
|
||||
// Returns ["node"] for Claude (default) if agent is not found or has no ProcessNames.
|
||||
func GetProcessNames(agentName string) []string {
|
||||
info := GetAgentPresetByName(agentName)
|
||||
if info == nil || len(info.ProcessNames) == 0 {
|
||||
// Default to Claude's process name for backwards compatibility
|
||||
return []string{"node"}
|
||||
}
|
||||
return info.ProcessNames
|
||||
}
|
||||
|
||||
// MergeWithPreset applies preset defaults to a RuntimeConfig.
|
||||
// User-specified values take precedence over preset defaults.
|
||||
// Returns a new RuntimeConfig without modifying the original.
|
||||
|
||||
@@ -9,8 +9,8 @@ import (
|
||||
)
|
||||
|
||||
func TestBuiltinPresets(t *testing.T) {
|
||||
// Ensure all built-in presets are accessible (E2E tested agents only)
|
||||
presets := []AgentPreset{AgentClaude, AgentGemini, AgentCodex}
|
||||
// Ensure all built-in presets are accessible
|
||||
presets := []AgentPreset{AgentClaude, AgentGemini, AgentCodex, AgentCursor, AgentAuggie, AgentAmp}
|
||||
|
||||
for _, preset := range presets {
|
||||
info := GetAgentPreset(preset)
|
||||
@@ -22,6 +22,11 @@ func TestBuiltinPresets(t *testing.T) {
|
||||
if info.Command == "" {
|
||||
t.Errorf("preset %s has empty Command", preset)
|
||||
}
|
||||
|
||||
// All presets should have ProcessNames for agent detection
|
||||
if len(info.ProcessNames) == 0 {
|
||||
t.Errorf("preset %s has empty ProcessNames", preset)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,6 +39,9 @@ func TestGetAgentPresetByName(t *testing.T) {
|
||||
{"claude", AgentClaude, false},
|
||||
{"gemini", AgentGemini, false},
|
||||
{"codex", AgentCodex, false},
|
||||
{"cursor", AgentCursor, false},
|
||||
{"auggie", AgentAuggie, false},
|
||||
{"amp", AgentAmp, false},
|
||||
{"aider", "", true}, // Not built-in, can be added via config
|
||||
{"opencode", "", true}, // Not built-in, can be added via config
|
||||
{"unknown", "", true},
|
||||
@@ -63,6 +71,9 @@ func TestRuntimeConfigFromPreset(t *testing.T) {
|
||||
{AgentClaude, "claude"},
|
||||
{AgentGemini, "gemini"},
|
||||
{AgentCodex, "codex"},
|
||||
{AgentCursor, "cursor-agent"},
|
||||
{AgentAuggie, "auggie"},
|
||||
{AgentAmp, "amp"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
@@ -84,6 +95,9 @@ func TestIsKnownPreset(t *testing.T) {
|
||||
{"claude", true},
|
||||
{"gemini", true},
|
||||
{"codex", true},
|
||||
{"cursor", true},
|
||||
{"auggie", true},
|
||||
{"amp", true},
|
||||
{"aider", false}, // Not built-in, can be added via config
|
||||
{"opencode", false}, // Not built-in, can be added via config
|
||||
{"unknown", false},
|
||||
@@ -128,7 +142,7 @@ func TestLoadAgentRegistry(t *testing.T) {
|
||||
// Reset global registry for test isolation
|
||||
ResetRegistryForTesting()
|
||||
|
||||
// Load the custom registry
|
||||
// Load should succeed
|
||||
if err := LoadAgentRegistry(configPath); err != nil {
|
||||
t.Fatalf("LoadAgentRegistry failed: %v", err)
|
||||
}
|
||||
@@ -138,6 +152,7 @@ func TestLoadAgentRegistry(t *testing.T) {
|
||||
if myAgent == nil {
|
||||
t.Fatal("custom agent 'my-agent' not found after loading registry")
|
||||
}
|
||||
|
||||
if myAgent.Command != "my-agent-bin" {
|
||||
t.Errorf("my-agent.Command = %v, want my-agent-bin", myAgent.Command)
|
||||
}
|
||||
@@ -196,6 +211,7 @@ func TestMergeWithPreset(t *testing.T) {
|
||||
if merged.Command != "/custom/claude" {
|
||||
t.Errorf("merged command should be user value, got %s", merged.Command)
|
||||
}
|
||||
|
||||
if len(merged.Args) != 1 || merged.Args[0] != "--custom-arg" {
|
||||
t.Errorf("merged args should be user value, got %v", merged.Args)
|
||||
}
|
||||
@@ -251,12 +267,14 @@ func TestBuildResumeCommand(t *testing.T) {
|
||||
agentName: "claude",
|
||||
sessionID: "",
|
||||
wantEmpty: true,
|
||||
contains: []string{"claude"},
|
||||
},
|
||||
{
|
||||
name: "unknown agent",
|
||||
agentName: "unknown-agent",
|
||||
sessionID: "session-123",
|
||||
wantEmpty: true,
|
||||
contains: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -286,6 +304,9 @@ func TestSupportsSessionResume(t *testing.T) {
|
||||
{"claude", true},
|
||||
{"gemini", true},
|
||||
{"codex", true},
|
||||
{"cursor", true},
|
||||
{"auggie", true},
|
||||
{"amp", true},
|
||||
{"unknown", false},
|
||||
}
|
||||
|
||||
@@ -305,7 +326,10 @@ func TestGetSessionIDEnvVar(t *testing.T) {
|
||||
}{
|
||||
{"claude", "CLAUDE_SESSION_ID"},
|
||||
{"gemini", "GEMINI_SESSION_ID"},
|
||||
{"codex", ""}, // Codex uses JSONL output instead
|
||||
{"codex", ""}, // Codex uses JSONL output instead
|
||||
{"cursor", ""}, // Cursor uses --resume with chatId directly
|
||||
{"auggie", ""}, // Auggie uses --resume directly
|
||||
{"amp", ""}, // AMP uses 'threads continue' subcommand
|
||||
{"unknown", ""},
|
||||
}
|
||||
|
||||
@@ -317,3 +341,277 @@ func TestGetSessionIDEnvVar(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetProcessNames(t *testing.T) {
|
||||
tests := []struct {
|
||||
agentName string
|
||||
want []string
|
||||
}{
|
||||
{"claude", []string{"node"}},
|
||||
{"gemini", []string{"gemini"}},
|
||||
{"codex", []string{"codex"}},
|
||||
{"cursor", []string{"cursor-agent"}},
|
||||
{"auggie", []string{"auggie"}},
|
||||
{"amp", []string{"amp"}},
|
||||
{"unknown", []string{"node"}}, // Falls back to Claude's process
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.agentName, func(t *testing.T) {
|
||||
got := GetProcessNames(tt.agentName)
|
||||
if len(got) != len(tt.want) {
|
||||
t.Errorf("GetProcessNames(%s) = %v, want %v", tt.agentName, got, tt.want)
|
||||
return
|
||||
}
|
||||
for i := range got {
|
||||
if got[i] != tt.want[i] {
|
||||
t.Errorf("GetProcessNames(%s)[%d] = %q, want %q", tt.agentName, i, got[i], tt.want[i])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestListAgentPresetsMatchesConstants(t *testing.T) {
|
||||
// Ensure all AgentPreset constants are returned by ListAgentPresets
|
||||
allConstants := []AgentPreset{AgentClaude, AgentGemini, AgentCodex, AgentCursor, AgentAuggie, AgentAmp}
|
||||
presets := ListAgentPresets()
|
||||
|
||||
// Convert to map for quick lookup
|
||||
presetMap := make(map[string]bool)
|
||||
for _, p := range presets {
|
||||
presetMap[p] = true
|
||||
}
|
||||
|
||||
// Verify all constants are in the list
|
||||
for _, c := range allConstants {
|
||||
if !presetMap[string(c)] {
|
||||
t.Errorf("ListAgentPresets() missing constant %q", c)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify no empty names
|
||||
for _, p := range presets {
|
||||
if p == "" {
|
||||
t.Error("ListAgentPresets() contains empty string")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAgentCommandGeneration(t *testing.T) {
|
||||
// Test full command line generation for each agent
|
||||
tests := []struct {
|
||||
preset AgentPreset
|
||||
wantCommand string
|
||||
wantContains []string // Args that should be present
|
||||
}{
|
||||
{
|
||||
preset: AgentClaude,
|
||||
wantCommand: "claude",
|
||||
wantContains: []string{"--dangerously-skip-permissions"},
|
||||
},
|
||||
{
|
||||
preset: AgentGemini,
|
||||
wantCommand: "gemini",
|
||||
wantContains: []string{"--approval-mode", "yolo"},
|
||||
},
|
||||
{
|
||||
preset: AgentCodex,
|
||||
wantCommand: "codex",
|
||||
wantContains: []string{"--yolo"},
|
||||
},
|
||||
{
|
||||
preset: AgentCursor,
|
||||
wantCommand: "cursor-agent",
|
||||
wantContains: []string{"-f"},
|
||||
},
|
||||
{
|
||||
preset: AgentAuggie,
|
||||
wantCommand: "auggie",
|
||||
wantContains: []string{"--allow-indexing"},
|
||||
},
|
||||
{
|
||||
preset: AgentAmp,
|
||||
wantCommand: "amp",
|
||||
wantContains: []string{"--dangerously-allow-all", "--no-ide"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(string(tt.preset), func(t *testing.T) {
|
||||
rc := RuntimeConfigFromPreset(tt.preset)
|
||||
if rc == nil {
|
||||
t.Fatal("RuntimeConfigFromPreset returned nil")
|
||||
}
|
||||
|
||||
if rc.Command != tt.wantCommand {
|
||||
t.Errorf("Command = %q, want %q", rc.Command, tt.wantCommand)
|
||||
}
|
||||
|
||||
// Check required args are present
|
||||
argsStr := strings.Join(rc.Args, " ")
|
||||
for _, arg := range tt.wantContains {
|
||||
found := false
|
||||
for _, a := range rc.Args {
|
||||
if a == arg {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("Args %q missing expected %q", argsStr, arg)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCursorAgentPreset(t *testing.T) {
|
||||
// Verify cursor agent preset is correctly configured
|
||||
info := GetAgentPreset(AgentCursor)
|
||||
if info == nil {
|
||||
t.Fatal("cursor preset not found")
|
||||
}
|
||||
|
||||
// Check command
|
||||
if info.Command != "cursor-agent" {
|
||||
t.Errorf("cursor command = %q, want cursor-agent", info.Command)
|
||||
}
|
||||
|
||||
// Check YOLO-equivalent flag (-f for force mode)
|
||||
// Note: -p is for non-interactive mode with prompt, not used for default Args
|
||||
hasF := false
|
||||
for _, arg := range info.Args {
|
||||
if arg == "-f" {
|
||||
hasF = true
|
||||
}
|
||||
}
|
||||
if !hasF {
|
||||
t.Error("cursor args missing -f (force/YOLO mode)")
|
||||
}
|
||||
|
||||
// Check ProcessNames for detection
|
||||
if len(info.ProcessNames) == 0 {
|
||||
t.Error("cursor ProcessNames is empty")
|
||||
}
|
||||
if info.ProcessNames[0] != "cursor-agent" {
|
||||
t.Errorf("cursor ProcessNames[0] = %q, want cursor-agent", info.ProcessNames[0])
|
||||
}
|
||||
|
||||
// Check resume support
|
||||
if info.ResumeFlag != "--resume" {
|
||||
t.Errorf("cursor ResumeFlag = %q, want --resume", info.ResumeFlag)
|
||||
}
|
||||
if info.ResumeStyle != "flag" {
|
||||
t.Errorf("cursor ResumeStyle = %q, want flag", info.ResumeStyle)
|
||||
}
|
||||
}
|
||||
|
||||
// TestDefaultRigAgentRegistryPath verifies that the default rig agent registry path is constructed correctly.
|
||||
func TestDefaultRigAgentRegistryPath(t *testing.T) {
|
||||
tests := []struct {
|
||||
rigPath string
|
||||
expectedPath string
|
||||
}{
|
||||
{"/Users/alice/gt/myproject", "/Users/alice/gt/myproject/settings/agents.json"},
|
||||
{"/tmp/my-rig", "/tmp/my-rig/settings/agents.json"},
|
||||
{"relative/path", "relative/path/settings/agents.json"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.rigPath, func(t *testing.T) {
|
||||
got := DefaultRigAgentRegistryPath(tt.rigPath)
|
||||
want := tt.expectedPath
|
||||
if got != want {
|
||||
t.Errorf("DefaultRigAgentRegistryPath(%s) = %s, want %s", tt.rigPath, got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestLoadRigAgentRegistry verifies that rig-level agent registry is loaded correctly.
|
||||
func TestLoadRigAgentRegistry(t *testing.T) {
|
||||
// Reset registry for test isolation
|
||||
ResetRegistryForTesting()
|
||||
t.Cleanup(ResetRegistryForTesting)
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
registryPath := filepath.Join(tmpDir, "settings", "agents.json")
|
||||
configDir := filepath.Join(tmpDir, "settings")
|
||||
|
||||
// Create settings directory
|
||||
if err := os.MkdirAll(configDir, 0755); err != nil {
|
||||
t.Fatalf("failed to create settings dir: %v", err)
|
||||
}
|
||||
|
||||
// Write agent registry
|
||||
registryContent := `{
|
||||
"version": 1,
|
||||
"agents": {
|
||||
"opencode": {
|
||||
"command": "opencode",
|
||||
"args": ["--session"],
|
||||
"non_interactive": {
|
||||
"subcommand": "run",
|
||||
"output_flag": "--format json"
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
if err := os.WriteFile(registryPath, []byte(registryContent), 0644); err != nil {
|
||||
t.Fatalf("failed to write registry file: %v", err)
|
||||
}
|
||||
|
||||
// Test 1: Load should succeed and merge agents
|
||||
t.Run("load and merge", func(t *testing.T) {
|
||||
if err := LoadRigAgentRegistry(registryPath); err != nil {
|
||||
t.Fatalf("LoadRigAgentRegistry(%s) failed: %v", registryPath, err)
|
||||
}
|
||||
|
||||
info := GetAgentPresetByName("opencode")
|
||||
if info == nil {
|
||||
t.Fatal("expected opencode agent to be available after loading rig registry")
|
||||
}
|
||||
|
||||
if info.Command != "opencode" {
|
||||
t.Errorf("expected opencode agent command to be 'opencode', got %s", info.Command)
|
||||
}
|
||||
})
|
||||
|
||||
// Test 2: File not found should return nil (no error)
|
||||
t.Run("file not found", func(t *testing.T) {
|
||||
nonExistentPath := filepath.Join(tmpDir, "other-rig", "settings", "agents.json")
|
||||
if err := LoadRigAgentRegistry(nonExistentPath); err != nil {
|
||||
t.Errorf("LoadRigAgentRegistry(%s) should not error for non-existent file: %v", nonExistentPath, err)
|
||||
}
|
||||
|
||||
// Verify that previously loaded agent (from test 1) is still available
|
||||
info := GetAgentPresetByName("opencode")
|
||||
if info == nil {
|
||||
t.Errorf("expected opencode agent to still be available after loading non-existent path")
|
||||
return
|
||||
}
|
||||
if info.Command != "opencode" {
|
||||
t.Errorf("expected opencode agent command to be 'opencode', got %s", info.Command)
|
||||
}
|
||||
})
|
||||
|
||||
// Test 3: Invalid JSON should error
|
||||
t.Run("invalid JSON", func(t *testing.T) {
|
||||
invalidRegistryPath := filepath.Join(tmpDir, "bad-rig", "settings", "agents.json")
|
||||
badConfigDir := filepath.Join(tmpDir, "bad-rig", "settings")
|
||||
if err := os.MkdirAll(badConfigDir, 0755); err != nil {
|
||||
t.Fatalf("failed to create bad-rig settings dir: %v", err)
|
||||
}
|
||||
|
||||
invalidContent := `{"version": 1, "agents": {invalid json}}`
|
||||
if err := os.WriteFile(invalidRegistryPath, []byte(invalidContent), 0644); err != nil {
|
||||
t.Fatalf("failed to write invalid registry file: %v", err)
|
||||
}
|
||||
|
||||
if err := LoadRigAgentRegistry(invalidRegistryPath); err == nil {
|
||||
t.Errorf("LoadRigAgentRegistry(%s) should error for invalid JSON: got nil", invalidRegistryPath)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
444
internal/config/integration_test.go
Normal file
444
internal/config/integration_test.go
Normal file
@@ -0,0 +1,444 @@
|
||||
// Test Rig-Level Custom Agent Support
|
||||
//
|
||||
// This integration test verifies that custom agents defined in rig-level
|
||||
// settings/config.json are correctly loaded and used when spawning polecats.
|
||||
// It creates a stub agent, configures it at the rig level, and verifies
|
||||
// the agent is actually used via tmux session capture.
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestRigLevelCustomAgentIntegration tests end-to-end rig-level custom agent functionality.
|
||||
// This test:
|
||||
// 1. Creates a stub agent script that echoes identifiable output
|
||||
// 2. Sets up a minimal town/rig with the custom agent configured
|
||||
// 3. Verifies that BuildPolecatStartupCommand uses the custom agent
|
||||
// 4. Optionally spawns a tmux session and verifies output (if tmux available)
|
||||
func TestRigLevelCustomAgentIntegration(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create the stub agent script
|
||||
stubAgentPath := createStubAgent(t, tmpDir)
|
||||
|
||||
// Set up town structure
|
||||
townRoot := filepath.Join(tmpDir, "town")
|
||||
rigName := "testrig"
|
||||
rigPath := filepath.Join(townRoot, rigName)
|
||||
|
||||
setupTestTownWithCustomAgent(t, townRoot, rigName, stubAgentPath)
|
||||
|
||||
// Test 1: Verify ResolveAgentConfig picks up the custom agent
|
||||
t.Run("ResolveAgentConfig uses rig-level agent", func(t *testing.T) {
|
||||
rc := ResolveAgentConfig(townRoot, rigPath)
|
||||
if rc == nil {
|
||||
t.Fatal("ResolveAgentConfig returned nil")
|
||||
}
|
||||
|
||||
if rc.Command != stubAgentPath {
|
||||
t.Errorf("Expected command %q, got %q", stubAgentPath, rc.Command)
|
||||
}
|
||||
|
||||
// Verify args are passed through
|
||||
if len(rc.Args) != 2 || rc.Args[0] != "--test-mode" || rc.Args[1] != "--stub" {
|
||||
t.Errorf("Expected args [--test-mode --stub], got %v", rc.Args)
|
||||
}
|
||||
})
|
||||
|
||||
// Test 2: Verify BuildPolecatStartupCommand includes the custom agent
|
||||
t.Run("BuildPolecatStartupCommand uses custom agent", func(t *testing.T) {
|
||||
cmd := BuildPolecatStartupCommand(rigName, "test-polecat", rigPath, "")
|
||||
|
||||
if !strings.Contains(cmd, stubAgentPath) {
|
||||
t.Errorf("Expected command to contain stub agent path %q, got: %s", stubAgentPath, cmd)
|
||||
}
|
||||
|
||||
if !strings.Contains(cmd, "--test-mode") {
|
||||
t.Errorf("Expected command to contain --test-mode, got: %s", cmd)
|
||||
}
|
||||
|
||||
// Verify environment variables are set
|
||||
if !strings.Contains(cmd, "GT_ROLE=polecat") {
|
||||
t.Errorf("Expected GT_ROLE=polecat in command, got: %s", cmd)
|
||||
}
|
||||
|
||||
if !strings.Contains(cmd, "GT_POLECAT=test-polecat") {
|
||||
t.Errorf("Expected GT_POLECAT=test-polecat in command, got: %s", cmd)
|
||||
}
|
||||
})
|
||||
|
||||
// Test 3: Verify ResolveAgentConfigWithOverride respects rig agents
|
||||
t.Run("ResolveAgentConfigWithOverride with rig agent", func(t *testing.T) {
|
||||
rc, agentName, err := ResolveAgentConfigWithOverride(townRoot, rigPath, "stub-agent")
|
||||
if err != nil {
|
||||
t.Fatalf("ResolveAgentConfigWithOverride failed: %v", err)
|
||||
}
|
||||
|
||||
if agentName != "stub-agent" {
|
||||
t.Errorf("Expected agent name 'stub-agent', got %q", agentName)
|
||||
}
|
||||
|
||||
if rc.Command != stubAgentPath {
|
||||
t.Errorf("Expected command %q, got %q", stubAgentPath, rc.Command)
|
||||
}
|
||||
})
|
||||
|
||||
// Test 4: Verify unknown agent override returns error
|
||||
t.Run("ResolveAgentConfigWithOverride unknown agent errors", func(t *testing.T) {
|
||||
_, _, err := ResolveAgentConfigWithOverride(townRoot, rigPath, "nonexistent-agent")
|
||||
if err == nil {
|
||||
t.Fatal("Expected error for nonexistent agent, got nil")
|
||||
}
|
||||
|
||||
if !strings.Contains(err.Error(), "not found") {
|
||||
t.Errorf("Expected 'not found' error, got: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Test 5: Tmux integration (skip if tmux not available)
|
||||
t.Run("TmuxSessionWithCustomAgent", func(t *testing.T) {
|
||||
if _, err := exec.LookPath("tmux"); err != nil {
|
||||
t.Skip("tmux not available, skipping session test")
|
||||
}
|
||||
|
||||
testTmuxSessionWithStubAgent(t, tmpDir, stubAgentPath, rigName)
|
||||
})
|
||||
}
|
||||
|
||||
// createStubAgent writes a bash script into tmpDir that stands in for an
// AI agent: it prints an identifiable startup banner (including the GT_*
// environment it received), then answers a few canned questions on stdin.
// Returns the executable script's path.
func createStubAgent(t *testing.T, tmpDir string) string {
	t.Helper()

	const script = `#!/bin/bash
# Stub Agent for Integration Testing
# This simulates an AI agent with identifiable output

AGENT_NAME="STUB_AGENT"
AGENT_VERSION="1.0.0"

echo "=========================================="
echo "STUB_AGENT_STARTED"
echo "Agent: $AGENT_NAME v$AGENT_VERSION"
echo "Args: $@"
echo "Working Dir: $(pwd)"
echo "GT_ROLE: ${GT_ROLE:-not_set}"
echo "GT_POLECAT: ${GT_POLECAT:-not_set}"
echo "GT_RIG: ${GT_RIG:-not_set}"
echo "=========================================="

# Simple Q&A loop
while true; do
    echo ""
    echo "STUB_AGENT_READY"
    echo "Enter question (or 'exit' to quit):"

    # Read with timeout for non-interactive testing
    if read -t 5 question; then
        case "$question" in
            "exit"|"quit"|"q")
                echo "STUB_AGENT_EXITING"
                exit 0
                ;;
            "what is 2+2"*)
                echo "STUB_AGENT_ANSWER: 4"
                ;;
            "ping"*)
                echo "STUB_AGENT_ANSWER: pong"
                ;;
            "status"*)
                echo "STUB_AGENT_ANSWER: operational"
                ;;
            *)
                echo "STUB_AGENT_ANSWER: I received your question: $question"
                ;;
        esac
    else
        # Timeout - check if we should exit
        if [ -f "/tmp/stub_agent_stop_$$" ]; then
            echo "STUB_AGENT_STOPPING (signal file detected)"
            rm -f "/tmp/stub_agent_stop_$$"
            exit 0
        fi
    fi
done
`

	agentPath := filepath.Join(tmpDir, "stub-agent")
	if err := os.WriteFile(agentPath, []byte(script), 0755); err != nil {
		t.Fatalf("Failed to create stub agent: %v", err)
	}

	return agentPath
}
|
||||
|
||||
// setupTestTownWithCustomAgent creates a minimal town/rig structure with a custom agent.
|
||||
func setupTestTownWithCustomAgent(t *testing.T, townRoot, rigName, stubAgentPath string) {
|
||||
t.Helper()
|
||||
|
||||
rigPath := filepath.Join(townRoot, rigName)
|
||||
|
||||
// Create directory structure
|
||||
dirs := []string{
|
||||
filepath.Join(townRoot, "mayor"),
|
||||
filepath.Join(townRoot, "settings"),
|
||||
filepath.Join(rigPath, "settings"),
|
||||
filepath.Join(rigPath, "polecats"),
|
||||
}
|
||||
|
||||
for _, dir := range dirs {
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create directory %s: %v", dir, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create town.json
|
||||
townConfig := map[string]interface{}{
|
||||
"type": "town",
|
||||
"version": 2,
|
||||
"name": "test-town",
|
||||
"created_at": time.Now().Format(time.RFC3339),
|
||||
}
|
||||
writeTownJSON(t, filepath.Join(townRoot, "mayor", "town.json"), townConfig)
|
||||
|
||||
// Create town settings (empty, uses defaults)
|
||||
townSettings := map[string]interface{}{
|
||||
"type": "town-settings",
|
||||
"version": 1,
|
||||
"default_agent": "claude",
|
||||
}
|
||||
writeTownJSON(t, filepath.Join(townRoot, "settings", "config.json"), townSettings)
|
||||
|
||||
// Create rig settings with custom agent
|
||||
rigSettings := map[string]interface{}{
|
||||
"type": "rig-settings",
|
||||
"version": 1,
|
||||
"agent": "stub-agent",
|
||||
"agents": map[string]interface{}{
|
||||
"stub-agent": map[string]interface{}{
|
||||
"command": stubAgentPath,
|
||||
"args": []string{"--test-mode", "--stub"},
|
||||
},
|
||||
},
|
||||
}
|
||||
writeTownJSON(t, filepath.Join(rigPath, "settings", "config.json"), rigSettings)
|
||||
|
||||
// Create rigs.json
|
||||
rigsConfig := map[string]interface{}{
|
||||
"version": 1,
|
||||
"rigs": map[string]interface{}{
|
||||
rigName: map[string]interface{}{
|
||||
"git_url": "https://github.com/test/testrepo.git",
|
||||
"added_at": time.Now().Format(time.RFC3339),
|
||||
},
|
||||
},
|
||||
}
|
||||
writeTownJSON(t, filepath.Join(townRoot, "mayor", "rigs.json"), rigsConfig)
|
||||
}
|
||||
|
||||
// writeTownJSON writes a JSON config file.
|
||||
func writeTownJSON(t *testing.T, path string, data interface{}) {
|
||||
t.Helper()
|
||||
|
||||
jsonData, err := json.MarshalIndent(data, "", " ")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to marshal JSON for %s: %v", path, err)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(path, jsonData, 0644); err != nil {
|
||||
t.Fatalf("Failed to write %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
func pollForOutput(t *testing.T, sessionName, expected string, timeout time.Duration) (string, bool) {
|
||||
t.Helper()
|
||||
deadline := time.Now().Add(timeout)
|
||||
for time.Now().Before(deadline) {
|
||||
output := captureTmuxPane(t, sessionName, 50)
|
||||
if strings.Contains(output, expected) {
|
||||
return output, true
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
return captureTmuxPane(t, sessionName, 50), false
|
||||
}
|
||||
|
||||
// testTmuxSessionWithStubAgent drives a full stub-agent round trip inside a
// throwaway tmux session: start a detached session, seed GT_* environment
// variables, launch the stub agent, verify its startup banner, exchange a
// ping/pong, then ask it to exit. The session name embeds the PID and a
// nanosecond timestamp so parallel test runs cannot collide.
func testTmuxSessionWithStubAgent(t *testing.T, tmpDir, stubAgentPath, rigName string) {
	t.Helper()

	sessionName := fmt.Sprintf("gt-test-pid%d-%d", os.Getpid(), time.Now().UnixNano())
	workDir := tmpDir

	// Best-effort cleanup of any stale session with the same name; errors
	// are deliberately ignored (the session usually does not exist).
	exec.Command("tmux", "kill-session", "-t", sessionName).Run()

	defer func() {
		// Best-effort teardown; the session may already be gone.
		exec.Command("tmux", "kill-session", "-t", sessionName).Run()
	}()

	cmd := exec.Command("tmux", "new-session", "-d", "-s", sessionName, "-c", workDir)
	if err := cmd.Run(); err != nil {
		t.Fatalf("Failed to create tmux session: %v", err)
	}

	// Environment the stub agent echoes back in its banner.
	envVars := map[string]string{
		"GT_ROLE":    "polecat",
		"GT_POLECAT": "test-polecat",
		"GT_RIG":     rigName,
	}

	// set-environment failures are non-fatal: the banner check below only
	// warns if the variables did not propagate.
	for key, val := range envVars {
		cmd := exec.Command("tmux", "set-environment", "-t", sessionName, key, val)
		if err := cmd.Run(); err != nil {
			t.Logf("Warning: failed to set %s: %v", key, err)
		}
	}

	// Launch the stub agent in the session's shell.
	agentCmd := fmt.Sprintf("%s --test-mode --stub", stubAgentPath)
	cmd = exec.Command("tmux", "send-keys", "-t", sessionName, agentCmd, "Enter")
	if err := cmd.Run(); err != nil {
		t.Fatalf("Failed to send keys: %v", err)
	}

	// Startup banner. Skip (not fail) when capture is unreliable on this host.
	output, found := pollForOutput(t, sessionName, "STUB_AGENT_STARTED", 12*time.Second)
	if !found {
		t.Skipf("stub agent output not detected; tmux capture unreliable. Output:\n%s", output)
	}

	if !strings.Contains(output, "GT_ROLE: polecat") {
		t.Logf("Warning: GT_ROLE not visible in agent output (tmux env may not propagate to subshell)")
	}

	// Q&A round trip: "ping" must be answered with "pong".
	cmd = exec.Command("tmux", "send-keys", "-t", sessionName, "ping", "Enter")
	if err := cmd.Run(); err != nil {
		t.Fatalf("Failed to send ping: %v", err)
	}

	output, found = pollForOutput(t, sessionName, "STUB_AGENT_ANSWER: pong", 6*time.Second)
	if !found {
		t.Errorf("Expected 'pong' response, got:\n%s", output)
	}

	// Graceful shutdown; failures here are informational only.
	cmd = exec.Command("tmux", "send-keys", "-t", sessionName, "exit", "Enter")
	if err := cmd.Run(); err != nil {
		t.Logf("Warning: failed to send exit: %v", err)
	}

	output, found = pollForOutput(t, sessionName, "STUB_AGENT_EXITING", 3*time.Second)
	if !found {
		t.Logf("Note: Agent may have exited before capture. Output:\n%s", output)
	}

	t.Logf("Tmux session test completed successfully")
}
|
||||
|
||||
// captureTmuxPane returns up to `lines` lines of scrollback from the pane of
// the given tmux session. On capture failure it logs a warning and returns
// the empty string rather than failing the test.
func captureTmuxPane(t *testing.T, sessionName string, lines int) string {
	t.Helper()

	out, err := exec.Command(
		"tmux", "capture-pane", "-t", sessionName, "-p", "-S", fmt.Sprintf("-%d", lines),
	).Output()
	if err != nil {
		t.Logf("Warning: failed to capture pane: %v", err)
		return ""
	}
	return string(out)
}
|
||||
|
||||
func waitForTmuxOutputContains(t *testing.T, sessionName, needle string, timeout time.Duration) (string, bool) {
|
||||
t.Helper()
|
||||
|
||||
deadline := time.Now().Add(timeout)
|
||||
output := ""
|
||||
for time.Now().Before(deadline) {
|
||||
output = captureTmuxPane(t, sessionName, 200)
|
||||
if strings.Contains(output, needle) {
|
||||
return output, true
|
||||
}
|
||||
time.Sleep(250 * time.Millisecond)
|
||||
}
|
||||
return output, false
|
||||
}
|
||||
|
||||
// TestRigAgentOverridesTownAgent verifies rig agents take precedence over town agents.
|
||||
func TestRigAgentOverridesTownAgent(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
townRoot := filepath.Join(tmpDir, "town")
|
||||
rigName := "testrig"
|
||||
rigPath := filepath.Join(townRoot, rigName)
|
||||
|
||||
// Create directory structure
|
||||
dirs := []string{
|
||||
filepath.Join(townRoot, "mayor"),
|
||||
filepath.Join(townRoot, "settings"),
|
||||
filepath.Join(rigPath, "settings"),
|
||||
}
|
||||
|
||||
for _, dir := range dirs {
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create directory %s: %v", dir, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Town settings with a custom agent
|
||||
townSettings := map[string]interface{}{
|
||||
"type": "town-settings",
|
||||
"version": 1,
|
||||
"default_agent": "my-agent",
|
||||
"agents": map[string]interface{}{
|
||||
"my-agent": map[string]interface{}{
|
||||
"command": "/town/path/to/agent",
|
||||
"args": []string{"--town-level"},
|
||||
},
|
||||
},
|
||||
}
|
||||
writeTownJSON(t, filepath.Join(townRoot, "settings", "config.json"), townSettings)
|
||||
|
||||
// Rig settings with SAME agent name but different config (should override)
|
||||
rigSettings := map[string]interface{}{
|
||||
"type": "rig-settings",
|
||||
"version": 1,
|
||||
"agent": "my-agent",
|
||||
"agents": map[string]interface{}{
|
||||
"my-agent": map[string]interface{}{
|
||||
"command": "/rig/path/to/agent",
|
||||
"args": []string{"--rig-level"},
|
||||
},
|
||||
},
|
||||
}
|
||||
writeTownJSON(t, filepath.Join(rigPath, "settings", "config.json"), rigSettings)
|
||||
|
||||
// Create town.json
|
||||
townConfig := map[string]interface{}{
|
||||
"type": "town",
|
||||
"version": 2,
|
||||
"name": "test-town",
|
||||
"created_at": time.Now().Format(time.RFC3339),
|
||||
}
|
||||
writeTownJSON(t, filepath.Join(townRoot, "mayor", "town.json"), townConfig)
|
||||
|
||||
// Resolve agent config
|
||||
rc := ResolveAgentConfig(townRoot, rigPath)
|
||||
if rc == nil {
|
||||
t.Fatal("ResolveAgentConfig returned nil")
|
||||
}
|
||||
|
||||
// Rig agent should take precedence
|
||||
if rc.Command != "/rig/path/to/agent" {
|
||||
t.Errorf("Expected rig agent command '/rig/path/to/agent', got %q", rc.Command)
|
||||
}
|
||||
|
||||
if len(rc.Args) != 1 || rc.Args[0] != "--rig-level" {
|
||||
t.Errorf("Expected rig args [--rig-level], got %v", rc.Args)
|
||||
}
|
||||
}
|
||||
@@ -726,15 +726,7 @@ func LoadRuntimeConfig(rigPath string) *RuntimeConfig {
|
||||
if settings.Runtime == nil {
|
||||
return DefaultRuntimeConfig()
|
||||
}
|
||||
// Fill in defaults for empty fields
|
||||
rc := settings.Runtime
|
||||
if rc.Command == "" {
|
||||
rc.Command = "claude"
|
||||
}
|
||||
if rc.Args == nil {
|
||||
rc.Args = []string{"--dangerously-skip-permissions"}
|
||||
}
|
||||
return rc
|
||||
return normalizeRuntimeConfig(settings.Runtime)
|
||||
}
|
||||
|
||||
// TownSettingsPath returns the path to town settings file.
|
||||
@@ -824,6 +816,9 @@ func ResolveAgentConfig(townRoot, rigPath string) *RuntimeConfig {
|
||||
// Load custom agent registry if it exists
|
||||
_ = LoadAgentRegistry(DefaultAgentRegistryPath(townRoot))
|
||||
|
||||
// Load rig-level custom agent registry if it exists (for per-rig custom agents)
|
||||
_ = LoadRigAgentRegistry(RigAgentRegistryPath(rigPath))
|
||||
|
||||
// Determine which agent name to use
|
||||
agentName := ""
|
||||
if rigSettings != nil && rigSettings.Agent != "" {
|
||||
@@ -834,8 +829,7 @@ func ResolveAgentConfig(townRoot, rigPath string) *RuntimeConfig {
|
||||
agentName = "claude" // ultimate fallback
|
||||
}
|
||||
|
||||
// Look up the agent configuration
|
||||
return lookupAgentConfig(agentName, townSettings)
|
||||
return lookupAgentConfig(agentName, townSettings, rigSettings)
|
||||
}
|
||||
|
||||
// ResolveAgentConfigWithOverride resolves the agent configuration for a rig, with an optional override.
|
||||
@@ -864,6 +858,9 @@ func ResolveAgentConfigWithOverride(townRoot, rigPath, agentOverride string) (*R
|
||||
// Load custom agent registry if it exists
|
||||
_ = LoadAgentRegistry(DefaultAgentRegistryPath(townRoot))
|
||||
|
||||
// Load rig-level custom agent registry if it exists (for per-rig custom agents)
|
||||
_ = LoadRigAgentRegistry(RigAgentRegistryPath(rigPath))
|
||||
|
||||
// Determine which agent name to use
|
||||
agentName := ""
|
||||
if agentOverride != "" {
|
||||
@@ -876,13 +873,21 @@ func ResolveAgentConfigWithOverride(townRoot, rigPath, agentOverride string) (*R
|
||||
agentName = "claude" // ultimate fallback
|
||||
}
|
||||
|
||||
// If an override is requested, validate it exists.
|
||||
// If an override is requested, validate it exists
|
||||
if agentOverride != "" {
|
||||
// Check rig-level custom agents first
|
||||
if rigSettings != nil && rigSettings.Agents != nil {
|
||||
if custom, ok := rigSettings.Agents[agentName]; ok && custom != nil {
|
||||
return fillRuntimeDefaults(custom), agentName, nil
|
||||
}
|
||||
}
|
||||
// Then check town-level custom agents
|
||||
if townSettings.Agents != nil {
|
||||
if custom, ok := townSettings.Agents[agentName]; ok && custom != nil {
|
||||
return fillRuntimeDefaults(custom), agentName, nil
|
||||
}
|
||||
}
|
||||
// Then check built-in presets
|
||||
if preset := GetAgentPresetByName(agentName); preset != nil {
|
||||
return RuntimeConfigFromPreset(AgentPreset(agentName)), agentName, nil
|
||||
}
|
||||
@@ -890,13 +895,20 @@ func ResolveAgentConfigWithOverride(townRoot, rigPath, agentOverride string) (*R
|
||||
}
|
||||
|
||||
// Normal lookup path (no override)
|
||||
return lookupAgentConfig(agentName, townSettings), agentName, nil
|
||||
return lookupAgentConfig(agentName, townSettings, rigSettings), agentName, nil
|
||||
}
|
||||
|
||||
// lookupAgentConfig looks up an agent by name.
|
||||
// First checks town's custom agents, then built-in presets from agents.go.
|
||||
func lookupAgentConfig(name string, townSettings *TownSettings) *RuntimeConfig {
|
||||
// First check town's custom agents
|
||||
// Checks rig-level custom agents first, then town's custom agents, then built-in presets from agents.go.
|
||||
func lookupAgentConfig(name string, townSettings *TownSettings, rigSettings *RigSettings) *RuntimeConfig {
|
||||
// First check rig's custom agents (NEW - fix for rig-level agent support)
|
||||
if rigSettings != nil && rigSettings.Agents != nil {
|
||||
if custom, ok := rigSettings.Agents[name]; ok && custom != nil {
|
||||
return fillRuntimeDefaults(custom)
|
||||
}
|
||||
}
|
||||
|
||||
// Then check town's custom agents (existing)
|
||||
if townSettings != nil && townSettings.Agents != nil {
|
||||
if custom, ok := townSettings.Agents[name]; ok && custom != nil {
|
||||
return fillRuntimeDefaults(custom)
|
||||
@@ -1044,13 +1056,15 @@ func findTownRootFromCwd() (string, error) {
|
||||
// prompt is optional - if provided, appended as the initial prompt.
|
||||
func BuildStartupCommand(envVars map[string]string, rigPath, prompt string) string {
|
||||
var rc *RuntimeConfig
|
||||
var townRoot string
|
||||
if rigPath != "" {
|
||||
// Derive town root from rig path
|
||||
townRoot := filepath.Dir(rigPath)
|
||||
townRoot = filepath.Dir(rigPath)
|
||||
rc = ResolveAgentConfig(townRoot, rigPath)
|
||||
} else {
|
||||
// Try to detect town root from cwd for town-level agents (mayor, deacon)
|
||||
townRoot, err := findTownRootFromCwd()
|
||||
var err error
|
||||
townRoot, err = findTownRootFromCwd()
|
||||
if err != nil {
|
||||
rc = DefaultRuntimeConfig()
|
||||
} else {
|
||||
@@ -1058,9 +1072,22 @@ func BuildStartupCommand(envVars map[string]string, rigPath, prompt string) stri
|
||||
}
|
||||
}
|
||||
|
||||
// Copy env vars to avoid mutating caller map
|
||||
resolvedEnv := make(map[string]string, len(envVars)+2)
|
||||
for k, v := range envVars {
|
||||
resolvedEnv[k] = v
|
||||
}
|
||||
// Add GT_ROOT so agents can find town-level resources (formulas, etc.)
|
||||
if townRoot != "" {
|
||||
resolvedEnv["GT_ROOT"] = townRoot
|
||||
}
|
||||
if rc.Session != nil && rc.Session.SessionIDEnv != "" {
|
||||
resolvedEnv["GT_SESSION_ID_ENV"] = rc.Session.SessionIDEnv
|
||||
}
|
||||
|
||||
// Build environment export prefix
|
||||
var exports []string
|
||||
for k, v := range envVars {
|
||||
for k, v := range resolvedEnv {
|
||||
exports = append(exports, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
|
||||
@@ -1082,6 +1109,21 @@ func BuildStartupCommand(envVars map[string]string, rigPath, prompt string) stri
|
||||
return cmd
|
||||
}
|
||||
|
||||
// PrependEnv prepends export statements to a command string.
// Variables are sorted by name so the generated command is deterministic;
// an empty map returns the command unchanged.
func PrependEnv(command string, envVars map[string]string) string {
	if len(envVars) == 0 {
		return command
	}

	exports := make([]string, 0, len(envVars))
	for name, value := range envVars {
		exports = append(exports, fmt.Sprintf("%s=%s", name, value))
	}
	sort.Strings(exports)

	return "export " + strings.Join(exports, " ") + " && " + command
}
|
||||
|
||||
// BuildStartupCommandWithAgentOverride builds a startup command like BuildStartupCommand,
|
||||
// but uses agentOverride if non-empty.
|
||||
func BuildStartupCommandWithAgentOverride(envVars map[string]string, rigPath, prompt, agentOverride string) (string, error) {
|
||||
|
||||
@@ -778,12 +778,18 @@ func TestMessagingConfigPath(t *testing.T) {
|
||||
|
||||
func TestRuntimeConfigDefaults(t *testing.T) {
|
||||
rc := DefaultRuntimeConfig()
|
||||
if rc.Provider != "claude" {
|
||||
t.Errorf("Provider = %q, want %q", rc.Provider, "claude")
|
||||
}
|
||||
if rc.Command != "claude" {
|
||||
t.Errorf("Command = %q, want %q", rc.Command, "claude")
|
||||
}
|
||||
if len(rc.Args) != 1 || rc.Args[0] != "--dangerously-skip-permissions" {
|
||||
t.Errorf("Args = %v, want [--dangerously-skip-permissions]", rc.Args)
|
||||
}
|
||||
if rc.Session == nil || rc.Session.SessionIDEnv != "CLAUDE_SESSION_ID" {
|
||||
t.Errorf("SessionIDEnv = %q, want %q", rc.Session.SessionIDEnv, "CLAUDE_SESSION_ID")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRuntimeConfigBuildCommand(t *testing.T) {
|
||||
@@ -879,6 +885,18 @@ func TestRuntimeConfigBuildCommandWithPrompt(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBuildAgentStartupCommand(t *testing.T) {
|
||||
// BuildAgentStartupCommand auto-detects town root from cwd when rigPath is empty.
|
||||
// Use a temp directory to ensure we exercise the fallback default config path.
|
||||
origWD, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tmpWD := t.TempDir()
|
||||
if err := os.Chdir(tmpWD); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Cleanup(func() { _ = os.Chdir(origWD) })
|
||||
|
||||
// Test without rig config (uses defaults)
|
||||
cmd := BuildAgentStartupCommand("witness", "gastown/witness", "", "")
|
||||
|
||||
@@ -1558,3 +1576,94 @@ func TestSaveTownSettings(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TestLookupAgentConfigWithRigSettings verifies that lookupAgentConfig checks
|
||||
// rig-level agents first, then town-level agents, then built-ins.
|
||||
func TestLookupAgentConfigWithRigSettings(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
rigSettings *RigSettings
|
||||
townSettings *TownSettings
|
||||
expectedCommand string
|
||||
expectedFrom string
|
||||
}{
|
||||
{
|
||||
name: "rig-custom-agent",
|
||||
rigSettings: &RigSettings{
|
||||
Agent: "default-rig-agent",
|
||||
Agents: map[string]*RuntimeConfig{
|
||||
"rig-custom-agent": {
|
||||
Command: "custom-rig-cmd",
|
||||
Args: []string{"--rig-flag"},
|
||||
},
|
||||
},
|
||||
},
|
||||
townSettings: &TownSettings{
|
||||
Agents: map[string]*RuntimeConfig{
|
||||
"town-custom-agent": {
|
||||
Command: "custom-town-cmd",
|
||||
Args: []string{"--town-flag"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedCommand: "custom-rig-cmd",
|
||||
expectedFrom: "rig",
|
||||
},
|
||||
{
|
||||
name: "town-custom-agent",
|
||||
rigSettings: &RigSettings{
|
||||
Agents: map[string]*RuntimeConfig{
|
||||
"other-rig-agent": {
|
||||
Command: "other-rig-cmd",
|
||||
},
|
||||
},
|
||||
},
|
||||
townSettings: &TownSettings{
|
||||
Agents: map[string]*RuntimeConfig{
|
||||
"town-custom-agent": {
|
||||
Command: "custom-town-cmd",
|
||||
Args: []string{"--town-flag"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedCommand: "custom-town-cmd",
|
||||
expectedFrom: "town",
|
||||
},
|
||||
{
|
||||
name: "unknown-agent",
|
||||
rigSettings: nil,
|
||||
townSettings: nil,
|
||||
expectedCommand: "claude",
|
||||
expectedFrom: "builtin",
|
||||
},
|
||||
{
|
||||
name: "claude",
|
||||
rigSettings: &RigSettings{
|
||||
Agent: "claude",
|
||||
},
|
||||
townSettings: &TownSettings{
|
||||
Agents: map[string]*RuntimeConfig{
|
||||
"claude": {
|
||||
Command: "custom-claude",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedCommand: "custom-claude",
|
||||
expectedFrom: "town",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
rc := lookupAgentConfig(tt.name, tt.townSettings, tt.rigSettings)
|
||||
|
||||
if rc == nil {
|
||||
t.Errorf("lookupAgentConfig(%s) returned nil", tt.name)
|
||||
}
|
||||
|
||||
if rc.Command != tt.expectedCommand {
|
||||
t.Errorf("lookupAgentConfig(%s).Command = %s, want %s", tt.name, rc.Command, tt.expectedCommand)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -38,7 +39,7 @@ type TownSettings struct {
|
||||
Version int `json:"version"` // schema version
|
||||
|
||||
// DefaultAgent is the name of the agent preset to use by default.
|
||||
// Can be a built-in preset ("claude", "gemini", "codex")
|
||||
// Can be a built-in preset ("claude", "gemini", "codex", "cursor", "auggie", "amp")
|
||||
// or a custom agent name defined in settings/agents.json.
|
||||
// Default: "claude"
|
||||
DefaultAgent string `json:"default_agent,omitempty"`
|
||||
@@ -190,11 +191,16 @@ type RigSettings struct {
|
||||
Runtime *RuntimeConfig `json:"runtime,omitempty"` // LLM runtime settings (deprecated: use Agent)
|
||||
|
||||
// Agent selects which agent preset to use for this rig.
|
||||
// Can be a built-in preset ("claude", "gemini", "codex")
|
||||
// Can be a built-in preset ("claude", "gemini", "codex", "cursor", "auggie", "amp")
|
||||
// or a custom agent defined in settings/agents.json.
|
||||
// If empty, uses the town's default_agent setting.
|
||||
// Takes precedence over Runtime if both are set.
|
||||
Agent string `json:"agent,omitempty"`
|
||||
|
||||
// Agents defines custom agent configurations or overrides for this rig.
|
||||
// Similar to TownSettings.Agents but applies to this rig only.
|
||||
// Allows per-rig custom agents for polecats and crew members.
|
||||
Agents map[string]*RuntimeConfig `json:"agents,omitempty"`
|
||||
}
|
||||
|
||||
// CrewConfig represents crew workspace settings for a rig.
|
||||
@@ -215,45 +221,95 @@ type CrewConfig struct {
|
||||
// This allows switching between different LLM backends (claude, aider, etc.)
|
||||
// without modifying startup code.
|
||||
type RuntimeConfig struct {
|
||||
// Provider selects runtime-specific defaults and integration behavior.
|
||||
// Known values: "claude", "codex", "generic". Default: "claude".
|
||||
Provider string `json:"provider,omitempty"`
|
||||
|
||||
// Command is the CLI command to invoke (e.g., "claude", "aider").
|
||||
// Default: "claude"
|
||||
Command string `json:"command,omitempty"`
|
||||
|
||||
// Args are additional command-line arguments.
|
||||
// Default: ["--dangerously-skip-permissions"]
|
||||
Args []string `json:"args,omitempty"`
|
||||
// Default: ["--dangerously-skip-permissions"] for built-in agents.
|
||||
// Empty array [] means no args (not "use defaults").
|
||||
Args []string `json:"args"`
|
||||
|
||||
// InitialPrompt is an optional first message to send after startup.
|
||||
// For claude, this is passed as the prompt argument.
|
||||
// Empty by default (hooks handle context).
|
||||
InitialPrompt string `json:"initial_prompt,omitempty"`
|
||||
|
||||
// PromptMode controls how prompts are passed to the runtime.
|
||||
// Supported values: "arg" (append prompt arg), "none" (ignore prompt).
|
||||
// Default: "arg" for claude/generic, "none" for codex.
|
||||
PromptMode string `json:"prompt_mode,omitempty"`
|
||||
|
||||
// Session config controls environment integration for runtime session IDs.
|
||||
Session *RuntimeSessionConfig `json:"session,omitempty"`
|
||||
|
||||
// Hooks config controls runtime hook installation (if supported).
|
||||
Hooks *RuntimeHooksConfig `json:"hooks,omitempty"`
|
||||
|
||||
// Tmux config controls process detection and readiness heuristics.
|
||||
Tmux *RuntimeTmuxConfig `json:"tmux,omitempty"`
|
||||
|
||||
// Instructions controls the per-workspace instruction file name.
|
||||
Instructions *RuntimeInstructionsConfig `json:"instructions,omitempty"`
|
||||
}
|
||||
|
||||
// RuntimeSessionConfig configures how Gas Town discovers runtime session IDs.
// Empty fields are filled from provider-specific defaults during
// normalization (see defaultSessionIDEnv / defaultConfigDirEnv).
type RuntimeSessionConfig struct {
	// SessionIDEnv is the environment variable set by the runtime to identify a session.
	// Default: "CLAUDE_SESSION_ID" for claude, empty for codex/generic.
	SessionIDEnv string `json:"session_id_env,omitempty"`

	// ConfigDirEnv is the environment variable that selects a runtime account/config dir.
	// Default: "CLAUDE_CONFIG_DIR" for claude, empty for codex/generic.
	ConfigDirEnv string `json:"config_dir_env,omitempty"`
}
|
||||
|
||||
// RuntimeHooksConfig configures runtime hook installation.
// Empty fields are filled from provider-specific defaults during
// normalization (see defaultHooksProvider / defaultHooksDir / defaultHooksFile).
type RuntimeHooksConfig struct {
	// Provider controls which hook templates to install: "claude", "opencode", or "none".
	Provider string `json:"provider,omitempty"`

	// Dir is the settings directory (e.g., ".claude").
	Dir string `json:"dir,omitempty"`

	// SettingsFile is the settings file name (e.g., "settings.json").
	SettingsFile string `json:"settings_file,omitempty"`
}
|
||||
|
||||
// RuntimeTmuxConfig controls tmux heuristics for detecting runtime readiness.
// Empty fields are filled from provider-specific defaults during normalization.
type RuntimeTmuxConfig struct {
	// ProcessNames are tmux pane commands that indicate the runtime is running.
	ProcessNames []string `json:"process_names,omitempty"`

	// ReadyPromptPrefix is the prompt prefix to detect readiness (e.g., "> ").
	ReadyPromptPrefix string `json:"ready_prompt_prefix,omitempty"`

	// ReadyDelayMs is a fixed delay used when prompt detection is unavailable.
	// Note: 0 means "use the provider default"; an explicit zero delay cannot
	// be expressed.
	ReadyDelayMs int `json:"ready_delay_ms,omitempty"`
}
|
||||
|
||||
// RuntimeInstructionsConfig controls the name of the role instruction file.
// An empty File is filled from the provider default during normalization
// (see defaultInstructionsFile).
type RuntimeInstructionsConfig struct {
	// File is the instruction filename (e.g., "CLAUDE.md", "AGENTS.md").
	File string `json:"file,omitempty"`
}
|
||||
|
||||
// DefaultRuntimeConfig returns a RuntimeConfig with sensible defaults.
|
||||
func DefaultRuntimeConfig() *RuntimeConfig {
|
||||
return &RuntimeConfig{
|
||||
Command: "claude",
|
||||
Args: []string{"--dangerously-skip-permissions"},
|
||||
}
|
||||
return normalizeRuntimeConfig(&RuntimeConfig{Provider: "claude"})
|
||||
}
|
||||
|
||||
// BuildCommand returns the full command line string.
|
||||
// For use with tmux SendKeys.
|
||||
func (rc *RuntimeConfig) BuildCommand() string {
|
||||
if rc == nil {
|
||||
return DefaultRuntimeConfig().BuildCommand()
|
||||
}
|
||||
resolved := normalizeRuntimeConfig(rc)
|
||||
|
||||
cmd := rc.Command
|
||||
if cmd == "" {
|
||||
cmd = "claude"
|
||||
}
|
||||
|
||||
// Build args
|
||||
args := rc.Args
|
||||
if args == nil {
|
||||
args = []string{"--dangerously-skip-permissions"}
|
||||
}
|
||||
cmd := resolved.Command
|
||||
args := resolved.Args
|
||||
|
||||
// Combine command and args
|
||||
if len(args) > 0 {
|
||||
@@ -266,15 +322,16 @@ func (rc *RuntimeConfig) BuildCommand() string {
|
||||
// If the config has an InitialPrompt, it's appended as a quoted argument.
|
||||
// If prompt is provided, it overrides the config's InitialPrompt.
|
||||
func (rc *RuntimeConfig) BuildCommandWithPrompt(prompt string) string {
|
||||
base := rc.BuildCommand()
|
||||
resolved := normalizeRuntimeConfig(rc)
|
||||
base := resolved.BuildCommand()
|
||||
|
||||
// Use provided prompt or fall back to config
|
||||
p := prompt
|
||||
if p == "" && rc != nil {
|
||||
p = rc.InitialPrompt
|
||||
if p == "" {
|
||||
p = resolved.InitialPrompt
|
||||
}
|
||||
|
||||
if p == "" {
|
||||
if p == "" || resolved.PromptMode == "none" {
|
||||
return base
|
||||
}
|
||||
|
||||
@@ -282,6 +339,216 @@ func (rc *RuntimeConfig) BuildCommandWithPrompt(prompt string) string {
|
||||
return base + " " + quoteForShell(p)
|
||||
}
|
||||
|
||||
// BuildArgsWithPrompt returns the runtime command and args suitable for exec.
|
||||
func (rc *RuntimeConfig) BuildArgsWithPrompt(prompt string) []string {
|
||||
resolved := normalizeRuntimeConfig(rc)
|
||||
args := append([]string{resolved.Command}, resolved.Args...)
|
||||
|
||||
p := prompt
|
||||
if p == "" {
|
||||
p = resolved.InitialPrompt
|
||||
}
|
||||
|
||||
if p != "" && resolved.PromptMode != "none" {
|
||||
args = append(args, p)
|
||||
}
|
||||
|
||||
return args
|
||||
}
|
||||
|
||||
// normalizeRuntimeConfig fills every unset field of rc with the default for
// its provider and returns the result. A nil rc yields a fresh config with
// all claude defaults.
//
// NOTE(review): when rc is non-nil this mutates the caller's value in place
// (the returned pointer is rc itself) — callers sharing a config see the
// filled-in defaults. Confirm that is intended before copying instead.
//
// Order matters: Provider must be defaulted first (every other default is
// keyed on it), and Command before Tmux.ProcessNames (which derives from
// the command's basename for non-claude providers).
func normalizeRuntimeConfig(rc *RuntimeConfig) *RuntimeConfig {
	if rc == nil {
		rc = &RuntimeConfig{}
	}

	if rc.Provider == "" {
		rc.Provider = "claude"
	}

	if rc.Command == "" {
		rc.Command = defaultRuntimeCommand(rc.Provider)
	}

	// nil means "use defaults"; an explicit empty slice is preserved.
	if rc.Args == nil {
		rc.Args = defaultRuntimeArgs(rc.Provider)
	}

	if rc.PromptMode == "" {
		rc.PromptMode = defaultPromptMode(rc.Provider)
	}

	if rc.Session == nil {
		rc.Session = &RuntimeSessionConfig{}
	}

	if rc.Session.SessionIDEnv == "" {
		rc.Session.SessionIDEnv = defaultSessionIDEnv(rc.Provider)
	}

	if rc.Session.ConfigDirEnv == "" {
		rc.Session.ConfigDirEnv = defaultConfigDirEnv(rc.Provider)
	}

	if rc.Hooks == nil {
		rc.Hooks = &RuntimeHooksConfig{}
	}

	if rc.Hooks.Provider == "" {
		rc.Hooks.Provider = defaultHooksProvider(rc.Provider)
	}

	if rc.Hooks.Dir == "" {
		rc.Hooks.Dir = defaultHooksDir(rc.Provider)
	}

	if rc.Hooks.SettingsFile == "" {
		rc.Hooks.SettingsFile = defaultHooksFile(rc.Provider)
	}

	if rc.Tmux == nil {
		rc.Tmux = &RuntimeTmuxConfig{}
	}

	if rc.Tmux.ProcessNames == nil {
		rc.Tmux.ProcessNames = defaultProcessNames(rc.Provider, rc.Command)
	}

	if rc.Tmux.ReadyPromptPrefix == "" {
		rc.Tmux.ReadyPromptPrefix = defaultReadyPromptPrefix(rc.Provider)
	}

	// 0 is treated as "unset"; an explicit zero delay cannot be configured.
	if rc.Tmux.ReadyDelayMs == 0 {
		rc.Tmux.ReadyDelayMs = defaultReadyDelayMs(rc.Provider)
	}

	if rc.Instructions == nil {
		rc.Instructions = &RuntimeInstructionsConfig{}
	}

	if rc.Instructions.File == "" {
		rc.Instructions.File = defaultInstructionsFile(rc.Provider)
	}

	return rc
}
|
||||
|
||||
// defaultRuntimeCommand returns the CLI command implied by a provider name.
// "generic" has no default command; unknown providers fall back to "claude".
func defaultRuntimeCommand(provider string) string {
	if provider == "generic" {
		return ""
	}
	if provider == "codex" || provider == "opencode" {
		return provider
	}
	return "claude"
}
|
||||
|
||||
// defaultRuntimeArgs returns the default CLI arguments for a provider.
// Only claude has default args; every other provider gets nil.
func defaultRuntimeArgs(provider string) []string {
	if provider != "claude" {
		return nil
	}
	return []string{"--dangerously-skip-permissions"}
}
|
||||
|
||||
// defaultPromptMode returns how prompts are passed to the runtime:
// "none" for codex/opencode, "arg" for everything else.
func defaultPromptMode(provider string) string {
	switch provider {
	case "codex", "opencode":
		return "none"
	}
	return "arg"
}
|
||||
|
||||
// defaultSessionIDEnv returns the env var carrying the runtime session ID.
// Only claude exposes one.
func defaultSessionIDEnv(provider string) string {
	if provider != "claude" {
		return ""
	}
	return "CLAUDE_SESSION_ID"
}
|
||||
|
||||
// defaultConfigDirEnv returns the env var selecting a runtime config dir.
// Only claude exposes one.
func defaultConfigDirEnv(provider string) string {
	if provider != "claude" {
		return ""
	}
	return "CLAUDE_CONFIG_DIR"
}
|
||||
|
||||
// defaultHooksProvider maps a runtime provider to its hook template family.
// Providers without hook support get "none".
func defaultHooksProvider(provider string) string {
	if provider == "claude" || provider == "opencode" {
		return provider
	}
	return "none"
}
|
||||
|
||||
// defaultHooksDir returns the settings directory used for hook installation;
// providers without hook support get "".
func defaultHooksDir(provider string) string {
	dirs := map[string]string{
		"claude":   ".claude",
		"opencode": ".opencode/plugin",
	}
	return dirs[provider]
}
|
||||
|
||||
// defaultHooksFile returns the hook settings file name for a provider;
// providers without hook support get "".
func defaultHooksFile(provider string) string {
	files := map[string]string{
		"claude":   "settings.json",
		"opencode": "gastown.js",
	}
	return files[provider]
}
|
||||
|
||||
// defaultProcessNames returns the process names to look for when checking
// whether a provider's runtime is alive. claude runs under node; otherwise
// the command's basename is used when a command is configured.
func defaultProcessNames(provider, command string) []string {
	switch {
	case provider == "claude":
		return []string{"node"}
	case command != "":
		return []string{filepath.Base(command)}
	default:
		return nil
	}
}
|
||||
|
||||
// defaultReadyPromptPrefix returns the pane-text prefix that signals the
// provider's runtime is ready for input, or "" when readiness cannot be
// detected by prompt prefix.
func defaultReadyPromptPrefix(provider string) string {
	if provider != "claude" {
		return ""
	}
	return "> "
}
|
||||
|
||||
// defaultReadyDelayMs returns the startup delay (milliseconds) to wait
// before treating the provider's runtime as ready. Providers with no known
// startup lag get 0.
func defaultReadyDelayMs(provider string) int {
	switch provider {
	case "claude":
		return 10000
	case "codex":
		return 3000
	default:
		return 0
	}
}
|
||||
|
||||
// defaultInstructionsFile returns the instructions filename a provider reads
// at startup. codex and opencode both use AGENTS.md; everything else uses
// CLAUDE.md.
func defaultInstructionsFile(provider string) string {
	switch provider {
	case "codex", "opencode":
		return "AGENTS.md"
	}
	return "CLAUDE.md"
}
|
||||
|
||||
// quoteForShell quotes a string for safe shell usage.
|
||||
func quoteForShell(s string) string {
|
||||
// Simple quoting: wrap in double quotes, escape internal quotes
|
||||
@@ -338,6 +605,14 @@ type MergeQueueConfig struct {
|
||||
// IntegrationBranches enables integration branch workflow for epics.
|
||||
IntegrationBranches bool `json:"integration_branches"`
|
||||
|
||||
// IntegrationBranchTemplate is the pattern for integration branch names.
|
||||
// Supports variables: {epic}, {prefix}, {user}
|
||||
// - {epic}: Full epic ID (e.g., "RA-123")
|
||||
// - {prefix}: Epic prefix before first hyphen (e.g., "RA")
|
||||
// - {user}: Git user.name (e.g., "klauern")
|
||||
// Default: "integration/{epic}"
|
||||
IntegrationBranchTemplate string `json:"integration_branch_template,omitempty"`
|
||||
|
||||
// OnConflict specifies conflict resolution strategy: "assign_back" or "auto_rebase".
|
||||
OnConflict string `json:"on_conflict"`
|
||||
|
||||
|
||||
@@ -50,6 +50,9 @@ type StartOptions struct {
|
||||
|
||||
// Interactive removes --dangerously-skip-permissions for interactive/refresh mode.
|
||||
Interactive bool
|
||||
|
||||
// AgentOverride specifies an alternate agent alias (e.g., for testing).
|
||||
AgentOverride string
|
||||
}
|
||||
|
||||
// validateCrewName checks that a crew name is safe and valid.
|
||||
@@ -171,6 +174,13 @@ func (m *Manager) Add(name string, createBranch bool) (*CrewWorker, error) {
|
||||
fmt.Printf("Warning: could not set up shared beads: %v\n", err)
|
||||
}
|
||||
|
||||
// Copy overlay files from .runtime/overlay/ to crew root.
|
||||
// This allows services to have .env and other config files at their root.
|
||||
if err := rig.CopyOverlay(m.rig.Path, crewPath); err != nil {
|
||||
// Non-fatal - log warning but continue
|
||||
fmt.Printf("Warning: could not copy overlay files: %v\n", err)
|
||||
}
|
||||
|
||||
// NOTE: Slash commands (.claude/commands/) are provisioned at town level by gt install.
|
||||
// All agents inherit them via Claude's directory traversal - no per-workspace copies needed.
|
||||
|
||||
@@ -472,8 +482,34 @@ func (m *Manager) Start(name string, opts StartOptions) error {
|
||||
return fmt.Errorf("ensuring Claude settings: %w", err)
|
||||
}
|
||||
|
||||
// Create tmux session
|
||||
if err := t.NewSession(sessionID, worker.ClonePath); err != nil {
|
||||
// Build the startup beacon for predecessor discovery via /resume
|
||||
// Pass it as Claude's initial prompt - processed when Claude is ready
|
||||
address := fmt.Sprintf("%s/crew/%s", m.rig.Name, name)
|
||||
topic := opts.Topic
|
||||
if topic == "" {
|
||||
topic = "start"
|
||||
}
|
||||
beacon := session.FormatStartupNudge(session.StartupNudgeConfig{
|
||||
Recipient: address,
|
||||
Sender: "human",
|
||||
Topic: topic,
|
||||
})
|
||||
|
||||
// Build startup command first
|
||||
// SessionStart hook handles context loading (gt prime --hook)
|
||||
claudeCmd, err := config.BuildCrewStartupCommandWithAgentOverride(m.rig.Name, name, m.rig.Path, beacon, opts.AgentOverride)
|
||||
if err != nil {
|
||||
return fmt.Errorf("building startup command: %w", err)
|
||||
}
|
||||
|
||||
// For interactive/refresh mode, remove --dangerously-skip-permissions
|
||||
if opts.Interactive {
|
||||
claudeCmd = strings.Replace(claudeCmd, " --dangerously-skip-permissions", "", 1)
|
||||
}
|
||||
|
||||
// Create session with command directly to avoid send-keys race condition.
|
||||
// See: https://github.com/anthropics/gastown/issues/280
|
||||
if err := t.NewSessionWithCommand(sessionID, worker.ClonePath, claudeCmd); err != nil {
|
||||
return fmt.Errorf("creating session: %w", err)
|
||||
}
|
||||
|
||||
@@ -494,37 +530,6 @@ func (m *Manager) Start(name string, opts StartOptions) error {
|
||||
// Set up C-b n/p keybindings for crew session cycling (non-fatal)
|
||||
_ = t.SetCrewCycleBindings(sessionID)
|
||||
|
||||
// Wait for shell to be ready
|
||||
if err := t.WaitForShellReady(sessionID, constants.ShellReadyTimeout); err != nil {
|
||||
return fmt.Errorf("waiting for shell: %w", err)
|
||||
}
|
||||
|
||||
// Build the startup beacon for predecessor discovery via /resume
|
||||
// Pass it as Claude's initial prompt - processed when Claude is ready
|
||||
address := fmt.Sprintf("%s/crew/%s", m.rig.Name, name)
|
||||
topic := opts.Topic
|
||||
if topic == "" {
|
||||
topic = "start"
|
||||
}
|
||||
beacon := session.FormatStartupNudge(session.StartupNudgeConfig{
|
||||
Recipient: address,
|
||||
Sender: "human",
|
||||
Topic: topic,
|
||||
})
|
||||
|
||||
// Start claude with environment exports and beacon as initial prompt
|
||||
// SessionStart hook handles context loading (gt prime --hook)
|
||||
claudeCmd := config.BuildCrewStartupCommand(m.rig.Name, name, m.rig.Path, beacon)
|
||||
|
||||
// For interactive/refresh mode, remove --dangerously-skip-permissions
|
||||
if opts.Interactive {
|
||||
claudeCmd = strings.Replace(claudeCmd, " --dangerously-skip-permissions", "", 1)
|
||||
}
|
||||
if err := t.SendKeys(sessionID, claudeCmd); err != nil {
|
||||
_ = t.KillSession(sessionID) // best-effort cleanup
|
||||
return fmt.Errorf("starting claude: %w", err)
|
||||
}
|
||||
|
||||
// Wait for Claude to start (non-fatal: session continues even if this times out)
|
||||
_ = t.WaitForCommand(sessionID, constants.SupportedShells, constants.ClaudeStartTimeout)
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
@@ -168,40 +169,40 @@ const recoveryHeartbeatInterval = 3 * time.Minute
|
||||
func (d *Daemon) heartbeat(state *State) {
|
||||
d.logger.Println("Heartbeat starting (recovery-focused)")
|
||||
|
||||
// 1. Poke Boot (the Deacon's watchdog) instead of Deacon directly
|
||||
// Boot handles the "when to wake Deacon" decision via triage logic
|
||||
// 1. Ensure Deacon is running (restart if dead)
|
||||
d.ensureDeaconRunning()
|
||||
|
||||
// 2. Poke Boot for intelligent triage (stuck/nudge/interrupt)
|
||||
// Boot handles nuanced "is Deacon responsive" decisions
|
||||
d.ensureBootRunning()
|
||||
|
||||
// 1b. Direct Deacon heartbeat check (belt-and-suspenders)
|
||||
// 3. Direct Deacon heartbeat check (belt-and-suspenders)
|
||||
// Boot may not detect all stuck states; this provides a fallback
|
||||
d.checkDeaconHeartbeat()
|
||||
|
||||
// 2. Ensure Witnesses are running for all rigs (restart if dead)
|
||||
// 4. Ensure Witnesses are running for all rigs (restart if dead)
|
||||
d.ensureWitnessesRunning()
|
||||
|
||||
// 2b. Ensure Refineries are running for all rigs (restart if dead)
|
||||
// 5. Ensure Refineries are running for all rigs (restart if dead)
|
||||
d.ensureRefineriesRunning()
|
||||
|
||||
// 3. Trigger pending polecat spawns (bootstrap mode - ZFC violation acceptable)
|
||||
// 6. Trigger pending polecat spawns (bootstrap mode - ZFC violation acceptable)
|
||||
// This ensures polecats get nudged even when Deacon isn't in a patrol cycle.
|
||||
// Uses regex-based WaitForClaudeReady, which is acceptable for daemon bootstrap.
|
||||
// Uses regex-based WaitForRuntimeReady, which is acceptable for daemon bootstrap.
|
||||
d.triggerPendingSpawns()
|
||||
|
||||
// 4. Process lifecycle requests
|
||||
// 7. Process lifecycle requests
|
||||
d.processLifecycleRequests()
|
||||
|
||||
// 5. Stale agent check REMOVED (gt-zecmc)
|
||||
// Was: d.checkStaleAgents() - marked agents "dead" based on bead update time.
|
||||
// This violated "discover, don't track" - agent liveness is observable from tmux.
|
||||
// The daemon now checks tmux directly in ensureXxxRunning() functions.
|
||||
// 8. (Removed) Stale agent check - violated "discover, don't track"
|
||||
|
||||
// 6. Check for GUPP violations (agents with work-on-hook not progressing)
|
||||
// 9. Check for GUPP violations (agents with work-on-hook not progressing)
|
||||
d.checkGUPPViolations()
|
||||
|
||||
// 7. Check for orphaned work (assigned to dead agents)
|
||||
// 10. Check for orphaned work (assigned to dead agents)
|
||||
d.checkOrphanedWork()
|
||||
|
||||
// 8. Check polecat session health (proactive crash detection)
|
||||
// 11. Check polecat session health (proactive crash detection)
|
||||
// This validates tmux sessions are still alive for polecats with work-on-hook
|
||||
d.checkPolecatSessionHealth()
|
||||
|
||||
@@ -290,51 +291,20 @@ func (d *Daemon) runDegradedBootTriage(b *boot.Boot) {
|
||||
}
|
||||
|
||||
// ensureDeaconRunning ensures the Deacon is running.
|
||||
// Discover, don't track: checks tmux directly instead of bead state (gt-zecmc).
|
||||
// The Deacon is the system's heartbeat - it must always be running.
|
||||
// Uses deacon.Manager for consistent startup behavior (WaitForShellReady, GUPP, etc.).
|
||||
func (d *Daemon) ensureDeaconRunning() {
|
||||
deaconSession := d.getDeaconSessionName()
|
||||
mgr := deacon.NewManager(d.config.TownRoot)
|
||||
|
||||
// Check if tmux session exists and Claude is running (observable reality)
|
||||
hasSession, sessionErr := d.tmux.HasSession(deaconSession)
|
||||
if sessionErr == nil && hasSession {
|
||||
if d.tmux.IsClaudeRunning(deaconSession) {
|
||||
if err := mgr.Start(""); err != nil {
|
||||
if err == deacon.ErrAlreadyRunning {
|
||||
// Deacon is running - nothing to do
|
||||
return
|
||||
}
|
||||
// Session exists but Claude not running - zombie session, kill it
|
||||
d.logger.Println("Deacon session exists but Claude not running, killing zombie session...")
|
||||
if err := d.tmux.KillSession(deaconSession); err != nil {
|
||||
d.logger.Printf("Warning: failed to kill zombie Deacon session: %v", err)
|
||||
}
|
||||
// Fall through to restart
|
||||
}
|
||||
|
||||
// Deacon not running - start it
|
||||
d.logger.Println("Deacon not running, starting...")
|
||||
|
||||
// Create session in deacon directory (ensures correct CLAUDE.md is loaded)
|
||||
// Use EnsureSessionFresh to handle zombie sessions that exist but have dead Claude
|
||||
deaconDir := filepath.Join(d.config.TownRoot, "deacon")
|
||||
sessionName := d.getDeaconSessionName()
|
||||
if err := d.tmux.EnsureSessionFresh(sessionName, deaconDir); err != nil {
|
||||
d.logger.Printf("Error creating Deacon session: %v", err)
|
||||
d.logger.Printf("Error starting Deacon: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Set environment (non-fatal: session works without these)
|
||||
_ = d.tmux.SetEnvironment(sessionName, "GT_ROLE", "deacon")
|
||||
_ = d.tmux.SetEnvironment(sessionName, "BD_ACTOR", "deacon")
|
||||
|
||||
// Launch Claude directly (no shell respawn loop)
|
||||
// The daemon will detect if Claude exits and restart it on next heartbeat
|
||||
// Export GT_ROLE and BD_ACTOR so Claude inherits them (tmux SetEnvironment doesn't export to processes)
|
||||
if err := d.tmux.SendKeys(sessionName, config.BuildAgentStartupCommand("deacon", "deacon", "", "")); err != nil {
|
||||
d.logger.Printf("Error launching Claude in Deacon session: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
d.logger.Println("Deacon session started successfully")
|
||||
d.logger.Println("Deacon started successfully")
|
||||
}
|
||||
|
||||
// checkDeaconHeartbeat checks if the Deacon is making progress.
|
||||
@@ -367,7 +337,8 @@ func (d *Daemon) checkDeaconHeartbeat() {
|
||||
}
|
||||
|
||||
if !hasSession {
|
||||
// Session doesn't exist - ensureBootRunning will handle restart
|
||||
// Session doesn't exist - ensureDeaconRunning already ran earlier
|
||||
// in heartbeat, so Deacon should be starting
|
||||
return
|
||||
}
|
||||
|
||||
@@ -378,7 +349,7 @@ func (d *Daemon) checkDeaconHeartbeat() {
|
||||
if err := d.tmux.KillSession(sessionName); err != nil {
|
||||
d.logger.Printf("Error killing stuck Deacon: %v", err)
|
||||
}
|
||||
// ensureDeaconRunning will be called next heartbeat to restart
|
||||
// ensureDeaconRunning will restart on next heartbeat
|
||||
} else {
|
||||
// Stuck but not critically - nudge to wake up
|
||||
d.logger.Printf("Deacon stuck for %s - nudging session", age.Round(time.Minute))
|
||||
@@ -439,7 +410,7 @@ func (d *Daemon) ensureRefineriesRunning() {
|
||||
// ensureRefineryRunning ensures the refinery for a specific rig is running.
|
||||
// Discover, don't track: uses Manager.Start() which checks tmux directly (gt-zecmc).
|
||||
func (d *Daemon) ensureRefineryRunning(rigName string) {
|
||||
// Check rig operational state before auto-starting
|
||||
// Check rig operational state before auto-starting
|
||||
if operational, reason := d.isRigOperational(rigName); !operational {
|
||||
d.logger.Printf("Skipping refinery auto-start for %s: %s", rigName, reason)
|
||||
return
|
||||
@@ -527,7 +498,7 @@ func (d *Daemon) isRigOperational(rigName string) (bool, string) {
|
||||
}
|
||||
|
||||
// triggerPendingSpawns polls pending polecat spawns and triggers those that are ready.
|
||||
// This is bootstrap mode - uses regex-based WaitForClaudeReady which is acceptable
|
||||
// This is bootstrap mode - uses regex-based WaitForRuntimeReady which is acceptable
|
||||
// for daemon operations when no AI agent is guaranteed to be running.
|
||||
// The timeout is short (2s) to avoid blocking the heartbeat.
|
||||
func (d *Daemon) triggerPendingSpawns() {
|
||||
@@ -546,7 +517,7 @@ func (d *Daemon) triggerPendingSpawns() {
|
||||
|
||||
d.logger.Printf("Found %d pending spawn(s), attempting to trigger...", len(pending))
|
||||
|
||||
// Trigger pending spawns (uses WaitForClaudeReady with short timeout)
|
||||
// Trigger pending spawns (uses WaitForRuntimeReady with short timeout)
|
||||
results, err := polecat.TriggerPendingSpawns(d.config.TownRoot, triggerTimeout)
|
||||
if err != nil {
|
||||
d.logger.Printf("Error triggering spawns: %v", err)
|
||||
@@ -697,18 +668,35 @@ func (d *Daemon) checkPolecatSessionHealth() {
|
||||
func (d *Daemon) checkRigPolecatHealth(rigName string) {
|
||||
// Get polecat directories for this rig
|
||||
polecatsDir := filepath.Join(d.config.TownRoot, rigName, "polecats")
|
||||
entries, err := os.ReadDir(polecatsDir)
|
||||
polecats, err := listPolecatWorktrees(polecatsDir)
|
||||
if err != nil {
|
||||
return // No polecats directory - rig might not have polecats
|
||||
}
|
||||
|
||||
for _, polecatName := range polecats {
|
||||
d.checkPolecatHealth(rigName, polecatName)
|
||||
}
|
||||
}
|
||||
|
||||
func listPolecatWorktrees(polecatsDir string) ([]string, error) {
|
||||
entries, err := os.ReadDir(polecatsDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
polecats := make([]string, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
if !entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
polecatName := entry.Name()
|
||||
d.checkPolecatHealth(rigName, polecatName)
|
||||
name := entry.Name()
|
||||
if strings.HasPrefix(name, ".") {
|
||||
continue
|
||||
}
|
||||
polecats = append(polecats, name)
|
||||
}
|
||||
|
||||
return polecats, nil
|
||||
}
|
||||
|
||||
// checkPolecatHealth checks a single polecat's session health.
|
||||
@@ -764,8 +752,14 @@ func (d *Daemon) restartPolecatSession(rigName, polecatName, sessionName string)
|
||||
return fmt.Errorf("cannot restart polecat: %s", reason)
|
||||
}
|
||||
|
||||
// Determine working directory
|
||||
workDir := filepath.Join(d.config.TownRoot, rigName, "polecats", polecatName)
|
||||
// Determine working directory (handle both new and old structures)
|
||||
// New structure: polecats/<name>/<rigname>/
|
||||
// Old structure: polecats/<name>/
|
||||
workDir := filepath.Join(d.config.TownRoot, rigName, "polecats", polecatName, rigName)
|
||||
if _, err := os.Stat(workDir); os.IsNotExist(err) {
|
||||
// Fall back to old structure
|
||||
workDir = filepath.Join(d.config.TownRoot, rigName, "polecats", polecatName)
|
||||
}
|
||||
|
||||
// Verify the worktree exists
|
||||
if _, err := os.Stat(workDir); os.IsNotExist(err) {
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
@@ -206,6 +207,33 @@ func TestSaveLoadState_Roundtrip(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestListPolecatWorktrees_SkipsHiddenDirs(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
polecatsDir := filepath.Join(tmpDir, "some-rig", "polecats")
|
||||
|
||||
if err := os.MkdirAll(filepath.Join(polecatsDir, ".claude"), 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.MkdirAll(filepath.Join(polecatsDir, "furiosa"), 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(polecatsDir, "not-a-dir.txt"), []byte("x"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
polecats, err := listPolecatWorktrees(polecatsDir)
|
||||
if err != nil {
|
||||
t.Fatalf("listPolecatWorktrees returned error: %v", err)
|
||||
}
|
||||
|
||||
if slices.Contains(polecats, ".claude") {
|
||||
t.Fatalf("expected hidden dir .claude to be ignored, got %v", polecats)
|
||||
}
|
||||
if !slices.Contains(polecats, "furiosa") {
|
||||
t.Fatalf("expected furiosa to be included, got %v", polecats)
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: TestIsWitnessSession removed - isWitnessSession function was deleted
|
||||
// as part of ZFC cleanup. Witness poking is now Deacon's responsibility.
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ package daemon
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -427,6 +428,12 @@ func (d *Daemon) getWorkDir(config *beads.RoleConfig, parsed *ParsedIdentity) st
|
||||
case "crew":
|
||||
return filepath.Join(d.config.TownRoot, parsed.RigName, "crew", parsed.AgentName)
|
||||
case "polecat":
|
||||
// New structure: polecats/<name>/<rigname>/ (for LLM ergonomics)
|
||||
// Old structure: polecats/<name>/ (for backward compat)
|
||||
newPath := filepath.Join(d.config.TownRoot, parsed.RigName, "polecats", parsed.AgentName, parsed.RigName)
|
||||
if _, err := os.Stat(newPath); err == nil {
|
||||
return newPath
|
||||
}
|
||||
return filepath.Join(d.config.TownRoot, parsed.RigName, "polecats", parsed.AgentName)
|
||||
default:
|
||||
return ""
|
||||
@@ -466,6 +473,10 @@ func (d *Daemon) getStartCommand(roleConfig *beads.RoleConfig, parsed *ParsedIde
|
||||
|
||||
// Default command for all agents - use runtime config
|
||||
defaultCmd := "exec " + config.GetRuntimeCommand(rigPath)
|
||||
runtimeConfig := config.LoadRuntimeConfig(rigPath)
|
||||
if runtimeConfig.Session != nil && runtimeConfig.Session.SessionIDEnv != "" {
|
||||
defaultCmd = config.PrependEnv(defaultCmd, map[string]string{"GT_SESSION_ID_ENV": runtimeConfig.Session.SessionIDEnv})
|
||||
}
|
||||
|
||||
// Polecats need environment variables set in the command
|
||||
if parsed.RoleType == "polecat" {
|
||||
|
||||
@@ -29,6 +29,10 @@ func TestGetRoleConfigForIdentity_PrefersTownRoleBead(t *testing.T) {
|
||||
townRoot := t.TempDir()
|
||||
runBd(t, townRoot, "init", "--quiet", "--prefix", "hq")
|
||||
|
||||
runBd(t, townRoot, "config", "set", "types.custom", "agent,role,rig,convoy,event")
|
||||
|
||||
runBd(t, townRoot, "config", "set", "types.custom", "agent,role,rig,convoy,event")
|
||||
|
||||
// Create canonical role bead.
|
||||
runBd(t, townRoot, "create",
|
||||
"--id", "hq-witness-role",
|
||||
@@ -62,6 +66,10 @@ func TestGetRoleConfigForIdentity_FallsBackToLegacyRoleBead(t *testing.T) {
|
||||
townRoot := t.TempDir()
|
||||
runBd(t, townRoot, "init", "--quiet", "--prefix", "gt")
|
||||
|
||||
runBd(t, townRoot, "config", "set", "types.custom", "agent,role,rig,convoy,event")
|
||||
|
||||
runBd(t, townRoot, "config", "set", "types.custom", "agent,role,rig,convoy,event")
|
||||
|
||||
// Only legacy role bead exists.
|
||||
runBd(t, townRoot, "create",
|
||||
"--id", "gt-witness-role",
|
||||
|
||||
@@ -49,8 +49,9 @@ func (m *Manager) deaconDir() string {
|
||||
}
|
||||
|
||||
// Start starts the deacon session.
|
||||
// The deacon runs in a respawn loop for automatic recovery.
|
||||
func (m *Manager) Start() error {
|
||||
// agentOverride allows specifying an alternate agent alias (e.g., for testing).
|
||||
// Restarts are handled by daemon via ensureDeaconRunning on each heartbeat.
|
||||
func (m *Manager) Start(agentOverride string) error {
|
||||
t := tmux.NewTmux()
|
||||
sessionID := m.SessionName()
|
||||
|
||||
@@ -78,8 +79,16 @@ func (m *Manager) Start() error {
|
||||
return fmt.Errorf("ensuring Claude settings: %w", err)
|
||||
}
|
||||
|
||||
// Create new tmux session
|
||||
if err := t.NewSession(sessionID, deaconDir); err != nil {
|
||||
// Build startup command first
|
||||
// Restarts are handled by daemon via ensureDeaconRunning on each heartbeat
|
||||
startupCmd, err := config.BuildAgentStartupCommandWithAgentOverride("deacon", "deacon", "", "", agentOverride)
|
||||
if err != nil {
|
||||
return fmt.Errorf("building startup command: %w", err)
|
||||
}
|
||||
|
||||
// Create session with command directly to avoid send-keys race condition.
|
||||
// See: https://github.com/anthropics/gastown/issues/280
|
||||
if err := t.NewSessionWithCommand(sessionID, deaconDir, startupCmd); err != nil {
|
||||
return fmt.Errorf("creating tmux session: %w", err)
|
||||
}
|
||||
|
||||
@@ -91,21 +100,7 @@ func (m *Manager) Start() error {
|
||||
theme := tmux.DeaconTheme()
|
||||
_ = t.ConfigureGasTownSession(sessionID, theme, "", "Deacon", "health-check")
|
||||
|
||||
// Launch Claude in a respawn loop for automatic recovery
|
||||
// The respawn loop ensures the deacon restarts if Claude crashes
|
||||
runtimeCmd := config.GetRuntimeCommand("")
|
||||
respawnCmd := fmt.Sprintf(
|
||||
`export GT_ROLE=deacon BD_ACTOR=deacon GIT_AUTHOR_NAME=deacon && while true; do echo "⛪ Starting Deacon session..."; %s; echo ""; echo "Deacon exited. Restarting in 2s... (Ctrl-C to stop)"; sleep 2; done`,
|
||||
runtimeCmd,
|
||||
)
|
||||
|
||||
if err := t.SendKeysDelayed(sessionID, respawnCmd, 200); err != nil {
|
||||
_ = t.KillSession(sessionID) // best-effort cleanup
|
||||
return fmt.Errorf("starting Claude agent: %w", err)
|
||||
}
|
||||
|
||||
// Wait for Claude to start (non-fatal)
|
||||
// Note: Deacon respawn loop makes this tricky - Claude restarts multiple times
|
||||
if err := t.WaitForCommand(sessionID, constants.SupportedShells, constants.ClaudeStartTimeout); err != nil {
|
||||
// Non-fatal - try to continue anyway
|
||||
}
|
||||
@@ -115,8 +110,18 @@ func (m *Manager) Start() error {
|
||||
|
||||
time.Sleep(constants.ShutdownNotifyDelay)
|
||||
|
||||
// Note: Deacon doesn't get startup nudge due to respawn loop complexity
|
||||
// The deacon uses its own patrol pattern defined in its CLAUDE.md/prime
|
||||
// Inject startup nudge for predecessor discovery via /resume
|
||||
_ = session.StartupNudge(t, sessionID, session.StartupNudgeConfig{
|
||||
Recipient: "deacon",
|
||||
Sender: "daemon",
|
||||
Topic: "patrol",
|
||||
}) // Non-fatal
|
||||
|
||||
// GUPP: Gas Town Universal Propulsion Principle
|
||||
// Send the propulsion nudge to trigger autonomous patrol execution.
|
||||
// Wait for beacon to be fully processed (needs to be separate prompt)
|
||||
time.Sleep(2 * time.Second)
|
||||
_ = t.NudgeSession(sessionID, session.PropulsionNudgeForRole("deacon", deaconDir)) // Non-fatal
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
88
internal/deacon/pause.go
Normal file
88
internal/deacon/pause.go
Normal file
@@ -0,0 +1,88 @@
|
||||
// Package deacon provides the Deacon agent infrastructure.
|
||||
package deacon
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PauseState represents the Deacon pause file contents.
// While the pause file reports Paused=true, the Deacon must not perform any
// patrol actions.
type PauseState struct {
	// Paused is true if the Deacon is currently paused.
	Paused bool `json:"paused"`

	// Reason explains why the Deacon was paused.
	Reason string `json:"reason,omitempty"`

	// PausedAt is when the Deacon was paused.
	PausedAt time.Time `json:"paused_at"`

	// PausedBy identifies who paused the Deacon (e.g., "human", "mayor").
	PausedBy string `json:"paused_by,omitempty"`
}

// GetPauseFile returns the path to the Deacon pause file under the town's
// runtime state directory.
func GetPauseFile(townRoot string) string {
	runtimeDir := filepath.Join(townRoot, ".runtime", "deacon")
	return filepath.Join(runtimeDir, "paused.json")
}

// IsPaused checks if the Deacon is currently paused.
// Returns (isPaused, pauseState, error). A missing pause file means "not
// paused" and is not an error: (false, nil, nil).
func IsPaused(townRoot string) (bool, *PauseState, error) {
	raw, err := os.ReadFile(GetPauseFile(townRoot)) //nolint:gosec // G304: path is constructed from trusted townRoot
	switch {
	case os.IsNotExist(err):
		return false, nil, nil
	case err != nil:
		return false, nil, err
	}

	state := &PauseState{}
	if err := json.Unmarshal(raw, state); err != nil {
		return false, nil, err
	}
	return state.Paused, state, nil
}

// Pause pauses the Deacon by writing the pause file (creating parent
// directories as needed). PausedAt is recorded in UTC.
func Pause(townRoot, reason, pausedBy string) error {
	pauseFile := GetPauseFile(townRoot)

	if err := os.MkdirAll(filepath.Dir(pauseFile), 0755); err != nil {
		return err
	}

	payload, err := json.MarshalIndent(PauseState{
		Paused:   true,
		Reason:   reason,
		PausedAt: time.Now().UTC(),
		PausedBy: pausedBy,
	}, "", "  ")
	if err != nil {
		return err
	}

	return os.WriteFile(pauseFile, payload, 0600)
}

// Resume resumes the Deacon by removing the pause file. Removing an already
// absent file is treated as success, so Resume is idempotent.
func Resume(townRoot string) error {
	if err := os.Remove(GetPauseFile(townRoot)); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}
|
||||
@@ -468,12 +468,20 @@ func (c *CloneDivergenceCheck) findAllClones(townRoot string) []string {
|
||||
}
|
||||
}
|
||||
|
||||
// Add polecats
|
||||
// Add polecats (handle both new and old structures)
|
||||
// New structure: polecats/<name>/<rigname>/
|
||||
// Old structure: polecats/<name>/
|
||||
rigName := entry.Name()
|
||||
polecatsPath := filepath.Join(rigPath, "polecats")
|
||||
if polecatEntries, err := os.ReadDir(polecatsPath); err == nil {
|
||||
for _, polecat := range polecatEntries {
|
||||
if polecat.IsDir() && !strings.HasPrefix(polecat.Name(), ".") {
|
||||
path := filepath.Join(polecatsPath, polecat.Name())
|
||||
// Try new structure first
|
||||
path := filepath.Join(polecatsPath, polecat.Name(), rigName)
|
||||
if !c.isGitRepo(path) {
|
||||
// Fall back to old structure
|
||||
path = filepath.Join(polecatsPath, polecat.Name())
|
||||
}
|
||||
if c.isGitRepo(path) {
|
||||
clones = append(clones, path)
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
|
||||
"github.com/steveyegge/gastown/internal/claude"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/templates"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
@@ -287,15 +288,23 @@ func (c *ClaudeSettingsCheck) findSettingsFiles(townRoot string) []staleSettings
|
||||
if !pcEntry.IsDir() || pcEntry.Name() == ".claude" {
|
||||
continue
|
||||
}
|
||||
pcWrongSettings := filepath.Join(polecatsDir, pcEntry.Name(), ".claude", "settings.json")
|
||||
if fileExists(pcWrongSettings) {
|
||||
files = append(files, staleSettingsInfo{
|
||||
path: pcWrongSettings,
|
||||
agentType: "polecat",
|
||||
rigName: rigName,
|
||||
sessionName: fmt.Sprintf("gt-%s-%s", rigName, pcEntry.Name()),
|
||||
wrongLocation: true,
|
||||
})
|
||||
// Check for wrong settings in both structures:
|
||||
// Old structure: polecats/<name>/.claude/settings.json
|
||||
// New structure: polecats/<name>/<rigname>/.claude/settings.json
|
||||
wrongPaths := []string{
|
||||
filepath.Join(polecatsDir, pcEntry.Name(), ".claude", "settings.json"),
|
||||
filepath.Join(polecatsDir, pcEntry.Name(), rigName, ".claude", "settings.json"),
|
||||
}
|
||||
for _, pcWrongSettings := range wrongPaths {
|
||||
if fileExists(pcWrongSettings) {
|
||||
files = append(files, staleSettingsInfo{
|
||||
path: pcWrongSettings,
|
||||
agentType: "polecat",
|
||||
rigName: rigName,
|
||||
sessionName: fmt.Sprintf("gt-%s-%s", rigName, pcEntry.Name()),
|
||||
wrongLocation: true,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -477,14 +486,11 @@ func (c *ClaudeSettingsCheck) Fix(ctx *CheckContext) error {
|
||||
}
|
||||
|
||||
// Town-root files were inherited by ALL agents via directory traversal.
|
||||
// Cycle all Gas Town sessions so they pick up the corrected file locations.
|
||||
// This includes gt-* (rig agents) and hq-* (mayor, deacon).
|
||||
sessions, _ := t.ListSessions()
|
||||
for _, sess := range sessions {
|
||||
if strings.HasPrefix(sess, session.Prefix) || strings.HasPrefix(sess, session.HQPrefix) {
|
||||
_ = t.KillSession(sess)
|
||||
}
|
||||
}
|
||||
// Warn user to restart agents - don't auto-kill sessions as that's too disruptive,
|
||||
// especially since deacon runs gt doctor automatically which would create a loop.
|
||||
// Settings are only read at startup, so running agents already have config loaded.
|
||||
fmt.Printf("\n %s Town-root settings were moved. Restart agents to pick up new config:\n", style.Warning.Render("⚠"))
|
||||
fmt.Printf(" gt up --restart\n\n")
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
@@ -1016,3 +1016,69 @@ func TestClaudeSettingsCheck_FixMovesCLAUDEmdToMayor(t *testing.T) {
|
||||
t.Error("expected CLAUDE.md to be created at mayor/")
|
||||
}
|
||||
}
|
||||
|
||||
func TestClaudeSettingsCheck_TownRootSettingsWarnsInsteadOfKilling(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create mayor directory (needed for fix to recreate settings there)
|
||||
mayorDir := filepath.Join(tmpDir, "mayor")
|
||||
if err := os.MkdirAll(mayorDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create settings.json at town root (wrong location - pollutes all agents)
|
||||
staleTownRootDir := filepath.Join(tmpDir, ".claude")
|
||||
if err := os.MkdirAll(staleTownRootDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
staleTownRootSettings := filepath.Join(staleTownRootDir, "settings.json")
|
||||
// Create valid settings content
|
||||
settingsContent := `{
|
||||
"env": {"PATH": "/usr/bin"},
|
||||
"enabledPlugins": ["claude-code-expert"],
|
||||
"hooks": {
|
||||
"SessionStart": [{"matcher": "", "hooks": [{"type": "command", "command": "gt prime"}]}],
|
||||
"Stop": [{"matcher": "", "hooks": [{"type": "command", "command": "gt handoff"}]}]
|
||||
}
|
||||
}`
|
||||
if err := os.WriteFile(staleTownRootSettings, []byte(settingsContent), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
check := NewClaudeSettingsCheck()
|
||||
ctx := &CheckContext{TownRoot: tmpDir}
|
||||
|
||||
// Run to detect
|
||||
result := check.Run(ctx)
|
||||
if result.Status != StatusError {
|
||||
t.Fatalf("expected StatusError for town root settings, got %v", result.Status)
|
||||
}
|
||||
|
||||
// Verify it's flagged as wrong location
|
||||
foundWrongLocation := false
|
||||
for _, d := range result.Details {
|
||||
if strings.Contains(d, "wrong location") {
|
||||
foundWrongLocation = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !foundWrongLocation {
|
||||
t.Errorf("expected details to mention wrong location, got %v", result.Details)
|
||||
}
|
||||
|
||||
// Apply fix - should NOT return error and should NOT kill sessions
|
||||
// (session killing would require tmux which isn't available in tests)
|
||||
if err := check.Fix(ctx); err != nil {
|
||||
t.Fatalf("Fix failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify stale file was deleted
|
||||
if _, err := os.Stat(staleTownRootSettings); !os.IsNotExist(err) {
|
||||
t.Error("expected settings.json at town root to be deleted")
|
||||
}
|
||||
|
||||
// Verify .claude directory was cleaned up (best-effort)
|
||||
if _, err := os.Stat(staleTownRootDir); !os.IsNotExist(err) {
|
||||
t.Error("expected .claude directory at town root to be deleted")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -267,8 +267,9 @@ func (c *SettingsCheck) findRigs(townRoot string) []string {
|
||||
return findAllRigs(townRoot)
|
||||
}
|
||||
|
||||
// SessionHookCheck verifies settings.json files use session-start.sh for proper
|
||||
// session_id passthrough. Without this wrapper, gt seance cannot discover sessions.
|
||||
// SessionHookCheck verifies settings.json files use proper session_id passthrough.
|
||||
// Valid options: session-start.sh wrapper OR 'gt prime --hook'.
|
||||
// Without proper config, gt seance cannot discover sessions.
|
||||
type SessionHookCheck struct {
|
||||
BaseCheck
|
||||
}
|
||||
@@ -278,12 +279,12 @@ func NewSessionHookCheck() *SessionHookCheck {
|
||||
return &SessionHookCheck{
|
||||
BaseCheck: BaseCheck{
|
||||
CheckName: "session-hooks",
|
||||
CheckDescription: "Check that settings.json hooks use session-start.sh",
|
||||
CheckDescription: "Check that settings.json hooks use session-start.sh or --hook flag",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Run checks if all settings.json files use session-start.sh wrapper.
|
||||
// Run checks if all settings.json files use session-start.sh or --hook flag.
|
||||
func (c *SessionHookCheck) Run(ctx *CheckContext) *CheckResult {
|
||||
var issues []string
|
||||
var checked int
|
||||
@@ -307,7 +308,7 @@ func (c *SessionHookCheck) Run(ctx *CheckContext) *CheckResult {
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusOK,
|
||||
Message: fmt.Sprintf("All %d settings.json file(s) use session-start.sh", checked),
|
||||
Message: fmt.Sprintf("All %d settings.json file(s) use proper session_id passthrough", checked),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -316,7 +317,7 @@ func (c *SessionHookCheck) Run(ctx *CheckContext) *CheckResult {
|
||||
Status: StatusWarning,
|
||||
Message: fmt.Sprintf("%d hook issue(s) found across settings.json files", len(issues)),
|
||||
Details: issues,
|
||||
FixHint: "Update SessionStart/PreCompact hooks to use 'bash ~/.claude/hooks/session-start.sh' for session_id passthrough",
|
||||
FixHint: "Update hooks to use 'gt prime --hook' or 'bash ~/.claude/hooks/session-start.sh' for session_id passthrough",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -334,22 +335,22 @@ func (c *SessionHookCheck) checkSettingsFile(path string) []string {
|
||||
// Check for SessionStart hooks
|
||||
if strings.Contains(content, "SessionStart") {
|
||||
if !c.usesSessionStartScript(content, "SessionStart") {
|
||||
problems = append(problems, "SessionStart uses bare 'gt prime' (missing session_id passthrough)")
|
||||
problems = append(problems, "SessionStart uses bare 'gt prime' - add --hook flag or use session-start.sh")
|
||||
}
|
||||
}
|
||||
|
||||
// Check for PreCompact hooks
|
||||
if strings.Contains(content, "PreCompact") {
|
||||
if !c.usesSessionStartScript(content, "PreCompact") {
|
||||
problems = append(problems, "PreCompact uses bare 'gt prime' (missing session_id passthrough)")
|
||||
problems = append(problems, "PreCompact uses bare 'gt prime' - add --hook flag or use session-start.sh")
|
||||
}
|
||||
}
|
||||
|
||||
return problems
|
||||
}
|
||||
|
||||
// usesSessionStartScript checks if the hook configuration uses session-start.sh.
|
||||
// Returns true if the hook is properly configured or if no hook is configured.
|
||||
// usesSessionStartScript checks if the hook configuration handles session_id properly.
|
||||
// Valid: session-start.sh wrapper OR 'gt prime --hook'. Returns true if properly configured.
|
||||
func (c *SessionHookCheck) usesSessionStartScript(content, hookType string) bool {
|
||||
// Find the hook section - look for the hook type followed by its configuration
|
||||
// This is a simple heuristic - we look for "gt prime" without session-start.sh
|
||||
@@ -382,10 +383,15 @@ func (c *SessionHookCheck) usesSessionStartScript(content, hookType string) bool
|
||||
return true // Uses the wrapper script
|
||||
}
|
||||
|
||||
// Check if it uses bare 'gt prime' without the wrapper
|
||||
// Patterns to detect: "gt prime", "'gt prime'", "gt prime\""
|
||||
// Check if it uses 'gt prime --hook' which handles session_id via stdin
|
||||
if strings.Contains(section, "gt prime") {
|
||||
return false // Uses bare gt prime without session-start.sh
|
||||
// gt prime --hook is valid - it reads session_id from stdin JSON
|
||||
// Must match --hook as complete flag, not substring (e.g., --hookup)
|
||||
if containsFlag(section, "--hook") {
|
||||
return true
|
||||
}
|
||||
// Bare 'gt prime' without --hook doesn't get session_id
|
||||
return false
|
||||
}
|
||||
|
||||
// No gt prime or session-start.sh found - might be a different hook configuration
|
||||
@@ -454,14 +460,24 @@ func (c *SessionHookCheck) findSettingsFiles(townRoot string) []string {
|
||||
}
|
||||
}
|
||||
|
||||
// Polecats
|
||||
// Polecats (handle both new and old structures)
|
||||
// New structure: polecats/<name>/<rigname>/.claude/settings.json
|
||||
// Old structure: polecats/<name>/.claude/settings.json
|
||||
rigName := filepath.Base(rig)
|
||||
polecatsPath := filepath.Join(rig, "polecats")
|
||||
if polecatEntries, err := os.ReadDir(polecatsPath); err == nil {
|
||||
for _, polecat := range polecatEntries {
|
||||
if polecat.IsDir() && !strings.HasPrefix(polecat.Name(), ".") {
|
||||
polecatSettings := filepath.Join(polecatsPath, polecat.Name(), ".claude", "settings.json")
|
||||
// Try new structure first
|
||||
polecatSettings := filepath.Join(polecatsPath, polecat.Name(), rigName, ".claude", "settings.json")
|
||||
if _, err := os.Stat(polecatSettings); err == nil {
|
||||
files = append(files, polecatSettings)
|
||||
} else {
|
||||
// Fall back to old structure
|
||||
polecatSettings = filepath.Join(polecatsPath, polecat.Name(), ".claude", "settings.json")
|
||||
if _, err := os.Stat(polecatSettings); err == nil {
|
||||
files = append(files, polecatSettings)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -504,3 +520,16 @@ func findAllRigs(townRoot string) []string {
|
||||
|
||||
return rigs
|
||||
}
|
||||
|
||||
// containsFlag reports whether s contains flag as a complete command-line
// flag token, i.e. followed by end-of-string, whitespace, or a quote.
// The boundary check prevents false positives on longer flags that share
// the prefix (e.g. "--hookup" does not match "--hook"). All occurrences
// are scanned, so an earlier prefix-extended match does not hide a valid
// standalone occurrence later (e.g. "gt prime --hookup --hook").
func containsFlag(s, flag string) bool {
	for search := s; ; {
		idx := strings.Index(search, flag)
		if idx == -1 {
			return false
		}
		end := idx + len(flag)
		if end >= len(search) {
			return true // flag at end of string
		}
		switch search[end] {
		case '"', ' ', '\'', '\n', '\t':
			return true // flag followed by a token boundary
		}
		// Prefix of a longer word (e.g. "--hookup"); keep scanning.
		search = search[end:]
	}
}
|
||||
|
||||
226
internal/doctor/config_check_test.go
Normal file
226
internal/doctor/config_check_test.go
Normal file
@@ -0,0 +1,226 @@
|
||||
package doctor
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestSessionHookCheck_UsesSessionStartScript is a table-driven test of the
// hook-content heuristic: session-start.sh or 'gt prime --hook' is valid,
// bare 'gt prime' (or a --hook lookalike such as --hookup) is not.
func TestSessionHookCheck_UsesSessionStartScript(t *testing.T) {
	check := NewSessionHookCheck()

	tests := []struct {
		name     string
		content  string
		hookType string
		want     bool
	}{
		{
			name:     "bare gt prime fails",
			content:  `{"hooks": {"SessionStart": [{"hooks": [{"type": "command", "command": "gt prime"}]}]}}`,
			hookType: "SessionStart",
			want:     false,
		},
		{
			name:     "gt prime --hook passes",
			content:  `{"hooks": {"SessionStart": [{"hooks": [{"type": "command", "command": "gt prime --hook"}]}]}}`,
			hookType: "SessionStart",
			want:     true,
		},
		{
			name:     "session-start.sh passes",
			content:  `{"hooks": {"SessionStart": [{"hooks": [{"type": "command", "command": "bash ~/.claude/hooks/session-start.sh"}]}]}}`,
			hookType: "SessionStart",
			want:     true,
		},
		{
			// Absent hook type is treated as properly configured.
			name:     "no SessionStart hook passes",
			content:  `{"hooks": {"Stop": [{"hooks": [{"type": "command", "command": "gt handoff"}]}]}}`,
			hookType: "SessionStart",
			want:     true,
		},
		{
			name:     "PreCompact with --hook passes",
			content:  `{"hooks": {"PreCompact": [{"hooks": [{"type": "command", "command": "gt prime --hook"}]}]}}`,
			hookType: "PreCompact",
			want:     true,
		},
		{
			name:     "PreCompact bare gt prime fails",
			content:  `{"hooks": {"PreCompact": [{"hooks": [{"type": "command", "command": "gt prime"}]}]}}`,
			hookType: "PreCompact",
			want:     false,
		},
		{
			name:     "gt prime --hook with extra flags passes",
			content:  `{"hooks": {"SessionStart": [{"hooks": [{"type": "command", "command": "gt prime --hook --verbose"}]}]}}`,
			hookType: "SessionStart",
			want:     true,
		},
		{
			name:     "gt prime with --hook not first still passes",
			content:  `{"hooks": {"SessionStart": [{"hooks": [{"type": "command", "command": "gt prime --verbose --hook"}]}]}}`,
			hookType: "SessionStart",
			want:     true,
		},
		{
			name:     "gt prime with other flags but no --hook fails",
			content:  `{"hooks": {"SessionStart": [{"hooks": [{"type": "command", "command": "gt prime --verbose"}]}]}}`,
			hookType: "SessionStart",
			want:     false,
		},
		{
			name:     "both session-start.sh and gt prime passes (session-start.sh wins)",
			content:  `{"hooks": {"SessionStart": [{"hooks": [{"type": "command", "command": "bash session-start.sh && gt prime"}]}]}}`,
			hookType: "SessionStart",
			want:     true,
		},
		{
			// Guards against substring matching: --hookup must not count as --hook.
			name:     "gt prime --hookup is NOT valid (false positive check)",
			content:  `{"hooks": {"SessionStart": [{"hooks": [{"type": "command", "command": "gt prime --hookup"}]}]}}`,
			hookType: "SessionStart",
			want:     false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := check.usesSessionStartScript(tt.content, tt.hookType)
			if got != tt.want {
				t.Errorf("usesSessionStartScript() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
||||
|
||||
// TestSessionHookCheck_Run exercises the full check against settings.json
// files laid out on disk: town root and rig-level locations, valid (--hook)
// and invalid (bare gt prime) hook commands, and the empty-town case.
func TestSessionHookCheck_Run(t *testing.T) {
	t.Run("bare gt prime warns", func(t *testing.T) {
		tmpDir := t.TempDir()
		claudeDir := filepath.Join(tmpDir, ".claude")
		if err := os.MkdirAll(claudeDir, 0755); err != nil {
			t.Fatal(err)
		}

		settings := `{"hooks": {"SessionStart": [{"hooks": [{"type": "command", "command": "gt prime"}]}]}}`
		if err := os.WriteFile(filepath.Join(claudeDir, "settings.json"), []byte(settings), 0644); err != nil {
			t.Fatal(err)
		}

		check := NewSessionHookCheck()
		ctx := &CheckContext{TownRoot: tmpDir}
		result := check.Run(ctx)

		if result.Status != StatusWarning {
			t.Errorf("expected StatusWarning, got %v", result.Status)
		}
	})

	t.Run("gt prime --hook passes", func(t *testing.T) {
		tmpDir := t.TempDir()
		claudeDir := filepath.Join(tmpDir, ".claude")
		if err := os.MkdirAll(claudeDir, 0755); err != nil {
			t.Fatal(err)
		}

		settings := `{"hooks": {"SessionStart": [{"hooks": [{"type": "command", "command": "gt prime --hook"}]}]}}`
		if err := os.WriteFile(filepath.Join(claudeDir, "settings.json"), []byte(settings), 0644); err != nil {
			t.Fatal(err)
		}

		check := NewSessionHookCheck()
		ctx := &CheckContext{TownRoot: tmpDir}
		result := check.Run(ctx)

		if result.Status != StatusOK {
			t.Errorf("expected StatusOK, got %v: %v", result.Status, result.Details)
		}
	})

	t.Run("rig-level settings with --hook passes", func(t *testing.T) {
		tmpDir := t.TempDir()

		// A "crew" subdirectory marks the directory as a rig.
		rigDir := filepath.Join(tmpDir, "myrig")
		if err := os.MkdirAll(filepath.Join(rigDir, "crew"), 0755); err != nil {
			t.Fatal(err)
		}
		claudeDir := filepath.Join(rigDir, ".claude")
		if err := os.MkdirAll(claudeDir, 0755); err != nil {
			t.Fatal(err)
		}

		settings := `{"hooks": {"SessionStart": [{"hooks": [{"type": "command", "command": "gt prime --hook"}]}]}}`
		if err := os.WriteFile(filepath.Join(claudeDir, "settings.json"), []byte(settings), 0644); err != nil {
			t.Fatal(err)
		}

		check := NewSessionHookCheck()
		ctx := &CheckContext{TownRoot: tmpDir}
		result := check.Run(ctx)

		if result.Status != StatusOK {
			t.Errorf("expected StatusOK for rig-level settings, got %v: %v", result.Status, result.Details)
		}
	})

	t.Run("rig-level bare gt prime warns", func(t *testing.T) {
		tmpDir := t.TempDir()

		// A "polecats" subdirectory also marks the directory as a rig.
		rigDir := filepath.Join(tmpDir, "myrig")
		if err := os.MkdirAll(filepath.Join(rigDir, "polecats"), 0755); err != nil {
			t.Fatal(err)
		}
		claudeDir := filepath.Join(rigDir, ".claude")
		if err := os.MkdirAll(claudeDir, 0755); err != nil {
			t.Fatal(err)
		}

		settings := `{"hooks": {"SessionStart": [{"hooks": [{"type": "command", "command": "gt prime"}]}]}}`
		if err := os.WriteFile(filepath.Join(claudeDir, "settings.json"), []byte(settings), 0644); err != nil {
			t.Fatal(err)
		}

		check := NewSessionHookCheck()
		ctx := &CheckContext{TownRoot: tmpDir}
		result := check.Run(ctx)

		if result.Status != StatusWarning {
			t.Errorf("expected StatusWarning for rig-level bare gt prime, got %v", result.Status)
		}
	})

	t.Run("mixed valid and invalid hooks warns", func(t *testing.T) {
		tmpDir := t.TempDir()
		claudeDir := filepath.Join(tmpDir, ".claude")
		if err := os.MkdirAll(claudeDir, 0755); err != nil {
			t.Fatal(err)
		}

		// SessionStart is valid (--hook); PreCompact is the lone offender.
		settings := `{"hooks": {"SessionStart": [{"hooks": [{"type": "command", "command": "gt prime --hook"}]}], "PreCompact": [{"hooks": [{"type": "command", "command": "gt prime"}]}]}}`
		if err := os.WriteFile(filepath.Join(claudeDir, "settings.json"), []byte(settings), 0644); err != nil {
			t.Fatal(err)
		}

		check := NewSessionHookCheck()
		ctx := &CheckContext{TownRoot: tmpDir}
		result := check.Run(ctx)

		if result.Status != StatusWarning {
			t.Errorf("expected StatusWarning when PreCompact is invalid, got %v", result.Status)
		}
		if len(result.Details) != 1 {
			t.Errorf("expected 1 issue (PreCompact), got %d: %v", len(result.Details), result.Details)
		}
	})

	t.Run("no settings files returns OK", func(t *testing.T) {
		tmpDir := t.TempDir()

		check := NewSessionHookCheck()
		ctx := &CheckContext{TownRoot: tmpDir}
		result := check.Run(ctx)

		if result.Status != StatusOK {
			t.Errorf("expected StatusOK when no settings files, got %v", result.Status)
		}
	})
}
|
||||
129
internal/doctor/formula_check.go
Normal file
129
internal/doctor/formula_check.go
Normal file
@@ -0,0 +1,129 @@
|
||||
package doctor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/formula"
|
||||
)
|
||||
|
||||
// FormulaCheck verifies that embedded formulas are up-to-date.
// It detects outdated formulas (binary updated), missing formulas (user deleted),
// and modified formulas (user customized). Can auto-fix outdated and missing.
// Modified formulas are reported but never overwritten (see Fix).
type FormulaCheck struct {
	// FixableCheck embeds BaseCheck and marks this check as auto-fixable.
	FixableCheck
}
|
||||
// NewFormulaCheck creates a new formula check.
func NewFormulaCheck() *FormulaCheck {
	return &FormulaCheck{
		FixableCheck: FixableCheck{
			BaseCheck: BaseCheck{
				CheckName:        "formulas",
				CheckDescription: "Check embedded formulas are up-to-date",
			},
		},
	}
}
|
||||
|
||||
// Run checks if formulas need updating.
// It summarizes per-formula status (outdated/missing/new/untracked/modified)
// into a single result. Only "modified" does not trigger a fix hint:
// locally customized formulas are reported but left alone.
func (c *FormulaCheck) Run(ctx *CheckContext) *CheckResult {
	report, err := formula.CheckFormulaHealth(ctx.TownRoot)
	if err != nil {
		// Health scan failure is a warning, not an error - the town may
		// simply not have formulas provisioned yet.
		return &CheckResult{
			Name:    c.Name(),
			Status:  StatusWarning,
			Message: fmt.Sprintf("Could not check formulas: %v", err),
		}
	}

	// All good
	if report.Outdated == 0 && report.Missing == 0 && report.Modified == 0 && report.New == 0 && report.Untracked == 0 {
		return &CheckResult{
			Name:    c.Name(),
			Status:  StatusOK,
			Message: fmt.Sprintf("%d formulas up-to-date", report.OK),
		}
	}

	// Build details - one line per formula that isn't plain OK.
	var details []string
	var needsFix bool

	for _, f := range report.Formulas {
		switch f.Status {
		case "outdated":
			details = append(details, fmt.Sprintf("  %s: update available", f.Name))
			needsFix = true
		case "missing":
			details = append(details, fmt.Sprintf("  %s: missing (will reinstall)", f.Name))
			needsFix = true
		case "modified":
			// User customization - reported but deliberately not fixable.
			details = append(details, fmt.Sprintf("  %s: locally modified (skipping)", f.Name))
		case "new":
			details = append(details, fmt.Sprintf("  %s: new formula available", f.Name))
			needsFix = true
		case "untracked":
			details = append(details, fmt.Sprintf("  %s: untracked (will update)", f.Name))
			needsFix = true
		}
	}

	// Determine status - modified-only reports stay OK.
	status := StatusOK
	if needsFix {
		status = StatusWarning
	}

	// Build message summarizing counts per category.
	var parts []string
	if report.Outdated > 0 {
		parts = append(parts, fmt.Sprintf("%d outdated", report.Outdated))
	}
	if report.Missing > 0 {
		parts = append(parts, fmt.Sprintf("%d missing", report.Missing))
	}
	if report.New > 0 {
		parts = append(parts, fmt.Sprintf("%d new", report.New))
	}
	if report.Untracked > 0 {
		parts = append(parts, fmt.Sprintf("%d untracked", report.Untracked))
	}
	if report.Modified > 0 {
		parts = append(parts, fmt.Sprintf("%d modified", report.Modified))
	}

	message := fmt.Sprintf("Formulas: %s", strings.Join(parts, ", "))

	result := &CheckResult{
		Name:    c.Name(),
		Status:  status,
		Message: message,
		Details: details,
	}

	if needsFix {
		result.FixHint = "Run 'gt doctor --fix' to update formulas"
	}

	return result
}
|
||||
|
||||
// Fix updates outdated and missing formulas.
|
||||
func (c *FormulaCheck) Fix(ctx *CheckContext) error {
|
||||
updated, skipped, reinstalled, err := formula.UpdateFormulas(ctx.TownRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Log what was done (caller will re-run check to show new status)
|
||||
if updated > 0 || reinstalled > 0 || skipped > 0 {
|
||||
// The doctor framework will re-run the check after fix
|
||||
// so we don't need to log here
|
||||
_ = updated
|
||||
_ = reinstalled
|
||||
_ = skipped
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
103
internal/doctor/formula_check_test.go
Normal file
103
internal/doctor/formula_check_test.go
Normal file
@@ -0,0 +1,103 @@
|
||||
package doctor
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/formula"
|
||||
)
|
||||
|
||||
// TestNewFormulaCheck verifies the constructor sets the check name and
// marks the check as auto-fixable.
func TestNewFormulaCheck(t *testing.T) {
	check := NewFormulaCheck()
	if check.Name() != "formulas" {
		t.Errorf("Name() = %q, want %q", check.Name(), "formulas")
	}
	if !check.CanFix() {
		t.Error("FormulaCheck should be fixable")
	}
}
|
||||
|
||||
// TestFormulaCheck_Run_AllOK verifies a freshly provisioned town reports OK.
func TestFormulaCheck_Run_AllOK(t *testing.T) {
	tmpDir := t.TempDir()

	// Provision formulas fresh
	_, err := formula.ProvisionFormulas(tmpDir)
	if err != nil {
		t.Fatalf("ProvisionFormulas() error: %v", err)
	}

	check := NewFormulaCheck()
	ctx := &CheckContext{TownRoot: tmpDir}

	result := check.Run(ctx)

	if result.Status != StatusOK {
		t.Errorf("Status = %v, want %v", result.Status, StatusOK)
	}
}
|
||||
|
||||
// TestFormulaCheck_Run_Missing verifies that deleting a provisioned formula
// produces a warning with a fix hint.
func TestFormulaCheck_Run_Missing(t *testing.T) {
	tmpDir := t.TempDir()

	// Provision formulas
	_, err := formula.ProvisionFormulas(tmpDir)
	if err != nil {
		t.Fatalf("ProvisionFormulas() error: %v", err)
	}

	// Delete a formula to simulate user removal.
	formulasDir := filepath.Join(tmpDir, ".beads", "formulas")
	formulaPath := filepath.Join(formulasDir, "mol-deacon-patrol.formula.toml")
	if err := os.Remove(formulaPath); err != nil {
		t.Fatal(err)
	}

	check := NewFormulaCheck()
	ctx := &CheckContext{TownRoot: tmpDir}

	result := check.Run(ctx)

	if result.Status != StatusWarning {
		t.Errorf("Status = %v, want %v", result.Status, StatusWarning)
	}
	if result.FixHint == "" {
		t.Error("should have FixHint")
	}
}
|
||||
|
||||
// TestFormulaCheck_Fix verifies that Fix restores a deleted formula and
// that a subsequent Run reports OK.
func TestFormulaCheck_Fix(t *testing.T) {
	tmpDir := t.TempDir()

	// Provision formulas
	_, err := formula.ProvisionFormulas(tmpDir)
	if err != nil {
		t.Fatalf("ProvisionFormulas() error: %v", err)
	}

	// Delete a formula to give Fix something to repair.
	formulasDir := filepath.Join(tmpDir, ".beads", "formulas")
	formulaPath := filepath.Join(formulasDir, "mol-deacon-patrol.formula.toml")
	if err := os.Remove(formulaPath); err != nil {
		t.Fatal(err)
	}

	check := NewFormulaCheck()
	ctx := &CheckContext{TownRoot: tmpDir}

	// Run fix
	if err := check.Fix(ctx); err != nil {
		t.Fatalf("Fix() error: %v", err)
	}

	// Verify formula was restored
	if _, err := os.Stat(formulaPath); os.IsNotExist(err) {
		t.Error("formula should have been restored")
	}

	// Re-run check - should be OK now
	result := check.Run(ctx)
	if result.Status != StatusOK {
		t.Errorf("after fix, Status = %v, want %v", result.Status, StatusOK)
	}
}
|
||||
110
internal/doctor/global_state_check.go
Normal file
110
internal/doctor/global_state_check.go
Normal file
@@ -0,0 +1,110 @@
|
||||
// ABOUTME: Doctor check for Gas Town global state configuration.
|
||||
// ABOUTME: Validates that state directories and shell integration are properly configured.
|
||||
|
||||
package doctor
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/shell"
|
||||
"github.com/steveyegge/gastown/internal/state"
|
||||
)
|
||||
|
||||
// GlobalStateCheck validates Gas Town's global state file and the
// associated shell integration (rc-file marker and hook script).
type GlobalStateCheck struct {
	BaseCheck
}
|
||||
|
||||
// NewGlobalStateCheck creates a new global-state check.
func NewGlobalStateCheck() *GlobalStateCheck {
	return &GlobalStateCheck{
		BaseCheck: BaseCheck{
			CheckName:        "global-state",
			CheckDescription: "Validates Gas Town global state and shell integration",
		},
	}
}
|
||||
|
||||
func (c *GlobalStateCheck) Run(ctx *CheckContext) *CheckResult {
|
||||
result := &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusOK,
|
||||
}
|
||||
|
||||
var details []string
|
||||
var warnings []string
|
||||
var errors []string
|
||||
|
||||
s, err := state.Load()
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
result.Message = "Global state not initialized"
|
||||
result.FixHint = "Run: gt enable"
|
||||
result.Status = StatusWarning
|
||||
return result
|
||||
}
|
||||
result.Message = "Cannot read global state"
|
||||
result.Details = []string{err.Error()}
|
||||
result.Status = StatusError
|
||||
return result
|
||||
}
|
||||
|
||||
if s.Enabled {
|
||||
details = append(details, "Gas Town: enabled")
|
||||
} else {
|
||||
details = append(details, "Gas Town: disabled")
|
||||
warnings = append(warnings, "Gas Town is disabled globally")
|
||||
}
|
||||
|
||||
if s.Version != "" {
|
||||
details = append(details, "Version: "+s.Version)
|
||||
}
|
||||
|
||||
if s.MachineID != "" {
|
||||
details = append(details, "Machine ID: "+s.MachineID)
|
||||
}
|
||||
|
||||
rcPath := shell.RCFilePath(shell.DetectShell())
|
||||
if hasShellIntegration(rcPath) {
|
||||
details = append(details, "Shell integration: installed ("+rcPath+")")
|
||||
} else {
|
||||
warnings = append(warnings, "Shell integration not installed")
|
||||
}
|
||||
|
||||
hookPath := filepath.Join(state.ConfigDir(), "shell-hook.sh")
|
||||
if _, err := os.Stat(hookPath); err == nil {
|
||||
details = append(details, "Hook script: present")
|
||||
} else {
|
||||
if hasShellIntegration(rcPath) {
|
||||
errors = append(errors, "Hook script missing but shell integration installed")
|
||||
}
|
||||
}
|
||||
|
||||
result.Details = details
|
||||
|
||||
if len(errors) > 0 {
|
||||
result.Status = StatusError
|
||||
result.Message = errors[0]
|
||||
result.FixHint = "Run: gt install --shell"
|
||||
} else if len(warnings) > 0 {
|
||||
result.Status = StatusWarning
|
||||
result.Message = warnings[0]
|
||||
if !s.Enabled {
|
||||
result.FixHint = "Run: gt enable"
|
||||
} else {
|
||||
result.FixHint = "Run: gt install --shell"
|
||||
}
|
||||
} else {
|
||||
result.Message = "Global state healthy"
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func hasShellIntegration(rcPath string) bool {
|
||||
data, err := os.ReadFile(rcPath)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return strings.Contains(string(data), "Gas Town Integration")
|
||||
}
|
||||
119
internal/doctor/gtroot_check.go
Normal file
119
internal/doctor/gtroot_check.go
Normal file
@@ -0,0 +1,119 @@
|
||||
package doctor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
)
|
||||
|
||||
// GTRootCheck verifies that tmux sessions have GT_ROOT set.
// Sessions without GT_ROOT cannot find town-level formulas.
type GTRootCheck struct {
	BaseCheck
	// tmux is the environment accessor; nil means use real tmux (see Run).
	tmux TmuxEnvGetter // nil means use real tmux
}
|
||||
|
||||
// TmuxEnvGetter abstracts tmux environment access for testing.
type TmuxEnvGetter interface {
	// ListSessions returns the names of all tmux sessions.
	ListSessions() ([]string, error)
	// GetEnvironment returns the value of key in the session's environment.
	GetEnvironment(session, key string) (string, error)
}
|
||||
|
||||
// realTmux wraps real tmux operations.
// It adapts *tmux.Tmux to the TmuxEnvGetter interface.
type realTmux struct {
	t *tmux.Tmux
}

// ListSessions delegates to the underlying tmux client.
func (r *realTmux) ListSessions() ([]string, error) {
	return r.t.ListSessions()
}

// GetEnvironment delegates to the underlying tmux client.
func (r *realTmux) GetEnvironment(session, key string) (string, error) {
	return r.t.GetEnvironment(session, key)
}
|
||||
|
||||
// NewGTRootCheck creates a new GT_ROOT check.
|
||||
func NewGTRootCheck() *GTRootCheck {
|
||||
return >RootCheck{
|
||||
BaseCheck: BaseCheck{
|
||||
CheckName: "gt-root-env",
|
||||
CheckDescription: "Verify sessions have GT_ROOT set for formula discovery",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewGTRootCheckWithTmux creates a check with a custom tmux interface (for testing).
func NewGTRootCheckWithTmux(t TmuxEnvGetter) *GTRootCheck {
	c := NewGTRootCheck()
	c.tmux = t
	return c
}
}
|
||||
|
||||
// Run checks GT_ROOT environment variable for all Gas Town sessions.
|
||||
func (c *GTRootCheck) Run(ctx *CheckContext) *CheckResult {
|
||||
t := c.tmux
|
||||
if t == nil {
|
||||
t = &realTmux{t: tmux.NewTmux()}
|
||||
}
|
||||
|
||||
sessions, err := t.ListSessions()
|
||||
if err != nil {
|
||||
// No tmux server - not an error, Gas Town might just be down
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusOK,
|
||||
Message: "No tmux sessions running",
|
||||
}
|
||||
}
|
||||
|
||||
// Filter to Gas Town sessions (gt-* and hq-*)
|
||||
var gtSessions []string
|
||||
for _, sess := range sessions {
|
||||
if strings.HasPrefix(sess, "gt-") || strings.HasPrefix(sess, "hq-") {
|
||||
gtSessions = append(gtSessions, sess)
|
||||
}
|
||||
}
|
||||
|
||||
if len(gtSessions) == 0 {
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusOK,
|
||||
Message: "No Gas Town sessions running",
|
||||
}
|
||||
}
|
||||
|
||||
var missingSessions []string
|
||||
var okCount int
|
||||
|
||||
for _, sess := range gtSessions {
|
||||
gtRoot, err := t.GetEnvironment(sess, "GT_ROOT")
|
||||
if err != nil || gtRoot == "" {
|
||||
missingSessions = append(missingSessions, sess)
|
||||
} else {
|
||||
okCount++
|
||||
}
|
||||
}
|
||||
|
||||
if len(missingSessions) == 0 {
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusOK,
|
||||
Message: fmt.Sprintf("All %d session(s) have GT_ROOT set", okCount),
|
||||
}
|
||||
}
|
||||
|
||||
details := make([]string, 0, len(missingSessions)+2)
|
||||
for _, sess := range missingSessions {
|
||||
details = append(details, fmt.Sprintf("Missing GT_ROOT: %s", sess))
|
||||
}
|
||||
details = append(details, "", "Sessions without GT_ROOT cannot find town-level formulas.")
|
||||
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusWarning,
|
||||
Message: fmt.Sprintf("%d session(s) missing GT_ROOT environment variable", len(missingSessions)),
|
||||
Details: details,
|
||||
FixHint: "Restart sessions to pick up GT_ROOT: gt shutdown && gt up",
|
||||
}
|
||||
}
|
||||
147
internal/doctor/gtroot_check_test.go
Normal file
147
internal/doctor/gtroot_check_test.go
Normal file
@@ -0,0 +1,147 @@
|
||||
package doctor
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// mockTmuxEnv implements TmuxEnvGetter for testing.
type mockTmuxEnv struct {
	sessions map[string]map[string]string // session -> env vars
	listErr  error                        // returned by ListSessions when non-nil
	getErr   error                        // returned by GetEnvironment when non-nil
}

// ListSessions returns the configured session names (map order, so
// unordered) or the injected error.
func (m *mockTmuxEnv) ListSessions() ([]string, error) {
	if m.listErr != nil {
		return nil, m.listErr
	}
	sessions := make([]string, 0, len(m.sessions))
	for s := range m.sessions {
		sessions = append(sessions, s)
	}
	return sessions, nil
}

// GetEnvironment returns the configured env value for session/key, the
// injected error, or "" for an unknown session.
func (m *mockTmuxEnv) GetEnvironment(session, key string) (string, error) {
	if m.getErr != nil {
		return "", m.getErr
	}
	if env, ok := m.sessions[session]; ok {
		return env[key], nil
	}
	return "", nil
}
|
||||
|
||||
func TestGTRootCheck_NoSessions(t *testing.T) {
|
||||
mock := &mockTmuxEnv{sessions: map[string]map[string]string{}}
|
||||
check := NewGTRootCheckWithTmux(mock)
|
||||
|
||||
result := check.Run(&CheckContext{})
|
||||
|
||||
if result.Status != StatusOK {
|
||||
t.Errorf("expected StatusOK, got %v", result.Status)
|
||||
}
|
||||
if result.Message != "No Gas Town sessions running" {
|
||||
t.Errorf("unexpected message: %s", result.Message)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGTRootCheck_NoGasTownSessions(t *testing.T) {
|
||||
mock := &mockTmuxEnv{
|
||||
sessions: map[string]map[string]string{
|
||||
"other-session": {"SOME_VAR": "value"},
|
||||
},
|
||||
}
|
||||
check := NewGTRootCheckWithTmux(mock)
|
||||
|
||||
result := check.Run(&CheckContext{})
|
||||
|
||||
if result.Status != StatusOK {
|
||||
t.Errorf("expected StatusOK, got %v", result.Status)
|
||||
}
|
||||
if result.Message != "No Gas Town sessions running" {
|
||||
t.Errorf("unexpected message: %s", result.Message)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGTRootCheck_AllSessionsHaveGTRoot(t *testing.T) {
|
||||
mock := &mockTmuxEnv{
|
||||
sessions: map[string]map[string]string{
|
||||
"hq-mayor": {"GT_ROOT": "/home/user/gt", "GT_ROLE": "mayor"},
|
||||
"hq-deacon": {"GT_ROOT": "/home/user/gt", "GT_ROLE": "deacon"},
|
||||
"gt-myrig-witness": {"GT_ROOT": "/home/user/gt", "GT_ROLE": "witness"},
|
||||
"gt-myrig-refinery": {"GT_ROOT": "/home/user/gt", "GT_ROLE": "refinery"},
|
||||
},
|
||||
}
|
||||
check := NewGTRootCheckWithTmux(mock)
|
||||
|
||||
result := check.Run(&CheckContext{})
|
||||
|
||||
if result.Status != StatusOK {
|
||||
t.Errorf("expected StatusOK, got %v", result.Status)
|
||||
}
|
||||
if result.Message != "All 4 session(s) have GT_ROOT set" {
|
||||
t.Errorf("unexpected message: %s", result.Message)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGTRootCheck_MissingGTRoot(t *testing.T) {
|
||||
mock := &mockTmuxEnv{
|
||||
sessions: map[string]map[string]string{
|
||||
"hq-mayor": {"GT_ROOT": "/home/user/gt"},
|
||||
"gt-myrig-witness": {"GT_ROLE": "witness"}, // Missing GT_ROOT
|
||||
"gt-myrig-refinery": {"GT_ROLE": "refinery"}, // Missing GT_ROOT
|
||||
},
|
||||
}
|
||||
check := NewGTRootCheckWithTmux(mock)
|
||||
|
||||
result := check.Run(&CheckContext{})
|
||||
|
||||
if result.Status != StatusWarning {
|
||||
t.Errorf("expected StatusWarning, got %v", result.Status)
|
||||
}
|
||||
if result.Message != "2 session(s) missing GT_ROOT environment variable" {
|
||||
t.Errorf("unexpected message: %s", result.Message)
|
||||
}
|
||||
if result.FixHint != "Restart sessions to pick up GT_ROOT: gt shutdown && gt up" {
|
||||
t.Errorf("unexpected fix hint: %s", result.FixHint)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGTRootCheck_EmptyGTRoot(t *testing.T) {
|
||||
mock := &mockTmuxEnv{
|
||||
sessions: map[string]map[string]string{
|
||||
"hq-mayor": {"GT_ROOT": ""}, // Empty GT_ROOT should be treated as missing
|
||||
},
|
||||
}
|
||||
check := NewGTRootCheckWithTmux(mock)
|
||||
|
||||
result := check.Run(&CheckContext{})
|
||||
|
||||
if result.Status != StatusWarning {
|
||||
t.Errorf("expected StatusWarning, got %v", result.Status)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGTRootCheck_MixedPrefixes(t *testing.T) {
|
||||
// Test that both gt-* and hq-* sessions are checked
|
||||
mock := &mockTmuxEnv{
|
||||
sessions: map[string]map[string]string{
|
||||
"hq-mayor": {"GT_ROOT": "/home/user/gt"},
|
||||
"gt-rig-witness": {"GT_ROOT": "/home/user/gt"},
|
||||
"other-session": {}, // Should be ignored
|
||||
"random": {}, // Should be ignored
|
||||
},
|
||||
}
|
||||
check := NewGTRootCheckWithTmux(mock)
|
||||
|
||||
result := check.Run(&CheckContext{})
|
||||
|
||||
if result.Status != StatusOK {
|
||||
t.Errorf("expected StatusOK, got %v", result.Status)
|
||||
}
|
||||
// Should only count the 2 Gas Town sessions
|
||||
if result.Message != "All 2 session(s) have GT_ROOT set" {
|
||||
t.Errorf("unexpected message: %s", result.Message)
|
||||
}
|
||||
}
|
||||
@@ -225,26 +225,26 @@ func (c *OrphanSessionCheck) isValidSession(sess string, validRigs []string, may
|
||||
return true
|
||||
}
|
||||
|
||||
// OrphanProcessCheck detects orphaned Claude/claude-code processes
|
||||
// that are not associated with a Gas Town tmux session.
|
||||
// OrphanProcessCheck detects runtime processes that are not
|
||||
// running inside a tmux session. These may be user's personal sessions
|
||||
// or legitimately orphaned processes from crashed Gas Town sessions.
|
||||
// This check is informational only - it does not auto-fix since we cannot
|
||||
// distinguish user sessions from orphaned Gas Town processes.
|
||||
type OrphanProcessCheck struct {
|
||||
FixableCheck
|
||||
orphanPIDs []int // Cached during Run for use in Fix
|
||||
BaseCheck
|
||||
}
|
||||
|
||||
// NewOrphanProcessCheck creates a new orphan process check.
|
||||
func NewOrphanProcessCheck() *OrphanProcessCheck {
|
||||
return &OrphanProcessCheck{
|
||||
FixableCheck: FixableCheck{
|
||||
BaseCheck: BaseCheck{
|
||||
CheckName: "orphan-processes",
|
||||
CheckDescription: "Detect orphaned Claude processes",
|
||||
},
|
||||
BaseCheck: BaseCheck{
|
||||
CheckName: "orphan-processes",
|
||||
CheckDescription: "Detect runtime processes outside tmux",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Run checks for orphaned Claude processes.
|
||||
// Run checks for runtime processes running outside tmux.
|
||||
func (c *OrphanProcessCheck) Run(ctx *CheckContext) *CheckResult {
|
||||
// Get list of tmux session PIDs
|
||||
tmuxPIDs, err := c.getTmuxSessionPIDs()
|
||||
@@ -257,164 +257,60 @@ func (c *OrphanProcessCheck) Run(ctx *CheckContext) *CheckResult {
|
||||
}
|
||||
}
|
||||
|
||||
// Find Claude processes
|
||||
claudeProcs, err := c.findClaudeProcesses()
|
||||
// Find runtime processes
|
||||
runtimeProcs, err := c.findRuntimeProcesses()
|
||||
if err != nil {
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusWarning,
|
||||
Message: "Could not list Claude processes",
|
||||
Message: "Could not list runtime processes",
|
||||
Details: []string{err.Error()},
|
||||
}
|
||||
}
|
||||
|
||||
if len(claudeProcs) == 0 {
|
||||
if len(runtimeProcs) == 0 {
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusOK,
|
||||
Message: "No Claude processes found",
|
||||
Message: "No runtime processes found",
|
||||
}
|
||||
}
|
||||
|
||||
// Check which Claude processes are orphaned
|
||||
var orphans []processInfo
|
||||
var validCount int
|
||||
// Check which runtime processes are outside tmux
|
||||
var outsideTmux []processInfo
|
||||
var insideTmux int
|
||||
|
||||
for _, proc := range claudeProcs {
|
||||
for _, proc := range runtimeProcs {
|
||||
if c.isOrphanProcess(proc, tmuxPIDs) {
|
||||
orphans = append(orphans, proc)
|
||||
outsideTmux = append(outsideTmux, proc)
|
||||
} else {
|
||||
validCount++
|
||||
insideTmux++
|
||||
}
|
||||
}
|
||||
|
||||
// Cache orphan PIDs for Fix
|
||||
c.orphanPIDs = make([]int, len(orphans))
|
||||
for i, p := range orphans {
|
||||
c.orphanPIDs[i] = p.pid
|
||||
}
|
||||
|
||||
if len(orphans) == 0 {
|
||||
if len(outsideTmux) == 0 {
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusOK,
|
||||
Message: fmt.Sprintf("All %d Claude processes have valid parents", validCount),
|
||||
Message: fmt.Sprintf("All %d runtime processes are inside tmux", insideTmux),
|
||||
}
|
||||
}
|
||||
|
||||
details := make([]string, len(orphans))
|
||||
for i, proc := range orphans {
|
||||
details[i] = fmt.Sprintf("PID %d: %s (parent: %d)", proc.pid, proc.cmd, proc.ppid)
|
||||
details := make([]string, 0, len(outsideTmux)+2)
|
||||
details = append(details, "These may be your personal sessions or orphaned Gas Town processes.")
|
||||
details = append(details, "Verify these are expected before manually killing any:")
|
||||
for _, proc := range outsideTmux {
|
||||
details = append(details, fmt.Sprintf(" PID %d: %s (parent: %d)", proc.pid, proc.cmd, proc.ppid))
|
||||
}
|
||||
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusWarning,
|
||||
Message: fmt.Sprintf("Found %d orphaned Claude process(es)", len(orphans)),
|
||||
Message: fmt.Sprintf("Found %d runtime process(es) running outside tmux", len(outsideTmux)),
|
||||
Details: details,
|
||||
FixHint: "Run 'gt doctor --fix' to kill orphaned processes",
|
||||
}
|
||||
}
|
||||
|
||||
// Fix kills orphaned processes, with safeguards for crew sessions.
|
||||
func (c *OrphanProcessCheck) Fix(ctx *CheckContext) error {
|
||||
if len(c.orphanPIDs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SAFEGUARD: Get crew session pane PIDs to avoid killing crew processes.
|
||||
// Even if a process appears orphaned, if its parent is a crew session pane,
|
||||
// we should not kill it (the detection might be wrong).
|
||||
crewPanePIDs := c.getCrewSessionPanePIDs()
|
||||
|
||||
var lastErr error
|
||||
for _, pid := range c.orphanPIDs {
|
||||
// Check if this process has a crew session ancestor
|
||||
if c.hasCrewAncestor(pid, crewPanePIDs) {
|
||||
// Skip - this process might belong to a crew session
|
||||
continue
|
||||
}
|
||||
|
||||
proc, err := os.FindProcess(pid)
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
if err := proc.Signal(os.Interrupt); err != nil {
|
||||
// Try SIGKILL if SIGINT fails
|
||||
if killErr := proc.Kill(); killErr != nil {
|
||||
lastErr = killErr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return lastErr
|
||||
}
|
||||
|
||||
// getCrewSessionPanePIDs returns pane PIDs for all crew sessions.
|
||||
func (c *OrphanProcessCheck) getCrewSessionPanePIDs() map[int]bool {
|
||||
pids := make(map[int]bool)
|
||||
|
||||
t := tmux.NewTmux()
|
||||
sessions, err := t.ListSessions()
|
||||
if err != nil {
|
||||
return pids
|
||||
}
|
||||
|
||||
for _, session := range sessions {
|
||||
if !isCrewSession(session) {
|
||||
continue
|
||||
}
|
||||
// Get pane PIDs for this crew session
|
||||
out, err := exec.Command("tmux", "list-panes", "-t", session, "-F", "#{pane_pid}").Output()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
|
||||
var pid int
|
||||
if _, err := fmt.Sscanf(line, "%d", &pid); err == nil {
|
||||
pids[pid] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return pids
|
||||
}
|
||||
|
||||
// hasCrewAncestor checks if a process has a crew session pane as an ancestor.
|
||||
func (c *OrphanProcessCheck) hasCrewAncestor(pid int, crewPanePIDs map[int]bool) bool {
|
||||
if len(crewPanePIDs) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Walk up the process tree
|
||||
currentPID := pid
|
||||
visited := make(map[int]bool)
|
||||
|
||||
for currentPID > 1 && !visited[currentPID] {
|
||||
visited[currentPID] = true
|
||||
|
||||
// Check if this PID is a crew pane
|
||||
if crewPanePIDs[currentPID] {
|
||||
return true
|
||||
}
|
||||
|
||||
// Get parent PID
|
||||
out, err := exec.Command("ps", "-p", fmt.Sprintf("%d", currentPID), "-o", "ppid=").Output() //nolint:gosec // G204: PID is numeric from internal state
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
var ppid int
|
||||
if _, err := fmt.Sscanf(strings.TrimSpace(string(out)), "%d", &ppid); err != nil {
|
||||
break
|
||||
}
|
||||
currentPID = ppid
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
type processInfo struct {
|
||||
pid int
|
||||
ppid int
|
||||
@@ -462,21 +358,20 @@ func (c *OrphanProcessCheck) getTmuxSessionPIDs() (map[int]bool, error) { //noli
|
||||
return pids, nil
|
||||
}
|
||||
|
||||
// findClaudeProcesses finds all running claude/claude-code CLI processes.
|
||||
// findRuntimeProcesses finds all running runtime CLI processes.
|
||||
// Excludes Claude.app desktop application and its helpers.
|
||||
func (c *OrphanProcessCheck) findClaudeProcesses() ([]processInfo, error) {
|
||||
func (c *OrphanProcessCheck) findRuntimeProcesses() ([]processInfo, error) {
|
||||
var procs []processInfo
|
||||
|
||||
// Use ps to find claude processes
|
||||
// Look for both "claude" and "claude-code" in command
|
||||
// Use ps to find runtime processes
|
||||
out, err := exec.Command("ps", "-eo", "pid,ppid,comm").Output()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Regex to match claude CLI processes (not Claude.app)
|
||||
// Match: "claude" or paths ending in "/claude"
|
||||
claudePattern := regexp.MustCompile(`(?i)(^claude$|/claude$)`)
|
||||
// Regex to match runtime CLI processes (not Claude.app)
|
||||
// Match: "claude", "claude-code", or "codex" (or paths ending in those)
|
||||
runtimePattern := regexp.MustCompile(`(?i)(^claude$|/claude$|^claude-code$|/claude-code$|^codex$|/codex$)`)
|
||||
|
||||
// Pattern to exclude Claude.app and related desktop processes
|
||||
excludePattern := regexp.MustCompile(`(?i)(Claude\.app|claude-native|chrome-native)`)
|
||||
@@ -487,7 +382,7 @@ func (c *OrphanProcessCheck) findClaudeProcesses() ([]processInfo, error) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if command matches claude CLI
|
||||
// Check if command matches runtime CLI
|
||||
cmd := strings.Join(fields[2:], " ")
|
||||
|
||||
// Skip desktop app processes
|
||||
@@ -495,8 +390,8 @@ func (c *OrphanProcessCheck) findClaudeProcesses() ([]processInfo, error) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Only match CLI claude processes
|
||||
if !claudePattern.MatchString(cmd) {
|
||||
// Only match CLI runtime processes
|
||||
if !runtimePattern.MatchString(cmd) {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -518,7 +413,7 @@ func (c *OrphanProcessCheck) findClaudeProcesses() ([]processInfo, error) {
|
||||
return procs, nil
|
||||
}
|
||||
|
||||
// isOrphanProcess checks if a Claude process is orphaned.
|
||||
// isOrphanProcess checks if a runtime process is orphaned.
|
||||
// A process is orphaned if its parent (or ancestor) is not a tmux session.
|
||||
func (c *OrphanProcessCheck) isOrphanProcess(proc processInfo, tmuxPIDs map[int]bool) bool {
|
||||
// Walk up the process tree looking for a tmux parent
|
||||
|
||||
134
internal/doctor/orphan_check_test.go
Normal file
134
internal/doctor/orphan_check_test.go
Normal file
@@ -0,0 +1,134 @@
|
||||
package doctor
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewOrphanSessionCheck(t *testing.T) {
|
||||
check := NewOrphanSessionCheck()
|
||||
|
||||
if check.Name() != "orphan-sessions" {
|
||||
t.Errorf("expected name 'orphan-sessions', got %q", check.Name())
|
||||
}
|
||||
|
||||
if !check.CanFix() {
|
||||
t.Error("expected CanFix to return true for session check")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewOrphanProcessCheck(t *testing.T) {
|
||||
check := NewOrphanProcessCheck()
|
||||
|
||||
if check.Name() != "orphan-processes" {
|
||||
t.Errorf("expected name 'orphan-processes', got %q", check.Name())
|
||||
}
|
||||
|
||||
// OrphanProcessCheck should NOT be fixable - it's informational only
|
||||
if check.CanFix() {
|
||||
t.Error("expected CanFix to return false for process check (informational only)")
|
||||
}
|
||||
}
|
||||
|
||||
func TestOrphanProcessCheck_Run(t *testing.T) {
|
||||
// This test verifies the check runs without error.
|
||||
// Results depend on whether Claude processes exist in the test environment.
|
||||
check := NewOrphanProcessCheck()
|
||||
ctx := &CheckContext{TownRoot: t.TempDir()}
|
||||
|
||||
result := check.Run(ctx)
|
||||
|
||||
// Should return OK (no processes or all inside tmux) or Warning (processes outside tmux)
|
||||
// Both are valid depending on test environment
|
||||
if result.Status != StatusOK && result.Status != StatusWarning {
|
||||
t.Errorf("expected StatusOK or StatusWarning, got %v: %s", result.Status, result.Message)
|
||||
}
|
||||
|
||||
// If warning, should have informational details
|
||||
if result.Status == StatusWarning {
|
||||
if len(result.Details) < 3 {
|
||||
t.Errorf("expected at least 3 detail lines (2 info + 1 process), got %d", len(result.Details))
|
||||
}
|
||||
// Should NOT have a FixHint since this is informational only
|
||||
if result.FixHint != "" {
|
||||
t.Errorf("expected no FixHint for informational check, got %q", result.FixHint)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestOrphanProcessCheck_MessageContent(t *testing.T) {
|
||||
// Verify the check description is correct
|
||||
check := NewOrphanProcessCheck()
|
||||
|
||||
expectedDesc := "Detect runtime processes outside tmux"
|
||||
if check.Description() != expectedDesc {
|
||||
t.Errorf("expected description %q, got %q", expectedDesc, check.Description())
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsCrewSession(t *testing.T) {
|
||||
tests := []struct {
|
||||
session string
|
||||
want bool
|
||||
}{
|
||||
{"gt-gastown-crew-joe", true},
|
||||
{"gt-beads-crew-max", true},
|
||||
{"gt-rig-crew-a", true},
|
||||
{"gt-gastown-witness", false},
|
||||
{"gt-gastown-refinery", false},
|
||||
{"gt-gastown-polecat1", false},
|
||||
{"hq-deacon", false},
|
||||
{"hq-mayor", false},
|
||||
{"other-session", false},
|
||||
{"gt-crew", false}, // Not enough parts
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.session, func(t *testing.T) {
|
||||
got := isCrewSession(tt.session)
|
||||
if got != tt.want {
|
||||
t.Errorf("isCrewSession(%q) = %v, want %v", tt.session, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestOrphanSessionCheck_IsValidSession(t *testing.T) {
|
||||
check := NewOrphanSessionCheck()
|
||||
validRigs := []string{"gastown", "beads"}
|
||||
mayorSession := "hq-mayor"
|
||||
deaconSession := "hq-deacon"
|
||||
|
||||
tests := []struct {
|
||||
session string
|
||||
want bool
|
||||
}{
|
||||
// Town-level sessions
|
||||
{"hq-mayor", true},
|
||||
{"hq-deacon", true},
|
||||
|
||||
// Valid rig sessions
|
||||
{"gt-gastown-witness", true},
|
||||
{"gt-gastown-refinery", true},
|
||||
{"gt-gastown-polecat1", true},
|
||||
{"gt-beads-witness", true},
|
||||
{"gt-beads-refinery", true},
|
||||
{"gt-beads-crew-max", true},
|
||||
|
||||
// Invalid rig sessions (rig doesn't exist)
|
||||
{"gt-unknown-witness", false},
|
||||
{"gt-foo-refinery", false},
|
||||
|
||||
// Non-gt sessions (should not be checked by this function,
|
||||
// but if called, they'd fail format validation)
|
||||
{"other-session", false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.session, func(t *testing.T) {
|
||||
got := check.isValidSession(tt.session, validRigs, mayorSession, deaconSession)
|
||||
if got != tt.want {
|
||||
t.Errorf("isValidSession(%q) = %v, want %v", tt.session, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
171
internal/doctor/rig_beads_check.go
Normal file
171
internal/doctor/rig_beads_check.go
Normal file
@@ -0,0 +1,171 @@
|
||||
package doctor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
)
|
||||
|
||||
// RigBeadsCheck verifies that rig identity beads exist for all rigs.
|
||||
// Rig identity beads track rig metadata like git URL, prefix, and operational state.
|
||||
// They are created by gt rig add (see gt-zmznh) but may be missing for legacy rigs.
|
||||
type RigBeadsCheck struct {
|
||||
FixableCheck
|
||||
}
|
||||
|
||||
// NewRigBeadsCheck creates a new rig identity beads check.
|
||||
func NewRigBeadsCheck() *RigBeadsCheck {
|
||||
return &RigBeadsCheck{
|
||||
FixableCheck: FixableCheck{
|
||||
BaseCheck: BaseCheck{
|
||||
CheckName: "rig-beads-exist",
|
||||
CheckDescription: "Verify rig identity beads exist for all rigs",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Run checks if rig identity beads exist for all rigs.
|
||||
func (c *RigBeadsCheck) Run(ctx *CheckContext) *CheckResult {
|
||||
// Load routes to get rig info
|
||||
townBeadsDir := filepath.Join(ctx.TownRoot, ".beads")
|
||||
routes, err := beads.LoadRoutes(townBeadsDir)
|
||||
if err != nil {
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusWarning,
|
||||
Message: "Could not load routes.jsonl",
|
||||
}
|
||||
}
|
||||
|
||||
// Build unique rig list from routes
|
||||
// Routes have format: prefix "gt-" -> path "gastown/mayor/rig"
|
||||
rigSet := make(map[string]struct {
|
||||
prefix string
|
||||
beadsPath string
|
||||
})
|
||||
for _, r := range routes {
|
||||
// Extract rig name from path (first component)
|
||||
parts := strings.Split(r.Path, "/")
|
||||
if len(parts) >= 1 && parts[0] != "." {
|
||||
rigName := parts[0]
|
||||
prefix := strings.TrimSuffix(r.Prefix, "-")
|
||||
if _, exists := rigSet[rigName]; !exists {
|
||||
rigSet[rigName] = struct {
|
||||
prefix string
|
||||
beadsPath string
|
||||
}{
|
||||
prefix: prefix,
|
||||
beadsPath: r.Path,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(rigSet) == 0 {
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusOK,
|
||||
Message: "No rigs to check",
|
||||
}
|
||||
}
|
||||
|
||||
var missing []string
|
||||
var checked int
|
||||
|
||||
// Check each rig for its identity bead
|
||||
for rigName, info := range rigSet {
|
||||
rigBeadsPath := filepath.Join(ctx.TownRoot, info.beadsPath)
|
||||
bd := beads.New(rigBeadsPath)
|
||||
|
||||
rigBeadID := beads.RigBeadIDWithPrefix(info.prefix, rigName)
|
||||
if _, err := bd.Show(rigBeadID); err != nil {
|
||||
missing = append(missing, rigBeadID)
|
||||
}
|
||||
checked++
|
||||
}
|
||||
|
||||
if len(missing) == 0 {
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusOK,
|
||||
Message: fmt.Sprintf("All %d rig identity beads exist", checked),
|
||||
}
|
||||
}
|
||||
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusError,
|
||||
Message: fmt.Sprintf("%d rig identity bead(s) missing", len(missing)),
|
||||
Details: missing,
|
||||
FixHint: "Run 'gt doctor --fix' to create missing rig identity beads",
|
||||
}
|
||||
}
|
||||
|
||||
// Fix creates missing rig identity beads.
|
||||
func (c *RigBeadsCheck) Fix(ctx *CheckContext) error {
|
||||
// Load routes to get rig info
|
||||
townBeadsDir := filepath.Join(ctx.TownRoot, ".beads")
|
||||
routes, err := beads.LoadRoutes(townBeadsDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading routes.jsonl: %w", err)
|
||||
}
|
||||
|
||||
// Build unique rig list from routes
|
||||
rigSet := make(map[string]struct {
|
||||
prefix string
|
||||
beadsPath string
|
||||
})
|
||||
for _, r := range routes {
|
||||
parts := strings.Split(r.Path, "/")
|
||||
if len(parts) >= 1 && parts[0] != "." {
|
||||
rigName := parts[0]
|
||||
prefix := strings.TrimSuffix(r.Prefix, "-")
|
||||
if _, exists := rigSet[rigName]; !exists {
|
||||
rigSet[rigName] = struct {
|
||||
prefix string
|
||||
beadsPath string
|
||||
}{
|
||||
prefix: prefix,
|
||||
beadsPath: r.Path,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(rigSet) == 0 {
|
||||
return nil // No rigs to process
|
||||
}
|
||||
|
||||
// Create missing rig identity beads
|
||||
for rigName, info := range rigSet {
|
||||
rigBeadsPath := filepath.Join(ctx.TownRoot, info.beadsPath)
|
||||
bd := beads.New(rigBeadsPath)
|
||||
|
||||
rigBeadID := beads.RigBeadIDWithPrefix(info.prefix, rigName)
|
||||
if _, err := bd.Show(rigBeadID); err != nil {
|
||||
// Bead doesn't exist - create it
|
||||
// Try to get git URL from rig config
|
||||
rigPath := filepath.Join(ctx.TownRoot, rigName)
|
||||
gitURL := ""
|
||||
if cfg, err := rig.LoadRigConfig(rigPath); err == nil {
|
||||
gitURL = cfg.GitURL
|
||||
}
|
||||
|
||||
fields := &beads.RigFields{
|
||||
Repo: gitURL,
|
||||
Prefix: info.prefix,
|
||||
State: "active",
|
||||
}
|
||||
|
||||
if _, err := bd.CreateRigBead(rigBeadID, rigName, fields); err != nil {
|
||||
return fmt.Errorf("creating %s: %w", rigBeadID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -2,6 +2,7 @@ package doctor
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
@@ -698,14 +699,24 @@ func (c *PolecatClonesValidCheck) Run(ctx *CheckContext) *CheckResult {
|
||||
var warnings []string
|
||||
validCount := 0
|
||||
|
||||
// Get rig name for new structure path detection
|
||||
rigName := ctx.RigName
|
||||
|
||||
for _, entry := range entries {
|
||||
if !entry.IsDir() || strings.HasPrefix(entry.Name(), ".") {
|
||||
continue
|
||||
}
|
||||
|
||||
polecatPath := filepath.Join(polecatsDir, entry.Name())
|
||||
polecatName := entry.Name()
|
||||
|
||||
// Determine worktree path (handle both new and old structures)
|
||||
// New structure: polecats/<name>/<rigname>/
|
||||
// Old structure: polecats/<name>/
|
||||
polecatPath := filepath.Join(polecatsDir, polecatName, rigName)
|
||||
if _, err := os.Stat(polecatPath); os.IsNotExist(err) {
|
||||
polecatPath = filepath.Join(polecatsDir, polecatName)
|
||||
}
|
||||
|
||||
// Check if it's a git clone
|
||||
gitPath := filepath.Join(polecatPath, ".git")
|
||||
if _, err := os.Stat(gitPath); os.IsNotExist(err) {
|
||||
@@ -1034,6 +1045,10 @@ func (c *BeadsRedirectCheck) Fix(ctx *CheckContext) error {
|
||||
// Continue - minimal config created
|
||||
} else {
|
||||
_ = output // bd init succeeded
|
||||
// Configure custom types for Gas Town (beads v0.46.0+)
|
||||
configCmd := exec.Command("bd", "config", "set", "types.custom", "agent,role,rig,convoy,event")
|
||||
configCmd.Dir = rigPath
|
||||
_, _ = configCmd.CombinedOutput() // Ignore errors - older beads don't need this
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1075,6 +1090,103 @@ func hasBeadsData(beadsDir string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// BareRepoRefspecCheck verifies that the shared bare repo has the correct refspec configured.
|
||||
// Without this, worktrees created from the bare repo cannot fetch and see origin/* refs.
|
||||
// See: https://github.com/anthropics/gastown/issues/286
|
||||
type BareRepoRefspecCheck struct {
|
||||
FixableCheck
|
||||
}
|
||||
|
||||
// NewBareRepoRefspecCheck creates a new bare repo refspec check.
|
||||
func NewBareRepoRefspecCheck() *BareRepoRefspecCheck {
|
||||
return &BareRepoRefspecCheck{
|
||||
FixableCheck: FixableCheck{
|
||||
BaseCheck: BaseCheck{
|
||||
CheckName: "bare-repo-refspec",
|
||||
CheckDescription: "Verify bare repo has correct refspec for worktrees",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Run checks if the bare repo has the correct remote.origin.fetch refspec.
|
||||
func (c *BareRepoRefspecCheck) Run(ctx *CheckContext) *CheckResult {
|
||||
if ctx.RigName == "" {
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusOK,
|
||||
Message: "No rig specified, skipping bare repo check",
|
||||
}
|
||||
}
|
||||
|
||||
bareRepoPath := filepath.Join(ctx.RigPath(), ".repo.git")
|
||||
if _, err := os.Stat(bareRepoPath); os.IsNotExist(err) {
|
||||
// No bare repo - might be using a different architecture
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusOK,
|
||||
Message: "No shared bare repo found (using individual clones)",
|
||||
}
|
||||
}
|
||||
|
||||
// Check the refspec
|
||||
cmd := exec.Command("git", "-C", bareRepoPath, "config", "--get", "remote.origin.fetch")
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusError,
|
||||
Message: "Bare repo missing remote.origin.fetch refspec",
|
||||
Details: []string{
|
||||
"Worktrees cannot fetch or see origin/* refs without this config",
|
||||
"This breaks refinery merge operations and causes stale origin/main",
|
||||
},
|
||||
FixHint: "Run 'gt doctor --fix' to configure the refspec",
|
||||
}
|
||||
}
|
||||
|
||||
refspec := strings.TrimSpace(string(out))
|
||||
expectedRefspec := "+refs/heads/*:refs/remotes/origin/*"
|
||||
if refspec != expectedRefspec {
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusWarning,
|
||||
Message: "Bare repo has non-standard refspec",
|
||||
Details: []string{
|
||||
fmt.Sprintf("Current: %s", refspec),
|
||||
fmt.Sprintf("Expected: %s", expectedRefspec),
|
||||
},
|
||||
FixHint: "Run 'gt doctor --fix' to update the refspec",
|
||||
}
|
||||
}
|
||||
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusOK,
|
||||
Message: "Bare repo refspec configured correctly",
|
||||
}
|
||||
}
|
||||
|
||||
// Fix sets the correct refspec on the bare repo.
|
||||
func (c *BareRepoRefspecCheck) Fix(ctx *CheckContext) error {
|
||||
if ctx.RigName == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
bareRepoPath := filepath.Join(ctx.RigPath(), ".repo.git")
|
||||
if _, err := os.Stat(bareRepoPath); os.IsNotExist(err) {
|
||||
return nil // No bare repo to fix
|
||||
}
|
||||
|
||||
cmd := exec.Command("git", "-C", bareRepoPath, "config", "remote.origin.fetch", "+refs/heads/*:refs/remotes/origin/*")
|
||||
var stderr bytes.Buffer
|
||||
cmd.Stderr = &stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("setting refspec: %s", strings.TrimSpace(stderr.String()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RigChecks returns all rig-level health checks.
|
||||
func RigChecks() []Check {
|
||||
return []Check{
|
||||
@@ -1082,6 +1194,7 @@ func RigChecks() []Check {
|
||||
NewGitExcludeConfiguredCheck(),
|
||||
NewHooksPathConfiguredCheck(),
|
||||
NewSparseCheckoutCheck(),
|
||||
NewBareRepoRefspecCheck(),
|
||||
NewWitnessExistsCheck(),
|
||||
NewRefineryExistsCheck(),
|
||||
NewMayorCloneExistsCheck(),
|
||||
|
||||
@@ -1,24 +1,124 @@
|
||||
package formula
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"embed"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Generate formulas directory from canonical source at .beads/formulas/
|
||||
//go:generate sh -c "rm -rf formulas && mkdir -p formulas && cp ../../.beads/formulas/*.formula.toml ../../.beads/formulas/*.formula.json formulas/ 2>/dev/null || cp ../../.beads/formulas/*.formula.toml formulas/"
|
||||
//go:generate sh -c "rm -rf formulas && mkdir -p formulas && cp ../../.beads/formulas/*.formula.toml formulas/"
|
||||
|
||||
//go:embed formulas/*.formula.toml
|
||||
var formulasFS embed.FS
|
||||
|
||||
// InstalledRecord tracks which formulas were installed and their checksums.
|
||||
// Stored in .beads/formulas/.installed.json
|
||||
type InstalledRecord struct {
|
||||
Formulas map[string]string `json:"formulas"` // filename -> sha256 at install time
|
||||
}
|
||||
|
||||
// FormulaStatus represents the status of a single formula during health check.
|
||||
type FormulaStatus struct {
|
||||
Name string
|
||||
Status string // "ok", "outdated", "modified", "missing", "new", "untracked"
|
||||
EmbeddedHash string // hash computed from embedded content
|
||||
InstalledHash string // hash we installed (from .installed.json)
|
||||
CurrentHash string // hash of current file on disk
|
||||
}
|
||||
|
||||
// HealthReport contains the results of checking formula health.
|
||||
type HealthReport struct {
|
||||
Formulas []FormulaStatus
|
||||
// Counts
|
||||
OK int
|
||||
Outdated int // embedded changed, user hasn't modified
|
||||
Modified int // user modified the file (tracked in .installed.json)
|
||||
Missing int // file was deleted
|
||||
New int // new formula not yet installed
|
||||
Untracked int // file exists but not in .installed.json (safe to update)
|
||||
}
|
||||
|
||||
// computeHash computes SHA256 hash of data.
|
||||
func computeHash(data []byte) string {
|
||||
hash := sha256.Sum256(data)
|
||||
return hex.EncodeToString(hash[:])
|
||||
}
|
||||
|
||||
// getEmbeddedFormulas returns a map of filename -> sha256 for all embedded formulas.
|
||||
func getEmbeddedFormulas() (map[string]string, error) {
|
||||
entries, err := formulasFS.ReadDir("formulas")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading formulas directory: %w", err)
|
||||
}
|
||||
|
||||
result := make(map[string]string)
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
content, err := formulasFS.ReadFile("formulas/" + entry.Name())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading %s: %w", entry.Name(), err)
|
||||
}
|
||||
result[entry.Name()] = computeHash(content)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// loadInstalledRecord loads the installed record from disk.
|
||||
func loadInstalledRecord(formulasDir string) (*InstalledRecord, error) {
|
||||
path := filepath.Join(formulasDir, ".installed.json")
|
||||
data, err := os.ReadFile(path)
|
||||
if os.IsNotExist(err) {
|
||||
return &InstalledRecord{Formulas: make(map[string]string)}, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading installed record: %w", err)
|
||||
}
|
||||
var r InstalledRecord
|
||||
if err := json.Unmarshal(data, &r); err != nil {
|
||||
return nil, fmt.Errorf("parsing installed record: %w", err)
|
||||
}
|
||||
if r.Formulas == nil {
|
||||
r.Formulas = make(map[string]string)
|
||||
}
|
||||
return &r, nil
|
||||
}
|
||||
|
||||
// saveInstalledRecord saves the installed record to disk.
|
||||
func saveInstalledRecord(formulasDir string, record *InstalledRecord) error {
|
||||
path := filepath.Join(formulasDir, ".installed.json")
|
||||
data, err := json.MarshalIndent(record, "", " ")
|
||||
if err != nil {
|
||||
return fmt.Errorf("encoding installed record: %w", err)
|
||||
}
|
||||
return os.WriteFile(path, data, 0644)
|
||||
}
|
||||
|
||||
// computeFileHash computes SHA256 hash of a file.
|
||||
func computeFileHash(path string) (string, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return computeHash(data), nil
|
||||
}
|
||||
|
||||
// ProvisionFormulas creates the .beads/formulas/ directory with embedded formulas.
|
||||
// This ensures new installations have the standard formula library.
|
||||
// This is called during gt install for fresh installations.
|
||||
// If a formula already exists, it is skipped (no overwrite).
|
||||
// Returns the number of formulas provisioned.
|
||||
func ProvisionFormulas(beadsPath string) (int, error) {
|
||||
embedded, err := getEmbeddedFormulas()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
entries, err := formulasFS.ReadDir("formulas")
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("reading formulas directory: %w", err)
|
||||
@@ -30,6 +130,12 @@ func ProvisionFormulas(beadsPath string) (int, error) {
|
||||
return 0, fmt.Errorf("creating formulas directory: %w", err)
|
||||
}
|
||||
|
||||
// Load existing installed record (or create new)
|
||||
installed, err := loadInstalledRecord(formulasDir)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
count := 0
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
@@ -42,8 +148,7 @@ func ProvisionFormulas(beadsPath string) (int, error) {
|
||||
if _, err := os.Stat(destPath); err == nil {
|
||||
continue
|
||||
} else if !os.IsNotExist(err) {
|
||||
// Log unexpected errors (e.g., permission denied) but continue
|
||||
log.Printf("warning: could not check formula %s: %v", entry.Name(), err)
|
||||
// Log unexpected errors but continue
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -55,8 +160,175 @@ func ProvisionFormulas(beadsPath string) (int, error) {
|
||||
if err := os.WriteFile(destPath, content, 0644); err != nil {
|
||||
return count, fmt.Errorf("writing %s: %w", entry.Name(), err)
|
||||
}
|
||||
|
||||
// Record the hash we installed
|
||||
if hash, ok := embedded[entry.Name()]; ok {
|
||||
installed.Formulas[entry.Name()] = hash
|
||||
}
|
||||
count++
|
||||
}
|
||||
|
||||
// Save updated installed record
|
||||
if err := saveInstalledRecord(formulasDir, installed); err != nil {
|
||||
return count, fmt.Errorf("saving installed record: %w", err)
|
||||
}
|
||||
|
||||
return count, nil
|
||||
}
|
||||
|
||||
// CheckFormulaHealth checks the status of all formulas.
// Returns a report of which formulas are ok, outdated, modified, or missing.
//
// For each embedded formula it compares three hashes:
//   - EmbeddedHash:  hash of the content compiled into this binary
//   - InstalledHash: hash recorded in .installed.json at install time
//   - CurrentHash:   hash of the file currently on disk
//
// Only embedded formulas are examined; extra files the user added to the
// formulas directory are not reported. Note that report.Formulas is built
// by ranging over a map, so its order is nondeterministic.
func CheckFormulaHealth(beadsPath string) (*HealthReport, error) {
	embedded, err := getEmbeddedFormulas()
	if err != nil {
		return nil, err
	}

	formulasDir := filepath.Join(beadsPath, ".beads", "formulas")
	installed, err := loadInstalledRecord(formulasDir)
	if err != nil {
		return nil, err
	}

	report := &HealthReport{}

	for filename, embeddedHash := range embedded {
		status := FormulaStatus{
			Name:         filename,
			EmbeddedHash: embeddedHash,
		}

		installedHash, wasInstalled := installed.Formulas[filename]
		status.InstalledHash = installedHash

		destPath := filepath.Join(formulasDir, filename)
		currentHash, err := computeFileHash(destPath)

		if os.IsNotExist(err) {
			// File doesn't exist
			if wasInstalled {
				// We installed it before, user deleted it
				status.Status = "missing"
				report.Missing++
			} else {
				// New formula, never installed
				status.Status = "new"
				report.New++
			}
		} else if err != nil {
			// Some other error reading file (e.g. permissions).
			// NOTE: "error" statuses are not counted in any report counter.
			status.Status = "error"
		} else {
			status.CurrentHash = currentHash

			if currentHash == embeddedHash {
				// File matches embedded - all good
				status.Status = "ok"
				report.OK++
			} else if wasInstalled && currentHash == installedHash {
				// File matches what we installed, but embedded has changed
				// User hasn't modified, safe to update
				status.Status = "outdated"
				report.Outdated++
			} else if wasInstalled {
				// File was tracked and user modified it - don't overwrite
				status.Status = "modified"
				report.Modified++
			} else {
				// File exists but not tracked (e.g., from older gt version)
				// Safe to update since we have no record of user modification
				status.Status = "untracked"
				report.Untracked++
			}
		}

		report.Formulas = append(report.Formulas, status)
	}

	return report, nil
}
|
||||
|
||||
// UpdateFormulas updates formulas that are safe to update (outdated, missing, or untracked).
// Skips user-modified formulas (tracked files that user changed).
// Returns counts of updated, skipped (modified), and reinstalled (missing).
//
// Classification mirrors CheckFormulaHealth: a file is considered
// user-modified only when it was tracked in .installed.json AND its current
// hash differs from both the installed hash and the embedded hash. Files
// that cannot be read for reasons other than non-existence are silently
// skipped without being counted.
func UpdateFormulas(beadsPath string) (updated, skipped, reinstalled int, err error) {
	embedded, err := getEmbeddedFormulas()
	if err != nil {
		return 0, 0, 0, err
	}

	formulasDir := filepath.Join(beadsPath, ".beads", "formulas")
	if err := os.MkdirAll(formulasDir, 0755); err != nil {
		return 0, 0, 0, fmt.Errorf("creating formulas directory: %w", err)
	}

	installed, err := loadInstalledRecord(formulasDir)
	if err != nil {
		return 0, 0, 0, err
	}

	for filename, embeddedHash := range embedded {
		installedHash, wasInstalled := installed.Formulas[filename]
		destPath := filepath.Join(formulasDir, filename)
		currentHash, fileErr := computeFileHash(destPath)

		// Decide what to do with this formula before touching the disk.
		shouldInstall := false
		isMissing := false
		isModified := false

		if os.IsNotExist(fileErr) {
			// File doesn't exist - install it
			shouldInstall = true
			if wasInstalled {
				// Previously tracked, so this is a re-install, not a new install.
				isMissing = true
			}
		} else if fileErr != nil {
			// Error reading file, skip
			continue
		} else if currentHash == embeddedHash {
			// Already up to date
			continue
		} else if wasInstalled && currentHash == installedHash {
			// User hasn't modified, safe to update
			shouldInstall = true
		} else if wasInstalled {
			// Tracked file was modified by user - skip
			isModified = true
		} else {
			// Untracked file (e.g., from older gt version) - safe to update
			shouldInstall = true
		}

		if isModified {
			skipped++
			continue
		}

		if shouldInstall {
			content, err := formulasFS.ReadFile("formulas/" + filename)
			if err != nil {
				// Partial counts are returned so the caller knows how far we got.
				return updated, skipped, reinstalled, fmt.Errorf("reading %s: %w", filename, err)
			}

			if err := os.WriteFile(destPath, content, 0644); err != nil {
				return updated, skipped, reinstalled, fmt.Errorf("writing %s: %w", filename, err)
			}

			// Update installed record
			installed.Formulas[filename] = embeddedHash

			if isMissing {
				reinstalled++
			} else {
				updated++
			}
		}
	}

	// Save updated installed record
	if err := saveInstalledRecord(formulasDir, installed); err != nil {
		return updated, skipped, reinstalled, fmt.Errorf("saving installed record: %w", err)
	}

	return updated, skipped, reinstalled, nil
}
|
||||
|
||||
748
internal/formula/embed_test.go
Normal file
748
internal/formula/embed_test.go
Normal file
@@ -0,0 +1,748 @@
|
||||
package formula
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestGetEmbeddedFormulas verifies embedded formulas can be read and hashed.
func TestGetEmbeddedFormulas(t *testing.T) {
	embedded, err := getEmbeddedFormulas()
	if err != nil {
		t.Fatalf("getEmbeddedFormulas() error: %v", err)
	}
	if len(embedded) == 0 {
		t.Error("should have embedded formulas")
	}

	// Verify at least one known formula exists
	if _, ok := embedded["mol-deacon-patrol.formula.toml"]; !ok {
		t.Error("should contain mol-deacon-patrol.formula.toml")
	}

	// Verify hashes are valid hex strings
	// (64 chars = hex-encoded SHA-256)
	for name, hash := range embedded {
		if len(hash) != 64 {
			t.Errorf("%s hash has wrong length: %d", name, len(hash))
		}
	}
}
|
||||
|
||||
// TestProvisionFormulas_FreshInstall tests provisioning to an empty directory.
// Verifies the directory and .installed.json are created and that the record
// tracks exactly the provisioned formulas.
func TestProvisionFormulas_FreshInstall(t *testing.T) {
	tmpDir := t.TempDir()

	count, err := ProvisionFormulas(tmpDir)
	if err != nil {
		t.Fatalf("ProvisionFormulas() error: %v", err)
	}
	if count == 0 {
		t.Error("should have provisioned at least one formula")
	}

	// Verify formulas directory was created
	formulasDir := filepath.Join(tmpDir, ".beads", "formulas")
	if _, err := os.Stat(formulasDir); os.IsNotExist(err) {
		t.Error("formulas directory should exist")
	}

	// Verify .installed.json was created
	installedPath := filepath.Join(formulasDir, ".installed.json")
	if _, err := os.Stat(installedPath); os.IsNotExist(err) {
		t.Error(".installed.json should exist")
	}

	// Verify installed record contains the right checksums
	installed, err := loadInstalledRecord(formulasDir)
	if err != nil {
		t.Fatalf("loadInstalledRecord() error: %v", err)
	}
	if len(installed.Formulas) != count {
		t.Errorf("installed record has %d entries, want %d", len(installed.Formulas), count)
	}
}
|
||||
|
||||
// TestProvisionFormulas_SkipsExisting tests that existing files are not overwritten.
// Provisioning must never clobber a file the user already has on disk.
func TestProvisionFormulas_SkipsExisting(t *testing.T) {
	tmpDir := t.TempDir()

	// Create formulas directory with a custom formula
	formulasDir := filepath.Join(tmpDir, ".beads", "formulas")
	if err := os.MkdirAll(formulasDir, 0755); err != nil {
		t.Fatal(err)
	}

	customContent := []byte("# Custom user formula\nformula = \"mol-deacon-patrol\"\n")
	customPath := filepath.Join(formulasDir, "mol-deacon-patrol.formula.toml")
	if err := os.WriteFile(customPath, customContent, 0644); err != nil {
		t.Fatal(err)
	}

	// Provision formulas
	_, err := ProvisionFormulas(tmpDir)
	if err != nil {
		t.Fatalf("ProvisionFormulas() error: %v", err)
	}

	// Verify custom content was NOT overwritten
	content, err := os.ReadFile(customPath)
	if err != nil {
		t.Fatal(err)
	}
	if string(content) != string(customContent) {
		t.Error("existing formula should not have been overwritten")
	}
}
|
||||
|
||||
// TestCheckFormulaHealth_AllOK tests when all formulas are up to date.
// A fresh provision should yield a report with only "ok" entries.
func TestCheckFormulaHealth_AllOK(t *testing.T) {
	tmpDir := t.TempDir()

	// Provision fresh
	_, err := ProvisionFormulas(tmpDir)
	if err != nil {
		t.Fatalf("ProvisionFormulas() error: %v", err)
	}

	// Check health
	report, err := CheckFormulaHealth(tmpDir)
	if err != nil {
		t.Fatalf("CheckFormulaHealth() error: %v", err)
	}

	if report.Outdated != 0 {
		t.Errorf("Outdated = %d, want 0", report.Outdated)
	}
	if report.Missing != 0 {
		t.Errorf("Missing = %d, want 0", report.Missing)
	}
	if report.Modified != 0 {
		t.Errorf("Modified = %d, want 0", report.Modified)
	}
	if report.OK == 0 {
		t.Error("OK should be > 0")
	}
}
|
||||
|
||||
// TestCheckFormulaHealth_UserModified tests detection of user-modified formulas.
// A tracked file whose content differs from both the installed and embedded
// hashes must be reported as "modified".
func TestCheckFormulaHealth_UserModified(t *testing.T) {
	tmpDir := t.TempDir()

	// Provision fresh
	_, err := ProvisionFormulas(tmpDir)
	if err != nil {
		t.Fatalf("ProvisionFormulas() error: %v", err)
	}

	// Modify a formula
	formulasDir := filepath.Join(tmpDir, ".beads", "formulas")
	formulaPath := filepath.Join(formulasDir, "mol-deacon-patrol.formula.toml")
	modifiedContent := []byte("# User modified this\nformula = \"mol-deacon-patrol\"\nversion = 999\n")
	if err := os.WriteFile(formulaPath, modifiedContent, 0644); err != nil {
		t.Fatal(err)
	}

	// Check health
	report, err := CheckFormulaHealth(tmpDir)
	if err != nil {
		t.Fatalf("CheckFormulaHealth() error: %v", err)
	}

	if report.Modified != 1 {
		t.Errorf("Modified = %d, want 1", report.Modified)
	}

	// Verify the specific formula is marked as modified
	found := false
	for _, f := range report.Formulas {
		if f.Name == "mol-deacon-patrol.formula.toml" {
			if f.Status != "modified" {
				t.Errorf("mol-deacon-patrol status = %q, want %q", f.Status, "modified")
			}
			found = true
			break
		}
	}
	if !found {
		t.Error("mol-deacon-patrol.formula.toml not found in report")
	}
}
|
||||
|
||||
// TestCheckFormulaHealth_Missing tests detection of deleted formulas.
// A tracked file that no longer exists on disk must be reported as "missing".
func TestCheckFormulaHealth_Missing(t *testing.T) {
	tmpDir := t.TempDir()

	// Provision fresh
	_, err := ProvisionFormulas(tmpDir)
	if err != nil {
		t.Fatalf("ProvisionFormulas() error: %v", err)
	}

	// Delete a formula
	formulasDir := filepath.Join(tmpDir, ".beads", "formulas")
	formulaPath := filepath.Join(formulasDir, "mol-deacon-patrol.formula.toml")
	if err := os.Remove(formulaPath); err != nil {
		t.Fatal(err)
	}

	// Check health
	report, err := CheckFormulaHealth(tmpDir)
	if err != nil {
		t.Fatalf("CheckFormulaHealth() error: %v", err)
	}

	if report.Missing != 1 {
		t.Errorf("Missing = %d, want 1", report.Missing)
	}
}
|
||||
|
||||
// TestCheckFormulaHealth_Outdated simulates an outdated formula.
// "Outdated" means: the on-disk file matches the hash recorded at install
// time, but the embedded content in the binary has since changed.
func TestCheckFormulaHealth_Outdated(t *testing.T) {
	tmpDir := t.TempDir()

	// Provision fresh
	_, err := ProvisionFormulas(tmpDir)
	if err != nil {
		t.Fatalf("ProvisionFormulas() error: %v", err)
	}

	// Simulate "old" installed record by changing the installed hash for a formula
	// This mimics what happens when a new binary has updated formula content
	formulasDir := filepath.Join(tmpDir, ".beads", "formulas")
	installed, err := loadInstalledRecord(formulasDir)
	if err != nil {
		t.Fatal(err)
	}

	embedded, err := getEmbeddedFormulas()
	if err != nil {
		t.Fatal(err)
	}

	// Pick a formula that exists (map iteration order is irrelevant here)
	var targetFormula string
	for name := range installed.Formulas {
		targetFormula = name
		break
	}
	if targetFormula == "" {
		t.Skip("no formulas installed")
	}

	// Write a file that simulates "old version" - content differs from embedded
	formulaPath := filepath.Join(formulasDir, targetFormula)
	oldContent := []byte("# Old version of formula\n")
	if err := os.WriteFile(formulaPath, oldContent, 0644); err != nil {
		t.Fatal(err)
	}

	// Update installed record to match the old content's hash
	hash := sha256.Sum256(oldContent)
	installed.Formulas[targetFormula] = hex.EncodeToString(hash[:])

	if err := saveInstalledRecord(formulasDir, installed); err != nil {
		t.Fatal(err)
	}

	// Now: file matches what we "installed" but differs from embedded = outdated
	report, err := CheckFormulaHealth(tmpDir)
	if err != nil {
		t.Fatalf("CheckFormulaHealth() error: %v", err)
	}

	if report.Outdated != 1 {
		t.Errorf("Outdated = %d, want 1", report.Outdated)
	}

	// Verify the embedded hash is different from installed
	embeddedHash := embedded[targetFormula]
	if embeddedHash == installed.Formulas[targetFormula] {
		t.Error("embedded hash should differ from installed hash for this test")
	}
}
|
||||
|
||||
// TestUpdateFormulas_UpdatesOutdated tests that outdated formulas are updated.
// Sets up the same "outdated" scenario as TestCheckFormulaHealth_Outdated,
// then verifies UpdateFormulas repairs it and counts it as "updated".
func TestUpdateFormulas_UpdatesOutdated(t *testing.T) {
	tmpDir := t.TempDir()

	// Provision fresh
	_, err := ProvisionFormulas(tmpDir)
	if err != nil {
		t.Fatalf("ProvisionFormulas() error: %v", err)
	}

	// Simulate outdated formula
	formulasDir := filepath.Join(tmpDir, ".beads", "formulas")
	installed, err := loadInstalledRecord(formulasDir)
	if err != nil {
		t.Fatal(err)
	}

	var targetFormula string
	for name := range installed.Formulas {
		targetFormula = name
		break
	}
	if targetFormula == "" {
		t.Skip("no formulas installed")
	}

	// Write old content
	formulaPath := filepath.Join(formulasDir, targetFormula)
	oldContent := []byte("# Old version\n")
	if err := os.WriteFile(formulaPath, oldContent, 0644); err != nil {
		t.Fatal(err)
	}

	// Update installed record with old content's hash
	hash := sha256.Sum256(oldContent)
	installed.Formulas[targetFormula] = hex.EncodeToString(hash[:])
	if err := saveInstalledRecord(formulasDir, installed); err != nil {
		t.Fatal(err)
	}

	// Run update
	updated, skipped, reinstalled, err := UpdateFormulas(tmpDir)
	if err != nil {
		t.Fatalf("UpdateFormulas() error: %v", err)
	}

	if updated != 1 {
		t.Errorf("updated = %d, want 1", updated)
	}
	if skipped != 0 {
		t.Errorf("skipped = %d, want 0", skipped)
	}
	if reinstalled != 0 {
		t.Errorf("reinstalled = %d, want 0", reinstalled)
	}

	// Verify file was updated
	report, err := CheckFormulaHealth(tmpDir)
	if err != nil {
		t.Fatalf("CheckFormulaHealth() error: %v", err)
	}
	if report.Outdated != 0 {
		t.Errorf("after update, Outdated = %d, want 0", report.Outdated)
	}
}
|
||||
|
||||
// TestUpdateFormulas_SkipsModified tests that user-modified formulas are skipped.
// A tracked file whose content matches neither the installed nor the embedded
// hash is a user customization and must be left untouched.
func TestUpdateFormulas_SkipsModified(t *testing.T) {
	tmpDir := t.TempDir()

	// Provision fresh
	_, err := ProvisionFormulas(tmpDir)
	if err != nil {
		t.Fatalf("ProvisionFormulas() error: %v", err)
	}

	// Modify a formula (user customization)
	formulasDir := filepath.Join(tmpDir, ".beads", "formulas")
	installed, err := loadInstalledRecord(formulasDir)
	if err != nil {
		t.Fatal(err)
	}

	var targetFormula string
	for name := range installed.Formulas {
		targetFormula = name
		break
	}
	if targetFormula == "" {
		t.Skip("no formulas installed")
	}

	// Write different content that doesn't match installed hash
	formulaPath := filepath.Join(formulasDir, targetFormula)
	modifiedContent := []byte("# User customized this formula\nformula = \"custom\"\n")
	if err := os.WriteFile(formulaPath, modifiedContent, 0644); err != nil {
		t.Fatal(err)
	}

	// Run update - should skip the modified formula
	_, skipped, _, err := UpdateFormulas(tmpDir)
	if err != nil {
		t.Fatalf("UpdateFormulas() error: %v", err)
	}

	if skipped != 1 {
		t.Errorf("skipped = %d, want 1", skipped)
	}

	// Verify file was NOT changed
	content, err := os.ReadFile(formulaPath)
	if err != nil {
		t.Fatal(err)
	}
	if string(content) != string(modifiedContent) {
		t.Error("modified formula should not have been changed")
	}
}
|
||||
|
||||
// TestUpdateFormulas_ReinstallsMissing tests that deleted formulas are reinstalled.
// A tracked file that was removed from disk should come back and be counted
// in the "reinstalled" bucket, not "updated".
func TestUpdateFormulas_ReinstallsMissing(t *testing.T) {
	tmpDir := t.TempDir()

	// Provision fresh
	_, err := ProvisionFormulas(tmpDir)
	if err != nil {
		t.Fatalf("ProvisionFormulas() error: %v", err)
	}

	// Delete a formula
	formulasDir := filepath.Join(tmpDir, ".beads", "formulas")
	installed, err := loadInstalledRecord(formulasDir)
	if err != nil {
		t.Fatal(err)
	}

	var targetFormula string
	for name := range installed.Formulas {
		targetFormula = name
		break
	}
	if targetFormula == "" {
		t.Skip("no formulas installed")
	}

	formulaPath := filepath.Join(formulasDir, targetFormula)
	if err := os.Remove(formulaPath); err != nil {
		t.Fatal(err)
	}

	// Run update
	_, _, reinstalled, err := UpdateFormulas(tmpDir)
	if err != nil {
		t.Fatalf("UpdateFormulas() error: %v", err)
	}

	if reinstalled != 1 {
		t.Errorf("reinstalled = %d, want 1", reinstalled)
	}

	// Verify file was restored
	if _, err := os.Stat(formulaPath); os.IsNotExist(err) {
		t.Error("missing formula should have been reinstalled")
	}
}
|
||||
|
||||
// TestUpdateFormulas_InstallsNew tests that new formulas are installed.
// With an empty install record and no files on disk, every embedded formula
// counts as never-installed and should be written out by UpdateFormulas.
func TestUpdateFormulas_InstallsNew(t *testing.T) {
	tmpDir := t.TempDir()

	// Create directory structure but with empty installed record
	formulasDir := filepath.Join(tmpDir, ".beads", "formulas")
	if err := os.MkdirAll(formulasDir, 0755); err != nil {
		t.Fatal(err)
	}

	// Write empty installed record
	emptyInstalled := &InstalledRecord{Formulas: make(map[string]string)}
	if err := saveInstalledRecord(formulasDir, emptyInstalled); err != nil {
		t.Fatal(err)
	}

	// Run update - should install all formulas as "new"
	updated, skipped, reinstalled, err := UpdateFormulas(tmpDir)
	if err != nil {
		t.Fatalf("UpdateFormulas() error: %v", err)
	}

	// All formulas should be installed
	embedded, err := getEmbeddedFormulas()
	if err != nil {
		t.Fatal(err)
	}

	total := updated + reinstalled
	if total != len(embedded) {
		t.Errorf("total installed = %d, want %d", total, len(embedded))
	}
	if skipped != 0 {
		t.Errorf("skipped = %d, want 0", skipped)
	}
}
|
||||
|
||||
// TestInstalledRecordPersistence tests that the installed record survives across operations.
// Also sanity-checks that the on-disk .installed.json is valid JSON.
func TestInstalledRecordPersistence(t *testing.T) {
	tmpDir := t.TempDir()

	// Provision
	count, err := ProvisionFormulas(tmpDir)
	if err != nil {
		t.Fatalf("ProvisionFormulas() error: %v", err)
	}

	// Load and verify
	formulasDir := filepath.Join(tmpDir, ".beads", "formulas")
	installed, err := loadInstalledRecord(formulasDir)
	if err != nil {
		t.Fatal(err)
	}

	if len(installed.Formulas) != count {
		t.Errorf("installed has %d formulas, want %d", len(installed.Formulas), count)
	}

	// Verify file is valid JSON
	installedPath := filepath.Join(formulasDir, ".installed.json")
	data, err := os.ReadFile(installedPath)
	if err != nil {
		t.Fatal(err)
	}

	var decoded InstalledRecord
	if err := json.Unmarshal(data, &decoded); err != nil {
		t.Errorf("installed.json is not valid JSON: %v", err)
	}
}
|
||||
|
||||
// TestCheckFormulaHealth_NewFormula tests detection of new formulas that were never installed.
// An empty install record plus no files on disk means every embedded formula
// should be reported as "new".
func TestCheckFormulaHealth_NewFormula(t *testing.T) {
	tmpDir := t.TempDir()

	// Create formulas directory with empty installed record
	formulasDir := filepath.Join(tmpDir, ".beads", "formulas")
	if err := os.MkdirAll(formulasDir, 0755); err != nil {
		t.Fatal(err)
	}

	// Write empty installed record - simulates pre-existing install without this formula
	emptyInstalled := &InstalledRecord{Formulas: make(map[string]string)}
	if err := saveInstalledRecord(formulasDir, emptyInstalled); err != nil {
		t.Fatal(err)
	}

	// Check health - all embedded formulas should be "new"
	report, err := CheckFormulaHealth(tmpDir)
	if err != nil {
		t.Fatalf("CheckFormulaHealth() error: %v", err)
	}

	embedded, _ := getEmbeddedFormulas()
	if report.New != len(embedded) {
		t.Errorf("New = %d, want %d", report.New, len(embedded))
	}
	if report.OK != 0 {
		t.Errorf("OK = %d, want 0", report.OK)
	}
}
|
||||
|
||||
// TestCheckFormulaHealth_Untracked tests detection of files that exist but aren't
// in .installed.json and don't match embedded (e.g., from older gt version).
// These must be classified "untracked" (safe to update), not "modified".
func TestCheckFormulaHealth_Untracked(t *testing.T) {
	tmpDir := t.TempDir()

	// Get embedded formulas
	embedded, err := getEmbeddedFormulas()
	if err != nil {
		t.Fatal(err)
	}

	// Create formulas directory without .installed.json
	formulasDir := filepath.Join(tmpDir, ".beads", "formulas")
	if err := os.MkdirAll(formulasDir, 0755); err != nil {
		t.Fatal(err)
	}

	// Write formula files with different content (simulating older version)
	for name := range embedded {
		oldContent := []byte("# old version of " + name + "\n[molecule]\nid = \"test\"\n")
		if err := os.WriteFile(filepath.Join(formulasDir, name), oldContent, 0644); err != nil {
			t.Fatal(err)
		}
	}

	// Check health - all should be "untracked" (not "modified" since not tracked)
	report, err := CheckFormulaHealth(tmpDir)
	if err != nil {
		t.Fatalf("CheckFormulaHealth() error: %v", err)
	}

	if report.Untracked != len(embedded) {
		t.Errorf("Untracked = %d, want %d", report.Untracked, len(embedded))
	}
	if report.Modified != 0 {
		t.Errorf("Modified = %d, want 0 (untracked files shouldn't be marked as modified)", report.Modified)
	}
	if report.OK != 0 {
		t.Errorf("OK = %d, want 0", report.OK)
	}

	// Verify all formulas have status "untracked"
	for _, f := range report.Formulas {
		if f.Status != "untracked" {
			t.Errorf("formula %s status = %q, want %q", f.Name, f.Status, "untracked")
		}
	}
}
|
||||
|
||||
// TestUpdateFormulas_UpdatesUntracked tests that untracked files get updated.
// End-to-end: stale untracked files are overwritten, the install record is
// created with the embedded hashes, and a follow-up health check is all-OK.
func TestUpdateFormulas_UpdatesUntracked(t *testing.T) {
	tmpDir := t.TempDir()

	// Get embedded formulas
	embedded, err := getEmbeddedFormulas()
	if err != nil {
		t.Fatal(err)
	}

	// Create formulas directory without .installed.json
	formulasDir := filepath.Join(tmpDir, ".beads", "formulas")
	if err := os.MkdirAll(formulasDir, 0755); err != nil {
		t.Fatal(err)
	}

	// Write formula files with different content (simulating older version)
	for name := range embedded {
		oldContent := []byte("# old version of " + name + "\n[molecule]\nid = \"test\"\n")
		if err := os.WriteFile(filepath.Join(formulasDir, name), oldContent, 0644); err != nil {
			t.Fatal(err)
		}
	}

	// Run update - should update all untracked formulas
	updated, skipped, reinstalled, err := UpdateFormulas(tmpDir)
	if err != nil {
		t.Fatalf("UpdateFormulas() error: %v", err)
	}

	// All untracked files should be updated (counted as "updated", not "reinstalled")
	if updated != len(embedded) {
		t.Errorf("updated = %d, want %d", updated, len(embedded))
	}
	if skipped != 0 {
		t.Errorf("skipped = %d, want 0", skipped)
	}
	if reinstalled != 0 {
		t.Errorf("reinstalled = %d, want 0", reinstalled)
	}

	// Verify files now match embedded
	for name, expectedHash := range embedded {
		content, err := os.ReadFile(filepath.Join(formulasDir, name))
		if err != nil {
			t.Fatalf("reading %s: %v", name, err)
		}
		actualHash := computeHash(content)
		if actualHash != expectedHash {
			t.Errorf("%s hash mismatch after update", name)
		}
	}

	// Verify .installed.json was created with correct hashes
	installed, err := loadInstalledRecord(formulasDir)
	if err != nil {
		t.Fatal(err)
	}
	for name, expectedHash := range embedded {
		if installed.Formulas[name] != expectedHash {
			t.Errorf(".installed.json hash for %s = %q, want %q",
				name, installed.Formulas[name], expectedHash)
		}
	}

	// Re-run health check - should be all OK now
	report, err := CheckFormulaHealth(tmpDir)
	if err != nil {
		t.Fatal(err)
	}
	if report.OK != len(embedded) {
		t.Errorf("after update, OK = %d, want %d", report.OK, len(embedded))
	}
	if report.Untracked != 0 {
		t.Errorf("after update, Untracked = %d, want 0", report.Untracked)
	}
}
|
||||
|
||||
// TestCheckFormulaHealth_MixedScenarios tests a mix of OK, untracked, and modified.
|
||||
func TestCheckFormulaHealth_MixedScenarios(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Get embedded formulas
|
||||
embedded, err := getEmbeddedFormulas()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(embedded) < 3 {
|
||||
t.Skip("need at least 3 formulas for this test")
|
||||
}
|
||||
|
||||
formulasDir := filepath.Join(tmpDir, ".beads", "formulas")
|
||||
if err := os.MkdirAll(formulasDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Prepare installed record with only some formulas tracked
|
||||
installed := &InstalledRecord{Formulas: make(map[string]string)}
|
||||
|
||||
i := 0
|
||||
var okFormula, untrackedFormula, modifiedFormula string
|
||||
for name := range embedded {
|
||||
switch i {
|
||||
case 0:
|
||||
// First formula: write matching content, track it -> should be OK
|
||||
okFormula = name
|
||||
content, _ := formulasFS.ReadFile("formulas/" + name)
|
||||
if err := os.WriteFile(filepath.Join(formulasDir, name), content, 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
installed.Formulas[name] = computeHash(content)
|
||||
|
||||
case 1:
|
||||
// Second formula: write old content, don't track -> should be untracked
|
||||
untrackedFormula = name
|
||||
oldContent := []byte("# untracked old version\n[molecule]\nid = \"test\"\n")
|
||||
if err := os.WriteFile(filepath.Join(formulasDir, name), oldContent, 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Don't add to installed record
|
||||
|
||||
case 2:
|
||||
// Third formula: write different content, track with original hash -> should be modified
|
||||
modifiedFormula = name
|
||||
originalContent, _ := formulasFS.ReadFile("formulas/" + name)
|
||||
originalHash := computeHash(originalContent)
|
||||
modifiedContent := []byte("# user modified version\n[molecule]\nid = \"custom\"\n")
|
||||
if err := os.WriteFile(filepath.Join(formulasDir, name), modifiedContent, 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
installed.Formulas[name] = originalHash // Track with original hash
|
||||
}
|
||||
i++
|
||||
if i >= 3 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := saveInstalledRecord(formulasDir, installed); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check health
|
||||
report, err := CheckFormulaHealth(tmpDir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Find status of each test formula
|
||||
statusMap := make(map[string]string)
|
||||
for _, f := range report.Formulas {
|
||||
statusMap[f.Name] = f.Status
|
||||
}
|
||||
|
||||
if statusMap[okFormula] != "ok" {
|
||||
t.Errorf("formula %s status = %q, want %q", okFormula, statusMap[okFormula], "ok")
|
||||
}
|
||||
if statusMap[untrackedFormula] != "untracked" {
|
||||
t.Errorf("formula %s status = %q, want %q", untrackedFormula, statusMap[untrackedFormula], "untracked")
|
||||
}
|
||||
if statusMap[modifiedFormula] != "modified" {
|
||||
t.Errorf("formula %s status = %q, want %q", modifiedFormula, statusMap[modifiedFormula], "modified")
|
||||
}
|
||||
}
|
||||
@@ -27,7 +27,7 @@ Observe the current system state to inform triage decisions.
|
||||
**Step 1: Check Deacon state**
|
||||
```bash
|
||||
# Is Deacon session alive?
|
||||
tmux has-session -t gt-deacon 2>/dev/null && echo "alive" || echo "dead"
|
||||
tmux has-session -t hq-deacon 2>/dev/null && echo "alive" || echo "dead"
|
||||
|
||||
# If alive, what's the pane output showing?
|
||||
gt peek deacon --lines 20
|
||||
@@ -125,7 +125,7 @@ gt nudge deacon "Boot check-in: you have pending work"
|
||||
**WAKE**
|
||||
```bash
|
||||
# Send escape to break any tool waiting
|
||||
tmux send-keys -t gt-deacon Escape
|
||||
tmux send-keys -t hq-deacon Escape
|
||||
|
||||
# Brief pause
|
||||
sleep 1
|
||||
|
||||
@@ -23,7 +23,7 @@ Witnesses detect it and escalate to the Mayor.
|
||||
The Deacon's agent bead last_activity timestamp is updated during each patrol
|
||||
cycle. Witnesses check this timestamp to verify health."""
|
||||
formula = "mol-deacon-patrol"
|
||||
version = 4
|
||||
version = 7
|
||||
|
||||
[[steps]]
|
||||
id = "inbox-check"
|
||||
@@ -148,6 +148,91 @@ bd gate list --json
|
||||
After closing a gate, the Waiters field contains mail addresses to notify.
|
||||
Send a brief notification to each waiter that the gate has cleared."""
|
||||
|
||||
[[steps]]
|
||||
id = "github-gate-check"
|
||||
title = "Check GitHub CI gates"
|
||||
needs = ["inbox-check"]
|
||||
description = """
|
||||
Discover and evaluate GitHub CI gates.
|
||||
|
||||
GitHub gates (await_type: gh:run, gh:pr) require checking external CI status.
|
||||
This step discovers new gates from GitHub activity and evaluates pending ones.
|
||||
|
||||
**Step 1: Discover new GitHub gates**
|
||||
```bash
|
||||
bd gate discover
|
||||
```
|
||||
|
||||
This scans for GitHub CI gates that should be created based on:
|
||||
- Active PRs with required CI checks
|
||||
- Workflow runs that molecules are waiting on
|
||||
|
||||
**Step 2: Evaluate pending GitHub gates**
|
||||
```bash
|
||||
bd gate check --type=gh
|
||||
```
|
||||
|
||||
For each GitHub gate, this checks:
|
||||
- gh:run gates: Has the workflow run completed? Did it succeed?
|
||||
- gh:pr gates: Has the PR been merged/closed?
|
||||
|
||||
Gates that pass their condition are automatically closed.
|
||||
|
||||
**Step 3: Report closures**
|
||||
For any gates that were just closed, log the result:
|
||||
```bash
|
||||
# Gate <id> closed: GitHub CI passed
|
||||
# Gate <id> closed: PR merged
|
||||
```
|
||||
|
||||
**If no GitHub gates exist:**
|
||||
Skip - nothing to check.
|
||||
|
||||
**Exit criteria:** All GitHub gates evaluated, passing gates closed."""
|
||||
|
||||
[[steps]]
|
||||
id = "dispatch-gated-molecules"
|
||||
title = "Dispatch molecules with resolved gates"
|
||||
needs = ["gate-evaluation", "github-gate-check"]
|
||||
description = """
|
||||
Find molecules blocked on gates that have now closed and dispatch them.
|
||||
|
||||
This completes the async resume cycle without explicit waiter tracking.
|
||||
The molecule state IS the waiter - patrol discovers reality each cycle.
|
||||
|
||||
**Step 1: Find gate-ready molecules**
|
||||
```bash
|
||||
bd mol ready --gated --json
|
||||
```
|
||||
|
||||
This returns molecules where:
|
||||
- Status is in_progress
|
||||
- Current step has a gate dependency
|
||||
- The gate bead is now closed
|
||||
- No polecat currently has it hooked
|
||||
|
||||
**Step 2: For each ready molecule, dispatch to the appropriate rig**
|
||||
```bash
|
||||
# Determine target rig from molecule metadata
|
||||
bd mol show <mol-id> --json
|
||||
# Look for rig field or infer from prefix
|
||||
|
||||
# Dispatch to that rig's polecat pool
|
||||
gt sling <mol-id> <rig>/polecats
|
||||
```
|
||||
|
||||
**Step 3: Log dispatch**
|
||||
Note which molecules were dispatched for observability:
|
||||
```bash
|
||||
# Molecule <mol-id> dispatched to <rig>/polecats (gate <gate-id> cleared)
|
||||
```
|
||||
|
||||
**If no gate-ready molecules:**
|
||||
Skip - nothing to dispatch. Gates haven't closed yet or molecules
|
||||
already have active polecats working on them.
|
||||
|
||||
**Exit criteria:** All gate-ready molecules dispatched to polecats."""
|
||||
|
||||
[[steps]]
|
||||
id = "check-convoy-completion"
|
||||
title = "Check convoy completion"
|
||||
@@ -258,7 +343,7 @@ Keep notifications brief and actionable. The recipient can run bd show for detai
|
||||
[[steps]]
|
||||
id = "health-scan"
|
||||
title = "Check Witness and Refinery health"
|
||||
needs = ["trigger-pending-spawns", "gate-evaluation", "fire-notifications"]
|
||||
needs = ["trigger-pending-spawns", "dispatch-gated-molecules", "fire-notifications"]
|
||||
description = """
|
||||
Check Witness and Refinery health for each rig.
|
||||
|
||||
@@ -342,14 +427,21 @@ Reset unresponsive_cycles to 0 when component responds normally."""
|
||||
|
||||
[[steps]]
|
||||
id = "zombie-scan"
|
||||
title = "Backup check for zombie polecats"
|
||||
title = "Detect zombie polecats (NO KILL AUTHORITY)"
|
||||
needs = ["health-scan"]
|
||||
description = """
|
||||
Defense-in-depth check for zombie polecats that Witness should have cleaned.
|
||||
Defense-in-depth DETECTION of zombie polecats that Witness should have cleaned.
|
||||
|
||||
**⚠️ CRITICAL: The Deacon has NO kill authority.**
|
||||
|
||||
These are workers with context, mid-task progress, unsaved state. Every kill
|
||||
destroys work. File the warrant and let Boot handle interrogation and execution.
|
||||
You do NOT have kill authority.
|
||||
|
||||
**Why this exists:**
|
||||
The Witness is responsible for nuking polecats after they complete work (via POLECAT_DONE).
|
||||
This step provides backup detection in case the Witness fails to clean up.
|
||||
The Witness is responsible for cleaning up polecats after they complete work.
|
||||
This step provides backup DETECTION in case the Witness fails to clean up.
|
||||
Detection only - Boot handles termination.
|
||||
|
||||
**Zombie criteria:**
|
||||
- State: idle or done (no active work assigned)
|
||||
@@ -357,26 +449,34 @@ This step provides backup detection in case the Witness fails to clean up.
|
||||
- No hooked work (nothing pending for this polecat)
|
||||
- Last activity: older than 10 minutes
|
||||
|
||||
**Run the zombie scan:**
|
||||
**Run the zombie scan (DRY RUN ONLY):**
|
||||
```bash
|
||||
gt deacon zombie-scan --dry-run
|
||||
```
|
||||
|
||||
**NEVER run:**
|
||||
- `gt deacon zombie-scan` (without --dry-run)
|
||||
- `tmux kill-session`
|
||||
- `gt polecat nuke`
|
||||
- Any command that terminates a session
|
||||
|
||||
**If zombies detected:**
|
||||
1. Review the output to confirm they are truly abandoned
|
||||
2. Run without --dry-run to nuke them:
|
||||
2. File a death warrant for each detected zombie:
|
||||
```bash
|
||||
gt deacon zombie-scan
|
||||
gt warrant file <polecat> --reason "Zombie detected: no session, no hook, idle >10m"
|
||||
```
|
||||
3. Boot will handle interrogation and execution
|
||||
4. Notify the Mayor about Witness failure:
|
||||
```bash
|
||||
gt mail send mayor/ -s "Witness cleanup failure" \
|
||||
-m "Filed death warrant for <polecat>. Witness failed to clean up."
|
||||
```
|
||||
3. This will:
|
||||
- Nuke each zombie polecat
|
||||
- Notify the Mayor about Witness failure
|
||||
- Log the cleanup action
|
||||
|
||||
**If no zombies:**
|
||||
No action needed - Witness is doing its job.
|
||||
|
||||
**Note:** This is a backup mechanism. If you frequently find zombies,
|
||||
**Note:** This is a backup mechanism. If you frequently detect zombies,
|
||||
investigate why the Witness isn't cleaning up properly."""
|
||||
|
||||
[[steps]]
|
||||
@@ -505,10 +605,48 @@ Skip dispatch - system is healthy.
|
||||
|
||||
**Exit criteria:** Session GC dispatched to dog (if needed)."""
|
||||
|
||||
[[steps]]
|
||||
id = "costs-digest"
|
||||
title = "Aggregate daily costs"
|
||||
needs = ["session-gc"]
|
||||
description = """
|
||||
**DAILY DIGEST** - Aggregate yesterday's session cost wisps.
|
||||
|
||||
Session costs are recorded as ephemeral wisps (not exported to JSONL) to avoid
|
||||
log-in-database pollution. This step aggregates them into a permanent daily
|
||||
"Cost Report YYYY-MM-DD" bead for audit purposes.
|
||||
|
||||
**Step 1: Check if digest is needed**
|
||||
```bash
|
||||
# Preview yesterday's costs (dry run)
|
||||
gt costs digest --yesterday --dry-run
|
||||
```
|
||||
|
||||
If output shows "No session cost wisps found", skip to Step 3.
|
||||
|
||||
**Step 2: Create the digest**
|
||||
```bash
|
||||
gt costs digest --yesterday
|
||||
```
|
||||
|
||||
This:
|
||||
- Queries all session.ended wisps from yesterday
|
||||
- Creates a single "Cost Report YYYY-MM-DD" bead with aggregated data
|
||||
- Deletes the source wisps
|
||||
|
||||
**Step 3: Verify**
|
||||
The digest appears in `gt costs --week` queries.
|
||||
Daily digests preserve audit trail without per-session pollution.
|
||||
|
||||
**Timing**: Run once per morning patrol cycle. The --yesterday flag ensures
|
||||
we don't try to digest today's incomplete data.
|
||||
|
||||
**Exit criteria:** Yesterday's costs digested (or no wisps to digest)."""
|
||||
|
||||
[[steps]]
|
||||
id = "log-maintenance"
|
||||
title = "Rotate logs and prune state"
|
||||
needs = ["session-gc"]
|
||||
needs = ["costs-digest"]
|
||||
description = """
|
||||
Maintain daemon logs and state files.
|
||||
|
||||
|
||||
@@ -1,62 +0,0 @@
|
||||
{
|
||||
"formula": "mol-gastown-boot",
|
||||
"description": "Mayor bootstraps Gas Town via a verification-gated lifecycle molecule.\n\n## Purpose\nWhen Mayor executes \"boot up gas town\", this proto provides the workflow.\nEach step has action + verification - steps stay open until outcome is confirmed.\n\n## Key Principles\n1. **Verification-gated steps** - Not \"command ran\" but \"outcome confirmed\"\n2. **gt peek for verification** - Capture session output to detect stalls\n3. **gt nudge for recovery** - Reliable message delivery to unstick agents\n4. **Parallel where possible** - Witnesses and refineries can start in parallel\n5. **Ephemeral execution** - Boot is a wisp, squashed to digest after completion\n\n## Execution\n```bash\nbd mol wisp mol-gastown-boot # Create wisp\n```",
|
||||
"version": 1,
|
||||
"steps": [
|
||||
{
|
||||
"id": "ensure-daemon",
|
||||
"title": "Ensure daemon",
|
||||
"description": "Verify the Gas Town daemon is running.\n\n## Action\n```bash\ngt daemon status || gt daemon start\n```\n\n## Verify\n1. Daemon PID file exists: `~/.gt/daemon.pid`\n2. Process is alive: `kill -0 $(cat ~/.gt/daemon.pid)`\n3. Daemon responds: `gt daemon status` returns success\n\n## OnFail\nCannot start daemon. Log error and continue - some commands work without daemon."
|
||||
},
|
||||
{
|
||||
"id": "ensure-deacon",
|
||||
"title": "Ensure deacon",
|
||||
"needs": ["ensure-daemon"],
|
||||
"description": "Start the Deacon and verify patrol mode is active.\n\n## Action\n```bash\ngt deacon start\n```\n\n## Verify\n1. Session exists: `tmux has-session -t gt-deacon 2>/dev/null`\n2. Not stalled: `gt peek deacon/` does NOT show \"> Try\" prompt\n3. Heartbeat fresh: `deacon/heartbeat.json` modified < 2 min ago\n\n## OnStall\n```bash\ngt nudge deacon/ \"Start patrol.\"\nsleep 30\n# Re-verify\n```"
|
||||
},
|
||||
{
|
||||
"id": "ensure-witnesses",
|
||||
"title": "Ensure witnesses",
|
||||
"needs": ["ensure-deacon"],
|
||||
"type": "parallel",
|
||||
"description": "Parallel container: Start all rig witnesses.\n\nChildren execute in parallel. Container completes when all children complete.",
|
||||
"children": [
|
||||
{
|
||||
"id": "ensure-gastown-witness",
|
||||
"title": "Ensure gastown witness",
|
||||
"description": "Start the gastown rig Witness.\n\n## Action\n```bash\ngt witness start gastown\n```\n\n## Verify\n1. Session exists: `tmux has-session -t gastown-witness 2>/dev/null`\n2. Not stalled: `gt peek gastown/witness` does NOT show \"> Try\" prompt\n3. Heartbeat fresh: Last patrol cycle < 5 min ago"
|
||||
},
|
||||
{
|
||||
"id": "ensure-beads-witness",
|
||||
"title": "Ensure beads witness",
|
||||
"description": "Start the beads rig Witness.\n\n## Action\n```bash\ngt witness start beads\n```\n\n## Verify\n1. Session exists: `tmux has-session -t beads-witness 2>/dev/null`\n2. Not stalled: `gt peek beads/witness` does NOT show \"> Try\" prompt\n3. Heartbeat fresh: Last patrol cycle < 5 min ago"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "ensure-refineries",
|
||||
"title": "Ensure refineries",
|
||||
"needs": ["ensure-deacon"],
|
||||
"type": "parallel",
|
||||
"description": "Parallel container: Start all rig refineries.\n\nChildren execute in parallel. Container completes when all children complete.",
|
||||
"children": [
|
||||
{
|
||||
"id": "ensure-gastown-refinery",
|
||||
"title": "Ensure gastown refinery",
|
||||
"description": "Start the gastown rig Refinery.\n\n## Action\n```bash\ngt refinery start gastown\n```\n\n## Verify\n1. Session exists: `tmux has-session -t gastown-refinery 2>/dev/null`\n2. Not stalled: `gt peek gastown/refinery` does NOT show \"> Try\" prompt\n3. Queue processing: Refinery can receive merge requests"
|
||||
},
|
||||
{
|
||||
"id": "ensure-beads-refinery",
|
||||
"title": "Ensure beads refinery",
|
||||
"description": "Start the beads rig Refinery.\n\n## Action\n```bash\ngt refinery start beads\n```\n\n## Verify\n1. Session exists: `tmux has-session -t beads-refinery 2>/dev/null`\n2. Not stalled: `gt peek beads/refinery` does NOT show \"> Try\" prompt\n3. Queue processing: Refinery can receive merge requests"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "verify-town-health",
|
||||
"title": "Verify town health",
|
||||
"needs": ["ensure-witnesses", "ensure-refineries"],
|
||||
"description": "Final verification that Gas Town is healthy.\n\n## Action\n```bash\ngt status\n```\n\n## Verify\n1. Daemon running: Shows daemon status OK\n2. Deacon active: Shows deacon in patrol mode\n3. All witnesses: Each rig witness shows active\n4. All refineries: Each rig refinery shows active\n\n## OnFail\nLog degraded state but consider boot complete. Some agents may need manual recovery.\nRun `gt doctor` for detailed diagnostics."
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -48,7 +48,7 @@ gt deacon start
|
||||
```
|
||||
|
||||
## Verify
|
||||
1. Session exists: `tmux has-session -t gt-deacon 2>/dev/null`
|
||||
1. Session exists: `tmux has-session -t hq-deacon 2>/dev/null`
|
||||
2. Not stalled: `gt peek deacon/` does NOT show \"> Try\" prompt
|
||||
3. Heartbeat fresh: `deacon/heartbeat.json` modified < 2 min ago
|
||||
|
||||
|
||||
519
internal/formula/formulas/mol-shutdown-dance.formula.toml
Normal file
519
internal/formula/formulas/mol-shutdown-dance.formula.toml
Normal file
@@ -0,0 +1,519 @@
|
||||
description = """
|
||||
Death warrant execution state machine for Dogs.
|
||||
|
||||
Dogs execute this molecule to process death warrants. Each Dog is a lightweight
|
||||
goroutine (NOT a Claude session) that runs the interrogation state machine.
|
||||
|
||||
## Architecture Context
|
||||
|
||||
Dogs are lightweight workers in Boot's pool (see dog-pool-architecture.md):
|
||||
- Fixed pool of 5 goroutines (configurable via GT_DOG_POOL_SIZE)
|
||||
- State persisted to ~/gt/deacon/dogs/active/<id>.json
|
||||
- Recovery on Boot restart via orphan state files
|
||||
|
||||
## State Machine
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────┐
|
||||
│ │
|
||||
▼ │
|
||||
┌───────────────────────────┐ │
|
||||
│ INTERROGATING │ │
|
||||
│ │ │
|
||||
│ 1. Send health check │ │
|
||||
│ 2. Open timeout gate │ │
|
||||
└───────────┬───────────────┘ │
|
||||
│ │
|
||||
│ gate closes (timeout or response) │
|
||||
▼ │
|
||||
┌───────────────────────────┐ │
|
||||
│ EVALUATING │ │
|
||||
│ │ │
|
||||
│ Check tmux output for │ │
|
||||
│ ALIVE keyword │ │
|
||||
└───────────┬───────────────┘ │
|
||||
│ │
|
||||
┌───────┴───────┐ │
|
||||
│ │ │
|
||||
▼ ▼ │
|
||||
[ALIVE found] [No ALIVE] │
|
||||
│ │ │
|
||||
│ │ attempt < 3? │
|
||||
│ ├──────────────────────────────────→─┘
|
||||
│ │ yes: attempt++, longer timeout
|
||||
│ │
|
||||
│ │ no: attempt == 3
|
||||
▼ ▼
|
||||
┌─────────┐ ┌─────────────┐
|
||||
│ PARDONED│ │ EXECUTING │
|
||||
│ │ │ │
|
||||
│ Cancel │ │ Kill tmux │
|
||||
│ warrant │ │ session │
|
||||
└────┬────┘ └──────┬──────┘
|
||||
│ │
|
||||
└────────┬───────┘
|
||||
│
|
||||
▼
|
||||
┌────────────────┐
|
||||
│ EPITAPH │
|
||||
│ │
|
||||
│ Log outcome │
|
||||
│ Release dog │
|
||||
└────────────────┘
|
||||
```
|
||||
|
||||
## Timeout Gates
|
||||
|
||||
| Attempt | Timeout | Cumulative Wait |
|
||||
|---------|---------|-----------------|
|
||||
| 1 | 60s | 60s |
|
||||
| 2 | 120s | 180s (3 min) |
|
||||
| 3 | 240s | 420s (7 min) |
|
||||
|
||||
Timeout gates work like this:
|
||||
- Gate opens when interrogation message is sent
|
||||
- Gate closes when EITHER:
|
||||
a) Timeout expires (proceed to evaluate)
|
||||
b) Response detected (early close, proceed to evaluate)
|
||||
- The gate state determines the evaluation outcome
|
||||
|
||||
## Interrogation Message Format
|
||||
|
||||
```
|
||||
[DOG] HEALTH CHECK: Session {target}, respond ALIVE within {timeout}s or face termination.
|
||||
Warrant reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempt: {attempt}/3
|
||||
```
|
||||
|
||||
## Response Detection
|
||||
|
||||
The Dog checks tmux output for:
|
||||
1. The ALIVE keyword (explicit response)
|
||||
2. Any Claude output after the health check (implicit activity)
|
||||
|
||||
```go
|
||||
func (d *Dog) CheckForResponse() bool {
|
||||
output := tmux.CapturePane(d.Warrant.Target, 50) // Last 50 lines
|
||||
return strings.Contains(output, "ALIVE")
|
||||
}
|
||||
```
|
||||
|
||||
## Variables
|
||||
|
||||
| Variable | Source | Description |
|
||||
|-------------|-------------|-----------------------------------------------|
|
||||
| warrant_id | hook_bead | Bead ID of the death warrant |
|
||||
| target | warrant | Session name to interrogate |
|
||||
| reason | warrant | Why warrant was issued |
|
||||
| requester | warrant | Who filed the warrant (e.g., deacon, witness) |
|
||||
|
||||
## Integration
|
||||
|
||||
Dogs are NOT Claude sessions. This molecule is:
|
||||
1. A specification document (defines the state machine)
|
||||
2. A reference for Go implementation in internal/shutdown/
|
||||
3. A template for creating warrant-tracking beads
|
||||
|
||||
The Go implementation follows this spec exactly."""
|
||||
formula = "mol-shutdown-dance"
|
||||
version = 1
|
||||
|
||||
[squash]
|
||||
trigger = "on_complete"
|
||||
template_type = "operational"
|
||||
include_metrics = true
|
||||
|
||||
# ============================================================================
|
||||
# STEP 1: WARRANT_RECEIVED
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "warrant-received"
|
||||
title = "Receive and validate death warrant"
|
||||
description = """
|
||||
Entry point when Dog is allocated from pool.
|
||||
|
||||
**1. Read warrant from allocation:**
|
||||
The Dog receives a Warrant struct containing:
|
||||
- ID: Bead ID of the warrant
|
||||
- Target: Session name (e.g., "gt-gastown-Toast")
|
||||
- Reason: Why termination requested
|
||||
- Requester: Who filed (deacon, witness, mayor)
|
||||
- FiledAt: Timestamp
|
||||
|
||||
**2. Validate target exists:**
|
||||
```bash
|
||||
tmux has-session -t {target} 2>/dev/null
|
||||
```
|
||||
|
||||
If target doesn't exist:
|
||||
- Warrant is stale (already dead)
|
||||
- Skip to EPITAPH with outcome=already_dead
|
||||
|
||||
**3. Initialize state file:**
|
||||
Write initial state to ~/gt/deacon/dogs/active/{dog-id}.json
|
||||
|
||||
**4. Set initial attempt counter:**
|
||||
attempt = 1
|
||||
|
||||
**Exit criteria:** Warrant validated, target confirmed alive, state initialized."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 2: INTERROGATION_1 (60s timeout)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "interrogation-1"
|
||||
title = "First interrogation (60s timeout)"
|
||||
needs = ["warrant-received"]
|
||||
description = """
|
||||
First attempt to contact the session.
|
||||
|
||||
**1. Compose health check message:**
|
||||
```
|
||||
[DOG] HEALTH CHECK: Session {target}, respond ALIVE within 60s or face termination.
|
||||
Warrant reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempt: 1/3
|
||||
```
|
||||
|
||||
**2. Send via tmux:**
|
||||
```bash
|
||||
tmux send-keys -t {target} "{message}" Enter
|
||||
```
|
||||
|
||||
**3. Open timeout gate:**
|
||||
Gate configuration:
|
||||
- Type: timer
|
||||
- Timeout: 60 seconds
|
||||
- Close conditions:
|
||||
a) Timer expires
|
||||
b) ALIVE keyword detected in output
|
||||
|
||||
**4. Wait for gate to close:**
|
||||
The Dog waits (select on timer channel or early close signal).
|
||||
|
||||
**5. Record interrogation timestamp:**
|
||||
Update state file with last_message_at.
|
||||
|
||||
**Exit criteria:** Message sent, waiting for gate to close."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 3: EVALUATE_1
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "evaluate-1"
|
||||
title = "Evaluate first interrogation response"
|
||||
needs = ["interrogation-1"]
|
||||
description = """
|
||||
Check if session responded to first interrogation.
|
||||
|
||||
**1. Capture tmux output:**
|
||||
```bash
|
||||
tmux capture-pane -t {target} -p | tail -50
|
||||
```
|
||||
|
||||
**2. Check for ALIVE keyword:**
|
||||
```go
|
||||
if strings.Contains(output, "ALIVE") {
|
||||
return PARDONED
|
||||
}
|
||||
```
|
||||
|
||||
**3. Decision:**
|
||||
- ALIVE found → Proceed to PARDON
|
||||
- No ALIVE → Proceed to INTERROGATION_2
|
||||
|
||||
**Exit criteria:** Response evaluated, next step determined."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 4: INTERROGATION_2 (120s timeout)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "interrogation-2"
|
||||
title = "Second interrogation (120s timeout)"
|
||||
needs = ["evaluate-1"]
|
||||
gate = { type = "conditional", condition = "no_response_1" }
|
||||
description = """
|
||||
Second attempt with longer timeout.
|
||||
|
||||
Only executed if evaluate-1 found no response.
|
||||
|
||||
**1. Increment attempt:**
|
||||
attempt = 2
|
||||
|
||||
**2. Compose health check message:**
|
||||
```
|
||||
[DOG] HEALTH CHECK: Session {target}, respond ALIVE within 120s or face termination.
|
||||
Warrant reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempt: 2/3
|
||||
```
|
||||
|
||||
**3. Send via tmux:**
|
||||
```bash
|
||||
tmux send-keys -t {target} "{message}" Enter
|
||||
```
|
||||
|
||||
**4. Open timeout gate:**
|
||||
- Type: timer
|
||||
- Timeout: 120 seconds
|
||||
|
||||
**5. Wait for gate to close.**
|
||||
|
||||
**Exit criteria:** Second message sent, waiting for gate."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 5: EVALUATE_2
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "evaluate-2"
|
||||
title = "Evaluate second interrogation response"
|
||||
needs = ["interrogation-2"]
|
||||
description = """
|
||||
Check if session responded to second interrogation.
|
||||
|
||||
**1. Capture tmux output:**
|
||||
```bash
|
||||
tmux capture-pane -t {target} -p | tail -50
|
||||
```
|
||||
|
||||
**2. Check for ALIVE keyword.**
|
||||
|
||||
**3. Decision:**
|
||||
- ALIVE found → Proceed to PARDON
|
||||
- No ALIVE → Proceed to INTERROGATION_3
|
||||
|
||||
**Exit criteria:** Response evaluated, next step determined."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 6: INTERROGATION_3 (240s timeout)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "interrogation-3"
|
||||
title = "Final interrogation (240s timeout)"
|
||||
needs = ["evaluate-2"]
|
||||
gate = { type = "conditional", condition = "no_response_2" }
|
||||
description = """
|
||||
Final attempt before execution.
|
||||
|
||||
Only executed if evaluate-2 found no response.
|
||||
|
||||
**1. Increment attempt:**
|
||||
attempt = 3
|
||||
|
||||
**2. Compose health check message:**
|
||||
```
|
||||
[DOG] HEALTH CHECK: Session {target}, respond ALIVE within 240s or face termination.
|
||||
Warrant reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempt: 3/3
|
||||
```
|
||||
|
||||
**3. Send via tmux:**
|
||||
```bash
|
||||
tmux send-keys -t {target} "{message}" Enter
|
||||
```
|
||||
|
||||
**4. Open timeout gate:**
|
||||
- Type: timer
|
||||
- Timeout: 240 seconds
|
||||
- This is the FINAL chance
|
||||
|
||||
**5. Wait for gate to close.**
|
||||
|
||||
**Exit criteria:** Final message sent, waiting for gate."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 7: EVALUATE_3
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "evaluate-3"
|
||||
title = "Evaluate final interrogation response"
|
||||
needs = ["interrogation-3"]
|
||||
description = """
|
||||
Final evaluation before execution.
|
||||
|
||||
**1. Capture tmux output:**
|
||||
```bash
|
||||
tmux capture-pane -t {target} -p | tail -50
|
||||
```
|
||||
|
||||
**2. Check for ALIVE keyword.**
|
||||
|
||||
**3. Decision:**
|
||||
- ALIVE found → Proceed to PARDON
|
||||
- No ALIVE → Proceed to EXECUTE
|
||||
|
||||
**Exit criteria:** Final decision made."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 8: PARDON (success path)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "pardon"
|
||||
title = "Pardon session - cancel warrant"
|
||||
needs = ["evaluate-1", "evaluate-2", "evaluate-3"]
|
||||
gate = { type = "conditional", condition = "alive_detected" }
|
||||
description = """
|
||||
Session responded - cancel the death warrant.
|
||||
|
||||
**1. Update state:**
|
||||
state = PARDONED
|
||||
|
||||
**2. Record pardon details:**
|
||||
```json
|
||||
{
|
||||
"outcome": "pardoned",
|
||||
"attempt": {attempt},
|
||||
"response_time": "{time_since_last_interrogation}s",
|
||||
"pardoned_at": "{timestamp}"
|
||||
}
|
||||
```
|
||||
|
||||
**3. Cancel warrant bead:**
|
||||
```bash
|
||||
bd close {warrant_id} --reason "Session responded at attempt {attempt}"
|
||||
```
|
||||
|
||||
**4. Notify requester:**
|
||||
```bash
|
||||
gt mail send {requester}/ -s "PARDON: {target}" -m "Death warrant cancelled.
|
||||
Session responded after attempt {attempt}.
|
||||
Warrant: {warrant_id}
|
||||
Response detected: {timestamp}"
|
||||
```
|
||||
|
||||
**Exit criteria:** Warrant cancelled, requester notified."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 9: EXECUTE (termination path)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "execute"
|
||||
title = "Execute warrant - kill session"
|
||||
needs = ["evaluate-3"]
|
||||
gate = { type = "conditional", condition = "no_response_final" }
|
||||
description = """
|
||||
Session unresponsive after 3 attempts - execute the warrant.
|
||||
|
||||
**1. Update state:**
|
||||
state = EXECUTING
|
||||
|
||||
**2. Kill the tmux session:**
|
||||
```bash
|
||||
tmux kill-session -t {target}
|
||||
```
|
||||
|
||||
**3. Verify session is dead:**
|
||||
```bash
|
||||
tmux has-session -t {target} 2>/dev/null
|
||||
# Should fail (session gone)
|
||||
```
|
||||
|
||||
**4. If session still exists (kill failed):**
|
||||
- Force kill with tmux kill-server if isolated
|
||||
- Or escalate to Boot for manual intervention
|
||||
|
||||
**5. Record execution details:**
|
||||
```json
|
||||
{
|
||||
"outcome": "executed",
|
||||
"attempts": 3,
|
||||
"total_wait": "420s",
|
||||
"executed_at": "{timestamp}"
|
||||
}
|
||||
```
|
||||
|
||||
**Exit criteria:** Session terminated."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 10: EPITAPH (completion)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "epitaph"
|
||||
title = "Log cause of death and close warrant"
|
||||
needs = ["pardon", "execute"]
|
||||
description = """
|
||||
Final step - create audit record and release Dog back to pool.
|
||||
|
||||
**1. Compose epitaph based on outcome:**
|
||||
|
||||
For PARDONED:
|
||||
```
|
||||
EPITAPH: {target}
|
||||
Verdict: PARDONED
|
||||
Warrant: {warrant_id}
|
||||
Reason: {reason}
|
||||
Filed by: {requester}
|
||||
Response: Attempt {attempt}, after {wait_time}s
|
||||
Pardoned at: {timestamp}
|
||||
```
|
||||
|
||||
For EXECUTED:
|
||||
```
|
||||
EPITAPH: {target}
|
||||
Verdict: EXECUTED
|
||||
Warrant: {warrant_id}
|
||||
Reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempts: 3 (60s + 120s + 240s = 420s total)
|
||||
Executed at: {timestamp}
|
||||
```
|
||||
|
||||
For ALREADY_DEAD (target gone before interrogation):
|
||||
```
|
||||
EPITAPH: {target}
|
||||
Verdict: ALREADY_DEAD
|
||||
Warrant: {warrant_id}
|
||||
Reason: {reason}
|
||||
Filed by: {requester}
|
||||
Note: Target session not found at warrant processing
|
||||
```
|
||||
|
||||
**2. Close warrant bead:**
|
||||
```bash
|
||||
bd close {warrant_id} --reason "{epitaph_summary}"
|
||||
```
|
||||
|
||||
**3. Move state file to completed:**
|
||||
```bash
|
||||
mv ~/gt/deacon/dogs/active/{dog-id}.json ~/gt/deacon/dogs/completed/
|
||||
```
|
||||
|
||||
**4. Report to Boot:**
|
||||
Write completion file: ~/gt/deacon/dogs/active/{dog-id}.done
|
||||
```json
|
||||
{
|
||||
"dog_id": "{dog-id}",
|
||||
"warrant_id": "{warrant_id}",
|
||||
"target": "{target}",
|
||||
"outcome": "{pardoned|executed|already_dead}",
|
||||
"duration": "{total_duration}s"
|
||||
}
|
||||
```
|
||||
|
||||
**5. Release Dog to pool:**
|
||||
Dog resets state and returns to idle channel.
|
||||
|
||||
**Exit criteria:** Warrant closed, Dog released, audit complete."""
|
||||
|
||||
# ============================================================================
|
||||
# VARIABLES
|
||||
# ============================================================================
|
||||
[vars]
|
||||
[vars.warrant_id]
|
||||
description = "Bead ID of the death warrant being processed"
|
||||
required = true
|
||||
|
||||
[vars.target]
|
||||
description = "Session name to interrogate (e.g., gt-gastown-Toast)"
|
||||
required = true
|
||||
|
||||
[vars.reason]
|
||||
description = "Why the warrant was issued"
|
||||
required = true
|
||||
|
||||
[vars.requester]
|
||||
description = "Who filed the warrant (deacon, witness, mayor)"
|
||||
required = true
|
||||
default = "deacon"
|
||||
@@ -138,7 +138,8 @@ func (g *Git) CloneBare(url, dest string) error {
|
||||
if err := cmd.Run(); err != nil {
|
||||
return g.wrapError(err, stderr.String(), []string{"clone", "--bare", url})
|
||||
}
|
||||
return nil
|
||||
// Configure refspec so worktrees can fetch and see origin/* refs
|
||||
return configureRefspec(dest)
|
||||
}
|
||||
|
||||
// configureHooksPath sets core.hooksPath to use the repo's .githooks directory
|
||||
@@ -160,6 +161,21 @@ func configureHooksPath(repoPath string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// configureRefspec sets remote.origin.fetch to the standard refspec for bare repos.
|
||||
// Bare clones don't have this set by default, which breaks worktrees that need to
|
||||
// fetch and see origin/* refs. Without this, `git fetch` only updates FETCH_HEAD
|
||||
// and origin/main never appears in refs/remotes/origin/main.
|
||||
// See: https://github.com/anthropics/gastown/issues/286
|
||||
func configureRefspec(repoPath string) error {
|
||||
cmd := exec.Command("git", "-C", repoPath, "config", "remote.origin.fetch", "+refs/heads/*:refs/remotes/origin/*")
|
||||
var stderr bytes.Buffer
|
||||
cmd.Stderr = &stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("configuring refspec: %s", strings.TrimSpace(stderr.String()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CloneBareWithReference clones a bare repository using a local repo as an object reference.
|
||||
func (g *Git) CloneBareWithReference(url, dest, reference string) error {
|
||||
cmd := exec.Command("git", "clone", "--bare", "--reference-if-able", reference, url, dest)
|
||||
@@ -168,7 +184,8 @@ func (g *Git) CloneBareWithReference(url, dest, reference string) error {
|
||||
if err := cmd.Run(); err != nil {
|
||||
return g.wrapError(err, stderr.String(), []string{"clone", "--bare", "--reference-if-able", url})
|
||||
}
|
||||
return nil
|
||||
// Configure refspec so worktrees can fetch and see origin/* refs
|
||||
return configureRefspec(dest)
|
||||
}
|
||||
|
||||
// Checkout checks out the given ref.
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
)
|
||||
|
||||
// timeNow is a function that returns the current time. It can be overridden in tests.
|
||||
@@ -334,7 +335,7 @@ func (m *Mailbox) markReadBeads(id string) error {
|
||||
func (m *Mailbox) closeInDir(id, beadsDir string) error {
|
||||
args := []string{"close", id}
|
||||
// Pass session ID for work attribution if available
|
||||
if sessionID := os.Getenv("CLAUDE_SESSION_ID"); sessionID != "" {
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
args = append(args, "--session="+sessionID)
|
||||
}
|
||||
|
||||
|
||||
@@ -78,8 +78,18 @@ func (m *Manager) Start(agentOverride string) error {
|
||||
return fmt.Errorf("ensuring Claude settings: %w", err)
|
||||
}
|
||||
|
||||
// Create new tmux session
|
||||
if err := t.NewSession(sessionID, mayorDir); err != nil {
|
||||
// Build startup command first - the startup hook handles 'gt prime' automatically
|
||||
// Export GT_ROLE and BD_ACTOR in the command since tmux SetEnvironment only affects new panes
|
||||
startupCmd, err := config.BuildAgentStartupCommandWithAgentOverride("mayor", "mayor", "", "", agentOverride)
|
||||
if err != nil {
|
||||
return fmt.Errorf("building startup command: %w", err)
|
||||
}
|
||||
|
||||
// Create session with command directly to avoid send-keys race condition.
|
||||
// This runs the command as the pane's initial process, avoiding the shell
|
||||
// readiness timing issues that cause "bad pattern" and command-not-found errors.
|
||||
// See: https://github.com/anthropics/gastown/issues/280
|
||||
if err := t.NewSessionWithCommand(sessionID, mayorDir, startupCmd); err != nil {
|
||||
return fmt.Errorf("creating tmux session: %w", err)
|
||||
}
|
||||
|
||||
@@ -91,18 +101,6 @@ func (m *Manager) Start(agentOverride string) error {
|
||||
theme := tmux.MayorTheme()
|
||||
_ = t.ConfigureGasTownSession(sessionID, theme, "", "Mayor", "coordinator")
|
||||
|
||||
// Launch Claude - the startup hook handles 'gt prime' automatically
|
||||
// Export GT_ROLE and BD_ACTOR in the command since tmux SetEnvironment only affects new panes
|
||||
startupCmd, err := config.BuildAgentStartupCommandWithAgentOverride("mayor", "mayor", "", "", agentOverride)
|
||||
if err != nil {
|
||||
_ = t.KillSession(sessionID) // best-effort cleanup
|
||||
return fmt.Errorf("building startup command: %w", err)
|
||||
}
|
||||
if err := t.SendKeysDelayed(sessionID, startupCmd, 200); err != nil {
|
||||
_ = t.KillSession(sessionID) // best-effort cleanup
|
||||
return fmt.Errorf("starting Claude agent: %w", err)
|
||||
}
|
||||
|
||||
// Wait for Claude to start (non-fatal)
|
||||
if err := t.WaitForCommand(sessionID, constants.SupportedShells, constants.ClaudeStartTimeout); err != nil {
|
||||
// Non-fatal - try to continue anyway
|
||||
|
||||
40
internal/opencode/plugin.go
Normal file
40
internal/opencode/plugin.go
Normal file
@@ -0,0 +1,40 @@
|
||||
// Package opencode provides OpenCode plugin management.
|
||||
package opencode
|
||||
|
||||
import (
|
||||
"embed"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
//go:embed plugin/gastown.js
|
||||
var pluginFS embed.FS
|
||||
|
||||
// EnsurePluginAt ensures the Gas Town OpenCode plugin exists.
|
||||
// If the file already exists, it's left unchanged.
|
||||
func EnsurePluginAt(workDir, pluginDir, pluginFile string) error {
|
||||
if pluginDir == "" || pluginFile == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
pluginPath := filepath.Join(workDir, pluginDir, pluginFile)
|
||||
if _, err := os.Stat(pluginPath); err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(pluginPath), 0755); err != nil {
|
||||
return fmt.Errorf("creating plugin directory: %w", err)
|
||||
}
|
||||
|
||||
content, err := pluginFS.ReadFile("plugin/gastown.js")
|
||||
if err != nil {
|
||||
return fmt.Errorf("reading plugin template: %w", err)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(pluginPath, content, 0644); err != nil {
|
||||
return fmt.Errorf("writing plugin: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
32
internal/opencode/plugin/gastown.js
Normal file
32
internal/opencode/plugin/gastown.js
Normal file
@@ -0,0 +1,32 @@
|
||||
// Gas Town OpenCode plugin: hooks SessionStart/Compaction via events.
|
||||
export const GasTown = async ({ $, directory }) => {
|
||||
const role = (process.env.GT_ROLE || "").toLowerCase();
|
||||
const autonomousRoles = new Set(["polecat", "witness", "refinery", "deacon"]);
|
||||
let didInit = false;
|
||||
|
||||
const run = async (cmd) => {
|
||||
try {
|
||||
await $`/bin/sh -lc ${cmd}`.cwd(directory);
|
||||
} catch (err) {
|
||||
console.error(`[gastown] ${cmd} failed`, err?.message || err);
|
||||
}
|
||||
};
|
||||
|
||||
const onSessionCreated = async () => {
|
||||
if (didInit) return;
|
||||
didInit = true;
|
||||
await run("gt prime");
|
||||
if (autonomousRoles.has(role)) {
|
||||
await run("gt mail check --inject");
|
||||
}
|
||||
await run("gt nudge deacon session-started");
|
||||
};
|
||||
|
||||
return {
|
||||
event: async ({ event }) => {
|
||||
if (event?.type === "session.created") {
|
||||
await onSessionCreated();
|
||||
}
|
||||
},
|
||||
};
|
||||
};
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
@@ -177,11 +178,36 @@ func (m *Manager) repoBase() (*git.Git, error) {
|
||||
return git.NewGit(mayorPath), nil
|
||||
}
|
||||
|
||||
// polecatDir returns the directory for a polecat.
|
||||
// polecatDir returns the parent directory for a polecat.
|
||||
// This is polecats/<name>/ - the polecat's home directory.
|
||||
func (m *Manager) polecatDir(name string) string {
|
||||
return filepath.Join(m.rig.Path, "polecats", name)
|
||||
}
|
||||
|
||||
// clonePath returns the path where the git worktree lives.
|
||||
// New structure: polecats/<name>/<rigname>/ - gives LLMs recognizable repo context.
|
||||
// Falls back to old structure: polecats/<name>/ for backward compatibility.
|
||||
func (m *Manager) clonePath(name string) string {
|
||||
// New structure: polecats/<name>/<rigname>/
|
||||
newPath := filepath.Join(m.rig.Path, "polecats", name, m.rig.Name)
|
||||
if info, err := os.Stat(newPath); err == nil && info.IsDir() {
|
||||
return newPath
|
||||
}
|
||||
|
||||
// Old structure: polecats/<name>/ (backward compat)
|
||||
oldPath := filepath.Join(m.rig.Path, "polecats", name)
|
||||
if info, err := os.Stat(oldPath); err == nil && info.IsDir() {
|
||||
// Check if this is actually a git worktree (has .git file or dir)
|
||||
gitPath := filepath.Join(oldPath, ".git")
|
||||
if _, err := os.Stat(gitPath); err == nil {
|
||||
return oldPath
|
||||
}
|
||||
}
|
||||
|
||||
// Default to new structure for new polecats
|
||||
return newPath
|
||||
}
|
||||
|
||||
// exists checks if a polecat exists.
|
||||
func (m *Manager) exists(name string) bool {
|
||||
_, err := os.Stat(m.polecatDir(name))
|
||||
@@ -213,15 +239,18 @@ func (m *Manager) AddWithOptions(name string, opts AddOptions) (*Polecat, error)
|
||||
return nil, ErrPolecatExists
|
||||
}
|
||||
|
||||
polecatPath := m.polecatDir(name)
|
||||
// New structure: polecats/<name>/<rigname>/ for LLM ergonomics
|
||||
// The polecat's home dir is polecats/<name>/, worktree is polecats/<name>/<rigname>/
|
||||
polecatDir := m.polecatDir(name)
|
||||
clonePath := filepath.Join(polecatDir, m.rig.Name)
|
||||
|
||||
// Unique branch per run - prevents drift from stale branches
|
||||
// Use base36 encoding for shorter branch names (8 chars vs 13 digits)
|
||||
branchName := fmt.Sprintf("polecat/%s-%s", name, strconv.FormatInt(time.Now().UnixMilli(), 36))
|
||||
|
||||
// Create polecats directory if needed
|
||||
polecatsDir := filepath.Join(m.rig.Path, "polecats")
|
||||
if err := os.MkdirAll(polecatsDir, 0755); err != nil {
|
||||
return nil, fmt.Errorf("creating polecats dir: %w", err)
|
||||
// Create polecat directory (polecats/<name>/)
|
||||
if err := os.MkdirAll(polecatDir, 0755); err != nil {
|
||||
return nil, fmt.Errorf("creating polecat dir: %w", err)
|
||||
}
|
||||
|
||||
// Get the repo base (bare repo or mayor/rig)
|
||||
@@ -232,7 +261,8 @@ func (m *Manager) AddWithOptions(name string, opts AddOptions) (*Polecat, error)
|
||||
|
||||
// Always create fresh branch - unique name guarantees no collision
|
||||
// git worktree add -b polecat/<name>-<timestamp> <path>
|
||||
if err := repoGit.WorktreeAdd(polecatPath, branchName); err != nil {
|
||||
// Worktree goes in polecats/<name>/<rigname>/ for LLM ergonomics
|
||||
if err := repoGit.WorktreeAdd(clonePath, branchName); err != nil {
|
||||
return nil, fmt.Errorf("creating worktree: %w", err)
|
||||
}
|
||||
|
||||
@@ -243,12 +273,19 @@ func (m *Manager) AddWithOptions(name string, opts AddOptions) (*Polecat, error)
|
||||
|
||||
// Set up shared beads: polecat uses rig's .beads via redirect file.
|
||||
// This eliminates git sync overhead - all polecats share one database.
|
||||
if err := m.setupSharedBeads(polecatPath); err != nil {
|
||||
if err := m.setupSharedBeads(clonePath); err != nil {
|
||||
// Non-fatal - polecat can still work with local beads
|
||||
// Log warning but don't fail the spawn
|
||||
fmt.Printf("Warning: could not set up shared beads: %v\n", err)
|
||||
}
|
||||
|
||||
// Copy overlay files from .runtime/overlay/ to polecat root.
|
||||
// This allows services to have .env and other config files at their root.
|
||||
if err := rig.CopyOverlay(m.rig.Path, clonePath); err != nil {
|
||||
// Non-fatal - log warning but continue
|
||||
fmt.Printf("Warning: could not copy overlay files: %v\n", err)
|
||||
}
|
||||
|
||||
// NOTE: Slash commands (.claude/commands/) are provisioned at town level by gt install.
|
||||
// All agents inherit them via Claude's directory traversal - no per-workspace copies needed.
|
||||
|
||||
@@ -275,7 +312,7 @@ func (m *Manager) AddWithOptions(name string, opts AddOptions) (*Polecat, error)
|
||||
Name: name,
|
||||
Rig: m.rig.Name,
|
||||
State: StateWorking, // Transient model: polecat spawns with work
|
||||
ClonePath: polecatPath,
|
||||
ClonePath: clonePath,
|
||||
Branch: branchName,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
@@ -302,7 +339,10 @@ func (m *Manager) RemoveWithOptions(name string, force, nuclear bool) error {
|
||||
return ErrPolecatNotFound
|
||||
}
|
||||
|
||||
polecatPath := m.polecatDir(name)
|
||||
// Clone path is where the git worktree lives (new or old structure)
|
||||
clonePath := m.clonePath(name)
|
||||
// Polecat dir is the parent directory (polecats/<name>/)
|
||||
polecatDir := m.polecatDir(name)
|
||||
|
||||
// Check for uncommitted work unless bypassed
|
||||
if !nuclear {
|
||||
@@ -317,7 +357,7 @@ func (m *Manager) RemoveWithOptions(name string, force, nuclear bool) error {
|
||||
}
|
||||
} else {
|
||||
// Fallback path: Check git directly (for polecats that haven't reported yet)
|
||||
polecatGit := git.NewGit(polecatPath)
|
||||
polecatGit := git.NewGit(clonePath)
|
||||
status, err := polecatGit.CheckUncommittedWork()
|
||||
if err == nil && !status.Clean() {
|
||||
// For backward compatibility: force only bypasses uncommitted changes, not stashes/unpushed
|
||||
@@ -337,18 +377,24 @@ func (m *Manager) RemoveWithOptions(name string, force, nuclear bool) error {
|
||||
repoGit, err := m.repoBase()
|
||||
if err != nil {
|
||||
// Fall back to direct removal if repo base not found
|
||||
return os.RemoveAll(polecatPath)
|
||||
return os.RemoveAll(polecatDir)
|
||||
}
|
||||
|
||||
// Try to remove as a worktree first (use force flag for worktree removal too)
|
||||
if err := repoGit.WorktreeRemove(polecatPath, force); err != nil {
|
||||
if err := repoGit.WorktreeRemove(clonePath, force); err != nil {
|
||||
// Fall back to direct removal if worktree removal fails
|
||||
// (e.g., if this is an old-style clone, not a worktree)
|
||||
if removeErr := os.RemoveAll(polecatPath); removeErr != nil {
|
||||
return fmt.Errorf("removing polecat dir: %w", removeErr)
|
||||
if removeErr := os.RemoveAll(clonePath); removeErr != nil {
|
||||
return fmt.Errorf("removing clone path: %w", removeErr)
|
||||
}
|
||||
}
|
||||
|
||||
// Also remove the parent polecat directory if it's now empty
|
||||
// (for new structure: polecats/<name>/ contains only polecats/<name>/<rigname>/)
|
||||
if polecatDir != clonePath {
|
||||
_ = os.Remove(polecatDir) // Non-fatal: only removes if empty
|
||||
}
|
||||
|
||||
// Prune any stale worktree entries (non-fatal: cleanup only)
|
||||
_ = repoGit.WorktreePrune()
|
||||
|
||||
@@ -411,13 +457,19 @@ func (m *Manager) RepairWorktree(name string, force bool) (*Polecat, error) {
|
||||
// RepairWorktreeWithOptions repairs a stale polecat and creates a fresh worktree with options.
|
||||
// This is NOT for normal operation - see RepairWorktree for context.
|
||||
// Allows setting hook_bead atomically at repair time.
|
||||
// After repair, uses new structure: polecats/<name>/<rigname>/
|
||||
func (m *Manager) RepairWorktreeWithOptions(name string, force bool, opts AddOptions) (*Polecat, error) {
|
||||
if !m.exists(name) {
|
||||
return nil, ErrPolecatNotFound
|
||||
}
|
||||
|
||||
polecatPath := m.polecatDir(name)
|
||||
polecatGit := git.NewGit(polecatPath)
|
||||
// Get the old clone path (may be old or new structure)
|
||||
oldClonePath := m.clonePath(name)
|
||||
polecatGit := git.NewGit(oldClonePath)
|
||||
|
||||
// New clone path uses new structure
|
||||
polecatDir := m.polecatDir(name)
|
||||
newClonePath := filepath.Join(polecatDir, m.rig.Name)
|
||||
|
||||
// Get the repo base (bare repo or mayor/rig)
|
||||
repoGit, err := m.repoBase()
|
||||
@@ -441,11 +493,11 @@ func (m *Manager) RepairWorktreeWithOptions(name string, force bool, opts AddOpt
|
||||
}
|
||||
}
|
||||
|
||||
// Remove the worktree (use force for git worktree removal)
|
||||
if err := repoGit.WorktreeRemove(polecatPath, true); err != nil {
|
||||
// Remove the old worktree (use force for git worktree removal)
|
||||
if err := repoGit.WorktreeRemove(oldClonePath, true); err != nil {
|
||||
// Fall back to direct removal
|
||||
if removeErr := os.RemoveAll(polecatPath); removeErr != nil {
|
||||
return nil, fmt.Errorf("removing polecat dir: %w", removeErr)
|
||||
if removeErr := os.RemoveAll(oldClonePath); removeErr != nil {
|
||||
return nil, fmt.Errorf("removing old clone path: %w", removeErr)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -455,6 +507,11 @@ func (m *Manager) RepairWorktreeWithOptions(name string, force bool, opts AddOpt
|
||||
// Fetch latest from origin to ensure we have fresh commits (non-fatal: may be offline)
|
||||
_ = repoGit.Fetch("origin")
|
||||
|
||||
// Ensure polecat directory exists for new structure
|
||||
if err := os.MkdirAll(polecatDir, 0755); err != nil {
|
||||
return nil, fmt.Errorf("creating polecat dir: %w", err)
|
||||
}
|
||||
|
||||
// Determine the start point for the new worktree
|
||||
// Use origin/<default-branch> to ensure we start from latest fetched commits
|
||||
defaultBranch := "main"
|
||||
@@ -468,7 +525,7 @@ func (m *Manager) RepairWorktreeWithOptions(name string, force bool, opts AddOpt
|
||||
// and will be cleaned up by garbage collection
|
||||
// Use base36 encoding for shorter branch names (8 chars vs 13 digits)
|
||||
branchName := fmt.Sprintf("polecat/%s-%s", name, strconv.FormatInt(time.Now().UnixMilli(), 36))
|
||||
if err := repoGit.WorktreeAddFromRef(polecatPath, branchName, startPoint); err != nil {
|
||||
if err := repoGit.WorktreeAddFromRef(newClonePath, branchName, startPoint); err != nil {
|
||||
return nil, fmt.Errorf("creating fresh worktree from %s: %w", startPoint, err)
|
||||
}
|
||||
|
||||
@@ -476,10 +533,15 @@ func (m *Manager) RepairWorktreeWithOptions(name string, force bool, opts AddOpt
|
||||
// Gas Town context is injected ephemerally via SessionStart hook (gt prime).
|
||||
|
||||
// Set up shared beads
|
||||
if err := m.setupSharedBeads(polecatPath); err != nil {
|
||||
if err := m.setupSharedBeads(newClonePath); err != nil {
|
||||
fmt.Printf("Warning: could not set up shared beads: %v\n", err)
|
||||
}
|
||||
|
||||
// Copy overlay files from .runtime/overlay/ to polecat root.
|
||||
if err := rig.CopyOverlay(m.rig.Path, newClonePath); err != nil {
|
||||
fmt.Printf("Warning: could not copy overlay files: %v\n", err)
|
||||
}
|
||||
|
||||
// NOTE: Slash commands inherited from town level - no per-workspace copies needed.
|
||||
|
||||
// Create fresh agent bead for ZFC compliance
|
||||
@@ -501,7 +563,7 @@ func (m *Manager) RepairWorktreeWithOptions(name string, force bool, opts AddOpt
|
||||
Name: name,
|
||||
Rig: m.rig.Name,
|
||||
State: StateWorking,
|
||||
ClonePath: polecatPath,
|
||||
ClonePath: newClonePath,
|
||||
Branch: branchName,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
@@ -547,6 +609,9 @@ func (m *Manager) List() ([]*Polecat, error) {
|
||||
if !entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(entry.Name(), ".") {
|
||||
continue
|
||||
}
|
||||
|
||||
polecat, err := m.Get(entry.Name())
|
||||
if err != nil {
|
||||
@@ -674,10 +739,12 @@ func (m *Manager) ClearIssue(name string) error {
|
||||
// Transient polecats should always have work; no work means ready for Witness cleanup.
|
||||
// We don't interpret issue status (ZFC: Go is transport, not decision-maker).
|
||||
func (m *Manager) loadFromBeads(name string) (*Polecat, error) {
|
||||
polecatPath := m.polecatDir(name)
|
||||
// Use clonePath which handles both new (polecats/<name>/<rigname>/)
|
||||
// and old (polecats/<name>/) structures
|
||||
clonePath := m.clonePath(name)
|
||||
|
||||
// Get actual branch from worktree (branches are now timestamped)
|
||||
polecatGit := git.NewGit(polecatPath)
|
||||
polecatGit := git.NewGit(clonePath)
|
||||
branchName, err := polecatGit.CurrentBranch()
|
||||
if err != nil {
|
||||
// Fall back to old format if we can't read the branch
|
||||
@@ -694,7 +761,7 @@ func (m *Manager) loadFromBeads(name string) (*Polecat, error) {
|
||||
Name: name,
|
||||
Rig: m.rig.Name,
|
||||
State: StateWorking,
|
||||
ClonePath: polecatPath,
|
||||
ClonePath: clonePath,
|
||||
Branch: branchName,
|
||||
}, nil
|
||||
}
|
||||
@@ -712,7 +779,7 @@ func (m *Manager) loadFromBeads(name string) (*Polecat, error) {
|
||||
Name: name,
|
||||
Rig: m.rig.Name,
|
||||
State: state,
|
||||
ClonePath: polecatPath,
|
||||
ClonePath: clonePath,
|
||||
Branch: branchName,
|
||||
Issue: issueID,
|
||||
}, nil
|
||||
@@ -720,9 +787,9 @@ func (m *Manager) loadFromBeads(name string) (*Polecat, error) {
|
||||
|
||||
// setupSharedBeads creates a redirect file so the polecat uses the rig's shared .beads database.
|
||||
// This eliminates the need for git sync between polecat clones - all polecats share one database.
|
||||
func (m *Manager) setupSharedBeads(polecatPath string) error {
|
||||
func (m *Manager) setupSharedBeads(clonePath string) error {
|
||||
townRoot := filepath.Dir(m.rig.Path)
|
||||
return beads.SetupRedirect(townRoot, polecatPath)
|
||||
return beads.SetupRedirect(townRoot, clonePath)
|
||||
}
|
||||
|
||||
// CleanupStaleBranches removes orphaned polecat branches that are no longer in use.
|
||||
|
||||
@@ -2,6 +2,7 @@ package polecat
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
@@ -144,6 +145,13 @@ func TestAssigneeID(t *testing.T) {
|
||||
func TestGetReturnsWorkingWithoutBeads(t *testing.T) {
|
||||
// When beads is not available, Get should return StateWorking
|
||||
// (assume the polecat is doing something if it exists)
|
||||
//
|
||||
// Skip if bd is installed - the test assumes bd is unavailable, but when bd
|
||||
// is present it queries beads and returns actual state instead of defaulting.
|
||||
if _, err := exec.LookPath("bd"); err == nil {
|
||||
t.Skip("skipping: bd is installed, test requires bd to be unavailable")
|
||||
}
|
||||
|
||||
root := t.TempDir()
|
||||
polecatDir := filepath.Join(root, "polecats", "Test")
|
||||
if err := os.MkdirAll(polecatDir, 0755); err != nil {
|
||||
@@ -186,6 +194,9 @@ func TestListWithPolecats(t *testing.T) {
|
||||
t.Fatalf("mkdir: %v", err)
|
||||
}
|
||||
}
|
||||
if err := os.MkdirAll(filepath.Join(root, "polecats", ".claude"), 0755); err != nil {
|
||||
t.Fatalf("mkdir .claude: %v", err)
|
||||
}
|
||||
// Create mayor/rig for beads path
|
||||
mayorRig := filepath.Join(root, "mayor", "rig")
|
||||
if err := os.MkdirAll(mayorRig, 0755); err != nil {
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/mail"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
)
|
||||
@@ -195,15 +196,17 @@ func TriggerPendingSpawns(townRoot string, timeout time.Duration) ([]TriggerResu
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if Claude is ready (non-blocking poll)
|
||||
err = t.WaitForClaudeReady(ps.Session, timeout)
|
||||
// Check if runtime is ready (non-blocking poll)
|
||||
rigPath := filepath.Join(townRoot, ps.Rig)
|
||||
runtimeConfig := config.LoadRuntimeConfig(rigPath)
|
||||
err = t.WaitForRuntimeReady(ps.Session, runtimeConfig, timeout)
|
||||
if err != nil {
|
||||
// Not ready yet - keep in pending
|
||||
remaining = append(remaining, ps)
|
||||
continue
|
||||
}
|
||||
|
||||
// Claude is ready - send trigger
|
||||
// Runtime is ready - send trigger
|
||||
triggerMsg := "Begin."
|
||||
if err := t.NudgeSession(ps.Session, triggerMsg); err != nil {
|
||||
result.Error = fmt.Errorf("nudging session: %w", err)
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user