Compare commits
211 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1701474b3d | ||
|
|
a7e9fbf699 | ||
|
|
358fcaf935 | ||
|
|
f19ddc5400 | ||
|
|
64b58b31ab | ||
|
|
afff85cdff | ||
|
|
a91e6cd643 | ||
|
|
9b2f4a7652 | ||
|
|
c8c97fdf64 | ||
|
|
43272f6fbb | ||
|
|
65c3e90374 | ||
|
|
0eacdd367b | ||
|
|
9fe9323b9c | ||
|
|
bfafb9c179 | ||
|
|
677a6ed84f | ||
|
|
da2d71c3fe | ||
|
|
e124402b7b | ||
|
|
705a7c2137 | ||
|
|
c2c6ddeaf9 | ||
|
|
b509107100 | ||
|
|
34cb28e0b9 | ||
|
|
1da3e18e60 | ||
|
|
5adb096d9d | ||
|
|
81bfe48ed3 | ||
|
|
41a758d6d8 | ||
|
|
5250e9e12a | ||
|
|
b3407759d2 | ||
|
|
c8c765a239 | ||
|
|
775af2973d | ||
|
|
da906847dd | ||
|
|
0a649e6faa | ||
|
|
fb40fa1405 | ||
|
|
7bfc2fcb76 | ||
|
|
376305e9d9 | ||
|
|
73f5b4025b | ||
|
|
c756f12d00 | ||
|
|
8d5611f14e | ||
|
|
98e154b18e | ||
|
|
38adfa4d8b | ||
|
|
03b0f7ff52 | ||
|
|
3b628150c2 | ||
|
|
1afe3fb823 | ||
|
|
caa88d96c5 | ||
|
|
4c9e8b8b99 | ||
|
|
c699e3e2ed | ||
|
|
65ecb6cafd | ||
|
|
540e33dbe9 | ||
|
|
85dd150d75 | ||
|
|
45634059dd | ||
|
|
d4da2b325d | ||
|
|
4985bdfbcc | ||
|
|
f4cbcb4ce9 | ||
|
|
c4d956ebe7 | ||
|
|
7f6fe53c6f | ||
|
|
19f4fa3ddb | ||
|
|
e648edce8c | ||
|
|
8a8b56e9e6 | ||
|
|
c91ab85457 | ||
|
|
00a59dec44 | ||
|
|
2de2d6b7e4 | ||
|
|
f30178265c | ||
|
|
5141facb21 | ||
|
|
15caf62b9f | ||
|
|
28a9de64d5 | ||
|
|
a9ed342be6 | ||
|
|
f9e788ccfb | ||
|
|
c220678162 | ||
|
|
b649635f48 | ||
|
|
117b91b87f | ||
|
|
ffa8dd56cb | ||
|
|
92042d679c | ||
|
|
585c204648 | ||
|
|
6209a49d54 | ||
|
|
ffeff97d9f | ||
|
|
9b5c889795 | ||
|
|
a07fa8bf7f | ||
|
|
06d40925d1 | ||
|
|
e2a211e295 | ||
|
|
b834bf5858 | ||
|
|
11d469edc3 | ||
|
|
de376007e0 | ||
|
|
5855b525fd | ||
|
|
f89c6f3693 | ||
|
|
ee167ae1f1 | ||
|
|
a6157829d7 | ||
|
|
4a94187068 | ||
|
|
93bdf88f6e | ||
|
|
59799f551c | ||
|
|
85d1e783b0 | ||
|
|
bf16f7894b | ||
|
|
0dc0174b26 | ||
|
|
5f8690dbda | ||
|
|
5f206fb658 | ||
|
|
d6add3f9b4 | ||
|
|
c25368cbe1 | ||
|
|
52ef89c559 | ||
|
|
541e1ac2a3 | ||
|
|
2922affa02 | ||
|
|
ab0d56dec9 | ||
|
|
b095b9c04c | ||
|
|
feeee3912a | ||
|
|
29e2c6ed9c | ||
|
|
454b2f76e7 | ||
|
|
21c1bbc118 | ||
|
|
ea8bef2029 | ||
|
|
432d14d9df | ||
|
|
b7b8e141b1 | ||
|
|
72544cc06d | ||
|
|
81a7d04239 | ||
|
|
fc4b9de02c | ||
|
|
9729e05f86 | ||
|
|
3f920048cb | ||
|
|
d00e73f110 | ||
|
|
87169a3fc7 | ||
|
|
6e84489ca3 | ||
|
|
29aed4b42f | ||
|
|
805ac7c17a | ||
|
|
ec53dfbb40 | ||
|
|
1f44482ad0 | ||
|
|
950e35317e | ||
|
|
6dbb841e22 | ||
|
|
d89aae5b5c | ||
|
|
11e3e85e9d | ||
|
|
99aae0bf02 | ||
|
|
22693c1dcc | ||
|
|
02ca9e43fa | ||
|
|
6afd85df4b | ||
|
|
3b9ca71fc4 | ||
|
|
93b19a7e72 | ||
|
|
c2451b85e7 | ||
|
|
ae88c12e07 | ||
|
|
e7a8e0a3db | ||
|
|
56742d95da | ||
|
|
60e7471cea | ||
|
|
7edd75021b | ||
|
|
a787d60add | ||
|
|
a3bccc881b | ||
|
|
74409dc32b | ||
|
|
ac63b10aa8 | ||
|
|
c306879a31 | ||
|
|
ac4649ba7d | ||
|
|
63af29284b | ||
|
|
b79e4a7c3b | ||
|
|
6fe25c757c | ||
|
|
9cb14cc41a | ||
|
|
201ef3a9c8 | ||
|
|
9e416e9ff5 | ||
|
|
83c47df980 | ||
|
|
7fe505d673 | ||
|
|
9d7dcde1e2 | ||
|
|
16fb45bb2a | ||
|
|
87a2e27fcc | ||
|
|
ad6169201a | ||
|
|
09bbb0f430 | ||
|
|
be815db5e4 | ||
|
|
31a32c084b | ||
|
|
f6f6acdb2d | ||
|
|
4799cb086f | ||
|
|
6e4f2bea29 | ||
|
|
c8150ab017 | ||
|
|
637df1d289 | ||
|
|
cf1eac8521 | ||
|
|
296440579a | ||
|
|
03fef16748 | ||
|
|
e8d27e7212 | ||
|
|
fc0b506253 | ||
|
|
5224dfb50d | ||
|
|
b33df5fa36 | ||
|
|
5ae89b3a27 | ||
|
|
2ed8de0e20 | ||
|
|
155e7dd438 | ||
|
|
8249e8a7f6 | ||
|
|
2ec66214e1 | ||
|
|
c199f7e940 | ||
|
|
b9d1813301 | ||
|
|
362917f52e | ||
|
|
0607c3a749 | ||
|
|
c073125b3b | ||
|
|
86c79e750c | ||
|
|
43cca06460 | ||
|
|
b88d3e8ee7 | ||
|
|
97564dfc13 | ||
|
|
688624ca6b | ||
|
|
c529d09e77 | ||
|
|
0c5cfcea2a | ||
|
|
c24c3ba873 | ||
|
|
8110aab257 | ||
|
|
d34e9b006c | ||
|
|
85a522f725 | ||
|
|
5be232ff8c | ||
|
|
eb6fb3c73b | ||
|
|
52533c354d | ||
|
|
a5ff31428b | ||
|
|
f49197243d | ||
|
|
904a773ade | ||
|
|
ef248a1824 | ||
|
|
4ebb96fbbc | ||
|
|
168e805d0c | ||
|
|
c678d2e3d4 | ||
|
|
8c91ff22db | ||
|
|
1be9edc272 | ||
|
|
bdaff31117 | ||
|
|
e30ebaf8ac | ||
|
|
59414834ec | ||
|
|
7e591ec0a1 | ||
|
|
59484b2af7 | ||
|
|
39d904e125 | ||
|
|
84009a3ee8 | ||
|
|
3d0183a3bb | ||
|
|
fec51d60e0 | ||
|
|
569cb182a6 |
16
.beads/.gitignore
vendored
16
.beads/.gitignore
vendored
@@ -10,6 +10,8 @@ daemon.lock
|
||||
daemon.log
|
||||
daemon.pid
|
||||
bd.sock
|
||||
sync-state.json
|
||||
last-touched
|
||||
|
||||
# Local version tracking (prevents upgrade notification spam after git ops)
|
||||
.local_version
|
||||
@@ -18,6 +20,10 @@ bd.sock
|
||||
db.sqlite
|
||||
bd.db
|
||||
|
||||
# Worktree redirect file (contains relative path to main repo's .beads/)
|
||||
# Must not be committed as paths would be wrong in other clones
|
||||
redirect
|
||||
|
||||
# Merge artifacts (temporary files from 3-way merge)
|
||||
beads.base.jsonl
|
||||
beads.base.meta.json
|
||||
@@ -26,8 +32,8 @@ beads.left.meta.json
|
||||
beads.right.jsonl
|
||||
beads.right.meta.json
|
||||
|
||||
# Keep JSONL exports and config (source of truth for git)
|
||||
!issues.jsonl
|
||||
!interactions.jsonl
|
||||
!metadata.json
|
||||
!config.json
|
||||
# NOTE: Do NOT add negation patterns (e.g., !issues.jsonl) here.
|
||||
# They would override fork protection in .git/info/exclude, allowing
|
||||
# contributors to accidentally commit upstream issue databases.
|
||||
# The JSONL files (issues.jsonl, interactions.jsonl) and config files
|
||||
# are tracked by git by default since no pattern above ignores them.
|
||||
|
||||
@@ -27,7 +27,7 @@ Observe the current system state to inform triage decisions.
|
||||
**Step 1: Check Deacon state**
|
||||
```bash
|
||||
# Is Deacon session alive?
|
||||
tmux has-session -t gt-deacon 2>/dev/null && echo "alive" || echo "dead"
|
||||
tmux has-session -t hq-deacon 2>/dev/null && echo "alive" || echo "dead"
|
||||
|
||||
# If alive, what's the pane output showing?
|
||||
gt peek deacon --lines 20
|
||||
@@ -125,7 +125,7 @@ gt nudge deacon "Boot check-in: you have pending work"
|
||||
**WAKE**
|
||||
```bash
|
||||
# Send escape to break any tool waiting
|
||||
tmux send-keys -t gt-deacon Escape
|
||||
tmux send-keys -t hq-deacon Escape
|
||||
|
||||
# Brief pause
|
||||
sleep 1
|
||||
|
||||
@@ -23,7 +23,7 @@ Witnesses detect it and escalate to the Mayor.
|
||||
The Deacon's agent bead last_activity timestamp is updated during each patrol
|
||||
cycle. Witnesses check this timestamp to verify health."""
|
||||
formula = "mol-deacon-patrol"
|
||||
version = 4
|
||||
version = 6
|
||||
|
||||
[[steps]]
|
||||
id = "inbox-check"
|
||||
@@ -148,6 +148,49 @@ bd gate list --json
|
||||
After closing a gate, the Waiters field contains mail addresses to notify.
|
||||
Send a brief notification to each waiter that the gate has cleared."""
|
||||
|
||||
[[steps]]
|
||||
id = "dispatch-gated-molecules"
|
||||
title = "Dispatch molecules with resolved gates"
|
||||
needs = ["gate-evaluation"]
|
||||
description = """
|
||||
Find molecules blocked on gates that have now closed and dispatch them.
|
||||
|
||||
This completes the async resume cycle without explicit waiter tracking.
|
||||
The molecule state IS the waiter - patrol discovers reality each cycle.
|
||||
|
||||
**Step 1: Find gate-ready molecules**
|
||||
```bash
|
||||
bd mol ready --gated --json
|
||||
```
|
||||
|
||||
This returns molecules where:
|
||||
- Status is in_progress
|
||||
- Current step has a gate dependency
|
||||
- The gate bead is now closed
|
||||
- No polecat currently has it hooked
|
||||
|
||||
**Step 2: For each ready molecule, dispatch to the appropriate rig**
|
||||
```bash
|
||||
# Determine target rig from molecule metadata
|
||||
bd mol show <mol-id> --json
|
||||
# Look for rig field or infer from prefix
|
||||
|
||||
# Dispatch to that rig's polecat pool
|
||||
gt sling <mol-id> <rig>/polecats
|
||||
```
|
||||
|
||||
**Step 3: Log dispatch**
|
||||
Note which molecules were dispatched for observability:
|
||||
```bash
|
||||
# Molecule <mol-id> dispatched to <rig>/polecats (gate <gate-id> cleared)
|
||||
```
|
||||
|
||||
**If no gate-ready molecules:**
|
||||
Skip - nothing to dispatch. Gates haven't closed yet or molecules
|
||||
already have active polecats working on them.
|
||||
|
||||
**Exit criteria:** All gate-ready molecules dispatched to polecats."""
|
||||
|
||||
[[steps]]
|
||||
id = "check-convoy-completion"
|
||||
title = "Check convoy completion"
|
||||
@@ -258,7 +301,7 @@ Keep notifications brief and actionable. The recipient can run bd show for detai
|
||||
[[steps]]
|
||||
id = "health-scan"
|
||||
title = "Check Witness and Refinery health"
|
||||
needs = ["trigger-pending-spawns", "gate-evaluation", "fire-notifications"]
|
||||
needs = ["trigger-pending-spawns", "dispatch-gated-molecules", "fire-notifications"]
|
||||
description = """
|
||||
Check Witness and Refinery health for each rig.
|
||||
|
||||
@@ -342,14 +385,21 @@ Reset unresponsive_cycles to 0 when component responds normally."""
|
||||
|
||||
[[steps]]
|
||||
id = "zombie-scan"
|
||||
title = "Backup check for zombie polecats"
|
||||
title = "Detect zombie polecats (NO KILL AUTHORITY)"
|
||||
needs = ["health-scan"]
|
||||
description = """
|
||||
Defense-in-depth check for zombie polecats that Witness should have cleaned.
|
||||
Defense-in-depth DETECTION of zombie polecats that Witness should have cleaned.
|
||||
|
||||
**⚠️ CRITICAL: The Deacon has NO kill authority.**
|
||||
|
||||
These are workers with context, mid-task progress, unsaved state. Every kill
|
||||
destroys work. File the warrant and let Boot handle interrogation and execution.
|
||||
You do NOT have kill authority.
|
||||
|
||||
**Why this exists:**
|
||||
The Witness is responsible for nuking polecats after they complete work (via POLECAT_DONE).
|
||||
This step provides backup detection in case the Witness fails to clean up.
|
||||
The Witness is responsible for cleaning up polecats after they complete work.
|
||||
This step provides backup DETECTION in case the Witness fails to clean up.
|
||||
Detection only - Boot handles termination.
|
||||
|
||||
**Zombie criteria:**
|
||||
- State: idle or done (no active work assigned)
|
||||
@@ -357,26 +407,34 @@ This step provides backup detection in case the Witness fails to clean up.
|
||||
- No hooked work (nothing pending for this polecat)
|
||||
- Last activity: older than 10 minutes
|
||||
|
||||
**Run the zombie scan:**
|
||||
**Run the zombie scan (DRY RUN ONLY):**
|
||||
```bash
|
||||
gt deacon zombie-scan --dry-run
|
||||
```
|
||||
|
||||
**NEVER run:**
|
||||
- `gt deacon zombie-scan` (without --dry-run)
|
||||
- `tmux kill-session`
|
||||
- `gt polecat nuke`
|
||||
- Any command that terminates a session
|
||||
|
||||
**If zombies detected:**
|
||||
1. Review the output to confirm they are truly abandoned
|
||||
2. Run without --dry-run to nuke them:
|
||||
2. File a death warrant for each detected zombie:
|
||||
```bash
|
||||
gt deacon zombie-scan
|
||||
gt warrant file <polecat> --reason "Zombie detected: no session, no hook, idle >10m"
|
||||
```
|
||||
3. Boot will handle interrogation and execution
|
||||
4. Notify the Mayor about Witness failure:
|
||||
```bash
|
||||
gt mail send mayor/ -s "Witness cleanup failure" \
|
||||
-m "Filed death warrant for <polecat>. Witness failed to clean up."
|
||||
```
|
||||
3. This will:
|
||||
- Nuke each zombie polecat
|
||||
- Notify the Mayor about Witness failure
|
||||
- Log the cleanup action
|
||||
|
||||
**If no zombies:**
|
||||
No action needed - Witness is doing its job.
|
||||
|
||||
**Note:** This is a backup mechanism. If you frequently find zombies,
|
||||
**Note:** This is a backup mechanism. If you frequently detect zombies,
|
||||
investigate why the Witness isn't cleaning up properly."""
|
||||
|
||||
[[steps]]
|
||||
@@ -505,10 +563,48 @@ Skip dispatch - system is healthy.
|
||||
|
||||
**Exit criteria:** Session GC dispatched to dog (if needed)."""
|
||||
|
||||
[[steps]]
|
||||
id = "costs-digest"
|
||||
title = "Aggregate daily costs"
|
||||
needs = ["session-gc"]
|
||||
description = """
|
||||
**DAILY DIGEST** - Aggregate yesterday's session cost wisps.
|
||||
|
||||
Session costs are recorded as ephemeral wisps (not exported to JSONL) to avoid
|
||||
log-in-database pollution. This step aggregates them into a permanent daily
|
||||
"Cost Report YYYY-MM-DD" bead for audit purposes.
|
||||
|
||||
**Step 1: Check if digest is needed**
|
||||
```bash
|
||||
# Preview yesterday's costs (dry run)
|
||||
gt costs digest --yesterday --dry-run
|
||||
```
|
||||
|
||||
If output shows "No session cost wisps found", skip to Step 3.
|
||||
|
||||
**Step 2: Create the digest**
|
||||
```bash
|
||||
gt costs digest --yesterday
|
||||
```
|
||||
|
||||
This:
|
||||
- Queries all session.ended wisps from yesterday
|
||||
- Creates a single "Cost Report YYYY-MM-DD" bead with aggregated data
|
||||
- Deletes the source wisps
|
||||
|
||||
**Step 3: Verify**
|
||||
The digest appears in `gt costs --week` queries.
|
||||
Daily digests preserve audit trail without per-session pollution.
|
||||
|
||||
**Timing**: Run once per morning patrol cycle. The --yesterday flag ensures
|
||||
we don't try to digest today's incomplete data.
|
||||
|
||||
**Exit criteria:** Yesterday's costs digested (or no wisps to digest)."""
|
||||
|
||||
[[steps]]
|
||||
id = "log-maintenance"
|
||||
title = "Rotate logs and prune state"
|
||||
needs = ["session-gc"]
|
||||
needs = ["costs-digest"]
|
||||
description = """
|
||||
Maintain daemon logs and state files.
|
||||
|
||||
|
||||
@@ -1,62 +0,0 @@
|
||||
{
|
||||
"formula": "mol-gastown-boot",
|
||||
"description": "Mayor bootstraps Gas Town via a verification-gated lifecycle molecule.\n\n## Purpose\nWhen Mayor executes \"boot up gas town\", this proto provides the workflow.\nEach step has action + verification - steps stay open until outcome is confirmed.\n\n## Key Principles\n1. **Verification-gated steps** - Not \"command ran\" but \"outcome confirmed\"\n2. **gt peek for verification** - Capture session output to detect stalls\n3. **gt nudge for recovery** - Reliable message delivery to unstick agents\n4. **Parallel where possible** - Witnesses and refineries can start in parallel\n5. **Ephemeral execution** - Boot is a wisp, squashed to digest after completion\n\n## Execution\n```bash\nbd mol wisp mol-gastown-boot # Create wisp\n```",
|
||||
"version": 1,
|
||||
"steps": [
|
||||
{
|
||||
"id": "ensure-daemon",
|
||||
"title": "Ensure daemon",
|
||||
"description": "Verify the Gas Town daemon is running.\n\n## Action\n```bash\ngt daemon status || gt daemon start\n```\n\n## Verify\n1. Daemon PID file exists: `~/.gt/daemon.pid`\n2. Process is alive: `kill -0 $(cat ~/.gt/daemon.pid)`\n3. Daemon responds: `gt daemon status` returns success\n\n## OnFail\nCannot start daemon. Log error and continue - some commands work without daemon."
|
||||
},
|
||||
{
|
||||
"id": "ensure-deacon",
|
||||
"title": "Ensure deacon",
|
||||
"needs": ["ensure-daemon"],
|
||||
"description": "Start the Deacon and verify patrol mode is active.\n\n## Action\n```bash\ngt deacon start\n```\n\n## Verify\n1. Session exists: `tmux has-session -t gt-deacon 2>/dev/null`\n2. Not stalled: `gt peek deacon/` does NOT show \"> Try\" prompt\n3. Heartbeat fresh: `deacon/heartbeat.json` modified < 2 min ago\n\n## OnStall\n```bash\ngt nudge deacon/ \"Start patrol.\"\nsleep 30\n# Re-verify\n```"
|
||||
},
|
||||
{
|
||||
"id": "ensure-witnesses",
|
||||
"title": "Ensure witnesses",
|
||||
"needs": ["ensure-deacon"],
|
||||
"type": "parallel",
|
||||
"description": "Parallel container: Start all rig witnesses.\n\nChildren execute in parallel. Container completes when all children complete.",
|
||||
"children": [
|
||||
{
|
||||
"id": "ensure-gastown-witness",
|
||||
"title": "Ensure gastown witness",
|
||||
"description": "Start the gastown rig Witness.\n\n## Action\n```bash\ngt witness start gastown\n```\n\n## Verify\n1. Session exists: `tmux has-session -t gastown-witness 2>/dev/null`\n2. Not stalled: `gt peek gastown/witness` does NOT show \"> Try\" prompt\n3. Heartbeat fresh: Last patrol cycle < 5 min ago"
|
||||
},
|
||||
{
|
||||
"id": "ensure-beads-witness",
|
||||
"title": "Ensure beads witness",
|
||||
"description": "Start the beads rig Witness.\n\n## Action\n```bash\ngt witness start beads\n```\n\n## Verify\n1. Session exists: `tmux has-session -t beads-witness 2>/dev/null`\n2. Not stalled: `gt peek beads/witness` does NOT show \"> Try\" prompt\n3. Heartbeat fresh: Last patrol cycle < 5 min ago"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "ensure-refineries",
|
||||
"title": "Ensure refineries",
|
||||
"needs": ["ensure-deacon"],
|
||||
"type": "parallel",
|
||||
"description": "Parallel container: Start all rig refineries.\n\nChildren execute in parallel. Container completes when all children complete.",
|
||||
"children": [
|
||||
{
|
||||
"id": "ensure-gastown-refinery",
|
||||
"title": "Ensure gastown refinery",
|
||||
"description": "Start the gastown rig Refinery.\n\n## Action\n```bash\ngt refinery start gastown\n```\n\n## Verify\n1. Session exists: `tmux has-session -t gastown-refinery 2>/dev/null`\n2. Not stalled: `gt peek gastown/refinery` does NOT show \"> Try\" prompt\n3. Queue processing: Refinery can receive merge requests"
|
||||
},
|
||||
{
|
||||
"id": "ensure-beads-refinery",
|
||||
"title": "Ensure beads refinery",
|
||||
"description": "Start the beads rig Refinery.\n\n## Action\n```bash\ngt refinery start beads\n```\n\n## Verify\n1. Session exists: `tmux has-session -t beads-refinery 2>/dev/null`\n2. Not stalled: `gt peek beads/refinery` does NOT show \"> Try\" prompt\n3. Queue processing: Refinery can receive merge requests"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "verify-town-health",
|
||||
"title": "Verify town health",
|
||||
"needs": ["ensure-witnesses", "ensure-refineries"],
|
||||
"description": "Final verification that Gas Town is healthy.\n\n## Action\n```bash\ngt status\n```\n\n## Verify\n1. Daemon running: Shows daemon status OK\n2. Deacon active: Shows deacon in patrol mode\n3. All witnesses: Each rig witness shows active\n4. All refineries: Each rig refinery shows active\n\n## OnFail\nLog degraded state but consider boot complete. Some agents may need manual recovery.\nRun `gt doctor` for detailed diagnostics."
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -48,7 +48,7 @@ gt deacon start
|
||||
```
|
||||
|
||||
## Verify
|
||||
1. Session exists: `tmux has-session -t gt-deacon 2>/dev/null`
|
||||
1. Session exists: `tmux has-session -t hq-deacon 2>/dev/null`
|
||||
2. Not stalled: `gt peek deacon/` does NOT show \"> Try\" prompt
|
||||
3. Heartbeat fresh: `deacon/heartbeat.json` modified < 2 min ago
|
||||
|
||||
|
||||
519
.beads/formulas/mol-shutdown-dance.formula.toml
Normal file
519
.beads/formulas/mol-shutdown-dance.formula.toml
Normal file
@@ -0,0 +1,519 @@
|
||||
description = """
|
||||
Death warrant execution state machine for Dogs.
|
||||
|
||||
Dogs execute this molecule to process death warrants. Each Dog is a lightweight
|
||||
goroutine (NOT a Claude session) that runs the interrogation state machine.
|
||||
|
||||
## Architecture Context
|
||||
|
||||
Dogs are lightweight workers in Boot's pool (see dog-pool-architecture.md):
|
||||
- Fixed pool of 5 goroutines (configurable via GT_DOG_POOL_SIZE)
|
||||
- State persisted to ~/gt/deacon/dogs/active/<id>.json
|
||||
- Recovery on Boot restart via orphan state files
|
||||
|
||||
## State Machine
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────┐
|
||||
│ │
|
||||
▼ │
|
||||
┌───────────────────────────┐ │
|
||||
│ INTERROGATING │ │
|
||||
│ │ │
|
||||
│ 1. Send health check │ │
|
||||
│ 2. Open timeout gate │ │
|
||||
└───────────┬───────────────┘ │
|
||||
│ │
|
||||
│ gate closes (timeout or response) │
|
||||
▼ │
|
||||
┌───────────────────────────┐ │
|
||||
│ EVALUATING │ │
|
||||
│ │ │
|
||||
│ Check tmux output for │ │
|
||||
│ ALIVE keyword │ │
|
||||
└───────────┬───────────────┘ │
|
||||
│ │
|
||||
┌───────┴───────┐ │
|
||||
│ │ │
|
||||
▼ ▼ │
|
||||
[ALIVE found] [No ALIVE] │
|
||||
│ │ │
|
||||
│ │ attempt < 3? │
|
||||
│ ├──────────────────────────────────→─┘
|
||||
│ │ yes: attempt++, longer timeout
|
||||
│ │
|
||||
│ │ no: attempt == 3
|
||||
▼ ▼
|
||||
┌─────────┐ ┌─────────────┐
|
||||
│ PARDONED│ │ EXECUTING │
|
||||
│ │ │ │
|
||||
│ Cancel │ │ Kill tmux │
|
||||
│ warrant │ │ session │
|
||||
└────┬────┘ └──────┬──────┘
|
||||
│ │
|
||||
└────────┬───────┘
|
||||
│
|
||||
▼
|
||||
┌────────────────┐
|
||||
│ EPITAPH │
|
||||
│ │
|
||||
│ Log outcome │
|
||||
│ Release dog │
|
||||
└────────────────┘
|
||||
```
|
||||
|
||||
## Timeout Gates
|
||||
|
||||
| Attempt | Timeout | Cumulative Wait |
|
||||
|---------|---------|-----------------|
|
||||
| 1 | 60s | 60s |
|
||||
| 2 | 120s | 180s (3 min) |
|
||||
| 3 | 240s | 420s (7 min) |
|
||||
|
||||
Timeout gates work like this:
|
||||
- Gate opens when interrogation message is sent
|
||||
- Gate closes when EITHER:
|
||||
a) Timeout expires (proceed to evaluate)
|
||||
b) Response detected (early close, proceed to evaluate)
|
||||
- The gate state determines the evaluation outcome
|
||||
|
||||
## Interrogation Message Format
|
||||
|
||||
```
|
||||
[DOG] HEALTH CHECK: Session {target}, respond ALIVE within {timeout}s or face termination.
|
||||
Warrant reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempt: {attempt}/3
|
||||
```
|
||||
|
||||
## Response Detection
|
||||
|
||||
The Dog checks tmux output for:
|
||||
1. The ALIVE keyword (explicit response)
|
||||
2. Any Claude output after the health check (implicit activity)
|
||||
|
||||
```go
|
||||
func (d *Dog) CheckForResponse() bool {
|
||||
output := tmux.CapturePane(d.Warrant.Target, 50) // Last 50 lines
|
||||
return strings.Contains(output, "ALIVE")
|
||||
}
|
||||
```
|
||||
|
||||
## Variables
|
||||
|
||||
| Variable | Source | Description |
|
||||
|-------------|-------------|-----------------------------------------------|
|
||||
| warrant_id | hook_bead | Bead ID of the death warrant |
|
||||
| target | warrant | Session name to interrogate |
|
||||
| reason | warrant | Why warrant was issued |
|
||||
| requester | warrant | Who filed the warrant (e.g., deacon, witness) |
|
||||
|
||||
## Integration
|
||||
|
||||
Dogs are NOT Claude sessions. This molecule is:
|
||||
1. A specification document (defines the state machine)
|
||||
2. A reference for Go implementation in internal/shutdown/
|
||||
3. A template for creating warrant-tracking beads
|
||||
|
||||
The Go implementation follows this spec exactly."""
|
||||
formula = "mol-shutdown-dance"
|
||||
version = 1
|
||||
|
||||
[squash]
|
||||
trigger = "on_complete"
|
||||
template_type = "operational"
|
||||
include_metrics = true
|
||||
|
||||
# ============================================================================
|
||||
# STEP 1: WARRANT_RECEIVED
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "warrant-received"
|
||||
title = "Receive and validate death warrant"
|
||||
description = """
|
||||
Entry point when Dog is allocated from pool.
|
||||
|
||||
**1. Read warrant from allocation:**
|
||||
The Dog receives a Warrant struct containing:
|
||||
- ID: Bead ID of the warrant
|
||||
- Target: Session name (e.g., "gt-gastown-Toast")
|
||||
- Reason: Why termination requested
|
||||
- Requester: Who filed (deacon, witness, mayor)
|
||||
- FiledAt: Timestamp
|
||||
|
||||
**2. Validate target exists:**
|
||||
```bash
|
||||
tmux has-session -t {target} 2>/dev/null
|
||||
```
|
||||
|
||||
If target doesn't exist:
|
||||
- Warrant is stale (already dead)
|
||||
- Skip to EPITAPH with outcome=already_dead
|
||||
|
||||
**3. Initialize state file:**
|
||||
Write initial state to ~/gt/deacon/dogs/active/{dog-id}.json
|
||||
|
||||
**4. Set initial attempt counter:**
|
||||
attempt = 1
|
||||
|
||||
**Exit criteria:** Warrant validated, target confirmed alive, state initialized."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 2: INTERROGATION_1 (60s timeout)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "interrogation-1"
|
||||
title = "First interrogation (60s timeout)"
|
||||
needs = ["warrant-received"]
|
||||
description = """
|
||||
First attempt to contact the session.
|
||||
|
||||
**1. Compose health check message:**
|
||||
```
|
||||
[DOG] HEALTH CHECK: Session {target}, respond ALIVE within 60s or face termination.
|
||||
Warrant reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempt: 1/3
|
||||
```
|
||||
|
||||
**2. Send via tmux:**
|
||||
```bash
|
||||
tmux send-keys -t {target} "{message}" Enter
|
||||
```
|
||||
|
||||
**3. Open timeout gate:**
|
||||
Gate configuration:
|
||||
- Type: timer
|
||||
- Timeout: 60 seconds
|
||||
- Close conditions:
|
||||
a) Timer expires
|
||||
b) ALIVE keyword detected in output
|
||||
|
||||
**4. Wait for gate to close:**
|
||||
The Dog waits (select on timer channel or early close signal).
|
||||
|
||||
**5. Record interrogation timestamp:**
|
||||
Update state file with last_message_at.
|
||||
|
||||
**Exit criteria:** Message sent, waiting for gate to close."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 3: EVALUATE_1
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "evaluate-1"
|
||||
title = "Evaluate first interrogation response"
|
||||
needs = ["interrogation-1"]
|
||||
description = """
|
||||
Check if session responded to first interrogation.
|
||||
|
||||
**1. Capture tmux output:**
|
||||
```bash
|
||||
tmux capture-pane -t {target} -p | tail -50
|
||||
```
|
||||
|
||||
**2. Check for ALIVE keyword:**
|
||||
```go
|
||||
if strings.Contains(output, "ALIVE") {
|
||||
return PARDONED
|
||||
}
|
||||
```
|
||||
|
||||
**3. Decision:**
|
||||
- ALIVE found → Proceed to PARDON
|
||||
- No ALIVE → Proceed to INTERROGATION_2
|
||||
|
||||
**Exit criteria:** Response evaluated, next step determined."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 4: INTERROGATION_2 (120s timeout)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "interrogation-2"
|
||||
title = "Second interrogation (120s timeout)"
|
||||
needs = ["evaluate-1"]
|
||||
gate = { type = "conditional", condition = "no_response_1" }
|
||||
description = """
|
||||
Second attempt with longer timeout.
|
||||
|
||||
Only executed if evaluate-1 found no response.
|
||||
|
||||
**1. Increment attempt:**
|
||||
attempt = 2
|
||||
|
||||
**2. Compose health check message:**
|
||||
```
|
||||
[DOG] HEALTH CHECK: Session {target}, respond ALIVE within 120s or face termination.
|
||||
Warrant reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempt: 2/3
|
||||
```
|
||||
|
||||
**3. Send via tmux:**
|
||||
```bash
|
||||
tmux send-keys -t {target} "{message}" Enter
|
||||
```
|
||||
|
||||
**4. Open timeout gate:**
|
||||
- Type: timer
|
||||
- Timeout: 120 seconds
|
||||
|
||||
**5. Wait for gate to close.**
|
||||
|
||||
**Exit criteria:** Second message sent, waiting for gate."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 5: EVALUATE_2
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "evaluate-2"
|
||||
title = "Evaluate second interrogation response"
|
||||
needs = ["interrogation-2"]
|
||||
description = """
|
||||
Check if session responded to second interrogation.
|
||||
|
||||
**1. Capture tmux output:**
|
||||
```bash
|
||||
tmux capture-pane -t {target} -p | tail -50
|
||||
```
|
||||
|
||||
**2. Check for ALIVE keyword.**
|
||||
|
||||
**3. Decision:**
|
||||
- ALIVE found → Proceed to PARDON
|
||||
- No ALIVE → Proceed to INTERROGATION_3
|
||||
|
||||
**Exit criteria:** Response evaluated, next step determined."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 6: INTERROGATION_3 (240s timeout)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "interrogation-3"
|
||||
title = "Final interrogation (240s timeout)"
|
||||
needs = ["evaluate-2"]
|
||||
gate = { type = "conditional", condition = "no_response_2" }
|
||||
description = """
|
||||
Final attempt before execution.
|
||||
|
||||
Only executed if evaluate-2 found no response.
|
||||
|
||||
**1. Increment attempt:**
|
||||
attempt = 3
|
||||
|
||||
**2. Compose health check message:**
|
||||
```
|
||||
[DOG] HEALTH CHECK: Session {target}, respond ALIVE within 240s or face termination.
|
||||
Warrant reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempt: 3/3
|
||||
```
|
||||
|
||||
**3. Send via tmux:**
|
||||
```bash
|
||||
tmux send-keys -t {target} "{message}" Enter
|
||||
```
|
||||
|
||||
**4. Open timeout gate:**
|
||||
- Type: timer
|
||||
- Timeout: 240 seconds
|
||||
- This is the FINAL chance
|
||||
|
||||
**5. Wait for gate to close.**
|
||||
|
||||
**Exit criteria:** Final message sent, waiting for gate."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 7: EVALUATE_3
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "evaluate-3"
|
||||
title = "Evaluate final interrogation response"
|
||||
needs = ["interrogation-3"]
|
||||
description = """
|
||||
Final evaluation before execution.
|
||||
|
||||
**1. Capture tmux output:**
|
||||
```bash
|
||||
tmux capture-pane -t {target} -p | tail -50
|
||||
```
|
||||
|
||||
**2. Check for ALIVE keyword.**
|
||||
|
||||
**3. Decision:**
|
||||
- ALIVE found → Proceed to PARDON
|
||||
- No ALIVE → Proceed to EXECUTE
|
||||
|
||||
**Exit criteria:** Final decision made."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 8: PARDON (success path)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "pardon"
|
||||
title = "Pardon session - cancel warrant"
|
||||
needs = ["evaluate-1", "evaluate-2", "evaluate-3"]
|
||||
gate = { type = "conditional", condition = "alive_detected" }
|
||||
description = """
|
||||
Session responded - cancel the death warrant.
|
||||
|
||||
**1. Update state:**
|
||||
state = PARDONED
|
||||
|
||||
**2. Record pardon details:**
|
||||
```json
|
||||
{
|
||||
"outcome": "pardoned",
|
||||
"attempt": {attempt},
|
||||
"response_time": "{time_since_last_interrogation}s",
|
||||
"pardoned_at": "{timestamp}"
|
||||
}
|
||||
```
|
||||
|
||||
**3. Cancel warrant bead:**
|
||||
```bash
|
||||
bd close {warrant_id} --reason "Session responded at attempt {attempt}"
|
||||
```
|
||||
|
||||
**4. Notify requester:**
|
||||
```bash
|
||||
gt mail send {requester}/ -s "PARDON: {target}" -m "Death warrant cancelled.
|
||||
Session responded after attempt {attempt}.
|
||||
Warrant: {warrant_id}
|
||||
Response detected: {timestamp}"
|
||||
```
|
||||
|
||||
**Exit criteria:** Warrant cancelled, requester notified."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 9: EXECUTE (termination path)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "execute"
|
||||
title = "Execute warrant - kill session"
|
||||
needs = ["evaluate-3"]
|
||||
gate = { type = "conditional", condition = "no_response_final" }
|
||||
description = """
|
||||
Session unresponsive after 3 attempts - execute the warrant.
|
||||
|
||||
**1. Update state:**
|
||||
state = EXECUTING
|
||||
|
||||
**2. Kill the tmux session:**
|
||||
```bash
|
||||
tmux kill-session -t {target}
|
||||
```
|
||||
|
||||
**3. Verify session is dead:**
|
||||
```bash
|
||||
tmux has-session -t {target} 2>/dev/null
|
||||
# Should fail (session gone)
|
||||
```
|
||||
|
||||
**4. If session still exists (kill failed):**
|
||||
- Force kill with tmux kill-server if isolated
|
||||
- Or escalate to Boot for manual intervention
|
||||
|
||||
**5. Record execution details:**
|
||||
```json
|
||||
{
|
||||
"outcome": "executed",
|
||||
"attempts": 3,
|
||||
"total_wait": "420s",
|
||||
"executed_at": "{timestamp}"
|
||||
}
|
||||
```
|
||||
|
||||
**Exit criteria:** Session terminated."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 10: EPITAPH (completion)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "epitaph"
|
||||
title = "Log cause of death and close warrant"
|
||||
needs = ["pardon", "execute"]
|
||||
description = """
|
||||
Final step - create audit record and release Dog back to pool.
|
||||
|
||||
**1. Compose epitaph based on outcome:**
|
||||
|
||||
For PARDONED:
|
||||
```
|
||||
EPITAPH: {target}
|
||||
Verdict: PARDONED
|
||||
Warrant: {warrant_id}
|
||||
Reason: {reason}
|
||||
Filed by: {requester}
|
||||
Response: Attempt {attempt}, after {wait_time}s
|
||||
Pardoned at: {timestamp}
|
||||
```
|
||||
|
||||
For EXECUTED:
|
||||
```
|
||||
EPITAPH: {target}
|
||||
Verdict: EXECUTED
|
||||
Warrant: {warrant_id}
|
||||
Reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempts: 3 (60s + 120s + 240s = 420s total)
|
||||
Executed at: {timestamp}
|
||||
```
|
||||
|
||||
For ALREADY_DEAD (target gone before interrogation):
|
||||
```
|
||||
EPITAPH: {target}
|
||||
Verdict: ALREADY_DEAD
|
||||
Warrant: {warrant_id}
|
||||
Reason: {reason}
|
||||
Filed by: {requester}
|
||||
Note: Target session not found at warrant processing
|
||||
```
|
||||
|
||||
**2. Close warrant bead:**
|
||||
```bash
|
||||
bd close {warrant_id} --reason "{epitaph_summary}"
|
||||
```
|
||||
|
||||
**3. Move state file to completed:**
|
||||
```bash
|
||||
mv ~/gt/deacon/dogs/active/{dog-id}.json ~/gt/deacon/dogs/completed/
|
||||
```
|
||||
|
||||
**4. Report to Boot:**
|
||||
Write completion file: ~/gt/deacon/dogs/active/{dog-id}.done
|
||||
```json
|
||||
{
|
||||
"dog_id": "{dog-id}",
|
||||
"warrant_id": "{warrant_id}",
|
||||
"target": "{target}",
|
||||
"outcome": "{pardoned|executed|already_dead}",
|
||||
"duration": "{total_duration}s"
|
||||
}
|
||||
```
|
||||
|
||||
**5. Release Dog to pool:**
|
||||
Dog resets state and returns to idle channel.
|
||||
|
||||
**Exit criteria:** Warrant closed, Dog released, audit complete."""
|
||||
|
||||
# ============================================================================
|
||||
# VARIABLES
|
||||
# ============================================================================
|
||||
[vars]
|
||||
[vars.warrant_id]
|
||||
description = "Bead ID of the death warrant being processed"
|
||||
required = true
|
||||
|
||||
[vars.target]
|
||||
description = "Session name to interrogate (e.g., gt-gastown-Toast)"
|
||||
required = true
|
||||
|
||||
[vars.reason]
|
||||
description = "Why the warrant was issued"
|
||||
required = true
|
||||
|
||||
[vars.requester]
|
||||
description = "Who filed the warrant (deacon, witness, mayor)"
|
||||
required = true
|
||||
default = "deacon"
|
||||
2669
.beads/issues.jsonl
Normal file
2669
.beads/issues.jsonl
Normal file
File diff suppressed because it is too large
Load Diff
120
.github/workflows/ci.yml
vendored
120
.github/workflows/ci.yml
vendored
@@ -68,6 +68,8 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
@@ -82,8 +84,122 @@ jobs:
|
||||
- name: Build
|
||||
run: go build -v ./cmd/gt
|
||||
|
||||
- name: Test
|
||||
run: go test -v -race -short ./...
|
||||
- name: Test with Coverage
|
||||
run: |
|
||||
go test -race -short -coverprofile=coverage.out ./... 2>&1 | tee test-output.txt
|
||||
|
||||
- name: Upload Coverage Data
|
||||
if: github.event_name == 'pull_request'
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: coverage-data
|
||||
path: |
|
||||
coverage.out
|
||||
test-output.txt
|
||||
|
||||
# Separate job to process coverage after ALL tests complete
|
||||
coverage:
|
||||
name: Coverage Report
|
||||
runs-on: ubuntu-latest
|
||||
needs: [test, integration]
|
||||
if: github.event_name == 'pull_request'
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.24'
|
||||
|
||||
- name: Download Coverage Data
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: coverage-data
|
||||
|
||||
- name: Generate Coverage Report
|
||||
run: |
|
||||
# Parse per-package coverage from test output
|
||||
echo "## Code Coverage Report" > coverage-report.md
|
||||
echo "" >> coverage-report.md
|
||||
|
||||
# Get overall coverage
|
||||
TOTAL=$(go tool cover -func=coverage.out | grep total | awk '{print $3}')
|
||||
echo "**Overall Coverage: ${TOTAL}**" >> coverage-report.md
|
||||
echo "" >> coverage-report.md
|
||||
|
||||
# Create per-package table
|
||||
echo "| Package | Coverage |" >> coverage-report.md
|
||||
echo "|---------|----------|" >> coverage-report.md
|
||||
|
||||
# Extract package coverage from all test output lines
|
||||
grep -E "github.com/steveyegge/gastown.*coverage:" test-output.txt | \
|
||||
sed 's/.*github.com\/steveyegge\/gastown\///' | \
|
||||
awk '{
|
||||
pkg = $1
|
||||
for (i=2; i<=NF; i++) {
|
||||
if ($i == "coverage:") {
|
||||
cov = $(i+1)
|
||||
break
|
||||
}
|
||||
}
|
||||
printf "| %s | %s |\n", pkg, cov
|
||||
}' | sort -u >> coverage-report.md
|
||||
|
||||
echo "" >> coverage-report.md
|
||||
echo "---" >> coverage-report.md
|
||||
echo "_Generated by CI_" >> coverage-report.md
|
||||
|
||||
# Show in logs
|
||||
cat coverage-report.md
|
||||
|
||||
- name: Upload Coverage Report
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: coverage-report
|
||||
path: coverage-report.md
|
||||
retention-days: 30
|
||||
|
||||
- name: Comment Coverage on PR
|
||||
# Only for internal PRs - fork PRs can't write comments
|
||||
if: github.event.pull_request.head.repo.full_name == github.repository
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const fs = require('fs');
|
||||
const report = fs.readFileSync('coverage-report.md', 'utf8');
|
||||
|
||||
// Find existing coverage comment
|
||||
const { data: comments } = await github.rest.issues.listComments({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
});
|
||||
|
||||
const botComment = comments.find(comment =>
|
||||
comment.user.type === 'Bot' &&
|
||||
comment.body.includes('## Code Coverage Report')
|
||||
);
|
||||
|
||||
if (botComment) {
|
||||
await github.rest.issues.updateComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
comment_id: botComment.id,
|
||||
body: report
|
||||
});
|
||||
} else {
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: report
|
||||
});
|
||||
}
|
||||
|
||||
- name: Coverage Note for Fork PRs
|
||||
if: github.event.pull_request.head.repo.full_name != github.repository
|
||||
run: |
|
||||
echo "::notice::Coverage report uploaded as artifact (fork PRs cannot post comments). Download from Actions tab."
|
||||
|
||||
lint:
|
||||
name: Lint
|
||||
|
||||
184
CHANGELOG.md
184
CHANGELOG.md
@@ -7,6 +7,190 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
## [0.2.3] - 2026-01-09
|
||||
|
||||
Worker safety release - prevents accidental termination of active agents.
|
||||
|
||||
> **Note**: The Deacon safety improvements are believed to be correct but have not
|
||||
> yet been extensively tested in production. We recommend running with
|
||||
> `gt deacon pause` initially and monitoring behavior before enabling full patrol.
|
||||
> Please report any issues. A 0.3.0 release will follow once these changes are
|
||||
> battle-tested.
|
||||
|
||||
### Critical Safety Improvements
|
||||
|
||||
- **Kill authority removed from Deacon** - Deacon patrol now only detects zombies via `--dry-run`, never kills directly. Death warrants are filed for Boot to handle interrogation/execution. This prevents destruction of worker context, mid-task progress, and unsaved state (#gt-vhaej)
|
||||
- **Bulletproof pause mechanism** - Multi-layer pause for Deacon with file-based state, `gt deacon pause/resume` commands, and guards in `gt prime` and heartbeat (#265)
|
||||
- **Doctor warns instead of killing** - `gt doctor` now warns about stale town-root settings rather than killing sessions (#243)
|
||||
- **Orphan process check informational** - Doctor's orphan process detection is now informational only, not actionable (#272)
|
||||
|
||||
### Added
|
||||
|
||||
- **`gt account switch` command** - Switch between Claude Code accounts with `gt account switch <handle>`. Manages `~/.claude` symlinks and updates default account
|
||||
- **`gt crew list --all`** - Show all crew members across all rigs (#276)
|
||||
- **Rig-level custom agent support** - Configure different agents per-rig (#12)
|
||||
- **Rig identity beads check** - Doctor validates rig identity beads exist
|
||||
- **GT_ROOT env var** - Set for all agent sessions for consistent environment
|
||||
- **New agent presets** - Added Cursor, Auggie (Augment Code), and Sourcegraph AMP as built-in agent presets (#247)
|
||||
- **Context Management docs** - Added to Witness template for better context handling (gt-jjama)
|
||||
|
||||
### Fixed
|
||||
|
||||
- **`gt prime --hook` recognized** - Doctor now recognizes `gt prime --hook` as valid session hook config (#14)
|
||||
- **Integration test reliability** - Improved test stability (#13)
|
||||
- **IsClaudeRunning detection** - Now detects 'claude' and version patterns correctly (#273)
|
||||
- **Deacon heartbeat restored** - `ensureDeaconRunning` restored to heartbeat using Manager pattern (#271)
|
||||
- **Deacon session names** - Correct session name references in formulas (#270)
|
||||
- **Hidden directory scanning** - Ignore `.claude` and other dot directories when enumerating polecats (#258, #279)
|
||||
- **SetupRedirect tracked beads** - Works correctly with tracked beads architecture where canonical location is `mayor/rig/.beads`
|
||||
- **Tmux shell ready** - Wait for shell ready before sending keys (#264)
|
||||
- **Gastown prefix derivation** - Correctly derive `gt-` prefix for gastown compound words (gt-m46bb)
|
||||
- **Custom beads types** - Register custom beads types during install (#250)
|
||||
|
||||
### Changed
|
||||
|
||||
- **Refinery Manager pattern** - Replaced `ensureRefinerySession` with `refinery.Manager.Start()` for consistency
|
||||
|
||||
### Removed
|
||||
|
||||
- **Unused formula JSON** - Removed unused JSON formula file (cleanup)
|
||||
|
||||
### Contributors
|
||||
|
||||
Thanks to all contributors for this release:
|
||||
- @julianknutsen - Doctor fixes (#14, #271, #272, #273), formula fixes (#270), GT_ROOT env (#268)
|
||||
- @joshuavial - Hidden directory scanning (#258, #279), crew list --all (#276)
|
||||
|
||||
## [0.2.2] - 2026-01-07
|
||||
|
||||
Rig operational state management, unified agent startup, and extensive stability fixes.
|
||||
|
||||
### Added
|
||||
|
||||
#### Rig Operational State Management
|
||||
- **`gt rig park/unpark` commands** - Level 1 rig control: pause daemon auto-start while preserving sessions
|
||||
- **`gt rig dock/undock` commands** - Level 2 rig control: stop all sessions and prevent auto-start (gt-9gm9n)
|
||||
- **`gt rig config` commands** - Per-rig configuration management (gt-hhmkq)
|
||||
- **Rig identity beads** - Schema and creation for rig identity tracking (gt-zmznh)
|
||||
- **Property layer lookup** - Hierarchical configuration resolution (gt-emh1c)
|
||||
- **Operational state in status** - `gt rig status` shows park/dock state
|
||||
|
||||
#### Agent Configuration & Startup
|
||||
- **`--agent` overrides** - Override agent for start/attach/sling commands
|
||||
- **Unified agent startup** - Manager pattern for consistent agent initialization
|
||||
- **Claude settings installation** - Auto-install during rig and HQ creation
|
||||
- **Runtime-aware tmux checks** - Detect actual agent state from tmux sessions
|
||||
|
||||
#### Status & Monitoring
|
||||
- **`gt status --watch`** - Watch mode with auto-refresh (#231)
|
||||
- **Compact status output** - One-line-per-worker format as new default
|
||||
- **LED status indicators** - Visual indicators for rigs in Mayor tmux status line
|
||||
- **Parked/docked indicators** - Pause emoji (⏸) for inactive rigs in statusline
|
||||
|
||||
#### Beads & Workflow
|
||||
- **Minimum beads version check** - Validates beads CLI compatibility (gt-im3fl)
|
||||
- **ZFC convoy auto-close** - `bd close` triggers convoy completion (gt-3qw5s)
|
||||
- **Stale hooked bead cleanup** - Deacon clears orphaned hooks (gt-2yls3)
|
||||
- **Doctor prefix mismatch detection** - Detect misconfigured rig prefixes (gt-17wdl)
|
||||
- **Unified beads redirect** - Single redirect system for tracked and local beads (#222)
|
||||
- **Route from rig to town beads** - Cross-level bead routing
|
||||
|
||||
#### Infrastructure
|
||||
- **Windows-compatible file locking** - Daemon lock works on Windows
|
||||
- **`--purge` flag for crews** - Full crew obliteration option
|
||||
- **Debug logging for suppressed errors** - Better visibility into startup issues (gt-6d7eh)
|
||||
- **hq- prefix in tmux cycle bindings** - Navigate to Mayor/Deacon sessions
|
||||
- **Wisp config storage layer** - Transient/local settings for ephemeral workflows
|
||||
- **Sparse checkout** - Exclude Claude context files from source repos
|
||||
|
||||
### Changed
|
||||
|
||||
- **Daemon respects rig operational state** - Parked/docked rigs not auto-started
|
||||
- **Agent startup unified** - Manager pattern replaces ad-hoc initialization
|
||||
- **Mayor files moved** - Reorganized into `mayor/` subdirectory
|
||||
- **Refinery merges local branches** - No longer fetches from origin (gt-cio03)
|
||||
- **Polecats start from origin/default-branch** - Consistent recycled state
|
||||
- **Observable states removed** - Discover agent state from tmux, don't track (gt-zecmc)
|
||||
- **mol-town-shutdown v3** - Complete cleanup formula (gt-ux23f)
|
||||
- **Witness delays polecat cleanup** - Wait until MR merges (gt-12hwb)
|
||||
- **Nudge on divergence** - Daemon nudges agents instead of silent accept
|
||||
- **README rewritten** - Comprehensive guides and architecture docs (#226)
|
||||
- **`gt rigs` → `gt rig list`** - Command renamed in templates/docs (#217)
|
||||
|
||||
### Fixed
|
||||
|
||||
#### Doctor & Lifecycle
|
||||
- **`--restart-sessions` flag required** - Doctor won't cycle sessions without explicit flag (gt-j44ri)
|
||||
- **Only cycle patrol roles** - Doctor --fix doesn't restart crew/polecats (hq-qthgye)
|
||||
- **Session-ended events auto-closed** - Prevent accumulation (gt-8tc1v)
|
||||
- **GUPP propulsion nudge** - Added to daemon restartSession
|
||||
|
||||
#### Sling & Beads
|
||||
- **Sling uses bd native routing** - No BEADS_DIR override needed
|
||||
- **Sling parses wisp JSON correctly** - Handle `new_epic_id` field
|
||||
- **Sling resolves rig path** - Cross-rig bead hooking works
|
||||
- **Sling waits for Claude ready** - Don't nudge until session responsive (#146)
|
||||
- **Correct beads database for sling** - Rig-level beads used (gt-n5gga)
|
||||
- **Close hooked beads before clearing** - Proper cleanup order (gt-vwjz6)
|
||||
- **Removed dead sling flags** - `--molecule` and `--quality` cleaned up
|
||||
|
||||
#### Agent Sessions
|
||||
- **Witness kills tmux on Stop()** - Clean session termination
|
||||
- **Deacon uses session package** - Correct hq- session names (gt-r38pj)
|
||||
- **Honor rig agent for witness/refinery** - Respect per-rig settings
|
||||
- **Canonical hq role bead IDs** - Consistent naming
|
||||
- **hq- prefix in status display** - Global agents shown correctly (gt-vcvyd)
|
||||
- **Restart Claude when dead** - Recover sessions where tmux exists but Claude died
|
||||
- **Town session cycling** - Works from any directory
|
||||
|
||||
#### Polecat & Crew
|
||||
- **Nuke not blocked by stale hooks** - Closed beads don't prevent cleanup (gt-jc7bq)
|
||||
- **Crew stop dry-run support** - Preview cleanup before executing (gt-kjcx4)
|
||||
- **Crew defaults to --all** - `gt crew start <rig>` starts all crew (gt-s8mpt)
|
||||
- **Polecat cleanup handlers** - `gt witness process` invokes handlers (gt-h3gzj)
|
||||
|
||||
#### Daemon & Configuration
|
||||
- **Create mayor/daemon.json** - `gt start` and `gt doctor --fix` initialize daemon state (#225)
|
||||
- **Initialize git before beads** - Enable repo fingerprint (#180)
|
||||
- **Handoff preserves env vars** - Claude Code environment not lost (#216)
|
||||
- **Agent settings passed correctly** - Witness and daemon respawn use rigPath
|
||||
- **Log rig discovery errors** - Don't silently swallow (gt-rsnj9)
|
||||
|
||||
#### Refinery & Merge Queue
|
||||
- **Use rig's default_branch** - Not hardcoded 'main'
|
||||
- **MERGE_FAILED sent to Witness** - Proper failure notification
|
||||
- **Removed BranchPushedToRemote checks** - Local-only workflow support (gt-dymy5)
|
||||
|
||||
#### Misc Fixes
|
||||
- **BeadsSetupRedirect preserves tracked files** - Don't clobber existing files (gt-fj0ol)
|
||||
- **PATH export in hooks** - Ensure commands find binaries
|
||||
- **Replace panic with fallback** - ID generation gracefully degrades (#213)
|
||||
- **Removed duplicate WorktreeAddFromRef** - Code cleanup
|
||||
- **Town root beads for Deacon** - Use correct beads location (gt-sstg)
|
||||
|
||||
### Refactored
|
||||
|
||||
- **AgentStateManager pattern** - Shared state management extracted (gt-gaw8e)
|
||||
- **CleanupStatus type** - Replace raw strings (gt-77gq7)
|
||||
- **ExecWithOutput utility** - Common command execution (gt-vurfr)
|
||||
- **runBdCommand helper** - DRY mail package (gt-8i6bg)
|
||||
- **Config expansion helper** - Generic DRY config (gt-i85sg)
|
||||
|
||||
### Documentation
|
||||
|
||||
- **Property layers guide** - Implementation documentation
|
||||
- **Worktree architecture** - Clarified beads routing
|
||||
- **Agent config** - Onboarding docs mention --agent overrides
|
||||
- **Polecat Operations section** - Added to Mayor docs (#140)
|
||||
|
||||
### Contributors
|
||||
|
||||
Thanks to all contributors for this release:
|
||||
- @julianknutsen - Claude settings inheritance (#239)
|
||||
- @joshuavial - Sling wisp JSON parse (#238)
|
||||
- @michaellady - Unified beads redirect (#222), daemon.json fix (#225)
|
||||
- @greghughespdx - PATH in hooks fix (#139)
|
||||
|
||||
## [0.2.1] - 2026-01-05
|
||||
|
||||
Bug fixes, security hardening, and new `gt config` command.
|
||||
|
||||
2
Makefile
2
Makefile
@@ -23,7 +23,7 @@ ifeq ($(shell uname),Darwin)
|
||||
endif
|
||||
|
||||
install: build
|
||||
cp $(BUILD_DIR)/$(BINARY) ~/bin/$(BINARY)
|
||||
cp $(BUILD_DIR)/$(BINARY) ~/.local/bin/$(BINARY)
|
||||
|
||||
clean:
|
||||
rm -f $(BUILD_DIR)/$(BINARY)
|
||||
|
||||
631
README.md
631
README.md
@@ -1,388 +1,481 @@
|
||||
# Gas Town
|
||||
|
||||
Multi-agent orchestrator for Claude Code. Track work with convoys; sling to agents.
|
||||
**Multi-agent orchestration system for Claude Code with persistent work tracking**
|
||||
|
||||
## Why Gas Town?
|
||||
## Overview
|
||||
|
||||
| Without | With Gas Town |
|
||||
|---------|---------------|
|
||||
| Agents forget work after restart | Work persists on hooks - survives crashes, compaction, restarts |
|
||||
| Manual coordination | Agents have mailboxes, identities, and structured handoffs |
|
||||
| 4-10 agents is chaotic | Comfortably scale to 20-30 agents |
|
||||
| Work state in agent memory | Work state in Beads (git-backed ledger) |
|
||||
Gas Town is a workspace manager that lets you coordinate multiple Claude Code agents working on different tasks. Instead of losing context when agents restart, Gas Town persists work state in git-backed hooks, enabling reliable multi-agent workflows.
|
||||
|
||||
## Prerequisites
|
||||
### What Problem Does This Solve?
|
||||
|
||||
- **Go 1.23+** - [go.dev/dl](https://go.dev/dl/)
|
||||
- **Git 2.25+** - for worktree support
|
||||
- **beads (bd)** - [github.com/steveyegge/beads](https://github.com/steveyegge/beads) - required for issue tracking
|
||||
- **tmux 3.0+** - recommended for the full experience (the Mayor session is the primary interface)
|
||||
- **Claude Code CLI** - [claude.ai/code](https://claude.ai/code)
|
||||
| Challenge | Gas Town Solution |
|
||||
| ------------------------------- | -------------------------------------------- |
|
||||
| Agents lose context on restart | Work persists in git-backed hooks |
|
||||
| Manual agent coordination | Built-in mailboxes, identities, and handoffs |
|
||||
| 4-10 agents become chaotic | Scale comfortably to 20-30 agents |
|
||||
| Work state lost in agent memory | Work state stored in Beads ledger |
|
||||
|
||||
## Quick Start
|
||||
### Architecture
|
||||
|
||||
```bash
|
||||
# Install
|
||||
go install github.com/steveyegge/gastown/cmd/gt@latest
|
||||
```mermaid
|
||||
graph TB
|
||||
Mayor[The Mayor<br/>AI Coordinator]
|
||||
Town[Town Workspace<br/>~/gt/]
|
||||
|
||||
# Ensure Go binaries are in your PATH (add to ~/.zshrc or ~/.bashrc)
|
||||
export PATH="$PATH:$HOME/go/bin"
|
||||
Town --> Mayor
|
||||
Town --> Rig1[Rig: Project A]
|
||||
Town --> Rig2[Rig: Project B]
|
||||
|
||||
# Create workspace (--git auto-initializes git repository)
|
||||
gt install ~/gt --git
|
||||
cd ~/gt
|
||||
Rig1 --> Crew1[Crew Member<br/>Your workspace]
|
||||
Rig1 --> Hooks1[Hooks<br/>Persistent storage]
|
||||
Rig1 --> Polecats1[Polecats<br/>Worker agents]
|
||||
|
||||
# Add a project
|
||||
gt rig add myproject https://github.com/you/repo.git
|
||||
Rig2 --> Crew2[Crew Member]
|
||||
Rig2 --> Hooks2[Hooks]
|
||||
Rig2 --> Polecats2[Polecats]
|
||||
|
||||
# Create your personal workspace
|
||||
gt crew add <yourname> --rig myproject
|
||||
Hooks1 -.git worktree.-> GitRepo1[Git Repository]
|
||||
Hooks2 -.git worktree.-> GitRepo2[Git Repository]
|
||||
|
||||
# Start working
|
||||
cd myproject/crew/<yourname>
|
||||
```
|
||||
|
||||
For advanced multi-agent coordination, use the Mayor session:
|
||||
|
||||
```bash
|
||||
gt mayor attach # Enter the Mayor's office
|
||||
```
|
||||
|
||||
Inside the Mayor session, you're talking to Claude with full town context:
|
||||
|
||||
> "Help me fix the authentication bug in myproject"
|
||||
|
||||
The Mayor will create convoys, dispatch workers, and coordinate everything. You can also run CLI commands directly:
|
||||
|
||||
```bash
|
||||
# Create a convoy and sling work (CLI workflow)
|
||||
gt convoy create "Feature X" issue-123 issue-456 --notify --human
|
||||
gt sling issue-123 myproject
|
||||
|
||||
# Track progress
|
||||
gt convoy list
|
||||
|
||||
# Switch between agent sessions
|
||||
gt agents
|
||||
style Mayor fill:#e1f5ff
|
||||
style Town fill:#f0f0f0
|
||||
style Rig1 fill:#fff4e1
|
||||
style Rig2 fill:#fff4e1
|
||||
```
|
||||
|
||||
## Core Concepts
|
||||
|
||||
**The Mayor** is your AI coordinator. It's Claude Code with full context about your workspace, projects, and agents. The Mayor session (`gt prime`) is the primary way to interact with Gas Town - just tell it what you want to accomplish.
|
||||
### The Mayor 🎩
|
||||
|
||||
```
|
||||
Town (~/gt/) Your workspace
|
||||
├── Mayor Your AI coordinator (start here)
|
||||
├── Rig (project) Container for a git project + its agents
|
||||
│ ├── Polecats Workers (ephemeral, spawn → work → disappear)
|
||||
│ ├── Witness Monitors workers, handles lifecycle
|
||||
│ └── Refinery Merge queue processor
|
||||
```
|
||||
Your primary AI coordinator. The Mayor is a Claude Code instance with full context about your workspace, projects, and agents. **Start here** - just tell the Mayor what you want to accomplish.
|
||||
|
||||
**Hook**: Each agent has a hook where work hangs. On wake, run what's on your hook.
|
||||
### Town 🏘️
|
||||
|
||||
**Beads**: Git-backed issue tracker. All work state lives here. [github.com/steveyegge/beads](https://github.com/steveyegge/beads)
|
||||
Your workspace directory (e.g., `~/gt/`). Contains all projects, agents, and configuration.
|
||||
|
||||
## Workflows
|
||||
### Rigs 🏗️
|
||||
|
||||
### Full Stack (Recommended)
|
||||
Project containers. Each rig wraps a git repository and manages its associated agents.
|
||||
|
||||
The primary Gas Town experience. Agents run in tmux sessions with the Mayor as your interface.
|
||||
### Crew Members 👤
|
||||
|
||||
Your personal workspace within a rig. Where you do hands-on work.
|
||||
|
||||
### Polecats 🦨
|
||||
|
||||
Ephemeral worker agents that spawn, complete a task, and disappear.
|
||||
|
||||
### Hooks 🪝
|
||||
|
||||
Git worktree-based persistent storage for agent work. Survives crashes and restarts.
|
||||
|
||||
### Convoys 🚚
|
||||
|
||||
Work tracking units. Bundle multiple issues/tasks that get assigned to agents.
|
||||
|
||||
### Beads Integration 📿
|
||||
|
||||
Git-backed issue tracking system that stores work state as structured data.
|
||||
|
||||
> **New to Gas Town?** See the [Glossary](docs/glossary.md) for a complete guide to terminology and concepts.
|
||||
|
||||
## Installation
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- **Go 1.23+** - [go.dev/dl](https://go.dev/dl/)
|
||||
- **Git 2.25+** - for worktree support
|
||||
- **beads (bd) 0.44.0+** - [github.com/steveyegge/beads](https://github.com/steveyegge/beads) (required for custom type support)
|
||||
- **tmux 3.0+** - recommended for full experience
|
||||
- **Claude Code CLI** (default runtime) - [claude.ai/code](https://claude.ai/code)
|
||||
- **Codex CLI** (optional runtime) - [developers.openai.com/codex/cli](https://developers.openai.com/codex/cli)
|
||||
|
||||
### Setup
|
||||
|
||||
```bash
|
||||
gt start # Start Gas Town (daemon + Mayor session)
|
||||
gt mayor attach # Enter Mayor session
|
||||
# Install Gas Town
|
||||
go install github.com/steveyegge/gastown/cmd/gt@latest
|
||||
|
||||
# Inside Mayor session, just ask:
|
||||
# "Create a convoy for issues 123 and 456 in myproject"
|
||||
# "What's the status of my work?"
|
||||
# "Show me what the witness is doing"
|
||||
# Add Go binaries to PATH (add to ~/.zshrc or ~/.bashrc)
|
||||
export PATH="$PATH:$HOME/go/bin"
|
||||
|
||||
# Or use CLI commands:
|
||||
gt convoy create "Feature X" issue-123 issue-456
|
||||
gt sling issue-123 myproject # Spawns polecat automatically
|
||||
gt convoy list # Dashboard view
|
||||
gt agents # Navigate between sessions
|
||||
# Create workspace with git initialization
|
||||
gt install ~/gt --git
|
||||
cd ~/gt
|
||||
|
||||
# Add your first project
|
||||
gt rig add myproject https://github.com/you/repo.git
|
||||
|
||||
# Create your crew workspace
|
||||
gt crew add yourname --rig myproject
|
||||
cd myproject/crew/yourname
|
||||
|
||||
# Start the Mayor session (your main interface)
|
||||
gt mayor attach
|
||||
```
|
||||
|
||||
### Minimal (No Tmux)
|
||||
## Quick Start Guide
|
||||
|
||||
Run individual Claude Code instances manually. Gas Town just tracks state.
|
||||
### Basic Workflow
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant You
|
||||
participant Mayor
|
||||
participant Convoy
|
||||
participant Agent
|
||||
participant Hook
|
||||
|
||||
You->>Mayor: Tell Mayor what to build
|
||||
Mayor->>Convoy: Create convoy with issues
|
||||
Mayor->>Agent: Sling issue to agent
|
||||
Agent->>Hook: Store work state
|
||||
Agent->>Agent: Complete work
|
||||
Agent->>Convoy: Report completion
|
||||
Mayor->>You: Summary of progress
|
||||
```
|
||||
|
||||
### Example: Feature Development
|
||||
|
||||
```bash
|
||||
# 1. Start the Mayor
|
||||
gt mayor attach
|
||||
|
||||
# 2. In Mayor session, create a convoy
|
||||
gt convoy create "Feature X" issue-123 issue-456 --notify --human
|
||||
|
||||
# 3. Assign work to an agent
|
||||
gt sling issue-123 myproject
|
||||
|
||||
# 4. Track progress
|
||||
gt convoy list
|
||||
|
||||
# 5. Monitor agents
|
||||
gt agents
|
||||
```
|
||||
|
||||
## Common Workflows
|
||||
|
||||
### Mayor Workflow (Recommended)
|
||||
|
||||
**Best for:** Coordinating complex, multi-issue work
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
Start([Start Mayor]) --> Tell[Tell Mayor<br/>what to build]
|
||||
Tell --> Creates[Mayor creates<br/>convoy + agents]
|
||||
Creates --> Monitor[Monitor progress<br/>via convoy list]
|
||||
Monitor --> Done{All done?}
|
||||
Done -->|No| Monitor
|
||||
Done -->|Yes| Review[Review work]
|
||||
```
|
||||
|
||||
**Commands:**
|
||||
|
||||
```bash
|
||||
# Attach to Mayor
|
||||
gt mayor attach
|
||||
|
||||
# In Mayor, create convoy and let it orchestrate
|
||||
gt convoy create "Auth System" issue-101 issue-102 --notify
|
||||
|
||||
# Track progress
|
||||
gt convoy list
|
||||
```
|
||||
|
||||
### Minimal Mode (No Tmux)
|
||||
|
||||
Run individual runtime instances manually. Gas Town just tracks state.
|
||||
|
||||
```bash
|
||||
gt convoy create "Fix bugs" issue-123 # Create convoy (sling auto-creates if skipped)
|
||||
gt sling issue-123 myproject # Assign to worker
|
||||
claude --resume # Agent reads mail, runs work
|
||||
claude --resume # Agent reads mail, runs work (Claude)
|
||||
# or: codex # Start Codex in the workspace
|
||||
gt convoy list # Check progress
|
||||
```
|
||||
|
||||
### Pick Your Roles
|
||||
### Beads Formula Workflow
|
||||
|
||||
Gas Town is modular. Run what you need:
|
||||
**Best for:** Predefined, repeatable processes
|
||||
|
||||
- **Polecats only**: Manual spawning, no monitoring
|
||||
- **+ Witness**: Automatic worker lifecycle, stuck detection
|
||||
- **+ Refinery**: Merge queue, code review
|
||||
- **+ Mayor**: Cross-project coordination
|
||||
Formulas are TOML-defined workflows stored in `.beads/formulas/`.
|
||||
|
||||
## Cooking Formulas
|
||||
|
||||
Formulas define structured workflows. Cook them, sling them to agents.
|
||||
|
||||
### Basic Example
|
||||
**Example Formula** (`.beads/formulas/release.formula.toml`):
|
||||
|
||||
```toml
|
||||
# .beads/formulas/shiny.formula.toml
|
||||
formula = "shiny"
|
||||
description = "Design before code, review before ship"
|
||||
description = "Standard release process"
|
||||
formula = "release"
|
||||
version = 1
|
||||
|
||||
[[steps]]
|
||||
id = "design"
|
||||
description = "Think about architecture"
|
||||
|
||||
[[steps]]
|
||||
id = "implement"
|
||||
needs = ["design"]
|
||||
|
||||
[[steps]]
|
||||
id = "test"
|
||||
needs = ["implement"]
|
||||
|
||||
[[steps]]
|
||||
id = "submit"
|
||||
needs = ["test"]
|
||||
```
|
||||
|
||||
### Using Formulas
|
||||
|
||||
```bash
|
||||
bd formula list # See available formulas
|
||||
bd cook shiny # Cook into a protomolecule
|
||||
bd mol pour shiny --var feature=auth # Create runnable molecule
|
||||
gt convoy create "Auth feature" gt-xyz # Track with convoy
|
||||
gt sling gt-xyz myproject # Assign to worker
|
||||
gt convoy list # Monitor progress
|
||||
```
|
||||
|
||||
### What Happens
|
||||
|
||||
1. **Cook** expands the formula into a protomolecule (frozen template)
|
||||
2. **Pour** creates a molecule (live workflow) with steps as beads
|
||||
3. **Worker executes** each step, closing beads as it goes
|
||||
4. **Crash recovery**: Worker restarts, reads molecule, continues from last step
|
||||
|
||||
### Example: Beads Release Molecule
|
||||
|
||||
A real workflow for releasing a new beads version:
|
||||
|
||||
```toml
|
||||
formula = "beads-release"
|
||||
description = "Version bump and release workflow"
|
||||
[vars.version]
|
||||
description = "The semantic version to release (e.g., 1.2.0)"
|
||||
required = true
|
||||
|
||||
[[steps]]
|
||||
id = "bump-version"
|
||||
description = "Update version in version.go and CHANGELOG"
|
||||
|
||||
[[steps]]
|
||||
id = "update-deps"
|
||||
needs = ["bump-version"]
|
||||
description = "Run go mod tidy, update go.sum"
|
||||
title = "Bump version"
|
||||
description = "Run ./scripts/bump-version.sh {{version}}"
|
||||
|
||||
[[steps]]
|
||||
id = "run-tests"
|
||||
needs = ["update-deps"]
|
||||
description = "Full test suite, check for regressions"
|
||||
title = "Run tests"
|
||||
description = "Run make test"
|
||||
needs = ["bump-version"]
|
||||
|
||||
[[steps]]
|
||||
id = "build-binaries"
|
||||
id = "build"
|
||||
title = "Build"
|
||||
description = "Run make build"
|
||||
needs = ["run-tests"]
|
||||
description = "Cross-compile for all platforms"
|
||||
|
||||
[[steps]]
|
||||
id = "create-tag"
|
||||
needs = ["build-binaries"]
|
||||
description = "Git tag with version, push to origin"
|
||||
title = "Create release tag"
|
||||
description = "Run git tag -a v{{version}} -m 'Release v{{version}}'"
|
||||
needs = ["build"]
|
||||
|
||||
[[steps]]
|
||||
id = "publish-release"
|
||||
id = "publish"
|
||||
title = "Publish"
|
||||
description = "Run ./scripts/publish.sh"
|
||||
needs = ["create-tag"]
|
||||
description = "Create GitHub release with binaries"
|
||||
```
|
||||
|
||||
Cook it, pour it, sling it. The polecat runs through each step, and if it crashes
|
||||
after `run-tests`, a new polecat picks up at `build-binaries`.
|
||||
**Execute:**
|
||||
|
||||
### Formula Composition
|
||||
```bash
|
||||
# List available formulas
|
||||
bd formula list
|
||||
|
||||
```toml
|
||||
# Extend an existing formula
|
||||
formula = "shiny-enterprise"
|
||||
extends = ["shiny"]
|
||||
# Run a formula with variables
|
||||
bd cook release --var version=1.2.0
|
||||
|
||||
[compose]
|
||||
aspects = ["security-audit"] # Add cross-cutting concerns
|
||||
# Create formula instance for tracking
|
||||
bd mol pour release --var version=1.2.0
|
||||
```
|
||||
|
||||
### Manual Convoy Workflow
|
||||
|
||||
**Best for:** Direct control over work distribution
|
||||
|
||||
```bash
|
||||
# Create convoy manually
|
||||
gt convoy create "Bug Fixes" --human
|
||||
|
||||
# Add issues
|
||||
gt convoy add-issue bug-101 bug-102
|
||||
|
||||
# Assign to specific agents
|
||||
gt sling bug-101 myproject/my-agent
|
||||
|
||||
# Check status
|
||||
gt convoy show
|
||||
```
|
||||
|
||||
## Runtime Configuration
|
||||
|
||||
Gas Town supports multiple AI coding runtimes. Per-rig runtime settings are in `settings/config.json`.
|
||||
|
||||
```json
|
||||
{
|
||||
"runtime": {
|
||||
"provider": "codex",
|
||||
"command": "codex",
|
||||
"args": [],
|
||||
"prompt_mode": "none"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Notes:**
|
||||
|
||||
- Claude uses hooks in `.claude/settings.json` for mail injection and startup.
|
||||
- For Codex, set `project_doc_fallback_filenames = ["CLAUDE.md"]` in
|
||||
`~/.codex/config.toml` so role instructions are picked up.
|
||||
- For runtimes without hooks (e.g., Codex), Gas Town sends a startup fallback
|
||||
after the session is ready: `gt prime`, optional `gt mail check --inject`
|
||||
for autonomous roles, and `gt nudge deacon session-started`.
|
||||
|
||||
## Key Commands
|
||||
|
||||
### For Humans (Overseer)
|
||||
### Workspace Management
|
||||
|
||||
```bash
|
||||
gt start # Start Gas Town (daemon + agents)
|
||||
gt shutdown # Graceful shutdown
|
||||
gt status # Town overview
|
||||
gt <role> attach # Jump into any agent session
|
||||
# e.g., gt mayor attach, gt witness attach
|
||||
gt install <path> # Initialize workspace
|
||||
gt rig add <name> <repo> # Add project
|
||||
gt rig list # List projects
|
||||
gt crew add <name> --rig <rig> # Create crew workspace
|
||||
```
|
||||
|
||||
### Agent Operations
|
||||
|
||||
```bash
|
||||
gt agents # List active agents
|
||||
gt sling <issue> <rig> # Assign work to agent
|
||||
gt sling <issue> <rig> --agent cursor # Override runtime for this sling/spawn
|
||||
gt mayor attach # Start Mayor session
|
||||
gt mayor start --agent auggie # Run Mayor with a specific agent alias
|
||||
gt prime # Alternative to mayor attach
|
||||
```
|
||||
|
||||
**Built-in agent presets**: `claude`, `gemini`, `codex`, `cursor`, `auggie`, `amp`
|
||||
|
||||
### Convoy (Work Tracking)
|
||||
|
||||
```bash
|
||||
gt convoy create <name> [issues...] # Create convoy
|
||||
gt convoy list # List all convoys
|
||||
gt convoy show [id] # Show convoy details
|
||||
gt convoy add-issue <issue> # Add issue to convoy
|
||||
```
|
||||
|
||||
### Configuration
|
||||
|
||||
```bash
|
||||
gt config agent list [--json] # List all agents (built-in + custom)
|
||||
gt config agent get <name> # Show agent configuration
|
||||
gt config agent set <name> <cmd> # Create or update custom agent
|
||||
gt config agent remove <name> # Remove custom agent (built-ins protected)
|
||||
gt config default-agent [name] # Get or set town default agent
|
||||
```
|
||||
|
||||
**Example**: Use a cheaper model for most work:
|
||||
```bash
|
||||
# Set custom agent command
|
||||
gt config agent set claude-glm "claude-glm --model glm-4"
|
||||
gt config agent set codex-low "codex --thinking low"
|
||||
|
||||
# Set default agent
|
||||
gt config default-agent claude-glm
|
||||
|
||||
# View config
|
||||
gt config show
|
||||
```
|
||||
|
||||
Most other work happens through agents - just ask them.
|
||||
|
||||
### For Agents
|
||||
### Beads Integration
|
||||
|
||||
```bash
|
||||
# Convoy (primary dashboard)
|
||||
gt convoy list # Active work across all rigs
|
||||
gt convoy status <id> # Detailed convoy progress
|
||||
gt convoy create "name" <issues> # Create new convoy
|
||||
|
||||
# Work assignment
|
||||
gt sling <bead> <rig> # Assign work to polecat
|
||||
bd ready # Show available work
|
||||
bd list --status=in_progress # Active work
|
||||
|
||||
# Communication
|
||||
gt mail inbox # Check messages
|
||||
gt mail send <addr> -s "..." -m "..."
|
||||
|
||||
# Lifecycle
|
||||
gt handoff # Request session cycle
|
||||
gt peek <agent> # Check agent health
|
||||
|
||||
# Diagnostics
|
||||
gt doctor # Health check
|
||||
gt doctor --fix # Auto-repair
|
||||
bd formula list # List formulas
|
||||
bd cook <formula> # Execute formula
|
||||
bd mol pour <formula> # Create trackable instance
|
||||
bd mol list # List active instances
|
||||
```
|
||||
|
||||
## Cooking Formulas
|
||||
|
||||
Gas Town includes built-in formulas for common workflows. See `.beads/formulas/` for available recipes.
|
||||
|
||||
## Dashboard
|
||||
|
||||
Web-based dashboard for monitoring Gas Town activity.
|
||||
Gas Town includes a web dashboard for monitoring:
|
||||
|
||||
```bash
|
||||
# Start the dashboard
|
||||
# Start dashboard
|
||||
gt dashboard --port 8080
|
||||
|
||||
# Open in browser
|
||||
open http://localhost:8080
|
||||
```
|
||||
|
||||
**Features:**
|
||||
- **Convoy tracking** - View all active convoys with progress bars and work status
|
||||
- **Polecat workers** - See active worker sessions and their activity status
|
||||
- **Refinery status** - Monitor merge queue and PR processing
|
||||
- **Auto-refresh** - Updates every 10 seconds via htmx
|
||||
Features:
|
||||
|
||||
Work status indicators:
|
||||
| Status | Color | Meaning |
|
||||
|--------|-------|---------|
|
||||
| `complete` | Green | All tracked items done |
|
||||
| `active` | Green | Recent activity (< 1 min) |
|
||||
| `stale` | Yellow | Activity 1-5 min ago |
|
||||
| `stuck` | Red | Activity > 5 min ago |
|
||||
| `waiting` | Gray | No assignee/activity |
|
||||
- Real-time agent status
|
||||
- Convoy progress tracking
|
||||
- Hook state visualization
|
||||
- Configuration management
|
||||
|
||||
## Advanced Concepts
|
||||
|
||||
### The Propulsion Principle
|
||||
|
||||
Gas Town uses git hooks as a propulsion mechanism. Each hook is a git worktree with:
|
||||
|
||||
1. **Persistent state** - Work survives agent restarts
|
||||
2. **Version control** - All changes tracked in git
|
||||
3. **Rollback capability** - Revert to any previous state
|
||||
4. **Multi-agent coordination** - Shared through git
|
||||
|
||||
### Hook Lifecycle
|
||||
|
||||
```mermaid
|
||||
stateDiagram-v2
|
||||
[*] --> Created: Agent spawned
|
||||
Created --> Active: Work assigned
|
||||
Active --> Suspended: Agent paused
|
||||
Suspended --> Active: Agent resumed
|
||||
Active --> Completed: Work done
|
||||
Completed --> Archived: Hook archived
|
||||
Archived --> [*]
|
||||
```
|
||||
|
||||
### MEOW (Mayor-Enhanced Orchestration Workflow)
|
||||
|
||||
MEOW is the recommended pattern:
|
||||
|
||||
1. **Tell the Mayor** - Describe what you want
|
||||
2. **Mayor analyzes** - Breaks down into tasks
|
||||
3. **Convoy creation** - Mayor creates convoy with issues
|
||||
4. **Agent spawning** - Mayor spawns appropriate agents
|
||||
5. **Work distribution** - Issues slung to agents via hooks
|
||||
6. **Progress monitoring** - Track through convoy status
|
||||
7. **Completion** - Mayor summarizes results
|
||||
|
||||
## Shell Completions
|
||||
|
||||
Enable tab completion for `gt` commands:
|
||||
|
||||
### Bash
|
||||
|
||||
```bash
|
||||
# Add to ~/.bashrc
|
||||
source <(gt completion bash)
|
||||
# Bash
|
||||
gt completion bash > /etc/bash_completion.d/gt
|
||||
|
||||
# Or install permanently
|
||||
gt completion bash > /usr/local/etc/bash_completion.d/gt
|
||||
```
|
||||
|
||||
### Zsh
|
||||
|
||||
```bash
|
||||
# Add to ~/.zshrc (before compinit)
|
||||
source <(gt completion zsh)
|
||||
|
||||
# Or install to fpath
|
||||
# Zsh
|
||||
gt completion zsh > "${fpath[1]}/_gt"
|
||||
```
|
||||
|
||||
### Fish
|
||||
|
||||
```bash
|
||||
# Fish
|
||||
gt completion fish > ~/.config/fish/completions/gt.fish
|
||||
```
|
||||
|
||||
## Roles
|
||||
## Project Roles
|
||||
|
||||
| Role | Scope | Job |
|
||||
|------|-------|-----|
|
||||
| **Overseer** | Human | Sets strategy, reviews output, handles escalations |
|
||||
| **Mayor** | Town-wide | Cross-rig coordination, work dispatch |
|
||||
| **Deacon** | Town-wide | Daemon process, agent lifecycle, plugin execution |
|
||||
| **Witness** | Per-rig | Monitor polecats, nudge stuck workers |
|
||||
| **Refinery** | Per-rig | Merge queue, PR review, integration |
|
||||
| **Polecat** | Per-task | Execute work, file discovered issues, request shutdown |
|
||||
| Role | Description | Primary Interface |
|
||||
| --------------- | ------------------ | -------------------- |
|
||||
| **Mayor** | AI coordinator | `gt mayor attach` |
|
||||
| **Human (You)** | Crew member | Your crew directory |
|
||||
| **Polecat** | Worker agent | Spawned by Mayor |
|
||||
| **Hook** | Persistent storage | Git worktree |
|
||||
| **Convoy** | Work tracker | `gt convoy` commands |
|
||||
|
||||
## The Propulsion Principle
|
||||
## Tips
|
||||
|
||||
> If your hook has work, RUN IT.
|
||||
- **Always start with the Mayor** - It's designed to be your primary interface
|
||||
- **Use convoys for coordination** - They provide visibility across agents
|
||||
- **Leverage hooks for persistence** - Your work won't disappear
|
||||
- **Create formulas for repeated tasks** - Save time with Beads recipes
|
||||
- **Monitor the dashboard** - Get real-time visibility
|
||||
- **Let the Mayor orchestrate** - It knows how to manage agents
|
||||
|
||||
Agents wake up, check their hook, execute the molecule. No waiting for commands.
|
||||
Molecules survive crashes - any agent can continue where another left off.
|
||||
## Troubleshooting
|
||||
|
||||
---
|
||||
### Agents lose connection
|
||||
|
||||
## Optional: MEOW Deep Dive
|
||||
Check hooks are properly initialized:
|
||||
|
||||
**M**olecular **E**xpression **O**f **W**ork - the full algebra.
|
||||
```bash
|
||||
gt hooks list
|
||||
gt hooks repair
|
||||
```
|
||||
|
||||
### States of Matter
|
||||
### Convoy stuck
|
||||
|
||||
| Phase | Name | Storage | Behavior |
|
||||
|-------|------|---------|----------|
|
||||
| Ice-9 | Formula | `.beads/formulas/` | Source template, composable |
|
||||
| Solid | Protomolecule | `.beads/` | Frozen template, reusable |
|
||||
| Liquid | Mol | `.beads/` | Flowing work, persistent |
|
||||
| Vapor | Wisp | `.beads/` (ephemeral flag) | Transient, for patrols |
|
||||
Force refresh:
|
||||
|
||||
*(Protomolecules are an homage to The Expanse. Ice-9 is a nod to Vonnegut.)*
|
||||
```bash
|
||||
gt convoy refresh <convoy-id>
|
||||
```
|
||||
|
||||
### Operators
|
||||
### Mayor not responding
|
||||
|
||||
| Operator | From → To | Effect |
|
||||
|----------|-----------|--------|
|
||||
| `cook` | Formula → Protomolecule | Expand macros, flatten |
|
||||
| `pour` | Proto → Mol | Instantiate as persistent |
|
||||
| `wisp` | Proto → Wisp | Instantiate as ephemeral |
|
||||
| `squash` | Mol/Wisp → Digest | Condense to permanent record |
|
||||
| `burn` | Wisp → ∅ | Discard without record |
|
||||
Restart Mayor session:
|
||||
|
||||
---
|
||||
```bash
|
||||
gt mayor detach
|
||||
gt mayor attach
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
MIT License - see LICENSE file for details
|
||||
|
||||
---
|
||||
|
||||
**Getting Started:** Run `gt install ~/gt --git && cd ~/gt && gt config agent list && gt mayor attach` (or `gt mayor attach --agent codex`) and tell the Mayor what you want to build!
|
||||
|
||||
@@ -17,7 +17,9 @@ Complete setup guide for Gas Town multi-agent orchestrator.
|
||||
| Tool | Version | Check | Install |
|
||||
|------|---------|-------|---------|
|
||||
| **tmux** | 3.0+ | `tmux -V` | See below |
|
||||
| **Claude Code** | latest | `claude --version` | See [claude.ai/claude-code](https://claude.ai/claude-code) |
|
||||
| **Claude Code** (default) | latest | `claude --version` | See [claude.ai/claude-code](https://claude.ai/claude-code) |
|
||||
| **Codex CLI** (optional) | latest | `codex --version` | See [developers.openai.com/codex/cli](https://developers.openai.com/codex/cli) |
|
||||
| **OpenCode CLI** (optional) | latest | `opencode --version` | See [opencode.ai](https://opencode.ai) |
|
||||
|
||||
## Installing Prerequisites
|
||||
|
||||
@@ -130,22 +132,46 @@ gt doctor # Run health checks
|
||||
gt status # Show workspace status
|
||||
```
|
||||
|
||||
### Step 5: Configure Agents (Optional)
|
||||
|
||||
Gas Town supports built-in runtimes (`claude`, `gemini`, `codex`) plus custom agent aliases.
|
||||
|
||||
```bash
|
||||
# List available agents
|
||||
gt config agent list
|
||||
|
||||
# Create an alias (aliases can encode model/thinking flags)
|
||||
gt config agent set codex-low "codex --thinking low"
|
||||
gt config agent set claude-haiku "claude --model haiku --dangerously-skip-permissions"
|
||||
|
||||
# Set the town default agent (used when a rig doesn't specify one)
|
||||
gt config default-agent codex-low
|
||||
```
|
||||
|
||||
You can also override the agent per command without changing defaults:
|
||||
|
||||
```bash
|
||||
gt start --agent codex-low
|
||||
gt sling issue-123 myproject --agent claude-haiku
|
||||
```
|
||||
|
||||
## Minimal Mode vs Full Stack Mode
|
||||
|
||||
Gas Town supports two operational modes:
|
||||
|
||||
### Minimal Mode (No Daemon)
|
||||
|
||||
Run individual Claude Code instances manually. Gas Town only tracks state.
|
||||
Run individual runtime instances manually. Gas Town only tracks state.
|
||||
|
||||
```bash
|
||||
# Create and assign work
|
||||
gt convoy create "Fix bugs" issue-123
|
||||
gt sling issue-123 myproject
|
||||
|
||||
# Run Claude manually
|
||||
# Run runtime manually
|
||||
cd ~/gt/myproject/polecats/<worker>
|
||||
claude --resume
|
||||
claude --resume # Claude Code
|
||||
# or: codex # Codex CLI
|
||||
|
||||
# Check progress
|
||||
gt convoy list
|
||||
|
||||
@@ -84,29 +84,43 @@ Each agent bead references its role bead via the `role_bead` field.
|
||||
│ └── town.json Town configuration
|
||||
└── <rig>/ Project container (NOT a git clone)
|
||||
├── config.json Rig identity and beads prefix
|
||||
├── .beads/ → mayor/rig/.beads Symlink to canonical beads
|
||||
├── .repo.git/ Bare repo (shared by worktrees)
|
||||
├── mayor/rig/ Mayor's clone (canonical beads)
|
||||
├── refinery/rig/ Worktree on main
|
||||
├── mayor/rig/ Canonical clone (beads live here)
|
||||
│ └── .beads/ Rig-level beads database
|
||||
├── refinery/rig/ Worktree from mayor/rig
|
||||
├── witness/ No clone (monitors only)
|
||||
├── crew/<name>/ Human workspaces
|
||||
└── polecats/<name>/ Worker worktrees
|
||||
├── crew/<name>/ Human workspaces (full clones)
|
||||
└── polecats/<name>/ Worker worktrees from mayor/rig
|
||||
```
|
||||
|
||||
### Worktree Architecture
|
||||
|
||||
Polecats and refinery are git worktrees, not full clones. This enables fast spawning
|
||||
and shared object storage. The worktree base is `mayor/rig`:
|
||||
|
||||
```go
|
||||
// From polecat/manager.go - worktrees are based on mayor/rig
|
||||
git worktree add -b polecat/<name>-<timestamp> polecats/<name>
|
||||
```
|
||||
|
||||
Crew workspaces (`crew/<name>/`) are full git clones for human developers who need
|
||||
independent repos. Polecats are ephemeral and benefit from worktree efficiency.
|
||||
|
||||
## Beads Routing
|
||||
|
||||
The `routes.jsonl` file maps issue ID prefixes to their storage locations:
|
||||
The `routes.jsonl` file maps issue ID prefixes to rig locations (relative to town root):
|
||||
|
||||
```jsonl
|
||||
{"prefix":"hq","path":"/Users/stevey/gt/.beads"}
|
||||
{"prefix":"gt","path":"/Users/stevey/gt/gastown/mayor/rig/.beads"}
|
||||
{"prefix":"hq-","path":"."}
|
||||
{"prefix":"gt-","path":"gastown/mayor/rig"}
|
||||
{"prefix":"bd-","path":"beads/mayor/rig"}
|
||||
```
|
||||
|
||||
Routes point to `mayor/rig` because that's where the canonical `.beads/` lives.
|
||||
This enables transparent cross-rig beads operations:
|
||||
|
||||
```bash
|
||||
bd show hq-mayor # Routes to town beads
|
||||
bd show gt-xyz # Routes to gastown rig beads
|
||||
bd show hq-mayor # Routes to town beads (~/.gt/.beads)
|
||||
bd show gt-xyz # Routes to gastown/mayor/rig/.beads
|
||||
```
|
||||
|
||||
## See Also
|
||||
|
||||
94
docs/glossary.md
Normal file
94
docs/glossary.md
Normal file
@@ -0,0 +1,94 @@
|
||||
# Gas Town Glossary
|
||||
|
||||
Gas Town is an agentic development environment for managing multiple Claude Code instances simultaneously using the `gt` and `bd` (Beads) binaries, coordinated with tmux in git-managed directories.
|
||||
|
||||
## Core Principles
|
||||
|
||||
### MEOW (Molecular Expression of Work)
|
||||
Breaking large goals into detailed instructions for agents. Supported by Beads, Epics, Formulas, and Molecules. MEOW ensures work is decomposed into trackable, atomic units that agents can execute autonomously.
|
||||
|
||||
### GUPP (Gas Town Universal Propulsion Principle)
|
||||
"If there is work on your Hook, YOU MUST RUN IT." This principle ensures agents autonomously proceed with available work without waiting for external input. GUPP is the heartbeat of autonomous operation.
|
||||
|
||||
### NDI (Nondeterministic Idempotence)
|
||||
The overarching goal ensuring useful outcomes through orchestration of potentially unreliable processes. Persistent Beads and oversight agents (Witness, Deacon) guarantee eventual workflow completion even when individual operations may fail or produce varying results.
|
||||
|
||||
## Environments
|
||||
|
||||
### Town
|
||||
The management headquarters (e.g., `~/gt/`). The Town coordinates all workers across multiple Rigs and houses town-level agents like Mayor and Deacon.
|
||||
|
||||
### Rig
|
||||
A project-specific Git repository under Gas Town management. Each Rig has its own Polecats, Refinery, Witness, and Crew members. Rigs are where actual development work happens.
|
||||
|
||||
## Town-Level Roles
|
||||
|
||||
### Mayor
|
||||
Chief-of-staff agent responsible for initiating Convoys, coordinating work distribution, and notifying users of important events. The Mayor operates from the town level and has visibility across all Rigs.
|
||||
|
||||
### Deacon
|
||||
Daemon beacon running continuous Patrol cycles. The Deacon ensures worker activity, monitors system health, and triggers recovery when agents become unresponsive. Think of the Deacon as the system's watchdog.
|
||||
|
||||
### Dogs
|
||||
The Deacon's crew of maintenance agents handling background tasks like cleanup, health checks, and system maintenance.
|
||||
|
||||
### Boot (the Dog)
|
||||
A special Dog that checks the Deacon every 5 minutes, ensuring the watchdog itself is still watching. This creates a chain of accountability.
|
||||
|
||||
## Rig-Level Roles
|
||||
|
||||
### Polecat
|
||||
Ephemeral worker agents that produce Merge Requests. Polecats are spawned for specific tasks, complete their work, and are then cleaned up. They work in isolated git worktrees to avoid conflicts.
|
||||
|
||||
### Refinery
|
||||
Manages the Merge Queue for a Rig. The Refinery intelligently merges changes from Polecats, handling conflicts and ensuring code quality before changes reach the main branch.
|
||||
|
||||
### Witness
|
||||
Patrol agent that oversees Polecats and the Refinery within a Rig. The Witness monitors progress, detects stuck agents, and can trigger recovery actions.
|
||||
|
||||
### Crew
|
||||
Long-lived, named agents for persistent collaboration. Unlike ephemeral Polecats, Crew members maintain context across sessions and are ideal for ongoing work relationships.
|
||||
|
||||
## Work Units
|
||||
|
||||
### Bead
|
||||
Git-backed atomic work unit stored in JSONL format. Beads are the fundamental unit of work tracking in Gas Town. They can represent issues, tasks, epics, or any trackable work item.
|
||||
|
||||
### Formula
|
||||
TOML-based workflow source template. Formulas define reusable patterns for common operations like patrol cycles, code review, or deployment.
|
||||
|
||||
### Protomolecule
|
||||
A template class for instantiating Molecules. Protomolecules define the structure and steps of a workflow without being tied to specific work items.
|
||||
|
||||
### Molecule
|
||||
Durable chained Bead workflows. Molecules represent multi-step processes where each step is tracked as a Bead. They survive agent restarts and ensure complex workflows complete.
|
||||
|
||||
### Wisp
|
||||
Ephemeral Beads destroyed after runs. Wisps are lightweight work items used for transient operations that don't need permanent tracking.
|
||||
|
||||
### Hook
|
||||
A special pinned Bead for each agent. The Hook is an agent's primary work queue - when work appears on your Hook, GUPP dictates you must run it.
|
||||
|
||||
## Workflow Commands
|
||||
|
||||
### Convoy
|
||||
Primary work-order wrapping related Beads. Convoys group related tasks together and can be assigned to multiple workers. Created with `gt convoy create`.
|
||||
|
||||
### Slinging
|
||||
Assigning work to agents via `gt sling`. When you sling work to a Polecat or Crew member, you're putting it on their Hook for execution.
|
||||
|
||||
### Nudging
|
||||
Real-time messaging between agents with `gt nudge`. Nudges allow immediate communication without going through the mail system.
|
||||
|
||||
### Handoff
|
||||
Agent session refresh via `/handoff`. When context gets full or an agent needs a fresh start, handoff transfers work state to a new session.
|
||||
|
||||
### Seance
|
||||
Communicating with previous sessions via `gt seance`. Allows agents to query their predecessors for context and decisions from earlier work.
|
||||
|
||||
### Patrol
|
||||
Ephemeral loop maintaining system heartbeat. Patrol agents (Deacon, Witness) continuously cycle through health checks and trigger actions as needed.
|
||||
|
||||
---
|
||||
|
||||
*This glossary was contributed by [Clay Shirky](https://github.com/cshirky) in [Issue #80](https://github.com/steveyegge/gastown/issues/80).*
|
||||
300
docs/property-layers.md
Normal file
300
docs/property-layers.md
Normal file
@@ -0,0 +1,300 @@
|
||||
# Property Layers: Multi-Level Configuration
|
||||
|
||||
> Implementation guide for Gas Town's configuration system.
|
||||
> Created: 2025-01-06
|
||||
|
||||
## Overview
|
||||
|
||||
Gas Town uses a layered property system for configuration. Properties are
|
||||
looked up through multiple layers, with earlier layers overriding later ones.
|
||||
This enables both local control and global coordination.
|
||||
|
||||
## The Four Layers
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ 1. WISP LAYER (transient, town-local) │
|
||||
│ Location: <rig>/.beads-wisp/config/ │
|
||||
│ Synced: Never │
|
||||
│ Use: Temporary local overrides │
|
||||
└─────────────────────────────┬───────────────────────────────┘
|
||||
│ if missing
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ 2. RIG BEAD LAYER (persistent, synced globally) │
|
||||
│ Location: <rig>/.beads/ (rig identity bead labels) │
|
||||
│ Synced: Via git (all clones see it) │
|
||||
│ Use: Project-wide operational state │
|
||||
└─────────────────────────────┬───────────────────────────────┘
|
||||
│ if missing
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ 3. TOWN DEFAULTS │
|
||||
│ Location: ~/gt/config.json or ~/gt/.beads/ │
|
||||
│ Synced: N/A (per-town) │
|
||||
│ Use: Town-wide policies │
|
||||
└─────────────────────────────┬───────────────────────────────┘
|
||||
│ if missing
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ 4. SYSTEM DEFAULTS (compiled in) │
|
||||
│ Use: Fallback when nothing else specified │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Lookup Behavior
|
||||
|
||||
### Override Semantics (Default)
|
||||
|
||||
For most properties, the first non-nil value wins:
|
||||
|
||||
```go
|
||||
func GetConfig(key string) interface{} {
|
||||
if val := wisp.Get(key); val != nil {
|
||||
if val == Blocked { return nil }
|
||||
return val
|
||||
}
|
||||
if val := rigBead.GetLabel(key); val != nil {
|
||||
return val
|
||||
}
|
||||
if val := townDefaults.Get(key); val != nil {
|
||||
return val
|
||||
}
|
||||
return systemDefaults[key]
|
||||
}
|
||||
```
|
||||
|
||||
### Stacking Semantics (Integers)
|
||||
|
||||
For integer properties, values from wisp and bead layers **add** to the base:
|
||||
|
||||
```go
|
||||
func GetIntConfig(key string) int {
|
||||
base := getBaseDefault(key) // Town or system default
|
||||
beadAdj := rigBead.GetInt(key) // 0 if missing
|
||||
wispAdj := wisp.GetInt(key) // 0 if missing
|
||||
return base + beadAdj + wispAdj
|
||||
}
|
||||
```
|
||||
|
||||
This enables temporary adjustments without changing the base value.
|
||||
|
||||
### Blocking Inheritance
|
||||
|
||||
You can explicitly block a property from being inherited:
|
||||
|
||||
```bash
|
||||
gt rig config set gastown auto_restart --block
|
||||
```
|
||||
|
||||
This creates a "blocked" marker in the wisp layer. Even if the rig bead
|
||||
or defaults say `auto_restart: true`, the lookup returns nil.
|
||||
|
||||
## Rig Identity Beads
|
||||
|
||||
Each rig has an identity bead for operational state:
|
||||
|
||||
```yaml
|
||||
id: gt-rig-gastown
|
||||
type: rig
|
||||
name: gastown
|
||||
repo: git@github.com:steveyegge/gastown.git
|
||||
prefix: gt
|
||||
|
||||
labels:
|
||||
- status:operational
|
||||
- priority:normal
|
||||
```
|
||||
|
||||
These beads sync via git, so all clones of the rig see the same state.
|
||||
|
||||
## Two-Level Rig Control
|
||||
|
||||
### Level 1: Park (Local, Ephemeral)
|
||||
|
||||
```bash
|
||||
gt rig park gastown # Stop services, daemon won't restart
|
||||
gt rig unpark gastown # Allow services to run
|
||||
```
|
||||
|
||||
- Stored in wisp layer (`.beads-wisp/config/`)
|
||||
- Only affects this town
|
||||
- Disappears on cleanup
|
||||
- Use: Local maintenance, debugging
|
||||
|
||||
### Level 2: Dock (Global, Persistent)
|
||||
|
||||
```bash
|
||||
gt rig dock gastown # Set status:docked label on rig bead
|
||||
gt rig undock gastown # Remove label
|
||||
```
|
||||
|
||||
- Stored on rig identity bead
|
||||
- Syncs to all clones via git
|
||||
- Permanent until explicitly changed
|
||||
- Use: Project-wide maintenance, coordinated downtime
|
||||
|
||||
### Daemon Behavior
|
||||
|
||||
The daemon checks both levels before auto-restarting:
|
||||
|
||||
```go
|
||||
func shouldAutoRestart(rig *Rig) bool {
|
||||
status := rig.GetConfig("status")
|
||||
if status == "parked" || status == "docked" {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
```
|
||||
|
||||
## Configuration Keys
|
||||
|
||||
| Key | Type | Behavior | Description |
|
||||
|-----|------|----------|-------------|
|
||||
| `status` | string | Override | operational/parked/docked |
|
||||
| `auto_restart` | bool | Override | Daemon auto-restart behavior |
|
||||
| `max_polecats` | int | Override | Maximum concurrent polecats |
|
||||
| `priority_adjustment` | int | **Stack** | Scheduling priority modifier |
|
||||
| `maintenance_window` | string | Override | When maintenance allowed |
|
||||
| `dnd` | bool | Override | Do not disturb mode |
|
||||
|
||||
## Commands
|
||||
|
||||
### View Configuration
|
||||
|
||||
```bash
|
||||
gt rig config show gastown # Show effective config (all layers)
|
||||
gt rig config show gastown --layer # Show which layer each value comes from
|
||||
```
|
||||
|
||||
### Set Configuration
|
||||
|
||||
```bash
|
||||
# Set in wisp layer (local, ephemeral)
|
||||
gt rig config set gastown key value
|
||||
|
||||
# Set in bead layer (global, permanent)
|
||||
gt rig config set gastown key value --global
|
||||
|
||||
# Block inheritance
|
||||
gt rig config set gastown key --block
|
||||
|
||||
# Clear from wisp layer
|
||||
gt rig config unset gastown key
|
||||
```
|
||||
|
||||
### Rig Lifecycle
|
||||
|
||||
```bash
|
||||
gt rig park gastown # Local: stop + prevent restart
|
||||
gt rig unpark gastown # Local: allow restart
|
||||
|
||||
gt rig dock gastown # Global: mark as offline
|
||||
gt rig undock gastown # Global: mark as operational
|
||||
|
||||
gt rig status gastown # Show current state
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### Temporary Priority Boost
|
||||
|
||||
```bash
|
||||
# Base priority: 0 (from defaults)
|
||||
# Give this rig temporary priority boost for urgent work
|
||||
|
||||
gt rig config set gastown priority_adjustment 10
|
||||
|
||||
# Effective priority: 0 + 10 = 10
|
||||
# When done, clear it:
|
||||
|
||||
gt rig config unset gastown priority_adjustment
|
||||
```
|
||||
|
||||
### Local Maintenance
|
||||
|
||||
```bash
|
||||
# I'm upgrading the local clone, don't restart services
|
||||
gt rig park gastown
|
||||
|
||||
# ... do maintenance ...
|
||||
|
||||
gt rig unpark gastown
|
||||
```
|
||||
|
||||
### Project-Wide Maintenance
|
||||
|
||||
```bash
|
||||
# Major refactor in progress, all clones should pause
|
||||
gt rig dock gastown
|
||||
|
||||
# Syncs via git - other towns see the rig as docked
|
||||
bd sync
|
||||
|
||||
# When done:
|
||||
gt rig undock gastown
|
||||
bd sync
|
||||
```
|
||||
|
||||
### Block Auto-Restart Locally
|
||||
|
||||
```bash
|
||||
# Rig bead says auto_restart: true
|
||||
# But I'm debugging and don't want that here
|
||||
|
||||
gt rig config set gastown auto_restart --block
|
||||
|
||||
# Now auto_restart returns nil for this town only
|
||||
```
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
### Wisp Storage
|
||||
|
||||
Wisp config is stored in `.beads-wisp/config/<rig>.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"rig": "gastown",
|
||||
"values": {
|
||||
"status": "parked",
|
||||
"priority_adjustment": 10
|
||||
},
|
||||
"blocked": ["auto_restart"]
|
||||
}
|
||||
```
|
||||
|
||||
### Rig Bead Labels
|
||||
|
||||
Rig operational state is stored as labels on the rig identity bead:
|
||||
|
||||
```bash
|
||||
bd label add gt-rig-gastown status:docked
|
||||
bd label remove gt-rig-gastown status:docked
|
||||
```
|
||||
|
||||
### Daemon Integration
|
||||
|
||||
The daemon's lifecycle manager checks config before starting services:
|
||||
|
||||
```go
|
||||
func (d *Daemon) maybeStartRigServices(rig string) {
|
||||
r := d.getRig(rig)
|
||||
|
||||
status := r.GetConfig("status")
|
||||
if status == "parked" || status == "docked" {
|
||||
log.Info("Rig %s is offline, skipping auto-start", rig)
|
||||
return
|
||||
}
|
||||
|
||||
d.ensureWitness(rig)
|
||||
d.ensureRefinery(rig)
|
||||
}
|
||||
```
|
||||
|
||||
## Related Documents
|
||||
|
||||
- `~/gt/docs/hop/PROPERTY-LAYERS.md` - Strategic architecture
|
||||
- `wisp-architecture.md` - Wisp system design
|
||||
- `agent-as-bead.md` - Agent identity beads (similar pattern)
|
||||
@@ -7,24 +7,38 @@ Technical reference for Gas Town internals. Read the README first.
|
||||
```
|
||||
~/gt/ Town root
|
||||
├── .beads/ Town-level beads (hq-* prefix)
|
||||
├── mayor/ Mayor config
|
||||
│ └── town.json
|
||||
├── mayor/ Mayor agent home (town coordinator)
|
||||
│ ├── town.json Town configuration
|
||||
│ ├── CLAUDE.md Mayor context (on disk)
|
||||
│ └── .claude/settings.json Mayor Claude settings
|
||||
├── deacon/ Deacon agent home (background supervisor)
|
||||
│ └── .claude/settings.json Deacon settings (context via gt prime)
|
||||
└── <rig>/ Project container (NOT a git clone)
|
||||
├── config.json Rig identity
|
||||
├── .beads/ → mayor/rig/.beads
|
||||
├── .repo.git/ Bare repo (shared by worktrees)
|
||||
├── mayor/rig/ Mayor's clone (canonical beads)
|
||||
├── refinery/rig/ Worktree on main
|
||||
├── witness/ No clone (monitors only)
|
||||
├── crew/<name>/ Human workspaces
|
||||
└── polecats/<name>/ Worker worktrees
|
||||
│ └── CLAUDE.md Per-rig mayor context (on disk)
|
||||
├── witness/ Witness agent home (monitors only)
|
||||
│ └── .claude/settings.json (context via gt prime)
|
||||
├── refinery/ Refinery settings parent
|
||||
│ ├── .claude/settings.json
|
||||
│ └── rig/ Worktree on main
|
||||
│ └── CLAUDE.md Refinery context (on disk)
|
||||
├── crew/ Crew settings parent (shared)
|
||||
│ ├── .claude/settings.json (context via gt prime)
|
||||
│ └── <name>/rig/ Human workspaces
|
||||
└── polecats/ Polecat settings parent (shared)
|
||||
├── .claude/settings.json (context via gt prime)
|
||||
└── <name>/rig/ Worker worktrees
|
||||
```
|
||||
|
||||
**Key points:**
|
||||
|
||||
- Rig root is a container, not a clone
|
||||
- `.repo.git/` is bare - refinery and polecats are worktrees
|
||||
- Mayor clone holds canonical `.beads/`, others inherit via redirect
|
||||
- Per-rig `mayor/rig/` holds canonical `.beads/`, others inherit via redirect
|
||||
- Settings placed in parent dirs (not git clones) for upward traversal
|
||||
|
||||
## Beads Routing
|
||||
|
||||
@@ -204,6 +218,123 @@ gt mol step done <step> # Complete a molecule step
|
||||
| `GT_RIG` | Rig name for rig-level agents |
|
||||
| `GT_POLECAT` | Polecat name (for polecats only) |
|
||||
|
||||
## Agent Working Directories and Settings
|
||||
|
||||
Each agent runs in a specific working directory and has its own Claude settings.
|
||||
Understanding this hierarchy is essential for proper configuration.
|
||||
|
||||
### Working Directories by Role
|
||||
|
||||
| Role | Working Directory | Notes |
|
||||
|------|-------------------|-------|
|
||||
| **Mayor** | `~/gt/mayor/` | Town-level coordinator, isolated from rigs |
|
||||
| **Deacon** | `~/gt/deacon/` | Background supervisor daemon |
|
||||
| **Witness** | `~/gt/<rig>/witness/` | No git clone, monitors polecats only |
|
||||
| **Refinery** | `~/gt/<rig>/refinery/rig/` | Worktree on main branch |
|
||||
| **Crew** | `~/gt/<rig>/crew/<name>/rig/` | Persistent human workspace clone |
|
||||
| **Polecat** | `~/gt/<rig>/polecats/<name>/rig/` | Ephemeral worker worktree |
|
||||
|
||||
Note: The per-rig `<rig>/mayor/rig/` directory is NOT a working directory—it's
|
||||
a git clone that holds the canonical `.beads/` database for that rig.
|
||||
|
||||
### Settings File Locations
|
||||
|
||||
Claude Code searches for `.claude/settings.json` starting from the working
|
||||
directory and traversing upward. Settings are placed in **parent directories**
|
||||
(not inside git clones) so they're found via directory traversal without
|
||||
polluting source repositories:
|
||||
|
||||
```
|
||||
~/gt/
|
||||
├── mayor/.claude/settings.json # Mayor settings
|
||||
├── deacon/.claude/settings.json # Deacon settings
|
||||
└── <rig>/
|
||||
├── witness/.claude/settings.json # Witness settings (no rig/ subdir)
|
||||
├── refinery/.claude/settings.json # Found by refinery/rig/ via traversal
|
||||
├── crew/.claude/settings.json # Shared by all crew/<name>/rig/
|
||||
└── polecats/.claude/settings.json # Shared by all polecats/<name>/rig/
|
||||
```
|
||||
|
||||
**Why parent directories?** Agents working in git clones (like `refinery/rig/`)
|
||||
would pollute the source repo if settings were placed there. By putting settings
|
||||
one level up, Claude finds them via upward traversal, and all workers of the
|
||||
same type share the same settings.
|
||||
|
||||
### CLAUDE.md Locations
|
||||
|
||||
Role context is delivered via CLAUDE.md files or ephemeral injection:
|
||||
|
||||
| Role | CLAUDE.md Location | Method |
|
||||
|------|-------------------|--------|
|
||||
| **Mayor** | `~/gt/mayor/CLAUDE.md` | On disk |
|
||||
| **Deacon** | (none) | Injected via `gt prime` at SessionStart |
|
||||
| **Witness** | (none) | Injected via `gt prime` at SessionStart |
|
||||
| **Refinery** | `<rig>/refinery/rig/CLAUDE.md` | On disk (inside worktree) |
|
||||
| **Crew** | (none) | Injected via `gt prime` at SessionStart |
|
||||
| **Polecat** | (none) | Injected via `gt prime` at SessionStart |
|
||||
|
||||
Additionally, each rig has `<rig>/mayor/rig/CLAUDE.md` for the per-rig mayor clone
|
||||
(used for beads operations, not a running agent).
|
||||
|
||||
**Why ephemeral injection?** Writing CLAUDE.md into git clones would:
|
||||
1. Pollute source repos when agents commit/push
|
||||
2. Leak Gas Town internals into project history
|
||||
3. Conflict with project-specific CLAUDE.md files
|
||||
|
||||
The `gt prime` command runs at SessionStart hook and injects context without
|
||||
persisting it to disk.
|
||||
|
||||
### Sparse Checkout (Source Repo Isolation)
|
||||
|
||||
When agents work on source repositories that have their own Claude Code configuration,
|
||||
Gas Town uses git sparse checkout to exclude all context files:
|
||||
|
||||
```bash
|
||||
# Automatically configured for worktrees - excludes:
|
||||
# - .claude/ : settings, rules, agents, commands
|
||||
# - CLAUDE.md : primary context file
|
||||
# - CLAUDE.local.md: personal context file
|
||||
# - .mcp.json : MCP server configuration
|
||||
git sparse-checkout set --no-cone '/*' '!/.claude/' '!/CLAUDE.md' '!/CLAUDE.local.md' '!/.mcp.json'
|
||||
```
|
||||
|
||||
This ensures agents use Gas Town's context, not the source repo's instructions.
|
||||
|
||||
**Doctor check**: `gt doctor` verifies sparse checkout is configured correctly.
|
||||
Run `gt doctor --fix` to update legacy configurations missing the newer patterns.
|
||||
|
||||
### Settings Inheritance
|
||||
|
||||
Claude Code's settings search order (first match wins):
|
||||
|
||||
1. `.claude/settings.json` in current working directory
|
||||
2. `.claude/settings.json` in parent directories (traversing up)
|
||||
3. `~/.claude/settings.json` (user global settings)
|
||||
|
||||
Gas Town places settings at each agent's working directory root, so agents
|
||||
find their role-specific settings before reaching any parent or global config.
|
||||
|
||||
### Settings Templates
|
||||
|
||||
Gas Town uses two settings templates based on role type:
|
||||
|
||||
| Type | Roles | Key Difference |
|
||||
|------|-------|----------------|
|
||||
| **Interactive** | Mayor, Crew | Mail injected on `UserPromptSubmit` hook |
|
||||
| **Autonomous** | Polecat, Witness, Refinery, Deacon | Mail injected on `SessionStart` hook |
|
||||
|
||||
Autonomous agents may start without user input, so they need mail checked
|
||||
at session start. Interactive agents wait for user prompts.
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
| Problem | Solution |
|
||||
|---------|----------|
|
||||
| Agent using wrong settings | Check `gt doctor`, verify sparse checkout |
|
||||
| Settings not found | Ensure `.claude/settings.json` exists at role home |
|
||||
| Source repo settings leaking | Run `gt doctor --fix` to configure sparse checkout |
|
||||
| Mayor settings affecting polecats | Mayor should run in `mayor/`, not town root |
|
||||
|
||||
## CLI Reference
|
||||
|
||||
### Town Management
|
||||
@@ -228,15 +359,56 @@ gt config agent remove <name> # Remove custom agent (built-ins protected)
|
||||
gt config default-agent [name] # Get or set town default agent
|
||||
```
|
||||
|
||||
**Built-in agents**: `claude`, `gemini`, `codex`
|
||||
**Built-in agents**: `claude`, `gemini`, `codex`, `cursor`, `auggie`, `amp`
|
||||
|
||||
**Custom agents**: Define per-town in `mayor/town.json`:
|
||||
**Custom agents**: Define per-town via CLI or JSON:
|
||||
```bash
|
||||
gt config agent set claude-glm "claude-glm --model glm-4"
|
||||
gt config agent set claude "claude-opus" # Override built-in
|
||||
gt config default-agent claude-glm # Set default
|
||||
```
|
||||
|
||||
**Advanced agent config** (`settings/agents.json`):
|
||||
```json
|
||||
{
|
||||
"version": 1,
|
||||
"agents": {
|
||||
"opencode": {
|
||||
"command": "opencode",
|
||||
"args": [],
|
||||
"resume_flag": "--session",
|
||||
"resume_style": "flag",
|
||||
"non_interactive": {
|
||||
"subcommand": "run",
|
||||
"output_flag": "--format json"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Rig-level agents** (`<rig>/settings/config.json`):
|
||||
```json
|
||||
{
|
||||
"type": "rig-settings",
|
||||
"version": 1,
|
||||
"agent": "opencode",
|
||||
"agents": {
|
||||
"opencode": {
|
||||
"command": "opencode",
|
||||
"args": ["--session"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Agent resolution order**: rig-level → town-level → built-in presets.
|
||||
|
||||
For OpenCode autonomous mode, set env var in your shell profile:
|
||||
```bash
|
||||
export OPENCODE_PERMISSION='{"*":"allow"}'
|
||||
```
|
||||
|
||||
### Rig Management
|
||||
|
||||
```bash
|
||||
@@ -264,12 +436,19 @@ Note: "Swarm" is ephemeral (workers on a convoy's issues). See [Convoys](convoy.
|
||||
# Standard workflow: convoy first, then sling
|
||||
gt convoy create "Feature X" gt-abc gt-def
|
||||
gt sling gt-abc <rig> # Assign to polecat
|
||||
gt sling gt-def <rig> --molecule=<proto> # With workflow template
|
||||
gt sling gt-abc <rig> --agent codex # Override runtime for this sling/spawn
|
||||
gt sling <proto> --on gt-def <rig> # With workflow template
|
||||
|
||||
# Quick sling (auto-creates convoy)
|
||||
gt sling <bead> <rig> # Auto-convoy for dashboard visibility
|
||||
```
|
||||
|
||||
Agent overrides:
|
||||
|
||||
- `gt start --agent <alias>` overrides the Mayor/Deacon runtime for this launch.
|
||||
- `gt mayor start|attach|restart --agent <alias>` and `gt deacon start|attach|restart --agent <alias>` do the same.
|
||||
- `gt start crew <name> --agent <alias>` and `gt crew at <name> --agent <alias>` override the crew worker runtime.
|
||||
|
||||
### Communication
|
||||
|
||||
```bash
|
||||
|
||||
220
docs/reviews/infrastructure-review.md
Normal file
220
docs/reviews/infrastructure-review.md
Normal file
@@ -0,0 +1,220 @@
|
||||
# Infrastructure & Utilities Code Review
|
||||
|
||||
**Review ID**: gt-a02fj.8
|
||||
**Date**: 2026-01-04
|
||||
**Reviewer**: gastown/polecats/interceptor (polecat gus)
|
||||
|
||||
## Executive Summary
|
||||
|
||||
Reviewed 14 infrastructure packages for dead code, missing abstractions, performance concerns, and error-handling consistency. Found significant cleanup opportunities, totaling ~44% dead code in the constants package and an entire unused package (keepalive).
|
||||
|
||||
---
|
||||
|
||||
## 1. Dead Code Inventory
|
||||
|
||||
### Critical: Entire Package Unused
|
||||
|
||||
| Package | Status | Recommendation |
|
||||
|---------|--------|----------------|
|
||||
| `internal/keepalive/` | 100% unused | **DELETE ENTIRE PACKAGE** |
|
||||
|
||||
All usage of the keepalive package (5 functions) was removed from the codebase on Dec 30, 2025 as part of the shift to feed-based activation; the package itself remains but no imports exist anywhere.
|
||||
|
||||
### High Priority: Functions to Remove
|
||||
|
||||
| Package | Function | Location | Notes |
|
||||
|---------|----------|----------|-------|
|
||||
| `config` | `NewExampleAgentRegistry()` | agents.go:361-381 | Zero usage in codebase |
|
||||
| `constants` | `DirMayor`, `DirPolecats`, `DirCrew`, etc. | constants.go:32-59 | 9 unused directory constants |
|
||||
| `constants` | `FileRigsJSON`, `FileTownJSON`, etc. | constants.go:62-74 | 4 unused file constants |
|
||||
| `constants` | `BranchMain`, `BranchBeadsSync`, etc. | constants.go:77-89 | 4 unused branch constants |
|
||||
| `constants` | `RigBeadsPath()`, `RigPolecatsPath()`, etc. | constants.go | 5 unused path helper functions |
|
||||
| `doctor` | `itoa()` | daemon_check.go:93-111 | Duplicate of `strconv.Itoa()` |
|
||||
| `lock` | `DetectCollisions()` | lock.go:367-402 | Superseded by doctor checks |
|
||||
| `events` | `BootPayload()` | events.go:186-191 | Never called |
|
||||
| `events` | `TypePatrolStarted`, `TypeSessionEnd` | events.go:50,54 | Never emitted |
|
||||
| `events` | `VisibilityBoth` | events.go:32 | Never set |
|
||||
| `boot` | `DeaconDir()` | boot.go:235-237 | Exported but never called |
|
||||
| `dog` | `IdleCount()`, `WorkingCount()` | manager.go:532-562 | Inlined in callers |
|
||||
|
||||
### Medium Priority: Duplicate Definitions
|
||||
|
||||
| Package | Item | Duplicate Location | Action |
|
||||
|---------|------|-------------------|--------|
|
||||
| `constants` | `RigSettingsPath()` | Also in config/loader.go:673 | Remove from constants |
|
||||
| `util` | Atomic write pattern | Also in mrqueue/, wisp/ | Consolidate to util |
|
||||
| `doctor` | `findRigs()` | 3 identical implementations | Extract shared helper |
|
||||
|
||||
---
|
||||
|
||||
## 2. Utility Consolidation Plan
|
||||
|
||||
### Pattern: Atomic Write (Priority: HIGH)
|
||||
|
||||
**Current state**: Duplicated in 3+ locations
|
||||
- `util/atomic.go` (canonical)
|
||||
- `mrqueue/mrqueue.go` (duplicate)
|
||||
- `wisp/io.go` (duplicate)
|
||||
- `polecat/pending.go` (NON-ATOMIC - bug!)
|
||||
|
||||
**Action**:
|
||||
1. Fix `polecat/pending.go:SavePending()` to use `util.AtomicWriteJSON`
|
||||
2. Replace inline atomic writes in mrqueue and wisp with util calls
|
||||
|
||||
### Pattern: Rig Discovery (Priority: HIGH)
|
||||
|
||||
**Current state**: 7+ implementations scattered across doctor package
|
||||
- `BranchCheck.findPersistentRoleDirs()`
|
||||
- `OrphanSessionCheck.getValidRigs()`
|
||||
- `PatrolMoleculesExistCheck.discoverRigs()`
|
||||
- `config_check.go.findAllRigs()`
|
||||
- Multiple `findCrewDirs()` implementations
|
||||
|
||||
**Action**: Create `internal/workspace/discovery.go`:
|
||||
```go
|
||||
type RigDiscovery struct { ... }
|
||||
func (d *RigDiscovery) FindAllRigs() []string
|
||||
func (d *RigDiscovery) FindCrewDirs(rig string) []string
|
||||
func (d *RigDiscovery) FindPolecatDirs(rig string) []string
|
||||
```
|
||||
|
||||
### Pattern: Clone Validation (Priority: MEDIUM)
|
||||
|
||||
**Current state**: Duplicate logic in doctor checks
|
||||
- `rig_check.go`: Validates .git, runs git status
|
||||
- `branch_check.go`: Similar traversal logic
|
||||
|
||||
**Action**: Create `internal/workspace/clone.go`:
|
||||
```go
|
||||
type CloneValidator struct { ... }
|
||||
func (v *CloneValidator) ValidateClone(path string) error
|
||||
func (v *CloneValidator) GetCloneInfo(path string) (*CloneInfo, error)
|
||||
```
|
||||
|
||||
### Pattern: Tmux Session Handling (Priority: MEDIUM)
|
||||
|
||||
**Current state**: Fragmented across lock, doctor, daemon
|
||||
- `lock/lock.go`: `getActiveTmuxSessions()`
|
||||
- `doctor/identity_check.go`: Similar logic
|
||||
- `cmd/agents.go`: Uses `tmux.NewTmux()`
|
||||
|
||||
**Action**: Consolidate into `internal/tmux/sessions.go`
|
||||
|
||||
### Pattern: Load/Validate Config Files (Priority: LOW)
|
||||
|
||||
**Current state**: 8 near-identical Load* functions in config/loader.go
|
||||
- `LoadTownConfig`, `LoadRigsConfig`, `LoadRigConfig`, etc.
|
||||
|
||||
**Action**: Create generic loader using Go generics:
|
||||
```go
|
||||
func loadConfigFile[T Validator](path string) (*T, error)
|
||||
```
|
||||
|
||||
### Pattern: Math Utilities (Priority: LOW)
|
||||
|
||||
**Current state**: `min()`, `max()`, `min3()`, `abs()` in suggest/suggest.go
|
||||
|
||||
**Action**: If needed elsewhere, move to `internal/util/math.go`
|
||||
|
||||
---
|
||||
|
||||
## 3. Performance Concerns
|
||||
|
||||
### Critical: File I/O Per-Event
|
||||
|
||||
| Package | Issue | Impact | Recommendation |
|
||||
|---------|-------|--------|----------------|
|
||||
| `events` | Opens/closes file for every event | High on busy systems | Batch writes or buffered logger |
|
||||
| `townlog` | Opens/closes file per log entry | Medium | Same as events |
|
||||
| `events` | `workspace.FindFromCwd()` on every Log() | Low-medium | Cache town root |
|
||||
|
||||
### Critical: Process Tree Walking
|
||||
|
||||
| Package | Issue | Impact | Recommendation |
|
||||
|---------|-------|--------|----------------|
|
||||
| `doctor/orphan_check` | `hasCrewAncestor()` calls `ps` in loop | O(n) subprocess calls | Batch gather process info |
|
||||
|
||||
### High: Directory Traversal Inefficiencies
|
||||
|
||||
| Package | Issue | Impact | Recommendation |
|
||||
|---------|-------|--------|----------------|
|
||||
| `doctor/hook_check` | Uses `exec.Command("find")` | Subprocess overhead | Use `filepath.Walk` |
|
||||
| `lock` | `FindAllLocks()` - unbounded Walk | Scales poorly | Add depth limits |
|
||||
| `townlog` | `TailEvents()` reads entire file | Memory for large logs | Implement true tail |
|
||||
|
||||
### Medium: Redundant Operations
|
||||
|
||||
| Package | Issue | Recommendation |
|
||||
|---------|-------|----------------|
|
||||
| `dog` | `List()` + iterate = double work | Provide `CountByState()` |
|
||||
| `dog` | Creates new git.Git per worktree | Cache or batch |
|
||||
| `doctor/rig_check` | Runs git status twice per polecat | Combine operations |
|
||||
| `checkpoint/Capture` | 3 separate git commands | Use combined flags |
|
||||
|
||||
### Low: JSON Formatting Overhead
|
||||
|
||||
| Package | Issue | Recommendation |
|
||||
|---------|-------|----------------|
|
||||
| `lock` | `MarshalIndent()` for lock files | Use `Marshal()` (no indentation needed) |
|
||||
| `townlog` | No compression for old logs | Consider gzip rotation |
|
||||
|
||||
---
|
||||
|
||||
## 4. Error Handling Issues
|
||||
|
||||
### Pattern: Silent Failures
|
||||
|
||||
| Package | Location | Issue | Fix |
|
||||
|---------|----------|-------|-----|
|
||||
| `events` | All callers | 19 instances of `_ = events.LogFeed()` | Standardize: always ignore or always check |
|
||||
| `townlog` | `ParseLogLines()` | Silently skips malformed lines | Log warnings |
|
||||
| `lock` | Lines 91, 180, 194-195 | Silent `_ =` without comments | Document intent |
|
||||
| `checkpoint` | `Capture()` | Returns nil error but git commands fail | Return actual errors |
|
||||
| `deps` | `BeadsUnknown` case | Silently passes | Log warning or fail |
|
||||
|
||||
### Pattern: Inconsistent State Handling
|
||||
|
||||
| Package | Issue | Recommendation |
|
||||
|---------|-------|----------------|
|
||||
| `dog/Get()` | Returns minimal Dog if state missing | Document or error |
|
||||
| `config/GetAccount()` | Returns pointer to loop variable (bug!) | Return by value |
|
||||
| `boot` | `LoadStatus()` returns empty struct if missing | Document behavior |
|
||||
|
||||
### Bug: Missing Role Mapping
|
||||
|
||||
| Package | Issue | Impact |
|
||||
|---------|-------|--------|
|
||||
| `claude` | `RoleTypeFor()` missing `deacon`, `crew` | Wrong settings applied |
|
||||
|
||||
---
|
||||
|
||||
## 5. Testing Gaps
|
||||
|
||||
| Package | Gap | Priority |
|
||||
|---------|-----|----------|
|
||||
| `checkpoint` | No unit tests | HIGH (crash recovery) |
|
||||
| `dog` | 4 tests, major paths untested | HIGH |
|
||||
| `deps` | Minimal failure path testing | MEDIUM |
|
||||
| `claude` | No tests | LOW |
|
||||
|
||||
---
|
||||
|
||||
## Summary Statistics
|
||||
|
||||
| Category | Count | Packages Affected |
|
||||
|----------|-------|-------------------|
|
||||
| **Dead Code Items** | 25+ | config, constants, doctor, lock, events, boot, dog, keepalive |
|
||||
| **Duplicate Patterns** | 6 | util, doctor, config, lock |
|
||||
| **Performance Issues** | 12 | events, townlog, doctor, dog, lock, checkpoint |
|
||||
| **Error Handling Issues** | 15 | events, townlog, lock, checkpoint, deps, claude |
|
||||
| **Testing Gaps** | 4 packages | checkpoint, dog, deps, claude |
|
||||
|
||||
## Recommended Priority
|
||||
|
||||
1. **Delete keepalive package** (entire package unused)
|
||||
2. **Fix claude/RoleTypeFor()** (incorrect behavior)
|
||||
3. **Fix config/GetAccount()** (pointer to stack bug)
|
||||
4. **Fix polecat/pending.go** (non-atomic writes)
|
||||
5. **Delete 21 unused constants** (maintenance burden)
|
||||
6. **Consolidate atomic write pattern** (DRY)
|
||||
7. **Add checkpoint tests** (crash recovery critical)
|
||||
@@ -26,7 +26,7 @@ These roles manage the Gas Town system itself:
|
||||
|
||||
| Role | Description | Lifecycle |
|
||||
|------|-------------|-----------|
|
||||
| **Mayor** | Global coordinator at town root | Singleton, persistent |
|
||||
| **Mayor** | Global coordinator at mayor/ | Singleton, persistent |
|
||||
| **Deacon** | Background supervisor daemon ([watchdog chain](watchdog-chain.md)) | Singleton, persistent |
|
||||
| **Witness** | Per-rig polecat lifecycle manager | One per rig, persistent |
|
||||
| **Refinery** | Per-rig merge queue processor | One per rig, persistent |
|
||||
|
||||
@@ -82,11 +82,11 @@ The daemon runs a heartbeat tick every 3 minutes:
|
||||
func (d *Daemon) heartbeatTick() {
|
||||
d.ensureBootRunning() // 1. Spawn Boot for triage
|
||||
d.checkDeaconHeartbeat() // 2. Belt-and-suspenders fallback
|
||||
d.ensureWitnessesRunning() // 3. Witness health
|
||||
d.triggerPendingSpawns() // 4. Bootstrap polecats
|
||||
d.processLifecycleRequests() // 5. Cycle/restart requests
|
||||
d.checkStaleAgents() // 6. Timeout detection
|
||||
// ... more checks
|
||||
d.ensureWitnessesRunning() // 3. Witness health (checks tmux directly)
|
||||
d.ensureRefineriesRunning() // 4. Refinery health (checks tmux directly)
|
||||
d.triggerPendingSpawns() // 5. Bootstrap polecats
|
||||
d.processLifecycleRequests() // 6. Cycle/restart requests
|
||||
// Agent state derived from tmux, not recorded in beads (gt-zecmc)
|
||||
}
|
||||
```
|
||||
|
||||
@@ -190,7 +190,7 @@ Multiple layers ensure recovery:
|
||||
|
||||
1. **Boot triage** - Intelligent observation, first line
|
||||
2. **Daemon checkDeaconHeartbeat()** - Belt-and-suspenders if Boot fails
|
||||
3. **Daemon checkStaleAgents()** - Timeout-based detection
|
||||
3. **Tmux-based discovery** - Daemon checks tmux sessions directly (no bead state)
|
||||
4. **Human escalation** - Mail to overseer for unrecoverable states
|
||||
|
||||
## State Files
|
||||
@@ -239,9 +239,11 @@ gt deacon health-check
|
||||
|
||||
### Status Shows Wrong State
|
||||
|
||||
**Symptom**: `gt status` shows "stopped" for running agents
|
||||
**Cause**: Bead state and tmux state diverged
|
||||
**Fix**: Reconcile with `gt sync-status` or restart agent
|
||||
**Symptom**: `gt status` shows wrong state for agents
|
||||
**Cause**: Previously bead state and tmux state could diverge
|
||||
**Fix**: As of gt-zecmc, status derives state from tmux directly (no bead state for
|
||||
observable conditions like running/stopped). Non-observable states (stuck, awaiting-gate)
|
||||
are still stored in beads.
|
||||
|
||||
## Design Decision: Keep Separation
|
||||
|
||||
@@ -284,7 +286,7 @@ The separation is correct; these bugs need fixing:
|
||||
|
||||
1. **Session confusion** (gt-sgzsb): Boot spawns in wrong session
|
||||
2. **Zombie blocking** (gt-j1i0r): Daemon can't kill zombie sessions
|
||||
3. **Status mismatch** (gt-doih4): Bead vs tmux state divergence
|
||||
3. ~~**Status mismatch** (gt-doih4): Bead vs tmux state divergence~~ → FIXED in gt-zecmc
|
||||
4. **Ensure semantics** (gt-ekc5u): Start should kill zombies first
|
||||
|
||||
## Summary
|
||||
|
||||
495
dog-pool-architecture.md
Normal file
495
dog-pool-architecture.md
Normal file
@@ -0,0 +1,495 @@
|
||||
# Dog Pool Architecture for Concurrent Shutdown Dances
|
||||
|
||||
> Design document for gt-fsld8
|
||||
|
||||
## Problem Statement
|
||||
|
||||
Boot needs to run multiple shutdown-dance molecules concurrently when multiple death
|
||||
warrants are issued. The current hook design only allows one molecule per agent.
|
||||
|
||||
Example scenario:
|
||||
- Warrant 1: Kill stuck polecat Toast (60s into interrogation)
|
||||
- Warrant 2: Kill stuck polecat Shadow (just started)
|
||||
- Warrant 3: Kill stuck witness (120s into interrogation)
|
||||
|
||||
All three need concurrent tracking, independent timeouts, and separate outcomes.
|
||||
|
||||
## Design Decision: Lightweight State Machines
|
||||
|
||||
After analyzing the options, the shutdown-dance does NOT need Claude sessions.
|
||||
The dance is a deterministic state machine:
|
||||
|
||||
```
|
||||
WARRANT -> INTERROGATE -> EVALUATE -> PARDON|EXECUTE
|
||||
```
|
||||
|
||||
Each step is mechanical:
|
||||
1. Send a tmux message (no LLM needed)
|
||||
2. Wait for timeout or response (timer)
|
||||
3. Check tmux output for ALIVE keyword (string match)
|
||||
4. Repeat or terminate
|
||||
|
||||
**Decision**: Dogs are lightweight goroutines, not Claude sessions.
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
```
|
||||
┌────────────────────────────────────────────────────────────────────┐
|
||||
│ BOOT │
|
||||
│ (Claude session in tmux) │
|
||||
│ │
|
||||
│ ┌──────────────────────────────────────────────────────────────┐ │
|
||||
│ │ Dog Manager │ │
|
||||
│ │ │ │
|
||||
│ │ Pool: [Dog1, Dog2, Dog3, ...] (goroutines + state files) │ │
|
||||
│ │ │ │
|
||||
│ │ allocate() → Dog │ │
|
||||
│ │ release(Dog) │ │
|
||||
│ │ status() → []DogStatus │ │
|
||||
│ └──────────────────────────────────────────────────────────────┘ │
|
||||
│ │
|
||||
│ Boot's job: │
|
||||
│ - Watch for warrants (file or event) │
|
||||
│ - Allocate dog from pool │
|
||||
│ - Monitor dog progress │
|
||||
│ - Handle dog completion/failure │
|
||||
│ - Report results │
|
||||
└────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Dog Structure
|
||||
|
||||
```go
|
||||
// Dog represents a shutdown-dance executor
|
||||
type Dog struct {
|
||||
ID string // Unique ID (e.g., "dog-1704567890123")
|
||||
Warrant *Warrant // The death warrant being processed
|
||||
State ShutdownDanceState
|
||||
Attempt int // Current interrogation attempt (1-3)
|
||||
StartedAt time.Time
|
||||
StateFile string // Persistent state: ~/gt/deacon/dogs/active/<id>.json
|
||||
}
|
||||
|
||||
type ShutdownDanceState string
|
||||
|
||||
const (
|
||||
StateIdle ShutdownDanceState = "idle"
|
||||
StateInterrogating ShutdownDanceState = "interrogating" // Sent message, waiting
|
||||
StateEvaluating ShutdownDanceState = "evaluating" // Checking response
|
||||
StatePardoned ShutdownDanceState = "pardoned" // Session responded
|
||||
StateExecuting ShutdownDanceState = "executing" // Killing session
|
||||
StateComplete ShutdownDanceState = "complete" // Done, ready for cleanup
|
||||
StateFailed ShutdownDanceState = "failed" // Dog crashed/errored
|
||||
)
|
||||
|
||||
type Warrant struct {
|
||||
ID string // Bead ID for the warrant
|
||||
Target string // Session to interrogate (e.g., "gt-gastown-Toast")
|
||||
Reason string // Why warrant was issued
|
||||
Requester string // Who filed the warrant
|
||||
FiledAt time.Time
|
||||
}
|
||||
```
|
||||
|
||||
## Pool Design
|
||||
|
||||
### Fixed Pool Size
|
||||
|
||||
**Decision**: Fixed pool of 5 dogs, configurable via environment.
|
||||
|
||||
Rationale:
|
||||
- Dynamic sizing adds complexity without clear benefit
|
||||
- 5 concurrent shutdown dances handles worst-case scenarios
|
||||
- If pool exhausted, warrants queue (better than infinite dog spawning)
|
||||
- Memory footprint is negligible (goroutines + small state files)
|
||||
|
||||
```go
|
||||
const (
|
||||
DefaultPoolSize = 5
|
||||
MaxPoolSize = 20
|
||||
)
|
||||
|
||||
type DogPool struct {
|
||||
mu sync.Mutex
|
||||
dogs []*Dog // All dogs in pool
|
||||
idle chan *Dog // Channel of available dogs
|
||||
active map[string]*Dog // ID -> Dog for active dogs
|
||||
stateDir string // ~/gt/deacon/dogs/active/
|
||||
}
|
||||
|
||||
func (p *DogPool) Allocate(warrant *Warrant) (*Dog, error) {
|
||||
select {
|
||||
case dog := <-p.idle:
|
||||
dog.Warrant = warrant
|
||||
dog.State = StateInterrogating
|
||||
dog.Attempt = 1
|
||||
dog.StartedAt = time.Now()
|
||||
p.active[dog.ID] = dog
|
||||
return dog, nil
|
||||
default:
|
||||
return nil, ErrPoolExhausted
|
||||
}
|
||||
}
|
||||
|
||||
func (p *DogPool) Release(dog *Dog) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
delete(p.active, dog.ID)
|
||||
dog.Reset()
|
||||
p.idle <- dog
|
||||
}
|
||||
```
|
||||
|
||||
### Why Not Dynamic Pool?
|
||||
|
||||
Considered but rejected:
|
||||
- Adding dogs on demand increases complexity
|
||||
- No clear benefit - warrants rarely exceed 5 concurrent
|
||||
- If needed, raise DefaultPoolSize
|
||||
- Simpler to reason about fixed resources
|
||||
|
||||
## Communication: State Files + Events
|
||||
|
||||
### State Persistence
|
||||
|
||||
Each active dog writes state to `~/gt/deacon/dogs/active/<id>.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "dog-1704567890123",
|
||||
"warrant": {
|
||||
"id": "gt-abc123",
|
||||
"target": "gt-gastown-Toast",
|
||||
"reason": "no_response_health_check",
|
||||
"requester": "deacon",
|
||||
"filed_at": "2026-01-07T20:15:00Z"
|
||||
},
|
||||
"state": "interrogating",
|
||||
"attempt": 2,
|
||||
"started_at": "2026-01-07T20:15:00Z",
|
||||
"last_message_at": "2026-01-07T20:16:00Z",
|
||||
"next_timeout": "2026-01-07T20:18:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
### Boot Monitoring
|
||||
|
||||
Boot monitors dogs via:
|
||||
1. **Polling**: `gt dog status --active` every tick
|
||||
2. **Completion files**: Dogs write `<id>.done` when complete
|
||||
|
||||
```go
|
||||
type DogResult struct {
|
||||
DogID string
|
||||
Warrant *Warrant
|
||||
Outcome DogOutcome // pardoned | executed | failed
|
||||
Duration time.Duration
|
||||
Details string
|
||||
}
|
||||
|
||||
type DogOutcome string
|
||||
|
||||
const (
|
||||
OutcomePardoned DogOutcome = "pardoned" // Session responded
|
||||
OutcomeExecuted DogOutcome = "executed" // Session killed
|
||||
OutcomeFailed DogOutcome = "failed" // Dog crashed
|
||||
)
|
||||
```
|
||||
|
||||
### Why Not Mail?
|
||||
|
||||
Considered but rejected for dog<->boot communication:
|
||||
- Mail is async, poll-based - adds latency
|
||||
- State files are simpler for local coordination
|
||||
- Dogs don't need complex inter-agent communication
|
||||
- Keep mail for external coordination (Witness, Mayor)
|
||||
|
||||
## Shutdown Dance State Machine
|
||||
|
||||
Each dog executes this state machine:
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────┐
|
||||
│ │
|
||||
▼ │
|
||||
┌───────────────────────────┐ │
|
||||
│ INTERROGATING │ │
|
||||
│ │ │
|
||||
│ 1. Send health check │ │
|
||||
│ 2. Start timeout timer │ │
|
||||
└───────────┬───────────────┘ │
|
||||
│ │
|
||||
│ timeout or response │
|
||||
▼ │
|
||||
┌───────────────────────────┐ │
|
||||
│ EVALUATING │ │
|
||||
│ │ │
|
||||
│ Check tmux output for │ │
|
||||
│ ALIVE keyword │ │
|
||||
└───────────┬───────────────┘ │
|
||||
│ │
|
||||
┌───────┴───────┐ │
|
||||
│ │ │
|
||||
▼ ▼ │
|
||||
[ALIVE found] [No ALIVE] │
|
||||
│ │ │
|
||||
│ │ attempt < 3? │
|
||||
│ ├──────────────────────────────────→─┘
|
||||
│ │ yes: attempt++, longer timeout
|
||||
│ │
|
||||
│ │ no: attempt == 3
|
||||
▼ ▼
|
||||
┌─────────┐ ┌─────────────┐
|
||||
│ PARDONED│ │ EXECUTING │
|
||||
│ │ │ │
|
||||
│ Cancel │ │ Kill tmux │
|
||||
│ warrant │ │ session │
|
||||
└────┬────┘ └──────┬──────┘
|
||||
│ │
|
||||
└────────┬───────┘
|
||||
│
|
||||
▼
|
||||
┌────────────────┐
|
||||
│ COMPLETE │
|
||||
│ │
|
||||
│ Write result │
|
||||
│ Release dog │
|
||||
└────────────────┘
|
||||
```
|
||||
|
||||
### Timeout Gates
|
||||
|
||||
| Attempt | Timeout | Cumulative Wait |
|
||||
|---------|---------|-----------------|
|
||||
| 1 | 60s | 60s |
|
||||
| 2 | 120s | 180s (3 min) |
|
||||
| 3 | 240s | 420s (7 min) |
|
||||
|
||||
### Health Check Message
|
||||
|
||||
```
|
||||
[DOG] HEALTH CHECK: Session {target}, respond ALIVE within {timeout}s or face termination.
|
||||
Warrant reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempt: {attempt}/3
|
||||
```
|
||||
|
||||
### Response Detection
|
||||
|
||||
```go
|
||||
func (d *Dog) CheckForResponse() bool {
|
||||
tm := tmux.NewTmux()
|
||||
output, err := tm.CapturePane(d.Warrant.Target, 50) // Last 50 lines
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
    // Only an explicit ALIVE keyword in recent output counts as a response
|
||||
// Specifically look for ALIVE keyword for explicit response
|
||||
return strings.Contains(output, "ALIVE")
|
||||
}
|
||||
```
|
||||
|
||||
## Dog Implementation
|
||||
|
||||
### Not Reusing Polecat Infrastructure
|
||||
|
||||
**Decision**: Dogs do NOT reuse polecat infrastructure.
|
||||
|
||||
Rationale:
|
||||
- Polecats are Claude sessions with molecules, hooks, sandboxes
|
||||
- Dogs are simple state machine executors
|
||||
- Polecats have 3-layer lifecycle (session/sandbox/slot)
|
||||
- Dogs have single-layer lifecycle (just state)
|
||||
- Different resource profiles, different management
|
||||
|
||||
What dogs DO share:
|
||||
- tmux utilities for message sending/capture
|
||||
- State file patterns
|
||||
- Pool allocation pattern
|
||||
|
||||
### Dog Execution Loop
|
||||
|
||||
```go
|
||||
func (d *Dog) Run(ctx context.Context) DogResult {
|
||||
d.State = StateInterrogating
|
||||
d.saveState()
|
||||
|
||||
for d.Attempt <= 3 {
|
||||
// Send interrogation message
|
||||
if err := d.sendHealthCheck(); err != nil {
|
||||
return d.fail(err)
|
||||
}
|
||||
|
||||
// Wait for timeout or context cancellation
|
||||
timeout := d.timeoutForAttempt(d.Attempt)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return d.fail(ctx.Err())
|
||||
case <-time.After(timeout):
|
||||
// Timeout reached
|
||||
}
|
||||
|
||||
// Evaluate response
|
||||
d.State = StateEvaluating
|
||||
d.saveState()
|
||||
|
||||
if d.CheckForResponse() {
|
||||
// Session is alive
|
||||
return d.pardon()
|
||||
}
|
||||
|
||||
// No response - try again or execute
|
||||
d.Attempt++
|
||||
if d.Attempt <= 3 {
|
||||
d.State = StateInterrogating
|
||||
d.saveState()
|
||||
}
|
||||
}
|
||||
|
||||
// All attempts exhausted - execute warrant
|
||||
return d.execute()
|
||||
}
|
||||
```
|
||||
|
||||
## Failure Handling
|
||||
|
||||
### Dog Crashes Mid-Dance
|
||||
|
||||
If a dog crashes (Boot process restarts, system crash):
|
||||
|
||||
1. State files persist in `~/gt/deacon/dogs/active/`
|
||||
2. On Boot restart, scan for orphaned state files
|
||||
3. Resume or restart based on state:
|
||||
|
||||
| State | Recovery Action |
|
||||
|------------------|------------------------------------|
|
||||
| interrogating | Restart from current attempt |
|
||||
| evaluating | Check response, continue |
|
||||
| executing | Verify kill, mark complete |
|
||||
| pardoned/complete| Already done, clean up |
|
||||
|
||||
```go
|
||||
func (p *DogPool) RecoverOrphans() error {
|
||||
files, _ := filepath.Glob(p.stateDir + "/*.json")
|
||||
for _, f := range files {
|
||||
state := loadDogState(f)
|
||||
if state.State != StateComplete && state.State != StatePardoned {
|
||||
dog := p.allocateForRecovery(state)
|
||||
go dog.Resume()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
### Handling Pool Exhaustion
|
||||
|
||||
If all dogs are busy when new warrant arrives:
|
||||
|
||||
```go
|
||||
func (b *Boot) HandleWarrant(warrant *Warrant) error {
|
||||
dog, err := b.pool.Allocate(warrant)
|
||||
if err == ErrPoolExhausted {
|
||||
// Queue the warrant for later processing
|
||||
b.warrantQueue.Push(warrant)
|
||||
b.log("Warrant %s queued (pool exhausted)", warrant.ID)
|
||||
return nil
|
||||
}
|
||||
|
||||
go func() {
|
||||
result := dog.Run(b.ctx)
|
||||
b.handleResult(result)
|
||||
b.pool.Release(dog)
|
||||
|
||||
// Check queue for pending warrants
|
||||
if next := b.warrantQueue.Pop(); next != nil {
|
||||
b.HandleWarrant(next)
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
## Directory Structure
|
||||
|
||||
```
|
||||
~/gt/deacon/dogs/
|
||||
├── boot/ # Boot's working directory
|
||||
│ ├── CLAUDE.md # Boot context
|
||||
│ └── .boot-status.json # Boot execution status
|
||||
├── active/ # Active dog state files
|
||||
│ ├── dog-123.json # Dog 1 state
|
||||
│ ├── dog-456.json # Dog 2 state
|
||||
│ └── ...
|
||||
├── completed/ # Completed dance records (for audit)
|
||||
│ ├── dog-789.json # Historical record
|
||||
│ └── ...
|
||||
└── warrants/ # Pending warrant queue
|
||||
├── warrant-abc.json
|
||||
└── ...
|
||||
```
|
||||
|
||||
## Command Interface
|
||||
|
||||
```bash
|
||||
# Pool status
|
||||
gt dog pool status
|
||||
# Output:
|
||||
# Dog Pool: 3/5 active
|
||||
# dog-123: interrogating Toast (attempt 2, 45s remaining)
|
||||
# dog-456: executing Shadow
|
||||
# dog-789: idle
|
||||
|
||||
# Manual dog operations (for debugging)
|
||||
gt dog pool allocate <warrant-id>
|
||||
gt dog pool release <dog-id>
|
||||
|
||||
# View active dances
|
||||
gt dog dances
|
||||
# Output:
|
||||
# Active Shutdown Dances:
|
||||
# dog-123 → Toast: Interrogating (2/3), timeout in 45s
|
||||
# dog-456 → Shadow: Executing warrant
|
||||
|
||||
# View warrant queue
|
||||
gt dog warrants
|
||||
# Output:
|
||||
# Pending Warrants: 2
|
||||
# 1. gt-abc: witness-gastown (stuck_no_progress)
|
||||
# 2. gt-def: polecat-Copper (crash_loop)
|
||||
```
|
||||
|
||||
## Integration with Existing Dogs
|
||||
|
||||
The existing `dog` package (`internal/dog/`) manages Deacon's multi-rig helper dogs.
|
||||
Those are different from shutdown-dance dogs:
|
||||
|
||||
| Aspect | Helper Dogs (existing) | Dance Dogs (new) |
|
||||
|-----------------|-----------------------------|-----------------------------|
|
||||
| Purpose | Cross-rig infrastructure | Shutdown dance execution |
|
||||
| Sessions | Claude sessions | Goroutines (no Claude) |
|
||||
| Worktrees | One per rig | None |
|
||||
| Lifecycle | Long-lived, reusable | Ephemeral per warrant |
|
||||
| State | idle/working | Dance state machine |
|
||||
|
||||
**Recommendation**: Use different package to avoid confusion:
|
||||
- `internal/dog/` - existing helper dogs
|
||||
- `internal/shutdown/` - shutdown dance pool
|
||||
|
||||
## Summary: Answers to Design Questions
|
||||
|
||||
| Question | Answer |
|
||||
|----------|--------|
|
||||
| How many Dogs in pool? | Fixed: 5 (configurable via GT_DOG_POOL_SIZE) |
|
||||
| How do Dogs communicate with Boot? | State files + completion markers |
|
||||
| Are Dogs tmux sessions? | No - goroutines with state machine |
|
||||
| Reuse polecat infrastructure? | No - too heavyweight, different model |
|
||||
| What if Dog dies mid-dance? | State file recovery on Boot restart |
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [x] Architecture document for Dog pool
|
||||
- [x] Clear allocation/deallocation protocol
|
||||
- [x] Failure handling for Dog crashes
|
||||
7
go.mod
7
go.mod
@@ -7,6 +7,8 @@ require (
|
||||
github.com/charmbracelet/bubbles v0.21.0
|
||||
github.com/charmbracelet/bubbletea v1.3.10
|
||||
github.com/charmbracelet/lipgloss v1.1.0
|
||||
github.com/go-rod/rod v0.116.2
|
||||
github.com/gofrs/flock v0.13.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/spf13/cobra v1.10.2
|
||||
golang.org/x/term v0.38.0
|
||||
@@ -34,5 +36,10 @@ require (
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/spf13/pflag v1.0.9 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
github.com/ysmood/fetchup v0.2.3 // indirect
|
||||
github.com/ysmood/goob v0.4.0 // indirect
|
||||
github.com/ysmood/got v0.40.0 // indirect
|
||||
github.com/ysmood/gson v0.7.3 // indirect
|
||||
github.com/ysmood/leakless v0.9.0 // indirect
|
||||
golang.org/x/sys v0.39.0 // indirect
|
||||
)
|
||||
|
||||
26
go.sum
26
go.sum
@@ -27,8 +27,14 @@ github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEX
|
||||
github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4=
|
||||
github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
|
||||
github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA=
|
||||
github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg=
|
||||
github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw=
|
||||
github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
@@ -47,6 +53,8 @@ github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELU
|
||||
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
|
||||
github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
|
||||
github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
@@ -54,8 +62,24 @@ github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
||||
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
|
||||
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
|
||||
github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ=
|
||||
github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns=
|
||||
github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ=
|
||||
github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18=
|
||||
github.com/ysmood/gop v0.2.0 h1:+tFrG0TWPxT6p9ZaZs+VY+opCvHU8/3Fk6BaNv6kqKg=
|
||||
github.com/ysmood/gop v0.2.0/go.mod h1:rr5z2z27oGEbyB787hpEcx4ab8cCiPnKxn0SUHt6xzk=
|
||||
github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q=
|
||||
github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg=
|
||||
github.com/ysmood/gotrace v0.6.0 h1:SyI1d4jclswLhg7SWTL6os3L1WOKeNn/ZtzVQF8QmdY=
|
||||
github.com/ysmood/gotrace v0.6.0/go.mod h1:TzhIG7nHDry5//eYZDYcTzuJLYQIkykJzCRIo4/dzQM=
|
||||
github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE=
|
||||
github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg=
|
||||
github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU=
|
||||
github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
|
||||
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
|
||||
@@ -68,3 +92,5 @@ golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
|
||||
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
|
||||
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
76
internal/agent/state.go
Normal file
76
internal/agent/state.go
Normal file
@@ -0,0 +1,76 @@
|
||||
// Package agent provides shared types and utilities for Gas Town agents
|
||||
// (witness, refinery, deacon, etc.).
|
||||
package agent
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/util"
|
||||
)
|
||||
|
||||
// State represents an agent's running state.
|
||||
type State string
|
||||
|
||||
const (
|
||||
// StateStopped means the agent is not running.
|
||||
StateStopped State = "stopped"
|
||||
|
||||
// StateRunning means the agent is actively operating.
|
||||
StateRunning State = "running"
|
||||
|
||||
// StatePaused means the agent is paused (not operating but not stopped).
|
||||
StatePaused State = "paused"
|
||||
)
|
||||
|
||||
// StateManager handles loading and saving agent state to disk.
|
||||
// It uses generics to work with any state type.
|
||||
type StateManager[T any] struct {
|
||||
stateFilePath string
|
||||
defaultFactory func() *T
|
||||
}
|
||||
|
||||
// NewStateManager creates a new StateManager for the given state file path.
|
||||
// The defaultFactory function is called when the state file doesn't exist
|
||||
// to create a new state with default values.
|
||||
func NewStateManager[T any](rigPath, stateFileName string, defaultFactory func() *T) *StateManager[T] {
|
||||
return &StateManager[T]{
|
||||
stateFilePath: filepath.Join(rigPath, ".runtime", stateFileName),
|
||||
defaultFactory: defaultFactory,
|
||||
}
|
||||
}
|
||||
|
||||
// StateFile returns the path to the state file.
|
||||
func (m *StateManager[T]) StateFile() string {
|
||||
return m.stateFilePath
|
||||
}
|
||||
|
||||
// Load loads agent state from disk.
|
||||
// If the file doesn't exist, returns a new state created by the default factory.
|
||||
func (m *StateManager[T]) Load() (*T, error) {
|
||||
data, err := os.ReadFile(m.stateFilePath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return m.defaultFactory(), nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var state T
|
||||
if err := json.Unmarshal(data, &state); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &state, nil
|
||||
}
|
||||
|
||||
// Save persists agent state to disk using atomic write.
|
||||
func (m *StateManager[T]) Save(state *T) error {
|
||||
dir := filepath.Dir(m.stateFilePath)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return util.AtomicWriteJSON(m.stateFilePath, state)
|
||||
}
|
||||
@@ -10,6 +10,8 @@ import (
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
)
|
||||
|
||||
// Common errors
|
||||
@@ -71,15 +73,186 @@ func ResolveBeadsDir(workDir string) string {
|
||||
return beadsDir
|
||||
}
|
||||
|
||||
// Detect redirect chains: check if resolved path also has a redirect
|
||||
resolvedRedirect := filepath.Join(resolved, "redirect")
|
||||
if _, err := os.Stat(resolvedRedirect); err == nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: redirect chain detected: %s -> %s (which also has a redirect)\n", beadsDir, resolved)
|
||||
// Don't follow chains - just return the first resolved path
|
||||
// The target's redirect is likely errant and should be removed
|
||||
// Follow redirect chains (e.g., crew/.beads -> rig/.beads -> mayor/rig/.beads)
|
||||
// This is intentional for the rig-level redirect architecture.
|
||||
// Limit depth to prevent infinite loops from misconfigured redirects.
|
||||
return resolveBeadsDirWithDepth(resolved, 3)
|
||||
}
|
||||
|
||||
// resolveBeadsDirWithDepth follows redirect chains with a depth limit.
|
||||
func resolveBeadsDirWithDepth(beadsDir string, maxDepth int) string {
|
||||
if maxDepth <= 0 {
|
||||
fmt.Fprintf(os.Stderr, "Warning: redirect chain too deep at %s, stopping\n", beadsDir)
|
||||
return beadsDir
|
||||
}
|
||||
|
||||
return resolved
|
||||
redirectPath := filepath.Join(beadsDir, "redirect")
|
||||
data, err := os.ReadFile(redirectPath) //nolint:gosec // G304: path is constructed internally
|
||||
if err != nil {
|
||||
// No redirect, this is the final destination
|
||||
return beadsDir
|
||||
}
|
||||
|
||||
redirectTarget := strings.TrimSpace(string(data))
|
||||
if redirectTarget == "" {
|
||||
return beadsDir
|
||||
}
|
||||
|
||||
// Resolve relative to parent of beadsDir (the workDir)
|
||||
workDir := filepath.Dir(beadsDir)
|
||||
resolved := filepath.Clean(filepath.Join(workDir, redirectTarget))
|
||||
|
||||
// Detect circular redirect
|
||||
if resolved == beadsDir {
|
||||
fmt.Fprintf(os.Stderr, "Warning: circular redirect detected in %s, stopping\n", redirectPath)
|
||||
return beadsDir
|
||||
}
|
||||
|
||||
// Recursively follow
|
||||
return resolveBeadsDirWithDepth(resolved, maxDepth-1)
|
||||
}
|
||||
|
||||
// cleanBeadsRuntimeFiles removes gitignored runtime files from a .beads directory
|
||||
// while preserving tracked files (formulas/, README.md, config.yaml, .gitignore).
|
||||
// This is safe to call even if the directory doesn't exist.
|
||||
func cleanBeadsRuntimeFiles(beadsDir string) error {
|
||||
if _, err := os.Stat(beadsDir); os.IsNotExist(err) {
|
||||
return nil // Nothing to clean
|
||||
}
|
||||
|
||||
// Runtime files/patterns that are gitignored and safe to remove
|
||||
runtimePatterns := []string{
|
||||
// SQLite databases
|
||||
"*.db", "*.db-*", "*.db?*",
|
||||
// Daemon runtime
|
||||
"daemon.lock", "daemon.log", "daemon.pid", "bd.sock",
|
||||
// Sync state
|
||||
"sync-state.json", "last-touched", "metadata.json",
|
||||
// Version tracking
|
||||
".local_version",
|
||||
// Redirect file (we're about to recreate it)
|
||||
"redirect",
|
||||
// Merge artifacts
|
||||
"beads.base.*", "beads.left.*", "beads.right.*",
|
||||
// JSONL files (tracked but will be redirected, safe to remove in worktrees)
|
||||
"issues.jsonl", "interactions.jsonl",
|
||||
// Runtime directories
|
||||
"mq",
|
||||
}
|
||||
|
||||
var firstErr error
|
||||
for _, pattern := range runtimePatterns {
|
||||
matches, err := filepath.Glob(filepath.Join(beadsDir, pattern))
|
||||
if err != nil {
|
||||
if firstErr == nil {
|
||||
firstErr = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
for _, match := range matches {
|
||||
if err := os.RemoveAll(match); err != nil && firstErr == nil {
|
||||
firstErr = err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return firstErr
|
||||
}
|
||||
|
||||
// SetupRedirect creates a .beads/redirect file for a worktree to point to the rig's shared beads.
|
||||
// This is used by crew, polecats, and refinery worktrees to share the rig's beads database.
|
||||
//
|
||||
// Parameters:
|
||||
// - townRoot: the town root directory (e.g., ~/gt)
|
||||
// - worktreePath: the worktree directory (e.g., <rig>/crew/<name> or <rig>/refinery/rig)
|
||||
//
|
||||
// The function:
|
||||
// 1. Computes the relative path from worktree to rig-level .beads
|
||||
// 2. Cleans up runtime files (preserving tracked files like formulas/)
|
||||
// 3. Creates the redirect file
|
||||
//
|
||||
// Safety: This function refuses to create redirects in the canonical beads location
|
||||
// (mayor/rig) to prevent circular redirect chains.
|
||||
func SetupRedirect(townRoot, worktreePath string) error {
|
||||
// Get rig root from worktree path
|
||||
// worktreePath = <town>/<rig>/crew/<name> or <town>/<rig>/refinery/rig etc.
|
||||
relPath, err := filepath.Rel(townRoot, worktreePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("computing relative path: %w", err)
|
||||
}
|
||||
parts := strings.Split(filepath.ToSlash(relPath), "/")
|
||||
if len(parts) < 2 {
|
||||
return fmt.Errorf("invalid worktree path: must be at least 2 levels deep from town root")
|
||||
}
|
||||
|
||||
// Safety check: prevent creating redirect in canonical beads location (mayor/rig)
|
||||
// This would create a circular redirect chain since rig/.beads redirects to mayor/rig/.beads
|
||||
if len(parts) >= 2 && parts[1] == "mayor" {
|
||||
return fmt.Errorf("cannot create redirect in canonical beads location (mayor/rig)")
|
||||
}
|
||||
|
||||
rigRoot := filepath.Join(townRoot, parts[0])
|
||||
rigBeadsPath := filepath.Join(rigRoot, ".beads")
|
||||
mayorBeadsPath := filepath.Join(rigRoot, "mayor", "rig", ".beads")
|
||||
|
||||
// Check rig-level .beads first, fall back to mayor/rig/.beads (tracked beads architecture)
|
||||
usesMayorFallback := false
|
||||
if _, err := os.Stat(rigBeadsPath); os.IsNotExist(err) {
|
||||
// No rig/.beads - check for mayor/rig/.beads (tracked beads architecture)
|
||||
if _, err := os.Stat(mayorBeadsPath); os.IsNotExist(err) {
|
||||
return fmt.Errorf("no beads found at %s or %s", rigBeadsPath, mayorBeadsPath)
|
||||
}
|
||||
// Using mayor fallback - warn user to run bd doctor
|
||||
fmt.Fprintf(os.Stderr, "Warning: rig .beads not found at %s, using %s\n", rigBeadsPath, mayorBeadsPath)
|
||||
fmt.Fprintf(os.Stderr, " Run 'bd doctor' to fix rig beads configuration\n")
|
||||
usesMayorFallback = true
|
||||
}
|
||||
|
||||
// Clean up runtime files in .beads/ but preserve tracked files (formulas/, README.md, etc.)
|
||||
worktreeBeadsDir := filepath.Join(worktreePath, ".beads")
|
||||
if err := cleanBeadsRuntimeFiles(worktreeBeadsDir); err != nil {
|
||||
return fmt.Errorf("cleaning runtime files: %w", err)
|
||||
}
|
||||
|
||||
// Create .beads directory if it doesn't exist
|
||||
if err := os.MkdirAll(worktreeBeadsDir, 0755); err != nil {
|
||||
return fmt.Errorf("creating .beads dir: %w", err)
|
||||
}
|
||||
|
||||
// Compute relative path from worktree to rig root
|
||||
// e.g., crew/<name> (depth 2) -> ../../.beads
|
||||
// refinery/rig (depth 2) -> ../../.beads
|
||||
depth := len(parts) - 1 // subtract 1 for rig name itself
|
||||
upPath := strings.Repeat("../", depth)
|
||||
|
||||
var redirectPath string
|
||||
if usesMayorFallback {
|
||||
// Direct redirect to mayor/rig/.beads since rig/.beads doesn't exist
|
||||
redirectPath = upPath + "mayor/rig/.beads"
|
||||
} else {
|
||||
redirectPath = upPath + ".beads"
|
||||
|
||||
// Check if rig-level beads has a redirect (tracked beads case).
|
||||
// If so, redirect directly to the final destination to avoid chains.
|
||||
// The bd CLI doesn't support redirect chains, so we must skip intermediate hops.
|
||||
rigRedirectPath := filepath.Join(rigBeadsPath, "redirect")
|
||||
if data, err := os.ReadFile(rigRedirectPath); err == nil {
|
||||
rigRedirectTarget := strings.TrimSpace(string(data))
|
||||
if rigRedirectTarget != "" {
|
||||
// Rig has redirect (e.g., "mayor/rig/.beads" for tracked beads).
|
||||
// Redirect worktree directly to the final destination.
|
||||
redirectPath = upPath + rigRedirectTarget
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create redirect file
|
||||
redirectFile := filepath.Join(worktreeBeadsDir, "redirect")
|
||||
if err := os.WriteFile(redirectFile, []byte(redirectPath+"\n"), 0644); err != nil {
|
||||
return fmt.Errorf("creating redirect file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Issue represents a beads issue.
|
||||
@@ -249,6 +422,13 @@ func (b *Beads) run(args ...string) ([]byte, error) {
|
||||
return stdout.Bytes(), nil
|
||||
}
|
||||
|
||||
// Run executes a bd command and returns stdout.
|
||||
// This is a public wrapper around the internal run method for cases where
|
||||
// callers need to run arbitrary bd commands.
|
||||
func (b *Beads) Run(args ...string) ([]byte, error) {
|
||||
return b.run(args...)
|
||||
}
|
||||
|
||||
// wrapError wraps bd errors with context.
|
||||
func (b *Beads) wrapError(err error, stderr string, args []string) error {
|
||||
stderr = strings.TrimSpace(stderr)
|
||||
@@ -593,7 +773,7 @@ func (b *Beads) Update(id string, opts UpdateOptions) error {
|
||||
}
|
||||
|
||||
// Close closes one or more issues.
|
||||
// If CLAUDE_SESSION_ID is set in the environment, it is passed to bd close
|
||||
// If a runtime session ID is set in the environment, it is passed to bd close
|
||||
// for work attribution tracking (see decision 009-session-events-architecture.md).
|
||||
func (b *Beads) Close(ids ...string) error {
|
||||
if len(ids) == 0 {
|
||||
@@ -603,7 +783,7 @@ func (b *Beads) Close(ids ...string) error {
|
||||
args := append([]string{"close"}, ids...)
|
||||
|
||||
// Pass session ID for work attribution if available
|
||||
if sessionID := os.Getenv("CLAUDE_SESSION_ID"); sessionID != "" {
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
args = append(args, "--session="+sessionID)
|
||||
}
|
||||
|
||||
@@ -612,7 +792,7 @@ func (b *Beads) Close(ids ...string) error {
|
||||
}
|
||||
|
||||
// CloseWithReason closes one or more issues with a reason.
|
||||
// If CLAUDE_SESSION_ID is set in the environment, it is passed to bd close
|
||||
// If a runtime session ID is set in the environment, it is passed to bd close
|
||||
// for work attribution tracking (see decision 009-session-events-architecture.md).
|
||||
func (b *Beads) CloseWithReason(reason string, ids ...string) error {
|
||||
if len(ids) == 0 {
|
||||
@@ -623,7 +803,7 @@ func (b *Beads) CloseWithReason(reason string, ids ...string) error {
|
||||
args = append(args, "--reason="+reason)
|
||||
|
||||
// Pass session ID for work attribution if available
|
||||
if sessionID := os.Getenv("CLAUDE_SESSION_ID"); sessionID != "" {
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
args = append(args, "--session="+sessionID)
|
||||
}
|
||||
|
||||
@@ -1036,6 +1216,38 @@ func (b *Beads) UpdateAgentState(id string, state string, hookBead *string) erro
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetHookBead sets the hook_bead slot on an agent bead.
|
||||
// This is a convenience wrapper that only sets the hook without changing agent_state.
|
||||
// Per gt-zecmc: agent_state ("running", "dead", "idle") is observable from tmux
|
||||
// and should not be recorded in beads ("discover, don't track" principle).
|
||||
func (b *Beads) SetHookBead(agentBeadID, hookBeadID string) error {
|
||||
// Set the hook using bd slot set
|
||||
// This updates the hook_bead column directly in SQLite
|
||||
_, err := b.run("slot", "set", agentBeadID, "hook", hookBeadID)
|
||||
if err != nil {
|
||||
// If slot is already occupied, clear it first then retry
|
||||
errStr := err.Error()
|
||||
if strings.Contains(errStr, "already occupied") {
|
||||
_, _ = b.run("slot", "clear", agentBeadID, "hook")
|
||||
_, err = b.run("slot", "set", agentBeadID, "hook", hookBeadID)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("setting hook: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ClearHookBead clears the hook_bead slot on an agent bead.
|
||||
// Used when work is complete or unslung.
|
||||
func (b *Beads) ClearHookBead(agentBeadID string) error {
|
||||
_, err := b.run("slot", "clear", agentBeadID, "hook")
|
||||
if err != nil {
|
||||
return fmt.Errorf("clearing hook: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateAgentCleanupStatus updates the cleanup_status field in an agent bead.
|
||||
// This is called by the polecat to self-report its git state (ZFC compliance).
|
||||
// Valid statuses: clean, has_uncommitted, has_stash, has_unpushed
|
||||
@@ -1633,3 +1845,113 @@ func (b *Beads) MergeSlotEnsureExists() (string, error) {
|
||||
|
||||
return status.ID, nil
|
||||
}
|
||||
|
||||
// ===== Rig Identity Beads =====
|
||||
|
||||
// RigFields contains the fields specific to rig identity beads.
|
||||
type RigFields struct {
|
||||
Repo string // Git URL for the rig's repository
|
||||
Prefix string // Beads prefix for this rig (e.g., "gt", "bd")
|
||||
State string // Operational state: active, archived, maintenance
|
||||
}
|
||||
|
||||
// FormatRigDescription formats the description field for a rig identity bead.
|
||||
func FormatRigDescription(name string, fields *RigFields) string {
|
||||
if fields == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
var lines []string
|
||||
lines = append(lines, fmt.Sprintf("Rig identity bead for %s.", name))
|
||||
lines = append(lines, "")
|
||||
|
||||
if fields.Repo != "" {
|
||||
lines = append(lines, fmt.Sprintf("repo: %s", fields.Repo))
|
||||
}
|
||||
if fields.Prefix != "" {
|
||||
lines = append(lines, fmt.Sprintf("prefix: %s", fields.Prefix))
|
||||
}
|
||||
if fields.State != "" {
|
||||
lines = append(lines, fmt.Sprintf("state: %s", fields.State))
|
||||
}
|
||||
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
// ParseRigFields extracts rig fields from an issue's description.
|
||||
func ParseRigFields(description string) *RigFields {
|
||||
fields := &RigFields{}
|
||||
|
||||
for _, line := range strings.Split(description, "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
colonIdx := strings.Index(line, ":")
|
||||
if colonIdx == -1 {
|
||||
continue
|
||||
}
|
||||
|
||||
key := strings.TrimSpace(line[:colonIdx])
|
||||
value := strings.TrimSpace(line[colonIdx+1:])
|
||||
if value == "null" || value == "" {
|
||||
value = ""
|
||||
}
|
||||
|
||||
switch strings.ToLower(key) {
|
||||
case "repo":
|
||||
fields.Repo = value
|
||||
case "prefix":
|
||||
fields.Prefix = value
|
||||
case "state":
|
||||
fields.State = value
|
||||
}
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// CreateRigBead creates a rig identity bead for tracking rig metadata.
|
||||
// The ID format is: <prefix>-rig-<name> (e.g., gt-rig-gastown)
|
||||
// Use RigBeadID() helper to generate correct IDs.
|
||||
// The created_by field is populated from BD_ACTOR env var for provenance tracking.
|
||||
func (b *Beads) CreateRigBead(id, title string, fields *RigFields) (*Issue, error) {
|
||||
description := FormatRigDescription(title, fields)
|
||||
|
||||
args := []string{"create", "--json",
|
||||
"--id=" + id,
|
||||
"--type=rig",
|
||||
"--title=" + title,
|
||||
"--description=" + description,
|
||||
}
|
||||
|
||||
// Default actor from BD_ACTOR env var for provenance tracking
|
||||
if actor := os.Getenv("BD_ACTOR"); actor != "" {
|
||||
args = append(args, "--actor="+actor)
|
||||
}
|
||||
|
||||
out, err := b.run(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issue Issue
|
||||
if err := json.Unmarshal(out, &issue); err != nil {
|
||||
return nil, fmt.Errorf("parsing bd create output: %w", err)
|
||||
}
|
||||
|
||||
return &issue, nil
|
||||
}
|
||||
|
||||
// RigBeadIDWithPrefix generates a rig identity bead ID using the specified prefix.
|
||||
// Format: <prefix>-rig-<name> (e.g., gt-rig-gastown)
|
||||
func RigBeadIDWithPrefix(prefix, name string) string {
|
||||
return fmt.Sprintf("%s-rig-%s", prefix, name)
|
||||
}
|
||||
|
||||
// RigBeadID generates a rig identity bead ID using "gt" prefix.
|
||||
// For non-gastown rigs, use RigBeadIDWithPrefix with the rig's configured prefix.
|
||||
func RigBeadID(name string) string {
|
||||
return RigBeadIDWithPrefix("gt", name)
|
||||
}
|
||||
|
||||
@@ -88,9 +88,9 @@ func TestWrapError(t *testing.T) {
|
||||
b := New("/test")
|
||||
|
||||
tests := []struct {
|
||||
stderr string
|
||||
wantErr error
|
||||
wantNil bool
|
||||
stderr string
|
||||
wantErr error
|
||||
wantNil bool
|
||||
}{
|
||||
{"not a beads repository", ErrNotARepo, false},
|
||||
{"No .beads directory found", ErrNotARepo, false},
|
||||
@@ -127,7 +127,6 @@ func TestIntegration(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Walk up to find .beads
|
||||
dir := cwd
|
||||
for {
|
||||
if _, err := os.Stat(filepath.Join(dir, ".beads")); err == nil {
|
||||
@@ -140,6 +139,11 @@ func TestIntegration(t *testing.T) {
|
||||
dir = parent
|
||||
}
|
||||
|
||||
dbPath := filepath.Join(dir, ".beads", "beads.db")
|
||||
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
|
||||
t.Skip("no beads.db found (JSONL-only repo)")
|
||||
}
|
||||
|
||||
b := New(dir)
|
||||
|
||||
// Sync database with JSONL before testing to avoid "Database out of sync" errors.
|
||||
@@ -201,10 +205,10 @@ func TestIntegration(t *testing.T) {
|
||||
// TestParseMRFields tests parsing MR fields from issue descriptions.
|
||||
func TestParseMRFields(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
issue *Issue
|
||||
wantNil bool
|
||||
wantFields *MRFields
|
||||
name string
|
||||
issue *Issue
|
||||
wantNil bool
|
||||
wantFields *MRFields
|
||||
}{
|
||||
{
|
||||
name: "nil issue",
|
||||
@@ -521,8 +525,8 @@ author: someone
|
||||
target: main`,
|
||||
},
|
||||
fields: &MRFields{
|
||||
Branch: "polecat/Capable/gt-ghi",
|
||||
Target: "integration/epic",
|
||||
Branch: "polecat/Capable/gt-ghi",
|
||||
Target: "integration/epic",
|
||||
CloseReason: "merged",
|
||||
},
|
||||
want: `branch: polecat/Capable/gt-ghi
|
||||
@@ -1032,10 +1036,10 @@ func TestParseAgentBeadID(t *testing.T) {
|
||||
// Parseable but not valid agent roles (IsAgentSessionBead will reject)
|
||||
{"gt-abc123", "", "abc123", "", true}, // Parses as town-level but not valid role
|
||||
// Other prefixes (bd-, hq-)
|
||||
{"bd-mayor", "", "mayor", "", true}, // bd prefix town-level
|
||||
{"bd-beads-witness", "beads", "witness", "", true}, // bd prefix rig-level singleton
|
||||
{"bd-beads-polecat-pearl", "beads", "polecat", "pearl", true}, // bd prefix rig-level named
|
||||
{"hq-mayor", "", "mayor", "", true}, // hq prefix town-level
|
||||
{"bd-mayor", "", "mayor", "", true}, // bd prefix town-level
|
||||
{"bd-beads-witness", "beads", "witness", "", true}, // bd prefix rig-level singleton
|
||||
{"bd-beads-polecat-pearl", "beads", "polecat", "pearl", true}, // bd prefix rig-level named
|
||||
{"hq-mayor", "", "mayor", "", true}, // hq prefix town-level
|
||||
// Truly invalid patterns
|
||||
{"x-mayor", "", "", "", false}, // Prefix too short (1 char)
|
||||
{"abcd-mayor", "", "", "", false}, // Prefix too long (4 chars)
|
||||
@@ -1502,3 +1506,293 @@ func TestDelegationTerms(t *testing.T) {
|
||||
t.Errorf("parsed.CreditShare = %d, want %d", parsed.CreditShare, terms.CreditShare)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSetupRedirect tests the beads redirect setup for worktrees.
|
||||
func TestSetupRedirect(t *testing.T) {
|
||||
t.Run("crew worktree with local beads", func(t *testing.T) {
|
||||
// Setup: town/rig/.beads (local, no redirect)
|
||||
townRoot := t.TempDir()
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
rigBeads := filepath.Join(rigRoot, ".beads")
|
||||
crewPath := filepath.Join(rigRoot, "crew", "max")
|
||||
|
||||
// Create rig structure
|
||||
if err := os.MkdirAll(rigBeads, 0755); err != nil {
|
||||
t.Fatalf("mkdir rig beads: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(crewPath, 0755); err != nil {
|
||||
t.Fatalf("mkdir crew: %v", err)
|
||||
}
|
||||
|
||||
// Run SetupRedirect
|
||||
if err := SetupRedirect(townRoot, crewPath); err != nil {
|
||||
t.Fatalf("SetupRedirect failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify redirect was created
|
||||
redirectPath := filepath.Join(crewPath, ".beads", "redirect")
|
||||
content, err := os.ReadFile(redirectPath)
|
||||
if err != nil {
|
||||
t.Fatalf("read redirect: %v", err)
|
||||
}
|
||||
|
||||
want := "../../.beads\n"
|
||||
if string(content) != want {
|
||||
t.Errorf("redirect content = %q, want %q", string(content), want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("crew worktree with tracked beads", func(t *testing.T) {
|
||||
// Setup: town/rig/.beads/redirect -> mayor/rig/.beads (tracked)
|
||||
townRoot := t.TempDir()
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
rigBeads := filepath.Join(rigRoot, ".beads")
|
||||
mayorRigBeads := filepath.Join(rigRoot, "mayor", "rig", ".beads")
|
||||
crewPath := filepath.Join(rigRoot, "crew", "max")
|
||||
|
||||
// Create rig structure with tracked beads
|
||||
if err := os.MkdirAll(mayorRigBeads, 0755); err != nil {
|
||||
t.Fatalf("mkdir mayor/rig beads: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(rigBeads, 0755); err != nil {
|
||||
t.Fatalf("mkdir rig beads: %v", err)
|
||||
}
|
||||
// Create rig-level redirect to mayor/rig/.beads
|
||||
if err := os.WriteFile(filepath.Join(rigBeads, "redirect"), []byte("mayor/rig/.beads\n"), 0644); err != nil {
|
||||
t.Fatalf("write rig redirect: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(crewPath, 0755); err != nil {
|
||||
t.Fatalf("mkdir crew: %v", err)
|
||||
}
|
||||
|
||||
// Run SetupRedirect
|
||||
if err := SetupRedirect(townRoot, crewPath); err != nil {
|
||||
t.Fatalf("SetupRedirect failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify redirect goes directly to mayor/rig/.beads (no chain - bd CLI doesn't support chains)
|
||||
redirectPath := filepath.Join(crewPath, ".beads", "redirect")
|
||||
content, err := os.ReadFile(redirectPath)
|
||||
if err != nil {
|
||||
t.Fatalf("read redirect: %v", err)
|
||||
}
|
||||
|
||||
want := "../../mayor/rig/.beads\n"
|
||||
if string(content) != want {
|
||||
t.Errorf("redirect content = %q, want %q", string(content), want)
|
||||
}
|
||||
|
||||
// Verify redirect resolves correctly
|
||||
resolved := ResolveBeadsDir(crewPath)
|
||||
// crew/max -> ../../mayor/rig/.beads (direct, no chain)
|
||||
if resolved != mayorRigBeads {
|
||||
t.Errorf("resolved = %q, want %q", resolved, mayorRigBeads)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("polecat worktree", func(t *testing.T) {
|
||||
townRoot := t.TempDir()
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
rigBeads := filepath.Join(rigRoot, ".beads")
|
||||
polecatPath := filepath.Join(rigRoot, "polecats", "worker1")
|
||||
|
||||
if err := os.MkdirAll(rigBeads, 0755); err != nil {
|
||||
t.Fatalf("mkdir rig beads: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(polecatPath, 0755); err != nil {
|
||||
t.Fatalf("mkdir polecat: %v", err)
|
||||
}
|
||||
|
||||
if err := SetupRedirect(townRoot, polecatPath); err != nil {
|
||||
t.Fatalf("SetupRedirect failed: %v", err)
|
||||
}
|
||||
|
||||
redirectPath := filepath.Join(polecatPath, ".beads", "redirect")
|
||||
content, err := os.ReadFile(redirectPath)
|
||||
if err != nil {
|
||||
t.Fatalf("read redirect: %v", err)
|
||||
}
|
||||
|
||||
want := "../../.beads\n"
|
||||
if string(content) != want {
|
||||
t.Errorf("redirect content = %q, want %q", string(content), want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("refinery worktree", func(t *testing.T) {
|
||||
townRoot := t.TempDir()
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
rigBeads := filepath.Join(rigRoot, ".beads")
|
||||
refineryPath := filepath.Join(rigRoot, "refinery", "rig")
|
||||
|
||||
if err := os.MkdirAll(rigBeads, 0755); err != nil {
|
||||
t.Fatalf("mkdir rig beads: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(refineryPath, 0755); err != nil {
|
||||
t.Fatalf("mkdir refinery: %v", err)
|
||||
}
|
||||
|
||||
if err := SetupRedirect(townRoot, refineryPath); err != nil {
|
||||
t.Fatalf("SetupRedirect failed: %v", err)
|
||||
}
|
||||
|
||||
redirectPath := filepath.Join(refineryPath, ".beads", "redirect")
|
||||
content, err := os.ReadFile(redirectPath)
|
||||
if err != nil {
|
||||
t.Fatalf("read redirect: %v", err)
|
||||
}
|
||||
|
||||
want := "../../.beads\n"
|
||||
if string(content) != want {
|
||||
t.Errorf("redirect content = %q, want %q", string(content), want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("cleans runtime files but preserves tracked files", func(t *testing.T) {
|
||||
townRoot := t.TempDir()
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
rigBeads := filepath.Join(rigRoot, ".beads")
|
||||
crewPath := filepath.Join(rigRoot, "crew", "max")
|
||||
crewBeads := filepath.Join(crewPath, ".beads")
|
||||
|
||||
if err := os.MkdirAll(rigBeads, 0755); err != nil {
|
||||
t.Fatalf("mkdir rig beads: %v", err)
|
||||
}
|
||||
// Simulate worktree with both runtime and tracked files
|
||||
if err := os.MkdirAll(crewBeads, 0755); err != nil {
|
||||
t.Fatalf("mkdir crew beads: %v", err)
|
||||
}
|
||||
// Runtime files (should be removed)
|
||||
if err := os.WriteFile(filepath.Join(crewBeads, "beads.db"), []byte("fake db"), 0644); err != nil {
|
||||
t.Fatalf("write fake db: %v", err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(crewBeads, "issues.jsonl"), []byte("{}"), 0644); err != nil {
|
||||
t.Fatalf("write issues.jsonl: %v", err)
|
||||
}
|
||||
// Tracked files (should be preserved)
|
||||
if err := os.WriteFile(filepath.Join(crewBeads, "config.yaml"), []byte("prefix: test"), 0644); err != nil {
|
||||
t.Fatalf("write config: %v", err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(crewBeads, "README.md"), []byte("# Beads"), 0644); err != nil {
|
||||
t.Fatalf("write README: %v", err)
|
||||
}
|
||||
|
||||
if err := SetupRedirect(townRoot, crewPath); err != nil {
|
||||
t.Fatalf("SetupRedirect failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify runtime files were cleaned up
|
||||
if _, err := os.Stat(filepath.Join(crewBeads, "beads.db")); !os.IsNotExist(err) {
|
||||
t.Error("beads.db should have been removed")
|
||||
}
|
||||
if _, err := os.Stat(filepath.Join(crewBeads, "issues.jsonl")); !os.IsNotExist(err) {
|
||||
t.Error("issues.jsonl should have been removed")
|
||||
}
|
||||
|
||||
// Verify tracked files were preserved
|
||||
if _, err := os.Stat(filepath.Join(crewBeads, "config.yaml")); err != nil {
|
||||
t.Errorf("config.yaml should have been preserved: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(filepath.Join(crewBeads, "README.md")); err != nil {
|
||||
t.Errorf("README.md should have been preserved: %v", err)
|
||||
}
|
||||
|
||||
// Verify redirect was created
|
||||
redirectPath := filepath.Join(crewBeads, "redirect")
|
||||
if _, err := os.Stat(redirectPath); err != nil {
|
||||
t.Errorf("redirect file should exist: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("rejects mayor/rig canonical location", func(t *testing.T) {
|
||||
townRoot := t.TempDir()
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
rigBeads := filepath.Join(rigRoot, ".beads")
|
||||
mayorRigPath := filepath.Join(rigRoot, "mayor", "rig")
|
||||
|
||||
if err := os.MkdirAll(rigBeads, 0755); err != nil {
|
||||
t.Fatalf("mkdir rig beads: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(mayorRigPath, 0755); err != nil {
|
||||
t.Fatalf("mkdir mayor/rig: %v", err)
|
||||
}
|
||||
|
||||
err := SetupRedirect(townRoot, mayorRigPath)
|
||||
if err == nil {
|
||||
t.Error("SetupRedirect should reject mayor/rig location")
|
||||
}
|
||||
if err != nil && !strings.Contains(err.Error(), "canonical") {
|
||||
t.Errorf("error should mention canonical location, got: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("rejects path too shallow", func(t *testing.T) {
|
||||
townRoot := t.TempDir()
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
|
||||
if err := os.MkdirAll(rigRoot, 0755); err != nil {
|
||||
t.Fatalf("mkdir rig: %v", err)
|
||||
}
|
||||
|
||||
err := SetupRedirect(townRoot, rigRoot)
|
||||
if err == nil {
|
||||
t.Error("SetupRedirect should reject rig root (too shallow)")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("fails if rig beads missing", func(t *testing.T) {
|
||||
townRoot := t.TempDir()
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
crewPath := filepath.Join(rigRoot, "crew", "max")
|
||||
|
||||
// No rig/.beads or mayor/rig/.beads created
|
||||
if err := os.MkdirAll(crewPath, 0755); err != nil {
|
||||
t.Fatalf("mkdir crew: %v", err)
|
||||
}
|
||||
|
||||
err := SetupRedirect(townRoot, crewPath)
|
||||
if err == nil {
|
||||
t.Error("SetupRedirect should fail if rig .beads missing")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("crew worktree with mayor/rig beads only", func(t *testing.T) {
|
||||
// Setup: no rig/.beads, only mayor/rig/.beads exists
|
||||
// This is the tracked beads architecture where rig root has no .beads directory
|
||||
townRoot := t.TempDir()
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
mayorRigBeads := filepath.Join(rigRoot, "mayor", "rig", ".beads")
|
||||
crewPath := filepath.Join(rigRoot, "crew", "max")
|
||||
|
||||
// Create only mayor/rig/.beads (no rig/.beads)
|
||||
if err := os.MkdirAll(mayorRigBeads, 0755); err != nil {
|
||||
t.Fatalf("mkdir mayor/rig beads: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(crewPath, 0755); err != nil {
|
||||
t.Fatalf("mkdir crew: %v", err)
|
||||
}
|
||||
|
||||
// Run SetupRedirect - should succeed and point to mayor/rig/.beads
|
||||
if err := SetupRedirect(townRoot, crewPath); err != nil {
|
||||
t.Fatalf("SetupRedirect failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify redirect points to mayor/rig/.beads
|
||||
redirectPath := filepath.Join(crewPath, ".beads", "redirect")
|
||||
content, err := os.ReadFile(redirectPath)
|
||||
if err != nil {
|
||||
t.Fatalf("read redirect: %v", err)
|
||||
}
|
||||
|
||||
want := "../../mayor/rig/.beads\n"
|
||||
if string(content) != want {
|
||||
t.Errorf("redirect content = %q, want %q", string(content), want)
|
||||
}
|
||||
|
||||
// Verify redirect resolves correctly
|
||||
resolved := ResolveBeadsDir(crewPath)
|
||||
if resolved != mayorRigBeads {
|
||||
t.Errorf("resolved = %q, want %q", resolved, mayorRigBeads)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -57,7 +57,12 @@ func LoadRoutes(beadsDir string) ([]Route, error) {
|
||||
// If the prefix already exists, it updates the path.
|
||||
func AppendRoute(townRoot string, route Route) error {
|
||||
beadsDir := filepath.Join(townRoot, ".beads")
|
||||
return AppendRouteToDir(beadsDir, route)
|
||||
}
|
||||
|
||||
// AppendRouteToDir appends a route to routes.jsonl in the given beads directory.
|
||||
// If the prefix already exists, it updates the path.
|
||||
func AppendRouteToDir(beadsDir string, route Route) error {
|
||||
// Load existing routes
|
||||
routes, err := LoadRoutes(beadsDir)
|
||||
if err != nil {
|
||||
@@ -185,3 +190,60 @@ func FindConflictingPrefixes(beadsDir string) (map[string][]string, error) {
|
||||
|
||||
return conflicts, nil
|
||||
}
|
||||
|
||||
// ExtractPrefix extracts the prefix from a bead ID.
|
||||
// For example, "ap-qtsup.16" returns "ap-", "hq-cv-abc" returns "hq-".
|
||||
// Returns empty string if no valid prefix found (empty input, no hyphen,
|
||||
// or hyphen at position 0 which would indicate an invalid prefix).
|
||||
func ExtractPrefix(beadID string) string {
|
||||
if beadID == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
idx := strings.Index(beadID, "-")
|
||||
if idx <= 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
return beadID[:idx+1]
|
||||
}
|
||||
|
||||
// GetRigPathForPrefix returns the rig path for a given bead ID prefix.
|
||||
// The townRoot should be the Gas Town root directory (e.g., ~/gt).
|
||||
// Returns the full absolute path to the rig directory, or empty string if not found.
|
||||
// For town-level beads (path="."), returns townRoot.
|
||||
func GetRigPathForPrefix(townRoot, prefix string) string {
|
||||
beadsDir := filepath.Join(townRoot, ".beads")
|
||||
routes, err := LoadRoutes(beadsDir)
|
||||
if err != nil || routes == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
for _, r := range routes {
|
||||
if r.Prefix == prefix {
|
||||
if r.Path == "." {
|
||||
return townRoot // Town-level beads
|
||||
}
|
||||
return filepath.Join(townRoot, r.Path)
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// ResolveHookDir determines the directory for running bd update on a bead.
|
||||
// Since bd update doesn't support routing or redirects, we must resolve the
|
||||
// actual rig directory from the bead's prefix. hookWorkDir is only used as
|
||||
// a fallback if prefix resolution fails.
|
||||
func ResolveHookDir(townRoot, beadID, hookWorkDir string) string {
|
||||
// Always try prefix resolution first - bd update needs the actual rig dir
|
||||
prefix := ExtractPrefix(beadID)
|
||||
if rigPath := GetRigPathForPrefix(townRoot, prefix); rigPath != "" {
|
||||
return rigPath
|
||||
}
|
||||
// Fallback to hookWorkDir if provided
|
||||
if hookWorkDir != "" {
|
||||
return hookWorkDir
|
||||
}
|
||||
return townRoot
|
||||
}
|
||||
|
||||
@@ -52,6 +52,143 @@ func TestGetPrefixForRig_NoRoutesFile(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractPrefix(t *testing.T) {
|
||||
tests := []struct {
|
||||
beadID string
|
||||
expected string
|
||||
}{
|
||||
{"ap-qtsup.16", "ap-"},
|
||||
{"hq-cv-abc", "hq-"},
|
||||
{"gt-mol-xyz", "gt-"},
|
||||
{"bd-123", "bd-"},
|
||||
{"", ""},
|
||||
{"nohyphen", ""},
|
||||
{"-startswithhyphen", ""}, // Leading hyphen = invalid prefix
|
||||
{"-", ""}, // Just hyphen = invalid
|
||||
{"a-", "a-"}, // Trailing hyphen is valid
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.beadID, func(t *testing.T) {
|
||||
result := ExtractPrefix(tc.beadID)
|
||||
if result != tc.expected {
|
||||
t.Errorf("ExtractPrefix(%q) = %q, want %q", tc.beadID, result, tc.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetRigPathForPrefix(t *testing.T) {
|
||||
// Create a temporary directory with routes.jsonl
|
||||
tmpDir := t.TempDir()
|
||||
beadsDir := filepath.Join(tmpDir, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
routesContent := `{"prefix": "ap-", "path": "ai_platform/mayor/rig"}
|
||||
{"prefix": "gt-", "path": "gastown/mayor/rig"}
|
||||
{"prefix": "hq-", "path": "."}
|
||||
`
|
||||
if err := os.WriteFile(filepath.Join(beadsDir, "routes.jsonl"), []byte(routesContent), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
prefix string
|
||||
expected string
|
||||
}{
|
||||
{"ap-", filepath.Join(tmpDir, "ai_platform/mayor/rig")},
|
||||
{"gt-", filepath.Join(tmpDir, "gastown/mayor/rig")},
|
||||
{"hq-", tmpDir}, // Town-level beads return townRoot
|
||||
{"unknown-", ""}, // Unknown prefix returns empty
|
||||
{"", ""}, // Empty prefix returns empty
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.prefix, func(t *testing.T) {
|
||||
result := GetRigPathForPrefix(tmpDir, tc.prefix)
|
||||
if result != tc.expected {
|
||||
t.Errorf("GetRigPathForPrefix(%q, %q) = %q, want %q", tmpDir, tc.prefix, result, tc.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetRigPathForPrefix_NoRoutesFile(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
// No routes.jsonl file
|
||||
|
||||
result := GetRigPathForPrefix(tmpDir, "ap-")
|
||||
if result != "" {
|
||||
t.Errorf("Expected empty string when no routes file, got %q", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveHookDir(t *testing.T) {
|
||||
// Create a temporary directory with routes.jsonl
|
||||
tmpDir := t.TempDir()
|
||||
beadsDir := filepath.Join(tmpDir, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
routesContent := `{"prefix": "ap-", "path": "ai_platform/mayor/rig"}
|
||||
{"prefix": "hq-", "path": "."}
|
||||
`
|
||||
if err := os.WriteFile(filepath.Join(beadsDir, "routes.jsonl"), []byte(routesContent), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
beadID string
|
||||
hookWorkDir string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "prefix resolution takes precedence over hookWorkDir",
|
||||
beadID: "ap-test",
|
||||
hookWorkDir: "/custom/path",
|
||||
expected: filepath.Join(tmpDir, "ai_platform/mayor/rig"),
|
||||
},
|
||||
{
|
||||
name: "resolves rig path from prefix",
|
||||
beadID: "ap-test",
|
||||
hookWorkDir: "",
|
||||
expected: filepath.Join(tmpDir, "ai_platform/mayor/rig"),
|
||||
},
|
||||
{
|
||||
name: "town-level bead returns townRoot",
|
||||
beadID: "hq-test",
|
||||
hookWorkDir: "",
|
||||
expected: tmpDir,
|
||||
},
|
||||
{
|
||||
name: "unknown prefix uses hookWorkDir as fallback",
|
||||
beadID: "xx-unknown",
|
||||
hookWorkDir: "/fallback/path",
|
||||
expected: "/fallback/path",
|
||||
},
|
||||
{
|
||||
name: "unknown prefix without hookWorkDir falls back to townRoot",
|
||||
beadID: "xx-unknown",
|
||||
hookWorkDir: "",
|
||||
expected: tmpDir,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := ResolveHookDir(tmpDir, tc.beadID, tc.hookWorkDir)
|
||||
if result != tc.expected {
|
||||
t.Errorf("ResolveHookDir(%q, %q, %q) = %q, want %q",
|
||||
tmpDir, tc.beadID, tc.hookWorkDir, result, tc.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAgentBeadIDsWithPrefix(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@@ -197,6 +197,11 @@ func (b *Boot) spawnTmux() error {
|
||||
// Launch Claude with environment exported inline and initial triage prompt
|
||||
// The "gt boot triage" prompt tells Boot to immediately start triage (GUPP principle)
|
||||
startCmd := config.BuildAgentStartupCommand("boot", "deacon-boot", "", "gt boot triage")
|
||||
// Wait for shell to be ready before sending keys (prevents "can't find pane" under load)
|
||||
if err := b.tmux.WaitForShellReady(SessionName, 5*time.Second); err != nil {
|
||||
_ = b.tmux.KillSession(SessionName)
|
||||
return fmt.Errorf("waiting for shell: %w", err)
|
||||
}
|
||||
if err := b.tmux.SendKeys(SessionName, startCmd); err != nil {
|
||||
return fmt.Errorf("sending startup command: %w", err)
|
||||
}
|
||||
|
||||
@@ -11,6 +11,8 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
)
|
||||
|
||||
// Filename is the checkpoint file name within the polecat directory.
|
||||
@@ -84,7 +86,7 @@ func Write(polecatDir string, cp *Checkpoint) error {
|
||||
|
||||
// Set session ID from environment if available
|
||||
if cp.SessionID == "" {
|
||||
cp.SessionID = os.Getenv("CLAUDE_SESSION_ID")
|
||||
cp.SessionID = runtime.SessionIDFromEnv()
|
||||
if cp.SessionID == "" {
|
||||
cp.SessionID = fmt.Sprintf("pid-%d", os.Getpid())
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt prime && gt mail check --inject && gt nudge deacon session-started"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt prime && gt mail check --inject && gt nudge deacon session-started"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -20,7 +20,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt prime"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt prime"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -31,7 +31,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt mail check --inject"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt mail check --inject"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -42,7 +42,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt costs record"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt costs record"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt prime && gt nudge deacon session-started"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt prime && gt nudge deacon session-started"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -20,7 +20,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt prime"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt prime"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -31,7 +31,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt mail check --inject"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt mail check --inject"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -42,7 +42,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt costs record"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt costs record"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -27,7 +27,7 @@ const (
|
||||
// RoleTypeFor returns the RoleType for a given role name.
|
||||
func RoleTypeFor(role string) RoleType {
|
||||
switch role {
|
||||
case "polecat", "witness", "refinery":
|
||||
case "polecat", "witness", "refinery", "deacon":
|
||||
return Autonomous
|
||||
default:
|
||||
return Interactive
|
||||
@@ -35,20 +35,27 @@ func RoleTypeFor(role string) RoleType {
|
||||
}
|
||||
|
||||
// EnsureSettings ensures .claude/settings.json exists in the given directory.
|
||||
// For worktrees, we use sparse checkout to exclude source repo's .claude/ directory,
|
||||
// so our settings.json is the only one Claude Code sees.
|
||||
func EnsureSettings(workDir string, roleType RoleType) error {
|
||||
return EnsureSettingsAt(workDir, roleType, ".claude", "settings.json")
|
||||
}
|
||||
|
||||
// EnsureSettingsAt ensures a settings file exists at a custom directory/file.
|
||||
// If the file doesn't exist, it copies the appropriate template based on role type.
|
||||
// If the file already exists, it's left unchanged.
|
||||
func EnsureSettings(workDir string, roleType RoleType) error {
|
||||
claudeDir := filepath.Join(workDir, ".claude")
|
||||
settingsPath := filepath.Join(claudeDir, "settings.json")
|
||||
func EnsureSettingsAt(workDir string, roleType RoleType, settingsDir, settingsFile string) error {
|
||||
claudeDir := filepath.Join(workDir, settingsDir)
|
||||
settingsPath := filepath.Join(claudeDir, settingsFile)
|
||||
|
||||
// If settings already exist, don't overwrite
|
||||
if _, err := os.Stat(settingsPath); err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create .claude directory if needed
|
||||
// Create settings directory if needed
|
||||
if err := os.MkdirAll(claudeDir, 0755); err != nil {
|
||||
return fmt.Errorf("creating .claude directory: %w", err)
|
||||
return fmt.Errorf("creating settings directory: %w", err)
|
||||
}
|
||||
|
||||
// Select template based on role type
|
||||
@@ -78,3 +85,8 @@ func EnsureSettings(workDir string, roleType RoleType) error {
|
||||
func EnsureSettingsForRole(workDir, role string) error {
|
||||
return EnsureSettings(workDir, RoleTypeFor(role))
|
||||
}
|
||||
|
||||
// EnsureSettingsForRoleAt is a convenience function that combines RoleTypeFor and EnsureSettingsAt.
|
||||
func EnsureSettingsForRoleAt(workDir, role, settingsDir, settingsFile string) error {
|
||||
return EnsureSettingsAt(workDir, RoleTypeFor(role), settingsDir, settingsFile)
|
||||
}
|
||||
|
||||
@@ -264,6 +264,25 @@ Examples:
|
||||
RunE: runAccountStatus,
|
||||
}
|
||||
|
||||
var accountSwitchCmd = &cobra.Command{
|
||||
Use: "switch <handle>",
|
||||
Short: "Switch to a different account",
|
||||
Long: `Switch the active Claude Code account.
|
||||
|
||||
This command:
|
||||
1. Backs up ~/.claude to the current account's config_dir (if needed)
|
||||
2. Creates a symlink from ~/.claude to the target account's config_dir
|
||||
3. Updates the default account in accounts.json
|
||||
|
||||
After switching, you must restart Claude Code for the change to take effect.
|
||||
|
||||
Examples:
|
||||
gt account switch work # Switch to work account
|
||||
gt account switch personal # Switch to personal account`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: runAccountSwitch,
|
||||
}
|
||||
|
||||
func runAccountStatus(cmd *cobra.Command, args []string) error {
|
||||
townRoot, err := workspace.FindFromCwd()
|
||||
if err != nil {
|
||||
@@ -318,6 +337,122 @@ func runAccountStatus(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func runAccountSwitch(cmd *cobra.Command, args []string) error {
|
||||
targetHandle := args[0]
|
||||
|
||||
townRoot, err := workspace.FindFromCwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("finding town root: %w", err)
|
||||
}
|
||||
|
||||
accountsPath := constants.MayorAccountsPath(townRoot)
|
||||
cfg, err := config.LoadAccountsConfig(accountsPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading accounts config: %w", err)
|
||||
}
|
||||
|
||||
// Check if target account exists
|
||||
targetAcct := cfg.GetAccount(targetHandle)
|
||||
if targetAcct == nil {
|
||||
// List available accounts
|
||||
var handles []string
|
||||
for h := range cfg.Accounts {
|
||||
handles = append(handles, h)
|
||||
}
|
||||
sort.Strings(handles)
|
||||
return fmt.Errorf("account '%s' not found. Available accounts: %v", targetHandle, handles)
|
||||
}
|
||||
|
||||
// Get ~/.claude path
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting home directory: %w", err)
|
||||
}
|
||||
claudeDir := home + "/.claude"
|
||||
|
||||
// Check current state of ~/.claude
|
||||
fileInfo, err := os.Lstat(claudeDir)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return fmt.Errorf("checking ~/.claude: %w", err)
|
||||
}
|
||||
|
||||
// Determine current account (if any) by checking symlink target
|
||||
var currentHandle string
|
||||
if err == nil && fileInfo.Mode()&os.ModeSymlink != 0 {
|
||||
// It's a symlink - find which account it points to
|
||||
linkTarget, err := os.Readlink(claudeDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("reading symlink: %w", err)
|
||||
}
|
||||
for h, acct := range cfg.Accounts {
|
||||
if acct.ConfigDir == linkTarget {
|
||||
currentHandle = h
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check if already on target account
|
||||
if currentHandle == targetHandle {
|
||||
fmt.Printf("Already on account '%s'\n", targetHandle)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handle the case where ~/.claude is a real directory (not a symlink)
|
||||
if err == nil && fileInfo.Mode()&os.ModeSymlink == 0 && fileInfo.IsDir() {
|
||||
// It's a real directory - need to move it
|
||||
// Try to find which account it belongs to based on default
|
||||
if currentHandle == "" && cfg.Default != "" {
|
||||
currentHandle = cfg.Default
|
||||
}
|
||||
|
||||
if currentHandle != "" {
|
||||
currentAcct := cfg.GetAccount(currentHandle)
|
||||
if currentAcct != nil {
|
||||
// Move ~/.claude to the current account's config_dir
|
||||
fmt.Printf("Moving ~/.claude to %s...\n", currentAcct.ConfigDir)
|
||||
|
||||
// Remove the target config dir if it exists (it might be empty from account add)
|
||||
if _, err := os.Stat(currentAcct.ConfigDir); err == nil {
|
||||
if err := os.RemoveAll(currentAcct.ConfigDir); err != nil {
|
||||
return fmt.Errorf("removing existing config dir: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := os.Rename(claudeDir, currentAcct.ConfigDir); err != nil {
|
||||
return fmt.Errorf("moving ~/.claude to %s: %w", currentAcct.ConfigDir, err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("~/.claude is a directory but no default account is set. Please set a default account first with 'gt account default <handle>'")
|
||||
}
|
||||
} else if err == nil && fileInfo.Mode()&os.ModeSymlink != 0 {
|
||||
// It's a symlink - remove it so we can create a new one
|
||||
if err := os.Remove(claudeDir); err != nil {
|
||||
return fmt.Errorf("removing existing symlink: %w", err)
|
||||
}
|
||||
}
|
||||
// If ~/.claude doesn't exist, that's fine - we'll create the symlink
|
||||
|
||||
// Create symlink to target account
|
||||
if err := os.Symlink(targetAcct.ConfigDir, claudeDir); err != nil {
|
||||
return fmt.Errorf("creating symlink to %s: %w", targetAcct.ConfigDir, err)
|
||||
}
|
||||
|
||||
// Update default account
|
||||
cfg.Default = targetHandle
|
||||
if err := config.SaveAccountsConfig(accountsPath, cfg); err != nil {
|
||||
return fmt.Errorf("saving accounts config: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Switched to account '%s'\n", targetHandle)
|
||||
fmt.Printf("~/.claude -> %s\n", targetAcct.ConfigDir)
|
||||
fmt.Println()
|
||||
fmt.Println(style.Warning.Render("⚠️ Restart Claude Code for the change to take effect"))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Add flags
|
||||
accountListCmd.Flags().BoolVar(&accountJSON, "json", false, "Output as JSON")
|
||||
@@ -330,6 +465,7 @@ func init() {
|
||||
accountCmd.AddCommand(accountAddCmd)
|
||||
accountCmd.AddCommand(accountDefaultCmd)
|
||||
accountCmd.AddCommand(accountStatusCmd)
|
||||
accountCmd.AddCommand(accountSwitchCmd)
|
||||
|
||||
rootCmd.AddCommand(accountCmd)
|
||||
}
|
||||
|
||||
299
internal/cmd/account_test.go
Normal file
299
internal/cmd/account_test.go
Normal file
@@ -0,0 +1,299 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
)
|
||||
|
||||
// setupTestTownForAccount creates a minimal Gas Town workspace with accounts.
|
||||
func setupTestTownForAccount(t *testing.T) (townRoot string, accountsDir string) {
|
||||
t.Helper()
|
||||
|
||||
townRoot = t.TempDir()
|
||||
|
||||
// Create mayor directory with required files
|
||||
mayorDir := filepath.Join(townRoot, "mayor")
|
||||
if err := os.MkdirAll(mayorDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir mayor: %v", err)
|
||||
}
|
||||
|
||||
// Create town.json
|
||||
townConfig := &config.TownConfig{
|
||||
Type: "town",
|
||||
Version: config.CurrentTownVersion,
|
||||
Name: "test-town",
|
||||
PublicName: "Test Town",
|
||||
CreatedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
}
|
||||
townConfigPath := filepath.Join(mayorDir, "town.json")
|
||||
if err := config.SaveTownConfig(townConfigPath, townConfig); err != nil {
|
||||
t.Fatalf("save town.json: %v", err)
|
||||
}
|
||||
|
||||
// Create empty rigs.json
|
||||
rigsConfig := &config.RigsConfig{
|
||||
Version: 1,
|
||||
Rigs: make(map[string]config.RigEntry),
|
||||
}
|
||||
rigsPath := filepath.Join(mayorDir, "rigs.json")
|
||||
if err := config.SaveRigsConfig(rigsPath, rigsConfig); err != nil {
|
||||
t.Fatalf("save rigs.json: %v", err)
|
||||
}
|
||||
|
||||
// Create accounts directory
|
||||
accountsDir = filepath.Join(t.TempDir(), "claude-accounts")
|
||||
if err := os.MkdirAll(accountsDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir accounts: %v", err)
|
||||
}
|
||||
|
||||
return townRoot, accountsDir
|
||||
}
|
||||
|
||||
func TestAccountSwitch(t *testing.T) {
|
||||
t.Run("switch between accounts", func(t *testing.T) {
|
||||
townRoot, accountsDir := setupTestTownForAccount(t)
|
||||
|
||||
// Create fake home directory for ~/.claude
|
||||
fakeHome := t.TempDir()
|
||||
originalHome := os.Getenv("HOME")
|
||||
os.Setenv("HOME", fakeHome)
|
||||
defer os.Setenv("HOME", originalHome)
|
||||
|
||||
// Create account config directories
|
||||
workConfigDir := filepath.Join(accountsDir, "work")
|
||||
personalConfigDir := filepath.Join(accountsDir, "personal")
|
||||
if err := os.MkdirAll(workConfigDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir work config: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(personalConfigDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir personal config: %v", err)
|
||||
}
|
||||
|
||||
// Create accounts.json with two accounts
|
||||
accountsPath := filepath.Join(townRoot, "mayor", "accounts.json")
|
||||
accountsCfg := config.NewAccountsConfig()
|
||||
accountsCfg.Accounts["work"] = config.Account{
|
||||
Email: "steve@work.com",
|
||||
ConfigDir: workConfigDir,
|
||||
}
|
||||
accountsCfg.Accounts["personal"] = config.Account{
|
||||
Email: "steve@personal.com",
|
||||
ConfigDir: personalConfigDir,
|
||||
}
|
||||
accountsCfg.Default = "work"
|
||||
if err := config.SaveAccountsConfig(accountsPath, accountsCfg); err != nil {
|
||||
t.Fatalf("save accounts.json: %v", err)
|
||||
}
|
||||
|
||||
// Create initial symlink to work account
|
||||
claudeDir := filepath.Join(fakeHome, ".claude")
|
||||
if err := os.Symlink(workConfigDir, claudeDir); err != nil {
|
||||
t.Fatalf("create symlink: %v", err)
|
||||
}
|
||||
|
||||
// Change to town root
|
||||
originalWd, _ := os.Getwd()
|
||||
defer os.Chdir(originalWd)
|
||||
if err := os.Chdir(townRoot); err != nil {
|
||||
t.Fatalf("chdir: %v", err)
|
||||
}
|
||||
|
||||
// Run switch to personal
|
||||
cmd := &cobra.Command{}
|
||||
err := runAccountSwitch(cmd, []string{"personal"})
|
||||
if err != nil {
|
||||
t.Fatalf("runAccountSwitch failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify symlink points to personal
|
||||
target, err := os.Readlink(claudeDir)
|
||||
if err != nil {
|
||||
t.Fatalf("readlink: %v", err)
|
||||
}
|
||||
if target != personalConfigDir {
|
||||
t.Errorf("symlink target = %q, want %q", target, personalConfigDir)
|
||||
}
|
||||
|
||||
// Verify default was updated
|
||||
loadedCfg, err := config.LoadAccountsConfig(accountsPath)
|
||||
if err != nil {
|
||||
t.Fatalf("load accounts: %v", err)
|
||||
}
|
||||
if loadedCfg.Default != "personal" {
|
||||
t.Errorf("default = %q, want 'personal'", loadedCfg.Default)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("already on target account", func(t *testing.T) {
|
||||
townRoot, accountsDir := setupTestTownForAccount(t)
|
||||
|
||||
fakeHome := t.TempDir()
|
||||
originalHome := os.Getenv("HOME")
|
||||
os.Setenv("HOME", fakeHome)
|
||||
defer os.Setenv("HOME", originalHome)
|
||||
|
||||
workConfigDir := filepath.Join(accountsDir, "work")
|
||||
if err := os.MkdirAll(workConfigDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir work config: %v", err)
|
||||
}
|
||||
|
||||
accountsPath := filepath.Join(townRoot, "mayor", "accounts.json")
|
||||
accountsCfg := config.NewAccountsConfig()
|
||||
accountsCfg.Accounts["work"] = config.Account{
|
||||
Email: "steve@work.com",
|
||||
ConfigDir: workConfigDir,
|
||||
}
|
||||
accountsCfg.Default = "work"
|
||||
if err := config.SaveAccountsConfig(accountsPath, accountsCfg); err != nil {
|
||||
t.Fatalf("save accounts.json: %v", err)
|
||||
}
|
||||
|
||||
// Create symlink already pointing to work
|
||||
claudeDir := filepath.Join(fakeHome, ".claude")
|
||||
if err := os.Symlink(workConfigDir, claudeDir); err != nil {
|
||||
t.Fatalf("create symlink: %v", err)
|
||||
}
|
||||
|
||||
originalWd, _ := os.Getwd()
|
||||
defer os.Chdir(originalWd)
|
||||
if err := os.Chdir(townRoot); err != nil {
|
||||
t.Fatalf("chdir: %v", err)
|
||||
}
|
||||
|
||||
// Switch to work (should be no-op)
|
||||
cmd := &cobra.Command{}
|
||||
err := runAccountSwitch(cmd, []string{"work"})
|
||||
if err != nil {
|
||||
t.Fatalf("runAccountSwitch failed: %v", err)
|
||||
}
|
||||
|
||||
// Symlink should still point to work
|
||||
target, err := os.Readlink(claudeDir)
|
||||
if err != nil {
|
||||
t.Fatalf("readlink: %v", err)
|
||||
}
|
||||
if target != workConfigDir {
|
||||
t.Errorf("symlink target = %q, want %q", target, workConfigDir)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("nonexistent account", func(t *testing.T) {
|
||||
townRoot, accountsDir := setupTestTownForAccount(t)
|
||||
|
||||
fakeHome := t.TempDir()
|
||||
originalHome := os.Getenv("HOME")
|
||||
os.Setenv("HOME", fakeHome)
|
||||
defer os.Setenv("HOME", originalHome)
|
||||
|
||||
workConfigDir := filepath.Join(accountsDir, "work")
|
||||
if err := os.MkdirAll(workConfigDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir work config: %v", err)
|
||||
}
|
||||
|
||||
accountsPath := filepath.Join(townRoot, "mayor", "accounts.json")
|
||||
accountsCfg := config.NewAccountsConfig()
|
||||
accountsCfg.Accounts["work"] = config.Account{
|
||||
Email: "steve@work.com",
|
||||
ConfigDir: workConfigDir,
|
||||
}
|
||||
accountsCfg.Default = "work"
|
||||
if err := config.SaveAccountsConfig(accountsPath, accountsCfg); err != nil {
|
||||
t.Fatalf("save accounts.json: %v", err)
|
||||
}
|
||||
|
||||
originalWd, _ := os.Getwd()
|
||||
defer os.Chdir(originalWd)
|
||||
if err := os.Chdir(townRoot); err != nil {
|
||||
t.Fatalf("chdir: %v", err)
|
||||
}
|
||||
|
||||
// Switch to nonexistent account
|
||||
cmd := &cobra.Command{}
|
||||
err := runAccountSwitch(cmd, []string{"nonexistent"})
|
||||
if err == nil {
|
||||
t.Fatal("expected error for nonexistent account")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("real directory gets moved", func(t *testing.T) {
|
||||
townRoot, accountsDir := setupTestTownForAccount(t)
|
||||
|
||||
fakeHome := t.TempDir()
|
||||
originalHome := os.Getenv("HOME")
|
||||
os.Setenv("HOME", fakeHome)
|
||||
defer os.Setenv("HOME", originalHome)
|
||||
|
||||
workConfigDir := filepath.Join(accountsDir, "work")
|
||||
personalConfigDir := filepath.Join(accountsDir, "personal")
|
||||
// Don't create workConfigDir - it will be created by moving ~/.claude
|
||||
if err := os.MkdirAll(personalConfigDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir personal config: %v", err)
|
||||
}
|
||||
|
||||
accountsPath := filepath.Join(townRoot, "mayor", "accounts.json")
|
||||
accountsCfg := config.NewAccountsConfig()
|
||||
accountsCfg.Accounts["work"] = config.Account{
|
||||
Email: "steve@work.com",
|
||||
ConfigDir: workConfigDir,
|
||||
}
|
||||
accountsCfg.Accounts["personal"] = config.Account{
|
||||
Email: "steve@personal.com",
|
||||
ConfigDir: personalConfigDir,
|
||||
}
|
||||
accountsCfg.Default = "work"
|
||||
if err := config.SaveAccountsConfig(accountsPath, accountsCfg); err != nil {
|
||||
t.Fatalf("save accounts.json: %v", err)
|
||||
}
|
||||
|
||||
// Create ~/.claude as a real directory with a marker file
|
||||
claudeDir := filepath.Join(fakeHome, ".claude")
|
||||
if err := os.MkdirAll(claudeDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir .claude: %v", err)
|
||||
}
|
||||
markerFile := filepath.Join(claudeDir, "marker.txt")
|
||||
if err := os.WriteFile(markerFile, []byte("test"), 0644); err != nil {
|
||||
t.Fatalf("write marker: %v", err)
|
||||
}
|
||||
|
||||
originalWd, _ := os.Getwd()
|
||||
defer os.Chdir(originalWd)
|
||||
if err := os.Chdir(townRoot); err != nil {
|
||||
t.Fatalf("chdir: %v", err)
|
||||
}
|
||||
|
||||
// Switch to personal
|
||||
cmd := &cobra.Command{}
|
||||
err := runAccountSwitch(cmd, []string{"personal"})
|
||||
if err != nil {
|
||||
t.Fatalf("runAccountSwitch failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify ~/.claude is now a symlink to personal
|
||||
fileInfo, err := os.Lstat(claudeDir)
|
||||
if err != nil {
|
||||
t.Fatalf("lstat .claude: %v", err)
|
||||
}
|
||||
if fileInfo.Mode()&os.ModeSymlink == 0 {
|
||||
t.Error("~/.claude is not a symlink")
|
||||
}
|
||||
|
||||
target, err := os.Readlink(claudeDir)
|
||||
if err != nil {
|
||||
t.Fatalf("readlink: %v", err)
|
||||
}
|
||||
if target != personalConfigDir {
|
||||
t.Errorf("symlink target = %q, want %q", target, personalConfigDir)
|
||||
}
|
||||
|
||||
// Verify original content was moved to work config dir
|
||||
movedMarker := filepath.Join(workConfigDir, "marker.txt")
|
||||
if _, err := os.Stat(movedMarker); err != nil {
|
||||
t.Errorf("marker file not moved to work config dir: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -443,6 +443,73 @@ func TestBeadsRemoveRoute(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestSlingCrossRigRoutingResolution verifies that sling can resolve rig paths
|
||||
// for cross-rig bead hooking using ExtractPrefix and GetRigPathForPrefix.
|
||||
// This is the fix for https://github.com/steveyegge/gastown/issues/148
|
||||
func TestSlingCrossRigRoutingResolution(t *testing.T) {
|
||||
townRoot := setupRoutingTestTown(t)
|
||||
|
||||
tests := []struct {
|
||||
beadID string
|
||||
expectedPath string // Relative to townRoot, or "." for town-level
|
||||
}{
|
||||
{"gt-mol-abc", "gastown/mayor/rig"},
|
||||
{"tr-task-xyz", "testrig/mayor/rig"},
|
||||
{"hq-cv-123", "."}, // Town-level beads
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.beadID, func(t *testing.T) {
|
||||
// Step 1: Extract prefix from bead ID
|
||||
prefix := beads.ExtractPrefix(tc.beadID)
|
||||
if prefix == "" {
|
||||
t.Fatalf("ExtractPrefix(%q) returned empty", tc.beadID)
|
||||
}
|
||||
|
||||
// Step 2: Resolve rig path from prefix
|
||||
rigPath := beads.GetRigPathForPrefix(townRoot, prefix)
|
||||
if rigPath == "" {
|
||||
t.Fatalf("GetRigPathForPrefix(%q, %q) returned empty", townRoot, prefix)
|
||||
}
|
||||
|
||||
// Step 3: Verify the path is correct
|
||||
var expectedFull string
|
||||
if tc.expectedPath == "." {
|
||||
expectedFull = townRoot
|
||||
} else {
|
||||
expectedFull = filepath.Join(townRoot, tc.expectedPath)
|
||||
}
|
||||
|
||||
if rigPath != expectedFull {
|
||||
t.Errorf("GetRigPathForPrefix resolved to %q, want %q", rigPath, expectedFull)
|
||||
}
|
||||
|
||||
// Step 4: Verify the .beads directory exists at that path
|
||||
beadsDir := filepath.Join(rigPath, ".beads")
|
||||
if _, err := os.Stat(beadsDir); os.IsNotExist(err) {
|
||||
t.Errorf(".beads directory doesn't exist at resolved path: %s", beadsDir)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestSlingCrossRigUnknownPrefix verifies behavior for unknown prefixes.
|
||||
func TestSlingCrossRigUnknownPrefix(t *testing.T) {
|
||||
townRoot := setupRoutingTestTown(t)
|
||||
|
||||
// An unknown prefix should return empty string
|
||||
unknownBeadID := "xx-unknown-123"
|
||||
prefix := beads.ExtractPrefix(unknownBeadID)
|
||||
if prefix != "xx-" {
|
||||
t.Fatalf("ExtractPrefix(%q) = %q, want %q", unknownBeadID, prefix, "xx-")
|
||||
}
|
||||
|
||||
rigPath := beads.GetRigPathForPrefix(townRoot, prefix)
|
||||
if rigPath != "" {
|
||||
t.Errorf("GetRigPathForPrefix for unknown prefix returned %q, want empty", rigPath)
|
||||
}
|
||||
}
|
||||
|
||||
// TestBeadsGetPrefixForRig verifies prefix lookup by rig name.
|
||||
func TestBeadsGetPrefixForRig(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
132
internal/cmd/beads_version.go
Normal file
132
internal/cmd/beads_version.go
Normal file
@@ -0,0 +1,132 @@
|
||||
// Package cmd provides CLI commands for the gt tool.
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// MinBeadsVersion is the minimum required beads version for Gas Town.
|
||||
// This version must include custom type support (bd-i54l).
|
||||
const MinBeadsVersion = "0.44.0"
|
||||
|
||||
// beadsVersion represents a parsed semantic version.
|
||||
type beadsVersion struct {
|
||||
major int
|
||||
minor int
|
||||
patch int
|
||||
}
|
||||
|
||||
// parseBeadsVersion parses a version string like "0.44.0" into components.
|
||||
func parseBeadsVersion(v string) (beadsVersion, error) {
|
||||
// Strip leading 'v' if present
|
||||
v = strings.TrimPrefix(v, "v")
|
||||
|
||||
// Split on dots
|
||||
parts := strings.Split(v, ".")
|
||||
if len(parts) < 2 {
|
||||
return beadsVersion{}, fmt.Errorf("invalid version format: %s", v)
|
||||
}
|
||||
|
||||
major, err := strconv.Atoi(parts[0])
|
||||
if err != nil {
|
||||
return beadsVersion{}, fmt.Errorf("invalid major version: %s", parts[0])
|
||||
}
|
||||
|
||||
minor, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return beadsVersion{}, fmt.Errorf("invalid minor version: %s", parts[1])
|
||||
}
|
||||
|
||||
patch := 0
|
||||
if len(parts) >= 3 {
|
||||
// Handle versions like "0.44.0-dev" - take only numeric prefix
|
||||
patchStr := parts[2]
|
||||
if idx := strings.IndexFunc(patchStr, func(r rune) bool {
|
||||
return r < '0' || r > '9'
|
||||
}); idx != -1 {
|
||||
patchStr = patchStr[:idx]
|
||||
}
|
||||
if patchStr != "" {
|
||||
patch, err = strconv.Atoi(patchStr)
|
||||
if err != nil {
|
||||
return beadsVersion{}, fmt.Errorf("invalid patch version: %s", parts[2])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return beadsVersion{major: major, minor: minor, patch: patch}, nil
|
||||
}
|
||||
|
||||
// compare returns -1 if v < other, 0 if equal, 1 if v > other.
|
||||
func (v beadsVersion) compare(other beadsVersion) int {
|
||||
if v.major != other.major {
|
||||
if v.major < other.major {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
if v.minor != other.minor {
|
||||
if v.minor < other.minor {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
if v.patch != other.patch {
|
||||
if v.patch < other.patch {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func getBeadsVersion() (string, error) {
|
||||
cmd := exec.Command("bd", "version")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
return "", fmt.Errorf("bd version failed: %s", string(exitErr.Stderr))
|
||||
}
|
||||
return "", fmt.Errorf("failed to run bd: %w (is beads installed?)", err)
|
||||
}
|
||||
|
||||
// Parse output like "bd version 0.44.0 (dev)"
|
||||
// or "bd version 0.44.0"
|
||||
re := regexp.MustCompile(`bd version (\d+\.\d+(?:\.\d+)?(?:-\w+)?)`)
|
||||
matches := re.FindStringSubmatch(string(output))
|
||||
if len(matches) < 2 {
|
||||
return "", fmt.Errorf("could not parse beads version from: %s", strings.TrimSpace(string(output)))
|
||||
}
|
||||
|
||||
return matches[1], nil
|
||||
}
|
||||
|
||||
// CheckBeadsVersion verifies that the installed beads version meets the minimum requirement.
|
||||
// Returns nil if the version is sufficient, or an error with details if not.
|
||||
func CheckBeadsVersion() error {
|
||||
installedStr, err := getBeadsVersion()
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot verify beads version: %w", err)
|
||||
}
|
||||
|
||||
installed, err := parseBeadsVersion(installedStr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot parse installed beads version %q: %w", installedStr, err)
|
||||
}
|
||||
|
||||
required, err := parseBeadsVersion(MinBeadsVersion)
|
||||
if err != nil {
|
||||
// This would be a bug in our code
|
||||
return fmt.Errorf("cannot parse required beads version %q: %w", MinBeadsVersion, err)
|
||||
}
|
||||
|
||||
if installed.compare(required) < 0 {
|
||||
return fmt.Errorf("beads version %s is required, but %s is installed\n\nPlease upgrade beads: go install github.com/steveyegge/beads/cmd/bd@latest", MinBeadsVersion, installedStr)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
68
internal/cmd/beads_version_test.go
Normal file
68
internal/cmd/beads_version_test.go
Normal file
@@ -0,0 +1,68 @@
|
||||
package cmd
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestParseBeadsVersion(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
want beadsVersion
|
||||
wantErr bool
|
||||
}{
|
||||
{"0.44.0", beadsVersion{0, 44, 0}, false},
|
||||
{"1.2.3", beadsVersion{1, 2, 3}, false},
|
||||
{"0.44.0-dev", beadsVersion{0, 44, 0}, false},
|
||||
{"v0.44.0", beadsVersion{0, 44, 0}, false},
|
||||
{"0.44", beadsVersion{0, 44, 0}, false},
|
||||
{"10.20.30", beadsVersion{10, 20, 30}, false},
|
||||
{"invalid", beadsVersion{}, true},
|
||||
{"", beadsVersion{}, true},
|
||||
{"a.b.c", beadsVersion{}, true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.input, func(t *testing.T) {
|
||||
got, err := parseBeadsVersion(tt.input)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("parseBeadsVersion(%q) error = %v, wantErr %v", tt.input, err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !tt.wantErr && got != tt.want {
|
||||
t.Errorf("parseBeadsVersion(%q) = %+v, want %+v", tt.input, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeadsVersionCompare(t *testing.T) {
|
||||
tests := []struct {
|
||||
v1 string
|
||||
v2 string
|
||||
want int
|
||||
}{
|
||||
{"0.44.0", "0.44.0", 0},
|
||||
{"0.44.0", "0.43.0", 1},
|
||||
{"0.43.0", "0.44.0", -1},
|
||||
{"1.0.0", "0.99.99", 1},
|
||||
{"0.44.1", "0.44.0", 1},
|
||||
{"0.44.0", "0.44.1", -1},
|
||||
{"1.2.3", "1.2.3", 0},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.v1+"_vs_"+tt.v2, func(t *testing.T) {
|
||||
v1, err := parseBeadsVersion(tt.v1)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse v1 %q: %v", tt.v1, err)
|
||||
}
|
||||
v2, err := parseBeadsVersion(tt.v2)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse v2 %q: %v", tt.v2, err)
|
||||
}
|
||||
|
||||
got := v1.compare(v2)
|
||||
if got != tt.want {
|
||||
t.Errorf("(%s).compare(%s) = %d, want %d", tt.v1, tt.v2, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -153,7 +153,7 @@ func runConfigAgentList(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// Collect all agents
|
||||
builtInAgents := []string{"claude", "gemini", "codex"}
|
||||
builtInAgents := config.ListAgentPresets()
|
||||
customAgents := make(map[string]*config.RuntimeConfig)
|
||||
if townSettings.Agents != nil {
|
||||
for name, runtime := range townSettings.Agents {
|
||||
@@ -330,7 +330,7 @@ func runConfigAgentSet(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf("Agent '%s' set to: %s\n", style.Bold.Render(name), commandLine)
|
||||
|
||||
// Check if this overrides a built-in
|
||||
builtInAgents := []string{"claude", "gemini", "codex"}
|
||||
builtInAgents := config.ListAgentPresets()
|
||||
for _, builtin := range builtInAgents {
|
||||
if name == builtin {
|
||||
fmt.Printf("\n%s\n", style.Dim.Render("(overriding built-in '"+builtin+"' preset)"))
|
||||
@@ -350,7 +350,7 @@ func runConfigAgentRemove(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// Check if trying to remove built-in
|
||||
builtInAgents := []string{"claude", "gemini", "codex"}
|
||||
builtInAgents := config.ListAgentPresets()
|
||||
for _, builtin := range builtInAgents {
|
||||
if name == builtin {
|
||||
return fmt.Errorf("cannot remove built-in agent '%s' (use 'gt config agent set' to override it)", name)
|
||||
@@ -415,7 +415,7 @@ func runConfigDefaultAgent(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Verify agent exists
|
||||
isValid := false
|
||||
builtInAgents := []string{"claude", "gemini", "codex"}
|
||||
builtInAgents := config.ListAgentPresets()
|
||||
for _, builtin := range builtInAgents {
|
||||
if name == builtin {
|
||||
isValid = true
|
||||
|
||||
@@ -1130,8 +1130,9 @@ func getIssueDetailsBatch(issueIDs []string) map[string]*issueDetails {
|
||||
return result
|
||||
}
|
||||
|
||||
// Build args: bd show id1 id2 id3 ... --json
|
||||
args := append([]string{"show"}, issueIDs...)
|
||||
// Build args: bd --no-daemon show id1 id2 id3 ... --json
|
||||
// Use --no-daemon to ensure fresh data (avoid stale cache from daemon)
|
||||
args := append([]string{"--no-daemon", "show"}, issueIDs...)
|
||||
args = append(args, "--json")
|
||||
|
||||
showCmd := exec.Command("bd", args...)
|
||||
@@ -1177,7 +1178,8 @@ func getIssueDetailsBatch(issueIDs []string) map[string]*issueDetails {
|
||||
// Prefer getIssueDetailsBatch for multiple issues to avoid N+1 subprocess calls.
|
||||
func getIssueDetails(issueID string) *issueDetails {
|
||||
// Use bd show with routing - it should find the issue in the right rig
|
||||
showCmd := exec.Command("bd", "show", issueID, "--json")
|
||||
// Use --no-daemon to ensure fresh data (avoid stale cache)
|
||||
showCmd := exec.Command("bd", "--no-daemon", "show", issueID, "--json")
|
||||
var stdout bytes.Buffer
|
||||
showCmd.Stdout = &stdout
|
||||
|
||||
|
||||
@@ -18,15 +18,24 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
costsJSON bool
|
||||
costsToday bool
|
||||
costsWeek bool
|
||||
costsByRole bool
|
||||
costsByRig bool
|
||||
costsJSON bool
|
||||
costsToday bool
|
||||
costsWeek bool
|
||||
costsByRole bool
|
||||
costsByRig bool
|
||||
costsVerbose bool
|
||||
|
||||
// Record subcommand flags
|
||||
recordSession string
|
||||
recordWorkItem string
|
||||
|
||||
// Digest subcommand flags
|
||||
digestYesterday bool
|
||||
digestDate string
|
||||
digestDryRun bool
|
||||
|
||||
// Migrate subcommand flags
|
||||
migrateDryRun bool
|
||||
)
|
||||
|
||||
var costsCmd = &cobra.Command{
|
||||
@@ -37,24 +46,34 @@ var costsCmd = &cobra.Command{
|
||||
|
||||
By default, shows live costs scraped from running tmux sessions.
|
||||
|
||||
Cost tracking uses ephemeral wisps for individual sessions that are
|
||||
aggregated into daily "Cost Report" digest beads for audit purposes.
|
||||
|
||||
Examples:
|
||||
gt costs # Live costs from running sessions
|
||||
gt costs --today # Today's total from session events
|
||||
gt costs --week # This week's total
|
||||
gt costs --today # Today's costs from wisps (not yet digested)
|
||||
gt costs --week # This week's costs from digest beads + today's wisps
|
||||
gt costs --by-role # Breakdown by role (polecat, witness, etc.)
|
||||
gt costs --by-rig # Breakdown by rig
|
||||
gt costs --json # Output as JSON`,
|
||||
gt costs --json # Output as JSON
|
||||
|
||||
Subcommands:
|
||||
gt costs record # Record session cost as ephemeral wisp (Stop hook)
|
||||
gt costs digest # Aggregate wisps into daily digest bead (Deacon patrol)`,
|
||||
RunE: runCosts,
|
||||
}
|
||||
|
||||
var costsRecordCmd = &cobra.Command{
|
||||
Use: "record",
|
||||
Short: "Record session cost as a bead event (called by Stop hook)",
|
||||
Long: `Record the final cost of a session as a session.ended event in beads.
|
||||
Short: "Record session cost as an ephemeral wisp (called by Stop hook)",
|
||||
Long: `Record the final cost of a session as an ephemeral wisp.
|
||||
|
||||
This command is intended to be called from a Claude Code Stop hook.
|
||||
It captures the final cost from the tmux session and creates an event
|
||||
bead with the cost data.
|
||||
It captures the final cost from the tmux session and creates an ephemeral
|
||||
event that is NOT exported to JSONL (avoiding log-in-database pollution).
|
||||
|
||||
Session cost wisps are aggregated daily by 'gt costs digest' into a single
|
||||
permanent "Cost Report YYYY-MM-DD" bead for audit purposes.
|
||||
|
||||
Examples:
|
||||
gt costs record --session gt-gastown-toast
|
||||
@@ -62,6 +81,46 @@ Examples:
|
||||
RunE: runCostsRecord,
|
||||
}
|
||||
|
||||
var costsDigestCmd = &cobra.Command{
|
||||
Use: "digest",
|
||||
Short: "Aggregate session cost wisps into a daily digest bead",
|
||||
Long: `Aggregate ephemeral session cost wisps into a permanent daily digest.
|
||||
|
||||
This command is intended to be run by Deacon patrol (daily) or manually.
|
||||
It queries session.ended wisps for a target date, creates a single aggregate
|
||||
"Cost Report YYYY-MM-DD" bead, then deletes the source wisps.
|
||||
|
||||
The resulting digest bead is permanent (exported to JSONL, synced via git)
|
||||
and provides an audit trail without log-in-database pollution.
|
||||
|
||||
Examples:
|
||||
gt costs digest --yesterday # Digest yesterday's costs (default for patrol)
|
||||
gt costs digest --date 2026-01-07 # Digest a specific date
|
||||
gt costs digest --yesterday --dry-run # Preview without changes`,
|
||||
RunE: runCostsDigest,
|
||||
}
|
||||
|
||||
var costsMigrateCmd = &cobra.Command{
|
||||
Use: "migrate",
|
||||
Short: "Migrate legacy session.ended beads to the new wisp architecture",
|
||||
Long: `Migrate legacy session.ended event beads to the new cost tracking system.
|
||||
|
||||
This command handles the transition from the old architecture (where each
|
||||
session.ended event was a permanent bead) to the new wisp-based system.
|
||||
|
||||
The migration:
|
||||
1. Finds all open session.ended event beads (should be none if auto-close worked)
|
||||
2. Closes them with reason "migrated to wisp architecture"
|
||||
|
||||
Legacy beads remain in the database for historical queries but won't interfere
|
||||
with the new wisp-based cost tracking.
|
||||
|
||||
Examples:
|
||||
gt costs migrate # Migrate legacy beads
|
||||
gt costs migrate --dry-run # Preview what would be migrated`,
|
||||
RunE: runCostsMigrate,
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(costsCmd)
|
||||
costsCmd.Flags().BoolVar(&costsJSON, "json", false, "Output as JSON")
|
||||
@@ -69,11 +128,22 @@ func init() {
|
||||
costsCmd.Flags().BoolVar(&costsWeek, "week", false, "Show this week's total from session events")
|
||||
costsCmd.Flags().BoolVar(&costsByRole, "by-role", false, "Show breakdown by role")
|
||||
costsCmd.Flags().BoolVar(&costsByRig, "by-rig", false, "Show breakdown by rig")
|
||||
costsCmd.Flags().BoolVarP(&costsVerbose, "verbose", "v", false, "Show debug output for failures")
|
||||
|
||||
// Add record subcommand
|
||||
costsCmd.AddCommand(costsRecordCmd)
|
||||
costsRecordCmd.Flags().StringVar(&recordSession, "session", "", "Tmux session name to record")
|
||||
costsRecordCmd.Flags().StringVar(&recordWorkItem, "work-item", "", "Work item ID (bead) for attribution")
|
||||
|
||||
// Add digest subcommand
|
||||
costsCmd.AddCommand(costsDigestCmd)
|
||||
costsDigestCmd.Flags().BoolVar(&digestYesterday, "yesterday", false, "Digest yesterday's costs (default for patrol)")
|
||||
costsDigestCmd.Flags().StringVar(&digestDate, "date", "", "Digest a specific date (YYYY-MM-DD)")
|
||||
costsDigestCmd.Flags().BoolVar(&digestDryRun, "dry-run", false, "Preview what would be done without making changes")
|
||||
|
||||
// Add migrate subcommand
|
||||
costsCmd.AddCommand(costsMigrateCmd)
|
||||
costsMigrateCmd.Flags().BoolVar(&migrateDryRun, "dry-run", false, "Preview what would be migrated without making changes")
|
||||
}
|
||||
|
||||
// SessionCost represents cost info for a single session.
|
||||
@@ -150,8 +220,8 @@ func runLiveCosts() error {
|
||||
// Extract cost from content
|
||||
cost := extractCost(content)
|
||||
|
||||
// Check if Claude is running
|
||||
running := t.IsClaudeRunning(session)
|
||||
// Check if an agent appears to be running
|
||||
running := t.IsAgentRunning(session)
|
||||
|
||||
costs = append(costs, SessionCost{
|
||||
Session: session,
|
||||
@@ -180,46 +250,48 @@ func runLiveCosts() error {
|
||||
}
|
||||
|
||||
func runCostsFromLedger() error {
|
||||
// Query session events from beads
|
||||
entries, err := querySessionEvents()
|
||||
if err != nil {
|
||||
return fmt.Errorf("querying session events: %w", err)
|
||||
now := time.Now()
|
||||
var entries []CostEntry
|
||||
var err error
|
||||
|
||||
if costsToday {
|
||||
// For today: query ephemeral wisps (not yet digested)
|
||||
// This gives real-time view of today's costs
|
||||
entries, err = querySessionCostWisps(now)
|
||||
if err != nil {
|
||||
return fmt.Errorf("querying session cost wisps: %w", err)
|
||||
}
|
||||
} else if costsWeek {
|
||||
// For week: query digest beads (costs.digest events)
|
||||
// These are the aggregated daily reports
|
||||
entries, err = queryDigestBeads(7)
|
||||
if err != nil {
|
||||
return fmt.Errorf("querying digest beads: %w", err)
|
||||
}
|
||||
|
||||
// Also include today's wisps (not yet digested)
|
||||
todayWisps, _ := querySessionCostWisps(now)
|
||||
entries = append(entries, todayWisps...)
|
||||
} else {
|
||||
// No time filter: query both digests and legacy session.ended events
|
||||
// (for backwards compatibility during migration)
|
||||
entries, err = querySessionEvents()
|
||||
if err != nil {
|
||||
return fmt.Errorf("querying session events: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(entries) == 0 {
|
||||
fmt.Println(style.Dim.Render("No session events found. Costs are recorded when sessions end."))
|
||||
fmt.Println(style.Dim.Render("No cost data found. Costs are recorded when sessions end."))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Filter entries by time period
|
||||
var filtered []CostEntry
|
||||
now := time.Now()
|
||||
|
||||
for _, entry := range entries {
|
||||
if costsToday {
|
||||
// Today: same day
|
||||
if entry.EndedAt.Year() == now.Year() &&
|
||||
entry.EndedAt.YearDay() == now.YearDay() {
|
||||
filtered = append(filtered, entry)
|
||||
}
|
||||
} else if costsWeek {
|
||||
// This week: within 7 days
|
||||
weekAgo := now.AddDate(0, 0, -7)
|
||||
if entry.EndedAt.After(weekAgo) {
|
||||
filtered = append(filtered, entry)
|
||||
}
|
||||
} else {
|
||||
// No time filter
|
||||
filtered = append(filtered, entry)
|
||||
}
|
||||
}
|
||||
|
||||
// Calculate totals
|
||||
var total float64
|
||||
byRole := make(map[string]float64)
|
||||
byRig := make(map[string]float64)
|
||||
|
||||
for _, entry := range filtered {
|
||||
for _, entry := range entries {
|
||||
total += entry.CostUSD
|
||||
byRole[entry.Role] += entry.CostUSD
|
||||
if entry.Rig != "" {
|
||||
@@ -250,7 +322,7 @@ func runCostsFromLedger() error {
|
||||
return outputCostsJSON(output)
|
||||
}
|
||||
|
||||
return outputLedgerHuman(output, filtered)
|
||||
return outputLedgerHuman(output, entries)
|
||||
}
|
||||
|
||||
// SessionEvent represents a session.ended event from beads.
|
||||
@@ -362,6 +434,84 @@ func querySessionEvents() ([]CostEntry, error) {
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// queryDigestBeads queries costs.digest events from the past N days and extracts session entries.
|
||||
func queryDigestBeads(days int) ([]CostEntry, error) {
|
||||
// Get list of event IDs
|
||||
listArgs := []string{
|
||||
"list",
|
||||
"--type=event",
|
||||
"--all",
|
||||
"--limit=0",
|
||||
"--json",
|
||||
}
|
||||
|
||||
listCmd := exec.Command("bd", listArgs...)
|
||||
listOutput, err := listCmd.Output()
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var listItems []EventListItem
|
||||
if err := json.Unmarshal(listOutput, &listItems); err != nil {
|
||||
return nil, fmt.Errorf("parsing event list: %w", err)
|
||||
}
|
||||
|
||||
if len(listItems) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get full details for all events
|
||||
showArgs := []string{"show", "--json"}
|
||||
for _, item := range listItems {
|
||||
showArgs = append(showArgs, item.ID)
|
||||
}
|
||||
|
||||
showCmd := exec.Command("bd", showArgs...)
|
||||
showOutput, err := showCmd.Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("showing events: %w", err)
|
||||
}
|
||||
|
||||
var events []SessionEvent
|
||||
if err := json.Unmarshal(showOutput, &events); err != nil {
|
||||
return nil, fmt.Errorf("parsing event details: %w", err)
|
||||
}
|
||||
|
||||
// Calculate date range
|
||||
now := time.Now()
|
||||
cutoff := now.AddDate(0, 0, -days)
|
||||
|
||||
var entries []CostEntry
|
||||
for _, event := range events {
|
||||
// Filter for costs.digest events only
|
||||
if event.EventKind != "costs.digest" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse the digest payload
|
||||
var digest CostDigest
|
||||
if event.Payload != "" {
|
||||
if err := json.Unmarshal([]byte(event.Payload), &digest); err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Check date is within range
|
||||
digestDate, err := time.Parse("2006-01-02", digest.Date)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if digestDate.Before(cutoff) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract individual session entries from the digest
|
||||
entries = append(entries, digest.Sessions...)
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// parseSessionName extracts role, rig, and worker from a session name.
|
||||
// Session names follow the pattern: gt-<rig>-<worker> or gt-<global-agent>
|
||||
// Examples:
|
||||
@@ -428,7 +578,6 @@ func extractCost(content string) float64 {
|
||||
return cost
|
||||
}
|
||||
|
||||
|
||||
func outputCostsJSON(output CostsOutput) error {
|
||||
enc := json.NewEncoder(os.Stdout)
|
||||
enc.SetIndent("", " ")
|
||||
@@ -575,9 +724,14 @@ func runCostsRecord(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("marshaling payload: %w", err)
|
||||
}
|
||||
|
||||
// Build bd create command
|
||||
// Build bd create command for ephemeral wisp
|
||||
// Using --ephemeral creates a wisp that:
|
||||
// - Is stored locally only (not exported to JSONL)
|
||||
// - Won't pollute git history with O(sessions/day) events
|
||||
// - Will be aggregated into daily digests by 'gt costs digest'
|
||||
bdArgs := []string{
|
||||
"create",
|
||||
"--ephemeral",
|
||||
"--type=event",
|
||||
"--title=" + title,
|
||||
"--event-category=session.ended",
|
||||
@@ -594,20 +748,28 @@ func runCostsRecord(cmd *cobra.Command, args []string) error {
|
||||
// NOTE: We intentionally don't use --rig flag here because it causes
|
||||
// event fields (event_kind, actor, payload) to not be stored properly.
|
||||
// The bd command will auto-detect the correct rig from cwd.
|
||||
// TODO: File beads bug about --rig flag losing event fields.
|
||||
|
||||
// Execute bd create
|
||||
bdCmd := exec.Command("bd", bdArgs...)
|
||||
output, err := bdCmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating session event: %w\nOutput: %s", err, string(output))
|
||||
return fmt.Errorf("creating session cost wisp: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
|
||||
eventID := strings.TrimSpace(string(output))
|
||||
wispID := strings.TrimSpace(string(output))
|
||||
|
||||
// Auto-close session cost wisps immediately after creation.
|
||||
// These are informational records that don't need to stay open.
|
||||
// The wisp data is preserved and queryable until digested.
|
||||
closeCmd := exec.Command("bd", "close", wispID, "--reason=auto-closed session cost wisp")
|
||||
if closeErr := closeCmd.Run(); closeErr != nil {
|
||||
// Non-fatal: wisp was created, just couldn't auto-close
|
||||
fmt.Fprintf(os.Stderr, "warning: could not auto-close session cost wisp %s: %v\n", wispID, closeErr)
|
||||
}
|
||||
|
||||
// Output confirmation (silent if cost is zero and no work item)
|
||||
if cost > 0 || recordWorkItem != "" {
|
||||
fmt.Printf("%s Recorded $%.2f for %s (event: %s)", style.Success.Render("✓"), cost, session, eventID)
|
||||
fmt.Printf("%s Recorded $%.2f for %s (wisp: %s)", style.Success.Render("✓"), cost, session, wispID)
|
||||
if recordWorkItem != "" {
|
||||
fmt.Printf(" (work: %s)", recordWorkItem)
|
||||
}
|
||||
@@ -640,9 +802,13 @@ func deriveSessionName() string {
|
||||
return fmt.Sprintf("gt-%s-crew-%s", rig, crew)
|
||||
}
|
||||
|
||||
// Town-level roles (mayor, deacon): gt-{town}-{role}
|
||||
if (role == "mayor" || role == "deacon") && town != "" {
|
||||
return fmt.Sprintf("gt-%s-%s", town, role)
|
||||
// Town-level roles (mayor, deacon): gt-{town}-{role} or gt-{role}
|
||||
if role == "mayor" || role == "deacon" {
|
||||
if town != "" {
|
||||
return fmt.Sprintf("gt-%s-%s", town, role)
|
||||
}
|
||||
// No town set - use simple gt-{role} pattern
|
||||
return fmt.Sprintf("gt-%s", role)
|
||||
}
|
||||
|
||||
// Rig-based roles (witness, refinery): gt-{rig}-{role}
|
||||
@@ -655,12 +821,9 @@ func deriveSessionName() string {
|
||||
|
||||
// detectCurrentTmuxSession returns the current tmux session name if running inside tmux.
|
||||
// Uses `tmux display-message -p '#S'` which prints the session name.
|
||||
// Note: We don't check TMUX env var because it may not be inherited when Claude Code
|
||||
// runs bash commands, even though we are inside a tmux session.
|
||||
func detectCurrentTmuxSession() string {
|
||||
// Check if we're inside tmux
|
||||
if os.Getenv("TMUX") == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
cmd := exec.Command("tmux", "display-message", "-p", "#S")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
@@ -669,7 +832,8 @@ func detectCurrentTmuxSession() string {
|
||||
|
||||
session := strings.TrimSpace(string(output))
|
||||
// Only return if it looks like a Gas Town session
|
||||
if strings.HasPrefix(session, constants.SessionPrefix) {
|
||||
// Accept both gt- (rig sessions) and hq- (town-level sessions like hq-mayor)
|
||||
if strings.HasPrefix(session, constants.SessionPrefix) || strings.HasPrefix(session, constants.HQSessionPrefix) {
|
||||
return session
|
||||
}
|
||||
return ""
|
||||
@@ -712,3 +876,451 @@ func buildAgentPath(role, rig, worker string) string {
|
||||
return worker
|
||||
}
|
||||
}
|
||||
|
||||
// CostDigest represents the aggregated daily cost report.
|
||||
type CostDigest struct {
|
||||
Date string `json:"date"`
|
||||
TotalUSD float64 `json:"total_usd"`
|
||||
SessionCount int `json:"session_count"`
|
||||
Sessions []CostEntry `json:"sessions"`
|
||||
ByRole map[string]float64 `json:"by_role"`
|
||||
ByRig map[string]float64 `json:"by_rig,omitempty"`
|
||||
}
|
||||
|
||||
// WispListOutput represents the JSON output from bd mol wisp list.
|
||||
type WispListOutput struct {
|
||||
Wisps []WispItem `json:"wisps"`
|
||||
Count int `json:"count"`
|
||||
}
|
||||
|
||||
// WispItem represents a single wisp from bd mol wisp list.
|
||||
type WispItem struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Status string `json:"status"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
}
|
||||
|
||||
// runCostsDigest aggregates session cost wisps into a daily digest bead.
|
||||
func runCostsDigest(cmd *cobra.Command, args []string) error {
|
||||
// Determine target date
|
||||
var targetDate time.Time
|
||||
|
||||
if digestDate != "" {
|
||||
parsed, err := time.Parse("2006-01-02", digestDate)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid date format (use YYYY-MM-DD): %w", err)
|
||||
}
|
||||
targetDate = parsed
|
||||
} else if digestYesterday {
|
||||
targetDate = time.Now().AddDate(0, 0, -1)
|
||||
} else {
|
||||
return fmt.Errorf("specify --yesterday or --date YYYY-MM-DD")
|
||||
}
|
||||
|
||||
dateStr := targetDate.Format("2006-01-02")
|
||||
|
||||
// Query ephemeral session.ended wisps for target date
|
||||
wisps, err := querySessionCostWisps(targetDate)
|
||||
if err != nil {
|
||||
return fmt.Errorf("querying session cost wisps: %w", err)
|
||||
}
|
||||
|
||||
if len(wisps) == 0 {
|
||||
fmt.Printf("%s No session cost wisps found for %s\n", style.Dim.Render("○"), dateStr)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Build digest
|
||||
digest := CostDigest{
|
||||
Date: dateStr,
|
||||
Sessions: wisps,
|
||||
ByRole: make(map[string]float64),
|
||||
ByRig: make(map[string]float64),
|
||||
}
|
||||
|
||||
for _, w := range wisps {
|
||||
digest.TotalUSD += w.CostUSD
|
||||
digest.SessionCount++
|
||||
digest.ByRole[w.Role] += w.CostUSD
|
||||
if w.Rig != "" {
|
||||
digest.ByRig[w.Rig] += w.CostUSD
|
||||
}
|
||||
}
|
||||
|
||||
if digestDryRun {
|
||||
fmt.Printf("%s [DRY RUN] Would create Cost Report %s:\n", style.Bold.Render("📊"), dateStr)
|
||||
fmt.Printf(" Total: $%.2f\n", digest.TotalUSD)
|
||||
fmt.Printf(" Sessions: %d\n", digest.SessionCount)
|
||||
fmt.Printf(" By Role:\n")
|
||||
for role, cost := range digest.ByRole {
|
||||
fmt.Printf(" %s: $%.2f\n", role, cost)
|
||||
}
|
||||
if len(digest.ByRig) > 0 {
|
||||
fmt.Printf(" By Rig:\n")
|
||||
for rig, cost := range digest.ByRig {
|
||||
fmt.Printf(" %s: $%.2f\n", rig, cost)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create permanent digest bead
|
||||
digestID, err := createCostDigestBead(digest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating digest bead: %w", err)
|
||||
}
|
||||
|
||||
// Delete source wisps (they're ephemeral, use bd mol burn)
|
||||
deletedCount, deleteErr := deleteSessionCostWisps(targetDate)
|
||||
if deleteErr != nil {
|
||||
fmt.Fprintf(os.Stderr, "warning: failed to delete some source wisps: %v\n", deleteErr)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Created Cost Report %s (bead: %s)\n", style.Success.Render("✓"), dateStr, digestID)
|
||||
fmt.Printf(" Total: $%.2f from %d sessions\n", digest.TotalUSD, digest.SessionCount)
|
||||
if deletedCount > 0 {
|
||||
fmt.Printf(" Deleted %d source wisps\n", deletedCount)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// querySessionCostWisps queries ephemeral session.ended events for a target date.
|
||||
func querySessionCostWisps(targetDate time.Time) ([]CostEntry, error) {
|
||||
// List all wisps including closed ones
|
||||
listCmd := exec.Command("bd", "mol", "wisp", "list", "--all", "--json")
|
||||
listOutput, err := listCmd.Output()
|
||||
if err != nil {
|
||||
// No wisps database or command failed
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] wisp list failed: %v\n", err)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var wispList WispListOutput
|
||||
if err := json.Unmarshal(listOutput, &wispList); err != nil {
|
||||
return nil, fmt.Errorf("parsing wisp list: %w", err)
|
||||
}
|
||||
|
||||
if wispList.Count == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Batch all wisp IDs into a single bd show call to avoid N+1 queries
|
||||
showArgs := []string{"show", "--json"}
|
||||
for _, wisp := range wispList.Wisps {
|
||||
showArgs = append(showArgs, wisp.ID)
|
||||
}
|
||||
|
||||
showCmd := exec.Command("bd", showArgs...)
|
||||
showOutput, err := showCmd.Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("showing wisps: %w", err)
|
||||
}
|
||||
|
||||
var events []SessionEvent
|
||||
if err := json.Unmarshal(showOutput, &events); err != nil {
|
||||
return nil, fmt.Errorf("parsing wisp details: %w", err)
|
||||
}
|
||||
|
||||
var sessionCostWisps []CostEntry
|
||||
targetDay := targetDate.Format("2006-01-02")
|
||||
|
||||
for _, event := range events {
|
||||
// Filter for session.ended events only
|
||||
if event.EventKind != "session.ended" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse payload
|
||||
var payload SessionPayload
|
||||
if event.Payload != "" {
|
||||
if err := json.Unmarshal([]byte(event.Payload), &payload); err != nil {
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] payload unmarshal failed for event %s: %v\n", event.ID, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Parse ended_at and filter by target date
|
||||
endedAt := event.CreatedAt
|
||||
if payload.EndedAt != "" {
|
||||
if parsed, err := time.Parse(time.RFC3339, payload.EndedAt); err == nil {
|
||||
endedAt = parsed
|
||||
}
|
||||
}
|
||||
|
||||
// Check if this event is from the target date
|
||||
if endedAt.Format("2006-01-02") != targetDay {
|
||||
continue
|
||||
}
|
||||
|
||||
sessionCostWisps = append(sessionCostWisps, CostEntry{
|
||||
SessionID: payload.SessionID,
|
||||
Role: payload.Role,
|
||||
Rig: payload.Rig,
|
||||
Worker: payload.Worker,
|
||||
CostUSD: payload.CostUSD,
|
||||
EndedAt: endedAt,
|
||||
WorkItem: event.Target,
|
||||
})
|
||||
}
|
||||
|
||||
return sessionCostWisps, nil
|
||||
}
|
||||
|
||||
// createCostDigestBead creates a permanent bead for the daily cost digest.
|
||||
func createCostDigestBead(digest CostDigest) (string, error) {
|
||||
// Build description with aggregate data
|
||||
var desc strings.Builder
|
||||
desc.WriteString(fmt.Sprintf("Daily cost aggregate for %s.\n\n", digest.Date))
|
||||
desc.WriteString(fmt.Sprintf("**Total:** $%.2f from %d sessions\n\n", digest.TotalUSD, digest.SessionCount))
|
||||
|
||||
if len(digest.ByRole) > 0 {
|
||||
desc.WriteString("## By Role\n")
|
||||
roles := make([]string, 0, len(digest.ByRole))
|
||||
for role := range digest.ByRole {
|
||||
roles = append(roles, role)
|
||||
}
|
||||
sort.Strings(roles)
|
||||
for _, role := range roles {
|
||||
icon := constants.RoleEmoji(role)
|
||||
desc.WriteString(fmt.Sprintf("- %s %s: $%.2f\n", icon, role, digest.ByRole[role]))
|
||||
}
|
||||
desc.WriteString("\n")
|
||||
}
|
||||
|
||||
if len(digest.ByRig) > 0 {
|
||||
desc.WriteString("## By Rig\n")
|
||||
rigs := make([]string, 0, len(digest.ByRig))
|
||||
for rig := range digest.ByRig {
|
||||
rigs = append(rigs, rig)
|
||||
}
|
||||
sort.Strings(rigs)
|
||||
for _, rig := range rigs {
|
||||
desc.WriteString(fmt.Sprintf("- %s: $%.2f\n", rig, digest.ByRig[rig]))
|
||||
}
|
||||
desc.WriteString("\n")
|
||||
}
|
||||
|
||||
// Build payload JSON with full session details
|
||||
payloadJSON, err := json.Marshal(digest)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("marshaling digest payload: %w", err)
|
||||
}
|
||||
|
||||
// Create the digest bead (NOT ephemeral - this is permanent)
|
||||
title := fmt.Sprintf("Cost Report %s", digest.Date)
|
||||
bdArgs := []string{
|
||||
"create",
|
||||
"--type=event",
|
||||
"--title=" + title,
|
||||
"--event-category=costs.digest",
|
||||
"--event-payload=" + string(payloadJSON),
|
||||
"--description=" + desc.String(),
|
||||
"--silent",
|
||||
}
|
||||
|
||||
bdCmd := exec.Command("bd", bdArgs...)
|
||||
output, err := bdCmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("creating digest bead: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
|
||||
digestID := strings.TrimSpace(string(output))
|
||||
|
||||
// Auto-close the digest (it's an audit record, not work)
|
||||
closeCmd := exec.Command("bd", "close", digestID, "--reason=daily cost digest")
|
||||
_ = closeCmd.Run() // Best effort
|
||||
|
||||
return digestID, nil
|
||||
}
|
||||
|
||||
// deleteSessionCostWisps deletes ephemeral session.ended wisps for a target date.
|
||||
func deleteSessionCostWisps(targetDate time.Time) (int, error) {
|
||||
// List all wisps
|
||||
listCmd := exec.Command("bd", "mol", "wisp", "list", "--all", "--json")
|
||||
listOutput, err := listCmd.Output()
|
||||
if err != nil {
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] wisp list failed in deletion: %v\n", err)
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
var wispList WispListOutput
|
||||
if err := json.Unmarshal(listOutput, &wispList); err != nil {
|
||||
return 0, fmt.Errorf("parsing wisp list: %w", err)
|
||||
}
|
||||
|
||||
targetDay := targetDate.Format("2006-01-02")
|
||||
|
||||
// Collect all wisp IDs that match our criteria
|
||||
var wispIDsToDelete []string
|
||||
|
||||
for _, wisp := range wispList.Wisps {
|
||||
// Get full wisp details to check if it's a session.ended event
|
||||
showCmd := exec.Command("bd", "show", wisp.ID, "--json")
|
||||
showOutput, err := showCmd.Output()
|
||||
if err != nil {
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] bd show failed for wisp %s: %v\n", wisp.ID, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
var events []SessionEvent
|
||||
if err := json.Unmarshal(showOutput, &events); err != nil {
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] JSON unmarshal failed for wisp %s: %v\n", wisp.ID, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if len(events) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
event := events[0]
|
||||
|
||||
// Only delete session.ended wisps
|
||||
if event.EventKind != "session.ended" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse payload to get ended_at for date filtering
|
||||
var payload SessionPayload
|
||||
if event.Payload != "" {
|
||||
if err := json.Unmarshal([]byte(event.Payload), &payload); err != nil {
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] payload unmarshal failed for wisp %s: %v\n", wisp.ID, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
endedAt := event.CreatedAt
|
||||
if payload.EndedAt != "" {
|
||||
if parsed, err := time.Parse(time.RFC3339, payload.EndedAt); err == nil {
|
||||
endedAt = parsed
|
||||
}
|
||||
}
|
||||
|
||||
// Only delete wisps from the target date
|
||||
if endedAt.Format("2006-01-02") != targetDay {
|
||||
continue
|
||||
}
|
||||
|
||||
wispIDsToDelete = append(wispIDsToDelete, wisp.ID)
|
||||
}
|
||||
|
||||
if len(wispIDsToDelete) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Batch delete all wisps in a single subprocess call
|
||||
burnArgs := append([]string{"mol", "burn", "--force"}, wispIDsToDelete...)
|
||||
burnCmd := exec.Command("bd", burnArgs...)
|
||||
if burnErr := burnCmd.Run(); burnErr != nil {
|
||||
return 0, fmt.Errorf("batch burn failed: %w", burnErr)
|
||||
}
|
||||
|
||||
return len(wispIDsToDelete), nil
|
||||
}
|
||||
|
||||
// runCostsMigrate migrates legacy session.ended beads to the new architecture.
|
||||
func runCostsMigrate(cmd *cobra.Command, args []string) error {
|
||||
// Query all session.ended events (both open and closed)
|
||||
listArgs := []string{
|
||||
"list",
|
||||
"--type=event",
|
||||
"--all",
|
||||
"--limit=0",
|
||||
"--json",
|
||||
}
|
||||
|
||||
listCmd := exec.Command("bd", listArgs...)
|
||||
listOutput, err := listCmd.Output()
|
||||
if err != nil {
|
||||
fmt.Println(style.Dim.Render("No events found or bd command failed"))
|
||||
return nil
|
||||
}
|
||||
|
||||
var listItems []EventListItem
|
||||
if err := json.Unmarshal(listOutput, &listItems); err != nil {
|
||||
return fmt.Errorf("parsing event list: %w", err)
|
||||
}
|
||||
|
||||
if len(listItems) == 0 {
|
||||
fmt.Println(style.Dim.Render("No events found"))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get full details for all events
|
||||
showArgs := []string{"show", "--json"}
|
||||
for _, item := range listItems {
|
||||
showArgs = append(showArgs, item.ID)
|
||||
}
|
||||
|
||||
showCmd := exec.Command("bd", showArgs...)
|
||||
showOutput, err := showCmd.Output()
|
||||
if err != nil {
|
||||
return fmt.Errorf("showing events: %w", err)
|
||||
}
|
||||
|
||||
var events []SessionEvent
|
||||
if err := json.Unmarshal(showOutput, &events); err != nil {
|
||||
return fmt.Errorf("parsing event details: %w", err)
|
||||
}
|
||||
|
||||
// Find open session.ended events
|
||||
var openEvents []SessionEvent
|
||||
var closedCount int
|
||||
for _, event := range events {
|
||||
if event.EventKind != "session.ended" {
|
||||
continue
|
||||
}
|
||||
if event.Status == "closed" {
|
||||
closedCount++
|
||||
continue
|
||||
}
|
||||
openEvents = append(openEvents, event)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Legacy session.ended beads:\n", style.Bold.Render("📊"))
|
||||
fmt.Printf(" Closed: %d (no action needed)\n", closedCount)
|
||||
fmt.Printf(" Open: %d (will be closed)\n", len(openEvents))
|
||||
|
||||
if len(openEvents) == 0 {
|
||||
fmt.Println(style.Success.Render("\n✓ No migration needed - all session.ended events are already closed"))
|
||||
return nil
|
||||
}
|
||||
|
||||
if migrateDryRun {
|
||||
fmt.Printf("\n%s Would close %d open session.ended events\n", style.Bold.Render("[DRY RUN]"), len(openEvents))
|
||||
for _, event := range openEvents {
|
||||
fmt.Printf(" - %s: %s\n", event.ID, event.Title)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close all open session.ended events
|
||||
closedMigrated := 0
|
||||
for _, event := range openEvents {
|
||||
closeCmd := exec.Command("bd", "close", event.ID, "--reason=migrated to wisp architecture")
|
||||
if err := closeCmd.Run(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "warning: could not close %s: %v\n", event.ID, err)
|
||||
continue
|
||||
}
|
||||
closedMigrated++
|
||||
}
|
||||
|
||||
fmt.Printf("\n%s Migrated %d session.ended events (closed)\n", style.Success.Render("✓"), closedMigrated)
|
||||
fmt.Println(style.Dim.Render("Legacy beads preserved for historical queries."))
|
||||
fmt.Println(style.Dim.Render("New session costs will use ephemeral wisps + daily digests."))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -61,6 +61,20 @@ func TestDeriveSessionName(t *testing.T) {
|
||||
},
|
||||
expected: "gt-ai-deacon",
|
||||
},
|
||||
{
|
||||
name: "mayor session without GT_TOWN",
|
||||
envVars: map[string]string{
|
||||
"GT_ROLE": "mayor",
|
||||
},
|
||||
expected: "gt-mayor",
|
||||
},
|
||||
{
|
||||
name: "deacon session without GT_TOWN",
|
||||
envVars: map[string]string{
|
||||
"GT_ROLE": "deacon",
|
||||
},
|
||||
expected: "gt-deacon",
|
||||
},
|
||||
{
|
||||
name: "no env vars",
|
||||
envVars: map[string]string{},
|
||||
|
||||
@@ -8,16 +8,19 @@ import (
|
||||
|
||||
// Crew command flags
|
||||
var (
|
||||
crewRig string
|
||||
crewBranch bool
|
||||
crewJSON bool
|
||||
crewForce bool
|
||||
crewNoTmux bool
|
||||
crewDetached bool
|
||||
crewMessage string
|
||||
crewAccount string
|
||||
crewAll bool
|
||||
crewDryRun bool
|
||||
crewRig string
|
||||
crewBranch bool
|
||||
crewJSON bool
|
||||
crewForce bool
|
||||
crewPurge bool
|
||||
crewNoTmux bool
|
||||
crewDetached bool
|
||||
crewMessage string
|
||||
crewAccount string
|
||||
crewAgentOverride string
|
||||
crewAll bool
|
||||
crewListAll bool
|
||||
crewDryRun bool
|
||||
)
|
||||
|
||||
var crewCmd = &cobra.Command{
|
||||
@@ -75,7 +78,8 @@ Shows git branch, session state, and git status for each workspace.
|
||||
|
||||
Examples:
|
||||
gt crew list # List in current rig
|
||||
gt crew list --rig greenplace # List in specific rig
|
||||
gt crew list --rig greenplace # List in specific rig
|
||||
gt crew list --all # List in all rigs
|
||||
gt crew list --json # JSON output`,
|
||||
RunE: runCrewList,
|
||||
}
|
||||
@@ -117,11 +121,22 @@ var crewRemoveCmd = &cobra.Command{
|
||||
Checks for uncommitted changes and running sessions before removing.
|
||||
Use --force to skip checks and remove anyway.
|
||||
|
||||
The agent bead is CLOSED by default (preserves CV history). Use --purge
|
||||
to DELETE the agent bead entirely (for accidental/test crew that should
|
||||
leave no trace in the ledger).
|
||||
|
||||
--purge also:
|
||||
- Deletes the agent bead (not just closes it)
|
||||
- Unassigns any beads assigned to this crew member
|
||||
- Clears mail in the agent's inbox
|
||||
- Properly handles git worktrees (not just regular clones)
|
||||
|
||||
Examples:
|
||||
gt crew remove dave # Remove with safety checks
|
||||
gt crew remove dave emma fred # Remove multiple
|
||||
gt crew remove beads/grip beads/fang # Remove from specific rig
|
||||
gt crew remove dave --force # Force remove`,
|
||||
gt crew remove dave --force # Force remove (closes bead)
|
||||
gt crew remove test-crew --purge # Obliterate (deletes bead)`,
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
RunE: runCrewRemove,
|
||||
}
|
||||
@@ -246,25 +261,23 @@ var crewStartCmd = &cobra.Command{
|
||||
Long: `Start crew workers in a rig, creating workspaces if they don't exist.
|
||||
|
||||
The rig name can be provided as the first argument, or inferred from the
|
||||
current directory. Optionally specify crew member names to start specific
|
||||
workers, or use --all to start all crew members in the rig.
|
||||
current directory. If no crew names are specified, starts all crew in the rig.
|
||||
|
||||
The crew session starts in the background with Claude running and ready.
|
||||
|
||||
Examples:
|
||||
gt crew start gastown joe # Start joe in gastown rig
|
||||
gt crew start gastown --all # Start all crew in gastown rig
|
||||
gt crew start --all # Start all crew (rig inferred from cwd)
|
||||
gt crew start beads grip fang # Start grip and fang in beads rig`,
|
||||
gt crew start beads # Start all crew in beads rig
|
||||
gt crew start # Start all crew (rig inferred from cwd)
|
||||
gt crew start beads grip fang # Start specific crew in beads rig
|
||||
gt crew start gastown joe # Start joe in gastown rig`,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
// With --all, we can have 0 args (infer rig) or 1+ args (rig specified)
|
||||
if crewAll {
|
||||
return nil
|
||||
}
|
||||
// Without --all, need at least rig and one crew name
|
||||
if len(args) < 2 {
|
||||
return fmt.Errorf("requires rig and crew name, or use --all")
|
||||
}
|
||||
// Allow: 0 args (infer rig, default to --all)
|
||||
// 1 arg (rig specified, default to --all)
|
||||
// 2+ args (rig + specific crew names)
|
||||
return nil
|
||||
},
|
||||
RunE: runCrewStart,
|
||||
@@ -275,8 +288,8 @@ var crewStopCmd = &cobra.Command{
|
||||
Short: "Stop crew workspace session(s)",
|
||||
Long: `Stop one or more running crew workspace sessions.
|
||||
|
||||
Kills the tmux session(s) for the specified crew member(s). Use --all to
|
||||
stop all running crew sessions across all rigs.
|
||||
If a rig name is given alone, stops all crew in that rig. Otherwise stops
|
||||
the specified crew member(s).
|
||||
|
||||
The name can include the rig in slash format (e.g., beads/emma).
|
||||
If not specified, the rig is inferred from the current directory.
|
||||
@@ -285,11 +298,11 @@ Output is captured before stopping for debugging purposes (use --force
|
||||
to skip capture for faster shutdown).
|
||||
|
||||
Examples:
|
||||
gt crew stop dave # Stop dave's session
|
||||
gt crew stop beads/emma beads/grip # Stop multiple from specific rig
|
||||
gt crew stop beads # Stop all crew in beads rig
|
||||
gt crew stop # Stop all crew (rig inferred from cwd)
|
||||
gt crew stop beads/emma # Stop specific crew member
|
||||
gt crew stop dave # Stop dave in current rig
|
||||
gt crew stop --all # Stop all running crew sessions
|
||||
gt crew stop --all --rig beads # Stop all crew in beads rig
|
||||
gt crew stop --all --dry-run # Preview what would be stopped
|
||||
gt crew stop dave --force # Stop without capturing output`,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if crewAll {
|
||||
@@ -298,9 +311,9 @@ Examples:
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("requires at least 1 argument (or --all)")
|
||||
}
|
||||
// Allow: 0 args (infer rig, default to --all)
|
||||
// 1 arg (rig name → all in that rig, or crew name → specific crew)
|
||||
// 1+ args (specific crew names)
|
||||
return nil
|
||||
},
|
||||
RunE: runCrewStop,
|
||||
@@ -312,15 +325,18 @@ func init() {
|
||||
crewAddCmd.Flags().BoolVar(&crewBranch, "branch", false, "Create a feature branch (crew/<name>)")
|
||||
|
||||
crewListCmd.Flags().StringVar(&crewRig, "rig", "", "Filter by rig name")
|
||||
crewListCmd.Flags().BoolVar(&crewListAll, "all", false, "List crew workspaces in all rigs")
|
||||
crewListCmd.Flags().BoolVar(&crewJSON, "json", false, "Output as JSON")
|
||||
|
||||
crewAtCmd.Flags().StringVar(&crewRig, "rig", "", "Rig to use")
|
||||
crewAtCmd.Flags().BoolVar(&crewNoTmux, "no-tmux", false, "Just print directory path")
|
||||
crewAtCmd.Flags().BoolVarP(&crewDetached, "detached", "d", false, "Start session without attaching")
|
||||
crewAtCmd.Flags().StringVar(&crewAccount, "account", "", "Claude Code account handle to use (overrides default)")
|
||||
crewAtCmd.Flags().StringVar(&crewAgentOverride, "agent", "", "Agent alias to run crew worker with (overrides rig/town default)")
|
||||
|
||||
crewRemoveCmd.Flags().StringVar(&crewRig, "rig", "", "Rig to use")
|
||||
crewRemoveCmd.Flags().BoolVar(&crewForce, "force", false, "Force remove (skip safety checks)")
|
||||
crewRemoveCmd.Flags().BoolVar(&crewPurge, "purge", false, "Obliterate: delete agent bead, unassign work, clear mail")
|
||||
|
||||
crewRefreshCmd.Flags().StringVar(&crewRig, "rig", "", "Rig to use")
|
||||
crewRefreshCmd.Flags().StringVarP(&crewMessage, "message", "m", "", "Custom handoff message")
|
||||
@@ -339,6 +355,7 @@ func init() {
|
||||
|
||||
crewStartCmd.Flags().BoolVar(&crewAll, "all", false, "Start all crew members in the rig")
|
||||
crewStartCmd.Flags().StringVar(&crewAccount, "account", "", "Claude Code account handle to use")
|
||||
crewStartCmd.Flags().StringVar(&crewAgentOverride, "agent", "", "Agent alias to run crew worker with (overrides rig/town default)")
|
||||
|
||||
crewStopCmd.Flags().StringVar(&crewRig, "rig", "", "Rig to use (filter when using --all)")
|
||||
crewStopCmd.Flags().BoolVar(&crewAll, "all", false, "Stop all running crew sessions")
|
||||
|
||||
@@ -56,9 +56,7 @@ func runCrewAdd(cmd *cobra.Command, args []string) error {
|
||||
crewGit := git.NewGit(r.Path)
|
||||
crewMgr := crew.NewManager(r, crewGit)
|
||||
|
||||
// Beads for agent bead creation (use mayor/rig where beads.db lives)
|
||||
// The rig root .beads/ only has config.yaml, no database.
|
||||
bd := beads.New(filepath.Join(r.Path, "mayor", "rig"))
|
||||
bd := beads.New(beads.ResolveBeadsDir(r.Path))
|
||||
|
||||
// Track results
|
||||
var created []string
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
"github.com/steveyegge/gastown/internal/crew"
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
@@ -29,7 +30,19 @@ func runCrewAt(cmd *cobra.Command, args []string) error {
|
||||
// Try to detect from current directory
|
||||
detected, err := detectCrewFromCwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not detect crew workspace from current directory: %w\n\nUsage: gt crew at <name>", err)
|
||||
// Try to show available crew members if we can detect the rig
|
||||
hint := "\n\nUsage: gt crew at <name>"
|
||||
if crewRig != "" {
|
||||
if mgr, _, mgrErr := getCrewManager(crewRig); mgrErr == nil {
|
||||
if members, listErr := mgr.List(); listErr == nil && len(members) > 0 {
|
||||
hint = fmt.Sprintf("\n\nAvailable crew in %s:", crewRig)
|
||||
for _, m := range members {
|
||||
hint += fmt.Sprintf("\n %s", m.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("could not detect crew workspace from current directory: %w%s", err, hint)
|
||||
}
|
||||
name = detected.crewName
|
||||
if crewRig == "" {
|
||||
@@ -61,7 +74,7 @@ func runCrewAt(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resolve account for Claude config
|
||||
// Resolve account for runtime config
|
||||
townRoot, err := workspace.FindFromCwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("finding town root: %w", err)
|
||||
@@ -75,6 +88,9 @@ func runCrewAt(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf("Using account: %s\n", accountHandle)
|
||||
}
|
||||
|
||||
runtimeConfig := config.LoadRuntimeConfig(r.Path)
|
||||
_ = runtime.EnsureSettingsForRole(worker.ClonePath, "crew", runtimeConfig)
|
||||
|
||||
// Check if session exists
|
||||
t := tmux.NewTmux()
|
||||
sessionID := crewSessionName(r.Name, name)
|
||||
@@ -83,15 +99,15 @@ func runCrewAt(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("checking session: %w", err)
|
||||
}
|
||||
|
||||
// Before creating a new session, check if there's already a Claude session
|
||||
// Before creating a new session, check if there's already a runtime session
|
||||
// running in this crew's directory (might have been started manually or via
|
||||
// a different mechanism)
|
||||
if !hasSession {
|
||||
existingSessions, err := t.FindSessionByWorkDir(worker.ClonePath, true)
|
||||
existingSessions, err := t.FindSessionByWorkDir(worker.ClonePath, runtimeConfig.Tmux.ProcessNames)
|
||||
if err == nil && len(existingSessions) > 0 {
|
||||
// Found an existing session with Claude running in this directory
|
||||
// Found an existing session with runtime running in this directory
|
||||
existingSession := existingSessions[0]
|
||||
fmt.Printf("%s Found existing Claude session '%s' in crew directory\n",
|
||||
fmt.Printf("%s Found existing runtime session '%s' in crew directory\n",
|
||||
style.Warning.Render("⚠"),
|
||||
existingSession)
|
||||
fmt.Printf(" Attaching to existing session instead of creating a new one\n")
|
||||
@@ -125,9 +141,9 @@ func runCrewAt(cmd *cobra.Command, args []string) error {
|
||||
_ = t.SetEnvironment(sessionID, "GT_RIG", r.Name)
|
||||
_ = t.SetEnvironment(sessionID, "GT_CREW", name)
|
||||
|
||||
// Set CLAUDE_CONFIG_DIR for account selection (non-fatal)
|
||||
if claudeConfigDir != "" {
|
||||
_ = t.SetEnvironment(sessionID, "CLAUDE_CONFIG_DIR", claudeConfigDir)
|
||||
// Set runtime config dir for account selection (non-fatal)
|
||||
if runtimeConfig.Session != nil && runtimeConfig.Session.ConfigDirEnv != "" && claudeConfigDir != "" {
|
||||
_ = t.SetEnvironment(sessionID, runtimeConfig.Session.ConfigDirEnv, claudeConfigDir)
|
||||
}
|
||||
|
||||
// Apply rig-based theming (non-fatal: theming failure doesn't affect operation)
|
||||
@@ -146,24 +162,35 @@ func runCrewAt(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("getting pane ID: %w", err)
|
||||
}
|
||||
|
||||
// Use respawn-pane to replace shell with Claude directly
|
||||
// This gives cleaner lifecycle: Claude exits → session ends (no intermediate shell)
|
||||
// Pass "gt prime" as initial prompt so Claude loads context immediately
|
||||
// Use respawn-pane to replace shell with runtime directly
|
||||
// This gives cleaner lifecycle: runtime exits → session ends (no intermediate shell)
|
||||
// Pass "gt prime" as initial prompt if supported
|
||||
// Export GT_ROLE and BD_ACTOR since tmux SetEnvironment only affects new panes
|
||||
claudeCmd := config.BuildCrewStartupCommand(r.Name, name, r.Path, "gt prime")
|
||||
if err := t.RespawnPane(paneID, claudeCmd); err != nil {
|
||||
return fmt.Errorf("starting claude: %w", err)
|
||||
startupCmd, err := config.BuildCrewStartupCommandWithAgentOverride(r.Name, name, r.Path, "gt prime", crewAgentOverride)
|
||||
if err != nil {
|
||||
return fmt.Errorf("building startup command: %w", err)
|
||||
}
|
||||
// Prepend config dir env if available
|
||||
if runtimeConfig.Session != nil && runtimeConfig.Session.ConfigDirEnv != "" && claudeConfigDir != "" {
|
||||
startupCmd = config.PrependEnv(startupCmd, map[string]string{runtimeConfig.Session.ConfigDirEnv: claudeConfigDir})
|
||||
}
|
||||
if err := t.RespawnPane(paneID, startupCmd); err != nil {
|
||||
return fmt.Errorf("starting runtime: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Created session for %s/%s\n",
|
||||
style.Bold.Render("✓"), r.Name, name)
|
||||
} else {
|
||||
// Session exists - check if Claude is still running
|
||||
// Session exists - check if runtime is still running
|
||||
// Uses both pane command check and UI marker detection to avoid
|
||||
// restarting when user is in a subshell spawned from Claude
|
||||
if !t.IsClaudeRunning(sessionID) {
|
||||
// Claude has exited, restart it using respawn-pane
|
||||
fmt.Printf("Claude exited, restarting...\n")
|
||||
// restarting when user is in a subshell spawned from the runtime
|
||||
agentCfg, _, err := config.ResolveAgentConfigWithOverride(townRoot, r.Path, crewAgentOverride)
|
||||
if err != nil {
|
||||
return fmt.Errorf("resolving agent: %w", err)
|
||||
}
|
||||
if !t.IsAgentRunning(sessionID, config.ExpectedPaneCommands(agentCfg)...) {
|
||||
// Runtime has exited, restart it using respawn-pane
|
||||
fmt.Printf("Runtime exited, restarting...\n")
|
||||
|
||||
// Get pane ID for respawn
|
||||
paneID, err := t.GetPaneID(sessionID)
|
||||
@@ -171,12 +198,19 @@ func runCrewAt(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("getting pane ID: %w", err)
|
||||
}
|
||||
|
||||
// Use respawn-pane to replace shell with Claude directly
|
||||
// Pass "gt prime" as initial prompt so Claude loads context immediately
|
||||
// Use respawn-pane to replace shell with runtime directly
|
||||
// Pass "gt prime" as initial prompt if supported
|
||||
// Export GT_ROLE and BD_ACTOR since tmux SetEnvironment only affects new panes
|
||||
claudeCmd := config.BuildCrewStartupCommand(r.Name, name, r.Path, "gt prime")
|
||||
if err := t.RespawnPane(paneID, claudeCmd); err != nil {
|
||||
return fmt.Errorf("restarting claude: %w", err)
|
||||
startupCmd, err := config.BuildCrewStartupCommandWithAgentOverride(r.Name, name, r.Path, "gt prime", crewAgentOverride)
|
||||
if err != nil {
|
||||
return fmt.Errorf("building startup command: %w", err)
|
||||
}
|
||||
// Prepend config dir env if available
|
||||
if runtimeConfig.Session != nil && runtimeConfig.Session.ConfigDirEnv != "" && claudeConfigDir != "" {
|
||||
startupCmd = config.PrependEnv(startupCmd, map[string]string{runtimeConfig.Session.ConfigDirEnv: claudeConfigDir})
|
||||
}
|
||||
if err := t.RespawnPane(paneID, startupCmd); err != nil {
|
||||
return fmt.Errorf("restarting runtime: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -185,7 +219,10 @@ func runCrewAt(cmd *cobra.Command, args []string) error {
|
||||
if isInTmuxSession(sessionID) {
|
||||
// We're in the session at a shell prompt - just start the agent directly
|
||||
// Pass "gt prime" as initial prompt so it loads context immediately
|
||||
agentCfg := config.ResolveAgentConfig(townRoot, r.Path)
|
||||
agentCfg, _, err := config.ResolveAgentConfigWithOverride(townRoot, r.Path, crewAgentOverride)
|
||||
if err != nil {
|
||||
return fmt.Errorf("resolving agent: %w", err)
|
||||
}
|
||||
fmt.Printf("Starting %s in current session...\n", agentCfg.Command)
|
||||
return execAgent(agentCfg, "gt prime")
|
||||
}
|
||||
|
||||
@@ -122,7 +122,7 @@ func detectCrewFromCwd() (*crewDetection, error) {
|
||||
// Look for pattern: <rig>/crew/<name>/...
|
||||
// Minimum: rig, crew, name = 3 parts
|
||||
if len(parts) < 3 {
|
||||
return nil, fmt.Errorf("not in a crew workspace (path too short)")
|
||||
return nil, fmt.Errorf("not inside a crew workspace - specify the crew name or cd into a crew directory (e.g., gastown/crew/max)")
|
||||
}
|
||||
|
||||
rigName := parts[0]
|
||||
@@ -137,7 +137,7 @@ func detectCrewFromCwd() (*crewDetection, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// isShellCommand checks if the command is a shell (meaning Claude has exited).
|
||||
// isShellCommand checks if the command is a shell (meaning the runtime has exited).
|
||||
func isShellCommand(cmd string) bool {
|
||||
shells := constants.SupportedShells
|
||||
for _, shell := range shells {
|
||||
@@ -170,6 +170,29 @@ func execAgent(cfg *config.RuntimeConfig, prompt string) error {
|
||||
return syscall.Exec(agentPath, args, os.Environ())
|
||||
}
|
||||
|
||||
// execRuntime execs the runtime CLI, replacing the current process.
|
||||
// Used when we're already in the target session and just need to start the runtime.
|
||||
// If prompt is provided, it's passed according to the runtime's prompt mode.
|
||||
func execRuntime(prompt, rigPath, configDir string) error {
|
||||
runtimeConfig := config.LoadRuntimeConfig(rigPath)
|
||||
args := runtimeConfig.BuildArgsWithPrompt(prompt)
|
||||
if len(args) == 0 {
|
||||
return fmt.Errorf("runtime command not configured")
|
||||
}
|
||||
|
||||
binPath, err := exec.LookPath(args[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("runtime command not found: %w", err)
|
||||
}
|
||||
|
||||
env := os.Environ()
|
||||
if runtimeConfig.Session != nil && runtimeConfig.Session.ConfigDirEnv != "" && configDir != "" {
|
||||
env = append(env, fmt.Sprintf("%s=%s", runtimeConfig.Session.ConfigDirEnv, configDir))
|
||||
}
|
||||
|
||||
return syscall.Exec(binPath, args, env)
|
||||
}
|
||||
|
||||
// isInTmuxSession checks if we're currently inside the target tmux session.
|
||||
func isInTmuxSession(targetSession string) bool {
|
||||
// TMUX env var format: /tmp/tmux-501/default,12345,0
|
||||
|
||||
@@ -10,11 +10,10 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
"github.com/steveyegge/gastown/internal/crew"
|
||||
"github.com/steveyegge/gastown/internal/mail"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
"github.com/steveyegge/gastown/internal/townlog"
|
||||
@@ -24,6 +23,9 @@ import (
|
||||
func runCrewRemove(cmd *cobra.Command, args []string) error {
|
||||
var lastErr error
|
||||
|
||||
// --purge implies --force
|
||||
forceRemove := crewForce || crewPurge
|
||||
|
||||
for _, arg := range args {
|
||||
name := arg
|
||||
rigOverride := crewRig
|
||||
@@ -44,7 +46,7 @@ func runCrewRemove(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// Check for running session (unless forced)
|
||||
if !crewForce {
|
||||
if !forceRemove {
|
||||
t := tmux.NewTmux()
|
||||
sessionID := crewSessionName(r.Name, name)
|
||||
hasSession, _ := t.HasSession(sessionID)
|
||||
@@ -67,44 +69,115 @@ func runCrewRemove(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf("Killed session %s\n", sessionID)
|
||||
}
|
||||
|
||||
// Remove the crew workspace
|
||||
if err := crewMgr.Remove(name, crewForce); err != nil {
|
||||
if err == crew.ErrCrewNotFound {
|
||||
fmt.Printf("Error removing %s: crew workspace not found\n", arg)
|
||||
} else if err == crew.ErrHasChanges {
|
||||
fmt.Printf("Error removing %s: uncommitted changes (use --force)\n", arg)
|
||||
} else {
|
||||
fmt.Printf("Error removing %s: %v\n", arg, err)
|
||||
}
|
||||
lastErr = err
|
||||
continue
|
||||
// Determine workspace path
|
||||
crewPath := filepath.Join(r.Path, "crew", name)
|
||||
|
||||
// Check if this is a worktree (has .git file) vs regular clone (has .git directory)
|
||||
isWorktree := false
|
||||
gitPath := filepath.Join(crewPath, ".git")
|
||||
if info, err := os.Stat(gitPath); err == nil && !info.IsDir() {
|
||||
isWorktree = true
|
||||
}
|
||||
|
||||
fmt.Printf("%s Removed crew workspace: %s/%s\n",
|
||||
style.Bold.Render("✓"), r.Name, name)
|
||||
// Remove the workspace
|
||||
if isWorktree {
|
||||
// For worktrees, use git worktree remove
|
||||
mayorRigPath := constants.RigMayorPath(r.Path)
|
||||
removeArgs := []string{"worktree", "remove", crewPath}
|
||||
if forceRemove {
|
||||
removeArgs = []string{"worktree", "remove", "--force", crewPath}
|
||||
}
|
||||
removeCmd := exec.Command("git", removeArgs...)
|
||||
removeCmd.Dir = mayorRigPath
|
||||
if output, err := removeCmd.CombinedOutput(); err != nil {
|
||||
fmt.Printf("Error removing worktree %s: %v\n%s", arg, err, string(output))
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
fmt.Printf("%s Removed crew worktree: %s/%s\n",
|
||||
style.Bold.Render("✓"), r.Name, name)
|
||||
} else {
|
||||
// For regular clones, use the crew manager
|
||||
if err := crewMgr.Remove(name, forceRemove); err != nil {
|
||||
if err == crew.ErrCrewNotFound {
|
||||
fmt.Printf("Error removing %s: crew workspace not found\n", arg)
|
||||
} else if err == crew.ErrHasChanges {
|
||||
fmt.Printf("Error removing %s: uncommitted changes (use --force)\n", arg)
|
||||
} else {
|
||||
fmt.Printf("Error removing %s: %v\n", arg, err)
|
||||
}
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
fmt.Printf("%s Removed crew workspace: %s/%s\n",
|
||||
style.Bold.Render("✓"), r.Name, name)
|
||||
}
|
||||
|
||||
// Close the agent bead if it exists
|
||||
// Use the rig's configured prefix (e.g., "gt" for gastown, "bd" for beads)
|
||||
// Handle agent bead
|
||||
townRoot, _ := workspace.Find(r.Path)
|
||||
if townRoot == "" {
|
||||
townRoot = r.Path
|
||||
}
|
||||
prefix := beads.GetPrefixForRig(townRoot, r.Name)
|
||||
agentBeadID := beads.CrewBeadIDWithPrefix(prefix, r.Name, name)
|
||||
closeArgs := []string{"close", agentBeadID, "--reason=Crew workspace removed"}
|
||||
if sessionID := os.Getenv("CLAUDE_SESSION_ID"); sessionID != "" {
|
||||
closeArgs = append(closeArgs, "--session="+sessionID)
|
||||
}
|
||||
closeCmd := exec.Command("bd", closeArgs...)
|
||||
closeCmd.Dir = r.Path // Run from rig directory for proper beads resolution
|
||||
if output, err := closeCmd.CombinedOutput(); err != nil {
|
||||
// Non-fatal: bead might not exist or already be closed
|
||||
if !strings.Contains(string(output), "no issue found") &&
|
||||
!strings.Contains(string(output), "already closed") {
|
||||
style.PrintWarning("could not close agent bead %s: %v", agentBeadID, err)
|
||||
|
||||
if crewPurge {
|
||||
// --purge: DELETE the agent bead entirely (obliterate)
|
||||
deleteArgs := []string{"delete", agentBeadID, "--force"}
|
||||
deleteCmd := exec.Command("bd", deleteArgs...)
|
||||
deleteCmd.Dir = r.Path
|
||||
if output, err := deleteCmd.CombinedOutput(); err != nil {
|
||||
// Non-fatal: bead might not exist
|
||||
if !strings.Contains(string(output), "no issue found") &&
|
||||
!strings.Contains(string(output), "not found") {
|
||||
style.PrintWarning("could not delete agent bead %s: %v", agentBeadID, err)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Deleted agent bead: %s\n", agentBeadID)
|
||||
}
|
||||
|
||||
// Unassign any beads assigned to this crew member
|
||||
agentAddr := fmt.Sprintf("%s/crew/%s", r.Name, name)
|
||||
unassignArgs := []string{"list", "--assignee=" + agentAddr, "--format=id"}
|
||||
unassignCmd := exec.Command("bd", unassignArgs...)
|
||||
unassignCmd.Dir = r.Path
|
||||
if output, err := unassignCmd.CombinedOutput(); err == nil {
|
||||
ids := strings.Fields(strings.TrimSpace(string(output)))
|
||||
for _, id := range ids {
|
||||
if id == "" {
|
||||
continue
|
||||
}
|
||||
updateCmd := exec.Command("bd", "update", id, "--unassign")
|
||||
updateCmd.Dir = r.Path
|
||||
if _, err := updateCmd.CombinedOutput(); err == nil {
|
||||
fmt.Printf("Unassigned: %s\n", id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clear mail directory if it exists
|
||||
mailDir := filepath.Join(crewPath, "mail")
|
||||
if _, err := os.Stat(mailDir); err == nil {
|
||||
// Mail dir was removed with the workspace, so nothing to do
|
||||
// But if we want to be extra thorough, we could look in town beads
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Closed agent bead: %s\n", agentBeadID)
|
||||
// Default: CLOSE the agent bead (preserves CV history)
|
||||
closeArgs := []string{"close", agentBeadID, "--reason=Crew workspace removed"}
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
closeArgs = append(closeArgs, "--session="+sessionID)
|
||||
}
|
||||
closeCmd := exec.Command("bd", closeArgs...)
|
||||
closeCmd.Dir = r.Path
|
||||
if output, err := closeCmd.CombinedOutput(); err != nil {
|
||||
// Non-fatal: bead might not exist or already be closed
|
||||
if !strings.Contains(string(output), "no issue found") &&
|
||||
!strings.Contains(string(output), "already closed") {
|
||||
style.PrintWarning("could not close agent bead %s: %v", agentBeadID, err)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Closed agent bead: %s\n", agentBeadID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -126,7 +199,7 @@ func runCrewRefresh(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the crew worker
|
||||
// Get the crew worker (must exist for refresh)
|
||||
worker, err := crewMgr.Get(name)
|
||||
if err != nil {
|
||||
if err == crew.ErrCrewNotFound {
|
||||
@@ -135,12 +208,6 @@ func runCrewRefresh(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("getting crew worker: %w", err)
|
||||
}
|
||||
|
||||
t := tmux.NewTmux()
|
||||
sessionID := crewSessionName(r.Name, name)
|
||||
|
||||
// Check if session exists
|
||||
hasSession, _ := t.HasSession(sessionID)
|
||||
|
||||
// Create handoff message
|
||||
handoffMsg := crewMessage
|
||||
if handoffMsg == "" {
|
||||
@@ -168,47 +235,15 @@ func runCrewRefresh(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
fmt.Printf("Sent handoff mail to %s/%s\n", r.Name, name)
|
||||
|
||||
// Kill existing session if running
|
||||
if hasSession {
|
||||
if err := t.KillSession(sessionID); err != nil {
|
||||
return fmt.Errorf("killing old session: %w", err)
|
||||
}
|
||||
fmt.Printf("Killed old session %s\n", sessionID)
|
||||
}
|
||||
|
||||
// Start new session
|
||||
if err := t.NewSession(sessionID, worker.ClonePath); err != nil {
|
||||
return fmt.Errorf("creating session: %w", err)
|
||||
}
|
||||
|
||||
// Wait for shell to be ready
|
||||
if err := t.WaitForShellReady(sessionID, constants.ShellReadyTimeout); err != nil {
|
||||
return fmt.Errorf("waiting for shell: %w", err)
|
||||
}
|
||||
|
||||
// Build the startup beacon for predecessor discovery via /resume
|
||||
// Pass it as Claude's initial prompt - processed when Claude is ready
|
||||
address := fmt.Sprintf("%s/crew/%s", r.Name, name)
|
||||
beacon := session.FormatStartupNudge(session.StartupNudgeConfig{
|
||||
Recipient: address,
|
||||
Sender: "human",
|
||||
Topic: "refresh",
|
||||
// Use manager's Start() with refresh options
|
||||
err = crewMgr.Start(name, crew.StartOptions{
|
||||
KillExisting: true, // Kill old session if running
|
||||
Topic: "refresh", // Startup nudge topic
|
||||
Interactive: true, // No --dangerously-skip-permissions
|
||||
AgentOverride: crewAgentOverride,
|
||||
})
|
||||
|
||||
// Start claude with environment exports and beacon as initial prompt
|
||||
// Refresh uses regular permissions (no --dangerously-skip-permissions)
|
||||
// SessionStart hook handles context loading (gt prime --hook)
|
||||
claudeCmd := config.BuildCrewStartupCommand(r.Name, name, r.Path, beacon)
|
||||
// Remove --dangerously-skip-permissions for refresh (interactive mode)
|
||||
claudeCmd = strings.Replace(claudeCmd, " --dangerously-skip-permissions", "", 1)
|
||||
if err := t.SendKeys(sessionID, claudeCmd); err != nil {
|
||||
return fmt.Errorf("starting claude: %w", err)
|
||||
}
|
||||
|
||||
// Wait for Claude to start (optional, for status feedback)
|
||||
shells := constants.SupportedShells
|
||||
if err := t.WaitForCommand(sessionID, shells, constants.ClaudeStartTimeout); err != nil {
|
||||
// Non-fatal
|
||||
if err != nil {
|
||||
return fmt.Errorf("starting crew session: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Refreshed crew workspace: %s/%s\n",
|
||||
@@ -219,18 +254,27 @@ func runCrewRefresh(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// runCrewStart starts crew workers in a rig.
|
||||
// args[0] is the rig name (optional if inferrable from cwd)
|
||||
// args[1:] are crew member names (optional, or use --all flag)
|
||||
// If first arg is a valid rig name, it's used as the rig; otherwise rig is inferred from cwd.
|
||||
// Remaining args (or all args if rig is inferred) are crew member names.
|
||||
// Defaults to all crew members if no names specified.
|
||||
func runCrewStart(cmd *cobra.Command, args []string) error {
|
||||
var rigName string
|
||||
var crewNames []string
|
||||
|
||||
if len(args) == 0 {
|
||||
// No args - infer rig from cwd (only valid with --all)
|
||||
// No args - infer rig from cwd
|
||||
rigName = "" // getCrewManager will infer from cwd
|
||||
} else {
|
||||
rigName = args[0]
|
||||
crewNames = args[1:]
|
||||
// Check if first arg is a valid rig name
|
||||
if _, _, err := getRig(args[0]); err == nil {
|
||||
// First arg is a rig name
|
||||
rigName = args[0]
|
||||
crewNames = args[1:]
|
||||
} else {
|
||||
// First arg is not a rig - infer rig from cwd and treat all args as crew names
|
||||
rigName = "" // getCrewManager will infer from cwd
|
||||
crewNames = args
|
||||
}
|
||||
}
|
||||
|
||||
// Get the rig manager and rig (infers from cwd if rigName is empty)
|
||||
@@ -241,8 +285,8 @@ func runCrewStart(cmd *cobra.Command, args []string) error {
|
||||
// Update rigName in case it was inferred
|
||||
rigName = r.Name
|
||||
|
||||
// If --all flag, get all crew members
|
||||
if crewAll {
|
||||
// If --all flag OR no crew names specified, get all crew members
|
||||
if crewAll || len(crewNames) == 0 {
|
||||
workers, err := crewMgr.List()
|
||||
if err != nil {
|
||||
return fmt.Errorf("listing crew: %w", err)
|
||||
@@ -263,6 +307,7 @@ func runCrewStart(cmd *cobra.Command, args []string) error {
|
||||
// Set the start.go flags before calling runStartCrew
|
||||
startCrewRig = rigName
|
||||
startCrewAccount = crewAccount
|
||||
startCrewAgentOverride = crewAgentOverride
|
||||
|
||||
// Use rig/name format for runStartCrew
|
||||
fullName := rigName + "/" + name
|
||||
@@ -309,81 +354,19 @@ func runCrewRestart(cmd *cobra.Command, args []string) error {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get the crew worker, create if not exists (idempotent)
|
||||
worker, err := crewMgr.Get(name)
|
||||
if err == crew.ErrCrewNotFound {
|
||||
fmt.Printf("Creating crew workspace %s in %s...\n", name, r.Name)
|
||||
worker, err = crewMgr.Add(name, false) // No feature branch for crew
|
||||
if err != nil {
|
||||
fmt.Printf("Error creating %s: %v\n", arg, err)
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
fmt.Printf("Created crew workspace: %s/%s\n", r.Name, name)
|
||||
} else if err != nil {
|
||||
fmt.Printf("Error getting %s: %v\n", arg, err)
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
|
||||
t := tmux.NewTmux()
|
||||
sessionID := crewSessionName(r.Name, name)
|
||||
|
||||
// Kill existing session if running
|
||||
if hasSession, _ := t.HasSession(sessionID); hasSession {
|
||||
if err := t.KillSession(sessionID); err != nil {
|
||||
fmt.Printf("Error killing session for %s: %v\n", arg, err)
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
fmt.Printf("Killed session %s\n", sessionID)
|
||||
}
|
||||
|
||||
// Start new session
|
||||
if err := t.NewSession(sessionID, worker.ClonePath); err != nil {
|
||||
fmt.Printf("Error creating session for %s: %v\n", arg, err)
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
|
||||
// Set environment
|
||||
_ = t.SetEnvironment(sessionID, "GT_ROLE", "crew")
|
||||
// Apply rig-based theming (non-fatal: theming failure doesn't affect operation)
|
||||
theme := getThemeForRig(r.Name)
|
||||
_ = t.ConfigureGasTownSession(sessionID, theme, r.Name, name, "crew")
|
||||
|
||||
// Wait for shell to be ready
|
||||
if err := t.WaitForShellReady(sessionID, constants.ShellReadyTimeout); err != nil {
|
||||
fmt.Printf("Error waiting for shell for %s: %v\n", arg, err)
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
|
||||
// Build the startup beacon for predecessor discovery via /resume
|
||||
// Pass it as Claude's initial prompt - processed when Claude is ready
|
||||
address := fmt.Sprintf("%s/crew/%s", r.Name, name)
|
||||
beacon := session.FormatStartupNudge(session.StartupNudgeConfig{
|
||||
Recipient: address,
|
||||
Sender: "human",
|
||||
Topic: "restart",
|
||||
// Use manager's Start() with restart options
|
||||
// Start() will create workspace if needed (idempotent)
|
||||
err = crewMgr.Start(name, crew.StartOptions{
|
||||
KillExisting: true, // Kill old session if running
|
||||
Topic: "restart", // Startup nudge topic
|
||||
AgentOverride: crewAgentOverride,
|
||||
})
|
||||
|
||||
// Start claude with environment exports and beacon as initial prompt
|
||||
// SessionStart hook handles context loading (gt prime --hook)
|
||||
// The startup protocol tells agent to check mail/hook, no explicit prompt needed
|
||||
claudeCmd := config.BuildCrewStartupCommand(r.Name, name, r.Path, beacon)
|
||||
if err := t.SendKeys(sessionID, claudeCmd); err != nil {
|
||||
fmt.Printf("Error starting claude for %s: %v\n", arg, err)
|
||||
if err != nil {
|
||||
fmt.Printf("Error restarting %s: %v\n", arg, err)
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
|
||||
// Wait for Claude to start (optional, for status feedback)
|
||||
shells := constants.SupportedShells
|
||||
if err := t.WaitForCommand(sessionID, shells, constants.ClaudeStartTimeout); err != nil {
|
||||
style.PrintWarning("Timeout waiting for Claude to start for %s: %v", arg, err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Restarted crew workspace: %s/%s\n",
|
||||
style.Bold.Render("✓"), r.Name, name)
|
||||
fmt.Printf("Attach with: %s\n", style.Dim.Render(fmt.Sprintf("gt crew at %s", name)))
|
||||
@@ -443,7 +426,7 @@ func runCrewRestartAll() error {
|
||||
savedRig := crewRig
|
||||
crewRig = agent.Rig
|
||||
|
||||
crewMgr, r, err := getCrewManager(crewRig)
|
||||
crewMgr, _, err := getCrewManager(crewRig)
|
||||
if err != nil {
|
||||
failed++
|
||||
failures = append(failures, fmt.Sprintf("%s: %v", agentName, err))
|
||||
@@ -452,20 +435,16 @@ func runCrewRestartAll() error {
|
||||
continue
|
||||
}
|
||||
|
||||
worker, err := crewMgr.Get(agent.AgentName)
|
||||
// Use manager's Start() with restart options
|
||||
err = crewMgr.Start(agent.AgentName, crew.StartOptions{
|
||||
KillExisting: true, // Kill old session if running
|
||||
Topic: "restart", // Startup nudge topic
|
||||
AgentOverride: crewAgentOverride,
|
||||
})
|
||||
if err != nil {
|
||||
failed++
|
||||
failures = append(failures, fmt.Sprintf("%s: %v", agentName, err))
|
||||
fmt.Printf(" %s %s\n", style.ErrorPrefix, agentName)
|
||||
crewRig = savedRig
|
||||
continue
|
||||
}
|
||||
|
||||
// Restart the session
|
||||
if err := restartCrewSession(r.Name, agent.AgentName, worker.ClonePath); err != nil {
|
||||
failed++
|
||||
failures = append(failures, fmt.Sprintf("%s: %v", agentName, err))
|
||||
fmt.Printf(" %s %s\n", style.ErrorPrefix, agentName)
|
||||
} else {
|
||||
succeeded++
|
||||
fmt.Printf(" %s %s\n", style.SuccessPrefix, agentName)
|
||||
@@ -491,65 +470,31 @@ func runCrewRestartAll() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// restartCrewSession handles the core restart logic for a single crew session.
|
||||
func restartCrewSession(rigName, crewName, clonePath string) error {
|
||||
t := tmux.NewTmux()
|
||||
sessionID := crewSessionName(rigName, crewName)
|
||||
|
||||
// Kill existing session if running
|
||||
if hasSession, _ := t.HasSession(sessionID); hasSession {
|
||||
if err := t.KillSession(sessionID); err != nil {
|
||||
return fmt.Errorf("killing old session: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Start new session
|
||||
if err := t.NewSession(sessionID, clonePath); err != nil {
|
||||
return fmt.Errorf("creating session: %w", err)
|
||||
}
|
||||
|
||||
// Apply rig-based theming
|
||||
theme := getThemeForRig(rigName)
|
||||
_ = t.ConfigureGasTownSession(sessionID, theme, rigName, crewName, "crew")
|
||||
|
||||
// Wait for shell to be ready
|
||||
if err := t.WaitForShellReady(sessionID, constants.ShellReadyTimeout); err != nil {
|
||||
return fmt.Errorf("waiting for shell: %w", err)
|
||||
}
|
||||
|
||||
// Build the startup beacon for predecessor discovery via /resume
|
||||
// Pass it as Claude's initial prompt - processed when Claude is ready
|
||||
address := fmt.Sprintf("%s/crew/%s", rigName, crewName)
|
||||
beacon := session.FormatStartupNudge(session.StartupNudgeConfig{
|
||||
Recipient: address,
|
||||
Sender: "human",
|
||||
Topic: "restart",
|
||||
})
|
||||
|
||||
// Start claude with environment exports and beacon as initial prompt
|
||||
// SessionStart hook handles context loading (gt prime --hook)
|
||||
claudeCmd := config.BuildCrewStartupCommand(rigName, crewName, "", beacon)
|
||||
if err := t.SendKeys(sessionID, claudeCmd); err != nil {
|
||||
return fmt.Errorf("starting claude: %w", err)
|
||||
}
|
||||
|
||||
// Wait for Claude to start (optional, for status feedback)
|
||||
shells := constants.SupportedShells
|
||||
if err := t.WaitForCommand(sessionID, shells, constants.ClaudeStartTimeout); err != nil {
|
||||
// Non-fatal warning
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// runCrewStop stops one or more crew workers.
|
||||
// Supports: "name", "rig/name" formats, or --all to stop all.
|
||||
// Supports: "name", "rig/name" formats, "rig" (to stop all in rig), or --all.
|
||||
func runCrewStop(cmd *cobra.Command, args []string) error {
|
||||
// Handle --all flag
|
||||
if crewAll {
|
||||
return runCrewStopAll()
|
||||
}
|
||||
|
||||
// Handle 0 args: default to all in inferred rig
|
||||
if len(args) == 0 {
|
||||
return runCrewStopAll()
|
||||
}
|
||||
|
||||
// Handle 1 arg without "/": check if it's a rig name
|
||||
// If so, stop all crew in that rig
|
||||
if len(args) == 1 && !strings.Contains(args[0], "/") {
|
||||
// Try to interpret as rig name
|
||||
if _, _, err := getRig(args[0]); err == nil {
|
||||
// It's a valid rig name - stop all crew in that rig
|
||||
crewRig = args[0]
|
||||
return runCrewStopAll()
|
||||
}
|
||||
// Not a rig name - fall through to treat as crew name
|
||||
}
|
||||
|
||||
var lastErr error
|
||||
t := tmux.NewTmux()
|
||||
|
||||
@@ -575,12 +520,23 @@ func runCrewStop(cmd *cobra.Command, args []string) error {
|
||||
sessionID := crewSessionName(r.Name, name)
|
||||
|
||||
// Check if session exists
|
||||
hasSession, _ := t.HasSession(sessionID)
|
||||
hasSession, err := t.HasSession(sessionID)
|
||||
if err != nil {
|
||||
fmt.Printf("Error checking session %s: %v\n", sessionID, err)
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
if !hasSession {
|
||||
fmt.Printf("No session found for %s/%s\n", r.Name, name)
|
||||
continue
|
||||
}
|
||||
|
||||
// Dry run - just show what would be stopped
|
||||
if crewDryRun {
|
||||
fmt.Printf("Would stop %s/%s (session: %s)\n", r.Name, name, sessionID)
|
||||
continue
|
||||
}
|
||||
|
||||
// Capture output before stopping (best effort)
|
||||
var output string
|
||||
if !crewForce {
|
||||
|
||||
@@ -6,7 +6,9 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/crew"
|
||||
"github.com/steveyegge/gastown/internal/git"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
)
|
||||
@@ -22,43 +24,63 @@ type CrewListItem struct {
|
||||
}
|
||||
|
||||
func runCrewList(cmd *cobra.Command, args []string) error {
|
||||
crewMgr, r, err := getCrewManager(crewRig)
|
||||
if err != nil {
|
||||
return err
|
||||
if crewListAll && crewRig != "" {
|
||||
return fmt.Errorf("cannot use --all with --rig")
|
||||
}
|
||||
|
||||
workers, err := crewMgr.List()
|
||||
if err != nil {
|
||||
return fmt.Errorf("listing crew workers: %w", err)
|
||||
}
|
||||
|
||||
if len(workers) == 0 {
|
||||
fmt.Println("No crew workspaces found.")
|
||||
return nil
|
||||
var rigs []*rig.Rig
|
||||
if crewListAll {
|
||||
allRigs, _, err := getAllRigs()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rigs = allRigs
|
||||
} else {
|
||||
_, r, err := getCrewManager(crewRig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rigs = []*rig.Rig{r}
|
||||
}
|
||||
|
||||
// Check session and git status for each worker
|
||||
t := tmux.NewTmux()
|
||||
var items []CrewListItem
|
||||
|
||||
for _, w := range workers {
|
||||
sessionID := crewSessionName(r.Name, w.Name)
|
||||
hasSession, _ := t.HasSession(sessionID)
|
||||
for _, r := range rigs {
|
||||
crewGit := git.NewGit(r.Path)
|
||||
crewMgr := crew.NewManager(r, crewGit)
|
||||
|
||||
crewGit := git.NewGit(w.ClonePath)
|
||||
gitClean := true
|
||||
if status, err := crewGit.Status(); err == nil {
|
||||
gitClean = status.Clean
|
||||
workers, err := crewMgr.List()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "warning: failed to list crew workers in %s: %v\n", r.Name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
items = append(items, CrewListItem{
|
||||
Name: w.Name,
|
||||
Rig: r.Name,
|
||||
Branch: w.Branch,
|
||||
Path: w.ClonePath,
|
||||
HasSession: hasSession,
|
||||
GitClean: gitClean,
|
||||
})
|
||||
for _, w := range workers {
|
||||
sessionID := crewSessionName(r.Name, w.Name)
|
||||
hasSession, _ := t.HasSession(sessionID)
|
||||
|
||||
workerGit := git.NewGit(w.ClonePath)
|
||||
gitClean := true
|
||||
if status, err := workerGit.Status(); err == nil {
|
||||
gitClean = status.Clean
|
||||
}
|
||||
|
||||
items = append(items, CrewListItem{
|
||||
Name: w.Name,
|
||||
Rig: r.Name,
|
||||
Branch: w.Branch,
|
||||
Path: w.ClonePath,
|
||||
HasSession: hasSession,
|
||||
GitClean: gitClean,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(items) == 0 {
|
||||
fmt.Println("No crew workspaces found.")
|
||||
return nil
|
||||
}
|
||||
|
||||
if crewJSON {
|
||||
|
||||
127
internal/cmd/crew_list_test.go
Normal file
127
internal/cmd/crew_list_test.go
Normal file
@@ -0,0 +1,127 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
)
|
||||
|
||||
func setupTestTownForCrewList(t *testing.T, rigs map[string][]string) string {
|
||||
t.Helper()
|
||||
|
||||
townRoot := t.TempDir()
|
||||
mayorDir := filepath.Join(townRoot, "mayor")
|
||||
if err := os.MkdirAll(mayorDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir mayor: %v", err)
|
||||
}
|
||||
|
||||
townConfig := &config.TownConfig{
|
||||
Type: "town",
|
||||
Version: config.CurrentTownVersion,
|
||||
Name: "test-town",
|
||||
PublicName: "Test Town",
|
||||
CreatedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
}
|
||||
if err := config.SaveTownConfig(filepath.Join(mayorDir, "town.json"), townConfig); err != nil {
|
||||
t.Fatalf("save town.json: %v", err)
|
||||
}
|
||||
|
||||
rigsConfig := &config.RigsConfig{
|
||||
Version: config.CurrentRigsVersion,
|
||||
Rigs: make(map[string]config.RigEntry),
|
||||
}
|
||||
|
||||
for rigName, crewNames := range rigs {
|
||||
rigsConfig.Rigs[rigName] = config.RigEntry{
|
||||
GitURL: "https://example.com/" + rigName + ".git",
|
||||
AddedAt: time.Now(),
|
||||
}
|
||||
|
||||
rigPath := filepath.Join(townRoot, rigName)
|
||||
crewDir := filepath.Join(rigPath, "crew")
|
||||
if err := os.MkdirAll(crewDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir crew dir: %v", err)
|
||||
}
|
||||
for _, crewName := range crewNames {
|
||||
if err := os.MkdirAll(filepath.Join(crewDir, crewName), 0755); err != nil {
|
||||
t.Fatalf("mkdir crew worker: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := config.SaveRigsConfig(filepath.Join(mayorDir, "rigs.json"), rigsConfig); err != nil {
|
||||
t.Fatalf("save rigs.json: %v", err)
|
||||
}
|
||||
|
||||
return townRoot
|
||||
}
|
||||
|
||||
func TestRunCrewList_AllWithRigErrors(t *testing.T) {
|
||||
townRoot := setupTestTownForCrewList(t, map[string][]string{"rig-a": {"alice"}})
|
||||
|
||||
originalWd, _ := os.Getwd()
|
||||
defer os.Chdir(originalWd)
|
||||
if err := os.Chdir(townRoot); err != nil {
|
||||
t.Fatalf("chdir: %v", err)
|
||||
}
|
||||
|
||||
crewListAll = true
|
||||
crewRig = "rig-a"
|
||||
defer func() {
|
||||
crewListAll = false
|
||||
crewRig = ""
|
||||
}()
|
||||
|
||||
err := runCrewList(&cobra.Command{}, nil)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for --all with --rig, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunCrewList_AllAggregatesJSON(t *testing.T) {
|
||||
townRoot := setupTestTownForCrewList(t, map[string][]string{
|
||||
"rig-a": {"alice"},
|
||||
"rig-b": {"bob"},
|
||||
})
|
||||
|
||||
originalWd, _ := os.Getwd()
|
||||
defer os.Chdir(originalWd)
|
||||
if err := os.Chdir(townRoot); err != nil {
|
||||
t.Fatalf("chdir: %v", err)
|
||||
}
|
||||
|
||||
crewListAll = true
|
||||
crewJSON = true
|
||||
crewRig = ""
|
||||
defer func() {
|
||||
crewListAll = false
|
||||
crewJSON = false
|
||||
}()
|
||||
|
||||
output := captureStdout(t, func() {
|
||||
if err := runCrewList(&cobra.Command{}, nil); err != nil {
|
||||
t.Fatalf("runCrewList failed: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
var items []CrewListItem
|
||||
if err := json.Unmarshal([]byte(output), &items); err != nil {
|
||||
t.Fatalf("unmarshal output: %v", err)
|
||||
}
|
||||
if len(items) != 2 {
|
||||
t.Fatalf("expected 2 crew workers, got %d", len(items))
|
||||
}
|
||||
|
||||
rigs := map[string]bool{}
|
||||
for _, item := range items {
|
||||
rigs[item.Rig] = true
|
||||
}
|
||||
if !rigs["rig-a"] || !rigs["rig-b"] {
|
||||
t.Fatalf("expected crew from rig-a and rig-b, got: %#v", rigs)
|
||||
}
|
||||
}
|
||||
@@ -12,10 +12,12 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/claude"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
"github.com/steveyegge/gastown/internal/deacon"
|
||||
"github.com/steveyegge/gastown/internal/polecat"
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
@@ -88,6 +90,8 @@ Stops the current session (if running) and starts a fresh one.`,
|
||||
RunE: runDeaconRestart,
|
||||
}
|
||||
|
||||
var deaconAgentOverride string
|
||||
|
||||
var deaconHeartbeatCmd = &cobra.Command{
|
||||
Use: "heartbeat [action]",
|
||||
Short: "Update the Deacon heartbeat",
|
||||
@@ -109,7 +113,7 @@ var deaconTriggerPendingCmd = &cobra.Command{
|
||||
|
||||
⚠️ BOOTSTRAP MODE ONLY - Uses regex detection (ZFC violation acceptable).
|
||||
|
||||
This command uses WaitForClaudeReady (regex) to detect when Claude is ready.
|
||||
This command uses WaitForRuntimeReady (regex) to detect when the runtime is ready.
|
||||
This is appropriate for daemon bootstrap when no AI is available.
|
||||
|
||||
In steady-state, the Deacon should use AI-based observation instead:
|
||||
@@ -186,6 +190,50 @@ This helps the Deacon understand which agents may need attention.`,
|
||||
RunE: runDeaconHealthState,
|
||||
}
|
||||
|
||||
var deaconStaleHooksCmd = &cobra.Command{
|
||||
Use: "stale-hooks",
|
||||
Short: "Find and unhook stale hooked beads",
|
||||
Long: `Find beads stuck in 'hooked' status and unhook them if the agent is gone.
|
||||
|
||||
Beads can get stuck in 'hooked' status when agents die or abandon work.
|
||||
This command finds hooked beads older than the threshold (default: 1 hour),
|
||||
checks if the assignee agent is still alive, and unhooks them if not.
|
||||
|
||||
Examples:
|
||||
gt deacon stale-hooks # Find and unhook stale beads
|
||||
gt deacon stale-hooks --dry-run # Preview what would be unhooked
|
||||
gt deacon stale-hooks --max-age=30m # Use 30 minute threshold`,
|
||||
RunE: runDeaconStaleHooks,
|
||||
}
|
||||
|
||||
var deaconPauseCmd = &cobra.Command{
|
||||
Use: "pause",
|
||||
Short: "Pause the Deacon to prevent patrol actions",
|
||||
Long: `Pause the Deacon to prevent it from performing any patrol actions.
|
||||
|
||||
When paused, the Deacon:
|
||||
- Will not create patrol molecules
|
||||
- Will not run health checks
|
||||
- Will not take any autonomous actions
|
||||
- Will display a PAUSED message on startup
|
||||
|
||||
The pause state persists across session restarts. Use 'gt deacon resume'
|
||||
to allow the Deacon to work again.
|
||||
|
||||
Examples:
|
||||
gt deacon pause # Pause with no reason
|
||||
gt deacon pause --reason="testing" # Pause with a reason`,
|
||||
RunE: runDeaconPause,
|
||||
}
|
||||
|
||||
var deaconResumeCmd = &cobra.Command{
|
||||
Use: "resume",
|
||||
Short: "Resume the Deacon to allow patrol actions",
|
||||
Long: `Resume the Deacon so it can perform patrol actions again.
|
||||
|
||||
This removes the pause file and allows the Deacon to work normally.`,
|
||||
RunE: runDeaconResume,
|
||||
}
|
||||
|
||||
var (
|
||||
triggerTimeout time.Duration
|
||||
@@ -198,6 +246,13 @@ var (
|
||||
// Force kill flags
|
||||
forceKillReason string
|
||||
forceKillSkipNotify bool
|
||||
|
||||
// Stale hooks flags
|
||||
staleHooksMaxAge time.Duration
|
||||
staleHooksDryRun bool
|
||||
|
||||
// Pause flags
|
||||
pauseReason string
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -211,6 +266,9 @@ func init() {
|
||||
deaconCmd.AddCommand(deaconHealthCheckCmd)
|
||||
deaconCmd.AddCommand(deaconForceKillCmd)
|
||||
deaconCmd.AddCommand(deaconHealthStateCmd)
|
||||
deaconCmd.AddCommand(deaconStaleHooksCmd)
|
||||
deaconCmd.AddCommand(deaconPauseCmd)
|
||||
deaconCmd.AddCommand(deaconResumeCmd)
|
||||
|
||||
// Flags for trigger-pending
|
||||
deaconTriggerPendingCmd.Flags().DurationVar(&triggerTimeout, "timeout", 2*time.Second,
|
||||
@@ -230,6 +288,20 @@ func init() {
|
||||
deaconForceKillCmd.Flags().BoolVar(&forceKillSkipNotify, "skip-notify", false,
|
||||
"Skip sending notification mail to mayor")
|
||||
|
||||
// Flags for stale-hooks
|
||||
deaconStaleHooksCmd.Flags().DurationVar(&staleHooksMaxAge, "max-age", 1*time.Hour,
|
||||
"Maximum age before a hooked bead is considered stale")
|
||||
deaconStaleHooksCmd.Flags().BoolVar(&staleHooksDryRun, "dry-run", false,
|
||||
"Preview what would be unhooked without making changes")
|
||||
|
||||
// Flags for pause
|
||||
deaconPauseCmd.Flags().StringVar(&pauseReason, "reason", "",
|
||||
"Reason for pausing the Deacon")
|
||||
|
||||
deaconStartCmd.Flags().StringVar(&deaconAgentOverride, "agent", "", "Agent alias to run the Deacon with (overrides town default)")
|
||||
deaconAttachCmd.Flags().StringVar(&deaconAgentOverride, "agent", "", "Agent alias to run the Deacon with (overrides town default)")
|
||||
deaconRestartCmd.Flags().StringVar(&deaconAgentOverride, "agent", "", "Agent alias to run the Deacon with (overrides town default)")
|
||||
|
||||
rootCmd.AddCommand(deaconCmd)
|
||||
}
|
||||
|
||||
@@ -247,7 +319,7 @@ func runDeaconStart(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("Deacon session already running. Attach with: gt deacon attach")
|
||||
}
|
||||
|
||||
if err := startDeaconSession(t, sessionName); err != nil {
|
||||
if err := startDeaconSession(t, sessionName, deaconAgentOverride); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -259,7 +331,7 @@ func runDeaconStart(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// startDeaconSession creates and initializes the Deacon tmux session.
|
||||
func startDeaconSession(t *tmux.Tmux, sessionName string) error {
|
||||
func startDeaconSession(t *tmux.Tmux, sessionName, agentOverride string) error {
|
||||
// Find workspace root
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
@@ -274,9 +346,9 @@ func startDeaconSession(t *tmux.Tmux, sessionName string) error {
|
||||
return fmt.Errorf("creating deacon directory: %w", err)
|
||||
}
|
||||
|
||||
// Ensure deacon has patrol hooks (idempotent)
|
||||
if err := ensurePatrolHooks(deaconDir); err != nil {
|
||||
style.PrintWarning("Could not create deacon hooks: %v", err)
|
||||
// Ensure Claude settings exist (autonomous role needs mail in SessionStart)
|
||||
if err := claude.EnsureSettingsForRole(deaconDir, "deacon"); err != nil {
|
||||
style.PrintWarning("Could not create deacon settings: %v", err)
|
||||
}
|
||||
|
||||
// Create session in deacon directory
|
||||
@@ -298,7 +370,11 @@ func startDeaconSession(t *tmux.Tmux, sessionName string) error {
|
||||
// Restarts are handled by daemon via ensureDeaconRunning on each heartbeat
|
||||
// The startup hook handles context loading automatically
|
||||
// Export GT_ROLE and BD_ACTOR in the command since tmux SetEnvironment only affects new panes
|
||||
if err := t.SendKeys(sessionName, config.BuildAgentStartupCommand("deacon", "deacon", "", "")); err != nil {
|
||||
startupCmd, err := config.BuildAgentStartupCommandWithAgentOverride("deacon", "deacon", "", "", agentOverride)
|
||||
if err != nil {
|
||||
return fmt.Errorf("building startup command: %w", err)
|
||||
}
|
||||
if err := t.SendKeys(sessionName, startupCmd); err != nil {
|
||||
return fmt.Errorf("sending command: %w", err)
|
||||
}
|
||||
|
||||
@@ -308,6 +384,9 @@ func startDeaconSession(t *tmux.Tmux, sessionName string) error {
|
||||
}
|
||||
time.Sleep(constants.ShutdownNotifyDelay)
|
||||
|
||||
runtimeConfig := config.LoadRuntimeConfig("")
|
||||
_ = runtime.RunStartupFallback(t, sessionName, "deacon", runtimeConfig)
|
||||
|
||||
// Inject startup nudge for predecessor discovery via /resume
|
||||
_ = session.StartupNudge(t, sessionName, session.StartupNudgeConfig{
|
||||
Recipient: "deacon",
|
||||
@@ -366,7 +445,7 @@ func runDeaconAttach(cmd *cobra.Command, args []string) error {
|
||||
if !running {
|
||||
// Auto-start if not running
|
||||
fmt.Println("Deacon session not running, starting...")
|
||||
if err := startDeaconSession(t, sessionName); err != nil {
|
||||
if err := startDeaconSession(t, sessionName, deaconAgentOverride); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -381,6 +460,23 @@ func runDeaconStatus(cmd *cobra.Command, args []string) error {
|
||||
|
||||
sessionName := getDeaconSessionName()
|
||||
|
||||
// Check pause state first (most important)
|
||||
townRoot, _ := workspace.FindFromCwdOrError()
|
||||
if townRoot != "" {
|
||||
paused, state, err := deacon.IsPaused(townRoot)
|
||||
if err == nil && paused {
|
||||
fmt.Printf("%s DEACON PAUSED\n", style.Bold.Render("⏸️"))
|
||||
if state.Reason != "" {
|
||||
fmt.Printf(" Reason: %s\n", state.Reason)
|
||||
}
|
||||
fmt.Printf(" Paused at: %s\n", state.PausedAt.Format(time.RFC3339))
|
||||
fmt.Printf(" Paused by: %s\n", state.PausedBy)
|
||||
fmt.Println()
|
||||
fmt.Printf("Resume with: %s\n", style.Dim.Render("gt deacon resume"))
|
||||
fmt.Println()
|
||||
}
|
||||
}
|
||||
|
||||
running, err := t.HasSession(sessionName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking session: %w", err)
|
||||
@@ -450,6 +546,19 @@ func runDeaconHeartbeat(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
// Check if Deacon is paused - if so, refuse to update heartbeat
|
||||
paused, state, err := deacon.IsPaused(townRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking pause state: %w", err)
|
||||
}
|
||||
if paused {
|
||||
fmt.Printf("%s Deacon is paused. Use 'gt deacon resume' to unpause.\n", style.Bold.Render("⏸️"))
|
||||
if state.Reason != "" {
|
||||
fmt.Printf(" Reason: %s\n", state.Reason)
|
||||
}
|
||||
return errors.New("Deacon is paused")
|
||||
}
|
||||
|
||||
action := ""
|
||||
if len(args) > 0 {
|
||||
action = strings.Join(args, " ")
|
||||
@@ -526,64 +635,6 @@ func runDeaconTriggerPending(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensurePatrolHooks creates .claude/settings.json with hooks for patrol roles.
|
||||
// This is idempotent - if hooks already exist, it does nothing.
|
||||
func ensurePatrolHooks(workspacePath string) error {
|
||||
settingsPath := filepath.Join(workspacePath, ".claude", "settings.json")
|
||||
|
||||
// Check if already exists
|
||||
if _, err := os.Stat(settingsPath); err == nil {
|
||||
return nil // Already exists
|
||||
}
|
||||
|
||||
claudeDir := filepath.Join(workspacePath, ".claude")
|
||||
if err := os.MkdirAll(claudeDir, 0755); err != nil {
|
||||
return fmt.Errorf("creating .claude dir: %w", err)
|
||||
}
|
||||
|
||||
// Standard patrol hooks
|
||||
// Note: SessionStart nudges Deacon for GUPP backstop (agent wake notification)
|
||||
hooksJSON := `{
|
||||
"hooks": {
|
||||
"SessionStart": [
|
||||
{
|
||||
"matcher": "",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt prime && gt mail check --inject && gt nudge deacon session-started"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"PreCompact": [
|
||||
{
|
||||
"matcher": "",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt prime"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"UserPromptSubmit": [
|
||||
{
|
||||
"matcher": "",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt mail check --inject"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
`
|
||||
return os.WriteFile(settingsPath, []byte(hooksJSON), 0600)
|
||||
}
|
||||
|
||||
// runDeaconHealthCheck implements the health-check command.
|
||||
// It sends a HEALTH_CHECK nudge to an agent, waits for response, and tracks state.
|
||||
func runDeaconHealthCheck(cmd *cobra.Command, args []string) error {
|
||||
@@ -908,3 +959,132 @@ func updateAgentBeadState(townRoot, agent, state, _ string) { // reason unused b
|
||||
_ = cmd.Run() // Best effort
|
||||
}
|
||||
|
||||
// runDeaconStaleHooks finds and unhooks stale hooked beads.
|
||||
func runDeaconStaleHooks(cmd *cobra.Command, args []string) error {
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
cfg := &deacon.StaleHookConfig{
|
||||
MaxAge: staleHooksMaxAge,
|
||||
DryRun: staleHooksDryRun,
|
||||
}
|
||||
|
||||
result, err := deacon.ScanStaleHooks(townRoot, cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("scanning stale hooks: %w", err)
|
||||
}
|
||||
|
||||
// Print summary
|
||||
if result.TotalHooked == 0 {
|
||||
fmt.Printf("%s No hooked beads found\n", style.Dim.Render("○"))
|
||||
return nil
|
||||
}
|
||||
|
||||
fmt.Printf("%s Found %d hooked bead(s), %d stale (older than %s)\n",
|
||||
style.Bold.Render("●"), result.TotalHooked, result.StaleCount, staleHooksMaxAge)
|
||||
|
||||
if result.StaleCount == 0 {
|
||||
fmt.Printf("%s No stale hooked beads\n", style.Dim.Render("○"))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Print details for each stale bead
|
||||
for _, r := range result.Results {
|
||||
status := style.Dim.Render("○")
|
||||
action := "skipped (agent alive)"
|
||||
|
||||
if !r.AgentAlive {
|
||||
if staleHooksDryRun {
|
||||
status = style.Bold.Render("?")
|
||||
action = "would unhook (agent dead)"
|
||||
} else if r.Unhooked {
|
||||
status = style.Bold.Render("✓")
|
||||
action = "unhooked (agent dead)"
|
||||
} else if r.Error != "" {
|
||||
status = style.Dim.Render("✗")
|
||||
action = fmt.Sprintf("error: %s", r.Error)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf(" %s %s: %s (age: %s, assignee: %s)\n",
|
||||
status, r.BeadID, action, r.Age, r.Assignee)
|
||||
}
|
||||
|
||||
// Summary
|
||||
if staleHooksDryRun {
|
||||
fmt.Printf("\n%s Dry run - no changes made. Run without --dry-run to unhook.\n",
|
||||
style.Dim.Render("ℹ"))
|
||||
} else if result.Unhooked > 0 {
|
||||
fmt.Printf("\n%s Unhooked %d stale bead(s)\n",
|
||||
style.Bold.Render("✓"), result.Unhooked)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// runDeaconPause pauses the Deacon to prevent patrol actions.
|
||||
func runDeaconPause(cmd *cobra.Command, args []string) error {
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
// Check if already paused
|
||||
paused, state, err := deacon.IsPaused(townRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking pause state: %w", err)
|
||||
}
|
||||
if paused {
|
||||
fmt.Printf("%s Deacon is already paused\n", style.Dim.Render("○"))
|
||||
fmt.Printf(" Reason: %s\n", state.Reason)
|
||||
fmt.Printf(" Paused at: %s\n", state.PausedAt.Format(time.RFC3339))
|
||||
fmt.Printf(" Paused by: %s\n", state.PausedBy)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Pause the Deacon
|
||||
if err := deacon.Pause(townRoot, pauseReason, "human"); err != nil {
|
||||
return fmt.Errorf("pausing Deacon: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Deacon paused\n", style.Bold.Render("⏸️"))
|
||||
if pauseReason != "" {
|
||||
fmt.Printf(" Reason: %s\n", pauseReason)
|
||||
}
|
||||
fmt.Printf(" Pause file: %s\n", deacon.GetPauseFile(townRoot))
|
||||
fmt.Println()
|
||||
fmt.Printf("The Deacon will not perform any patrol actions until resumed.\n")
|
||||
fmt.Printf("Resume with: %s\n", style.Dim.Render("gt deacon resume"))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// runDeaconResume resumes the Deacon to allow patrol actions.
|
||||
func runDeaconResume(cmd *cobra.Command, args []string) error {
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
// Check if paused
|
||||
paused, _, err := deacon.IsPaused(townRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking pause state: %w", err)
|
||||
}
|
||||
if !paused {
|
||||
fmt.Printf("%s Deacon is not paused\n", style.Dim.Render("○"))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resume the Deacon
|
||||
if err := deacon.Resume(townRoot); err != nil {
|
||||
return fmt.Errorf("resuming Deacon: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Deacon resumed\n", style.Bold.Render("▶️"))
|
||||
fmt.Println("The Deacon can now perform patrol actions.")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
72
internal/cmd/disable.go
Normal file
72
internal/cmd/disable.go
Normal file
@@ -0,0 +1,72 @@
|
||||
// ABOUTME: Command to disable Gas Town system-wide.
|
||||
// ABOUTME: Sets the global state to disabled so tools work vanilla.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/shell"
|
||||
"github.com/steveyegge/gastown/internal/state"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
)
|
||||
|
||||
var disableClean bool
|
||||
|
||||
var disableCmd = &cobra.Command{
|
||||
Use: "disable",
|
||||
GroupID: GroupConfig,
|
||||
Short: "Disable Gas Town system-wide",
|
||||
Long: `Disable Gas Town for all agentic coding tools.
|
||||
|
||||
When disabled:
|
||||
- Shell hooks become no-ops
|
||||
- Claude Code SessionStart hooks skip 'gt prime'
|
||||
- Tools work 100% vanilla (no Gas Town behavior)
|
||||
|
||||
The workspace (~/gt) is preserved. Use 'gt enable' to re-enable.
|
||||
|
||||
Flags:
|
||||
--clean Also remove shell integration from ~/.zshrc/~/.bashrc
|
||||
|
||||
Environment overrides still work:
|
||||
GASTOWN_ENABLED=1 - Enable for current session only`,
|
||||
RunE: runDisable,
|
||||
}
|
||||
|
||||
func init() {
|
||||
disableCmd.Flags().BoolVar(&disableClean, "clean", false,
|
||||
"Remove shell integration from RC files")
|
||||
rootCmd.AddCommand(disableCmd)
|
||||
}
|
||||
|
||||
func runDisable(cmd *cobra.Command, args []string) error {
|
||||
if err := state.Disable(); err != nil {
|
||||
return fmt.Errorf("disabling Gas Town: %w", err)
|
||||
}
|
||||
|
||||
if disableClean {
|
||||
if err := removeShellIntegration(); err != nil {
|
||||
fmt.Printf("%s Could not clean shell integration: %v\n",
|
||||
style.Warning.Render("!"), err)
|
||||
} else {
|
||||
fmt.Println(" Removed shell integration from RC files")
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("%s Gas Town disabled\n", style.Success.Render("✓"))
|
||||
fmt.Println()
|
||||
fmt.Println("All agentic coding tools now work vanilla.")
|
||||
if !disableClean {
|
||||
fmt.Printf("Use %s to also remove shell hooks\n",
|
||||
style.Dim.Render("gt disable --clean"))
|
||||
}
|
||||
fmt.Printf("Use %s to re-enable\n", style.Dim.Render("gt enable"))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeShellIntegration() error {
|
||||
return shell.Remove()
|
||||
}
|
||||
@@ -10,9 +10,10 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
doctorFix bool
|
||||
doctorVerbose bool
|
||||
doctorRig string
|
||||
doctorFix bool
|
||||
doctorVerbose bool
|
||||
doctorRig string
|
||||
doctorRestartSessions bool
|
||||
)
|
||||
|
||||
var doctorCmd = &cobra.Command{
|
||||
@@ -45,6 +46,10 @@ Clone divergence checks:
|
||||
- persistent-role-branches Detect crew/witness/refinery not on main
|
||||
- clone-divergence Detect clones significantly behind origin/main
|
||||
|
||||
Crew workspace checks:
|
||||
- crew-state Validate crew worker state.json files (fixable)
|
||||
- crew-worktrees Detect stale cross-rig worktrees (fixable)
|
||||
|
||||
Rig checks (with --rig flag):
|
||||
- rig-is-git-repo Verify rig is a valid git repository
|
||||
- git-exclude-configured Check .git/info/exclude has Gas Town dirs (fixable)
|
||||
@@ -56,9 +61,11 @@ Rig checks (with --rig flag):
|
||||
|
||||
Routing checks (fixable):
|
||||
- routes-config Check beads routing configuration
|
||||
- prefix-mismatch Detect rigs.json vs routes.jsonl prefix mismatches (fixable)
|
||||
|
||||
Session hook checks:
|
||||
- session-hooks Check settings.json use session-start.sh
|
||||
- claude-settings Check Claude settings.json match templates (fixable)
|
||||
|
||||
Patrol checks:
|
||||
- patrol-molecules-exist Verify patrol molecules exist
|
||||
@@ -76,6 +83,7 @@ func init() {
|
||||
doctorCmd.Flags().BoolVar(&doctorFix, "fix", false, "Attempt to automatically fix issues")
|
||||
doctorCmd.Flags().BoolVarP(&doctorVerbose, "verbose", "v", false, "Show detailed output")
|
||||
doctorCmd.Flags().StringVar(&doctorRig, "rig", "", "Check specific rig only")
|
||||
doctorCmd.Flags().BoolVar(&doctorRestartSessions, "restart-sessions", false, "Restart patrol sessions when fixing stale settings (use with --fix)")
|
||||
rootCmd.AddCommand(doctorCmd)
|
||||
}
|
||||
|
||||
@@ -88,9 +96,10 @@ func runDoctor(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Create check context
|
||||
ctx := &doctor.CheckContext{
|
||||
TownRoot: townRoot,
|
||||
RigName: doctorRig,
|
||||
Verbose: doctorVerbose,
|
||||
TownRoot: townRoot,
|
||||
RigName: doctorRig,
|
||||
Verbose: doctorVerbose,
|
||||
RestartSessions: doctorRestartSessions,
|
||||
}
|
||||
|
||||
// Create doctor and register checks
|
||||
@@ -99,17 +108,22 @@ func runDoctor(cmd *cobra.Command, args []string) error {
|
||||
// Register workspace-level checks first (fundamental)
|
||||
d.RegisterAll(doctor.WorkspaceChecks()...)
|
||||
|
||||
d.Register(doctor.NewGlobalStateCheck())
|
||||
|
||||
// Register built-in checks
|
||||
d.Register(doctor.NewTownGitCheck())
|
||||
d.Register(doctor.NewDaemonCheck())
|
||||
d.Register(doctor.NewRepoFingerprintCheck())
|
||||
d.Register(doctor.NewBootHealthCheck())
|
||||
d.Register(doctor.NewBeadsDatabaseCheck())
|
||||
d.Register(doctor.NewFormulaCheck())
|
||||
d.Register(doctor.NewBdDaemonCheck())
|
||||
d.Register(doctor.NewPrefixConflictCheck())
|
||||
d.Register(doctor.NewPrefixMismatchCheck())
|
||||
d.Register(doctor.NewRoutesCheck())
|
||||
d.Register(doctor.NewOrphanSessionCheck())
|
||||
d.Register(doctor.NewOrphanProcessCheck())
|
||||
d.Register(doctor.NewGTRootCheck())
|
||||
d.Register(doctor.NewWispGCCheck())
|
||||
d.Register(doctor.NewBranchCheck())
|
||||
d.Register(doctor.NewBeadsSyncOrphanCheck())
|
||||
@@ -125,6 +139,7 @@ func runDoctor(cmd *cobra.Command, args []string) error {
|
||||
d.Register(doctor.NewPatrolPluginsAccessibleCheck())
|
||||
d.Register(doctor.NewPatrolRolesHavePromptsCheck())
|
||||
d.Register(doctor.NewAgentBeadsCheck())
|
||||
d.Register(doctor.NewRigBeadsCheck())
|
||||
|
||||
// NOTE: StaleAttachmentsCheck removed - staleness detection belongs in Deacon molecule
|
||||
|
||||
@@ -133,9 +148,11 @@ func runDoctor(cmd *cobra.Command, args []string) error {
|
||||
d.Register(doctor.NewSessionHookCheck())
|
||||
d.Register(doctor.NewRuntimeGitignoreCheck())
|
||||
d.Register(doctor.NewLegacyGastownCheck())
|
||||
d.Register(doctor.NewClaudeSettingsCheck())
|
||||
|
||||
// Crew workspace checks
|
||||
d.Register(doctor.NewCrewStateCheck())
|
||||
d.Register(doctor.NewCrewWorktreeCheck())
|
||||
d.Register(doctor.NewCommandsCheck())
|
||||
|
||||
// Lifecycle hygiene checks
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/events"
|
||||
"github.com/steveyegge/gastown/internal/git"
|
||||
"github.com/steveyegge/gastown/internal/mail"
|
||||
"github.com/steveyegge/gastown/internal/polecat"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
@@ -161,17 +162,6 @@ func runDone(cmd *cobra.Command, args []string) error {
|
||||
if branch == defaultBranch || branch == "master" {
|
||||
return fmt.Errorf("cannot submit %s/master branch to merge queue", defaultBranch)
|
||||
}
|
||||
|
||||
// Check for unpushed commits - branch must be pushed before MR creation
|
||||
// Use BranchPushedToRemote which handles polecat branches without upstream tracking
|
||||
pushed, unpushedCount, err := g.BranchPushedToRemote(branch, "origin")
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking if branch is pushed: %w", err)
|
||||
}
|
||||
if !pushed {
|
||||
return fmt.Errorf("branch has %d unpushed commit(s); run 'git push -u origin %s' first", unpushedCount, branch)
|
||||
}
|
||||
|
||||
// Check that branch has commits ahead of default branch (prevents submitting stale branches)
|
||||
aheadCount, err := g.CommitsAhead(defaultBranch, branch)
|
||||
if err != nil {
|
||||
@@ -366,12 +356,10 @@ func runDone(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateAgentStateOnDone updates the agent bead state when work is complete.
|
||||
// Maps exit type to agent state:
|
||||
// - COMPLETED → "done"
|
||||
// - ESCALATED → "stuck"
|
||||
// - DEFERRED → "idle"
|
||||
// - PHASE_COMPLETE → "awaiting-gate"
|
||||
// updateAgentStateOnDone clears the agent's hook and reports cleanup status.
|
||||
// Per gt-zecmc: observable states ("done", "idle") removed - use tmux to discover.
|
||||
// Non-observable states ("stuck", "awaiting-gate") are still set since they represent
|
||||
// intentional agent decisions that can't be observed from tmux.
|
||||
//
|
||||
// Also self-reports cleanup_status for ZFC compliance (#10).
|
||||
func updateAgentStateOnDone(cwd, townRoot, exitType, _ string) { // issueID unused but kept for future audit logging
|
||||
@@ -394,22 +382,6 @@ func updateAgentStateOnDone(cwd, townRoot, exitType, _ string) { // issueID unus
|
||||
return
|
||||
}
|
||||
|
||||
// Map exit type to agent state
|
||||
var newState string
|
||||
switch exitType {
|
||||
case ExitCompleted:
|
||||
newState = "done"
|
||||
case ExitEscalated:
|
||||
newState = "stuck"
|
||||
case ExitDeferred:
|
||||
newState = "idle"
|
||||
case ExitPhaseComplete:
|
||||
newState = "awaiting-gate"
|
||||
default:
|
||||
return
|
||||
}
|
||||
|
||||
// Update agent bead with new state and clear hook_bead (work is done)
|
||||
// Use rig path for slot commands - bd slot doesn't route from town root
|
||||
var beadsPath string
|
||||
switch ctx.Role {
|
||||
@@ -419,18 +391,47 @@ func updateAgentStateOnDone(cwd, townRoot, exitType, _ string) { // issueID unus
|
||||
beadsPath = filepath.Join(townRoot, ctx.Rig)
|
||||
}
|
||||
bd := beads.New(beadsPath)
|
||||
emptyHook := ""
|
||||
if err := bd.UpdateAgentState(agentBeadID, newState, &emptyHook); err != nil {
|
||||
// Log warning instead of silent ignore - helps debug cross-beads issues
|
||||
fmt.Fprintf(os.Stderr, "Warning: couldn't update agent %s state on done: %v\n", agentBeadID, err)
|
||||
return
|
||||
|
||||
// BUG FIX (gt-vwjz6): Close hooked beads before clearing the hook.
|
||||
// Previously, the agent's hook_bead slot was cleared but the hooked bead itself
|
||||
// stayed status=hooked forever. Now we close the hooked bead before clearing.
|
||||
if agentBead, err := bd.Show(agentBeadID); err == nil && agentBead.HookBead != "" {
|
||||
hookedBeadID := agentBead.HookBead
|
||||
// Only close if the hooked bead exists and is still in "hooked" status
|
||||
if hookedBead, err := bd.Show(hookedBeadID); err == nil && hookedBead.Status == beads.StatusHooked {
|
||||
if err := bd.Close(hookedBeadID); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: couldn't close hooked bead %s: %v\n", hookedBeadID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clear the hook (work is done) - gt-zecmc
|
||||
if err := bd.ClearHookBead(agentBeadID); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: couldn't clear agent %s hook: %v\n", agentBeadID, err)
|
||||
}
|
||||
|
||||
// Only set non-observable states - "stuck" and "awaiting-gate" are intentional
|
||||
// agent decisions that can't be discovered from tmux. Skip "done" and "idle"
|
||||
// since those are observable (no session = done, session + no hook = idle).
|
||||
switch exitType {
|
||||
case ExitEscalated:
|
||||
// "stuck" = agent is requesting help - not observable from tmux
|
||||
if _, err := bd.Run("agent", "state", agentBeadID, "stuck"); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: couldn't set agent %s to stuck: %v\n", agentBeadID, err)
|
||||
}
|
||||
case ExitPhaseComplete:
|
||||
// "awaiting-gate" = agent is waiting for external trigger - not observable
|
||||
if _, err := bd.Run("agent", "state", agentBeadID, "awaiting-gate"); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: couldn't set agent %s to awaiting-gate: %v\n", agentBeadID, err)
|
||||
}
|
||||
// ExitCompleted and ExitDeferred don't set state - observable from tmux
|
||||
}
|
||||
|
||||
// ZFC #10: Self-report cleanup status
|
||||
// Compute git state and report so Witness can decide removal safety
|
||||
cleanupStatus := computeCleanupStatus(cwd)
|
||||
if cleanupStatus != "" {
|
||||
if err := bd.UpdateAgentCleanupStatus(agentBeadID, cleanupStatus); err != nil {
|
||||
if cleanupStatus != polecat.CleanupUnknown {
|
||||
if err := bd.UpdateAgentCleanupStatus(agentBeadID, string(cleanupStatus)); err != nil {
|
||||
// Log warning instead of silent ignore
|
||||
fmt.Fprintf(os.Stderr, "Warning: couldn't update agent %s cleanup status: %v\n", agentBeadID, err)
|
||||
return
|
||||
@@ -461,23 +462,23 @@ func getDispatcherFromBead(cwd, issueID string) string {
|
||||
|
||||
// computeCleanupStatus checks git state and returns the cleanup status.
|
||||
// Returns the most critical issue: has_unpushed > has_stash > has_uncommitted > clean
|
||||
func computeCleanupStatus(cwd string) string {
|
||||
func computeCleanupStatus(cwd string) polecat.CleanupStatus {
|
||||
g := git.NewGit(cwd)
|
||||
status, err := g.CheckUncommittedWork()
|
||||
if err != nil {
|
||||
// If we can't check, report unknown - Witness should be cautious
|
||||
return "unknown"
|
||||
return polecat.CleanupUnknown
|
||||
}
|
||||
|
||||
// Check in priority order (most critical first)
|
||||
if status.UnpushedCommits > 0 {
|
||||
return "has_unpushed"
|
||||
return polecat.CleanupUnpushed
|
||||
}
|
||||
if status.StashCount > 0 {
|
||||
return "has_stash"
|
||||
return polecat.CleanupStash
|
||||
}
|
||||
if status.HasUncommittedChanges {
|
||||
return "has_uncommitted"
|
||||
return polecat.CleanupUncommitted
|
||||
}
|
||||
return "clean"
|
||||
return polecat.CleanupClean
|
||||
}
|
||||
|
||||
@@ -159,9 +159,9 @@ func TestDoneBeadsInitBothCodePaths(t *testing.T) {
|
||||
}
|
||||
|
||||
// TestDoneRedirectChain verifies behavior with chained redirects.
|
||||
// ResolveBeadsDir follows exactly one level of redirect by design - it does NOT
|
||||
// follow chains transitively. This is intentional: chains typically indicate
|
||||
// misconfiguration (e.g., a redirect file that shouldn't exist).
|
||||
// ResolveBeadsDir follows chains up to depth 3 as a safety net for legacy configs.
|
||||
// SetupRedirect avoids creating chains (bd CLI doesn't support them), but if
|
||||
// chains exist we follow them to the final destination.
|
||||
func TestDoneRedirectChain(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
@@ -189,14 +189,15 @@ func TestDoneRedirectChain(t *testing.T) {
|
||||
t.Fatalf("write worktree redirect: %v", err)
|
||||
}
|
||||
|
||||
// ResolveBeadsDir follows exactly one level - stops at intermediate
|
||||
// (A warning is printed about the chain, but intermediate is returned)
|
||||
// ResolveBeadsDir follows chains up to depth 3 as a safety net.
|
||||
// Note: SetupRedirect avoids creating chains (bd CLI doesn't support them),
|
||||
// but if chains exist from legacy configs, we follow them to the final destination.
|
||||
resolved := beads.ResolveBeadsDir(worktreeDir)
|
||||
|
||||
// Should resolve to intermediate (one level), NOT canonical (two levels)
|
||||
if resolved != intermediateBeadsDir {
|
||||
t.Errorf("ResolveBeadsDir should follow one level only: got %s, want %s",
|
||||
resolved, intermediateBeadsDir)
|
||||
// Should resolve to canonical (follows the full chain)
|
||||
if resolved != canonicalBeadsDir {
|
||||
t.Errorf("ResolveBeadsDir should follow chain to final destination: got %s, want %s",
|
||||
resolved, canonicalBeadsDir)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
54
internal/cmd/enable.go
Normal file
54
internal/cmd/enable.go
Normal file
@@ -0,0 +1,54 @@
|
||||
// ABOUTME: Command to enable Gas Town system-wide.
|
||||
// ABOUTME: Sets the global state to enabled for all agentic coding tools.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/state"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
)
|
||||
|
||||
var enableCmd = &cobra.Command{
|
||||
Use: "enable",
|
||||
GroupID: GroupConfig,
|
||||
Short: "Enable Gas Town system-wide",
|
||||
Long: `Enable Gas Town for all agentic coding tools.
|
||||
|
||||
When enabled:
|
||||
- Shell hooks set GT_TOWN_ROOT and GT_RIG environment variables
|
||||
- Claude Code SessionStart hooks run 'gt prime' for context
|
||||
- Git repos are auto-registered as rigs (configurable)
|
||||
|
||||
Use 'gt disable' to turn off. Use 'gt status --global' to check state.
|
||||
|
||||
Environment overrides:
|
||||
GASTOWN_DISABLED=1 - Disable for current session only
|
||||
GASTOWN_ENABLED=1 - Enable for current session only`,
|
||||
RunE: runEnable,
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(enableCmd)
|
||||
}
|
||||
|
||||
func runEnable(cmd *cobra.Command, args []string) error {
|
||||
if err := state.Enable(Version); err != nil {
|
||||
return fmt.Errorf("enabling Gas Town: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Gas Town enabled\n", style.Success.Render("✓"))
|
||||
fmt.Println()
|
||||
fmt.Println("Gas Town will now:")
|
||||
fmt.Println(" • Inject context into Claude Code sessions")
|
||||
fmt.Println(" • Set GT_TOWN_ROOT and GT_RIG environment variables")
|
||||
fmt.Println(" • Auto-register git repos as rigs (if configured)")
|
||||
fmt.Println()
|
||||
fmt.Printf("Use %s to disable, %s to check status\n",
|
||||
style.Dim.Render("gt disable"),
|
||||
style.Dim.Render("gt status --global"))
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -182,19 +182,9 @@ func runHandoff(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Report agent state as stopped (ZFC: agents self-report state)
|
||||
cwd, _ := os.Getwd()
|
||||
if townRoot, _ := workspace.FindFromCwd(); townRoot != "" {
|
||||
if roleInfo, err := GetRoleWithContext(cwd, townRoot); err == nil {
|
||||
reportAgentState(RoleContext{
|
||||
Role: roleInfo.Role,
|
||||
Rig: roleInfo.Rig,
|
||||
Polecat: roleInfo.Polecat,
|
||||
TownRoot: townRoot,
|
||||
WorkDir: cwd,
|
||||
}, "stopped")
|
||||
}
|
||||
}
|
||||
// NOTE: reportAgentState("stopped") removed (gt-zecmc)
|
||||
// Agent liveness is observable from tmux - no need to record it in bead.
|
||||
// "Discover, don't track" principle: reality is truth, state is derived.
|
||||
|
||||
// Clear scrollback history before respawn (resets copy-mode from [0/N] to [0/0])
|
||||
if err := t.ClearHistory(pane); err != nil {
|
||||
@@ -323,6 +313,17 @@ func resolvePathToSession(path string) (string, error) {
|
||||
return "", fmt.Errorf("cannot parse path '%s' - expected <rig>/<polecat>, <rig>/crew/<name>, <rig>/witness, or <rig>/refinery", path)
|
||||
}
|
||||
|
||||
// claudeEnvVars lists the Claude-related environment variables to propagate
|
||||
// during handoff. These vars aren't inherited by tmux respawn-pane's fresh shell.
|
||||
var claudeEnvVars = []string{
|
||||
// Claude API and config
|
||||
"ANTHROPIC_API_KEY",
|
||||
"CLAUDE_CODE_USE_BEDROCK",
|
||||
// AWS vars for Bedrock
|
||||
"AWS_PROFILE",
|
||||
"AWS_REGION",
|
||||
}
|
||||
|
||||
// buildRestartCommand creates the command to run when respawning a session's pane.
|
||||
// This needs to be the actual command to execute (e.g., claude), not a session attach command.
|
||||
// The command includes a cd to the correct working directory for the role.
|
||||
@@ -345,14 +346,36 @@ func buildRestartCommand(sessionName string) (string, error) {
|
||||
// For respawn-pane, we:
|
||||
// 1. cd to the right directory (role's canonical home)
|
||||
// 2. export GT_ROLE and BD_ACTOR so role detection works correctly
|
||||
// 3. run claude with "gt prime" as initial prompt (triggers GUPP)
|
||||
// 3. export Claude-related env vars (not inherited by fresh shell)
|
||||
// 4. run claude with "gt prime" as initial prompt (triggers GUPP)
|
||||
// Use exec to ensure clean process replacement.
|
||||
// IMPORTANT: Passing "gt prime" as argument injects it as the first prompt,
|
||||
// which triggers the agent to execute immediately. Without this, agents
|
||||
// wait for user input despite all GUPP prompting in hooks.
|
||||
runtimeCmd := config.GetRuntimeCommandWithPrompt("", "gt prime")
|
||||
|
||||
// Build environment exports - role vars first, then Claude vars
|
||||
var exports []string
|
||||
if gtRole != "" {
|
||||
return fmt.Sprintf("cd %s && export GT_ROLE=%s BD_ACTOR=%s GIT_AUTHOR_NAME=%s && exec %s", workDir, gtRole, gtRole, gtRole, runtimeCmd), nil
|
||||
runtimeConfig := config.LoadRuntimeConfig("")
|
||||
exports = append(exports, "GT_ROLE="+gtRole)
|
||||
exports = append(exports, "BD_ACTOR="+gtRole)
|
||||
exports = append(exports, "GIT_AUTHOR_NAME="+gtRole)
|
||||
if runtimeConfig.Session != nil && runtimeConfig.Session.SessionIDEnv != "" {
|
||||
exports = append(exports, "GT_SESSION_ID_ENV="+runtimeConfig.Session.SessionIDEnv)
|
||||
}
|
||||
}
|
||||
|
||||
// Add Claude-related env vars from current environment
|
||||
for _, name := range claudeEnvVars {
|
||||
if val := os.Getenv(name); val != "" {
|
||||
// Shell-escape the value in case it contains special chars
|
||||
exports = append(exports, fmt.Sprintf("%s=%q", name, val))
|
||||
}
|
||||
}
|
||||
|
||||
if len(exports) > 0 {
|
||||
return fmt.Sprintf("cd %s && export %s && exec %s", workDir, strings.Join(exports, " "), runtimeCmd), nil
|
||||
}
|
||||
return fmt.Sprintf("cd %s && exec %s", workDir, runtimeCmd), nil
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/events"
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
)
|
||||
|
||||
@@ -172,7 +173,7 @@ func runHook(_ *cobra.Command, args []string) error {
|
||||
// Close completed molecule bead (use bd close --force for pinned)
|
||||
closeArgs := []string{"close", existing.ID, "--force",
|
||||
"--reason=Auto-replaced by gt hook (molecule complete)"}
|
||||
if sessionID := os.Getenv("CLAUDE_SESSION_ID"); sessionID != "" {
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
closeArgs = append(closeArgs, "--session="+sessionID)
|
||||
}
|
||||
closeCmd := exec.Command("bd", closeArgs...)
|
||||
|
||||
@@ -105,12 +105,14 @@ func discoverHooks(townRoot string) ([]HookInfo, error) {
|
||||
var hooks []HookInfo
|
||||
|
||||
// Scan known locations for .claude/settings.json
|
||||
// NOTE: Mayor settings are at ~/gt/mayor/.claude/, NOT ~/gt/.claude/
|
||||
// Settings at town root would pollute all child workspaces.
|
||||
locations := []struct {
|
||||
path string
|
||||
agent string
|
||||
}{
|
||||
{filepath.Join(townRoot, "mayor", ".claude", "settings.json"), "mayor/"},
|
||||
{filepath.Join(townRoot, ".claude", "settings.json"), "town-root"},
|
||||
{filepath.Join(townRoot, "deacon", ".claude", "settings.json"), "deacon/"},
|
||||
}
|
||||
|
||||
// Scan rigs
|
||||
@@ -137,7 +139,7 @@ func discoverHooks(townRoot string) ([]HookInfo, error) {
|
||||
polecatsDir := filepath.Join(rigPath, "polecats")
|
||||
if polecats, err := os.ReadDir(polecatsDir); err == nil {
|
||||
for _, p := range polecats {
|
||||
if p.IsDir() {
|
||||
if p.IsDir() && !strings.HasPrefix(p.Name(), ".") {
|
||||
locations = append(locations, struct {
|
||||
path string
|
||||
agent string
|
||||
|
||||
@@ -16,9 +16,12 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/deps"
|
||||
"github.com/steveyegge/gastown/internal/formula"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/shell"
|
||||
"github.com/steveyegge/gastown/internal/state"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/templates"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
"github.com/steveyegge/gastown/internal/wrappers"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -30,6 +33,8 @@ var (
|
||||
installGit bool
|
||||
installGitHub string
|
||||
installPublic bool
|
||||
installShell bool
|
||||
installWrappers bool
|
||||
)
|
||||
|
||||
var installCmd = &cobra.Command{
|
||||
@@ -55,7 +60,8 @@ Examples:
|
||||
gt install ~/gt --no-beads # Skip .beads/ initialization
|
||||
gt install ~/gt --git # Also init git with .gitignore
|
||||
gt install ~/gt --github=user/repo # Create private GitHub repo (default)
|
||||
gt install ~/gt --github=user/repo --public # Create public GitHub repo`,
|
||||
gt install ~/gt --github=user/repo --public # Create public GitHub repo
|
||||
gt install ~/gt --shell # Install shell integration (sets GT_TOWN_ROOT/GT_RIG)`,
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: runInstall,
|
||||
}
|
||||
@@ -69,6 +75,8 @@ func init() {
|
||||
installCmd.Flags().BoolVar(&installGit, "git", false, "Initialize git with .gitignore")
|
||||
installCmd.Flags().StringVar(&installGitHub, "github", "", "Create GitHub repo (format: owner/repo, private by default)")
|
||||
installCmd.Flags().BoolVar(&installPublic, "public", false, "Make GitHub repo public (use with --github)")
|
||||
installCmd.Flags().BoolVar(&installShell, "shell", false, "Install shell integration (sets GT_TOWN_ROOT/GT_RIG env vars)")
|
||||
installCmd.Flags().BoolVar(&installWrappers, "wrappers", false, "Install gt-codex/gt-opencode wrapper scripts to ~/bin/")
|
||||
rootCmd.AddCommand(installCmd)
|
||||
}
|
||||
|
||||
@@ -172,20 +180,46 @@ func runInstall(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
fmt.Printf(" ✓ Created mayor/rigs.json\n")
|
||||
|
||||
// Create Mayor CLAUDE.md at HQ root (Mayor runs from there)
|
||||
if err := createMayorCLAUDEmd(absPath, absPath); err != nil {
|
||||
// Create Mayor CLAUDE.md at mayor/ (Mayor's canonical home)
|
||||
// IMPORTANT: CLAUDE.md must be in ~/gt/mayor/, NOT ~/gt/
|
||||
// CLAUDE.md at town root would be inherited by ALL agents via directory traversal,
|
||||
// causing crew/polecat/etc to receive Mayor-specific instructions.
|
||||
if err := createMayorCLAUDEmd(mayorDir, absPath); err != nil {
|
||||
fmt.Printf(" %s Could not create CLAUDE.md: %v\n", style.Dim.Render("⚠"), err)
|
||||
} else {
|
||||
fmt.Printf(" ✓ Created CLAUDE.md\n")
|
||||
fmt.Printf(" ✓ Created mayor/CLAUDE.md\n")
|
||||
}
|
||||
|
||||
// Ensure Mayor has Claude settings with SessionStart hooks.
|
||||
// This ensures gt prime runs on Claude startup, which outputs the Mayor
|
||||
// delegation protocol - critical for preventing direct implementation.
|
||||
if err := claude.EnsureSettingsForRole(absPath, "mayor"); err != nil {
|
||||
fmt.Printf(" %s Could not create .claude/settings.json: %v\n", style.Dim.Render("⚠"), err)
|
||||
// Create mayor settings (mayor runs from ~/gt/mayor/)
|
||||
// IMPORTANT: Settings must be in ~/gt/mayor/.claude/, NOT ~/gt/.claude/
|
||||
// Settings at town root would be found by ALL agents via directory traversal,
|
||||
// causing crew/polecat/etc to cd to town root before running commands.
|
||||
// mayorDir already defined above
|
||||
if err := os.MkdirAll(mayorDir, 0755); err != nil {
|
||||
fmt.Printf(" %s Could not create mayor directory: %v\n", style.Dim.Render("⚠"), err)
|
||||
} else if err := claude.EnsureSettingsForRole(mayorDir, "mayor"); err != nil {
|
||||
fmt.Printf(" %s Could not create mayor settings: %v\n", style.Dim.Render("⚠"), err)
|
||||
} else {
|
||||
fmt.Printf(" ✓ Created .claude/settings.json\n")
|
||||
fmt.Printf(" ✓ Created mayor/.claude/settings.json\n")
|
||||
}
|
||||
|
||||
// Create deacon directory and settings (deacon runs from ~/gt/deacon/)
|
||||
deaconDir := filepath.Join(absPath, "deacon")
|
||||
if err := os.MkdirAll(deaconDir, 0755); err != nil {
|
||||
fmt.Printf(" %s Could not create deacon directory: %v\n", style.Dim.Render("⚠"), err)
|
||||
} else if err := claude.EnsureSettingsForRole(deaconDir, "deacon"); err != nil {
|
||||
fmt.Printf(" %s Could not create deacon settings: %v\n", style.Dim.Render("⚠"), err)
|
||||
} else {
|
||||
fmt.Printf(" ✓ Created deacon/.claude/settings.json\n")
|
||||
}
|
||||
|
||||
// Initialize git BEFORE beads so that bd can compute repository fingerprint.
|
||||
// The fingerprint is required for the daemon to start properly.
|
||||
if installGit || installGitHub != "" {
|
||||
fmt.Println()
|
||||
if err := InitGitForHarness(absPath, installGitHub, !installPublic); err != nil {
|
||||
return fmt.Errorf("git initialization failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize town-level beads database (optional)
|
||||
@@ -234,11 +268,26 @@ func runInstall(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf(" ✓ Created .claude/commands/ (slash commands for all agents)\n")
|
||||
}
|
||||
|
||||
// Initialize git if requested (--git or --github implies --git)
|
||||
if installGit || installGitHub != "" {
|
||||
if installShell {
|
||||
fmt.Println()
|
||||
if err := InitGitForHarness(absPath, installGitHub, !installPublic); err != nil {
|
||||
return fmt.Errorf("git initialization failed: %w", err)
|
||||
if err := shell.Install(); err != nil {
|
||||
fmt.Printf(" %s Could not install shell integration: %v\n", style.Dim.Render("⚠"), err)
|
||||
} else {
|
||||
fmt.Printf(" ✓ Installed shell integration (%s)\n", shell.RCFilePath(shell.DetectShell()))
|
||||
}
|
||||
if err := state.Enable(Version); err != nil {
|
||||
fmt.Printf(" %s Could not enable Gas Town: %v\n", style.Dim.Render("⚠"), err)
|
||||
} else {
|
||||
fmt.Printf(" ✓ Enabled Gas Town globally\n")
|
||||
}
|
||||
}
|
||||
|
||||
if installWrappers {
|
||||
fmt.Println()
|
||||
if err := wrappers.Install(); err != nil {
|
||||
fmt.Printf(" %s Could not install wrapper scripts: %v\n", style.Dim.Render("⚠"), err)
|
||||
} else {
|
||||
fmt.Printf(" ✓ Installed gt-codex and gt-opencode to %s\n", wrappers.BinDir())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -252,36 +301,22 @@ func runInstall(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
fmt.Printf(" %d. Add a rig: %s\n", step, style.Dim.Render("gt rig add <name> <git-url>"))
|
||||
step++
|
||||
fmt.Printf(" %d. (Optional) Configure agents: %s\n", step, style.Dim.Render("gt config agent list"))
|
||||
step++
|
||||
fmt.Printf(" %d. Enter the Mayor's office: %s\n", step, style.Dim.Render("gt mayor attach"))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createMayorCLAUDEmd(hqRoot, townRoot string) error {
|
||||
tmpl, err := templates.New()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get town name for session names
|
||||
func createMayorCLAUDEmd(mayorDir, townRoot string) error {
|
||||
townName, _ := workspace.GetTownName(townRoot)
|
||||
|
||||
data := templates.RoleData{
|
||||
Role: "mayor",
|
||||
TownRoot: townRoot,
|
||||
TownName: townName,
|
||||
WorkDir: hqRoot,
|
||||
MayorSession: session.MayorSessionName(),
|
||||
DeaconSession: session.DeaconSessionName(),
|
||||
}
|
||||
|
||||
content, err := tmpl.RenderRole("mayor", data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
claudePath := filepath.Join(hqRoot, "CLAUDE.md")
|
||||
return os.WriteFile(claudePath, []byte(content), 0644)
|
||||
return templates.CreateMayorCLAUDEmd(
|
||||
mayorDir,
|
||||
townRoot,
|
||||
townName,
|
||||
session.MayorSessionName(),
|
||||
session.DeaconSessionName(),
|
||||
)
|
||||
}
|
||||
|
||||
func writeJSON(path string, data interface{}) error {
|
||||
@@ -309,6 +344,16 @@ func initTownBeads(townPath string) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Configure custom types for Gas Town (agent, role, rig, convoy).
|
||||
// These were extracted from beads core in v0.46.0 and now require explicit config.
|
||||
customTypes := "agent,role,rig,convoy,event"
|
||||
configCmd := exec.Command("bd", "config", "set", "types.custom", customTypes)
|
||||
configCmd.Dir = townPath
|
||||
if configOutput, configErr := configCmd.CombinedOutput(); configErr != nil {
|
||||
// Non-fatal: older beads versions don't need this, newer ones do
|
||||
fmt.Printf(" %s Could not set custom types: %s\n", style.Dim.Render("⚠"), strings.TrimSpace(string(configOutput)))
|
||||
}
|
||||
|
||||
// Ensure database has repository fingerprint (GH #25).
|
||||
// This is idempotent - safe on both new and legacy (pre-0.17.5) databases.
|
||||
// Without fingerprint, the bd daemon fails to start silently.
|
||||
@@ -317,6 +362,13 @@ func initTownBeads(townPath string) error {
|
||||
fmt.Printf(" %s Could not verify repo fingerprint: %v\n", style.Dim.Render("⚠"), err)
|
||||
}
|
||||
|
||||
// Ensure routes.jsonl has an explicit town-level mapping for hq-* beads.
|
||||
// This keeps hq-* operations stable even when invoked from rig worktrees.
|
||||
if err := beads.AppendRoute(townPath, beads.Route{Prefix: "hq-", Path: "."}); err != nil {
|
||||
// Non-fatal: routing still works in many contexts, but explicit mapping is preferred.
|
||||
fmt.Printf(" %s Could not update routes.jsonl: %v\n", style.Dim.Render("⚠"), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -333,11 +385,25 @@ func ensureRepoFingerprint(beadsPath string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureCustomTypes registers Gas Town custom issue types with beads.
|
||||
// Beads core only supports built-in types (bug, feature, task, etc.).
|
||||
// Gas Town needs custom types: agent, role, rig, convoy, slot.
|
||||
// This is idempotent - safe to call multiple times.
|
||||
func ensureCustomTypes(beadsPath string) error {
|
||||
cmd := exec.Command("bd", "config", "set", "types.custom", "agent,role,rig,convoy,slot")
|
||||
cmd.Dir = beadsPath
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("bd config set types.custom: %s", strings.TrimSpace(string(output)))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// initTownAgentBeads creates town-level agent and role beads using hq- prefix.
|
||||
// This creates:
|
||||
// - hq-mayor, hq-deacon (agent beads for town-level agents)
|
||||
// - hq-mayor-role, hq-deacon-role, hq-witness-role, hq-refinery-role,
|
||||
// hq-polecat-role, hq-crew-role (role definition beads)
|
||||
// - hq-mayor, hq-deacon (agent beads for town-level agents)
|
||||
// - hq-mayor-role, hq-deacon-role, hq-witness-role, hq-refinery-role,
|
||||
// hq-polecat-role, hq-crew-role (role definition beads)
|
||||
//
|
||||
// These beads are stored in town beads (~/gt/.beads/) and are shared across all rigs.
|
||||
// Rig-level agent beads (witness, refinery) are created by gt rig add in rig beads.
|
||||
@@ -354,6 +420,13 @@ func ensureRepoFingerprint(beadsPath string) error {
|
||||
func initTownAgentBeads(townPath string) error {
|
||||
bd := beads.New(townPath)
|
||||
|
||||
// bd init doesn't enable "custom" issue types by default, but Gas Town uses
|
||||
// agent/role beads during install and runtime. Ensure these types are enabled
|
||||
// before attempting to create any town-level system beads.
|
||||
if err := ensureBeadsCustomTypes(townPath, []string{"agent", "role", "rig", "convoy", "slot"}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Role beads (global templates)
|
||||
roleDefs := []struct {
|
||||
id string
|
||||
@@ -472,3 +545,17 @@ func initTownAgentBeads(townPath string) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func ensureBeadsCustomTypes(workDir string, types []string) error {
|
||||
if len(types) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
cmd := exec.Command("bd", "config", "set", "types.custom", strings.Join(types, ","))
|
||||
cmd.Dir = workDir
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("bd config set types.custom failed: %s", strings.TrimSpace(string(output)))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -61,9 +61,18 @@ func TestInstallCreatesCorrectStructure(t *testing.T) {
|
||||
t.Errorf("rigs.json should be empty, got %d rigs", len(rigsConfig.Rigs))
|
||||
}
|
||||
|
||||
// Verify CLAUDE.md exists
|
||||
claudePath := filepath.Join(hqPath, "CLAUDE.md")
|
||||
assertFileExists(t, claudePath, "CLAUDE.md")
|
||||
// Verify CLAUDE.md exists in mayor/ (not town root, to avoid inheritance pollution)
|
||||
claudePath := filepath.Join(hqPath, "mayor", "CLAUDE.md")
|
||||
assertFileExists(t, claudePath, "mayor/CLAUDE.md")
|
||||
|
||||
// Verify Claude settings exist in mayor/.claude/ (not town root/.claude/)
|
||||
// Mayor settings go here to avoid polluting child workspaces via directory traversal
|
||||
mayorSettingsPath := filepath.Join(hqPath, "mayor", ".claude", "settings.json")
|
||||
assertFileExists(t, mayorSettingsPath, "mayor/.claude/settings.json")
|
||||
|
||||
// Verify deacon settings exist in deacon/.claude/
|
||||
deaconSettingsPath := filepath.Join(hqPath, "deacon", ".claude", "settings.json")
|
||||
assertFileExists(t, deaconSettingsPath, "deacon/.claude/settings.json")
|
||||
}
|
||||
|
||||
// TestInstallBeadsHasCorrectPrefix validates that beads is initialized
|
||||
@@ -134,6 +143,21 @@ func TestInstallTownRoleSlots(t *testing.T) {
|
||||
t.Fatalf("gt install failed: %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
// Log install output for CI debugging
|
||||
t.Logf("gt install output:\n%s", output)
|
||||
|
||||
// Verify beads directory was created
|
||||
beadsDir := filepath.Join(hqPath, ".beads")
|
||||
if _, err := os.Stat(beadsDir); os.IsNotExist(err) {
|
||||
t.Fatalf("beads directory not created at %s", beadsDir)
|
||||
}
|
||||
|
||||
// List beads for debugging
|
||||
listCmd := exec.Command("bd", "--no-daemon", "list", "--type=agent")
|
||||
listCmd.Dir = hqPath
|
||||
listOutput, _ := listCmd.CombinedOutput()
|
||||
t.Logf("bd list --type=agent output:\n%s", listOutput)
|
||||
|
||||
assertSlotValue(t, hqPath, "hq-mayor", "role", "hq-mayor-role")
|
||||
assertSlotValue(t, hqPath, "hq-deacon", "role", "hq-deacon-role")
|
||||
}
|
||||
|
||||
@@ -1,24 +1,14 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/mayor"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
|
||||
// getMayorSessionName returns the Mayor session name.
|
||||
func getMayorSessionName() string {
|
||||
return session.MayorSessionName()
|
||||
}
|
||||
|
||||
var mayorCmd = &cobra.Command{
|
||||
Use: "mayor",
|
||||
Aliases: []string{"may"},
|
||||
@@ -31,6 +21,8 @@ The Mayor is the global coordinator for Gas Town, running as a persistent
|
||||
tmux session. Use the subcommands to start, stop, attach, and check status.`,
|
||||
}
|
||||
|
||||
var mayorAgentOverride string
|
||||
|
||||
var mayorStartCmd = &cobra.Command{
|
||||
Use: "start",
|
||||
Short: "Start the Mayor session",
|
||||
@@ -84,24 +76,38 @@ func init() {
|
||||
mayorCmd.AddCommand(mayorStatusCmd)
|
||||
mayorCmd.AddCommand(mayorRestartCmd)
|
||||
|
||||
mayorStartCmd.Flags().StringVar(&mayorAgentOverride, "agent", "", "Agent alias to run the Mayor with (overrides town default)")
|
||||
mayorAttachCmd.Flags().StringVar(&mayorAgentOverride, "agent", "", "Agent alias to run the Mayor with (overrides town default)")
|
||||
mayorRestartCmd.Flags().StringVar(&mayorAgentOverride, "agent", "", "Agent alias to run the Mayor with (overrides town default)")
|
||||
|
||||
rootCmd.AddCommand(mayorCmd)
|
||||
}
|
||||
|
||||
func runMayorStart(cmd *cobra.Command, args []string) error {
|
||||
t := tmux.NewTmux()
|
||||
|
||||
sessionName := getMayorSessionName()
|
||||
|
||||
// Check if session already exists
|
||||
running, err := t.HasSession(sessionName)
|
||||
// getMayorManager returns a mayor manager for the current workspace.
|
||||
func getMayorManager() (*mayor.Manager, error) {
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking session: %w", err)
|
||||
return nil, fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
if running {
|
||||
return fmt.Errorf("Mayor session already running. Attach with: gt mayor attach")
|
||||
return mayor.NewManager(townRoot), nil
|
||||
}
|
||||
|
||||
// getMayorSessionName returns the Mayor session name.
|
||||
func getMayorSessionName() string {
|
||||
return mayor.SessionName()
|
||||
}
|
||||
|
||||
func runMayorStart(cmd *cobra.Command, args []string) error {
|
||||
mgr, err := getMayorManager()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := startMayorSession(t, sessionName); err != nil {
|
||||
fmt.Println("Starting Mayor session...")
|
||||
if err := mgr.Start(mayorAgentOverride); err != nil {
|
||||
if err == mayor.ErrAlreadyRunning {
|
||||
return fmt.Errorf("Mayor session already running. Attach with: gt mayor attach")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -112,83 +118,18 @@ func runMayorStart(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// startMayorSession creates and initializes the Mayor tmux session.
|
||||
func startMayorSession(t *tmux.Tmux, sessionName string) error {
|
||||
// Find workspace root
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
// Create session in workspace root
|
||||
fmt.Println("Starting Mayor session...")
|
||||
if err := t.NewSession(sessionName, townRoot); err != nil {
|
||||
return fmt.Errorf("creating session: %w", err)
|
||||
}
|
||||
|
||||
// Set environment (non-fatal: session works without these)
|
||||
_ = t.SetEnvironment(sessionName, "GT_ROLE", "mayor")
|
||||
_ = t.SetEnvironment(sessionName, "BD_ACTOR", "mayor")
|
||||
|
||||
// Apply Mayor theme (non-fatal: theming failure doesn't affect operation)
|
||||
// Note: ConfigureGasTownSession includes cycle bindings
|
||||
theme := tmux.MayorTheme()
|
||||
_ = t.ConfigureGasTownSession(sessionName, theme, "", "Mayor", "coordinator")
|
||||
|
||||
// Launch Claude - the startup hook handles 'gt prime' automatically
|
||||
// Use SendKeysDelayed to allow shell initialization after NewSession
|
||||
// Export GT_ROLE and BD_ACTOR in the command since tmux SetEnvironment only affects new panes
|
||||
// Mayor uses default runtime config (empty rigPath) since it's not rig-specific
|
||||
claudeCmd := config.BuildAgentStartupCommand("mayor", "mayor", "", "")
|
||||
if err := t.SendKeysDelayed(sessionName, claudeCmd, 200); err != nil {
|
||||
return fmt.Errorf("sending command: %w", err)
|
||||
}
|
||||
|
||||
// Wait for Claude to start (non-fatal)
|
||||
if err := t.WaitForCommand(sessionName, constants.SupportedShells, constants.ClaudeStartTimeout); err != nil {
|
||||
// Non-fatal
|
||||
}
|
||||
time.Sleep(constants.ShutdownNotifyDelay)
|
||||
|
||||
// Inject startup nudge for predecessor discovery via /resume
|
||||
_ = session.StartupNudge(t, sessionName, session.StartupNudgeConfig{
|
||||
Recipient: "mayor",
|
||||
Sender: "human",
|
||||
Topic: "cold-start",
|
||||
}) // Non-fatal
|
||||
|
||||
// GUPP: Gas Town Universal Propulsion Principle
|
||||
// Send the propulsion nudge to trigger autonomous coordination.
|
||||
// Wait for beacon to be fully processed (needs to be separate prompt)
|
||||
time.Sleep(2 * time.Second)
|
||||
_ = t.NudgeSession(sessionName, session.PropulsionNudgeForRole("mayor", townRoot)) // Non-fatal
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func runMayorStop(cmd *cobra.Command, args []string) error {
|
||||
t := tmux.NewTmux()
|
||||
|
||||
sessionName := getMayorSessionName()
|
||||
|
||||
// Check if session exists
|
||||
running, err := t.HasSession(sessionName)
|
||||
mgr, err := getMayorManager()
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking session: %w", err)
|
||||
}
|
||||
if !running {
|
||||
return errors.New("Mayor session is not running")
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("Stopping Mayor session...")
|
||||
|
||||
// Try graceful shutdown first (best-effort interrupt)
|
||||
_ = t.SendKeysRaw(sessionName, "C-c")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Kill the session
|
||||
if err := t.KillSession(sessionName); err != nil {
|
||||
return fmt.Errorf("killing session: %w", err)
|
||||
if err := mgr.Stop(); err != nil {
|
||||
if err == mayor.ErrNotRunning {
|
||||
return fmt.Errorf("Mayor session is not running")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("%s Mayor session stopped.\n", style.Bold.Render("✓"))
|
||||
@@ -196,84 +137,68 @@ func runMayorStop(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
func runMayorAttach(cmd *cobra.Command, args []string) error {
|
||||
t := tmux.NewTmux()
|
||||
mgr, err := getMayorManager()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sessionName := getMayorSessionName()
|
||||
|
||||
// Check if session exists
|
||||
running, err := t.HasSession(sessionName)
|
||||
running, err := mgr.IsRunning()
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking session: %w", err)
|
||||
}
|
||||
if !running {
|
||||
// Auto-start if not running
|
||||
fmt.Println("Mayor session not running, starting...")
|
||||
if err := startMayorSession(t, sessionName); err != nil {
|
||||
if err := mgr.Start(mayorAgentOverride); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Use shared attach helper (smart: links if inside tmux, attaches if outside)
|
||||
return attachToTmuxSession(sessionName)
|
||||
return attachToTmuxSession(mgr.SessionName())
|
||||
}
|
||||
|
||||
func runMayorStatus(cmd *cobra.Command, args []string) error {
|
||||
t := tmux.NewTmux()
|
||||
|
||||
sessionName := getMayorSessionName()
|
||||
|
||||
running, err := t.HasSession(sessionName)
|
||||
mgr, err := getMayorManager()
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking session: %w", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if running {
|
||||
// Get session info for more details
|
||||
info, err := t.GetSessionInfo(sessionName)
|
||||
if err == nil {
|
||||
status := "detached"
|
||||
if info.Attached {
|
||||
status = "attached"
|
||||
}
|
||||
info, err := mgr.Status()
|
||||
if err != nil {
|
||||
if err == mayor.ErrNotRunning {
|
||||
fmt.Printf("%s Mayor session is %s\n",
|
||||
style.Bold.Render("●"),
|
||||
style.Bold.Render("running"))
|
||||
fmt.Printf(" Status: %s\n", status)
|
||||
fmt.Printf(" Created: %s\n", info.Created)
|
||||
fmt.Printf("\nAttach with: %s\n", style.Dim.Render("gt mayor attach"))
|
||||
} else {
|
||||
fmt.Printf("%s Mayor session is %s\n",
|
||||
style.Bold.Render("●"),
|
||||
style.Bold.Render("running"))
|
||||
style.Dim.Render("○"),
|
||||
"not running")
|
||||
fmt.Printf("\nStart with: %s\n", style.Dim.Render("gt mayor start"))
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("%s Mayor session is %s\n",
|
||||
style.Dim.Render("○"),
|
||||
"not running")
|
||||
fmt.Printf("\nStart with: %s\n", style.Dim.Render("gt mayor start"))
|
||||
return fmt.Errorf("checking status: %w", err)
|
||||
}
|
||||
|
||||
status := "detached"
|
||||
if info.Attached {
|
||||
status = "attached"
|
||||
}
|
||||
fmt.Printf("%s Mayor session is %s\n",
|
||||
style.Bold.Render("●"),
|
||||
style.Bold.Render("running"))
|
||||
fmt.Printf(" Status: %s\n", status)
|
||||
fmt.Printf(" Created: %s\n", info.Created)
|
||||
fmt.Printf("\nAttach with: %s\n", style.Dim.Render("gt mayor attach"))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func runMayorRestart(cmd *cobra.Command, args []string) error {
|
||||
t := tmux.NewTmux()
|
||||
|
||||
sessionName := getMayorSessionName()
|
||||
|
||||
running, err := t.HasSession(sessionName)
|
||||
mgr, err := getMayorManager()
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking session: %w", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if running {
|
||||
// Stop the current session (best-effort interrupt before kill)
|
||||
fmt.Println("Stopping Mayor session...")
|
||||
_ = t.SendKeysRaw(sessionName, "C-c")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
if err := t.KillSession(sessionName); err != nil {
|
||||
return fmt.Errorf("killing session: %w", err)
|
||||
}
|
||||
// Stop if running (ignore not-running error)
|
||||
if err := mgr.Stop(); err != nil && err != mayor.ErrNotRunning {
|
||||
return fmt.Errorf("stopping session: %w", err)
|
||||
}
|
||||
|
||||
// Start fresh
|
||||
|
||||
@@ -87,7 +87,7 @@ func detectAgentBeadID() (string, error) {
|
||||
return "", fmt.Errorf("cannot determine agent identity (role: %s)", roleCtx.Role)
|
||||
}
|
||||
|
||||
beadID := buildAgentBeadID(identity, roleCtx.Role)
|
||||
beadID := buildAgentBeadID(identity, roleCtx.Role, townRoot)
|
||||
if beadID == "" {
|
||||
return "", fmt.Errorf("cannot build agent bead ID for identity: %s", identity)
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
@@ -28,9 +29,15 @@ import (
|
||||
// - "gastown/crew/max" -> "gt-gastown-crew-max"
|
||||
//
|
||||
// If role is unknown, it tries to infer from the identity string.
|
||||
func buildAgentBeadID(identity string, role Role) string {
|
||||
// townRoot is needed to look up the rig's configured prefix.
|
||||
func buildAgentBeadID(identity string, role Role, townRoot string) string {
|
||||
parts := strings.Split(identity, "/")
|
||||
|
||||
// Helper to get prefix for a rig
|
||||
getPrefix := func(rig string) string {
|
||||
return config.GetRigPrefix(townRoot, rig)
|
||||
}
|
||||
|
||||
// If role is unknown or empty, try to infer from identity
|
||||
if role == RoleUnknown || role == Role("") {
|
||||
switch {
|
||||
@@ -39,18 +46,18 @@ func buildAgentBeadID(identity string, role Role) string {
|
||||
case identity == "deacon":
|
||||
return beads.DeaconBeadIDTown()
|
||||
case len(parts) == 2 && parts[1] == "witness":
|
||||
return beads.WitnessBeadID(parts[0])
|
||||
return beads.WitnessBeadIDWithPrefix(getPrefix(parts[0]), parts[0])
|
||||
case len(parts) == 2 && parts[1] == "refinery":
|
||||
return beads.RefineryBeadID(parts[0])
|
||||
return beads.RefineryBeadIDWithPrefix(getPrefix(parts[0]), parts[0])
|
||||
case len(parts) == 2:
|
||||
// Assume rig/name is a polecat
|
||||
return beads.PolecatBeadID(parts[0], parts[1])
|
||||
return beads.PolecatBeadIDWithPrefix(getPrefix(parts[0]), parts[0], parts[1])
|
||||
case len(parts) == 3 && parts[1] == "crew":
|
||||
// rig/crew/name - crew member
|
||||
return beads.CrewBeadID(parts[0], parts[2])
|
||||
return beads.CrewBeadIDWithPrefix(getPrefix(parts[0]), parts[0], parts[2])
|
||||
case len(parts) == 3 && parts[1] == "polecats":
|
||||
// rig/polecats/name - explicit polecat
|
||||
return beads.PolecatBeadID(parts[0], parts[2])
|
||||
return beads.PolecatBeadIDWithPrefix(getPrefix(parts[0]), parts[0], parts[2])
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
@@ -63,26 +70,26 @@ func buildAgentBeadID(identity string, role Role) string {
|
||||
return beads.DeaconBeadIDTown()
|
||||
case RoleWitness:
|
||||
if len(parts) >= 1 {
|
||||
return beads.WitnessBeadID(parts[0])
|
||||
return beads.WitnessBeadIDWithPrefix(getPrefix(parts[0]), parts[0])
|
||||
}
|
||||
return ""
|
||||
case RoleRefinery:
|
||||
if len(parts) >= 1 {
|
||||
return beads.RefineryBeadID(parts[0])
|
||||
return beads.RefineryBeadIDWithPrefix(getPrefix(parts[0]), parts[0])
|
||||
}
|
||||
return ""
|
||||
case RolePolecat:
|
||||
// Handle both 2-part (rig/name) and 3-part (rig/polecats/name) formats
|
||||
if len(parts) == 3 && parts[1] == "polecats" {
|
||||
return beads.PolecatBeadID(parts[0], parts[2])
|
||||
return beads.PolecatBeadIDWithPrefix(getPrefix(parts[0]), parts[0], parts[2])
|
||||
}
|
||||
if len(parts) >= 2 {
|
||||
return beads.PolecatBeadID(parts[0], parts[1])
|
||||
return beads.PolecatBeadIDWithPrefix(getPrefix(parts[0]), parts[0], parts[1])
|
||||
}
|
||||
return ""
|
||||
case RoleCrew:
|
||||
if len(parts) >= 3 && parts[1] == "crew" {
|
||||
return beads.CrewBeadID(parts[0], parts[2])
|
||||
return beads.CrewBeadIDWithPrefix(getPrefix(parts[0]), parts[0], parts[2])
|
||||
}
|
||||
return ""
|
||||
default:
|
||||
@@ -318,7 +325,7 @@ func runMoleculeStatus(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Try to find agent bead and read hook slot
|
||||
// This is the preferred method - agent beads have a hook_bead field
|
||||
agentBeadID := buildAgentBeadID(target, roleCtx.Role)
|
||||
agentBeadID := buildAgentBeadID(target, roleCtx.Role, townRoot)
|
||||
var hookBead *beads.Issue
|
||||
|
||||
if agentBeadID != "" {
|
||||
|
||||
@@ -47,6 +47,9 @@ var (
|
||||
|
||||
// Integration status flags
|
||||
mqIntegrationStatusJSON bool
|
||||
|
||||
// Integration create flags
|
||||
mqIntegrationCreateBranch string
|
||||
)
|
||||
|
||||
var mqCmd = &cobra.Command{
|
||||
@@ -190,18 +193,31 @@ var mqIntegrationCreateCmd = &cobra.Command{
|
||||
Short: "Create an integration branch for an epic",
|
||||
Long: `Create an integration branch for batch work on an epic.
|
||||
|
||||
Creates a branch named integration/<epic-id> from main and pushes it
|
||||
to origin. Future MRs for this epic's children can target this branch.
|
||||
Creates a branch from main and pushes it to origin. Future MRs for this
|
||||
epic's children can target this branch.
|
||||
|
||||
Branch naming:
|
||||
Default: integration/<epic-id>
|
||||
Config: Set merge_queue.integration_branch_template in rig settings
|
||||
Override: Use --branch flag for one-off customization
|
||||
|
||||
Template variables:
|
||||
{epic} - Full epic ID (e.g., "RA-123")
|
||||
{prefix} - Epic prefix before first hyphen (e.g., "RA")
|
||||
{user} - Git user.name (e.g., "klauern")
|
||||
|
||||
Actions:
|
||||
1. Verify epic exists
|
||||
2. Create branch integration/<epic-id> from main
|
||||
2. Create branch from main (using template or --branch)
|
||||
3. Push to origin
|
||||
4. Store integration branch info in epic metadata
|
||||
4. Store actual branch name in epic metadata
|
||||
|
||||
Example:
|
||||
Examples:
|
||||
gt mq integration create gt-auth-epic
|
||||
# Creates integration/gt-auth-epic from main`,
|
||||
# Creates integration/gt-auth-epic (default)
|
||||
|
||||
gt mq integration create RA-123 --branch "klauern/PROJ-1234/{epic}"
|
||||
# Creates klauern/PROJ-1234/RA-123`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: runMqIntegrationCreate,
|
||||
}
|
||||
@@ -287,6 +303,7 @@ func init() {
|
||||
mqCmd.AddCommand(mqStatusCmd)
|
||||
|
||||
// Integration branch subcommands
|
||||
mqIntegrationCreateCmd.Flags().StringVar(&mqIntegrationCreateBranch, "branch", "", "Override branch name template (supports {epic}, {prefix}, {user})")
|
||||
mqIntegrationCmd.AddCommand(mqIntegrationCreateCmd)
|
||||
|
||||
// Integration land flags
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
@@ -16,6 +17,141 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
|
||||
// Integration branch template constants.
//
// defaultIntegrationBranchTemplate is used when neither the --branch flag
// nor the rig's merge_queue.integration_branch_template setting provides
// a template.
const defaultIntegrationBranchTemplate = "integration/{epic}"

// invalidBranchCharsRegex matches character sequences that are invalid in git
// branch names: any of ~ ^ : \ or whitespace, the ".." sequence, or "@{".
// (The ".lock" suffix rule is enforced separately in validateBranchName.)
// Note: the previous pattern listed the `\.\.` alternative twice; the
// duplicate has been removed — the matched language is unchanged.
var invalidBranchCharsRegex = regexp.MustCompile(`[~^:\s\\]|\.\.|@\{`)
|
||||
|
||||
// buildIntegrationBranchName expands an integration branch template with variables.
|
||||
// Variables supported:
|
||||
// - {epic}: Full epic ID (e.g., "RA-123")
|
||||
// - {prefix}: Epic prefix before first hyphen (e.g., "RA")
|
||||
// - {user}: Git user.name (e.g., "klauern")
|
||||
//
|
||||
// If template is empty, uses defaultIntegrationBranchTemplate.
|
||||
func buildIntegrationBranchName(template, epicID string) string {
|
||||
if template == "" {
|
||||
template = defaultIntegrationBranchTemplate
|
||||
}
|
||||
|
||||
result := template
|
||||
result = strings.ReplaceAll(result, "{epic}", epicID)
|
||||
result = strings.ReplaceAll(result, "{prefix}", extractEpicPrefix(epicID))
|
||||
|
||||
// Git user (optional - leaves placeholder if not available)
|
||||
if user := getGitUserName(); user != "" {
|
||||
result = strings.ReplaceAll(result, "{user}", user)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// extractEpicPrefix returns the portion of an epic ID before the first
// hyphen, or the whole ID when there is no usable prefix (no hyphen, or a
// leading hyphen).
// Examples: "RA-123" -> "RA", "PROJ-456" -> "PROJ", "abc" -> "abc".
func extractEpicPrefix(epicID string) string {
	before, _, found := strings.Cut(epicID, "-")
	if found && before != "" {
		return before
	}
	return epicID
}
|
||||
|
||||
// getGitUserName returns git's configured user.name with surrounding
// whitespace trimmed, or the empty string when the value is unset or the
// git binary cannot be invoked.
func getGitUserName() string {
	out, err := exec.Command("git", "config", "user.name").Output()
	if err != nil {
		// Treat any failure (git missing, config key absent) as "no user".
		return ""
	}
	return strings.TrimSpace(string(out))
}
|
||||
|
||||
// validateBranchName checks if a branch name is valid for git.
|
||||
// Returns an error if the branch name contains invalid characters.
|
||||
func validateBranchName(branchName string) error {
|
||||
if branchName == "" {
|
||||
return fmt.Errorf("branch name cannot be empty")
|
||||
}
|
||||
|
||||
// Check for invalid characters
|
||||
if invalidBranchCharsRegex.MatchString(branchName) {
|
||||
return fmt.Errorf("branch name %q contains invalid characters (~ ^ : \\ space, .., or @{)", branchName)
|
||||
}
|
||||
|
||||
// Check for .lock suffix
|
||||
if strings.HasSuffix(branchName, ".lock") {
|
||||
return fmt.Errorf("branch name %q cannot end with .lock", branchName)
|
||||
}
|
||||
|
||||
// Check for leading/trailing slashes or dots
|
||||
if strings.HasPrefix(branchName, "/") || strings.HasSuffix(branchName, "/") {
|
||||
return fmt.Errorf("branch name %q cannot start or end with /", branchName)
|
||||
}
|
||||
if strings.HasPrefix(branchName, ".") || strings.HasSuffix(branchName, ".") {
|
||||
return fmt.Errorf("branch name %q cannot start or end with .", branchName)
|
||||
}
|
||||
|
||||
// Check for consecutive slashes
|
||||
if strings.Contains(branchName, "//") {
|
||||
return fmt.Errorf("branch name %q cannot contain consecutive slashes", branchName)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getIntegrationBranchField extracts the value of the "integration_branch:"
// field from an epic's description. The field name is matched
// case-insensitively at the start of a line (after trimming whitespace),
// and the value is returned with surrounding whitespace trimmed.
// Returns the empty string if the field is not found.
//
// Cleanup: the previous implementation computed a `value` via three
// case-specific TrimPrefix calls plus a prefix loop and then discarded it,
// re-parsing with SplitN anyway (dead code flagged by staticcheck SA4006).
// Only the single, correct parse is kept.
func getIntegrationBranchField(description string) string {
	for _, line := range strings.Split(description, "\n") {
		trimmed := strings.TrimSpace(line)
		if !strings.HasPrefix(strings.ToLower(trimmed), "integration_branch:") {
			continue
		}
		// Everything after the first colon is the branch name.
		if _, value, ok := strings.Cut(trimmed, ":"); ok {
			return strings.TrimSpace(value)
		}
	}
	return ""
}
|
||||
|
||||
// getIntegrationBranchTemplate returns the integration branch template to use.
|
||||
// Priority: CLI flag > rig config > default
|
||||
func getIntegrationBranchTemplate(rigPath, cliOverride string) string {
|
||||
if cliOverride != "" {
|
||||
return cliOverride
|
||||
}
|
||||
|
||||
// Try to load rig settings
|
||||
settingsPath := filepath.Join(rigPath, "settings", "config.json")
|
||||
settings, err := config.LoadRigSettings(settingsPath)
|
||||
if err != nil {
|
||||
return defaultIntegrationBranchTemplate
|
||||
}
|
||||
|
||||
if settings.MergeQueue != nil && settings.MergeQueue.IntegrationBranchTemplate != "" {
|
||||
return settings.MergeQueue.IntegrationBranchTemplate
|
||||
}
|
||||
|
||||
return defaultIntegrationBranchTemplate
|
||||
}
|
||||
|
||||
// IntegrationStatusOutput is the JSON output structure for integration status.
|
||||
type IntegrationStatusOutput struct {
|
||||
Epic string `json:"epic"`
|
||||
@@ -66,8 +202,14 @@ func runMqIntegrationCreate(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("'%s' is a %s, not an epic", epicID, epic.Type)
|
||||
}
|
||||
|
||||
// Build integration branch name
|
||||
branchName := "integration/" + epicID
|
||||
// Build integration branch name from template
|
||||
template := getIntegrationBranchTemplate(r.Path, mqIntegrationCreateBranch)
|
||||
branchName := buildIntegrationBranchName(template, epicID)
|
||||
|
||||
// Validate the branch name
|
||||
if err := validateBranchName(branchName); err != nil {
|
||||
return fmt.Errorf("invalid branch name: %w", err)
|
||||
}
|
||||
|
||||
// Initialize git for the rig
|
||||
g := git.NewGit(r.Path)
|
||||
@@ -185,9 +327,6 @@ func runMqIntegrationLand(cmd *cobra.Command, args []string) error {
|
||||
bd := beads.New(r.Path)
|
||||
g := git.NewGit(r.Path)
|
||||
|
||||
// Build integration branch name
|
||||
branchName := "integration/" + epicID
|
||||
|
||||
// Show what we're about to do
|
||||
if mqIntegrationLandDryRun {
|
||||
fmt.Printf("%s Dry run - no changes will be made\n\n", style.Bold.Render("🔍"))
|
||||
@@ -206,6 +345,13 @@ func runMqIntegrationLand(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("'%s' is a %s, not an epic", epicID, epic.Type)
|
||||
}
|
||||
|
||||
// Get integration branch name from epic metadata (stored at create time)
|
||||
// Fall back to default template for backward compatibility with old epics
|
||||
branchName := getIntegrationBranchField(epic.Description)
|
||||
if branchName == "" {
|
||||
branchName = buildIntegrationBranchName(defaultIntegrationBranchTemplate, epicID)
|
||||
}
|
||||
|
||||
fmt.Printf("Landing integration branch for epic: %s\n", epicID)
|
||||
fmt.Printf(" Title: %s\n\n", epic.Title)
|
||||
|
||||
@@ -455,8 +601,21 @@ func runMqIntegrationStatus(cmd *cobra.Command, args []string) error {
|
||||
// Initialize beads for the rig
|
||||
bd := beads.New(r.Path)
|
||||
|
||||
// Build integration branch name
|
||||
branchName := "integration/" + epicID
|
||||
// Fetch epic to get stored branch name
|
||||
epic, err := bd.Show(epicID)
|
||||
if err != nil {
|
||||
if err == beads.ErrNotFound {
|
||||
return fmt.Errorf("epic '%s' not found", epicID)
|
||||
}
|
||||
return fmt.Errorf("fetching epic: %w", err)
|
||||
}
|
||||
|
||||
// Get integration branch name from epic metadata (stored at create time)
|
||||
// Fall back to default template for backward compatibility with old epics
|
||||
branchName := getIntegrationBranchField(epic.Description)
|
||||
if branchName == "" {
|
||||
branchName = buildIntegrationBranchName(defaultIntegrationBranchTemplate, epicID)
|
||||
}
|
||||
|
||||
// Initialize git for the rig
|
||||
g := git.NewGit(r.Path)
|
||||
@@ -492,8 +651,8 @@ func runMqIntegrationStatus(cmd *cobra.Command, args []string) error {
|
||||
aheadCount = 0 // Non-fatal
|
||||
}
|
||||
|
||||
// Query for MRs targeting this integration branch
|
||||
targetBranch := "integration/" + epicID
|
||||
// Query for MRs targeting this integration branch (use resolved name)
|
||||
targetBranch := branchName
|
||||
|
||||
// Get all merge-request issues
|
||||
allMRs, err := bd.List(beads.ListOptions{
|
||||
|
||||
@@ -90,17 +90,6 @@ func runMqSubmit(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("cannot submit %s/master branch to merge queue", defaultBranch)
|
||||
}
|
||||
|
||||
// CRITICAL: Verify branch is pushed before creating MR bead
|
||||
// This prevents work loss when MR is created but commits aren't on remote.
|
||||
// See: gt-2hwi9 (Polecats not pushing before signaling done)
|
||||
pushed, unpushedCount, err := g.BranchPushedToRemote(branch, "origin")
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking if branch is pushed: %w", err)
|
||||
}
|
||||
if !pushed {
|
||||
return fmt.Errorf("branch has %d unpushed commit(s); run 'git push -u origin %s' first", unpushedCount, branch)
|
||||
}
|
||||
|
||||
// Parse branch info
|
||||
info := parseBranchName(branch)
|
||||
|
||||
|
||||
@@ -434,3 +434,247 @@ func TestFilterMRsByTarget_NoMRFields(t *testing.T) {
|
||||
t.Errorf("filterMRsByTarget() should filter out issues without MR fields, got %d", len(got))
|
||||
}
|
||||
}
|
||||
|
||||
// Tests for configurable integration branch naming (Issue #104)
|
||||
|
||||
func TestBuildIntegrationBranchName(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
template string
|
||||
epicID string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "default template",
|
||||
template: "",
|
||||
epicID: "RA-123",
|
||||
want: "integration/RA-123",
|
||||
},
|
||||
{
|
||||
name: "explicit default template",
|
||||
template: "integration/{epic}",
|
||||
epicID: "PROJ-456",
|
||||
want: "integration/PROJ-456",
|
||||
},
|
||||
{
|
||||
name: "custom template with prefix",
|
||||
template: "{prefix}/{epic}",
|
||||
epicID: "RA-123",
|
||||
want: "RA/RA-123",
|
||||
},
|
||||
{
|
||||
name: "complex template",
|
||||
template: "feature/{prefix}/work/{epic}",
|
||||
epicID: "PROJ-789",
|
||||
want: "feature/PROJ/work/PROJ-789",
|
||||
},
|
||||
{
|
||||
name: "epic without hyphen",
|
||||
template: "{prefix}/{epic}",
|
||||
epicID: "epicname",
|
||||
want: "epicname/epicname",
|
||||
},
|
||||
{
|
||||
name: "user variable left as-is without git config",
|
||||
template: "{user}/{epic}",
|
||||
epicID: "RA-123",
|
||||
// Note: {user} is replaced with git user.name if available,
|
||||
// otherwise left as placeholder. In tests, it depends on git config.
|
||||
want: "", // We'll check pattern instead
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := buildIntegrationBranchName(tt.template, tt.epicID)
|
||||
if tt.want == "" {
|
||||
// For user variable test, just check {epic} was replaced
|
||||
if stringContains(got, "{epic}") {
|
||||
t.Errorf("buildIntegrationBranchName() = %q, should have replaced {epic}", got)
|
||||
}
|
||||
} else if got != tt.want {
|
||||
t.Errorf("buildIntegrationBranchName() = %q, want %q", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractEpicPrefix(t *testing.T) {
|
||||
tests := []struct {
|
||||
epicID string
|
||||
want string
|
||||
}{
|
||||
{"RA-123", "RA"},
|
||||
{"PROJ-456", "PROJ"},
|
||||
{"gt-auth-epic", "gt"},
|
||||
{"epicname", "epicname"},
|
||||
{"X-1", "X"},
|
||||
{"-123", "-123"}, // No prefix before hyphen, return full string
|
||||
{"", ""},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.epicID, func(t *testing.T) {
|
||||
got := extractEpicPrefix(tt.epicID)
|
||||
if got != tt.want {
|
||||
t.Errorf("extractEpicPrefix(%q) = %q, want %q", tt.epicID, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateBranchName(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
branchName string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "valid simple branch",
|
||||
branchName: "integration/gt-epic",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid nested branch",
|
||||
branchName: "user/project/feature",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid with hyphens and underscores",
|
||||
branchName: "user-name/feature_branch",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "empty branch name",
|
||||
branchName: "",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "contains tilde",
|
||||
branchName: "branch~1",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "contains caret",
|
||||
branchName: "branch^2",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "contains colon",
|
||||
branchName: "branch:ref",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "contains space",
|
||||
branchName: "branch name",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "contains backslash",
|
||||
branchName: "branch\\name",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "contains double dot",
|
||||
branchName: "branch..name",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "contains at-brace",
|
||||
branchName: "branch@{name}",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "ends with .lock",
|
||||
branchName: "branch.lock",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "starts with slash",
|
||||
branchName: "/branch",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "ends with slash",
|
||||
branchName: "branch/",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "starts with dot",
|
||||
branchName: ".branch",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "ends with dot",
|
||||
branchName: "branch.",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "consecutive slashes",
|
||||
branchName: "branch//name",
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := validateBranchName(tt.branchName)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("validateBranchName(%q) error = %v, wantErr %v", tt.branchName, err, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetIntegrationBranchField(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
description string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "empty description",
|
||||
description: "",
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
name: "field at beginning",
|
||||
description: "integration_branch: klauern/PROJ-123/RA-epic\nSome description",
|
||||
want: "klauern/PROJ-123/RA-epic",
|
||||
},
|
||||
{
|
||||
name: "field in middle",
|
||||
description: "Some text\nintegration_branch: custom/branch\nMore text",
|
||||
want: "custom/branch",
|
||||
},
|
||||
{
|
||||
name: "field with extra whitespace",
|
||||
description: " integration_branch: spaced/branch \nOther content",
|
||||
want: "spaced/branch",
|
||||
},
|
||||
{
|
||||
name: "no integration_branch field",
|
||||
description: "Just a plain description\nWith multiple lines",
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
name: "mixed case field name",
|
||||
description: "Integration_branch: CamelCase/branch",
|
||||
want: "CamelCase/branch",
|
||||
},
|
||||
{
|
||||
name: "default format",
|
||||
description: "integration_branch: integration/gt-epic\nEpic for auth work",
|
||||
want: "integration/gt-epic",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := getIntegrationBranchField(tt.description)
|
||||
if got != tt.want {
|
||||
t.Errorf("getIntegrationBranchField() = %q, want %q", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/git"
|
||||
"github.com/steveyegge/gastown/internal/polecat"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
)
|
||||
@@ -90,7 +90,6 @@ Examples:
|
||||
RunE: runPolecatRemove,
|
||||
}
|
||||
|
||||
|
||||
var polecatSyncCmd = &cobra.Command{
|
||||
Use: "sync <rig>/<polecat>",
|
||||
Short: "Sync beads for a polecat",
|
||||
@@ -130,15 +129,15 @@ Examples:
|
||||
}
|
||||
|
||||
var (
|
||||
polecatSyncAll bool
|
||||
polecatSyncFromMain bool
|
||||
polecatStatusJSON bool
|
||||
polecatGitStateJSON bool
|
||||
polecatGCDryRun bool
|
||||
polecatNukeAll bool
|
||||
polecatNukeDryRun bool
|
||||
polecatNukeForce bool
|
||||
polecatCheckRecoveryJSON bool
|
||||
polecatSyncAll bool
|
||||
polecatSyncFromMain bool
|
||||
polecatStatusJSON bool
|
||||
polecatGitStateJSON bool
|
||||
polecatGCDryRun bool
|
||||
polecatNukeAll bool
|
||||
polecatNukeDryRun bool
|
||||
polecatNukeForce bool
|
||||
polecatCheckRecoveryJSON bool
|
||||
)
|
||||
|
||||
var polecatGCCmd = &cobra.Command{
|
||||
@@ -361,7 +360,7 @@ func runPolecatList(cmd *cobra.Command, args []string) error {
|
||||
for _, r := range rigs {
|
||||
polecatGit := git.NewGit(r.Path)
|
||||
mgr := polecat.NewManager(r, polecatGit)
|
||||
sessMgr := session.NewManager(t, r)
|
||||
polecatMgr := polecat.NewSessionManager(t, r)
|
||||
|
||||
polecats, err := mgr.List()
|
||||
if err != nil {
|
||||
@@ -370,7 +369,7 @@ func runPolecatList(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
for _, p := range polecats {
|
||||
running, _ := sessMgr.IsRunning(p.Name)
|
||||
running, _ := polecatMgr.IsRunning(p.Name)
|
||||
allPolecats = append(allPolecats, PolecatListItem{
|
||||
Rig: r.Name,
|
||||
Name: p.Name,
|
||||
@@ -525,8 +524,8 @@ func runPolecatRemove(cmd *cobra.Command, args []string) error {
|
||||
for _, p := range toRemove {
|
||||
// Check if session is running
|
||||
if !polecatForce {
|
||||
sessMgr := session.NewManager(t, p.r)
|
||||
running, _ := sessMgr.IsRunning(p.polecatName)
|
||||
polecatMgr := polecat.NewSessionManager(t, p.r)
|
||||
running, _ := polecatMgr.IsRunning(p.polecatName)
|
||||
if running {
|
||||
removeErrors = append(removeErrors, fmt.Sprintf("%s/%s: session is running (stop first or use --force)", p.rigName, p.polecatName))
|
||||
continue
|
||||
@@ -580,7 +579,7 @@ func runPolecatSync(cmd *cobra.Command, args []string) error {
|
||||
polecatName = ""
|
||||
}
|
||||
|
||||
mgr, r, err := getPolecatManager(rigName)
|
||||
mgr, _, err := getPolecatManager(rigName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -607,10 +606,15 @@ func runPolecatSync(cmd *cobra.Command, args []string) error {
|
||||
// Sync each polecat
|
||||
var syncErrors []string
|
||||
for _, name := range polecatsToSync {
|
||||
polecatDir := filepath.Join(r.Path, "polecats", name)
|
||||
// Get polecat to get correct clone path (handles old vs new structure)
|
||||
p, err := mgr.Get(name)
|
||||
if err != nil {
|
||||
syncErrors = append(syncErrors, fmt.Sprintf("%s: %v", name, err))
|
||||
continue
|
||||
}
|
||||
|
||||
// Check directory exists
|
||||
if _, err := os.Stat(polecatDir); os.IsNotExist(err) {
|
||||
if _, err := os.Stat(p.ClonePath); os.IsNotExist(err) {
|
||||
syncErrors = append(syncErrors, fmt.Sprintf("%s: directory not found", name))
|
||||
continue
|
||||
}
|
||||
@@ -624,7 +628,7 @@ func runPolecatSync(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf("Syncing %s/%s...\n", rigName, name)
|
||||
|
||||
syncCmd := exec.Command("bd", syncArgs...)
|
||||
syncCmd.Dir = polecatDir
|
||||
syncCmd.Dir = p.ClonePath
|
||||
output, err := syncCmd.CombinedOutput()
|
||||
if err != nil {
|
||||
syncErrors = append(syncErrors, fmt.Sprintf("%s: %v", name, err))
|
||||
@@ -682,11 +686,11 @@ func runPolecatStatus(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Get session info
|
||||
t := tmux.NewTmux()
|
||||
sessMgr := session.NewManager(t, r)
|
||||
sessInfo, err := sessMgr.Status(polecatName)
|
||||
polecatMgr := polecat.NewSessionManager(t, r)
|
||||
sessInfo, err := polecatMgr.Status(polecatName)
|
||||
if err != nil {
|
||||
// Non-fatal - continue without session info
|
||||
sessInfo = &session.Info{
|
||||
sessInfo = &polecat.SessionInfo{
|
||||
Polecat: polecatName,
|
||||
Running: false,
|
||||
}
|
||||
@@ -970,13 +974,13 @@ func getGitState(worktreePath string) (*GitState, error) {
|
||||
|
||||
// RecoveryStatus represents whether a polecat needs recovery or is safe to nuke.
|
||||
type RecoveryStatus struct {
|
||||
Rig string `json:"rig"`
|
||||
Polecat string `json:"polecat"`
|
||||
CleanupStatus string `json:"cleanup_status"`
|
||||
NeedsRecovery bool `json:"needs_recovery"`
|
||||
Verdict string `json:"verdict"` // SAFE_TO_NUKE or NEEDS_RECOVERY
|
||||
Branch string `json:"branch,omitempty"`
|
||||
Issue string `json:"issue,omitempty"`
|
||||
Rig string `json:"rig"`
|
||||
Polecat string `json:"polecat"`
|
||||
CleanupStatus polecat.CleanupStatus `json:"cleanup_status"`
|
||||
NeedsRecovery bool `json:"needs_recovery"`
|
||||
Verdict string `json:"verdict"` // SAFE_TO_NUKE or NEEDS_RECOVERY
|
||||
Branch string `json:"branch,omitempty"`
|
||||
Issue string `json:"issue,omitempty"`
|
||||
}
|
||||
|
||||
func runPolecatCheckRecovery(cmd *cobra.Command, args []string) error {
|
||||
@@ -1015,38 +1019,35 @@ func runPolecatCheckRecovery(cmd *cobra.Command, args []string) error {
|
||||
// This handles polecats that haven't self-reported yet
|
||||
gitState, gitErr := getGitState(p.ClonePath)
|
||||
if gitErr != nil {
|
||||
status.CleanupStatus = "unknown"
|
||||
status.CleanupStatus = polecat.CleanupUnknown
|
||||
status.NeedsRecovery = true
|
||||
status.Verdict = "NEEDS_RECOVERY"
|
||||
} else if gitState.Clean {
|
||||
status.CleanupStatus = "clean"
|
||||
status.CleanupStatus = polecat.CleanupClean
|
||||
status.NeedsRecovery = false
|
||||
status.Verdict = "SAFE_TO_NUKE"
|
||||
} else if gitState.UnpushedCommits > 0 {
|
||||
status.CleanupStatus = "has_unpushed"
|
||||
status.CleanupStatus = polecat.CleanupUnpushed
|
||||
status.NeedsRecovery = true
|
||||
status.Verdict = "NEEDS_RECOVERY"
|
||||
} else if gitState.StashCount > 0 {
|
||||
status.CleanupStatus = "has_stash"
|
||||
status.CleanupStatus = polecat.CleanupStash
|
||||
status.NeedsRecovery = true
|
||||
status.Verdict = "NEEDS_RECOVERY"
|
||||
} else {
|
||||
status.CleanupStatus = "has_uncommitted"
|
||||
status.CleanupStatus = polecat.CleanupUncommitted
|
||||
status.NeedsRecovery = true
|
||||
status.Verdict = "NEEDS_RECOVERY"
|
||||
}
|
||||
} else {
|
||||
// Use cleanup_status from agent bead
|
||||
status.CleanupStatus = fields.CleanupStatus
|
||||
switch fields.CleanupStatus {
|
||||
case "clean":
|
||||
status.CleanupStatus = polecat.CleanupStatus(fields.CleanupStatus)
|
||||
if status.CleanupStatus.IsSafe() {
|
||||
status.NeedsRecovery = false
|
||||
status.Verdict = "SAFE_TO_NUKE"
|
||||
case "has_uncommitted", "has_unpushed", "has_stash":
|
||||
status.NeedsRecovery = true
|
||||
status.Verdict = "NEEDS_RECOVERY"
|
||||
default:
|
||||
// Unknown or empty - be conservative
|
||||
} else {
|
||||
// RequiresRecovery covers uncommitted, stash, unpushed
|
||||
// Unknown/empty also treated conservatively
|
||||
status.NeedsRecovery = true
|
||||
status.Verdict = "NEEDS_RECOVERY"
|
||||
}
|
||||
@@ -1275,28 +1276,42 @@ func runPolecatNuke(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
} else {
|
||||
// Check cleanup_status from agent bead
|
||||
switch fields.CleanupStatus {
|
||||
case "clean":
|
||||
cleanupStatus := polecat.CleanupStatus(fields.CleanupStatus)
|
||||
switch cleanupStatus {
|
||||
case polecat.CleanupClean:
|
||||
// OK
|
||||
case "has_unpushed":
|
||||
case polecat.CleanupUnpushed:
|
||||
reasons = append(reasons, "has unpushed commits")
|
||||
case "has_uncommitted":
|
||||
case polecat.CleanupUncommitted:
|
||||
reasons = append(reasons, "has uncommitted changes")
|
||||
case "has_stash":
|
||||
case polecat.CleanupStash:
|
||||
reasons = append(reasons, "has stashed changes")
|
||||
case "unknown", "":
|
||||
case polecat.CleanupUnknown, "":
|
||||
reasons = append(reasons, "cleanup status unknown")
|
||||
default:
|
||||
reasons = append(reasons, fmt.Sprintf("cleanup status: %s", fields.CleanupStatus))
|
||||
reasons = append(reasons, fmt.Sprintf("cleanup status: %s", cleanupStatus))
|
||||
}
|
||||
|
||||
// Check 3: Work on hook (check both Issue.HookBead from slot and fields.HookBead)
|
||||
// Only flag as blocking if the hooked bead is still in an active status.
|
||||
// If the hooked bead was closed externally (gt-jc7bq), don't block nuke.
|
||||
hookBead := agentIssue.HookBead
|
||||
if hookBead == "" {
|
||||
hookBead = fields.HookBead
|
||||
}
|
||||
if hookBead != "" {
|
||||
reasons = append(reasons, fmt.Sprintf("has work on hook (%s)", hookBead))
|
||||
// Check if hooked bead is still active (not closed)
|
||||
hookedIssue, err := bd.Show(hookBead)
|
||||
if err == nil && hookedIssue != nil {
|
||||
// Only block if bead is still active (not closed)
|
||||
if hookedIssue.Status != "closed" {
|
||||
reasons = append(reasons, fmt.Sprintf("has work on hook (%s)", hookBead))
|
||||
}
|
||||
// If closed, the hook is stale - don't block nuke
|
||||
} else {
|
||||
// Can't verify hooked bead - be conservative
|
||||
reasons = append(reasons, fmt.Sprintf("has work on hook (%s, unverified)", hookBead))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1374,10 +1389,11 @@ func runPolecatNuke(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
fmt.Printf(" - Hook: %s\n", style.Dim.Render("unknown (no agent bead)"))
|
||||
} else {
|
||||
if fields.CleanupStatus == "clean" {
|
||||
cleanupStatus := polecat.CleanupStatus(fields.CleanupStatus)
|
||||
if cleanupStatus.IsSafe() {
|
||||
fmt.Printf(" - Git state: %s\n", style.Success.Render("clean"))
|
||||
} else if fields.CleanupStatus != "" {
|
||||
fmt.Printf(" - Git state: %s (%s)\n", style.Error.Render("dirty"), fields.CleanupStatus)
|
||||
} else if cleanupStatus.RequiresRecovery() {
|
||||
fmt.Printf(" - Git state: %s (%s)\n", style.Error.Render("dirty"), cleanupStatus)
|
||||
} else {
|
||||
fmt.Printf(" - Git state: %s\n", style.Warning.Render("unknown"))
|
||||
}
|
||||
@@ -1387,7 +1403,13 @@ func runPolecatNuke(cmd *cobra.Command, args []string) error {
|
||||
hookBead = fields.HookBead
|
||||
}
|
||||
if hookBead != "" {
|
||||
fmt.Printf(" - Hook: %s (%s)\n", style.Error.Render("has work"), hookBead)
|
||||
// Check if hooked bead is still active
|
||||
hookedIssue, err := bd.Show(hookBead)
|
||||
if err == nil && hookedIssue != nil && hookedIssue.Status == "closed" {
|
||||
fmt.Printf(" - Hook: %s (%s, closed - stale)\n", style.Warning.Render("stale"), hookBead)
|
||||
} else {
|
||||
fmt.Printf(" - Hook: %s (%s)\n", style.Error.Render("has work"), hookBead)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf(" - Hook: %s\n", style.Success.Render("empty"))
|
||||
}
|
||||
@@ -1416,10 +1438,10 @@ func runPolecatNuke(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// Step 1: Kill session (force mode - no graceful shutdown)
|
||||
sessMgr := session.NewManager(t, p.r)
|
||||
running, _ := sessMgr.IsRunning(p.polecatName)
|
||||
polecatMgr := polecat.NewSessionManager(t, p.r)
|
||||
running, _ := polecatMgr.IsRunning(p.polecatName)
|
||||
if running {
|
||||
if err := sessMgr.Stop(p.polecatName, true); err != nil {
|
||||
if err := polecatMgr.Stop(p.polecatName, true); err != nil {
|
||||
fmt.Printf(" %s session kill failed: %v\n", style.Warning.Render("⚠"), err)
|
||||
// Continue anyway - worktree removal will still work
|
||||
} else {
|
||||
@@ -1460,7 +1482,7 @@ func runPolecatNuke(cmd *cobra.Command, args []string) error {
|
||||
// Step 5: Close agent bead (if exists)
|
||||
agentBeadID := beads.PolecatBeadID(p.rigName, p.polecatName)
|
||||
closeArgs := []string{"close", agentBeadID, "--reason=nuked"}
|
||||
if sessionID := os.Getenv("CLAUDE_SESSION_ID"); sessionID != "" {
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
closeArgs = append(closeArgs, "--session="+sessionID)
|
||||
}
|
||||
closeCmd := exec.Command("bd", closeArgs...)
|
||||
|
||||
201
internal/cmd/polecat_dotdir_test.go
Normal file
201
internal/cmd/polecat_dotdir_test.go
Normal file
@@ -0,0 +1,201 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
)
|
||||
|
||||
func TestDiscoverHooksSkipsPolecatDotDirs(t *testing.T) {
|
||||
townRoot := setupTestTownForDotDir(t)
|
||||
rigPath := filepath.Join(townRoot, "gastown")
|
||||
|
||||
settingsPath := filepath.Join(rigPath, "polecats", ".claude", ".claude", "settings.json")
|
||||
if err := os.MkdirAll(filepath.Dir(settingsPath), 0755); err != nil {
|
||||
t.Fatalf("mkdir settings dir: %v", err)
|
||||
}
|
||||
|
||||
settings := `{"hooks":{"SessionStart":[{"matcher":"*","hooks":[{"type":"Stop","command":"echo hi"}]}]}}`
|
||||
if err := os.WriteFile(settingsPath, []byte(settings), 0644); err != nil {
|
||||
t.Fatalf("write settings: %v", err)
|
||||
}
|
||||
|
||||
hooks, err := discoverHooks(townRoot)
|
||||
if err != nil {
|
||||
t.Fatalf("discoverHooks: %v", err)
|
||||
}
|
||||
|
||||
if len(hooks) != 0 {
|
||||
t.Fatalf("expected no hooks, got %d", len(hooks))
|
||||
}
|
||||
}
|
||||
|
||||
func TestStartPolecatsWithWorkSkipsDotDirs(t *testing.T) {
|
||||
townRoot := setupTestTownForDotDir(t)
|
||||
rigName := "gastown"
|
||||
rigPath := filepath.Join(townRoot, rigName)
|
||||
|
||||
addRigEntry(t, townRoot, rigName)
|
||||
|
||||
if err := os.MkdirAll(filepath.Join(rigPath, "polecats", ".claude"), 0755); err != nil {
|
||||
t.Fatalf("mkdir .claude polecat: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(filepath.Join(rigPath, "polecats", "toast"), 0755); err != nil {
|
||||
t.Fatalf("mkdir polecat: %v", err)
|
||||
}
|
||||
|
||||
binDir := t.TempDir()
|
||||
bdScript := `#!/bin/sh
|
||||
if [ "$1" = "--no-daemon" ]; then
|
||||
shift
|
||||
fi
|
||||
cmd="$1"
|
||||
case "$cmd" in
|
||||
list)
|
||||
if [ "$(basename "$PWD")" = ".claude" ]; then
|
||||
echo '[{"id":"gt-1"}]'
|
||||
else
|
||||
echo '[]'
|
||||
fi
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
`
|
||||
writeScript(t, binDir, "bd", bdScript)
|
||||
|
||||
tmuxScript := `#!/bin/sh
|
||||
if [ "$1" = "has-session" ]; then
|
||||
echo "tmux error" 1>&2
|
||||
exit 1
|
||||
fi
|
||||
exit 0
|
||||
`
|
||||
writeScript(t, binDir, "tmux", tmuxScript)
|
||||
|
||||
t.Setenv("PATH", fmt.Sprintf("%s:%s", binDir, os.Getenv("PATH")))
|
||||
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatalf("getwd: %v", err)
|
||||
}
|
||||
t.Cleanup(func() { _ = os.Chdir(cwd) })
|
||||
if err := os.Chdir(townRoot); err != nil {
|
||||
t.Fatalf("chdir town root: %v", err)
|
||||
}
|
||||
|
||||
started, errs := startPolecatsWithWork(townRoot, rigName)
|
||||
|
||||
if len(started) != 0 {
|
||||
t.Fatalf("expected no polecats started, got %v", started)
|
||||
}
|
||||
if len(errs) != 0 {
|
||||
t.Fatalf("expected no errors, got %v", errs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunSessionCheckSkipsDotDirs(t *testing.T) {
|
||||
townRoot := setupTestTownForDotDir(t)
|
||||
rigName := "gastown"
|
||||
rigPath := filepath.Join(townRoot, rigName)
|
||||
|
||||
addRigEntry(t, townRoot, rigName)
|
||||
|
||||
if err := os.MkdirAll(filepath.Join(rigPath, "polecats", ".claude"), 0755); err != nil {
|
||||
t.Fatalf("mkdir .claude polecat: %v", err)
|
||||
}
|
||||
|
||||
binDir := t.TempDir()
|
||||
tmuxScript := `#!/bin/sh
|
||||
if [ "$1" = "has-session" ]; then
|
||||
echo "can't find session" 1>&2
|
||||
exit 1
|
||||
fi
|
||||
exit 0
|
||||
`
|
||||
writeScript(t, binDir, "tmux", tmuxScript)
|
||||
t.Setenv("PATH", fmt.Sprintf("%s:%s", binDir, os.Getenv("PATH")))
|
||||
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatalf("getwd: %v", err)
|
||||
}
|
||||
t.Cleanup(func() { _ = os.Chdir(cwd) })
|
||||
if err := os.Chdir(townRoot); err != nil {
|
||||
t.Fatalf("chdir town root: %v", err)
|
||||
}
|
||||
|
||||
output := captureStdout(t, func() {
|
||||
if err := runSessionCheck(&cobra.Command{}, []string{rigName}); err != nil {
|
||||
t.Fatalf("runSessionCheck: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
if strings.Contains(output, ".claude") {
|
||||
t.Fatalf("expected .claude to be ignored, output:\n%s", output)
|
||||
}
|
||||
}
|
||||
|
||||
func addRigEntry(t *testing.T, townRoot, rigName string) {
|
||||
t.Helper()
|
||||
|
||||
rigsPath := filepath.Join(townRoot, "mayor", "rigs.json")
|
||||
rigsConfig, err := config.LoadRigsConfig(rigsPath)
|
||||
if err != nil {
|
||||
t.Fatalf("load rigs.json: %v", err)
|
||||
}
|
||||
if rigsConfig.Rigs == nil {
|
||||
rigsConfig.Rigs = make(map[string]config.RigEntry)
|
||||
}
|
||||
rigsConfig.Rigs[rigName] = config.RigEntry{
|
||||
GitURL: "file:///dev/null",
|
||||
AddedAt: time.Now(),
|
||||
}
|
||||
if err := config.SaveRigsConfig(rigsPath, rigsConfig); err != nil {
|
||||
t.Fatalf("save rigs.json: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func setupTestTownForDotDir(t *testing.T) string {
|
||||
t.Helper()
|
||||
|
||||
townRoot := t.TempDir()
|
||||
|
||||
mayorDir := filepath.Join(townRoot, "mayor")
|
||||
if err := os.MkdirAll(mayorDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir mayor: %v", err)
|
||||
}
|
||||
|
||||
rigsPath := filepath.Join(mayorDir, "rigs.json")
|
||||
rigsConfig := &config.RigsConfig{
|
||||
Version: 1,
|
||||
Rigs: make(map[string]config.RigEntry),
|
||||
}
|
||||
if err := config.SaveRigsConfig(rigsPath, rigsConfig); err != nil {
|
||||
t.Fatalf("save rigs.json: %v", err)
|
||||
}
|
||||
|
||||
beadsDir := filepath.Join(townRoot, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir .beads: %v", err)
|
||||
}
|
||||
|
||||
return townRoot
|
||||
}
|
||||
|
||||
func writeScript(t *testing.T, dir, name, content string) {
|
||||
t.Helper()
|
||||
|
||||
path := filepath.Join(dir, name)
|
||||
if err := os.WriteFile(path, []byte(content), 0755); err != nil {
|
||||
t.Fatalf("write %s: %v", name, err)
|
||||
}
|
||||
}
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/git"
|
||||
"github.com/steveyegge/gastown/internal/polecat"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
@@ -39,6 +38,7 @@ type SlingSpawnOptions struct {
|
||||
Account string // Claude Code account handle to use
|
||||
Create bool // Create polecat if it doesn't exist (currently always true for sling)
|
||||
HookBead string // Bead ID to set as hook_bead at spawn time (atomic assignment)
|
||||
Agent string // Agent override for this spawn (e.g., "gemini", "codex", "claude-haiku")
|
||||
}
|
||||
|
||||
// SpawnPolecatForSling creates a fresh polecat and optionally starts its session.
|
||||
@@ -122,8 +122,11 @@ func SpawnPolecatForSling(rigName string, opts SlingSpawnOptions) (*SpawnedPolec
|
||||
fmt.Printf("Polecat created. Agent must be started manually.\n\n")
|
||||
fmt.Printf("To start the agent:\n")
|
||||
fmt.Printf(" cd %s\n", polecatObj.ClonePath)
|
||||
// Use rig's configured agent command
|
||||
agentCmd := config.ResolveAgentConfig(townRoot, r.Path).BuildCommand()
|
||||
// Use rig's configured agent command, unless overridden.
|
||||
agentCmd, err := config.GetRuntimeCommandWithAgentOverride(r.Path, opts.Agent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fmt.Printf(" %s\n\n", agentCmd)
|
||||
fmt.Printf("Agent will discover work via gt prime on startup.\n")
|
||||
|
||||
@@ -136,7 +139,7 @@ func SpawnPolecatForSling(rigName string, opts SlingSpawnOptions) (*SpawnedPolec
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Resolve account for Claude config
|
||||
// Resolve account for runtime config
|
||||
accountsPath := constants.MayorAccountsPath(townRoot)
|
||||
claudeConfigDir, accountHandle, err := config.ResolveAccountConfigDir(accountsPath, opts.Account)
|
||||
if err != nil {
|
||||
@@ -148,22 +151,29 @@ func SpawnPolecatForSling(rigName string, opts SlingSpawnOptions) (*SpawnedPolec
|
||||
|
||||
// Start session
|
||||
t := tmux.NewTmux()
|
||||
sessMgr := session.NewManager(t, r)
|
||||
polecatSessMgr := polecat.NewSessionManager(t, r)
|
||||
|
||||
// Check if already running
|
||||
running, _ := sessMgr.IsRunning(polecatName)
|
||||
running, _ := polecatSessMgr.IsRunning(polecatName)
|
||||
if !running {
|
||||
fmt.Printf("Starting session for %s/%s...\n", rigName, polecatName)
|
||||
startOpts := session.StartOptions{
|
||||
ClaudeConfigDir: claudeConfigDir,
|
||||
startOpts := polecat.SessionStartOptions{
|
||||
RuntimeConfigDir: claudeConfigDir,
|
||||
}
|
||||
if err := sessMgr.Start(polecatName, startOpts); err != nil {
|
||||
if opts.Agent != "" {
|
||||
cmd, err := config.BuildPolecatStartupCommandWithAgentOverride(rigName, polecatName, r.Path, "", opts.Agent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
startOpts.Command = cmd
|
||||
}
|
||||
if err := polecatSessMgr.Start(polecatName, startOpts); err != nil {
|
||||
return nil, fmt.Errorf("starting session: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Get session name and pane
|
||||
sessionName := sessMgr.SessionName(polecatName)
|
||||
sessionName := polecatSessMgr.SessionName(polecatName)
|
||||
pane, err := getSessionPane(sessionName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting pane for %s: %w", sessionName, err)
|
||||
|
||||
@@ -17,9 +17,13 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/checkpoint"
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
"github.com/steveyegge/gastown/internal/deacon"
|
||||
"github.com/steveyegge/gastown/internal/events"
|
||||
"github.com/steveyegge/gastown/internal/lock"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/state"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/templates"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
@@ -79,12 +83,15 @@ func init() {
|
||||
type RoleContext = RoleInfo
|
||||
|
||||
func runPrime(cmd *cobra.Command, args []string) error {
|
||||
if !state.IsEnabled() {
|
||||
return nil
|
||||
}
|
||||
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting current directory: %w", err)
|
||||
}
|
||||
|
||||
// Find town root
|
||||
townRoot, err := workspace.FindFromCwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("finding workspace: %w", err)
|
||||
@@ -146,10 +153,14 @@ func runPrime(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// Ensure beads redirect exists for worktree-based roles
|
||||
ensureBeadsRedirect(ctx)
|
||||
// Skip if there's a role/location mismatch to avoid creating bad redirects
|
||||
if !roleInfo.Mismatch {
|
||||
ensureBeadsRedirect(ctx)
|
||||
}
|
||||
|
||||
// Report agent state as running (ZFC: agents self-report state)
|
||||
reportAgentState(ctx, "running")
|
||||
// NOTE: reportAgentState("running") removed (gt-zecmc)
|
||||
// Agent liveness is observable from tmux - no need to record it in bead.
|
||||
// "Discover, don't track" principle: reality is truth, state is derived.
|
||||
|
||||
// Emit session_start event for seance discovery
|
||||
emitSessionEvent(ctx)
|
||||
@@ -308,12 +319,22 @@ func outputPrimeContext(ctx RoleContext) error {
|
||||
// Get town name for session names
|
||||
townName, _ := workspace.GetTownName(ctx.TownRoot)
|
||||
|
||||
// Get default branch from rig config (default to "main" if not set)
|
||||
defaultBranch := "main"
|
||||
if ctx.Rig != "" && ctx.TownRoot != "" {
|
||||
rigPath := filepath.Join(ctx.TownRoot, ctx.Rig)
|
||||
if rigCfg, err := rig.LoadRigConfig(rigPath); err == nil && rigCfg.DefaultBranch != "" {
|
||||
defaultBranch = rigCfg.DefaultBranch
|
||||
}
|
||||
}
|
||||
|
||||
data := templates.RoleData{
|
||||
Role: roleName,
|
||||
RigName: ctx.Rig,
|
||||
TownRoot: ctx.TownRoot,
|
||||
TownName: townName,
|
||||
WorkDir: ctx.WorkDir,
|
||||
DefaultBranch: defaultBranch,
|
||||
Polecat: ctx.Polecat,
|
||||
MayorSession: session.MayorSessionName(),
|
||||
DeaconSession: session.DeaconSessionName(),
|
||||
@@ -360,7 +381,7 @@ func outputMayorContext(ctx RoleContext) {
|
||||
fmt.Println("- `gt mail inbox` - Check your messages")
|
||||
fmt.Println("- `gt mail read <id>` - Read a specific message")
|
||||
fmt.Println("- `gt status` - Show overall town status")
|
||||
fmt.Println("- `gt rigs` - List all rigs")
|
||||
fmt.Println("- `gt rig list` - List all rigs")
|
||||
fmt.Println("- `bd ready` - Issues ready to work")
|
||||
fmt.Println()
|
||||
fmt.Println("## Hookable Mail")
|
||||
@@ -586,6 +607,11 @@ func outputStartupDirective(ctx RoleContext) {
|
||||
fmt.Println(" - If attachment found → **RUN IT** (no human input needed)")
|
||||
fmt.Println(" - If no attachment → await user instruction")
|
||||
case RoleDeacon:
|
||||
// Skip startup protocol if paused - the pause message was already shown
|
||||
paused, _, _ := deacon.IsPaused(ctx.TownRoot)
|
||||
if paused {
|
||||
return
|
||||
}
|
||||
fmt.Println()
|
||||
fmt.Println("---")
|
||||
fmt.Println()
|
||||
@@ -894,11 +920,19 @@ func showMoleculeProgress(b *beads.Beads, rootID string) {
|
||||
|
||||
// outputDeaconPatrolContext shows patrol molecule status for the Deacon.
|
||||
// Deacon uses wisps (Wisp:true issues in main .beads/) for patrol cycles.
|
||||
// Deacon is a town-level role, so it uses town root beads (not rig beads).
|
||||
func outputDeaconPatrolContext(ctx RoleContext) {
|
||||
// Check if Deacon is paused - if so, output PAUSED message and skip patrol context
|
||||
paused, state, err := deacon.IsPaused(ctx.TownRoot)
|
||||
if err == nil && paused {
|
||||
outputDeaconPausedMessage(state)
|
||||
return
|
||||
}
|
||||
|
||||
cfg := PatrolConfig{
|
||||
RoleName: "deacon",
|
||||
PatrolMolName: "mol-deacon-patrol",
|
||||
BeadsDir: ctx.WorkDir,
|
||||
BeadsDir: ctx.TownRoot, // Town-level role uses town root beads
|
||||
Assignee: "deacon",
|
||||
HeaderEmoji: "🔄",
|
||||
HeaderTitle: "Patrol Status (Wisp-based)",
|
||||
@@ -914,6 +948,32 @@ func outputDeaconPatrolContext(ctx RoleContext) {
|
||||
outputPatrolContext(cfg)
|
||||
}
|
||||
|
||||
// outputDeaconPausedMessage outputs a prominent PAUSED message for the Deacon.
|
||||
// When paused, the Deacon must not perform any patrol actions.
|
||||
func outputDeaconPausedMessage(state *deacon.PauseState) {
|
||||
fmt.Println()
|
||||
fmt.Printf("%s\n\n", style.Bold.Render("## ⏸️ DEACON PAUSED"))
|
||||
fmt.Println("You are paused and must NOT perform any patrol actions.")
|
||||
fmt.Println()
|
||||
if state.Reason != "" {
|
||||
fmt.Printf("Reason: %s\n", state.Reason)
|
||||
}
|
||||
fmt.Printf("Paused at: %s\n", state.PausedAt.Format(time.RFC3339))
|
||||
if state.PausedBy != "" {
|
||||
fmt.Printf("Paused by: %s\n", state.PausedBy)
|
||||
}
|
||||
fmt.Println()
|
||||
fmt.Println("Wait for human to run `gt deacon resume` before working.")
|
||||
fmt.Println()
|
||||
fmt.Println("**DO NOT:**")
|
||||
fmt.Println("- Create patrol molecules")
|
||||
fmt.Println("- Run heartbeats")
|
||||
fmt.Println("- Check agent health")
|
||||
fmt.Println("- Take any autonomous actions")
|
||||
fmt.Println()
|
||||
fmt.Println("You may respond to direct human questions.")
|
||||
}
|
||||
|
||||
// outputWitnessPatrolContext shows patrol molecule status for the Witness.
|
||||
// Witness AUTO-BONDS its patrol molecule on startup if one isn't already running.
|
||||
func outputWitnessPatrolContext(ctx RoleContext) {
|
||||
@@ -1174,89 +1234,9 @@ func acquireIdentityLock(ctx RoleContext) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// reportAgentState updates the agent bead to report the agent's current state.
|
||||
// This implements ZFC-compliant self-reporting of agent state.
|
||||
// Agents call this on startup (running) and shutdown (stopped).
|
||||
// For crew workers, creates the agent bead if it doesn't exist.
|
||||
func reportAgentState(ctx RoleContext, state string) {
|
||||
agentBeadID := getAgentBeadID(ctx)
|
||||
if agentBeadID == "" {
|
||||
return
|
||||
}
|
||||
|
||||
// Use the beads API directly to update agent state
|
||||
// This is more reliable than shelling out to bd
|
||||
bd := beads.New(ctx.WorkDir)
|
||||
|
||||
// Check if agent bead exists, create if needed (especially for crew workers)
|
||||
if _, err := bd.Show(agentBeadID); err != nil {
|
||||
// Agent bead doesn't exist - create it
|
||||
fields := getAgentFields(ctx, state)
|
||||
if fields != nil {
|
||||
_, createErr := bd.CreateAgentBead(agentBeadID, agentBeadID, fields)
|
||||
if createErr != nil {
|
||||
// Silently ignore - beads might not be configured
|
||||
return
|
||||
}
|
||||
// Bead created with initial state, no need to update
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Update existing agent bead state
|
||||
if err := bd.UpdateAgentState(agentBeadID, state, nil); err != nil {
|
||||
// Silently ignore errors - don't fail prime if state reporting fails
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// getAgentFields returns the AgentFields for creating a new agent bead.
|
||||
func getAgentFields(ctx RoleContext, state string) *beads.AgentFields {
|
||||
switch ctx.Role {
|
||||
case RoleCrew:
|
||||
return &beads.AgentFields{
|
||||
RoleType: "crew",
|
||||
Rig: ctx.Rig,
|
||||
AgentState: state,
|
||||
RoleBead: beads.RoleBeadIDTown("crew"),
|
||||
}
|
||||
case RolePolecat:
|
||||
return &beads.AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: ctx.Rig,
|
||||
AgentState: state,
|
||||
RoleBead: beads.RoleBeadIDTown("polecat"),
|
||||
}
|
||||
case RoleMayor:
|
||||
return &beads.AgentFields{
|
||||
RoleType: "mayor",
|
||||
AgentState: state,
|
||||
RoleBead: beads.RoleBeadIDTown("mayor"),
|
||||
}
|
||||
case RoleDeacon:
|
||||
return &beads.AgentFields{
|
||||
RoleType: "deacon",
|
||||
AgentState: state,
|
||||
RoleBead: beads.RoleBeadIDTown("deacon"),
|
||||
}
|
||||
case RoleWitness:
|
||||
return &beads.AgentFields{
|
||||
RoleType: "witness",
|
||||
Rig: ctx.Rig,
|
||||
AgentState: state,
|
||||
RoleBead: beads.RoleBeadIDTown("witness"),
|
||||
}
|
||||
case RoleRefinery:
|
||||
return &beads.AgentFields{
|
||||
RoleType: "refinery",
|
||||
Rig: ctx.Rig,
|
||||
AgentState: state,
|
||||
RoleBead: beads.RoleBeadIDTown("refinery"),
|
||||
}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// NOTE: reportAgentState() and getAgentFields() were removed in gt-zecmc.
|
||||
// Agent liveness is now discovered from tmux, not recorded in beads.
|
||||
// "Discover, don't track" principle: observable state should not be recorded.
|
||||
|
||||
// getAgentBeadID returns the agent bead ID for the current role.
|
||||
// Town-level agents (mayor, deacon) use hq- prefix; rig-scoped agents use the rig's prefix.
|
||||
@@ -1298,103 +1278,22 @@ func getAgentBeadID(ctx RoleContext) string {
|
||||
|
||||
// ensureBeadsRedirect ensures the .beads/redirect file exists for worktree-based roles.
|
||||
// This handles cases where git clean or other operations delete the redirect file.
|
||||
//
|
||||
// IMPORTANT: This function includes safety checks to prevent creating redirects in
|
||||
// the canonical beads location (mayor/rig/.beads), which would cause circular redirects.
|
||||
// Uses the shared SetupRedirect helper which handles both tracked and local beads.
|
||||
func ensureBeadsRedirect(ctx RoleContext) {
|
||||
// Only applies to crew and polecat roles (they use shared beads)
|
||||
if ctx.Role != RoleCrew && ctx.Role != RolePolecat {
|
||||
return
|
||||
}
|
||||
|
||||
// Get the rig root (parent of crew/ or polecats/)
|
||||
relPath, err := filepath.Rel(ctx.TownRoot, ctx.WorkDir)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
parts := strings.Split(filepath.ToSlash(relPath), "/")
|
||||
if len(parts) < 1 {
|
||||
return
|
||||
}
|
||||
rigRoot := filepath.Join(ctx.TownRoot, parts[0])
|
||||
|
||||
// SAFETY CHECK: Prevent creating redirect in canonical beads location
|
||||
// If workDir is inside mayor/rig/, we should NOT create a redirect there
|
||||
// This prevents circular redirects like mayor/rig/.beads/redirect -> ../../mayor/rig/.beads
|
||||
mayorRigPath := filepath.Join(rigRoot, "mayor", "rig")
|
||||
workDirAbs, _ := filepath.Abs(ctx.WorkDir)
|
||||
mayorRigPathAbs, _ := filepath.Abs(mayorRigPath)
|
||||
if strings.HasPrefix(workDirAbs, mayorRigPathAbs) {
|
||||
// We're inside mayor/rig/ - this is not a polecat/crew worker location
|
||||
// Role detection may be wrong (e.g., GT_ROLE env var mismatch)
|
||||
// Do NOT create a redirect here
|
||||
// Only applies to worktree-based roles that use shared beads
|
||||
if ctx.Role != RoleCrew && ctx.Role != RolePolecat && ctx.Role != RoleRefinery {
|
||||
return
|
||||
}
|
||||
|
||||
// Check if redirect already exists
|
||||
beadsDir := filepath.Join(ctx.WorkDir, ".beads")
|
||||
redirectPath := filepath.Join(beadsDir, "redirect")
|
||||
|
||||
redirectPath := filepath.Join(ctx.WorkDir, ".beads", "redirect")
|
||||
if _, err := os.Stat(redirectPath); err == nil {
|
||||
// Redirect exists, nothing to do
|
||||
return
|
||||
}
|
||||
|
||||
// Determine the correct redirect path based on role and rig structure
|
||||
var redirectContent string
|
||||
|
||||
// Check for shared beads locations in order of preference:
|
||||
// 1. rig/mayor/rig/.beads/ (if mayor rig clone exists)
|
||||
// 2. rig/.beads/ (rig root beads)
|
||||
mayorRigBeads := filepath.Join(rigRoot, "mayor", "rig", ".beads")
|
||||
rigRootBeads := filepath.Join(rigRoot, ".beads")
|
||||
|
||||
if _, err := os.Stat(mayorRigBeads); err == nil {
|
||||
// Use mayor/rig/.beads
|
||||
if ctx.Role == RoleCrew {
|
||||
// crew/<name>/.beads -> ../../mayor/rig/.beads
|
||||
redirectContent = "../../mayor/rig/.beads"
|
||||
} else {
|
||||
// polecats/<name>/.beads -> ../../mayor/rig/.beads
|
||||
redirectContent = "../../mayor/rig/.beads"
|
||||
}
|
||||
} else if _, err := os.Stat(rigRootBeads); err == nil {
|
||||
// Use rig root .beads
|
||||
if ctx.Role == RoleCrew {
|
||||
// crew/<name>/.beads -> ../../.beads
|
||||
redirectContent = "../../.beads"
|
||||
} else {
|
||||
// polecats/<name>/.beads -> ../../.beads
|
||||
redirectContent = "../../.beads"
|
||||
}
|
||||
} else {
|
||||
// No shared beads found, nothing to redirect to
|
||||
return
|
||||
}
|
||||
|
||||
// SAFETY CHECK: Verify the redirect won't be circular
|
||||
// Resolve the redirect target and check it's not the same as our beads dir
|
||||
resolvedTarget := filepath.Join(ctx.WorkDir, redirectContent)
|
||||
resolvedTarget = filepath.Clean(resolvedTarget)
|
||||
if resolvedTarget == beadsDir {
|
||||
// Would create circular redirect - don't do it
|
||||
return
|
||||
}
|
||||
|
||||
// Create .beads directory if needed
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
// Silently fail - not critical
|
||||
return
|
||||
}
|
||||
|
||||
// Write redirect file
|
||||
if err := os.WriteFile(redirectPath, []byte(redirectContent+"\n"), 0644); err != nil {
|
||||
// Silently fail - not critical
|
||||
return
|
||||
}
|
||||
|
||||
// Note: We don't print a message here to avoid cluttering prime output
|
||||
// The redirect is silently restored
|
||||
// Use shared helper - silently ignore errors during prime
|
||||
_ = beads.SetupRedirect(ctx.TownRoot, ctx.WorkDir)
|
||||
}
|
||||
|
||||
// checkPendingEscalations queries for open escalation beads and displays them prominently.
|
||||
@@ -1605,22 +1504,17 @@ func outputSessionMetadata(ctx RoleContext) {
|
||||
// resolveSessionIDForPrime finds the session ID from available sources.
|
||||
// Priority: GT_SESSION_ID env, CLAUDE_SESSION_ID env, persisted file, fallback.
|
||||
func resolveSessionIDForPrime(actor string) string {
|
||||
// 1. GT_SESSION_ID (new canonical)
|
||||
if id := os.Getenv("GT_SESSION_ID"); id != "" {
|
||||
// 1. Try runtime's session ID lookup (checks GT_SESSION_ID_ENV, then CLAUDE_SESSION_ID)
|
||||
if id := runtime.SessionIDFromEnv(); id != "" {
|
||||
return id
|
||||
}
|
||||
|
||||
// 2. CLAUDE_SESSION_ID (legacy/Claude Code)
|
||||
if id := os.Getenv("CLAUDE_SESSION_ID"); id != "" {
|
||||
return id
|
||||
}
|
||||
|
||||
// 3. Persisted session file (from gt prime --hook)
|
||||
// 2. Persisted session file (from gt prime --hook)
|
||||
if id := ReadPersistedSessionID(); id != "" {
|
||||
return id
|
||||
}
|
||||
|
||||
// 4. Fallback to generated identifier
|
||||
// 3. Fallback to generated identifier
|
||||
return fmt.Sprintf("%s-%d", actor, os.Getpid())
|
||||
}
|
||||
|
||||
|
||||
@@ -17,9 +17,9 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/polecat"
|
||||
"github.com/steveyegge/gastown/internal/refinery"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
"github.com/steveyegge/gastown/internal/wisp"
|
||||
"github.com/steveyegge/gastown/internal/witness"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
@@ -366,12 +366,16 @@ func runRigAdd(cmd *cobra.Command, args []string) error {
|
||||
// - Otherwise route to rig root (where initBeads creates the database)
|
||||
// The conditional routing is necessary because initBeads creates the database at
|
||||
// "<rig>/.beads", while repos with tracked beads have their database at mayor/rig/.beads.
|
||||
var beadsWorkDir string
|
||||
if newRig.Config.Prefix != "" {
|
||||
routePath := name
|
||||
mayorRigBeads := filepath.Join(townRoot, name, "mayor", "rig", ".beads")
|
||||
if _, err := os.Stat(mayorRigBeads); err == nil {
|
||||
// Source repo has .beads/ tracked - route to mayor/rig
|
||||
routePath = name + "/mayor/rig"
|
||||
beadsWorkDir = filepath.Join(townRoot, name, "mayor", "rig")
|
||||
} else {
|
||||
beadsWorkDir = filepath.Join(townRoot, name)
|
||||
}
|
||||
route := beads.Route{
|
||||
Prefix: newRig.Config.Prefix + "-",
|
||||
@@ -383,6 +387,23 @@ func runRigAdd(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Create rig identity bead
|
||||
if newRig.Config.Prefix != "" && beadsWorkDir != "" {
|
||||
bd := beads.New(beadsWorkDir)
|
||||
rigBeadID := beads.RigBeadIDWithPrefix(newRig.Config.Prefix, name)
|
||||
fields := &beads.RigFields{
|
||||
Repo: gitURL,
|
||||
Prefix: newRig.Config.Prefix,
|
||||
State: "active",
|
||||
}
|
||||
if _, err := bd.CreateRigBead(rigBeadID, name, fields); err != nil {
|
||||
// Non-fatal: rig is functional without the identity bead
|
||||
fmt.Printf(" %s Could not create rig identity bead: %v\n", style.Warning.Render("!"), err)
|
||||
} else {
|
||||
fmt.Printf(" Created rig identity bead: %s\n", rigBeadID)
|
||||
}
|
||||
}
|
||||
|
||||
elapsed := time.Since(startTime)
|
||||
|
||||
// Read default branch from rig config
|
||||
@@ -737,15 +758,14 @@ func runRigBoot(cmd *cobra.Command, args []string) error {
|
||||
skipped = append(skipped, "witness (already running)")
|
||||
} else {
|
||||
fmt.Printf(" Starting witness...\n")
|
||||
// Use ensureWitnessSession to create tmux session (same as gt witness start)
|
||||
created, err := ensureWitnessSession(rigName, r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("starting witness: %w", err)
|
||||
}
|
||||
if created {
|
||||
// Update manager state to reflect running session
|
||||
witMgr := witness.NewManager(r)
|
||||
_ = witMgr.Start() // non-fatal: state file update
|
||||
witMgr := witness.NewManager(r)
|
||||
if err := witMgr.Start(false); err != nil {
|
||||
if err == witness.ErrAlreadyRunning {
|
||||
skipped = append(skipped, "witness (already running)")
|
||||
} else {
|
||||
return fmt.Errorf("starting witness: %w", err)
|
||||
}
|
||||
} else {
|
||||
started = append(started, "witness")
|
||||
}
|
||||
}
|
||||
@@ -818,13 +838,15 @@ func runRigStart(cmd *cobra.Command, args []string) error {
|
||||
skipped = append(skipped, "witness")
|
||||
} else {
|
||||
fmt.Printf(" Starting witness...\n")
|
||||
created, err := ensureWitnessSession(rigName, r)
|
||||
if err != nil {
|
||||
fmt.Printf(" %s Failed to start witness: %v\n", style.Warning.Render("⚠"), err)
|
||||
hasError = true
|
||||
} else if created {
|
||||
witMgr := witness.NewManager(r)
|
||||
_ = witMgr.Start()
|
||||
witMgr := witness.NewManager(r)
|
||||
if err := witMgr.Start(false); err != nil {
|
||||
if err == witness.ErrAlreadyRunning {
|
||||
skipped = append(skipped, "witness")
|
||||
} else {
|
||||
fmt.Printf(" %s Failed to start witness: %v\n", style.Warning.Render("⚠"), err)
|
||||
hasError = true
|
||||
}
|
||||
} else {
|
||||
started = append(started, "witness")
|
||||
}
|
||||
}
|
||||
@@ -935,11 +957,11 @@ func runRigShutdown(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// 1. Stop all polecat sessions
|
||||
t := tmux.NewTmux()
|
||||
sessMgr := session.NewManager(t, r)
|
||||
infos, err := sessMgr.List()
|
||||
polecatMgr := polecat.NewSessionManager(t, r)
|
||||
infos, err := polecatMgr.List()
|
||||
if err == nil && len(infos) > 0 {
|
||||
fmt.Printf(" Stopping %d polecat session(s)...\n", len(infos))
|
||||
if err := sessMgr.StopAll(rigShutdownForce); err != nil {
|
||||
if err := polecatMgr.StopAll(rigShutdownForce); err != nil {
|
||||
errors = append(errors, fmt.Sprintf("polecat sessions: %v", err))
|
||||
}
|
||||
}
|
||||
@@ -1025,6 +1047,17 @@ func runRigStatus(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Header
|
||||
fmt.Printf("%s\n", style.Bold.Render(rigName))
|
||||
|
||||
// Operational state
|
||||
opState, opSource := getRigOperationalState(townRoot, rigName)
|
||||
if opState == "OPERATIONAL" {
|
||||
fmt.Printf(" Status: %s\n", style.Success.Render(opState))
|
||||
} else if opState == "PARKED" {
|
||||
fmt.Printf(" Status: %s (%s)\n", style.Warning.Render(opState), opSource)
|
||||
} else if opState == "DOCKED" {
|
||||
fmt.Printf(" Status: %s (%s)\n", style.Dim.Render(opState), opSource)
|
||||
}
|
||||
|
||||
fmt.Printf(" Path: %s\n", r.Path)
|
||||
if r.Config != nil && r.Config.Prefix != "" {
|
||||
fmt.Printf(" Beads prefix: %s-\n", r.Config.Prefix)
|
||||
@@ -1201,11 +1234,11 @@ func runRigStop(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// 1. Stop all polecat sessions
|
||||
t := tmux.NewTmux()
|
||||
sessMgr := session.NewManager(t, r)
|
||||
infos, err := sessMgr.List()
|
||||
polecatMgr := polecat.NewSessionManager(t, r)
|
||||
infos, err := polecatMgr.List()
|
||||
if err == nil && len(infos) > 0 {
|
||||
fmt.Printf(" Stopping %d polecat session(s)...\n", len(infos))
|
||||
if err := sessMgr.StopAll(rigStopForce); err != nil {
|
||||
if err := polecatMgr.StopAll(rigStopForce); err != nil {
|
||||
errors = append(errors, fmt.Sprintf("polecat sessions: %v", err))
|
||||
}
|
||||
}
|
||||
@@ -1334,11 +1367,11 @@ func runRigRestart(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf(" Stopping...\n")
|
||||
|
||||
// 1. Stop all polecat sessions
|
||||
sessMgr := session.NewManager(t, r)
|
||||
infos, err := sessMgr.List()
|
||||
polecatMgr := polecat.NewSessionManager(t, r)
|
||||
infos, err := polecatMgr.List()
|
||||
if err == nil && len(infos) > 0 {
|
||||
fmt.Printf(" Stopping %d polecat session(s)...\n", len(infos))
|
||||
if err := sessMgr.StopAll(rigRestartForce); err != nil {
|
||||
if err := polecatMgr.StopAll(rigRestartForce); err != nil {
|
||||
stopErrors = append(stopErrors, fmt.Sprintf("polecat sessions: %v", err))
|
||||
}
|
||||
}
|
||||
@@ -1385,12 +1418,14 @@ func runRigRestart(cmd *cobra.Command, args []string) error {
|
||||
skipped = append(skipped, "witness")
|
||||
} else {
|
||||
fmt.Printf(" Starting witness...\n")
|
||||
created, err := ensureWitnessSession(rigName, r)
|
||||
if err != nil {
|
||||
fmt.Printf(" %s Failed to start witness: %v\n", style.Warning.Render("⚠"), err)
|
||||
startErrors = append(startErrors, fmt.Sprintf("witness: %v", err))
|
||||
} else if created {
|
||||
_ = witMgr.Start()
|
||||
if err := witMgr.Start(false); err != nil {
|
||||
if err == witness.ErrAlreadyRunning {
|
||||
skipped = append(skipped, "witness")
|
||||
} else {
|
||||
fmt.Printf(" %s Failed to start witness: %v\n", style.Warning.Render("⚠"), err)
|
||||
startErrors = append(startErrors, fmt.Sprintf("witness: %v", err))
|
||||
}
|
||||
} else {
|
||||
started = append(started, "witness")
|
||||
}
|
||||
}
|
||||
@@ -1448,3 +1483,48 @@ func runRigRestart(cmd *cobra.Command, args []string) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getRigOperationalState returns the operational state and source for a rig.
|
||||
// It checks the wisp layer first (local/ephemeral), then rig bead labels (global).
|
||||
// Returns state ("OPERATIONAL", "PARKED", or "DOCKED") and source ("local", "global - synced", or "default").
|
||||
func getRigOperationalState(townRoot, rigName string) (state string, source string) {
|
||||
// Check wisp layer first (local/ephemeral overrides)
|
||||
wispConfig := wisp.NewConfig(townRoot, rigName)
|
||||
if status := wispConfig.GetString("status"); status != "" {
|
||||
switch strings.ToLower(status) {
|
||||
case "parked":
|
||||
return "PARKED", "local"
|
||||
case "docked":
|
||||
return "DOCKED", "local"
|
||||
}
|
||||
}
|
||||
|
||||
// Check rig bead labels (global/synced)
|
||||
// Rig identity bead ID: <prefix>-rig-<name>
|
||||
// Look for status:docked or status:parked labels
|
||||
rigPath := filepath.Join(townRoot, rigName)
|
||||
rigBeadsDir := beads.ResolveBeadsDir(rigPath)
|
||||
bd := beads.NewWithBeadsDir(rigPath, rigBeadsDir)
|
||||
|
||||
// Try to find the rig identity bead
|
||||
// Convention: <prefix>-rig-<rigName>
|
||||
if rigCfg, err := rig.LoadRigConfig(rigPath); err == nil && rigCfg.Beads != nil {
|
||||
rigBeadID := fmt.Sprintf("%s-rig-%s", rigCfg.Beads.Prefix, rigName)
|
||||
if issue, err := bd.Show(rigBeadID); err == nil {
|
||||
for _, label := range issue.Labels {
|
||||
if strings.HasPrefix(label, "status:") {
|
||||
statusValue := strings.TrimPrefix(label, "status:")
|
||||
switch strings.ToLower(statusValue) {
|
||||
case "docked":
|
||||
return "DOCKED", "global - synced"
|
||||
case "parked":
|
||||
return "PARKED", "global - synced"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Default: operational
|
||||
return "OPERATIONAL", "default"
|
||||
}
|
||||
|
||||
322
internal/cmd/rig_config.go
Normal file
322
internal/cmd/rig_config.go
Normal file
@@ -0,0 +1,322 @@
|
||||
// Package cmd provides CLI commands for the gt tool.
|
||||
// This file implements the gt rig config commands for viewing and manipulating
|
||||
// rig configuration across property layers.
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/wisp"
|
||||
)
|
||||
|
||||
var rigConfigCmd = &cobra.Command{
|
||||
Use: "config",
|
||||
Short: "View and manage rig configuration",
|
||||
Long: `View and manage rig configuration across property layers.
|
||||
|
||||
Configuration is looked up through multiple layers:
|
||||
1. Wisp layer (transient, local) - .beads-wisp/config/
|
||||
2. Bead layer (persistent, synced) - rig identity bead labels
|
||||
3. Town defaults - ~/gt/settings/config.json
|
||||
4. System defaults - compiled-in fallbacks
|
||||
|
||||
Most properties use override semantics (first non-nil wins).
|
||||
Integer properties like priority_adjustment use stacking semantics (values add).`,
|
||||
RunE: requireSubcommand,
|
||||
}
|
||||
|
||||
var rigConfigShowCmd = &cobra.Command{
|
||||
Use: "show <rig>",
|
||||
Short: "Show effective configuration for a rig",
|
||||
Long: `Show the effective configuration for a rig.
|
||||
|
||||
By default, shows only the resolved values. Use --layers to see
|
||||
which layer each value comes from.
|
||||
|
||||
Example output:
|
||||
gt rig config show gastown --layers
|
||||
Key Value Source
|
||||
status parked wisp
|
||||
priority_adjustment 10 bead
|
||||
auto_restart true system
|
||||
max_polecats 4 town`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: runRigConfigShow,
|
||||
}
|
||||
|
||||
var rigConfigSetCmd = &cobra.Command{
|
||||
Use: "set <rig> <key> [value]",
|
||||
Short: "Set a configuration value",
|
||||
Long: `Set a configuration value in the wisp layer (local, ephemeral).
|
||||
|
||||
Use --global to set in the bead layer (persistent, synced globally).
|
||||
Use --block to explicitly block a key (prevents inheritance).
|
||||
|
||||
Examples:
|
||||
gt rig config set gastown status parked # Wisp layer
|
||||
gt rig config set gastown status docked --global # Bead layer
|
||||
gt rig config set gastown auto_restart --block # Block inheritance`,
|
||||
Args: cobra.RangeArgs(2, 3),
|
||||
RunE: runRigConfigSet,
|
||||
}
|
||||
|
||||
var rigConfigUnsetCmd = &cobra.Command{
|
||||
Use: "unset <rig> <key>",
|
||||
Short: "Remove a configuration value from the wisp layer",
|
||||
Long: `Remove a configuration value from the wisp layer.
|
||||
|
||||
This clears both regular values and blocked markers for the key.
|
||||
Values set in the bead layer remain unchanged.
|
||||
|
||||
Example:
|
||||
gt rig config unset gastown status`,
|
||||
Args: cobra.ExactArgs(2),
|
||||
RunE: runRigConfigUnset,
|
||||
}
|
||||
|
||||
// Flags
|
||||
var (
|
||||
rigConfigShowLayers bool
|
||||
rigConfigSetGlobal bool
|
||||
rigConfigSetBlock bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
rigCmd.AddCommand(rigConfigCmd)
|
||||
rigConfigCmd.AddCommand(rigConfigShowCmd)
|
||||
rigConfigCmd.AddCommand(rigConfigSetCmd)
|
||||
rigConfigCmd.AddCommand(rigConfigUnsetCmd)
|
||||
|
||||
rigConfigShowCmd.Flags().BoolVar(&rigConfigShowLayers, "layers", false, "Show which layer each value comes from")
|
||||
|
||||
rigConfigSetCmd.Flags().BoolVar(&rigConfigSetGlobal, "global", false, "Set in bead layer (persistent, synced)")
|
||||
rigConfigSetCmd.Flags().BoolVar(&rigConfigSetBlock, "block", false, "Block inheritance for this key")
|
||||
}
|
||||
|
||||
func runRigConfigShow(cmd *cobra.Command, args []string) error {
|
||||
rigName := args[0]
|
||||
|
||||
townRoot, r, err := getRig(rigName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Collect all known keys
|
||||
allKeys := getConfigKeys(townRoot, r)
|
||||
|
||||
if rigConfigShowLayers {
|
||||
// Show with sources
|
||||
fmt.Printf("%-25s %-15s %s\n", "Key", "Value", "Source")
|
||||
fmt.Printf("%-25s %-15s %s\n", "---", "-----", "------")
|
||||
for _, key := range allKeys {
|
||||
result := r.GetConfigWithSource(key)
|
||||
valueStr := formatValue(result.Value)
|
||||
sourceStr := string(result.Source)
|
||||
if result.Source == rig.SourceBlocked {
|
||||
valueStr = "(blocked)"
|
||||
}
|
||||
fmt.Printf("%-25s %-15s %s\n", key, valueStr, sourceStr)
|
||||
}
|
||||
} else {
|
||||
// Show only effective values
|
||||
fmt.Printf("%-25s %s\n", "Key", "Value")
|
||||
fmt.Printf("%-25s %s\n", "---", "-----")
|
||||
for _, key := range allKeys {
|
||||
result := r.GetConfigWithSource(key)
|
||||
if result.Source == rig.SourceNone {
|
||||
continue // Skip unset keys
|
||||
}
|
||||
valueStr := formatValue(result.Value)
|
||||
if result.Source == rig.SourceBlocked {
|
||||
valueStr = "(blocked)"
|
||||
}
|
||||
fmt.Printf("%-25s %s\n", key, valueStr)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func runRigConfigSet(cmd *cobra.Command, args []string) error {
|
||||
rigName := args[0]
|
||||
key := args[1]
|
||||
|
||||
// Validate: --block requires no value, otherwise value is required
|
||||
if rigConfigSetBlock {
|
||||
if len(args) > 2 {
|
||||
return fmt.Errorf("--block does not take a value")
|
||||
}
|
||||
} else {
|
||||
if len(args) < 3 {
|
||||
return fmt.Errorf("value is required (use --block to block inheritance instead)")
|
||||
}
|
||||
}
|
||||
|
||||
townRoot, r, err := getRig(rigName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if rigConfigSetBlock {
|
||||
// Block inheritance via wisp layer
|
||||
wispCfg := wisp.NewConfig(townRoot, r.Name)
|
||||
if err := wispCfg.Block(key); err != nil {
|
||||
return fmt.Errorf("blocking %s: %w", key, err)
|
||||
}
|
||||
fmt.Printf("%s Blocked %s for rig %s\n", style.Success.Render("✓"), key, rigName)
|
||||
return nil
|
||||
}
|
||||
|
||||
value := args[2]
|
||||
|
||||
if rigConfigSetGlobal {
|
||||
// Set in bead layer (rig identity bead labels)
|
||||
if err := setBeadLabel(townRoot, r, key, value); err != nil {
|
||||
return fmt.Errorf("setting bead label: %w", err)
|
||||
}
|
||||
fmt.Printf("%s Set %s=%s in bead layer for rig %s\n", style.Success.Render("✓"), key, value, rigName)
|
||||
} else {
|
||||
// Set in wisp layer
|
||||
wispCfg := wisp.NewConfig(townRoot, r.Name)
|
||||
// Try to parse as appropriate type
|
||||
var typedValue interface{} = value
|
||||
if b, err := strconv.ParseBool(value); err == nil {
|
||||
typedValue = b
|
||||
} else if i, err := strconv.Atoi(value); err == nil {
|
||||
typedValue = i
|
||||
}
|
||||
if err := wispCfg.Set(key, typedValue); err != nil {
|
||||
return fmt.Errorf("setting %s: %w", key, err)
|
||||
}
|
||||
fmt.Printf("%s Set %s=%s in wisp layer for rig %s\n", style.Success.Render("✓"), key, value, rigName)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func runRigConfigUnset(cmd *cobra.Command, args []string) error {
|
||||
rigName := args[0]
|
||||
key := args[1]
|
||||
|
||||
townRoot, r, err := getRig(rigName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
wispCfg := wisp.NewConfig(townRoot, r.Name)
|
||||
if err := wispCfg.Unset(key); err != nil {
|
||||
return fmt.Errorf("unsetting %s: %w", key, err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Unset %s from wisp layer for rig %s\n", style.Success.Render("✓"), key, rigName)
|
||||
return nil
|
||||
}
|
||||
|
||||
// getConfigKeys returns all known configuration keys, sorted.
|
||||
func getConfigKeys(townRoot string, r *rig.Rig) []string {
|
||||
keySet := make(map[string]bool)
|
||||
|
||||
// System defaults
|
||||
for k := range rig.SystemDefaults {
|
||||
keySet[k] = true
|
||||
}
|
||||
|
||||
// Wisp keys
|
||||
wispCfg := wisp.NewConfig(townRoot, r.Name)
|
||||
for _, k := range wispCfg.Keys() {
|
||||
keySet[k] = true
|
||||
}
|
||||
|
||||
// Bead labels (from rig identity bead)
|
||||
prefix := "gt"
|
||||
if r.Config != nil && r.Config.Prefix != "" {
|
||||
prefix = r.Config.Prefix
|
||||
}
|
||||
rigBeadID := beads.RigBeadIDWithPrefix(prefix, r.Name)
|
||||
beadsDir := beads.ResolveBeadsDir(r.Path)
|
||||
bd := beads.NewWithBeadsDir(townRoot, beadsDir)
|
||||
if issue, err := bd.Show(rigBeadID); err == nil {
|
||||
for _, label := range issue.Labels {
|
||||
// Labels are in format "key:value"
|
||||
for i, c := range label {
|
||||
if c == ':' {
|
||||
keySet[label[:i]] = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sort keys
|
||||
keys := make([]string, 0, len(keySet))
|
||||
for k := range keySet {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
return keys
|
||||
}
|
||||
|
||||
// setBeadLabel sets a label on the rig identity bead.
|
||||
func setBeadLabel(townRoot string, r *rig.Rig, key, value string) error {
|
||||
prefix := "gt"
|
||||
if r.Config != nil && r.Config.Prefix != "" {
|
||||
prefix = r.Config.Prefix
|
||||
}
|
||||
|
||||
rigBeadID := beads.RigBeadIDWithPrefix(prefix, r.Name)
|
||||
beadsDir := beads.ResolveBeadsDir(r.Path)
|
||||
bd := beads.NewWithBeadsDir(townRoot, beadsDir)
|
||||
|
||||
// Check if bead exists
|
||||
issue, err := bd.Show(rigBeadID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("rig identity bead %s not found (run 'gt rig add' to create it)", rigBeadID)
|
||||
}
|
||||
|
||||
// Build new labels list: remove existing key:* and add new key:value
|
||||
newLabels := make([]string, 0, len(issue.Labels)+1)
|
||||
keyPrefix := key + ":"
|
||||
for _, label := range issue.Labels {
|
||||
if len(label) > len(keyPrefix) && label[:len(keyPrefix)] == keyPrefix {
|
||||
continue // Remove old value for this key
|
||||
}
|
||||
newLabels = append(newLabels, label)
|
||||
}
|
||||
newLabels = append(newLabels, key+":"+value)
|
||||
|
||||
// Update the bead
|
||||
return bd.Update(rigBeadID, beads.UpdateOptions{
|
||||
SetLabels: newLabels,
|
||||
})
|
||||
}
|
||||
|
||||
// formatValue formats a config value for display.
|
||||
func formatValue(v interface{}) string {
|
||||
if v == nil {
|
||||
return "(nil)"
|
||||
}
|
||||
switch val := v.(type) {
|
||||
case bool:
|
||||
if val {
|
||||
return "true"
|
||||
}
|
||||
return "false"
|
||||
case int:
|
||||
return strconv.Itoa(val)
|
||||
case int64:
|
||||
return strconv.FormatInt(val, 10)
|
||||
case float64:
|
||||
return strconv.FormatFloat(val, 'f', -1, 64)
|
||||
case string:
|
||||
return val
|
||||
default:
|
||||
return fmt.Sprintf("%v", v)
|
||||
}
|
||||
}
|
||||
147
internal/cmd/rig_detect.go
Normal file
147
internal/cmd/rig_detect.go
Normal file
@@ -0,0 +1,147 @@
|
||||
// ABOUTME: Hidden command for shell hook to detect rigs and update cache.
|
||||
// ABOUTME: Called by shell integration to set GT_TOWN_ROOT and GT_RIG env vars.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/state"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
|
||||
var rigDetectCache string
|
||||
|
||||
var rigDetectCmd = &cobra.Command{
|
||||
Use: "detect [path]",
|
||||
Short: "Detect rig from repository path (internal use)",
|
||||
Hidden: true,
|
||||
Long: `Detect rig from a repository path and optionally cache the result.
|
||||
|
||||
This is an internal command used by shell integration. It checks if the given
|
||||
path is inside a Gas Town rig and outputs shell variable assignments.
|
||||
|
||||
When --cache is specified, the result is written to ~/.cache/gastown/rigs.cache
|
||||
for fast lookups by the shell hook.
|
||||
|
||||
Output format (to stdout):
|
||||
export GT_TOWN_ROOT=/path/to/town
|
||||
export GT_RIG=rigname
|
||||
|
||||
Or if not in a rig:
|
||||
unset GT_TOWN_ROOT GT_RIG`,
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: runRigDetect,
|
||||
}
|
||||
|
||||
func init() {
|
||||
rigCmd.AddCommand(rigDetectCmd)
|
||||
rigDetectCmd.Flags().StringVar(&rigDetectCache, "cache", "", "Repository path to cache detection result for")
|
||||
}
|
||||
|
||||
func runRigDetect(cmd *cobra.Command, args []string) error {
|
||||
checkPath := "."
|
||||
if len(args) > 0 {
|
||||
checkPath = args[0]
|
||||
}
|
||||
|
||||
absPath, err := filepath.Abs(checkPath)
|
||||
if err != nil {
|
||||
return outputNotInRig()
|
||||
}
|
||||
|
||||
townRoot, err := workspace.Find(absPath)
|
||||
if err != nil || townRoot == "" {
|
||||
return outputNotInRig()
|
||||
}
|
||||
|
||||
rigName := detectRigFromPath(townRoot, absPath)
|
||||
|
||||
if rigName != "" {
|
||||
fmt.Printf("export GT_TOWN_ROOT=%q\n", townRoot)
|
||||
fmt.Printf("export GT_RIG=%q\n", rigName)
|
||||
} else {
|
||||
fmt.Printf("export GT_TOWN_ROOT=%q\n", townRoot)
|
||||
fmt.Println("unset GT_RIG")
|
||||
}
|
||||
|
||||
if rigDetectCache != "" {
|
||||
if err := updateRigCache(rigDetectCache, townRoot, rigName); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "warning: could not update cache: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func detectRigFromPath(townRoot, absPath string) string {
|
||||
rel, err := filepath.Rel(townRoot, absPath)
|
||||
if err != nil || strings.HasPrefix(rel, "..") {
|
||||
return ""
|
||||
}
|
||||
|
||||
parts := strings.Split(rel, string(filepath.Separator))
|
||||
if len(parts) == 0 || parts[0] == "." {
|
||||
return ""
|
||||
}
|
||||
|
||||
candidateRig := parts[0]
|
||||
|
||||
switch candidateRig {
|
||||
case "mayor", "deacon", ".beads", ".claude", ".git", "plugins":
|
||||
return ""
|
||||
}
|
||||
|
||||
rigConfigPath := filepath.Join(townRoot, candidateRig, "config.json")
|
||||
if _, err := os.Stat(rigConfigPath); err == nil {
|
||||
return candidateRig
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func outputNotInRig() error {
|
||||
fmt.Println("unset GT_TOWN_ROOT GT_RIG")
|
||||
return nil
|
||||
}
|
||||
|
||||
func updateRigCache(repoRoot, townRoot, rigName string) error {
|
||||
cacheDir := state.CacheDir()
|
||||
if err := os.MkdirAll(cacheDir, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cachePath := filepath.Join(cacheDir, "rigs.cache")
|
||||
|
||||
existing := make(map[string]string)
|
||||
if data, err := os.ReadFile(cachePath); err == nil {
|
||||
lines := strings.Split(string(data), "\n")
|
||||
for _, line := range lines {
|
||||
if idx := strings.Index(line, ":"); idx > 0 {
|
||||
existing[line[:idx]] = line[idx+1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var value string
|
||||
if rigName != "" {
|
||||
value = fmt.Sprintf("export GT_TOWN_ROOT=%q; export GT_RIG=%q", townRoot, rigName)
|
||||
} else if townRoot != "" {
|
||||
value = fmt.Sprintf("export GT_TOWN_ROOT=%q; unset GT_RIG", townRoot)
|
||||
} else {
|
||||
value = "unset GT_TOWN_ROOT GT_RIG"
|
||||
}
|
||||
|
||||
existing[repoRoot] = value
|
||||
|
||||
var lines []string
|
||||
for k, v := range existing {
|
||||
lines = append(lines, k+":"+v)
|
||||
}
|
||||
|
||||
return os.WriteFile(cachePath, []byte(strings.Join(lines, "\n")+"\n"), 0644)
|
||||
}
|
||||
253
internal/cmd/rig_dock.go
Normal file
253
internal/cmd/rig_dock.go
Normal file
@@ -0,0 +1,253 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/refinery"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
"github.com/steveyegge/gastown/internal/witness"
|
||||
)
|
||||
|
||||
// RigDockedLabel is the label set on rig identity beads when docked.
|
||||
const RigDockedLabel = "status:docked"
|
||||
|
||||
var rigDockCmd = &cobra.Command{
|
||||
Use: "dock <rig>",
|
||||
Short: "Dock a rig (global, persistent shutdown)",
|
||||
Long: `Dock a rig to persistently disable it across all clones.
|
||||
|
||||
Docking a rig:
|
||||
- Stops the witness if running
|
||||
- Stops the refinery if running
|
||||
- Sets status:docked label on the rig identity bead
|
||||
- Syncs via git so all clones see the docked status
|
||||
|
||||
This is a Level 2 (global/persistent) operation:
|
||||
- Affects all clones of this rig (via git sync)
|
||||
- Persists until explicitly undocked
|
||||
- The daemon respects this status and won't auto-restart agents
|
||||
|
||||
Use 'gt rig undock' to resume normal operation.
|
||||
|
||||
Examples:
|
||||
gt rig dock gastown
|
||||
gt rig dock beads`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: runRigDock,
|
||||
}
|
||||
|
||||
var rigUndockCmd = &cobra.Command{
|
||||
Use: "undock <rig>",
|
||||
Short: "Undock a rig (remove global docked status)",
|
||||
Long: `Undock a rig to remove the persistent docked status.
|
||||
|
||||
Undocking a rig:
|
||||
- Removes the status:docked label from the rig identity bead
|
||||
- Syncs via git so all clones see the undocked status
|
||||
- Allows the daemon to auto-restart agents
|
||||
- Does NOT automatically start agents (use 'gt rig start' for that)
|
||||
|
||||
Examples:
|
||||
gt rig undock gastown
|
||||
gt rig undock beads`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: runRigUndock,
|
||||
}
|
||||
|
||||
func init() {
|
||||
rigCmd.AddCommand(rigDockCmd)
|
||||
rigCmd.AddCommand(rigUndockCmd)
|
||||
}
|
||||
|
||||
// runRigDock implements 'gt rig dock <rig>': it stops the rig's witness and
// refinery tmux agents (if running), marks the rig identity bead with
// RigDockedLabel, and runs 'bd sync' so other clones observe the docked state.
// Stop and sync failures are reported as warnings but do not abort the dock;
// only a failure to set the label returns an error.
func runRigDock(cmd *cobra.Command, args []string) error {
	rigName := args[0]

	// Get rig
	_, r, err := getRig(rigName)
	if err != nil {
		return err
	}

	// Get rig prefix for bead ID
	prefix := "gt" // default
	if r.Config != nil && r.Config.Prefix != "" {
		prefix = r.Config.Prefix
	}

	// Find the rig identity bead
	rigBeadID := beads.RigBeadIDWithPrefix(prefix, rigName)
	bd := beads.New(r.BeadsPath())

	// Check if rig bead exists, create if not
	rigBead, err := bd.Show(rigBeadID)
	if err != nil {
		// Rig identity bead doesn't exist (legacy rig) - create it
		// NOTE(review): any Show error is treated as "bead missing" — presumably
		// acceptable here, but a transient store error would also trigger creation.
		fmt.Printf(" Creating rig identity bead %s...\n", rigBeadID)
		rigBead, err = bd.CreateRigBead(rigBeadID, rigName, &beads.RigFields{
			Repo:   r.GitURL,
			Prefix: prefix,
			State:  "active",
		})
		if err != nil {
			return fmt.Errorf("creating rig identity bead: %w", err)
		}
	}

	// Check if already docked: docking is idempotent, so bail out quietly.
	for _, label := range rigBead.Labels {
		if label == RigDockedLabel {
			fmt.Printf("%s Rig %s is already docked\n", style.Dim.Render("•"), rigName)
			return nil
		}
	}

	fmt.Printf("Docking rig %s...\n", style.Bold.Render(rigName))

	var stoppedAgents []string

	t := tmux.NewTmux()

	// Stop witness if running. HasSession errors are deliberately ignored:
	// an unverifiable session is treated as "not running".
	witnessSession := fmt.Sprintf("gt-%s-witness", rigName)
	witnessRunning, _ := t.HasSession(witnessSession)
	if witnessRunning {
		fmt.Printf(" Stopping witness...\n")
		witMgr := witness.NewManager(r)
		if err := witMgr.Stop(); err != nil {
			// Best-effort: a stop failure does not prevent docking.
			fmt.Printf(" %s Failed to stop witness: %v\n", style.Warning.Render("!"), err)
		} else {
			stoppedAgents = append(stoppedAgents, "Witness stopped")
		}
	}

	// Stop refinery if running (same best-effort policy as the witness).
	refinerySession := fmt.Sprintf("gt-%s-refinery", rigName)
	refineryRunning, _ := t.HasSession(refinerySession)
	if refineryRunning {
		fmt.Printf(" Stopping refinery...\n")
		refMgr := refinery.NewManager(r)
		if err := refMgr.Stop(); err != nil {
			fmt.Printf(" %s Failed to stop refinery: %v\n", style.Warning.Render("!"), err)
		} else {
			stoppedAgents = append(stoppedAgents, "Refinery stopped")
		}
	}

	// Set docked label on rig identity bead. This is the authoritative state
	// change; everything above is cleanup, everything below is propagation.
	if err := bd.Update(rigBeadID, beads.UpdateOptions{
		AddLabels: []string{RigDockedLabel},
	}); err != nil {
		return fmt.Errorf("setting docked label: %w", err)
	}

	// Sync beads to propagate to other clones. Sync failure is only a warning:
	// the label is set locally and can be synced later.
	fmt.Printf(" Syncing beads...\n")
	syncCmd := exec.Command("bd", "sync")
	syncCmd.Dir = r.BeadsPath()
	if output, err := syncCmd.CombinedOutput(); err != nil {
		fmt.Printf(" %s bd sync warning: %v\n%s", style.Warning.Render("!"), err, string(output))
	}

	// Output
	fmt.Printf("%s Rig %s docked (global)\n", style.Success.Render("✓"), rigName)
	fmt.Printf(" Label added: %s\n", RigDockedLabel)
	for _, msg := range stoppedAgents {
		fmt.Printf(" %s\n", msg)
	}
	fmt.Printf(" Run '%s' to propagate to other clones\n", style.Dim.Render("bd sync"))

	return nil
}
|
||||
|
||||
// runRigUndock implements 'gt rig undock <rig>': it removes RigDockedLabel
// from the rig identity bead and runs 'bd sync' to propagate the change.
// It does NOT start any agents (see 'gt rig start'). Missing bead or
// already-undocked states are treated as success with an informational message.
func runRigUndock(cmd *cobra.Command, args []string) error {
	rigName := args[0]

	// Get rig and town root
	_, r, err := getRig(rigName)
	if err != nil {
		return err
	}

	// Get rig prefix for bead ID
	prefix := "gt" // default
	if r.Config != nil && r.Config.Prefix != "" {
		prefix = r.Config.Prefix
	}

	// Find the rig identity bead
	rigBeadID := beads.RigBeadIDWithPrefix(prefix, rigName)
	bd := beads.New(r.BeadsPath())

	// Check if rig bead exists, create if not
	rigBead, err := bd.Show(rigBeadID)
	if err != nil {
		// Rig identity bead doesn't exist (legacy rig) - can't be docked,
		// so there is nothing to undo. Not an error.
		fmt.Printf("%s Rig %s has no identity bead and is not docked\n", style.Dim.Render("•"), rigName)
		return nil
	}

	// Check if actually docked; undocking is idempotent.
	isDocked := false
	for _, label := range rigBead.Labels {
		if label == RigDockedLabel {
			isDocked = true
			break
		}
	}
	if !isDocked {
		fmt.Printf("%s Rig %s is not docked\n", style.Dim.Render("•"), rigName)
		return nil
	}

	// Remove docked label from rig identity bead — the authoritative change.
	if err := bd.Update(rigBeadID, beads.UpdateOptions{
		RemoveLabels: []string{RigDockedLabel},
	}); err != nil {
		return fmt.Errorf("removing docked label: %w", err)
	}

	// Sync beads to propagate to other clones. A sync failure is only a
	// warning: the label is already removed locally.
	fmt.Printf(" Syncing beads...\n")
	syncCmd := exec.Command("bd", "sync")
	syncCmd.Dir = r.BeadsPath()
	if output, err := syncCmd.CombinedOutput(); err != nil {
		fmt.Printf(" %s bd sync warning: %v\n%s", style.Warning.Render("!"), err, string(output))
	}

	fmt.Printf("%s Rig %s undocked\n", style.Success.Render("✓"), rigName)
	fmt.Printf(" Label removed: %s\n", RigDockedLabel)
	fmt.Printf(" Daemon can now auto-restart agents\n")
	fmt.Printf(" Use '%s' to start agents immediately\n", style.Dim.Render("gt rig start "+rigName))

	return nil
}
|
||||
|
||||
// IsRigDocked checks if a rig is docked by checking for the status:docked label
|
||||
// on the rig identity bead. This function is exported for use by the daemon.
|
||||
func IsRigDocked(townRoot, rigName, prefix string) bool {
|
||||
// Construct the rig beads path
|
||||
rigPath := townRoot + "/" + rigName
|
||||
beadsPath := rigPath + "/mayor/rig"
|
||||
if _, err := exec.Command("test", "-d", beadsPath).CombinedOutput(); err != nil {
|
||||
beadsPath = rigPath
|
||||
}
|
||||
|
||||
bd := beads.New(beadsPath)
|
||||
rigBeadID := beads.RigBeadIDWithPrefix(prefix, rigName)
|
||||
|
||||
rigBead, err := bd.Show(rigBeadID)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, label := range rigBead.Labels {
|
||||
if label == RigDockedLabel {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -30,9 +30,10 @@ func createTestGitRepo(t *testing.T, name string) string {
|
||||
t.Fatalf("mkdir repo: %v", err)
|
||||
}
|
||||
|
||||
// Initialize git repo
|
||||
// Initialize git repo with explicit main branch
|
||||
// (system default may vary, causing checkout failures)
|
||||
cmds := [][]string{
|
||||
{"git", "init"},
|
||||
{"git", "init", "--initial-branch=main"},
|
||||
{"git", "config", "user.email", "test@test.com"},
|
||||
{"git", "config", "user.name", "Test User"},
|
||||
}
|
||||
@@ -253,6 +254,40 @@ func TestRigAddCreatesCorrectStructure(t *testing.T) {
|
||||
} else if info.IsDir() {
|
||||
t.Errorf("refinery/rig/.git should be a file (worktree), not a directory")
|
||||
}
|
||||
|
||||
// Verify Claude settings are created in correct locations (outside git repos).
|
||||
// Settings in parent directories are inherited by agents via directory traversal,
|
||||
// without polluting the source repos.
|
||||
expectedSettings := []struct {
|
||||
path string
|
||||
desc string
|
||||
}{
|
||||
{filepath.Join(rigPath, "witness", ".claude", "settings.json"), "witness/.claude/settings.json"},
|
||||
{filepath.Join(rigPath, "refinery", ".claude", "settings.json"), "refinery/.claude/settings.json"},
|
||||
{filepath.Join(rigPath, "crew", ".claude", "settings.json"), "crew/.claude/settings.json"},
|
||||
{filepath.Join(rigPath, "polecats", ".claude", "settings.json"), "polecats/.claude/settings.json"},
|
||||
}
|
||||
|
||||
for _, s := range expectedSettings {
|
||||
if _, err := os.Stat(s.path); err != nil {
|
||||
t.Errorf("%s not found: %v", s.desc, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify settings are NOT created inside source repos (these would be wrong)
|
||||
wrongLocations := []struct {
|
||||
path string
|
||||
desc string
|
||||
}{
|
||||
{filepath.Join(rigPath, "witness", "rig", ".claude", "settings.json"), "witness/rig/.claude (inside source repo)"},
|
||||
{filepath.Join(rigPath, "refinery", "rig", ".claude", "settings.json"), "refinery/rig/.claude (inside source repo)"},
|
||||
}
|
||||
|
||||
for _, w := range wrongLocations {
|
||||
if _, err := os.Stat(w.path); err == nil {
|
||||
t.Errorf("%s should NOT exist (settings would pollute source repo)", w.desc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestRigAddInitializesBeads verifies that beads is initialized with
|
||||
|
||||
149
internal/cmd/rig_park.go
Normal file
149
internal/cmd/rig_park.go
Normal file
@@ -0,0 +1,149 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/refinery"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
"github.com/steveyegge/gastown/internal/wisp"
|
||||
"github.com/steveyegge/gastown/internal/witness"
|
||||
)
|
||||
|
||||
// RigStatusKey is the wisp config key for rig operational status.
|
||||
const RigStatusKey = "status"
|
||||
|
||||
// RigStatusParked is the value indicating a rig is parked.
|
||||
const RigStatusParked = "parked"
|
||||
|
||||
var rigParkCmd = &cobra.Command{
|
||||
Use: "park <rig>",
|
||||
Short: "Park a rig (stops agents, daemon won't auto-restart)",
|
||||
Long: `Park a rig to temporarily disable it.
|
||||
|
||||
Parking a rig:
|
||||
- Stops the witness if running
|
||||
- Stops the refinery if running
|
||||
- Sets status=parked in the wisp layer (local/ephemeral)
|
||||
- The daemon respects this status and won't auto-restart agents
|
||||
|
||||
This is a Level 1 (local/ephemeral) operation:
|
||||
- Only affects this town
|
||||
- Disappears on wisp cleanup
|
||||
- Use 'gt rig unpark' to resume normal operation
|
||||
|
||||
Examples:
|
||||
gt rig park gastown
|
||||
gt rig park beads`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: runRigPark,
|
||||
}
|
||||
|
||||
var rigUnparkCmd = &cobra.Command{
|
||||
Use: "unpark <rig>",
|
||||
Short: "Unpark a rig (allow daemon to auto-restart agents)",
|
||||
Long: `Unpark a rig to resume normal operation.
|
||||
|
||||
Unparking a rig:
|
||||
- Removes the parked status from the wisp layer
|
||||
- Allows the daemon to auto-restart agents
|
||||
- Does NOT automatically start agents (use 'gt rig start' for that)
|
||||
|
||||
Examples:
|
||||
gt rig unpark gastown
|
||||
gt rig unpark beads`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: runRigUnpark,
|
||||
}
|
||||
|
||||
func init() {
|
||||
rigCmd.AddCommand(rigParkCmd)
|
||||
rigCmd.AddCommand(rigUnparkCmd)
|
||||
}
|
||||
|
||||
// runRigPark implements 'gt rig park <rig>': it stops the rig's witness and
// refinery agents (best-effort) and records status=parked in the wisp layer.
// Unlike docking, parking is local and ephemeral — it affects only this town
// and is cleared on wisp cleanup. Only a failure to write the wisp status
// returns an error.
func runRigPark(cmd *cobra.Command, args []string) error {
	rigName := args[0]

	// Get rig and town root
	townRoot, r, err := getRig(rigName)
	if err != nil {
		return err
	}

	fmt.Printf("Parking rig %s...\n", style.Bold.Render(rigName))

	var stoppedAgents []string

	t := tmux.NewTmux()

	// Stop witness if running. HasSession errors are deliberately ignored:
	// an unverifiable session is treated as "not running".
	witnessSession := fmt.Sprintf("gt-%s-witness", rigName)
	witnessRunning, _ := t.HasSession(witnessSession)
	if witnessRunning {
		fmt.Printf(" Stopping witness...\n")
		witMgr := witness.NewManager(r)
		if err := witMgr.Stop(); err != nil {
			// Best-effort: a stop failure does not prevent parking.
			fmt.Printf(" %s Failed to stop witness: %v\n", style.Warning.Render("!"), err)
		} else {
			stoppedAgents = append(stoppedAgents, "Witness stopped")
		}
	}

	// Stop refinery if running (same best-effort policy as the witness).
	refinerySession := fmt.Sprintf("gt-%s-refinery", rigName)
	refineryRunning, _ := t.HasSession(refinerySession)
	if refineryRunning {
		fmt.Printf(" Stopping refinery...\n")
		refMgr := refinery.NewManager(r)
		if err := refMgr.Stop(); err != nil {
			fmt.Printf(" %s Failed to stop refinery: %v\n", style.Warning.Render("!"), err)
		} else {
			stoppedAgents = append(stoppedAgents, "Refinery stopped")
		}
	}

	// Set parked status in wisp layer — the authoritative state change that
	// the daemon consults (see IsRigParked).
	wispCfg := wisp.NewConfig(townRoot, rigName)
	if err := wispCfg.Set(RigStatusKey, RigStatusParked); err != nil {
		return fmt.Errorf("setting parked status: %w", err)
	}

	// Output
	fmt.Printf("%s Rig %s parked (local only)\n", style.Success.Render("✓"), rigName)
	for _, msg := range stoppedAgents {
		fmt.Printf(" %s\n", msg)
	}
	fmt.Printf(" Daemon will not auto-restart\n")

	return nil
}
|
||||
|
||||
// runRigUnpark implements 'gt rig unpark <rig>': it clears the wisp-layer
// status key so the daemon may auto-restart agents again. It does NOT start
// any agents itself (see 'gt rig start').
func runRigUnpark(cmd *cobra.Command, args []string) error {
	rigName := args[0]

	// Get rig and town root (the rig handle itself is unused here; getRig
	// also validates that the rig exists).
	townRoot, _, err := getRig(rigName)
	if err != nil {
		return err
	}

	// Remove parked status from wisp layer. Unset of the status key is the
	// whole operation — no agent lifecycle work happens on unpark.
	wispCfg := wisp.NewConfig(townRoot, rigName)
	if err := wispCfg.Unset(RigStatusKey); err != nil {
		return fmt.Errorf("clearing parked status: %w", err)
	}

	fmt.Printf("%s Rig %s unparked\n", style.Success.Render("✓"), rigName)
	fmt.Printf(" Daemon can now auto-restart agents\n")
	fmt.Printf(" Use '%s' to start agents immediately\n", style.Dim.Render("gt rig start "+rigName))

	return nil
}
|
||||
|
||||
// IsRigParked checks if a rig is parked in the wisp layer.
|
||||
// This function is exported for use by the daemon.
|
||||
func IsRigParked(townRoot, rigName string) bool {
|
||||
wispCfg := wisp.NewConfig(townRoot, rigName)
|
||||
return wispCfg.GetString(RigStatusKey) == RigStatusParked
|
||||
}
|
||||
186
internal/cmd/rig_quick_add.go
Normal file
186
internal/cmd/rig_quick_add.go
Normal file
@@ -0,0 +1,186 @@
|
||||
// ABOUTME: Quick-add command for adding a repo to Gas Town with minimal friction.
|
||||
// ABOUTME: Used by shell hook for automatic "add to Gas Town?" prompts.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
|
||||
var (
|
||||
quickAddUser string
|
||||
quickAddYes bool
|
||||
quickAddQuiet bool
|
||||
)
|
||||
|
||||
var rigQuickAddCmd = &cobra.Command{
|
||||
Use: "quick-add [path]",
|
||||
Short: "Quickly add current repo to Gas Town",
|
||||
Hidden: true,
|
||||
Long: `Quickly add a git repository to Gas Town with minimal interaction.
|
||||
|
||||
This command is designed for the shell hook's "Add to Gas Town?" prompt.
|
||||
It infers the rig name from the directory and git URL from the remote.
|
||||
|
||||
Examples:
|
||||
gt rig quick-add # Add current directory
|
||||
gt rig quick-add ~/Repos/myproject # Add specific path
|
||||
gt rig quick-add --yes # Non-interactive`,
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: runRigQuickAdd,
|
||||
}
|
||||
|
||||
func init() {
|
||||
rigCmd.AddCommand(rigQuickAddCmd)
|
||||
rigQuickAddCmd.Flags().StringVar(&quickAddUser, "user", "", "Crew workspace name (default: $USER)")
|
||||
rigQuickAddCmd.Flags().BoolVar(&quickAddYes, "yes", false, "Non-interactive, assume yes")
|
||||
rigQuickAddCmd.Flags().BoolVar(&quickAddQuiet, "quiet", false, "Minimal output")
|
||||
}
|
||||
|
||||
// runRigQuickAdd implements 'gt rig quick-add [path]': it resolves the target
// git repo, locates an existing town, shells out to 'gt rig add' and then
// 'gt crew add', and finally prints a machine-readable GT_CREW_PATH= line.
// The crew-add step is best-effort; only path/repo/town resolution and the
// rig-add step can fail the command.
func runRigQuickAdd(cmd *cobra.Command, args []string) error {
	targetPath := "."
	if len(args) > 0 {
		targetPath = args[0]
	}

	absPath, err := filepath.Abs(targetPath)
	if err != nil {
		return fmt.Errorf("resolving path: %w", err)
	}

	// Refuse paths already inside a town: quick-add is for new repos only.
	if townRoot, err := workspace.Find(absPath); err == nil && townRoot != "" {
		return fmt.Errorf("already part of a Gas Town workspace: %s", townRoot)
	}

	gitRoot, err := findGitRoot(absPath)
	if err != nil {
		return fmt.Errorf("not a git repository: %w", err)
	}

	// An origin remote is required — the rig is cloned from it.
	gitURL, err := findGitRemoteURL(gitRoot)
	if err != nil {
		return fmt.Errorf("no git remote found: %w", err)
	}

	// Rig name defaults to the repo directory name, with characters that are
	// unsafe for rig names replaced (see sanitizeRigName).
	rigName := sanitizeRigName(filepath.Base(gitRoot))

	townRoot, err := findOrCreateTown()
	if err != nil {
		return fmt.Errorf("finding Gas Town: %w", err)
	}

	rigPath := filepath.Join(townRoot, rigName)
	if _, err := os.Stat(rigPath); err == nil {
		return fmt.Errorf("rig %q already exists in %s", rigName, townRoot)
	}

	originalName := filepath.Base(gitRoot)
	if rigName != originalName && !quickAddQuiet {
		fmt.Printf("Note: Using %q as rig name (sanitized from %q)\n", rigName, originalName)
	}

	if !quickAddQuiet {
		fmt.Printf("Adding %s to Gas Town...\n", style.Bold.Render(rigName))
		fmt.Printf(" Repository: %s\n", gitURL)
		fmt.Printf(" Town: %s\n", townRoot)
	}

	// Delegate to the full 'gt rig add' command rather than duplicating its
	// setup logic; stream its output straight through.
	addArgs := []string{"rig", "add", rigName, gitURL}
	addCmd := exec.Command("gt", addArgs...)
	addCmd.Dir = townRoot
	addCmd.Stdout = os.Stdout
	addCmd.Stderr = os.Stderr
	if err := addCmd.Run(); err != nil {
		fmt.Printf("\n%s Failed to add rig. You can try manually:\n", style.Warning.Render("⚠"))
		fmt.Printf(" cd %s && gt rig add %s %s\n", townRoot, rigName, gitURL)
		return fmt.Errorf("gt rig add failed: %w", err)
	}

	// Crew workspace name: flag, then $USER, then "default".
	user := quickAddUser
	if user == "" {
		user = os.Getenv("USER")
	}
	if user == "" {
		user = "default"
	}

	if !quickAddQuiet {
		fmt.Printf("\nCreating crew workspace for %s...\n", user)
	}

	// Best-effort: a crew-add failure prints recovery instructions but does
	// not fail quick-add (the rig itself was added successfully).
	crewArgs := []string{"crew", "add", user, "--rig", rigName}
	crewCmd := exec.Command("gt", crewArgs...)
	crewCmd.Dir = filepath.Join(townRoot, rigName)
	crewCmd.Stdout = os.Stdout
	crewCmd.Stderr = os.Stderr
	if err := crewCmd.Run(); err != nil {
		fmt.Printf(" %s Could not create crew workspace: %v\n", style.Dim.Render("⚠"), err)
		fmt.Printf(" Run manually: cd %s && gt crew add %s --rig %s\n", filepath.Join(townRoot, rigName), user, rigName)
	}

	crewPath := filepath.Join(townRoot, rigName, "crew", user)
	if !quickAddQuiet {
		fmt.Printf("\n%s Added to Gas Town!\n", style.Success.Render("✓"))
		fmt.Printf("\nYour workspace: %s\n", style.Bold.Render(crewPath))
	}

	// Printed even with --quiet — presumably parsed by the shell hook to cd
	// into the new workspace. NOTE(review): emitted even when crew-add failed
	// above, so the path may not exist; confirm the hook tolerates that.
	fmt.Printf("GT_CREW_PATH=%s\n", crewPath)

	return nil
}
|
||||
|
||||
// findGitRoot returns the top-level directory of the git repository that
// contains path, as reported by 'git rev-parse --show-toplevel'.
func findGitRoot(path string) (string, error) {
	revParse := exec.Command("git", "rev-parse", "--show-toplevel")
	revParse.Dir = path
	raw, err := revParse.Output()
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(raw)), nil
}
|
||||
|
||||
// findGitRemoteURL returns the URL of the 'origin' remote for the repository
// rooted at gitRoot, via 'git remote get-url origin'.
func findGitRemoteURL(gitRoot string) (string, error) {
	getURL := exec.Command("git", "remote", "get-url", "origin")
	getURL.Dir = gitRoot
	raw, err := getURL.Output()
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(raw)), nil
}
|
||||
|
||||
// sanitizeRigName makes a directory name safe to use as a rig name by
// replacing '-', '.', and ' ' with '_'. All other runes pass through
// unchanged. Done in a single pass with strings.Map.
func sanitizeRigName(name string) string {
	return strings.Map(func(r rune) rune {
		switch r {
		case '-', '.', ' ':
			return '_'
		default:
			return r
		}
	}, name)
}
|
||||
|
||||
// findOrCreateTown locates an existing Gas Town by probing ~/gt then
// ~/gastown for a mayor/ directory, returning the first match.
//
// NOTE(review): despite the name, this function never creates a town — it
// returns an error directing the user to 'gt install ~/gt' instead.
func findOrCreateTown() (string, error) {
	home, err := os.UserHomeDir()
	if err != nil {
		return "", err
	}

	// A directory counts as a town when it contains a mayor/ subdirectory.
	for _, dir := range []string{"gt", "gastown"} {
		townPath := filepath.Join(home, dir)
		if _, err := os.Stat(filepath.Join(townPath, "mayor")); err == nil {
			return townPath, nil
		}
	}

	return "", fmt.Errorf("no Gas Town found - run 'gt install ~/gt' first")
}
|
||||
@@ -270,7 +270,7 @@ func (info RoleInfo) ActorString() string {
|
||||
func getRoleHome(role Role, rig, polecat, townRoot string) string {
|
||||
switch role {
|
||||
case RoleMayor:
|
||||
return townRoot
|
||||
return filepath.Join(townRoot, "mayor")
|
||||
case RoleDeacon:
|
||||
return filepath.Join(townRoot, "deacon")
|
||||
case RoleWitness:
|
||||
@@ -423,7 +423,7 @@ func runRoleList(cmd *cobra.Command, args []string) error {
|
||||
name Role
|
||||
desc string
|
||||
}{
|
||||
{RoleMayor, "Global coordinator at town root"},
|
||||
{RoleMayor, "Global coordinator at mayor/"},
|
||||
{RoleDeacon, "Background supervisor daemon"},
|
||||
{RoleWitness, "Per-rig polecat lifecycle manager"},
|
||||
{RoleRefinery, "Per-rig merge queue processor"},
|
||||
|
||||
@@ -16,6 +16,30 @@ var rootCmd = &cobra.Command{
|
||||
|
||||
It coordinates agent spawning, work distribution, and communication
|
||||
across distributed teams of AI agents working on shared codebases.`,
|
||||
PersistentPreRunE: checkBeadsDependency,
|
||||
}
|
||||
|
||||
// Commands that don't require beads to be installed/checked.
|
||||
// These are basic utility commands that should work without beads.
|
||||
var beadsExemptCommands = map[string]bool{
|
||||
"version": true,
|
||||
"help": true,
|
||||
"completion": true,
|
||||
}
|
||||
|
||||
// checkBeadsDependency verifies beads meets minimum version requirements.
|
||||
// Skips check for exempt commands (version, help, completion).
|
||||
func checkBeadsDependency(cmd *cobra.Command, args []string) error {
|
||||
// Get the root command name being run
|
||||
cmdName := cmd.Name()
|
||||
|
||||
// Skip check for exempt commands
|
||||
if beadsExemptCommands[cmdName] {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check beads version
|
||||
return CheckBeadsVersion()
|
||||
}
|
||||
|
||||
// Execute runs the root command and returns an exit code.
|
||||
|
||||
@@ -12,8 +12,8 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/git"
|
||||
"github.com/steveyegge/gastown/internal/polecat"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/suggest"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
@@ -224,16 +224,16 @@ func parseAddress(addr string) (rigName, polecatName string, err error) {
|
||||
}
|
||||
|
||||
// getSessionManager creates a session manager for the given rig.
|
||||
func getSessionManager(rigName string) (*session.Manager, *rig.Rig, error) {
|
||||
func getSessionManager(rigName string) (*polecat.SessionManager, *rig.Rig, error) {
|
||||
_, r, err := getRig(rigName)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
t := tmux.NewTmux()
|
||||
mgr := session.NewManager(t, r)
|
||||
polecatMgr := polecat.NewSessionManager(t, r)
|
||||
|
||||
return mgr, r, nil
|
||||
return polecatMgr, r, nil
|
||||
}
|
||||
|
||||
func runSessionStart(cmd *cobra.Command, args []string) error {
|
||||
@@ -242,7 +242,7 @@ func runSessionStart(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
mgr, r, err := getSessionManager(rigName)
|
||||
polecatMgr, r, err := getSessionManager(rigName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -261,12 +261,12 @@ func runSessionStart(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("%s", suggest.FormatSuggestion("Polecat", polecatName, suggestions, hint))
|
||||
}
|
||||
|
||||
opts := session.StartOptions{
|
||||
opts := polecat.SessionStartOptions{
|
||||
Issue: sessionIssue,
|
||||
}
|
||||
|
||||
fmt.Printf("Starting session for %s/%s...\n", rigName, polecatName)
|
||||
if err := mgr.Start(polecatName, opts); err != nil {
|
||||
if err := polecatMgr.Start(polecatName, opts); err != nil {
|
||||
return fmt.Errorf("starting session: %w", err)
|
||||
}
|
||||
|
||||
@@ -290,7 +290,7 @@ func runSessionStop(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
mgr, _, err := getSessionManager(rigName)
|
||||
polecatMgr, _, err := getSessionManager(rigName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -300,7 +300,7 @@ func runSessionStop(cmd *cobra.Command, args []string) error {
|
||||
} else {
|
||||
fmt.Printf("Stopping session for %s/%s...\n", rigName, polecatName)
|
||||
}
|
||||
if err := mgr.Stop(polecatName, sessionForce); err != nil {
|
||||
if err := polecatMgr.Stop(polecatName, sessionForce); err != nil {
|
||||
return fmt.Errorf("stopping session: %w", err)
|
||||
}
|
||||
|
||||
@@ -326,13 +326,13 @@ func runSessionAttach(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
mgr, _, err := getSessionManager(rigName)
|
||||
polecatMgr, _, err := getSessionManager(rigName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Attach (this replaces the process)
|
||||
return mgr.Attach(polecatName)
|
||||
return polecatMgr.Attach(polecatName)
|
||||
}
|
||||
|
||||
// SessionListItem represents a session in list output.
|
||||
@@ -381,8 +381,8 @@ func runSessionList(cmd *cobra.Command, args []string) error {
|
||||
var allSessions []SessionListItem
|
||||
|
||||
for _, r := range rigs {
|
||||
mgr := session.NewManager(t, r)
|
||||
infos, err := mgr.List()
|
||||
polecatMgr := polecat.NewSessionManager(t, r)
|
||||
infos, err := polecatMgr.List()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
@@ -428,7 +428,7 @@ func runSessionCapture(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
mgr, _, err := getSessionManager(rigName)
|
||||
polecatMgr, _, err := getSessionManager(rigName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -446,7 +446,7 @@ func runSessionCapture(cmd *cobra.Command, args []string) error {
|
||||
lines = n
|
||||
}
|
||||
|
||||
output, err := mgr.Capture(polecatName, lines)
|
||||
output, err := polecatMgr.Capture(polecatName, lines)
|
||||
if err != nil {
|
||||
return fmt.Errorf("capturing output: %w", err)
|
||||
}
|
||||
@@ -475,12 +475,12 @@ func runSessionInject(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("no message provided (use -m or -f)")
|
||||
}
|
||||
|
||||
mgr, _, err := getSessionManager(rigName)
|
||||
polecatMgr, _, err := getSessionManager(rigName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := mgr.Inject(polecatName, message); err != nil {
|
||||
if err := polecatMgr.Inject(polecatName, message); err != nil {
|
||||
return fmt.Errorf("injecting message: %w", err)
|
||||
}
|
||||
|
||||
@@ -495,13 +495,13 @@ func runSessionRestart(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
mgr, _, err := getSessionManager(rigName)
|
||||
polecatMgr, _, err := getSessionManager(rigName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check if running
|
||||
running, err := mgr.IsRunning(polecatName)
|
||||
running, err := polecatMgr.IsRunning(polecatName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking session: %w", err)
|
||||
}
|
||||
@@ -513,15 +513,15 @@ func runSessionRestart(cmd *cobra.Command, args []string) error {
|
||||
} else {
|
||||
fmt.Printf("Stopping session for %s/%s...\n", rigName, polecatName)
|
||||
}
|
||||
if err := mgr.Stop(polecatName, sessionForce); err != nil {
|
||||
if err := polecatMgr.Stop(polecatName, sessionForce); err != nil {
|
||||
return fmt.Errorf("stopping session: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Start fresh session
|
||||
fmt.Printf("Starting session for %s/%s...\n", rigName, polecatName)
|
||||
opts := session.StartOptions{}
|
||||
if err := mgr.Start(polecatName, opts); err != nil {
|
||||
opts := polecat.SessionStartOptions{}
|
||||
if err := polecatMgr.Start(polecatName, opts); err != nil {
|
||||
return fmt.Errorf("starting session: %w", err)
|
||||
}
|
||||
|
||||
@@ -537,13 +537,13 @@ func runSessionStatus(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
mgr, _, err := getSessionManager(rigName)
|
||||
polecatMgr, _, err := getSessionManager(rigName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get session info
|
||||
info, err := mgr.Status(polecatName)
|
||||
info, err := polecatMgr.Status(polecatName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting status: %w", err)
|
||||
}
|
||||
@@ -649,6 +649,9 @@ func runSessionCheck(cmd *cobra.Command, args []string) error {
|
||||
if !entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(entry.Name(), ".") {
|
||||
continue
|
||||
}
|
||||
polecatName := entry.Name()
|
||||
sessionName := fmt.Sprintf("gt-%s-%s", r.Name, polecatName)
|
||||
totalChecked++
|
||||
|
||||
99
internal/cmd/shell.go
Normal file
99
internal/cmd/shell.go
Normal file
@@ -0,0 +1,99 @@
|
||||
// ABOUTME: Shell integration management commands.
|
||||
// ABOUTME: Install/remove shell hooks without full HQ setup.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/shell"
|
||||
"github.com/steveyegge/gastown/internal/state"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
)
|
||||
|
||||
var shellCmd = &cobra.Command{
|
||||
Use: "shell",
|
||||
GroupID: GroupConfig,
|
||||
Short: "Manage shell integration",
|
||||
RunE: requireSubcommand,
|
||||
}
|
||||
|
||||
var shellInstallCmd = &cobra.Command{
|
||||
Use: "install",
|
||||
Short: "Install or update shell integration",
|
||||
Long: `Install or update the Gas Town shell integration.
|
||||
|
||||
This adds a hook to your shell RC file that:
|
||||
- Sets GT_TOWN_ROOT and GT_RIG when you cd into a Gas Town rig
|
||||
- Offers to add new git repos to Gas Town on first visit
|
||||
|
||||
Run this after upgrading gt to get the latest shell hook features.`,
|
||||
RunE: runShellInstall,
|
||||
}
|
||||
|
||||
var shellRemoveCmd = &cobra.Command{
|
||||
Use: "remove",
|
||||
Short: "Remove shell integration",
|
||||
RunE: runShellRemove,
|
||||
}
|
||||
|
||||
var shellStatusCmd = &cobra.Command{
|
||||
Use: "status",
|
||||
Short: "Show shell integration status",
|
||||
RunE: runShellStatus,
|
||||
}
|
||||
|
||||
func init() {
|
||||
shellCmd.AddCommand(shellInstallCmd)
|
||||
shellCmd.AddCommand(shellRemoveCmd)
|
||||
shellCmd.AddCommand(shellStatusCmd)
|
||||
rootCmd.AddCommand(shellCmd)
|
||||
}
|
||||
|
||||
func runShellInstall(cmd *cobra.Command, args []string) error {
|
||||
if err := shell.Install(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := state.Enable(Version); err != nil {
|
||||
fmt.Printf("%s Could not enable Gas Town: %v\n", style.Dim.Render("⚠"), err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Shell integration installed (%s)\n", style.Success.Render("✓"), shell.RCFilePath(shell.DetectShell()))
|
||||
fmt.Println()
|
||||
fmt.Println("Run 'source ~/.zshrc' or open a new terminal to activate.")
|
||||
return nil
|
||||
}
|
||||
|
||||
func runShellRemove(cmd *cobra.Command, args []string) error {
|
||||
if err := shell.Remove(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("%s Shell integration removed\n", style.Success.Render("✓"))
|
||||
return nil
|
||||
}
|
||||
|
||||
func runShellStatus(cmd *cobra.Command, args []string) error {
|
||||
s, err := state.Load()
|
||||
if err != nil {
|
||||
fmt.Println("Gas Town: not configured")
|
||||
fmt.Println("Shell integration: not installed")
|
||||
return nil
|
||||
}
|
||||
|
||||
if s.Enabled {
|
||||
fmt.Println("Gas Town: enabled")
|
||||
} else {
|
||||
fmt.Println("Gas Town: disabled")
|
||||
}
|
||||
|
||||
if s.ShellIntegration != "" {
|
||||
fmt.Printf("Shell integration: %s (%s)\n", s.ShellIntegration, shell.RCFilePath(s.ShellIntegration))
|
||||
} else {
|
||||
fmt.Println("Shell integration: not installed")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -9,10 +9,12 @@ import (
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
"github.com/steveyegge/gastown/internal/dog"
|
||||
"github.com/steveyegge/gastown/internal/events"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
@@ -21,6 +23,39 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
|
||||
type wispCreateJSON struct {
|
||||
NewEpicID string `json:"new_epic_id"`
|
||||
RootID string `json:"root_id"`
|
||||
ResultID string `json:"result_id"`
|
||||
}
|
||||
|
||||
func parseWispIDFromJSON(jsonOutput []byte) (string, error) {
|
||||
var result wispCreateJSON
|
||||
if err := json.Unmarshal(jsonOutput, &result); err != nil {
|
||||
return "", fmt.Errorf("parsing wisp JSON: %w (output: %s)", err, trimJSONForError(jsonOutput))
|
||||
}
|
||||
|
||||
switch {
|
||||
case result.NewEpicID != "":
|
||||
return result.NewEpicID, nil
|
||||
case result.RootID != "":
|
||||
return result.RootID, nil
|
||||
case result.ResultID != "":
|
||||
return result.ResultID, nil
|
||||
default:
|
||||
return "", fmt.Errorf("wisp JSON missing id field (expected one of new_epic_id, root_id, result_id); output: %s", trimJSONForError(jsonOutput))
|
||||
}
|
||||
}
|
||||
|
||||
func trimJSONForError(jsonOutput []byte) string {
|
||||
s := strings.TrimSpace(string(jsonOutput))
|
||||
const maxLen = 500
|
||||
if len(s) > maxLen {
|
||||
return s[:maxLen] + "..."
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
var slingCmd = &cobra.Command{
|
||||
Use: "sling <bead-or-formula> [target]",
|
||||
GroupID: GroupWork,
|
||||
@@ -53,7 +88,6 @@ Target Resolution:
|
||||
gt sling gt-abc deacon/dogs/alpha # Specific dog
|
||||
|
||||
Spawning Options (when target is a rig):
|
||||
gt sling gp-abc greenplace --molecule mol-review # Use specific workflow
|
||||
gt sling gp-abc greenplace --create # Create polecat if missing
|
||||
gt sling gp-abc greenplace --naked # No-tmux (manual start)
|
||||
gt sling gp-abc greenplace --force # Ignore unread mail
|
||||
@@ -74,11 +108,6 @@ Formula-on-Bead (--on flag):
|
||||
gt sling mol-review --on gt-abc # Apply formula to existing work
|
||||
gt sling shiny --on gt-abc crew # Apply formula, sling to crew
|
||||
|
||||
Quality Levels (shorthand for polecat workflows):
|
||||
gt sling gp-abc greenplace --quality=basic # mol-polecat-basic (trivial fixes)
|
||||
gt sling gp-abc greenplace --quality=shiny # mol-polecat-shiny (standard)
|
||||
gt sling gp-abc greenplace --quality=chrome # mol-polecat-chrome (max rigor)
|
||||
|
||||
Compare:
|
||||
gt hook <bead> # Just attach (no action)
|
||||
gt sling <bead> # Attach + start now (keep context)
|
||||
@@ -106,10 +135,9 @@ var (
|
||||
// Flags migrated for polecat spawning (used by sling for work assignment
|
||||
slingNaked bool // --naked: no-tmux mode (skip session creation)
|
||||
slingCreate bool // --create: create polecat if it doesn't exist
|
||||
slingMolecule string // --molecule: workflow to instantiate on the bead
|
||||
slingForce bool // --force: force spawn even if polecat has unread mail
|
||||
slingAccount string // --account: Claude Code account handle to use
|
||||
slingQuality string // --quality: shorthand for polecat workflow (basic|shiny|chrome)
|
||||
slingAgent string // --agent: override runtime agent for this sling/spawn
|
||||
slingNoConvoy bool // --no-convoy: skip auto-convoy creation
|
||||
)
|
||||
|
||||
@@ -124,10 +152,9 @@ func init() {
|
||||
// Flags for polecat spawning (when target is a rig)
|
||||
slingCmd.Flags().BoolVar(&slingNaked, "naked", false, "No-tmux mode: assign work but skip session creation (manual start)")
|
||||
slingCmd.Flags().BoolVar(&slingCreate, "create", false, "Create polecat if it doesn't exist")
|
||||
slingCmd.Flags().StringVar(&slingMolecule, "molecule", "", "Molecule workflow to instantiate on the bead")
|
||||
slingCmd.Flags().BoolVar(&slingForce, "force", false, "Force spawn even if polecat has unread mail")
|
||||
slingCmd.Flags().StringVar(&slingAccount, "account", "", "Claude Code account handle to use")
|
||||
slingCmd.Flags().StringVarP(&slingQuality, "quality", "q", "", "Polecat workflow quality level (basic|shiny|chrome)")
|
||||
slingCmd.Flags().StringVar(&slingAgent, "agent", "", "Override agent/runtime for this sling (e.g., claude, gemini, codex, or custom alias)")
|
||||
slingCmd.Flags().BoolVar(&slingNoConvoy, "no-convoy", false, "Skip auto-convoy creation for single-issue sling")
|
||||
|
||||
rootCmd.AddCommand(slingCmd)
|
||||
@@ -162,22 +189,6 @@ func runSling(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
}
|
||||
|
||||
// --quality is shorthand for formula-on-bead with polecat workflow
|
||||
// Convert: gt sling gp-abc greenplace --quality=shiny
|
||||
// To: gt sling mol-polecat-shiny --on gt-abc gastown
|
||||
if slingQuality != "" {
|
||||
qualityFormula, err := qualityToFormula(slingQuality)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// The first arg should be the bead, and we wrap it with the formula
|
||||
if slingOnTarget != "" {
|
||||
return fmt.Errorf("--quality cannot be used with --on (both specify formula)")
|
||||
}
|
||||
slingOnTarget = args[0] // The bead becomes --on target
|
||||
args[0] = qualityFormula // The formula becomes first arg
|
||||
}
|
||||
|
||||
// Determine mode based on flags and argument types
|
||||
var beadID string
|
||||
var formulaName string
|
||||
@@ -267,6 +278,7 @@ func runSling(cmd *cobra.Command, args []string) error {
|
||||
Account: slingAccount,
|
||||
Create: slingCreate,
|
||||
HookBead: beadID, // Set atomically at spawn time
|
||||
Agent: slingAgent,
|
||||
}
|
||||
spawnInfo, spawnErr := SpawnPolecatForSling(rigName, spawnOpts)
|
||||
if spawnErr != nil {
|
||||
@@ -375,8 +387,14 @@ func runSling(cmd *cobra.Command, args []string) error {
|
||||
if formulaName != "" {
|
||||
fmt.Printf(" Instantiating formula %s...\n", formulaName)
|
||||
|
||||
// Route bd mutations (cook/wisp/bond) to the correct beads context for the target bead.
|
||||
// Some bd mol commands don't support prefix routing, so we must run them from the
|
||||
// rig directory that owns the bead's database.
|
||||
formulaWorkDir := beads.ResolveHookDir(townRoot, beadID, hookWorkDir)
|
||||
|
||||
// Step 1: Cook the formula (ensures proto exists)
|
||||
cookCmd := exec.Command("bd", "--no-daemon", "cook", formulaName)
|
||||
cookCmd.Dir = formulaWorkDir
|
||||
cookCmd.Stderr = os.Stderr
|
||||
if err := cookCmd.Run(); err != nil {
|
||||
return fmt.Errorf("cooking formula %s: %w", formulaName, err)
|
||||
@@ -386,6 +404,7 @@ func runSling(cmd *cobra.Command, args []string) error {
|
||||
featureVar := fmt.Sprintf("feature=%s", info.Title)
|
||||
wispArgs := []string{"--no-daemon", "mol", "wisp", formulaName, "--var", featureVar, "--json"}
|
||||
wispCmd := exec.Command("bd", wispArgs...)
|
||||
wispCmd.Dir = formulaWorkDir
|
||||
wispCmd.Stderr = os.Stderr
|
||||
wispOut, err := wispCmd.Output()
|
||||
if err != nil {
|
||||
@@ -393,19 +412,17 @@ func runSling(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// Parse wisp output to get the root ID
|
||||
var wispResult struct {
|
||||
RootID string `json:"root_id"`
|
||||
}
|
||||
if err := json.Unmarshal(wispOut, &wispResult); err != nil {
|
||||
wispRootID, err := parseWispIDFromJSON(wispOut)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing wisp output: %w", err)
|
||||
}
|
||||
wispRootID := wispResult.RootID
|
||||
fmt.Printf("%s Formula wisp created: %s\n", style.Bold.Render("✓"), wispRootID)
|
||||
|
||||
// Step 3: Bond wisp to original bead (creates compound)
|
||||
// Use --no-daemon for mol bond (requires direct database access)
|
||||
bondArgs := []string{"--no-daemon", "mol", "bond", wispRootID, beadID, "--json"}
|
||||
bondCmd := exec.Command("bd", bondArgs...)
|
||||
bondCmd.Dir = formulaWorkDir
|
||||
bondCmd.Stderr = os.Stderr
|
||||
bondOut, err := bondCmd.Output()
|
||||
if err != nil {
|
||||
@@ -430,16 +447,10 @@ func runSling(cmd *cobra.Command, args []string) error {
|
||||
beadID = wispRootID
|
||||
}
|
||||
|
||||
// Hook the bead using bd update
|
||||
// Set BEADS_DIR to town-level beads so hq-* beads are accessible
|
||||
// even when running from polecat worktree (which only sees gt-* via redirect)
|
||||
// Hook the bead using bd update.
|
||||
// See: https://github.com/steveyegge/gastown/issues/148
|
||||
hookCmd := exec.Command("bd", "--no-daemon", "update", beadID, "--status=hooked", "--assignee="+targetAgent)
|
||||
hookCmd.Env = append(os.Environ(), "BEADS_DIR="+townBeadsDir)
|
||||
if hookWorkDir != "" {
|
||||
hookCmd.Dir = hookWorkDir
|
||||
} else {
|
||||
hookCmd.Dir = townRoot
|
||||
}
|
||||
hookCmd.Dir = beads.ResolveHookDir(townRoot, beadID, hookWorkDir)
|
||||
hookCmd.Stderr = os.Stderr
|
||||
if err := hookCmd.Run(); err != nil {
|
||||
return fmt.Errorf("hooking bead: %w", err)
|
||||
@@ -473,12 +484,24 @@ func runSling(cmd *cobra.Command, args []string) error {
|
||||
// Try to inject the "start now" prompt (graceful if no tmux)
|
||||
if targetPane == "" {
|
||||
fmt.Printf("%s No pane to nudge (agent will discover work via gt prime)\n", style.Dim.Render("○"))
|
||||
} else if err := injectStartPrompt(targetPane, beadID, slingSubject, slingArgs); err != nil {
|
||||
// Graceful fallback for no-tmux mode
|
||||
fmt.Printf("%s Could not nudge (no tmux?): %v\n", style.Dim.Render("○"), err)
|
||||
fmt.Printf(" Agent will discover work via gt prime / bd show\n")
|
||||
} else {
|
||||
fmt.Printf("%s Start prompt sent\n", style.Bold.Render("▶"))
|
||||
// Ensure agent is ready before nudging (prevents race condition where
|
||||
// message arrives before Claude has fully started - see issue #115)
|
||||
sessionName := getSessionFromPane(targetPane)
|
||||
if sessionName != "" {
|
||||
if err := ensureAgentReady(sessionName); err != nil {
|
||||
// Non-fatal: warn and continue, agent will discover work via gt prime
|
||||
fmt.Printf("%s Could not verify agent ready: %v\n", style.Dim.Render("○"), err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := injectStartPrompt(targetPane, beadID, slingSubject, slingArgs); err != nil {
|
||||
// Graceful fallback for no-tmux mode
|
||||
fmt.Printf("%s Could not nudge (no tmux?): %v\n", style.Dim.Render("○"), err)
|
||||
fmt.Printf(" Agent will discover work via gt prime / bd show\n")
|
||||
} else {
|
||||
fmt.Printf("%s Start prompt sent\n", style.Bold.Render("▶"))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -488,7 +511,7 @@ func runSling(cmd *cobra.Command, args []string) error {
|
||||
// This enables no-tmux mode where agents discover args via gt prime / bd show.
|
||||
func storeArgsInBead(beadID, args string) error {
|
||||
// Get the bead to preserve existing description content
|
||||
showCmd := exec.Command("bd", "--no-daemon", "show", beadID, "--json")
|
||||
showCmd := exec.Command("bd", "--no-daemon", "show", beadID, "--json", "--allow-stale")
|
||||
out, err := showCmd.Output()
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetching bead: %w", err)
|
||||
@@ -599,6 +622,57 @@ func injectStartPrompt(pane, beadID, subject, args string) error {
|
||||
return t.NudgePane(pane, prompt)
|
||||
}
|
||||
|
||||
// getSessionFromPane extracts session name from a pane target.
|
||||
// Pane targets can be:
|
||||
// - "%9" (pane ID) - need to query tmux for session
|
||||
// - "gt-rig-name:0.0" (session:window.pane) - extract session name
|
||||
// getSessionFromPane resolves the tmux session name behind a pane target.
// Two target shapes are supported:
//   - "%9" (raw pane ID): tmux is queried for the owning session; returns
//     "" if the query fails.
//   - "session:window.pane": the session prefix is sliced off directly.
//
// Anything else is returned unchanged (assumed to already be a session name).
func getSessionFromPane(pane string) string {
	if !strings.HasPrefix(pane, "%") {
		// "session:window.pane" - the session is everything before ':'.
		if session, _, found := strings.Cut(pane, ":"); found && session != "" {
			return session
		}
		return pane
	}

	// Raw pane ID - ask tmux which session owns it.
	out, err := exec.Command("tmux", "display-message", "-t", pane, "-p", "#{session_name}").Output()
	if err != nil {
		return ""
	}
	return strings.TrimSpace(string(out))
}
|
||||
|
||||
// ensureAgentReady waits for an agent to be ready before nudging an existing session.
|
||||
// Uses a pragmatic approach: wait for the pane to leave a shell, then (Claude-only)
|
||||
// accept the bypass permissions warning and give it a moment to finish initializing.
|
||||
func ensureAgentReady(sessionName string) error {
|
||||
t := tmux.NewTmux()
|
||||
|
||||
// If an agent is already running, assume it's ready (session was started earlier)
|
||||
if t.IsAgentRunning(sessionName) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Agent not running yet - wait for it to start (shell → program transition)
|
||||
if err := t.WaitForCommand(sessionName, constants.SupportedShells, constants.ClaudeStartTimeout); err != nil {
|
||||
return fmt.Errorf("waiting for agent to start: %w", err)
|
||||
}
|
||||
|
||||
// Claude-only: accept bypass permissions warning if present
|
||||
if t.IsClaudeRunning(sessionName) {
|
||||
_ = t.AcceptBypassPermissionsWarning(sessionName)
|
||||
|
||||
// PRAGMATIC APPROACH: fixed delay rather than prompt detection.
|
||||
// Claude startup takes ~5-8 seconds on typical machines.
|
||||
time.Sleep(8 * time.Second)
|
||||
} else {
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// resolveTargetAgent converts a target spec to agent ID, pane, and hook root.
|
||||
// If skipPane is true, skip tmux pane lookup (for --naked mode).
|
||||
func resolveTargetAgent(target string, skipPane bool) (agentID string, pane string, hookRoot string, err error) {
|
||||
@@ -644,11 +718,17 @@ func sessionToAgentID(sessionName string) string {
|
||||
}
|
||||
|
||||
// verifyBeadExists checks that the bead exists using bd show.
|
||||
// Uses bd's native prefix-based routing via routes.jsonl - do NOT set BEADS_DIR
|
||||
// as that overrides routing and breaks resolution of rig-level beads.
|
||||
//
|
||||
// Uses --no-daemon with --allow-stale to avoid daemon socket timing issues
|
||||
// while still finding beads when database is out of sync with JSONL.
|
||||
// For existence checks, stale data is acceptable - we just need to know it exists.
|
||||
func verifyBeadExists(beadID string) error {
|
||||
cmd := exec.Command("bd", "--no-daemon", "show", beadID, "--json")
|
||||
// Set BEADS_DIR to town root so hq-* beads are accessible
|
||||
cmd := exec.Command("bd", "--no-daemon", "show", beadID, "--json", "--allow-stale")
|
||||
// Run from town root so bd can find routes.jsonl for prefix-based routing.
|
||||
// Do NOT set BEADS_DIR - that overrides routing and breaks rig bead resolution.
|
||||
if townRoot, err := workspace.FindFromCwd(); err == nil {
|
||||
cmd.Env = append(os.Environ(), "BEADS_DIR="+filepath.Join(townRoot, ".beads"))
|
||||
cmd.Dir = townRoot
|
||||
}
|
||||
if err := cmd.Run(); err != nil {
|
||||
@@ -665,8 +745,14 @@ type beadInfo struct {
|
||||
}
|
||||
|
||||
// getBeadInfo returns status and assignee for a bead.
|
||||
// Uses bd's native prefix-based routing via routes.jsonl.
|
||||
// Uses --no-daemon with --allow-stale for consistency with verifyBeadExists.
|
||||
func getBeadInfo(beadID string) (*beadInfo, error) {
|
||||
cmd := exec.Command("bd", "--no-daemon", "show", beadID, "--json")
|
||||
cmd := exec.Command("bd", "--no-daemon", "show", beadID, "--json", "--allow-stale")
|
||||
// Run from town root so bd can find routes.jsonl for prefix-based routing.
|
||||
if townRoot, err := workspace.FindFromCwd(); err == nil {
|
||||
cmd.Dir = townRoot
|
||||
}
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("bead '%s' not found", beadID)
|
||||
@@ -733,15 +819,16 @@ func resolveSelfTarget() (agentID string, pane string, hookRoot string, err erro
|
||||
|
||||
// verifyFormulaExists checks that the formula exists using bd formula show.
|
||||
// Formulas are TOML files (.formula.toml).
|
||||
// Uses --no-daemon with --allow-stale for consistency with verifyBeadExists.
|
||||
func verifyFormulaExists(formulaName string) error {
|
||||
// Try bd formula show (handles all formula file formats)
|
||||
cmd := exec.Command("bd", "--no-daemon", "formula", "show", formulaName)
|
||||
cmd := exec.Command("bd", "--no-daemon", "formula", "show", formulaName, "--allow-stale")
|
||||
if err := cmd.Run(); err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Try with mol- prefix
|
||||
cmd = exec.Command("bd", "--no-daemon", "formula", "show", "mol-"+formulaName)
|
||||
cmd = exec.Command("bd", "--no-daemon", "formula", "show", "mol-"+formulaName, "--allow-stale")
|
||||
if err := cmd.Run(); err == nil {
|
||||
return nil
|
||||
}
|
||||
@@ -818,6 +905,7 @@ func runSlingFormula(args []string) error {
|
||||
Naked: slingNaked,
|
||||
Account: slingAccount,
|
||||
Create: slingCreate,
|
||||
Agent: slingAgent,
|
||||
}
|
||||
spawnInfo, spawnErr := SpawnPolecatForSling(rigName, spawnOpts)
|
||||
if spawnErr != nil {
|
||||
@@ -887,22 +975,17 @@ func runSlingFormula(args []string) error {
|
||||
}
|
||||
|
||||
// Parse wisp output to get the root ID
|
||||
var wispResult struct {
|
||||
RootID string `json:"root_id"`
|
||||
}
|
||||
if err := json.Unmarshal(wispOut, &wispResult); err != nil {
|
||||
// Fallback: use formula name as identifier, but warn user
|
||||
fmt.Printf("%s Could not parse wisp output, using formula name as ID\n", style.Dim.Render("Warning:"))
|
||||
wispResult.RootID = formulaName
|
||||
wispRootID, err := parseWispIDFromJSON(wispOut)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing wisp output: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Wisp created: %s\n", style.Bold.Render("✓"), wispResult.RootID)
|
||||
fmt.Printf("%s Wisp created: %s\n", style.Bold.Render("✓"), wispRootID)
|
||||
|
||||
// Step 3: Hook the wisp bead using bd update (discovery-based approach)
|
||||
// Set BEADS_DIR to town-level beads so hq-* beads are accessible
|
||||
hookCmd := exec.Command("bd", "--no-daemon", "update", wispResult.RootID, "--status=hooked", "--assignee="+targetAgent)
|
||||
hookCmd.Env = append(os.Environ(), "BEADS_DIR="+townBeadsDir)
|
||||
hookCmd.Dir = townRoot
|
||||
// Step 3: Hook the wisp bead using bd update.
|
||||
// See: https://github.com/steveyegge/gastown/issues/148
|
||||
hookCmd := exec.Command("bd", "--no-daemon", "update", wispRootID, "--status=hooked", "--assignee="+targetAgent)
|
||||
hookCmd.Dir = beads.ResolveHookDir(townRoot, wispRootID, "")
|
||||
hookCmd.Stderr = os.Stderr
|
||||
if err := hookCmd.Run(); err != nil {
|
||||
return fmt.Errorf("hooking wisp bead: %w", err)
|
||||
@@ -911,23 +994,23 @@ func runSlingFormula(args []string) error {
|
||||
|
||||
// Log sling event to activity feed (formula slinging)
|
||||
actor := detectActor()
|
||||
payload := events.SlingPayload(wispResult.RootID, targetAgent)
|
||||
payload := events.SlingPayload(wispRootID, targetAgent)
|
||||
payload["formula"] = formulaName
|
||||
_ = events.LogFeed(events.TypeSling, actor, payload)
|
||||
|
||||
// Update agent bead's hook_bead field (ZFC: agents track their current work)
|
||||
// Note: formula slinging uses town root as workDir (no polecat-specific path)
|
||||
updateAgentHookBead(targetAgent, wispResult.RootID, "", townBeadsDir)
|
||||
updateAgentHookBead(targetAgent, wispRootID, "", townBeadsDir)
|
||||
|
||||
// Store dispatcher in bead description (enables completion notification to dispatcher)
|
||||
if err := storeDispatcherInBead(wispResult.RootID, actor); err != nil {
|
||||
if err := storeDispatcherInBead(wispRootID, actor); err != nil {
|
||||
// Warn but don't fail - polecat will still complete work
|
||||
fmt.Printf("%s Could not store dispatcher in bead: %v\n", style.Dim.Render("Warning:"), err)
|
||||
}
|
||||
|
||||
// Store args in wisp bead if provided (no-tmux mode: beads as data plane)
|
||||
if slingArgs != "" {
|
||||
if err := storeArgsInBead(wispResult.RootID, slingArgs); err != nil {
|
||||
if err := storeArgsInBead(wispRootID, slingArgs); err != nil {
|
||||
fmt.Printf("%s Could not store args in bead: %v\n", style.Dim.Render("Warning:"), err)
|
||||
} else {
|
||||
fmt.Printf("%s Args stored in bead (durable)\n", style.Bold.Render("✓"))
|
||||
@@ -971,39 +1054,39 @@ func runSlingFormula(args []string) error {
|
||||
func updateAgentHookBead(agentID, beadID, workDir, townBeadsDir string) {
|
||||
_ = townBeadsDir // Not used - BEADS_DIR breaks redirect mechanism
|
||||
|
||||
// Convert agent ID to agent bead ID
|
||||
// Format examples (canonical: prefix-rig-role-name):
|
||||
// greenplace/crew/max -> gt-greenplace-crew-max
|
||||
// greenplace/polecats/Toast -> gt-greenplace-polecat-Toast
|
||||
// mayor -> gt-mayor
|
||||
// greenplace/witness -> gt-greenplace-witness
|
||||
agentBeadID := agentIDToBeadID(agentID)
|
||||
if agentBeadID == "" {
|
||||
return
|
||||
}
|
||||
|
||||
// Determine the directory to run bd commands from:
|
||||
// - If workDir is provided (polecat's clone path), use it for redirect-based routing
|
||||
// - Otherwise fall back to town root
|
||||
bdWorkDir := workDir
|
||||
townRoot, err := workspace.FindFromCwd()
|
||||
if err != nil {
|
||||
// Not in a Gas Town workspace - can't update agent bead
|
||||
fmt.Fprintf(os.Stderr, "Warning: couldn't find town root to update agent hook: %v\n", err)
|
||||
return
|
||||
}
|
||||
if bdWorkDir == "" {
|
||||
townRoot, err := workspace.FindFromCwd()
|
||||
if err != nil {
|
||||
// Not in a Gas Town workspace - can't update agent bead
|
||||
fmt.Fprintf(os.Stderr, "Warning: couldn't find town root to update agent hook: %v\n", err)
|
||||
return
|
||||
}
|
||||
bdWorkDir = townRoot
|
||||
}
|
||||
|
||||
// Convert agent ID to agent bead ID
|
||||
// Format examples (canonical: prefix-rig-role-name):
|
||||
// greenplace/crew/max -> gt-greenplace-crew-max
|
||||
// greenplace/polecats/Toast -> gt-greenplace-polecat-Toast
|
||||
// mayor -> hq-mayor
|
||||
// greenplace/witness -> gt-greenplace-witness
|
||||
agentBeadID := agentIDToBeadID(agentID, townRoot)
|
||||
if agentBeadID == "" {
|
||||
return
|
||||
}
|
||||
|
||||
// Run from workDir WITHOUT BEADS_DIR to enable redirect-based routing.
|
||||
// Update agent_state to "running" and set hook_bead to the slung work.
|
||||
// For same-database beads, the hook slot is set via `bd slot set`.
|
||||
// Set hook_bead to the slung work (gt-zecmc: removed agent_state update).
|
||||
// Agent liveness is observable from tmux - no need to record it in bead.
|
||||
// For cross-database scenarios, slot set may fail gracefully (warning only).
|
||||
bd := beads.New(bdWorkDir)
|
||||
if err := bd.UpdateAgentState(agentBeadID, "running", &beadID); err != nil {
|
||||
if err := bd.SetHookBead(agentBeadID, beadID); err != nil {
|
||||
// Log warning instead of silent ignore - helps debug cross-beads issues
|
||||
fmt.Fprintf(os.Stderr, "Warning: couldn't update agent %s state: %v\n", agentBeadID, err)
|
||||
fmt.Fprintf(os.Stderr, "Warning: couldn't set agent %s hook: %v\n", agentBeadID, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1038,7 +1121,8 @@ func detectActor() string {
|
||||
// Uses canonical naming: prefix-rig-role-name
|
||||
// Town-level agents (Mayor, Deacon) use hq- prefix and are stored in town beads.
|
||||
// Rig-level agents use the rig's configured prefix (default "gt-").
|
||||
func agentIDToBeadID(agentID string) string {
|
||||
// townRoot is needed to look up the rig's configured prefix.
|
||||
func agentIDToBeadID(agentID, townRoot string) string {
|
||||
// Handle simple cases (town-level agents with hq- prefix)
|
||||
if agentID == "mayor" {
|
||||
return beads.MayorBeadIDTown()
|
||||
@@ -1054,35 +1138,22 @@ func agentIDToBeadID(agentID string) string {
|
||||
}
|
||||
|
||||
rig := parts[0]
|
||||
prefix := config.GetRigPrefix(townRoot, rig)
|
||||
|
||||
switch {
|
||||
case len(parts) == 2 && parts[1] == "witness":
|
||||
return beads.WitnessBeadID(rig)
|
||||
return beads.WitnessBeadIDWithPrefix(prefix, rig)
|
||||
case len(parts) == 2 && parts[1] == "refinery":
|
||||
return beads.RefineryBeadID(rig)
|
||||
return beads.RefineryBeadIDWithPrefix(prefix, rig)
|
||||
case len(parts) == 3 && parts[1] == "crew":
|
||||
return beads.CrewBeadID(rig, parts[2])
|
||||
return beads.CrewBeadIDWithPrefix(prefix, rig, parts[2])
|
||||
case len(parts) == 3 && parts[1] == "polecats":
|
||||
return beads.PolecatBeadID(rig, parts[2])
|
||||
return beads.PolecatBeadIDWithPrefix(prefix, rig, parts[2])
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// qualityToFormula converts a quality level to the corresponding polecat workflow formula.
|
||||
// qualityToFormula maps a quality level (full word or one-letter shorthand,
// case-insensitive) to its polecat workflow formula name. Unknown levels
// produce an error listing the accepted values.
func qualityToFormula(quality string) (string, error) {
	formulas := map[string]string{
		"basic": "mol-polecat-basic", "b": "mol-polecat-basic",
		"shiny": "mol-polecat-shiny", "s": "mol-polecat-shiny",
		"chrome": "mol-polecat-chrome", "c": "mol-polecat-chrome",
	}
	if formula, ok := formulas[strings.ToLower(quality)]; ok {
		return formula, nil
	}
	return "", fmt.Errorf("invalid quality level '%s' (use: basic, shiny, or chrome)", quality)
}
|
||||
|
||||
// IsDogTarget checks if target is a dog target pattern.
|
||||
// Returns the dog name (or empty for pool dispatch) and true if it's a dog target.
|
||||
// Patterns:
|
||||
@@ -1376,6 +1447,7 @@ func runBatchSling(beadIDs []string, rigName string, townBeadsDir string) error
|
||||
Account: slingAccount,
|
||||
Create: slingCreate,
|
||||
HookBead: beadID, // Set atomically at spawn time
|
||||
Agent: slingAgent,
|
||||
}
|
||||
spawnInfo, err := SpawnPolecatForSling(rigName, spawnOpts)
|
||||
if err != nil {
|
||||
@@ -1402,12 +1474,10 @@ func runBatchSling(beadIDs []string, rigName string, townBeadsDir string) error
|
||||
}
|
||||
}
|
||||
|
||||
// Hook the bead
|
||||
// Hook the bead. See: https://github.com/steveyegge/gastown/issues/148
|
||||
townRoot := filepath.Dir(townBeadsDir)
|
||||
hookCmd := exec.Command("bd", "--no-daemon", "update", beadID, "--status=hooked", "--assignee="+targetAgent)
|
||||
hookCmd.Env = append(os.Environ(), "BEADS_DIR="+townBeadsDir)
|
||||
if hookWorkDir != "" {
|
||||
hookCmd.Dir = hookWorkDir
|
||||
}
|
||||
hookCmd.Dir = beads.ResolveHookDir(townRoot, beadID, hookWorkDir)
|
||||
hookCmd.Stderr = os.Stderr
|
||||
if err := hookCmd.Run(); err != nil {
|
||||
results = append(results, slingResult{beadID: beadID, polecat: spawnInfo.PolecatName, success: false, errMsg: "hook failed"})
|
||||
|
||||
@@ -1,6 +1,63 @@
|
||||
package cmd
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseWispIDFromJSON(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
json string
|
||||
wantID string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "new_epic_id",
|
||||
json: `{"new_epic_id":"gt-wisp-abc","created":7,"phase":"vapor"}`,
|
||||
wantID: "gt-wisp-abc",
|
||||
},
|
||||
{
|
||||
name: "root_id legacy",
|
||||
json: `{"root_id":"gt-wisp-legacy"}`,
|
||||
wantID: "gt-wisp-legacy",
|
||||
},
|
||||
{
|
||||
name: "result_id forward compat",
|
||||
json: `{"result_id":"gt-wisp-result"}`,
|
||||
wantID: "gt-wisp-result",
|
||||
},
|
||||
{
|
||||
name: "precedence prefers new_epic_id",
|
||||
json: `{"root_id":"gt-wisp-legacy","new_epic_id":"gt-wisp-new"}`,
|
||||
wantID: "gt-wisp-new",
|
||||
},
|
||||
{
|
||||
name: "missing id keys",
|
||||
json: `{"created":7,"phase":"vapor"}`,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid JSON",
|
||||
json: `{"new_epic_id":`,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotID, err := parseWispIDFromJSON([]byte(tt.json))
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Fatalf("parseWispIDFromJSON() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
if gotID != tt.wantID {
|
||||
t.Fatalf("parseWispIDFromJSON() id = %q, want %q", gotID, tt.wantID)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatTrackBeadID(t *testing.T) {
|
||||
tests := []struct {
|
||||
@@ -131,3 +188,318 @@ func TestFormatTrackBeadIDConsumerCompatibility(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSlingFormulaOnBeadRoutesBDCommandsToTargetRig(t *testing.T) {
|
||||
townRoot := t.TempDir()
|
||||
|
||||
// Minimal workspace marker so workspace.FindFromCwd() succeeds.
|
||||
if err := os.MkdirAll(filepath.Join(townRoot, "mayor", "rig"), 0755); err != nil {
|
||||
t.Fatalf("mkdir mayor/rig: %v", err)
|
||||
}
|
||||
|
||||
// Create a rig path that owns gt-* beads, and a routes.jsonl pointing to it.
|
||||
rigDir := filepath.Join(townRoot, "gastown", "mayor", "rig")
|
||||
if err := os.MkdirAll(filepath.Join(townRoot, ".beads"), 0755); err != nil {
|
||||
t.Fatalf("mkdir .beads: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(rigDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir rigDir: %v", err)
|
||||
}
|
||||
routes := strings.Join([]string{
|
||||
`{"prefix":"gt-","path":"gastown/mayor/rig"}`,
|
||||
`{"prefix":"hq-","path":"."}`,
|
||||
"",
|
||||
}, "\n")
|
||||
if err := os.WriteFile(filepath.Join(townRoot, ".beads", "routes.jsonl"), []byte(routes), 0644); err != nil {
|
||||
t.Fatalf("write routes.jsonl: %v", err)
|
||||
}
|
||||
|
||||
// Stub bd so we can observe the working directory for cook/wisp/bond.
|
||||
binDir := filepath.Join(townRoot, "bin")
|
||||
if err := os.MkdirAll(binDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir binDir: %v", err)
|
||||
}
|
||||
logPath := filepath.Join(townRoot, "bd.log")
|
||||
bdPath := filepath.Join(binDir, "bd")
|
||||
bdScript := `#!/bin/sh
|
||||
set -e
|
||||
echo "$(pwd)|$*" >> "${BD_LOG}"
|
||||
if [ "$1" = "--no-daemon" ]; then
|
||||
shift
|
||||
fi
|
||||
cmd="$1"
|
||||
shift || true
|
||||
case "$cmd" in
|
||||
show)
|
||||
echo '[{"title":"Test issue","status":"open","assignee":"","description":""}]'
|
||||
;;
|
||||
formula)
|
||||
# formula show <name>
|
||||
exit 0
|
||||
;;
|
||||
cook)
|
||||
exit 0
|
||||
;;
|
||||
mol)
|
||||
sub="$1"
|
||||
shift || true
|
||||
case "$sub" in
|
||||
wisp)
|
||||
echo '{"new_epic_id":"gt-wisp-xyz"}'
|
||||
;;
|
||||
bond)
|
||||
echo '{"root_id":"gt-wisp-xyz"}'
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
esac
|
||||
exit 0
|
||||
`
|
||||
if err := os.WriteFile(bdPath, []byte(bdScript), 0755); err != nil {
|
||||
t.Fatalf("write bd stub: %v", err)
|
||||
}
|
||||
|
||||
t.Setenv("BD_LOG", logPath)
|
||||
t.Setenv("PATH", binDir+string(os.PathListSeparator)+os.Getenv("PATH"))
|
||||
t.Setenv(EnvGTRole, "mayor")
|
||||
t.Setenv("GT_POLECAT", "")
|
||||
t.Setenv("GT_CREW", "")
|
||||
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatalf("getwd: %v", err)
|
||||
}
|
||||
t.Cleanup(func() { _ = os.Chdir(cwd) })
|
||||
if err := os.Chdir(filepath.Join(townRoot, "mayor", "rig")); err != nil {
|
||||
t.Fatalf("chdir: %v", err)
|
||||
}
|
||||
|
||||
// Ensure we don't leak global flag state across tests.
|
||||
prevOn := slingOnTarget
|
||||
prevVars := slingVars
|
||||
prevDryRun := slingDryRun
|
||||
prevNoConvoy := slingNoConvoy
|
||||
t.Cleanup(func() {
|
||||
slingOnTarget = prevOn
|
||||
slingVars = prevVars
|
||||
slingDryRun = prevDryRun
|
||||
slingNoConvoy = prevNoConvoy
|
||||
})
|
||||
|
||||
slingDryRun = false
|
||||
slingNoConvoy = true
|
||||
slingVars = nil
|
||||
slingOnTarget = "gt-abc123"
|
||||
|
||||
if err := runSling(nil, []string{"mol-review"}); err != nil {
|
||||
t.Fatalf("runSling: %v", err)
|
||||
}
|
||||
|
||||
logBytes, err := os.ReadFile(logPath)
|
||||
if err != nil {
|
||||
t.Fatalf("read bd log: %v", err)
|
||||
}
|
||||
logLines := strings.Split(strings.TrimSpace(string(logBytes)), "\n")
|
||||
|
||||
wantDir := rigDir
|
||||
if resolved, err := filepath.EvalSymlinks(wantDir); err == nil {
|
||||
wantDir = resolved
|
||||
}
|
||||
gotCook := false
|
||||
gotWisp := false
|
||||
gotBond := false
|
||||
|
||||
for _, line := range logLines {
|
||||
parts := strings.SplitN(line, "|", 2)
|
||||
if len(parts) != 2 {
|
||||
continue
|
||||
}
|
||||
dir := parts[0]
|
||||
if resolved, err := filepath.EvalSymlinks(dir); err == nil {
|
||||
dir = resolved
|
||||
}
|
||||
args := parts[1]
|
||||
|
||||
switch {
|
||||
case strings.Contains(args, " cook "):
|
||||
gotCook = true
|
||||
if dir != wantDir {
|
||||
t.Fatalf("bd cook ran in %q, want %q (args: %q)", dir, wantDir, args)
|
||||
}
|
||||
case strings.Contains(args, " mol wisp "):
|
||||
gotWisp = true
|
||||
if dir != wantDir {
|
||||
t.Fatalf("bd mol wisp ran in %q, want %q (args: %q)", dir, wantDir, args)
|
||||
}
|
||||
case strings.Contains(args, " mol bond "):
|
||||
gotBond = true
|
||||
if dir != wantDir {
|
||||
t.Fatalf("bd mol bond ran in %q, want %q (args: %q)", dir, wantDir, args)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !gotCook || !gotWisp || !gotBond {
|
||||
t.Fatalf("missing expected bd commands: cook=%v wisp=%v bond=%v (log: %q)", gotCook, gotWisp, gotBond, string(logBytes))
|
||||
}
|
||||
}
|
||||
|
||||
// TestVerifyBeadExistsAllowStale reproduces the bug in gtl-ncq where beads
|
||||
// visible via regular bd show fail with --no-daemon due to database sync issues.
|
||||
// The fix uses --allow-stale to skip the sync check for existence verification.
|
||||
func TestVerifyBeadExistsAllowStale(t *testing.T) {
|
||||
townRoot := t.TempDir()
|
||||
|
||||
// Create minimal workspace structure
|
||||
if err := os.MkdirAll(filepath.Join(townRoot, "mayor", "rig"), 0755); err != nil {
|
||||
t.Fatalf("mkdir mayor/rig: %v", err)
|
||||
}
|
||||
|
||||
// Create a stub bd that simulates the sync issue:
|
||||
// - --no-daemon without --allow-stale fails (database out of sync)
|
||||
// - --no-daemon with --allow-stale succeeds (skips sync check)
|
||||
binDir := filepath.Join(townRoot, "bin")
|
||||
if err := os.MkdirAll(binDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir binDir: %v", err)
|
||||
}
|
||||
bdPath := filepath.Join(binDir, "bd")
|
||||
bdScript := `#!/bin/sh
|
||||
# Check for --allow-stale flag
|
||||
allow_stale=false
|
||||
for arg in "$@"; do
|
||||
if [ "$arg" = "--allow-stale" ]; then
|
||||
allow_stale=true
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$1" = "--no-daemon" ]; then
|
||||
if [ "$allow_stale" = "true" ]; then
|
||||
# --allow-stale skips sync check, succeeds
|
||||
echo '[{"title":"Test bead","status":"open","assignee":""}]'
|
||||
exit 0
|
||||
else
|
||||
# Without --allow-stale, fails with sync error
|
||||
echo '{"error":"Database out of sync with JSONL."}'
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
# Daemon mode works
|
||||
echo '[{"title":"Test bead","status":"open","assignee":""}]'
|
||||
exit 0
|
||||
`
|
||||
if err := os.WriteFile(bdPath, []byte(bdScript), 0755); err != nil {
|
||||
t.Fatalf("write bd stub: %v", err)
|
||||
}
|
||||
|
||||
t.Setenv("PATH", binDir+string(os.PathListSeparator)+os.Getenv("PATH"))
|
||||
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatalf("getwd: %v", err)
|
||||
}
|
||||
t.Cleanup(func() { _ = os.Chdir(cwd) })
|
||||
if err := os.Chdir(townRoot); err != nil {
|
||||
t.Fatalf("chdir: %v", err)
|
||||
}
|
||||
|
||||
// EXPECTED: verifyBeadExists should use --no-daemon --allow-stale and succeed
|
||||
beadID := "jv-v599"
|
||||
err = verifyBeadExists(beadID)
|
||||
if err != nil {
|
||||
t.Errorf("verifyBeadExists(%q) failed: %v\nExpected --allow-stale to skip sync check", beadID, err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSlingWithAllowStale tests the full gt sling flow with --allow-stale fix.
|
||||
// This is an integration test for the gtl-ncq bug.
|
||||
func TestSlingWithAllowStale(t *testing.T) {
|
||||
townRoot := t.TempDir()
|
||||
|
||||
// Create minimal workspace structure
|
||||
if err := os.MkdirAll(filepath.Join(townRoot, "mayor", "rig"), 0755); err != nil {
|
||||
t.Fatalf("mkdir mayor/rig: %v", err)
|
||||
}
|
||||
|
||||
// Create stub bd that respects --allow-stale
|
||||
binDir := filepath.Join(townRoot, "bin")
|
||||
if err := os.MkdirAll(binDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir binDir: %v", err)
|
||||
}
|
||||
bdPath := filepath.Join(binDir, "bd")
|
||||
bdScript := `#!/bin/sh
|
||||
# Check for --allow-stale flag
|
||||
allow_stale=false
|
||||
for arg in "$@"; do
|
||||
if [ "$arg" = "--allow-stale" ]; then
|
||||
allow_stale=true
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$1" = "--no-daemon" ]; then
|
||||
shift
|
||||
cmd="$1"
|
||||
if [ "$cmd" = "show" ]; then
|
||||
if [ "$allow_stale" = "true" ]; then
|
||||
echo '[{"title":"Synced bead","status":"open","assignee":""}]'
|
||||
exit 0
|
||||
fi
|
||||
echo '{"error":"Database out of sync"}'
|
||||
exit 1
|
||||
fi
|
||||
exit 0
|
||||
fi
|
||||
cmd="$1"
|
||||
shift || true
|
||||
case "$cmd" in
|
||||
show)
|
||||
echo '[{"title":"Synced bead","status":"open","assignee":""}]'
|
||||
;;
|
||||
update)
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
exit 0
|
||||
`
|
||||
if err := os.WriteFile(bdPath, []byte(bdScript), 0755); err != nil {
|
||||
t.Fatalf("write bd stub: %v", err)
|
||||
}
|
||||
|
||||
t.Setenv("PATH", binDir+string(os.PathListSeparator)+os.Getenv("PATH"))
|
||||
t.Setenv(EnvGTRole, "crew")
|
||||
t.Setenv("GT_CREW", "jv")
|
||||
t.Setenv("GT_POLECAT", "")
|
||||
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatalf("getwd: %v", err)
|
||||
}
|
||||
t.Cleanup(func() { _ = os.Chdir(cwd) })
|
||||
if err := os.Chdir(townRoot); err != nil {
|
||||
t.Fatalf("chdir: %v", err)
|
||||
}
|
||||
|
||||
// Save and restore global flags
|
||||
prevDryRun := slingDryRun
|
||||
prevNoConvoy := slingNoConvoy
|
||||
t.Cleanup(func() {
|
||||
slingDryRun = prevDryRun
|
||||
slingNoConvoy = prevNoConvoy
|
||||
})
|
||||
|
||||
slingDryRun = true
|
||||
slingNoConvoy = true
|
||||
|
||||
// EXPECTED: gt sling should use daemon mode and succeed
|
||||
// ACTUAL: verifyBeadExists uses --no-daemon and fails with sync error
|
||||
beadID := "jv-v599"
|
||||
err = runSling(nil, []string{beadID})
|
||||
if err != nil {
|
||||
// Check if it's the specific error we're testing for
|
||||
if strings.Contains(err.Error(), "is not a valid bead or formula") {
|
||||
t.Errorf("gt sling failed to recognize bead %q: %v\nExpected to use daemon mode, but used --no-daemon which fails when DB out of sync", beadID, err)
|
||||
} else {
|
||||
// Some other error - might be expected in dry-run mode
|
||||
t.Logf("gt sling returned error (may be expected in test): %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package cmd
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -9,30 +10,34 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/claude"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
"github.com/steveyegge/gastown/internal/crew"
|
||||
"github.com/steveyegge/gastown/internal/deacon"
|
||||
"github.com/steveyegge/gastown/internal/git"
|
||||
"github.com/steveyegge/gastown/internal/mayor"
|
||||
"github.com/steveyegge/gastown/internal/polecat"
|
||||
"github.com/steveyegge/gastown/internal/refinery"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
"github.com/steveyegge/gastown/internal/witness"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
|
||||
var (
|
||||
startAll bool
|
||||
startCrewRig string
|
||||
startCrewAccount string
|
||||
shutdownGraceful bool
|
||||
shutdownWait int
|
||||
shutdownAll bool
|
||||
shutdownForce bool
|
||||
shutdownYes bool
|
||||
shutdownPolecatsOnly bool
|
||||
shutdownNuclear bool
|
||||
startAll bool
|
||||
startAgentOverride string
|
||||
startCrewRig string
|
||||
startCrewAccount string
|
||||
startCrewAgentOverride string
|
||||
shutdownGraceful bool
|
||||
shutdownWait int
|
||||
shutdownAll bool
|
||||
shutdownForce bool
|
||||
shutdownYes bool
|
||||
shutdownPolecatsOnly bool
|
||||
shutdownNuclear bool
|
||||
)
|
||||
|
||||
var startCmd = &cobra.Command{
|
||||
@@ -103,9 +108,11 @@ Examples:
|
||||
func init() {
|
||||
startCmd.Flags().BoolVarP(&startAll, "all", "a", false,
|
||||
"Also start Witnesses and Refineries for all rigs")
|
||||
startCmd.Flags().StringVar(&startAgentOverride, "agent", "", "Agent alias to run Mayor/Deacon with (overrides town default)")
|
||||
|
||||
startCrewCmd.Flags().StringVar(&startCrewRig, "rig", "", "Rig to use")
|
||||
startCrewCmd.Flags().StringVar(&startCrewAccount, "account", "", "Claude Code account handle to use")
|
||||
startCrewCmd.Flags().StringVar(&startCrewAgentOverride, "agent", "", "Agent alias to run crew worker with (overrides rig/town default)")
|
||||
startCmd.AddCommand(startCrewCmd)
|
||||
|
||||
shutdownCmd.Flags().BoolVarP(&shutdownGraceful, "graceful", "g", false,
|
||||
@@ -145,12 +152,16 @@ func runStart(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
if err := config.EnsureDaemonPatrolConfig(townRoot); err != nil {
|
||||
fmt.Printf(" %s Could not ensure daemon config: %v\n", style.Dim.Render("○"), err)
|
||||
}
|
||||
|
||||
t := tmux.NewTmux()
|
||||
|
||||
fmt.Printf("Starting Gas Town from %s\n\n", style.Dim.Render(townRoot))
|
||||
|
||||
// Start core agents (Mayor and Deacon)
|
||||
if err := startCoreAgents(t); err != nil {
|
||||
if err := startCoreAgents(townRoot, startAgentOverride); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -176,33 +187,29 @@ func runStart(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// startCoreAgents starts Mayor and Deacon sessions.
|
||||
func startCoreAgents(t *tmux.Tmux) error {
|
||||
// Get session names
|
||||
mayorSession := getMayorSessionName()
|
||||
deaconSession := getDeaconSessionName()
|
||||
|
||||
// startCoreAgents starts Mayor and Deacon sessions using the Manager pattern.
|
||||
func startCoreAgents(townRoot string, agentOverride string) error {
|
||||
// Start Mayor first (so Deacon sees it as up)
|
||||
mayorRunning, _ := t.HasSession(mayorSession)
|
||||
if mayorRunning {
|
||||
fmt.Printf(" %s Mayor already running\n", style.Dim.Render("○"))
|
||||
} else {
|
||||
fmt.Printf(" %s Starting Mayor...\n", style.Bold.Render("→"))
|
||||
if err := startMayorSession(t, mayorSession); err != nil {
|
||||
mayorMgr := mayor.NewManager(townRoot)
|
||||
if err := mayorMgr.Start(agentOverride); err != nil {
|
||||
if err == mayor.ErrAlreadyRunning {
|
||||
fmt.Printf(" %s Mayor already running\n", style.Dim.Render("○"))
|
||||
} else {
|
||||
return fmt.Errorf("starting Mayor: %w", err)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf(" %s Mayor started\n", style.Bold.Render("✓"))
|
||||
}
|
||||
|
||||
// Start Deacon (health monitor)
|
||||
deaconRunning, _ := t.HasSession(deaconSession)
|
||||
if deaconRunning {
|
||||
fmt.Printf(" %s Deacon already running\n", style.Dim.Render("○"))
|
||||
} else {
|
||||
fmt.Printf(" %s Starting Deacon...\n", style.Bold.Render("→"))
|
||||
if err := startDeaconSession(t, deaconSession); err != nil {
|
||||
deaconMgr := deacon.NewManager(townRoot)
|
||||
if err := deaconMgr.Start(agentOverride); err != nil {
|
||||
if err == deacon.ErrAlreadyRunning {
|
||||
fmt.Printf(" %s Deacon already running\n", style.Dim.Render("○"))
|
||||
} else {
|
||||
return fmt.Errorf("starting Deacon: %w", err)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf(" %s Deacon started\n", style.Bold.Render("✓"))
|
||||
}
|
||||
|
||||
@@ -225,26 +232,28 @@ func startRigAgents(t *tmux.Tmux, townRoot string) {
|
||||
if witnessRunning {
|
||||
fmt.Printf(" %s %s witness already running\n", style.Dim.Render("○"), r.Name)
|
||||
} else {
|
||||
created, err := ensureWitnessSession(r.Name, r)
|
||||
if err != nil {
|
||||
fmt.Printf(" %s %s witness failed: %v\n", style.Dim.Render("○"), r.Name, err)
|
||||
} else if created {
|
||||
witMgr := witness.NewManager(r)
|
||||
if err := witMgr.Start(false); err != nil {
|
||||
if err == witness.ErrAlreadyRunning {
|
||||
fmt.Printf(" %s %s witness already running\n", style.Dim.Render("○"), r.Name)
|
||||
} else {
|
||||
fmt.Printf(" %s %s witness failed: %v\n", style.Dim.Render("○"), r.Name, err)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf(" %s %s witness started\n", style.Bold.Render("✓"), r.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// Start Refinery
|
||||
refinerySession := fmt.Sprintf("gt-%s-refinery", r.Name)
|
||||
refineryRunning, _ := t.HasSession(refinerySession)
|
||||
if refineryRunning {
|
||||
fmt.Printf(" %s %s refinery already running\n", style.Dim.Render("○"), r.Name)
|
||||
} else {
|
||||
created, err := ensureRefinerySession(r.Name, r)
|
||||
if err != nil {
|
||||
refineryMgr := refinery.NewManager(r)
|
||||
if err := refineryMgr.Start(false); err != nil {
|
||||
if errors.Is(err, refinery.ErrAlreadyRunning) {
|
||||
fmt.Printf(" %s %s refinery already running\n", style.Dim.Render("○"), r.Name)
|
||||
} else {
|
||||
fmt.Printf(" %s %s refinery failed: %v\n", style.Dim.Render("○"), r.Name, err)
|
||||
} else if created {
|
||||
fmt.Printf(" %s %s refinery started\n", style.Bold.Render("✓"), r.Name)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf(" %s %s refinery started\n", style.Bold.Render("✓"), r.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -263,7 +272,21 @@ func startConfiguredCrew(t *tmux.Tmux, townRoot string) {
|
||||
for _, crewName := range crewToStart {
|
||||
sessionID := crewSessionName(r.Name, crewName)
|
||||
if running, _ := t.HasSession(sessionID); running {
|
||||
fmt.Printf(" %s %s/%s already running\n", style.Dim.Render("○"), r.Name, crewName)
|
||||
// Session exists - check if Claude is still running
|
||||
agentCfg := config.ResolveAgentConfig(townRoot, r.Path)
|
||||
if !t.IsAgentRunning(sessionID, config.ExpectedPaneCommands(agentCfg)...) {
|
||||
// Claude has exited, restart it
|
||||
fmt.Printf(" %s %s/%s session exists, restarting Claude...\n", style.Dim.Render("○"), r.Name, crewName)
|
||||
claudeCmd := config.BuildCrewStartupCommand(r.Name, crewName, r.Path, "gt prime")
|
||||
if err := t.SendKeys(sessionID, claudeCmd); err != nil {
|
||||
fmt.Printf(" %s %s/%s restart failed: %v\n", style.Dim.Render("○"), r.Name, crewName, err)
|
||||
} else {
|
||||
fmt.Printf(" %s %s/%s Claude restarted\n", style.Bold.Render("✓"), r.Name, crewName)
|
||||
startedAny = true
|
||||
}
|
||||
} else {
|
||||
fmt.Printf(" %s %s/%s already running\n", style.Dim.Render("○"), r.Name, crewName)
|
||||
}
|
||||
} else {
|
||||
if err := startCrewMember(r.Name, crewName, townRoot); err != nil {
|
||||
fmt.Printf(" %s %s/%s failed: %v\n", style.Dim.Render("○"), r.Name, crewName, err)
|
||||
@@ -294,84 +317,6 @@ func discoverAllRigs(townRoot string) ([]*rig.Rig, error) {
|
||||
return rigMgr.DiscoverRigs()
|
||||
}
|
||||
|
||||
// ensureRefinerySession creates a refinery tmux session if it doesn't exist.
// Returns true if a new session was created, false if it already existed.
//
// The steps are strictly ordered: session existence check, working-directory
// resolution, Claude settings, tmux session creation, environment setup,
// theming, agent launch, then two delayed nudges. Failures after the session
// is created are deliberately ignored (best-effort) so a partially themed or
// un-nudged session still counts as started.
func ensureRefinerySession(rigName string, r *rig.Rig) (bool, error) {
	t := tmux.NewTmux()
	sessionName := fmt.Sprintf("gt-%s-refinery", rigName)

	// Check if session already exists; an existing session is not an error.
	running, err := t.HasSession(sessionName)
	if err != nil {
		return false, fmt.Errorf("checking session: %w", err)
	}

	if running {
		return false, nil
	}

	// Working directory is the refinery's rig clone.
	refineryRigDir := filepath.Join(r.Path, "refinery", "rig")
	if _, err := os.Stat(refineryRigDir); os.IsNotExist(err) {
		// Fall back to rig path if refinery/rig doesn't exist.
		refineryRigDir = r.Path
	}

	// Ensure Claude settings exist (autonomous role needs mail in SessionStart).
	if err := claude.EnsureSettingsForRole(refineryRigDir, "refinery"); err != nil {
		return false, fmt.Errorf("ensuring Claude settings: %w", err)
	}

	// Create new tmux session rooted in the refinery clone.
	if err := t.NewSession(sessionName, refineryRigDir); err != nil {
		return false, fmt.Errorf("creating session: %w", err)
	}

	// Set role/rig/actor environment. Errors are ignored: the session works
	// without these, they only improve attribution.
	bdActor := fmt.Sprintf("%s/refinery", rigName)
	_ = t.SetEnvironment(sessionName, "GT_ROLE", "refinery")
	_ = t.SetEnvironment(sessionName, "GT_RIG", rigName)
	_ = t.SetEnvironment(sessionName, "BD_ACTOR", bdActor)

	// Set beads environment. The beads DB lives under the mayor's rig clone,
	// not the refinery's own clone.
	beadsDir := filepath.Join(r.Path, "mayor", "rig", ".beads")
	_ = t.SetEnvironment(sessionName, "BEADS_DIR", beadsDir)
	_ = t.SetEnvironment(sessionName, "BEADS_NO_DAEMON", "1")
	_ = t.SetEnvironment(sessionName, "BEADS_AGENT_NAME", fmt.Sprintf("%s/refinery", rigName))

	// Apply Gas Town theming (non-fatal: theming failure doesn't affect operation).
	theme := tmux.AssignTheme(rigName)
	_ = t.ConfigureGasTownSession(sessionName, theme, rigName, "refinery", "refinery")

	// Launch Claude directly (no respawn loop - daemon handles restart).
	// Export GT_ROLE and BD_ACTOR in the command since tmux SetEnvironment
	// only affects new panes.
	if err := t.SendKeys(sessionName, config.BuildAgentStartupCommand("refinery", bdActor, "", "")); err != nil {
		return false, fmt.Errorf("sending command: %w", err)
	}

	// Wait for Claude to start (non-fatal: a slow start just means the nudges
	// below may land early).
	if err := t.WaitForCommand(sessionName, constants.SupportedShells, constants.ClaudeStartTimeout); err != nil {
		// Non-fatal
	}
	time.Sleep(constants.ShutdownNotifyDelay)

	// Inject startup nudge for predecessor discovery via /resume.
	address := fmt.Sprintf("%s/refinery", rigName)
	_ = session.StartupNudge(t, sessionName, session.StartupNudgeConfig{
		Recipient: address,
		Sender:    "deacon",
		Topic:     "patrol",
	}) // Non-fatal

	// GUPP: Gas Town Universal Propulsion Principle.
	// Send the propulsion nudge to trigger autonomous patrol execution.
	// Wait for beacon to be fully processed (needs to be separate prompt).
	time.Sleep(2 * time.Second)
	_ = t.NudgeSession(sessionName, session.PropulsionNudgeForRole("refinery", refineryRigDir)) // Non-fatal

	return true, nil
}
|
||||
|
||||
func runShutdown(cmd *cobra.Command, args []string) error {
|
||||
t := tmux.NewTmux()
|
||||
|
||||
@@ -735,25 +680,6 @@ func runStartCrew(cmd *cobra.Command, args []string) error {
|
||||
crewGit := git.NewGit(r.Path)
|
||||
crewMgr := crew.NewManager(r, crewGit)
|
||||
|
||||
// Check if crew exists, create if not
|
||||
worker, err := crewMgr.Get(name)
|
||||
if err == crew.ErrCrewNotFound {
|
||||
fmt.Printf("Creating crew workspace %s in %s...\n", name, rigName)
|
||||
worker, err = crewMgr.Add(name, false) // No feature branch for crew
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating crew workspace: %w", err)
|
||||
}
|
||||
fmt.Printf("%s Created crew workspace: %s/%s\n",
|
||||
style.Bold.Render("✓"), rigName, name)
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("getting crew worker: %w", err)
|
||||
} else {
|
||||
fmt.Printf("Crew workspace %s/%s exists\n", rigName, name)
|
||||
}
|
||||
|
||||
// Ensure crew workspace is on default branch
|
||||
ensureDefaultBranch(worker.ClonePath, fmt.Sprintf("Crew workspace %s/%s", rigName, name), r.Path)
|
||||
|
||||
// Resolve account for Claude config
|
||||
accountsPath := constants.MayorAccountsPath(townRoot)
|
||||
claudeConfigDir, accountHandle, err := config.ResolveAccountConfigDir(accountsPath, startCrewAccount)
|
||||
@@ -764,58 +690,19 @@ func runStartCrew(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf("Using account: %s\n", accountHandle)
|
||||
}
|
||||
|
||||
// Check if session exists
|
||||
t := tmux.NewTmux()
|
||||
sessionID := crewSessionName(rigName, name)
|
||||
hasSession, err := t.HasSession(sessionID)
|
||||
// Use manager's Start() method - handles workspace creation, settings, and session
|
||||
err = crewMgr.Start(name, crew.StartOptions{
|
||||
Account: startCrewAccount,
|
||||
ClaudeConfigDir: claudeConfigDir,
|
||||
AgentOverride: startCrewAgentOverride,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking session: %w", err)
|
||||
}
|
||||
|
||||
if hasSession {
|
||||
// Session exists - check if Claude is still running
|
||||
if !t.IsClaudeRunning(sessionID) {
|
||||
// Claude has exited, restart it with "gt prime" as initial prompt
|
||||
fmt.Printf("Session exists, restarting Claude...\n")
|
||||
claudeCmd := config.BuildCrewStartupCommand(rigName, name, r.Path, "gt prime")
|
||||
if err := t.SendKeys(sessionID, claudeCmd); err != nil {
|
||||
return fmt.Errorf("restarting claude: %w", err)
|
||||
}
|
||||
if errors.Is(err, crew.ErrSessionRunning) {
|
||||
fmt.Printf("%s Session already running: %s\n", style.Dim.Render("○"), crewMgr.SessionName(name))
|
||||
} else {
|
||||
fmt.Printf("%s Session already running: %s\n", style.Dim.Render("○"), sessionID)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// Create new session
|
||||
if err := t.NewSession(sessionID, worker.ClonePath); err != nil {
|
||||
return fmt.Errorf("creating session: %w", err)
|
||||
}
|
||||
|
||||
// Set environment (non-fatal: session works without these)
|
||||
_ = t.SetEnvironment(sessionID, "GT_RIG", rigName)
|
||||
_ = t.SetEnvironment(sessionID, "GT_CREW", name)
|
||||
|
||||
// Set CLAUDE_CONFIG_DIR for account selection (non-fatal)
|
||||
if claudeConfigDir != "" {
|
||||
_ = t.SetEnvironment(sessionID, "CLAUDE_CONFIG_DIR", claudeConfigDir)
|
||||
}
|
||||
|
||||
// Apply rig-based theming (non-fatal: theming failure doesn't affect operation)
|
||||
// Note: ConfigureGasTownSession includes cycle bindings
|
||||
theme := getThemeForRig(rigName)
|
||||
_ = t.ConfigureGasTownSession(sessionID, theme, rigName, name, "crew")
|
||||
|
||||
// Wait for shell to be ready after session creation
|
||||
if err := t.WaitForShellReady(sessionID, constants.ShellReadyTimeout); err != nil {
|
||||
return fmt.Errorf("waiting for shell: %w", err)
|
||||
}
|
||||
|
||||
// Start claude with skip permissions and proper env vars for seance
|
||||
// Pass "gt prime" as initial prompt so context is loaded immediately
|
||||
claudeCmd := config.BuildCrewStartupCommand(rigName, name, r.Path, "gt prime")
|
||||
if err := t.SendKeys(sessionID, claudeCmd); err != nil {
|
||||
return fmt.Errorf("starting claude: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Started crew workspace: %s/%s\n",
|
||||
style.Bold.Render("✓"), rigName, name)
|
||||
}
|
||||
@@ -889,54 +776,14 @@ func startCrewMember(rigName, crewName, townRoot string) error {
|
||||
return fmt.Errorf("rig '%s' not found", rigName)
|
||||
}
|
||||
|
||||
// Create crew manager
|
||||
// Create crew manager and use Start() method
|
||||
crewGit := git.NewGit(r.Path)
|
||||
crewMgr := crew.NewManager(r, crewGit)
|
||||
|
||||
// Check if crew exists, create if not
|
||||
worker, err := crewMgr.Get(crewName)
|
||||
if err == crew.ErrCrewNotFound {
|
||||
worker, err = crewMgr.Add(crewName, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating crew workspace: %w", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("getting crew worker: %w", err)
|
||||
}
|
||||
|
||||
// Ensure crew workspace is on default branch
|
||||
ensureDefaultBranch(worker.ClonePath, fmt.Sprintf("Crew workspace %s/%s", rigName, crewName), r.Path)
|
||||
|
||||
// Create tmux session
|
||||
t := tmux.NewTmux()
|
||||
sessionID := crewSessionName(rigName, crewName)
|
||||
|
||||
if err := t.NewSession(sessionID, worker.ClonePath); err != nil {
|
||||
return fmt.Errorf("creating session: %w", err)
|
||||
}
|
||||
|
||||
// Set environment (non-fatal: session works without these)
|
||||
_ = t.SetEnvironment(sessionID, "GT_RIG", rigName)
|
||||
_ = t.SetEnvironment(sessionID, "GT_CREW", crewName)
|
||||
|
||||
// Apply rig-based theming (non-fatal: theming failure doesn't affect operation)
|
||||
theme := getThemeForRig(rigName)
|
||||
_ = t.ConfigureGasTownSession(sessionID, theme, rigName, crewName, "crew")
|
||||
|
||||
// Set up C-b n/p keybindings for crew session cycling (non-fatal)
|
||||
_ = t.SetCrewCycleBindings(sessionID)
|
||||
|
||||
// Wait for shell to be ready
|
||||
if err := t.WaitForShellReady(sessionID, constants.ShellReadyTimeout); err != nil {
|
||||
return fmt.Errorf("waiting for shell: %w", err)
|
||||
}
|
||||
|
||||
// Start claude with proper env vars for seance
|
||||
// Pass "gt prime" as initial prompt so context is loaded immediately
|
||||
// (SessionStart hook fires, then Claude processes "gt prime" as first user message)
|
||||
claudeCmd := config.BuildCrewStartupCommand(rigName, crewName, r.Path, "gt prime")
|
||||
if err := t.SendKeys(sessionID, claudeCmd); err != nil {
|
||||
return fmt.Errorf("starting claude: %w", err)
|
||||
// Start handles workspace creation, settings, and session all in one
|
||||
err = crewMgr.Start(crewName, crew.StartOptions{})
|
||||
if err != nil && !errors.Is(err, crew.ErrSessionRunning) {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -4,9 +4,12 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
@@ -19,10 +22,14 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
"golang.org/x/term"
|
||||
)
|
||||
|
||||
var statusJSON bool
|
||||
var statusFast bool
|
||||
var statusWatch bool
|
||||
var statusInterval int
|
||||
var statusVerbose bool
|
||||
|
||||
var statusCmd = &cobra.Command{
|
||||
Use: "status",
|
||||
@@ -33,13 +40,17 @@ var statusCmd = &cobra.Command{
|
||||
|
||||
Shows town name, registered rigs, active polecats, and witness status.
|
||||
|
||||
Use --fast to skip mail lookups for faster execution.`,
|
||||
Use --fast to skip mail lookups for faster execution.
|
||||
Use --watch to continuously refresh status at regular intervals.`,
|
||||
RunE: runStatus,
|
||||
}
|
||||
|
||||
func init() {
|
||||
statusCmd.Flags().BoolVar(&statusJSON, "json", false, "Output as JSON")
|
||||
statusCmd.Flags().BoolVar(&statusFast, "fast", false, "Skip mail lookups for faster execution")
|
||||
statusCmd.Flags().BoolVarP(&statusWatch, "watch", "w", false, "Watch mode: refresh status continuously")
|
||||
statusCmd.Flags().IntVarP(&statusInterval, "interval", "n", 2, "Refresh interval in seconds")
|
||||
statusCmd.Flags().BoolVarP(&statusVerbose, "verbose", "v", false, "Show detailed multi-line output per agent")
|
||||
rootCmd.AddCommand(statusCmd)
|
||||
}
|
||||
|
||||
@@ -120,6 +131,58 @@ type StatusSum struct {
|
||||
}
|
||||
|
||||
func runStatus(cmd *cobra.Command, args []string) error {
|
||||
if statusWatch {
|
||||
return runStatusWatch(cmd, args)
|
||||
}
|
||||
return runStatusOnce(cmd, args)
|
||||
}
|
||||
|
||||
func runStatusWatch(cmd *cobra.Command, args []string) error {
|
||||
if statusJSON {
|
||||
return fmt.Errorf("--json and --watch cannot be used together")
|
||||
}
|
||||
if statusInterval <= 0 {
|
||||
return fmt.Errorf("interval must be positive, got %d", statusInterval)
|
||||
}
|
||||
|
||||
sigChan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
|
||||
defer signal.Stop(sigChan)
|
||||
|
||||
ticker := time.NewTicker(time.Duration(statusInterval) * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
isTTY := term.IsTerminal(int(os.Stdout.Fd()))
|
||||
|
||||
for {
|
||||
if isTTY {
|
||||
fmt.Print("\033[H\033[2J") // ANSI: cursor home + clear screen
|
||||
}
|
||||
|
||||
timestamp := time.Now().Format("15:04:05")
|
||||
header := fmt.Sprintf("[%s] gt status --watch (every %ds, Ctrl+C to stop)", timestamp, statusInterval)
|
||||
if isTTY {
|
||||
fmt.Printf("%s\n\n", style.Dim.Render(header))
|
||||
} else {
|
||||
fmt.Printf("%s\n\n", header)
|
||||
}
|
||||
|
||||
if err := runStatusOnce(cmd, args); err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-sigChan:
|
||||
if isTTY {
|
||||
fmt.Println("\nStopped.")
|
||||
}
|
||||
return nil
|
||||
case <-ticker.C:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func runStatusOnce(_ *cobra.Command, _ []string) error {
|
||||
// Find town root
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
@@ -395,8 +458,16 @@ func outputStatusText(status TownStatus) error {
|
||||
if icon == "" {
|
||||
icon = roleIcons[agent.Name]
|
||||
}
|
||||
fmt.Printf("%s %s\n", icon, style.Bold.Render(capitalizeFirst(agent.Name)))
|
||||
renderAgentDetails(agent, " ", nil, status.Location)
|
||||
if statusVerbose {
|
||||
fmt.Printf("%s %s\n", icon, style.Bold.Render(capitalizeFirst(agent.Name)))
|
||||
renderAgentDetails(agent, " ", nil, status.Location)
|
||||
fmt.Println()
|
||||
} else {
|
||||
// Compact: icon + name on one line
|
||||
renderAgentCompact(agent, icon+" ", nil, status.Location)
|
||||
}
|
||||
}
|
||||
if !statusVerbose && len(status.Agents) > 0 {
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
@@ -427,73 +498,86 @@ func outputStatusText(status TownStatus) error {
|
||||
|
||||
// Witness
|
||||
if len(witnesses) > 0 {
|
||||
fmt.Printf("%s %s\n", roleIcons["witness"], style.Bold.Render("Witness"))
|
||||
for _, agent := range witnesses {
|
||||
renderAgentDetails(agent, " ", r.Hooks, status.Location)
|
||||
if statusVerbose {
|
||||
fmt.Printf("%s %s\n", roleIcons["witness"], style.Bold.Render("Witness"))
|
||||
for _, agent := range witnesses {
|
||||
renderAgentDetails(agent, " ", r.Hooks, status.Location)
|
||||
}
|
||||
fmt.Println()
|
||||
} else {
|
||||
for _, agent := range witnesses {
|
||||
renderAgentCompact(agent, roleIcons["witness"]+" ", r.Hooks, status.Location)
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Refinery
|
||||
if len(refineries) > 0 {
|
||||
fmt.Printf("%s %s\n", roleIcons["refinery"], style.Bold.Render("Refinery"))
|
||||
for _, agent := range refineries {
|
||||
renderAgentDetails(agent, " ", r.Hooks, status.Location)
|
||||
}
|
||||
// MQ summary (shown under refinery)
|
||||
if r.MQ != nil {
|
||||
mqParts := []string{}
|
||||
if r.MQ.Pending > 0 {
|
||||
mqParts = append(mqParts, fmt.Sprintf("%d pending", r.MQ.Pending))
|
||||
if statusVerbose {
|
||||
fmt.Printf("%s %s\n", roleIcons["refinery"], style.Bold.Render("Refinery"))
|
||||
for _, agent := range refineries {
|
||||
renderAgentDetails(agent, " ", r.Hooks, status.Location)
|
||||
}
|
||||
if r.MQ.InFlight > 0 {
|
||||
mqParts = append(mqParts, style.Warning.Render(fmt.Sprintf("%d in-flight", r.MQ.InFlight)))
|
||||
}
|
||||
if r.MQ.Blocked > 0 {
|
||||
mqParts = append(mqParts, style.Dim.Render(fmt.Sprintf("%d blocked", r.MQ.Blocked)))
|
||||
}
|
||||
if len(mqParts) > 0 {
|
||||
// Add state indicator
|
||||
stateIcon := "○" // idle
|
||||
switch r.MQ.State {
|
||||
case "processing":
|
||||
stateIcon = style.Success.Render("●")
|
||||
case "blocked":
|
||||
stateIcon = style.Error.Render("○")
|
||||
// MQ summary (shown under refinery)
|
||||
if r.MQ != nil {
|
||||
mqStr := formatMQSummary(r.MQ)
|
||||
if mqStr != "" {
|
||||
fmt.Printf(" MQ: %s\n", mqStr)
|
||||
}
|
||||
// Add health warning if stale
|
||||
healthSuffix := ""
|
||||
if r.MQ.Health == "stale" {
|
||||
healthSuffix = style.Error.Render(" [stale]")
|
||||
}
|
||||
fmt.Println()
|
||||
} else {
|
||||
for _, agent := range refineries {
|
||||
// Compact: include MQ on same line if present
|
||||
mqSuffix := ""
|
||||
if r.MQ != nil {
|
||||
mqStr := formatMQSummaryCompact(r.MQ)
|
||||
if mqStr != "" {
|
||||
mqSuffix = " " + mqStr
|
||||
}
|
||||
}
|
||||
fmt.Printf(" MQ: %s %s%s\n", stateIcon, strings.Join(mqParts, ", "), healthSuffix)
|
||||
renderAgentCompactWithSuffix(agent, roleIcons["refinery"]+" ", r.Hooks, status.Location, mqSuffix)
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Crew
|
||||
if len(crews) > 0 {
|
||||
fmt.Printf("%s %s (%d)\n", roleIcons["crew"], style.Bold.Render("Crew"), len(crews))
|
||||
for _, agent := range crews {
|
||||
renderAgentDetails(agent, " ", r.Hooks, status.Location)
|
||||
if statusVerbose {
|
||||
fmt.Printf("%s %s (%d)\n", roleIcons["crew"], style.Bold.Render("Crew"), len(crews))
|
||||
for _, agent := range crews {
|
||||
renderAgentDetails(agent, " ", r.Hooks, status.Location)
|
||||
}
|
||||
fmt.Println()
|
||||
} else {
|
||||
fmt.Printf("%s %s (%d)\n", roleIcons["crew"], style.Bold.Render("Crew"), len(crews))
|
||||
for _, agent := range crews {
|
||||
renderAgentCompact(agent, " ", r.Hooks, status.Location)
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Polecats
|
||||
if len(polecats) > 0 {
|
||||
fmt.Printf("%s %s (%d)\n", roleIcons["polecat"], style.Bold.Render("Polecats"), len(polecats))
|
||||
for _, agent := range polecats {
|
||||
renderAgentDetails(agent, " ", r.Hooks, status.Location)
|
||||
if statusVerbose {
|
||||
fmt.Printf("%s %s (%d)\n", roleIcons["polecat"], style.Bold.Render("Polecats"), len(polecats))
|
||||
for _, agent := range polecats {
|
||||
renderAgentDetails(agent, " ", r.Hooks, status.Location)
|
||||
}
|
||||
fmt.Println()
|
||||
} else {
|
||||
fmt.Printf("%s %s (%d)\n", roleIcons["polecat"], style.Bold.Render("Polecats"), len(polecats))
|
||||
for _, agent := range polecats {
|
||||
renderAgentCompact(agent, " ", r.Hooks, status.Location)
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// No agents
|
||||
if len(witnesses) == 0 && len(refineries) == 0 && len(crews) == 0 && len(polecats) == 0 {
|
||||
fmt.Printf(" %s\n\n", style.Dim.Render("(no agents)"))
|
||||
fmt.Printf(" %s\n", style.Dim.Render("(no agents)"))
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -502,40 +586,34 @@ func outputStatusText(status TownStatus) error {
|
||||
// renderAgentDetails renders full agent bead details
|
||||
func renderAgentDetails(agent AgentRuntime, indent string, hooks []AgentHookInfo, townRoot string) { //nolint:unparam // indent kept for future customization
|
||||
// Line 1: Agent bead ID + status
|
||||
// Reconcile bead state with tmux session state to surface mismatches
|
||||
// States: "running" (active), "idle" (waiting), "stopped", "dead", etc.
|
||||
beadState := agent.State
|
||||
// Per gt-zecmc: derive status from tmux (observable reality), not bead state.
|
||||
// "Discover, don't track" - agent liveness is observable from tmux session.
|
||||
sessionExists := agent.Running
|
||||
|
||||
// "idle" is a normal operational state (running but waiting for work)
|
||||
// Treat it the same as "running" for reconciliation purposes
|
||||
beadSaysRunning := beadState == "running" || beadState == "idle" || beadState == ""
|
||||
|
||||
var statusStr string
|
||||
var stateInfo string
|
||||
|
||||
switch {
|
||||
case beadSaysRunning && sessionExists:
|
||||
// Normal running state - session exists and bead agrees
|
||||
if sessionExists {
|
||||
statusStr = style.Success.Render("running")
|
||||
case beadSaysRunning && !sessionExists:
|
||||
// Bead thinks running but session is gone - stale bead state
|
||||
statusStr = style.Error.Render("running")
|
||||
stateInfo = style.Warning.Render(" [dead]")
|
||||
case !beadSaysRunning && sessionExists:
|
||||
// Session exists but bead says stopped/dead - mismatch!
|
||||
// This is the key case: tmux says alive, bead says dead/stopped
|
||||
statusStr = style.Success.Render("running")
|
||||
stateInfo = style.Warning.Render(" [bead: " + beadState + "]")
|
||||
default:
|
||||
// Both agree: stopped
|
||||
} else {
|
||||
statusStr = style.Error.Render("stopped")
|
||||
}
|
||||
|
||||
// Add agent state info if not already shown and state is interesting
|
||||
// Skip "idle" and "running" as they're normal operational states
|
||||
if stateInfo == "" && beadState != "" && beadState != "idle" && beadState != "running" {
|
||||
// Show non-observable states that represent intentional agent decisions.
|
||||
// These can't be discovered from tmux and are legitimately recorded in beads.
|
||||
beadState := agent.State
|
||||
switch beadState {
|
||||
case "stuck":
|
||||
// Agent escalated - needs help
|
||||
stateInfo = style.Warning.Render(" [stuck]")
|
||||
case "awaiting-gate":
|
||||
// Agent waiting for external trigger (phase gate)
|
||||
stateInfo = style.Dim.Render(" [awaiting-gate]")
|
||||
case "muted", "paused", "degraded":
|
||||
// Other intentional non-observable states
|
||||
stateInfo = style.Dim.Render(fmt.Sprintf(" [%s]", beadState))
|
||||
// Ignore observable states: "running", "idle", "dead", "done", "stopped", ""
|
||||
// These should be derived from tmux, not bead.
|
||||
}
|
||||
|
||||
// Build agent bead ID using canonical naming: prefix-rig-role-name
|
||||
@@ -545,8 +623,8 @@ func renderAgentDetails(agent AgentRuntime, indent string, hooks []AgentHookInfo
|
||||
addr := strings.TrimSuffix(agent.Address, "/") // Remove trailing slash for global agents
|
||||
parts := strings.Split(addr, "/")
|
||||
if len(parts) == 1 {
|
||||
// Global agent: mayor/, deacon/ → gt-mayor, gt-deacon
|
||||
agentBeadID = beads.AgentBeadID("", parts[0], "")
|
||||
// Global agent: mayor/, deacon/ → hq-mayor, hq-deacon
|
||||
agentBeadID = beads.AgentBeadIDWithPrefix(beads.TownBeadsPrefix, "", parts[0], "")
|
||||
} else if len(parts) >= 2 {
|
||||
rig := parts[0]
|
||||
prefix := beads.GetPrefixForRig(townRoot, rig)
|
||||
@@ -604,6 +682,166 @@ func renderAgentDetails(agent AgentRuntime, indent string, hooks []AgentHookInfo
|
||||
}
|
||||
}
|
||||
|
||||
// formatMQSummary formats the MQ status for verbose display
|
||||
func formatMQSummary(mq *MQSummary) string {
|
||||
if mq == nil {
|
||||
return ""
|
||||
}
|
||||
mqParts := []string{}
|
||||
if mq.Pending > 0 {
|
||||
mqParts = append(mqParts, fmt.Sprintf("%d pending", mq.Pending))
|
||||
}
|
||||
if mq.InFlight > 0 {
|
||||
mqParts = append(mqParts, style.Warning.Render(fmt.Sprintf("%d in-flight", mq.InFlight)))
|
||||
}
|
||||
if mq.Blocked > 0 {
|
||||
mqParts = append(mqParts, style.Dim.Render(fmt.Sprintf("%d blocked", mq.Blocked)))
|
||||
}
|
||||
if len(mqParts) == 0 {
|
||||
return ""
|
||||
}
|
||||
// Add state indicator
|
||||
stateIcon := "○" // idle
|
||||
switch mq.State {
|
||||
case "processing":
|
||||
stateIcon = style.Success.Render("●")
|
||||
case "blocked":
|
||||
stateIcon = style.Error.Render("○")
|
||||
}
|
||||
// Add health warning if stale
|
||||
healthSuffix := ""
|
||||
if mq.Health == "stale" {
|
||||
healthSuffix = style.Error.Render(" [stale]")
|
||||
}
|
||||
return fmt.Sprintf("%s %s%s", stateIcon, strings.Join(mqParts, ", "), healthSuffix)
|
||||
}
|
||||
|
||||
// formatMQSummaryCompact formats MQ status for compact single-line display
|
||||
func formatMQSummaryCompact(mq *MQSummary) string {
|
||||
if mq == nil {
|
||||
return ""
|
||||
}
|
||||
// Very compact: "MQ:12" or "MQ:12 [stale]"
|
||||
total := mq.Pending + mq.InFlight + mq.Blocked
|
||||
if total == 0 {
|
||||
return ""
|
||||
}
|
||||
healthSuffix := ""
|
||||
if mq.Health == "stale" {
|
||||
healthSuffix = style.Error.Render("[stale]")
|
||||
}
|
||||
return fmt.Sprintf("MQ:%d%s", total, healthSuffix)
|
||||
}
|
||||
|
||||
// renderAgentCompactWithSuffix renders a single-line agent status with an extra suffix
|
||||
func renderAgentCompactWithSuffix(agent AgentRuntime, indent string, hooks []AgentHookInfo, _ string, suffix string) {
|
||||
// Build status indicator (gt-zecmc: use tmux state, not bead state)
|
||||
statusIndicator := buildStatusIndicator(agent)
|
||||
|
||||
// Get hook info
|
||||
hookBead := agent.HookBead
|
||||
hookTitle := agent.WorkTitle
|
||||
if hookBead == "" && hooks != nil {
|
||||
for _, h := range hooks {
|
||||
if h.Agent == agent.Address && h.HasWork {
|
||||
hookBead = h.Molecule
|
||||
hookTitle = h.Title
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Build hook suffix
|
||||
hookSuffix := ""
|
||||
if hookBead != "" {
|
||||
if hookTitle != "" {
|
||||
hookSuffix = style.Dim.Render(" → ") + truncateWithEllipsis(hookTitle, 30)
|
||||
} else {
|
||||
hookSuffix = style.Dim.Render(" → ") + hookBead
|
||||
}
|
||||
} else if hookTitle != "" {
|
||||
hookSuffix = style.Dim.Render(" → ") + truncateWithEllipsis(hookTitle, 30)
|
||||
}
|
||||
|
||||
// Mail indicator
|
||||
mailSuffix := ""
|
||||
if agent.UnreadMail > 0 {
|
||||
mailSuffix = fmt.Sprintf(" 📬%d", agent.UnreadMail)
|
||||
}
|
||||
|
||||
// Print single line: name + status + hook + mail + suffix
|
||||
fmt.Printf("%s%-12s %s%s%s%s\n", indent, agent.Name, statusIndicator, hookSuffix, mailSuffix, suffix)
|
||||
}
|
||||
|
||||
// renderAgentCompact renders a single-line agent status
|
||||
func renderAgentCompact(agent AgentRuntime, indent string, hooks []AgentHookInfo, _ string) {
|
||||
// Build status indicator (gt-zecmc: use tmux state, not bead state)
|
||||
statusIndicator := buildStatusIndicator(agent)
|
||||
|
||||
// Get hook info
|
||||
hookBead := agent.HookBead
|
||||
hookTitle := agent.WorkTitle
|
||||
if hookBead == "" && hooks != nil {
|
||||
for _, h := range hooks {
|
||||
if h.Agent == agent.Address && h.HasWork {
|
||||
hookBead = h.Molecule
|
||||
hookTitle = h.Title
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Build hook suffix
|
||||
hookSuffix := ""
|
||||
if hookBead != "" {
|
||||
if hookTitle != "" {
|
||||
hookSuffix = style.Dim.Render(" → ") + truncateWithEllipsis(hookTitle, 30)
|
||||
} else {
|
||||
hookSuffix = style.Dim.Render(" → ") + hookBead
|
||||
}
|
||||
} else if hookTitle != "" {
|
||||
hookSuffix = style.Dim.Render(" → ") + truncateWithEllipsis(hookTitle, 30)
|
||||
}
|
||||
|
||||
// Mail indicator
|
||||
mailSuffix := ""
|
||||
if agent.UnreadMail > 0 {
|
||||
mailSuffix = fmt.Sprintf(" 📬%d", agent.UnreadMail)
|
||||
}
|
||||
|
||||
// Print single line: name + status + hook + mail
|
||||
fmt.Printf("%s%-12s %s%s%s\n", indent, agent.Name, statusIndicator, hookSuffix, mailSuffix)
|
||||
}
|
||||
|
||||
// buildStatusIndicator creates the visual status indicator for an agent.
|
||||
// Per gt-zecmc: uses tmux state (observable reality), not bead state.
|
||||
// Non-observable states (stuck, awaiting-gate, muted, etc.) are shown as suffixes.
|
||||
func buildStatusIndicator(agent AgentRuntime) string {
|
||||
sessionExists := agent.Running
|
||||
|
||||
// Base indicator from tmux state
|
||||
var indicator string
|
||||
if sessionExists {
|
||||
indicator = style.Success.Render("●")
|
||||
} else {
|
||||
indicator = style.Error.Render("○")
|
||||
}
|
||||
|
||||
// Add non-observable state suffix if present
|
||||
beadState := agent.State
|
||||
switch beadState {
|
||||
case "stuck":
|
||||
indicator += style.Warning.Render(" stuck")
|
||||
case "awaiting-gate":
|
||||
indicator += style.Dim.Render(" gate")
|
||||
case "muted", "paused", "degraded":
|
||||
indicator += style.Dim.Render(" " + beadState)
|
||||
// Ignore observable states: running, idle, dead, done, stopped, ""
|
||||
}
|
||||
|
||||
return indicator
|
||||
}
|
||||
|
||||
// formatHookInfo formats the hook bead and title for display
|
||||
func formatHookInfo(hookBead, title string, maxLen int) string {
|
||||
if hookBead == "" {
|
||||
|
||||
@@ -95,3 +95,66 @@ func TestRenderAgentDetails_UsesRigPrefix(t *testing.T) {
|
||||
t.Fatalf("output %q does not contain rig-prefixed bead ID", output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunStatusWatch_RejectsZeroInterval(t *testing.T) {
|
||||
oldInterval := statusInterval
|
||||
oldWatch := statusWatch
|
||||
defer func() {
|
||||
statusInterval = oldInterval
|
||||
statusWatch = oldWatch
|
||||
}()
|
||||
|
||||
statusInterval = 0
|
||||
statusWatch = true
|
||||
|
||||
err := runStatusWatch(nil, nil)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for zero interval, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "positive") {
|
||||
t.Errorf("error %q should mention 'positive'", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunStatusWatch_RejectsNegativeInterval(t *testing.T) {
|
||||
oldInterval := statusInterval
|
||||
oldWatch := statusWatch
|
||||
defer func() {
|
||||
statusInterval = oldInterval
|
||||
statusWatch = oldWatch
|
||||
}()
|
||||
|
||||
statusInterval = -5
|
||||
statusWatch = true
|
||||
|
||||
err := runStatusWatch(nil, nil)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for negative interval, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "positive") {
|
||||
t.Errorf("error %q should mention 'positive'", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunStatusWatch_RejectsJSONCombo(t *testing.T) {
|
||||
oldJSON := statusJSON
|
||||
oldWatch := statusWatch
|
||||
oldInterval := statusInterval
|
||||
defer func() {
|
||||
statusJSON = oldJSON
|
||||
statusWatch = oldWatch
|
||||
statusInterval = oldInterval
|
||||
}()
|
||||
|
||||
statusJSON = true
|
||||
statusWatch = true
|
||||
statusInterval = 2
|
||||
|
||||
err := runStatusWatch(nil, nil)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for --json + --watch, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "cannot be used together") {
|
||||
t.Errorf("error %q should mention 'cannot be used together'", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
@@ -181,31 +182,76 @@ func runMayorStatusLine(t *tmux.Tmux) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Count polecats and rigs
|
||||
// Polecats: only actual polecats (not witnesses, refineries, deacon, crew)
|
||||
// Rigs: only registered rigs with active sessions
|
||||
// Track per-rig status for LED indicators
|
||||
type rigStatus struct {
|
||||
hasWitness bool
|
||||
hasRefinery bool
|
||||
}
|
||||
rigStatuses := make(map[string]*rigStatus)
|
||||
|
||||
// Initialize for all registered rigs
|
||||
for rigName := range registeredRigs {
|
||||
rigStatuses[rigName] = &rigStatus{}
|
||||
}
|
||||
|
||||
// Count polecats and track rig witness/refinery status
|
||||
polecatCount := 0
|
||||
rigs := make(map[string]bool)
|
||||
for _, s := range sessions {
|
||||
agent := categorizeSession(s)
|
||||
if agent == nil {
|
||||
continue
|
||||
}
|
||||
// Count rigs from any rig-level agent, but only if registered
|
||||
if agent.Rig != "" && registeredRigs[agent.Rig] {
|
||||
rigs[agent.Rig] = true
|
||||
}
|
||||
// Count only polecats for polecat count (in registered rigs)
|
||||
if agent.Type == AgentPolecat && registeredRigs[agent.Rig] {
|
||||
polecatCount++
|
||||
if rigStatuses[agent.Rig] == nil {
|
||||
rigStatuses[agent.Rig] = &rigStatus{}
|
||||
}
|
||||
switch agent.Type {
|
||||
case AgentWitness:
|
||||
rigStatuses[agent.Rig].hasWitness = true
|
||||
case AgentRefinery:
|
||||
rigStatuses[agent.Rig].hasRefinery = true
|
||||
case AgentPolecat:
|
||||
polecatCount++
|
||||
}
|
||||
}
|
||||
}
|
||||
rigCount := len(rigs)
|
||||
|
||||
// Build status
|
||||
var parts []string
|
||||
parts = append(parts, fmt.Sprintf("%d 😺", polecatCount))
|
||||
parts = append(parts, fmt.Sprintf("%d rigs", rigCount))
|
||||
|
||||
// Build rig status display with LED indicators
|
||||
// 🟢 = both witness and refinery running (fully active)
|
||||
// 🟡 = one of witness/refinery running (partially active)
|
||||
// ⚫ = neither running (inactive)
|
||||
var rigParts []string
|
||||
var rigNames []string
|
||||
for rigName := range rigStatuses {
|
||||
rigNames = append(rigNames, rigName)
|
||||
}
|
||||
sort.Strings(rigNames)
|
||||
|
||||
for _, rigName := range rigNames {
|
||||
status := rigStatuses[rigName]
|
||||
var led string
|
||||
|
||||
// Check if rig is parked or docked
|
||||
opState, _ := getRigOperationalState(townRoot, rigName)
|
||||
if opState == "PARKED" || opState == "DOCKED" {
|
||||
led = "⏸️" // Parked/docked - intentionally offline
|
||||
} else if status.hasWitness && status.hasRefinery {
|
||||
led = "🟢" // Both running - fully active
|
||||
} else if status.hasWitness || status.hasRefinery {
|
||||
led = "🟡" // One running - partially active
|
||||
} else {
|
||||
led = "⚫" // Neither running - inactive
|
||||
}
|
||||
rigParts = append(rigParts, led+rigName)
|
||||
}
|
||||
|
||||
if len(rigParts) > 0 {
|
||||
parts = append(parts, strings.Join(rigParts, " "))
|
||||
}
|
||||
|
||||
// Priority 1: Check for hooked work (town beads for mayor)
|
||||
hookedWork := ""
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/events"
|
||||
"github.com/steveyegge/gastown/internal/git"
|
||||
"github.com/steveyegge/gastown/internal/polecat"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
"github.com/steveyegge/gastown/internal/townlog"
|
||||
@@ -111,8 +111,8 @@ func runStop(cmd *cobra.Command, args []string) error {
|
||||
stopped := 0
|
||||
|
||||
for _, r := range rigs {
|
||||
mgr := session.NewManager(t, r)
|
||||
infos, err := mgr.List()
|
||||
polecatMgr := polecat.NewSessionManager(t, r)
|
||||
infos, err := polecatMgr.List()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
@@ -125,10 +125,10 @@ func runStop(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// Capture output before stopping (best effort)
|
||||
output, _ := mgr.Capture(info.Polecat, 50)
|
||||
output, _ := polecatMgr.Capture(info.Polecat, 50)
|
||||
|
||||
// Stop the session
|
||||
err := mgr.Stop(info.Polecat, force)
|
||||
err := polecatMgr.Stop(info.Polecat, force)
|
||||
if err != nil {
|
||||
result.Success = false
|
||||
result.Error = err.Error()
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/git"
|
||||
"github.com/steveyegge/gastown/internal/polecat"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/swarm"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
@@ -528,7 +528,7 @@ func spawnSwarmWorkersFromBeads(r *rig.Rig, townRoot string, swarmID string, wor
|
||||
Title string `json:"title"`
|
||||
}) error { //nolint:unparam // error return kept for future use
|
||||
t := tmux.NewTmux()
|
||||
sessMgr := session.NewManager(t, r)
|
||||
polecatSessMgr := polecat.NewSessionManager(t, r)
|
||||
polecatGit := git.NewGit(r.Path)
|
||||
polecatMgr := polecat.NewManager(r, polecatGit)
|
||||
|
||||
@@ -556,12 +556,12 @@ func spawnSwarmWorkersFromBeads(r *rig.Rig, townRoot string, swarmID string, wor
|
||||
}
|
||||
|
||||
// Check if already running
|
||||
running, _ := sessMgr.IsRunning(worker)
|
||||
running, _ := polecatSessMgr.IsRunning(worker)
|
||||
if running {
|
||||
fmt.Printf(" %s already running, injecting task...\n", worker)
|
||||
} else {
|
||||
fmt.Printf(" Starting %s...\n", worker)
|
||||
if err := sessMgr.Start(worker, session.StartOptions{}); err != nil {
|
||||
if err := polecatSessMgr.Start(worker, polecat.SessionStartOptions{}); err != nil {
|
||||
style.PrintWarning(" couldn't start %s: %v", worker, err)
|
||||
continue
|
||||
}
|
||||
@@ -572,7 +572,7 @@ func spawnSwarmWorkersFromBeads(r *rig.Rig, townRoot string, swarmID string, wor
|
||||
// Inject work assignment
|
||||
context := fmt.Sprintf("[SWARM] You are part of swarm %s.\n\nAssigned task: %s\nTitle: %s\n\nWork on this task. When complete, commit and signal DONE.",
|
||||
swarmID, task.ID, task.Title)
|
||||
if err := sessMgr.Inject(worker, context); err != nil {
|
||||
if err := polecatSessMgr.Inject(worker, context); err != nil {
|
||||
style.PrintWarning(" couldn't inject to %s: %v", worker, err)
|
||||
} else {
|
||||
fmt.Printf(" %s → %s ✓\n", worker, task.ID)
|
||||
@@ -809,7 +809,7 @@ func runSwarmLand(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Close the swarm epic in beads
|
||||
closeArgs := []string{"close", swarmID, "--reason", "Swarm landed to main"}
|
||||
if sessionID := os.Getenv("CLAUDE_SESSION_ID"); sessionID != "" {
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
closeArgs = append(closeArgs, "--session="+sessionID)
|
||||
}
|
||||
closeCmd := exec.Command("bd", closeArgs...)
|
||||
@@ -868,7 +868,7 @@ func runSwarmCancel(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Close the swarm epic in beads with canceled reason
|
||||
closeArgs := []string{"close", swarmID, "--reason", "Swarm canceled"}
|
||||
if sessionID := os.Getenv("CLAUDE_SESSION_ID"); sessionID != "" {
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
closeArgs = append(closeArgs, "--session="+sessionID)
|
||||
}
|
||||
closeCmd := exec.Command("bd", closeArgs...)
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/formula"
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
@@ -322,7 +323,7 @@ func runSynthesisClose(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Close the convoy
|
||||
closeArgs := []string{"close", convoyID, "--reason=synthesis complete"}
|
||||
if sessionID := os.Getenv("CLAUDE_SESSION_ID"); sessionID != "" {
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
closeArgs = append(closeArgs, "--session="+sessionID)
|
||||
}
|
||||
closeCmd := exec.Command("bd", closeArgs...)
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"sort"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
|
||||
// townCycleSession is the --session flag for town next/prev commands.
|
||||
@@ -22,18 +21,13 @@ func getTownLevelSessions() []string {
|
||||
}
|
||||
|
||||
// isTownLevelSession checks if the given session name is a town-level session.
|
||||
// Town-level sessions (Mayor, Deacon) use the "hq-" prefix, so we can identify
|
||||
// them by name alone without requiring workspace context. This is critical for
|
||||
// tmux run-shell which may execute from outside the workspace directory.
|
||||
func isTownLevelSession(sessionName string) bool {
|
||||
townRoot, err := workspace.FindFromCwd()
|
||||
if err != nil || townRoot == "" {
|
||||
return false
|
||||
}
|
||||
townName, err := workspace.GetTownName(townRoot)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
mayorSession := getMayorSessionName()
|
||||
deaconSession := getDeaconSessionName()
|
||||
_ = townName // used for session name generation
|
||||
// Town-level sessions are identified by their fixed names
|
||||
mayorSession := getMayorSessionName() // "hq-mayor"
|
||||
deaconSession := getDeaconSessionName() // "hq-deacon"
|
||||
return sessionName == mayorSession || sessionName == deaconSession
|
||||
}
|
||||
|
||||
|
||||
171
internal/cmd/uninstall.go
Normal file
171
internal/cmd/uninstall.go
Normal file
@@ -0,0 +1,171 @@
|
||||
// ABOUTME: Command to completely uninstall Gas Town from the system.
|
||||
// ABOUTME: Removes shell integration, wrappers, state, and optionally workspace.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/shell"
|
||||
"github.com/steveyegge/gastown/internal/state"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/wrappers"
|
||||
)
|
||||
|
||||
var (
|
||||
uninstallWorkspace bool
|
||||
uninstallForce bool
|
||||
)
|
||||
|
||||
var uninstallCmd = &cobra.Command{
|
||||
Use: "uninstall",
|
||||
GroupID: GroupConfig,
|
||||
Short: "Remove Gas Town from the system",
|
||||
Long: `Completely remove Gas Town from the system.
|
||||
|
||||
By default, removes:
|
||||
- Shell integration (~/.zshrc or ~/.bashrc)
|
||||
- Wrapper scripts (~/bin/gt-codex, ~/bin/gt-opencode)
|
||||
- State directory (~/.local/state/gastown/)
|
||||
- Config directory (~/.config/gastown/)
|
||||
- Cache directory (~/.cache/gastown/)
|
||||
|
||||
The workspace (e.g., ~/gt) is NOT removed unless --workspace is specified.
|
||||
|
||||
Use --force to skip confirmation prompts.
|
||||
|
||||
Examples:
|
||||
gt uninstall # Remove Gas Town, keep workspace
|
||||
gt uninstall --workspace # Also remove workspace directory
|
||||
gt uninstall --force # Skip confirmation`,
|
||||
RunE: runUninstall,
|
||||
}
|
||||
|
||||
func init() {
|
||||
uninstallCmd.Flags().BoolVar(&uninstallWorkspace, "workspace", false,
|
||||
"Also remove the workspace directory (DESTRUCTIVE)")
|
||||
uninstallCmd.Flags().BoolVarP(&uninstallForce, "force", "f", false,
|
||||
"Skip confirmation prompts")
|
||||
rootCmd.AddCommand(uninstallCmd)
|
||||
}
|
||||
|
||||
func runUninstall(cmd *cobra.Command, args []string) error {
|
||||
if !uninstallForce {
|
||||
fmt.Println("This will remove Gas Town from your system.")
|
||||
fmt.Println()
|
||||
fmt.Println("The following will be removed:")
|
||||
fmt.Printf(" • Shell integration (%s)\n", shell.RCFilePath(shell.DetectShell()))
|
||||
fmt.Printf(" • Wrapper scripts (%s)\n", wrappers.BinDir())
|
||||
fmt.Printf(" • State directory (%s)\n", state.StateDir())
|
||||
fmt.Printf(" • Config directory (%s)\n", state.ConfigDir())
|
||||
fmt.Printf(" • Cache directory (%s)\n", state.CacheDir())
|
||||
|
||||
if uninstallWorkspace {
|
||||
fmt.Println()
|
||||
fmt.Printf(" %s WORKSPACE WILL BE DELETED\n", style.Warning.Render("⚠"))
|
||||
fmt.Println(" This cannot be undone!")
|
||||
}
|
||||
|
||||
fmt.Println()
|
||||
fmt.Print("Continue? [y/N] ")
|
||||
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
response, _ := reader.ReadString('\n')
|
||||
response = strings.TrimSpace(strings.ToLower(response))
|
||||
|
||||
if response != "y" && response != "yes" {
|
||||
fmt.Println("Aborted.")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
var errors []string
|
||||
|
||||
fmt.Println()
|
||||
fmt.Println("Removing Gas Town...")
|
||||
|
||||
if err := shell.Remove(); err != nil {
|
||||
errors = append(errors, fmt.Sprintf("shell integration: %v", err))
|
||||
} else {
|
||||
fmt.Printf(" %s Removed shell integration\n", style.Success.Render("✓"))
|
||||
}
|
||||
|
||||
if err := wrappers.Remove(); err != nil {
|
||||
errors = append(errors, fmt.Sprintf("wrapper scripts: %v", err))
|
||||
} else {
|
||||
fmt.Printf(" %s Removed wrapper scripts\n", style.Success.Render("✓"))
|
||||
}
|
||||
|
||||
if err := os.RemoveAll(state.StateDir()); err != nil && !os.IsNotExist(err) {
|
||||
errors = append(errors, fmt.Sprintf("state directory: %v", err))
|
||||
} else {
|
||||
fmt.Printf(" %s Removed state directory\n", style.Success.Render("✓"))
|
||||
}
|
||||
|
||||
if err := os.RemoveAll(state.ConfigDir()); err != nil && !os.IsNotExist(err) {
|
||||
errors = append(errors, fmt.Sprintf("config directory: %v", err))
|
||||
} else {
|
||||
fmt.Printf(" %s Removed config directory\n", style.Success.Render("✓"))
|
||||
}
|
||||
|
||||
if err := os.RemoveAll(state.CacheDir()); err != nil && !os.IsNotExist(err) {
|
||||
errors = append(errors, fmt.Sprintf("cache directory: %v", err))
|
||||
} else {
|
||||
fmt.Printf(" %s Removed cache directory\n", style.Success.Render("✓"))
|
||||
}
|
||||
|
||||
if uninstallWorkspace {
|
||||
workspaceDir := findWorkspaceForUninstall()
|
||||
if workspaceDir != "" {
|
||||
if err := os.RemoveAll(workspaceDir); err != nil {
|
||||
errors = append(errors, fmt.Sprintf("workspace: %v", err))
|
||||
} else {
|
||||
fmt.Printf(" %s Removed workspace: %s\n", style.Success.Render("✓"), workspaceDir)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
fmt.Println()
|
||||
fmt.Printf("%s Some components could not be removed:\n", style.Warning.Render("⚠"))
|
||||
for _, e := range errors {
|
||||
fmt.Printf(" • %s\n", e)
|
||||
}
|
||||
return fmt.Errorf("uninstall incomplete")
|
||||
}
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("%s Gas Town has been uninstalled\n", style.Success.Render("✓"))
|
||||
fmt.Println()
|
||||
fmt.Println("To reinstall, run:")
|
||||
fmt.Printf(" %s\n", style.Dim.Render("go install github.com/steveyegge/gastown/cmd/gt@latest"))
|
||||
fmt.Printf(" %s\n", style.Dim.Render("gt install ~/gt --shell"))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func findWorkspaceForUninstall() string {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
candidates := []string{
|
||||
filepath.Join(home, "gt"),
|
||||
filepath.Join(home, "gastown"),
|
||||
}
|
||||
|
||||
for _, path := range candidates {
|
||||
mayorDir := filepath.Join(path, "mayor")
|
||||
if _, err := os.Stat(mayorDir); err == nil {
|
||||
return path
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
@@ -106,7 +106,7 @@ func runUnsling(cmd *cobra.Command, args []string) error {
|
||||
b := beads.New(beadsPath)
|
||||
|
||||
// Convert agent ID to agent bead ID and look up the agent bead
|
||||
agentBeadID := agentIDToBeadID(agentID)
|
||||
agentBeadID := agentIDToBeadID(agentID, townRoot)
|
||||
if agentBeadID == "" {
|
||||
return fmt.Errorf("could not convert agent ID %s to bead ID", agentID)
|
||||
}
|
||||
@@ -162,9 +162,8 @@ func runUnsling(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Clear the hook by updating agent bead with empty hook_bead
|
||||
emptyHook := ""
|
||||
if err := b.UpdateAgentState(agentBeadID, "running", &emptyHook); err != nil {
|
||||
// Clear the hook (gt-zecmc: removed agent_state update - observable from tmux)
|
||||
if err := b.ClearHookBead(agentBeadID); err != nil {
|
||||
return fmt.Errorf("clearing hook from agent bead %s: %w", agentBeadID, err)
|
||||
}
|
||||
|
||||
|
||||
@@ -11,13 +11,16 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
"github.com/steveyegge/gastown/internal/crew"
|
||||
"github.com/steveyegge/gastown/internal/daemon"
|
||||
"github.com/steveyegge/gastown/internal/deacon"
|
||||
"github.com/steveyegge/gastown/internal/events"
|
||||
"github.com/steveyegge/gastown/internal/mayor"
|
||||
"github.com/steveyegge/gastown/internal/polecat"
|
||||
"github.com/steveyegge/gastown/internal/refinery"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
"github.com/steveyegge/gastown/internal/witness"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
|
||||
@@ -65,7 +68,6 @@ func runUp(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
t := tmux.NewTmux()
|
||||
allOK := true
|
||||
|
||||
// 1. Daemon (Go process)
|
||||
@@ -79,37 +81,52 @@ func runUp(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Get session names
|
||||
deaconSession := getDeaconSessionName()
|
||||
mayorSession := getMayorSessionName()
|
||||
|
||||
// 2. Deacon (Claude agent)
|
||||
if err := ensureSession(t, deaconSession, townRoot, "deacon"); err != nil {
|
||||
printStatus("Deacon", false, err.Error())
|
||||
allOK = false
|
||||
deaconMgr := deacon.NewManager(townRoot)
|
||||
if err := deaconMgr.Start(""); err != nil {
|
||||
if err == deacon.ErrAlreadyRunning {
|
||||
printStatus("Deacon", true, deaconMgr.SessionName())
|
||||
} else {
|
||||
printStatus("Deacon", false, err.Error())
|
||||
allOK = false
|
||||
}
|
||||
} else {
|
||||
printStatus("Deacon", true, deaconSession)
|
||||
printStatus("Deacon", true, deaconMgr.SessionName())
|
||||
}
|
||||
|
||||
// 3. Mayor (Claude agent)
|
||||
if err := ensureSession(t, mayorSession, townRoot, "mayor"); err != nil {
|
||||
printStatus("Mayor", false, err.Error())
|
||||
allOK = false
|
||||
mayorMgr := mayor.NewManager(townRoot)
|
||||
if err := mayorMgr.Start(""); err != nil {
|
||||
if err == mayor.ErrAlreadyRunning {
|
||||
printStatus("Mayor", true, mayorMgr.SessionName())
|
||||
} else {
|
||||
printStatus("Mayor", false, err.Error())
|
||||
allOK = false
|
||||
}
|
||||
} else {
|
||||
printStatus("Mayor", true, mayorSession)
|
||||
printStatus("Mayor", true, mayorMgr.SessionName())
|
||||
}
|
||||
|
||||
// 4. Witnesses (one per rig)
|
||||
rigs := discoverRigs(townRoot)
|
||||
for _, rigName := range rigs {
|
||||
sessionName := fmt.Sprintf("gt-%s-witness", rigName)
|
||||
rigPath := filepath.Join(townRoot, rigName)
|
||||
|
||||
if err := ensureWitness(t, sessionName, rigPath, rigName); err != nil {
|
||||
_, r, err := getRig(rigName)
|
||||
if err != nil {
|
||||
printStatus(fmt.Sprintf("Witness (%s)", rigName), false, err.Error())
|
||||
allOK = false
|
||||
continue
|
||||
}
|
||||
|
||||
mgr := witness.NewManager(r)
|
||||
if err := mgr.Start(false); err != nil {
|
||||
if err == witness.ErrAlreadyRunning {
|
||||
printStatus(fmt.Sprintf("Witness (%s)", rigName), true, mgr.SessionName())
|
||||
} else {
|
||||
printStatus(fmt.Sprintf("Witness (%s)", rigName), false, err.Error())
|
||||
allOK = false
|
||||
}
|
||||
} else {
|
||||
printStatus(fmt.Sprintf("Witness (%s)", rigName), true, sessionName)
|
||||
printStatus(fmt.Sprintf("Witness (%s)", rigName), true, mgr.SessionName())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -125,22 +142,20 @@ func runUp(cmd *cobra.Command, args []string) error {
|
||||
mgr := refinery.NewManager(r)
|
||||
if err := mgr.Start(false); err != nil {
|
||||
if err == refinery.ErrAlreadyRunning {
|
||||
sessionName := fmt.Sprintf("gt-%s-refinery", rigName)
|
||||
printStatus(fmt.Sprintf("Refinery (%s)", rigName), true, sessionName)
|
||||
printStatus(fmt.Sprintf("Refinery (%s)", rigName), true, mgr.SessionName())
|
||||
} else {
|
||||
printStatus(fmt.Sprintf("Refinery (%s)", rigName), false, err.Error())
|
||||
allOK = false
|
||||
}
|
||||
} else {
|
||||
sessionName := fmt.Sprintf("gt-%s-refinery", rigName)
|
||||
printStatus(fmt.Sprintf("Refinery (%s)", rigName), true, sessionName)
|
||||
printStatus(fmt.Sprintf("Refinery (%s)", rigName), true, mgr.SessionName())
|
||||
}
|
||||
}
|
||||
|
||||
// 6. Crew (if --restore)
|
||||
if upRestore {
|
||||
for _, rigName := range rigs {
|
||||
crewStarted, crewErrors := startCrewFromSettings(t, townRoot, rigName)
|
||||
crewStarted, crewErrors := startCrewFromSettings(townRoot, rigName)
|
||||
for _, name := range crewStarted {
|
||||
printStatus(fmt.Sprintf("Crew (%s/%s)", rigName, name), true, fmt.Sprintf("gt-%s-crew-%s", rigName, name))
|
||||
}
|
||||
@@ -152,7 +167,7 @@ func runUp(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// 7. Polecats with pinned work (if --restore)
|
||||
for _, rigName := range rigs {
|
||||
polecatsStarted, polecatErrors := startPolecatsWithWork(t, townRoot, rigName)
|
||||
polecatsStarted, polecatErrors := startPolecatsWithWork(townRoot, rigName)
|
||||
for _, name := range polecatsStarted {
|
||||
printStatus(fmt.Sprintf("Polecat (%s/%s)", rigName, name), true, fmt.Sprintf("gt-%s-polecat-%s", rigName, name))
|
||||
}
|
||||
@@ -234,127 +249,6 @@ func ensureDaemon(townRoot string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureSession starts a Claude session if not running.
|
||||
func ensureSession(t *tmux.Tmux, sessionName, workDir, role string) error {
|
||||
running, err := t.HasSession(sessionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if running {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create session
|
||||
if err := t.NewSession(sessionName, workDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set environment (non-fatal: session works without these)
|
||||
_ = t.SetEnvironment(sessionName, "GT_ROLE", role)
|
||||
_ = t.SetEnvironment(sessionName, "BD_ACTOR", role)
|
||||
|
||||
// Apply theme based on role (non-fatal: theming failure doesn't affect operation)
|
||||
switch role {
|
||||
case "mayor":
|
||||
theme := tmux.MayorTheme()
|
||||
_ = t.ConfigureGasTownSession(sessionName, theme, "", "Mayor", "coordinator")
|
||||
case "deacon":
|
||||
theme := tmux.DeaconTheme()
|
||||
_ = t.ConfigureGasTownSession(sessionName, theme, "", "Deacon", "health-check")
|
||||
}
|
||||
|
||||
// Launch Claude
|
||||
// Export GT_ROLE and BD_ACTOR in the command since tmux SetEnvironment only affects new panes
|
||||
var claudeCmd string
|
||||
runtimeCmd := config.GetRuntimeCommand("")
|
||||
if role == "deacon" {
|
||||
// Deacon uses respawn loop
|
||||
claudeCmd = `export GT_ROLE=deacon BD_ACTOR=deacon GIT_AUTHOR_NAME=deacon && while true; do echo "⛪ Starting Deacon session..."; ` + runtimeCmd + `; echo ""; echo "Deacon exited. Restarting in 2s... (Ctrl-C to stop)"; sleep 2; done`
|
||||
} else {
|
||||
claudeCmd = config.BuildAgentStartupCommand(role, role, "", "")
|
||||
}
|
||||
|
||||
if err := t.SendKeysDelayed(sessionName, claudeCmd, 200); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for Claude to start (non-fatal)
|
||||
// Note: Deacon respawn loop makes beacon tricky - Claude restarts multiple times
|
||||
// For non-respawn (mayor), inject beacon
|
||||
if role != "deacon" {
|
||||
if err := t.WaitForCommand(sessionName, constants.SupportedShells, constants.ClaudeStartTimeout); err != nil {
|
||||
// Non-fatal
|
||||
}
|
||||
|
||||
// Accept bypass permissions warning dialog if it appears.
|
||||
_ = t.AcceptBypassPermissionsWarning(sessionName)
|
||||
|
||||
time.Sleep(constants.ShutdownNotifyDelay)
|
||||
|
||||
// Inject startup nudge for predecessor discovery via /resume
|
||||
_ = session.StartupNudge(t, sessionName, session.StartupNudgeConfig{
|
||||
Recipient: role,
|
||||
Sender: "human",
|
||||
Topic: "cold-start",
|
||||
}) // Non-fatal
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureWitness starts a witness session for a rig.
|
||||
func ensureWitness(t *tmux.Tmux, sessionName, rigPath, rigName string) error {
|
||||
running, err := t.HasSession(sessionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if running {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create session in rig directory
|
||||
if err := t.NewSession(sessionName, rigPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set environment (non-fatal: session works without these)
|
||||
bdActor := fmt.Sprintf("%s/witness", rigName)
|
||||
_ = t.SetEnvironment(sessionName, "GT_ROLE", "witness")
|
||||
_ = t.SetEnvironment(sessionName, "GT_RIG", rigName)
|
||||
_ = t.SetEnvironment(sessionName, "BD_ACTOR", bdActor)
|
||||
|
||||
// Apply theme (non-fatal: theming failure doesn't affect operation)
|
||||
theme := tmux.AssignTheme(rigName)
|
||||
_ = t.ConfigureGasTownSession(sessionName, theme, "", "Witness", rigName)
|
||||
|
||||
// Launch Claude using runtime config
|
||||
// Export GT_ROLE and BD_ACTOR in the command since tmux SetEnvironment only affects new panes
|
||||
claudeCmd := config.BuildAgentStartupCommand("witness", bdActor, rigPath, "")
|
||||
if err := t.SendKeysDelayed(sessionName, claudeCmd, 200); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for Claude to start (non-fatal)
|
||||
if err := t.WaitForCommand(sessionName, constants.SupportedShells, constants.ClaudeStartTimeout); err != nil {
|
||||
// Non-fatal
|
||||
}
|
||||
|
||||
// Accept bypass permissions warning dialog if it appears.
|
||||
_ = t.AcceptBypassPermissionsWarning(sessionName)
|
||||
|
||||
time.Sleep(constants.ShutdownNotifyDelay)
|
||||
|
||||
// Inject startup nudge for predecessor discovery via /resume
|
||||
address := fmt.Sprintf("%s/witness", rigName)
|
||||
_ = session.StartupNudge(t, sessionName, session.StartupNudgeConfig{
|
||||
Recipient: address,
|
||||
Sender: "deacon",
|
||||
Topic: "patrol",
|
||||
}) // Non-fatal
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// discoverRigs finds all rigs in the town.
|
||||
func discoverRigs(townRoot string) []string {
|
||||
var rigs []string
|
||||
@@ -407,7 +301,7 @@ func discoverRigs(townRoot string) []string {
|
||||
|
||||
// startCrewFromSettings starts crew members based on rig settings.
|
||||
// Returns list of started crew names and map of errors.
|
||||
func startCrewFromSettings(t *tmux.Tmux, townRoot, rigName string) ([]string, map[string]error) {
|
||||
func startCrewFromSettings(townRoot, rigName string) ([]string, map[string]error) {
|
||||
started := []string{}
|
||||
errors := map[string]error{}
|
||||
|
||||
@@ -450,24 +344,14 @@ func startCrewFromSettings(t *tmux.Tmux, townRoot, rigName string) ([]string, ma
|
||||
// Parse startup preference and determine which crew to start
|
||||
toStart := parseCrewStartupPreference(settings.Crew.Startup, crewNames)
|
||||
|
||||
// Start each crew member
|
||||
// Start each crew member using Manager
|
||||
for _, crewName := range toStart {
|
||||
sessionName := fmt.Sprintf("gt-%s-crew-%s", rigName, crewName)
|
||||
|
||||
running, err := t.HasSession(sessionName)
|
||||
if err != nil {
|
||||
errors[crewName] = err
|
||||
continue
|
||||
}
|
||||
if running {
|
||||
started = append(started, crewName)
|
||||
continue
|
||||
}
|
||||
|
||||
// Start the crew member
|
||||
crewPath := filepath.Join(rigPath, "crew", crewName)
|
||||
if err := ensureCrewSession(t, sessionName, crewPath, rigName, crewName); err != nil {
|
||||
errors[crewName] = err
|
||||
if err := crewMgr.Start(crewName, crew.StartOptions{}); err != nil {
|
||||
if err == crew.ErrSessionRunning {
|
||||
started = append(started, crewName)
|
||||
} else {
|
||||
errors[crewName] = err
|
||||
}
|
||||
} else {
|
||||
started = append(started, crewName)
|
||||
}
|
||||
@@ -539,56 +423,9 @@ func parseCrewStartupPreference(pref string, available []string) []string {
|
||||
return result
|
||||
}
|
||||
|
||||
// ensureCrewSession starts a crew session.
|
||||
func ensureCrewSession(t *tmux.Tmux, sessionName, crewPath, rigName, crewName string) error {
|
||||
// Create session in crew directory
|
||||
if err := t.NewSession(sessionName, crewPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set environment
|
||||
bdActor := fmt.Sprintf("%s/crew/%s", rigName, crewName)
|
||||
_ = t.SetEnvironment(sessionName, "GT_ROLE", "crew")
|
||||
_ = t.SetEnvironment(sessionName, "GT_RIG", rigName)
|
||||
_ = t.SetEnvironment(sessionName, "GT_CREW", crewName)
|
||||
_ = t.SetEnvironment(sessionName, "BD_ACTOR", bdActor)
|
||||
|
||||
// Apply theme (use rig-based theme)
|
||||
theme := tmux.AssignTheme(rigName)
|
||||
_ = t.ConfigureGasTownSession(sessionName, theme, "", "Crew", crewName)
|
||||
|
||||
// Launch Claude using runtime config
|
||||
// crewPath is like ~/gt/gastown/crew/max, so rig path is two dirs up
|
||||
rigPath := filepath.Dir(filepath.Dir(crewPath))
|
||||
claudeCmd := config.BuildCrewStartupCommand(rigName, crewName, rigPath, "")
|
||||
if err := t.SendKeysDelayed(sessionName, claudeCmd, 200); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for Claude to start (non-fatal)
|
||||
if err := t.WaitForCommand(sessionName, constants.SupportedShells, constants.ClaudeStartTimeout); err != nil {
|
||||
// Non-fatal
|
||||
}
|
||||
|
||||
// Accept bypass permissions warning dialog if it appears.
|
||||
_ = t.AcceptBypassPermissionsWarning(sessionName)
|
||||
|
||||
time.Sleep(constants.ShutdownNotifyDelay)
|
||||
|
||||
// Inject startup nudge for predecessor discovery via /resume
|
||||
address := fmt.Sprintf("%s/crew/%s", rigName, crewName)
|
||||
_ = session.StartupNudge(t, sessionName, session.StartupNudgeConfig{
|
||||
Recipient: address,
|
||||
Sender: "human",
|
||||
Topic: "cold-start",
|
||||
}) // Non-fatal
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// startPolecatsWithWork starts polecats that have pinned beads (work attached).
|
||||
// Returns list of started polecat names and map of errors.
|
||||
func startPolecatsWithWork(t *tmux.Tmux, townRoot, rigName string) ([]string, map[string]error) {
|
||||
func startPolecatsWithWork(townRoot, rigName string) ([]string, map[string]error) {
|
||||
started := []string{}
|
||||
errors := map[string]error{}
|
||||
|
||||
@@ -602,10 +439,21 @@ func startPolecatsWithWork(t *tmux.Tmux, townRoot, rigName string) ([]string, ma
|
||||
return started, errors
|
||||
}
|
||||
|
||||
// Get polecat session manager
|
||||
_, r, err := getRig(rigName)
|
||||
if err != nil {
|
||||
return started, errors
|
||||
}
|
||||
t := tmux.NewTmux()
|
||||
polecatMgr := polecat.NewSessionManager(t, r)
|
||||
|
||||
for _, entry := range entries {
|
||||
if !entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(entry.Name(), ".") {
|
||||
continue
|
||||
}
|
||||
|
||||
polecatName := entry.Name()
|
||||
polecatPath := filepath.Join(polecatsDir, polecatName)
|
||||
@@ -623,22 +471,13 @@ func startPolecatsWithWork(t *tmux.Tmux, townRoot, rigName string) ([]string, ma
|
||||
continue
|
||||
}
|
||||
|
||||
// This polecat has work - start it
|
||||
sessionName := fmt.Sprintf("gt-%s-polecat-%s", rigName, polecatName)
|
||||
|
||||
running, err := t.HasSession(sessionName)
|
||||
if err != nil {
|
||||
errors[polecatName] = err
|
||||
continue
|
||||
}
|
||||
if running {
|
||||
started = append(started, polecatName)
|
||||
continue
|
||||
}
|
||||
|
||||
// Start the polecat
|
||||
if err := ensurePolecatSession(t, sessionName, polecatPath, rigName, polecatName); err != nil {
|
||||
errors[polecatName] = err
|
||||
// This polecat has work - start it using SessionManager
|
||||
if err := polecatMgr.Start(polecatName, polecat.SessionStartOptions{}); err != nil {
|
||||
if err == polecat.ErrSessionRunning {
|
||||
started = append(started, polecatName)
|
||||
} else {
|
||||
errors[polecatName] = err
|
||||
}
|
||||
} else {
|
||||
started = append(started, polecatName)
|
||||
}
|
||||
@@ -646,50 +485,3 @@ func startPolecatsWithWork(t *tmux.Tmux, townRoot, rigName string) ([]string, ma
|
||||
|
||||
return started, errors
|
||||
}
|
||||
|
||||
// ensurePolecatSession starts a polecat session.
|
||||
func ensurePolecatSession(t *tmux.Tmux, sessionName, polecatPath, rigName, polecatName string) error {
|
||||
// Create session in polecat directory
|
||||
if err := t.NewSession(sessionName, polecatPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set environment
|
||||
bdActor := fmt.Sprintf("%s/polecats/%s", rigName, polecatName)
|
||||
_ = t.SetEnvironment(sessionName, "GT_ROLE", "polecat")
|
||||
_ = t.SetEnvironment(sessionName, "GT_RIG", rigName)
|
||||
_ = t.SetEnvironment(sessionName, "GT_POLECAT", polecatName)
|
||||
_ = t.SetEnvironment(sessionName, "BD_ACTOR", bdActor)
|
||||
|
||||
// Apply theme (use rig-based theme)
|
||||
theme := tmux.AssignTheme(rigName)
|
||||
_ = t.ConfigureGasTownSession(sessionName, theme, "", "Polecat", polecatName)
|
||||
|
||||
// Launch Claude using runtime config
|
||||
// polecatPath is like ~/gt/gastown/polecats/toast, so rig path is two dirs up
|
||||
rigPath := filepath.Dir(filepath.Dir(polecatPath))
|
||||
claudeCmd := config.BuildPolecatStartupCommand(rigName, polecatName, rigPath, "")
|
||||
if err := t.SendKeysDelayed(sessionName, claudeCmd, 200); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for Claude to start (non-fatal)
|
||||
if err := t.WaitForCommand(sessionName, constants.SupportedShells, constants.ClaudeStartTimeout); err != nil {
|
||||
// Non-fatal
|
||||
}
|
||||
|
||||
// Accept bypass permissions warning dialog if it appears.
|
||||
_ = t.AcceptBypassPermissionsWarning(sessionName)
|
||||
|
||||
time.Sleep(constants.ShutdownNotifyDelay)
|
||||
|
||||
// Inject startup nudge for predecessor discovery via /resume
|
||||
address := fmt.Sprintf("%s/polecats/%s", rigName, polecatName)
|
||||
_ = session.StartupNudge(t, sessionName, session.StartupNudgeConfig{
|
||||
Recipient: address,
|
||||
Sender: "witness",
|
||||
Topic: "dispatch",
|
||||
}) // Non-fatal
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
|
||||
// Version information - set at build time via ldflags
|
||||
var (
|
||||
Version = "0.2.1"
|
||||
Version = "0.2.3"
|
||||
// Build can be set via ldflags at compile time
|
||||
Build = "dev"
|
||||
// Commit and Branch - the git revision the binary was built from (optional ldflag)
|
||||
|
||||
@@ -5,15 +5,8 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/claude"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
"github.com/steveyegge/gastown/internal/witness"
|
||||
@@ -123,55 +116,41 @@ func init() {
|
||||
}
|
||||
|
||||
// getWitnessManager creates a witness manager for a rig.
|
||||
func getWitnessManager(rigName string) (*witness.Manager, *rig.Rig, error) {
|
||||
func getWitnessManager(rigName string) (*witness.Manager, error) {
|
||||
_, r, err := getRig(rigName)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mgr := witness.NewManager(r)
|
||||
return mgr, r, nil
|
||||
return mgr, nil
|
||||
}
|
||||
|
||||
func runWitnessStart(cmd *cobra.Command, args []string) error {
|
||||
rigName := args[0]
|
||||
|
||||
mgr, r, err := getWitnessManager(rigName)
|
||||
mgr, err := getWitnessManager(rigName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("Starting witness for %s...\n", rigName)
|
||||
|
||||
if witnessForeground {
|
||||
// Foreground mode is no longer supported - patrol logic moved to mol-witness-patrol
|
||||
if err := mgr.Start(); err != nil {
|
||||
if err == witness.ErrAlreadyRunning {
|
||||
fmt.Printf("%s Witness is already running\n", style.Dim.Render("⚠"))
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("starting witness: %w", err)
|
||||
if err := mgr.Start(witnessForeground); err != nil {
|
||||
if err == witness.ErrAlreadyRunning {
|
||||
fmt.Printf("%s Witness is already running\n", style.Dim.Render("⚠"))
|
||||
fmt.Printf(" %s\n", style.Dim.Render("Use 'gt witness attach' to connect"))
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("starting witness: %w", err)
|
||||
}
|
||||
|
||||
if witnessForeground {
|
||||
fmt.Printf("%s Note: Foreground mode no longer runs patrol loop\n", style.Dim.Render("⚠"))
|
||||
fmt.Printf(" %s\n", style.Dim.Render("Patrol logic is now handled by mol-witness-patrol molecule"))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Background mode: create tmux session with Claude
|
||||
created, err := ensureWitnessSession(rigName, r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !created {
|
||||
fmt.Printf("%s Witness session already running\n", style.Dim.Render("⚠"))
|
||||
fmt.Printf(" %s\n", style.Dim.Render("Use 'gt witness attach' to connect"))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update manager state to reflect running session (non-fatal: state file update)
|
||||
_ = mgr.Start()
|
||||
|
||||
fmt.Printf("%s Witness started for %s\n", style.Bold.Render("✓"), rigName)
|
||||
fmt.Printf(" %s\n", style.Dim.Render("Use 'gt witness attach' to connect"))
|
||||
fmt.Printf(" %s\n", style.Dim.Render("Use 'gt witness status' to check progress"))
|
||||
@@ -181,7 +160,7 @@ func runWitnessStart(cmd *cobra.Command, args []string) error {
|
||||
func runWitnessStop(cmd *cobra.Command, args []string) error {
|
||||
rigName := args[0]
|
||||
|
||||
mgr, _, err := getWitnessManager(rigName)
|
||||
mgr, err := getWitnessManager(rigName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -215,7 +194,7 @@ func runWitnessStop(cmd *cobra.Command, args []string) error {
|
||||
func runWitnessStatus(cmd *cobra.Command, args []string) error {
|
||||
rigName := args[0]
|
||||
|
||||
mgr, _, err := getWitnessManager(rigName)
|
||||
mgr, err := getWitnessManager(rigName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -283,95 +262,6 @@ func witnessSessionName(rigName string) string {
|
||||
return fmt.Sprintf("gt-%s-witness", rigName)
|
||||
}
|
||||
|
||||
// ensureWitnessSession creates a witness tmux session if it doesn't exist.
|
||||
// Returns true if a new session was created, false if it already existed (and is healthy).
|
||||
// Implements 'ensure' semantics: if session exists but Claude is dead (zombie), kills and recreates.
|
||||
func ensureWitnessSession(rigName string, r *rig.Rig) (bool, error) {
|
||||
t := tmux.NewTmux()
|
||||
sessionName := witnessSessionName(rigName)
|
||||
|
||||
// Check if session already exists
|
||||
running, err := t.HasSession(sessionName)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("checking session: %w", err)
|
||||
}
|
||||
|
||||
if running {
|
||||
// Session exists - check if Claude is actually running (healthy vs zombie)
|
||||
if t.IsClaudeRunning(sessionName) {
|
||||
// Healthy - Claude is running
|
||||
return false, nil
|
||||
}
|
||||
// Zombie - tmux alive but Claude dead. Kill and recreate.
|
||||
fmt.Printf("%s Detected zombie session (tmux alive, Claude dead). Recreating...\n", style.Dim.Render("⚠"))
|
||||
if err := t.KillSession(sessionName); err != nil {
|
||||
return false, fmt.Errorf("killing zombie session: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Working directory is the witness's rig clone (if it exists) or witness dir
|
||||
// This ensures gt prime detects the Witness role correctly
|
||||
witnessDir := filepath.Join(r.Path, "witness", "rig")
|
||||
if _, err := os.Stat(witnessDir); os.IsNotExist(err) {
|
||||
// Try witness/ without rig subdirectory
|
||||
witnessDir = filepath.Join(r.Path, "witness")
|
||||
if _, err := os.Stat(witnessDir); os.IsNotExist(err) {
|
||||
// Fall back to rig path (shouldn't happen in normal setup)
|
||||
witnessDir = r.Path
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure Claude settings exist (autonomous role needs mail in SessionStart)
|
||||
if err := claude.EnsureSettingsForRole(witnessDir, "witness"); err != nil {
|
||||
return false, fmt.Errorf("ensuring Claude settings: %w", err)
|
||||
}
|
||||
|
||||
// Create new tmux session
|
||||
if err := t.NewSession(sessionName, witnessDir); err != nil {
|
||||
return false, fmt.Errorf("creating session: %w", err)
|
||||
}
|
||||
|
||||
// Set environment
|
||||
bdActor := fmt.Sprintf("%s/witness", rigName)
|
||||
_ = t.SetEnvironment(sessionName, "GT_ROLE", "witness")
|
||||
_ = t.SetEnvironment(sessionName, "GT_RIG", rigName)
|
||||
_ = t.SetEnvironment(sessionName, "BD_ACTOR", bdActor)
|
||||
|
||||
// Apply Gas Town theming (non-fatal: theming failure doesn't affect operation)
|
||||
theme := tmux.AssignTheme(rigName)
|
||||
_ = t.ConfigureGasTownSession(sessionName, theme, rigName, "witness", "witness")
|
||||
|
||||
// Launch Claude directly (no shell respawn loop)
|
||||
// Restarts are handled by daemon via LIFECYCLE mail or deacon health-scan
|
||||
// NOTE: No gt prime injection needed - SessionStart hook handles it automatically
|
||||
// Export GT_ROLE and BD_ACTOR in the command since tmux SetEnvironment only affects new panes
|
||||
if err := t.SendKeys(sessionName, config.BuildAgentStartupCommand("witness", bdActor, "", "")); err != nil {
|
||||
return false, fmt.Errorf("sending command: %w", err)
|
||||
}
|
||||
|
||||
// Wait for Claude to start (non-fatal)
|
||||
if err := t.WaitForCommand(sessionName, constants.SupportedShells, constants.ClaudeStartTimeout); err != nil {
|
||||
// Non-fatal
|
||||
}
|
||||
time.Sleep(constants.ShutdownNotifyDelay)
|
||||
|
||||
// Inject startup nudge for predecessor discovery via /resume
|
||||
address := fmt.Sprintf("%s/witness", rigName)
|
||||
_ = session.StartupNudge(t, sessionName, session.StartupNudgeConfig{
|
||||
Recipient: address,
|
||||
Sender: "deacon",
|
||||
Topic: "patrol",
|
||||
}) // Non-fatal
|
||||
|
||||
// GUPP: Gas Town Universal Propulsion Principle
|
||||
// Send the propulsion nudge to trigger autonomous patrol execution.
|
||||
// Wait for beacon to be fully processed (needs to be separate prompt)
|
||||
time.Sleep(2 * time.Second)
|
||||
_ = t.NudgeSession(sessionName, session.PropulsionNudgeForRole("witness", witnessDir)) // Non-fatal
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func runWitnessAttach(cmd *cobra.Command, args []string) error {
|
||||
rigName := ""
|
||||
if len(args) > 0 {
|
||||
@@ -390,8 +280,8 @@ func runWitnessAttach(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Verify rig exists
|
||||
_, r, err := getWitnessManager(rigName)
|
||||
// Verify rig exists and get manager
|
||||
mgr, err := getWitnessManager(rigName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -399,12 +289,9 @@ func runWitnessAttach(cmd *cobra.Command, args []string) error {
|
||||
sessionName := witnessSessionName(rigName)
|
||||
|
||||
// Ensure session exists (creates if needed)
|
||||
created, err := ensureWitnessSession(rigName, r)
|
||||
if err != nil {
|
||||
if err := mgr.Start(false); err != nil && err != witness.ErrAlreadyRunning {
|
||||
return err
|
||||
}
|
||||
|
||||
if created {
|
||||
} else if err == nil {
|
||||
fmt.Printf("Started witness session for %s\n", rigName)
|
||||
}
|
||||
|
||||
@@ -424,36 +311,21 @@ func runWitnessAttach(cmd *cobra.Command, args []string) error {
|
||||
func runWitnessRestart(cmd *cobra.Command, args []string) error {
|
||||
rigName := args[0]
|
||||
|
||||
mgr, r, err := getWitnessManager(rigName)
|
||||
mgr, err := getWitnessManager(rigName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("Restarting witness for %s...\n", rigName)
|
||||
|
||||
// Kill tmux session if it exists
|
||||
t := tmux.NewTmux()
|
||||
sessionName := witnessSessionName(rigName)
|
||||
running, _ := t.HasSession(sessionName)
|
||||
if running {
|
||||
if err := t.KillSession(sessionName); err != nil {
|
||||
style.PrintWarning("failed to kill session: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Update state file to stopped (non-fatal: state file update)
|
||||
// Stop existing session (non-fatal: may not be running)
|
||||
_ = mgr.Stop()
|
||||
|
||||
// Start fresh
|
||||
created, err := ensureWitnessSession(rigName, r)
|
||||
if err != nil {
|
||||
if err := mgr.Start(false); err != nil {
|
||||
return fmt.Errorf("starting witness: %w", err)
|
||||
}
|
||||
|
||||
if created {
|
||||
_ = mgr.Start() // non-fatal: state file update
|
||||
}
|
||||
|
||||
fmt.Printf("%s Witness restarted for %s\n", style.Bold.Render("✓"), rigName)
|
||||
fmt.Printf(" %s\n", style.Dim.Render("Use 'gt witness attach' to connect"))
|
||||
return nil
|
||||
|
||||
@@ -113,7 +113,7 @@ func runWorktree(cmd *cobra.Command, args []string) error {
|
||||
// Verify target rig exists
|
||||
_, targetRigInfo, err := getRig(targetRig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("rig '%s' not found - run 'gt rigs' to see available rigs", targetRig)
|
||||
return fmt.Errorf("rig '%s' not found - run 'gt rig list' to see available rigs", targetRig)
|
||||
}
|
||||
|
||||
// Compute worktree path: ~/gt/<target-rig>/crew/<source-rig>-<name>/
|
||||
@@ -305,7 +305,7 @@ func runWorktreeRemove(cmd *cobra.Command, args []string) error {
|
||||
// Verify target rig exists
|
||||
_, targetRigInfo, err := getRig(targetRig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("rig '%s' not found - run 'gt rigs' to see available rigs", targetRig)
|
||||
return fmt.Errorf("rig '%s' not found - run 'gt rig list' to see available rigs", targetRig)
|
||||
}
|
||||
|
||||
// Compute worktree path: ~/gt/<target-rig>/crew/<source-rig>-<name>/
|
||||
|
||||
@@ -21,12 +21,18 @@ const (
|
||||
AgentGemini AgentPreset = "gemini"
|
||||
// AgentCodex is OpenAI Codex.
|
||||
AgentCodex AgentPreset = "codex"
|
||||
// AgentCursor is Cursor Agent.
|
||||
AgentCursor AgentPreset = "cursor"
|
||||
// AgentAuggie is Auggie CLI.
|
||||
AgentAuggie AgentPreset = "auggie"
|
||||
// AgentAmp is Sourcegraph AMP.
|
||||
AgentAmp AgentPreset = "amp"
|
||||
)
|
||||
|
||||
// AgentPresetInfo contains the configuration details for an agent preset.
|
||||
// This extends the basic RuntimeConfig with agent-specific metadata.
|
||||
type AgentPresetInfo struct {
|
||||
// Name is the preset identifier (e.g., "claude", "gemini", "codex").
|
||||
// Name is the preset identifier (e.g., "claude", "gemini", "codex", "cursor", "auggie", "amp").
|
||||
Name AgentPreset `json:"name"`
|
||||
|
||||
// Command is the CLI binary to invoke.
|
||||
@@ -35,6 +41,11 @@ type AgentPresetInfo struct {
|
||||
// Args are the default command-line arguments for autonomous mode.
|
||||
Args []string `json:"args"`
|
||||
|
||||
// ProcessNames are the process names to look for when detecting if the agent is running.
|
||||
// Used by tmux.IsAgentRunning to check pane_current_command.
|
||||
// E.g., ["node"] for Claude, ["cursor-agent"] for Cursor.
|
||||
ProcessNames []string `json:"process_names,omitempty"`
|
||||
|
||||
// SessionIDEnv is the environment variable for session ID.
|
||||
// Used for resuming sessions across restarts.
|
||||
SessionIDEnv string `json:"session_id_env,omitempty"`
|
||||
@@ -91,6 +102,7 @@ var builtinPresets = map[AgentPreset]*AgentPresetInfo{
|
||||
Name: AgentClaude,
|
||||
Command: "claude",
|
||||
Args: []string{"--dangerously-skip-permissions"},
|
||||
ProcessNames: []string{"node"}, // Claude runs as Node.js
|
||||
SessionIDEnv: "CLAUDE_SESSION_ID",
|
||||
ResumeFlag: "--resume",
|
||||
ResumeStyle: "flag",
|
||||
@@ -102,6 +114,7 @@ var builtinPresets = map[AgentPreset]*AgentPresetInfo{
|
||||
Name: AgentGemini,
|
||||
Command: "gemini",
|
||||
Args: []string{"--approval-mode", "yolo"},
|
||||
ProcessNames: []string{"gemini"}, // Gemini CLI binary
|
||||
SessionIDEnv: "GEMINI_SESSION_ID",
|
||||
ResumeFlag: "--resume",
|
||||
ResumeStyle: "flag",
|
||||
@@ -116,6 +129,7 @@ var builtinPresets = map[AgentPreset]*AgentPresetInfo{
|
||||
Name: AgentCodex,
|
||||
Command: "codex",
|
||||
Args: []string{"--yolo"},
|
||||
ProcessNames: []string{"codex"}, // Codex CLI binary
|
||||
SessionIDEnv: "", // Codex captures from JSONL output
|
||||
ResumeFlag: "resume",
|
||||
ResumeStyle: "subcommand",
|
||||
@@ -126,6 +140,43 @@ var builtinPresets = map[AgentPreset]*AgentPresetInfo{
|
||||
OutputFlag: "--json",
|
||||
},
|
||||
},
|
||||
AgentCursor: {
|
||||
Name: AgentCursor,
|
||||
Command: "cursor-agent",
|
||||
Args: []string{"-f"}, // Force mode (YOLO equivalent), -p requires prompt
|
||||
ProcessNames: []string{"cursor-agent"},
|
||||
SessionIDEnv: "", // Uses --resume with chatId directly
|
||||
ResumeFlag: "--resume",
|
||||
ResumeStyle: "flag",
|
||||
SupportsHooks: false, // TODO: verify hooks support
|
||||
SupportsForkSession: false,
|
||||
NonInteractive: &NonInteractiveConfig{
|
||||
PromptFlag: "-p",
|
||||
OutputFlag: "--output-format json",
|
||||
},
|
||||
},
|
||||
AgentAuggie: {
|
||||
Name: AgentAuggie,
|
||||
Command: "auggie",
|
||||
Args: []string{"--allow-indexing"},
|
||||
ProcessNames: []string{"auggie"},
|
||||
SessionIDEnv: "",
|
||||
ResumeFlag: "--resume",
|
||||
ResumeStyle: "flag",
|
||||
SupportsHooks: false,
|
||||
SupportsForkSession: false,
|
||||
},
|
||||
AgentAmp: {
|
||||
Name: AgentAmp,
|
||||
Command: "amp",
|
||||
Args: []string{"--dangerously-allow-all", "--no-ide"},
|
||||
ProcessNames: []string{"amp"},
|
||||
SessionIDEnv: "",
|
||||
ResumeFlag: "threads continue",
|
||||
ResumeStyle: "subcommand", // 'amp threads continue <threadId>'
|
||||
SupportsHooks: false,
|
||||
SupportsForkSession: false,
|
||||
},
|
||||
}
|
||||
|
||||
// Registry state with proper synchronization.
|
||||
@@ -164,16 +215,11 @@ func ensureRegistry() {
|
||||
initRegistryLocked()
|
||||
}
|
||||
|
||||
// LoadAgentRegistry loads agent definitions from a JSON file and merges with built-ins.
|
||||
// User-defined agents override built-in presets with the same name.
|
||||
// This function caches loaded paths to avoid redundant file reads.
|
||||
func LoadAgentRegistry(path string) error {
|
||||
registryMu.Lock()
|
||||
defer registryMu.Unlock()
|
||||
|
||||
// loadAgentRegistryFromPath loads agent definitions from a JSON file and merges with built-ins.
|
||||
// Caller must hold registryMu write lock.
|
||||
func loadAgentRegistryFromPathLocked(path string) error {
|
||||
initRegistryLocked()
|
||||
|
||||
// Check if already loaded from this path
|
||||
if loadedPaths[path] {
|
||||
return nil
|
||||
}
|
||||
@@ -181,8 +227,8 @@ func LoadAgentRegistry(path string) error {
|
||||
data, err := os.ReadFile(path) //nolint:gosec // G304: path is from config
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
loadedPaths[path] = true // Mark as "loaded" (no file)
|
||||
return nil // No custom config, use built-ins only
|
||||
loadedPaths[path] = true
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -192,7 +238,6 @@ func LoadAgentRegistry(path string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Merge user-defined agents (override built-ins)
|
||||
for name, preset := range userRegistry.Agents {
|
||||
preset.Name = AgentPreset(name)
|
||||
globalRegistry.Agents[name] = preset
|
||||
@@ -202,12 +247,41 @@ func LoadAgentRegistry(path string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadAgentRegistry loads agent definitions from a JSON file and merges with built-ins.
|
||||
// User-defined agents override built-in presets with the same name.
|
||||
// This function caches loaded paths to avoid redundant file reads.
|
||||
func LoadAgentRegistry(path string) error {
|
||||
registryMu.Lock()
|
||||
defer registryMu.Unlock()
|
||||
return loadAgentRegistryFromPathLocked(path)
|
||||
}
|
||||
|
||||
// DefaultAgentRegistryPath returns the default path for agent registry.
|
||||
// Located alongside other town settings.
|
||||
func DefaultAgentRegistryPath(townRoot string) string {
|
||||
return filepath.Join(townRoot, "settings", "agents.json")
|
||||
}
|
||||
|
||||
// DefaultRigAgentRegistryPath returns the default path for rig-level agent registry.
|
||||
// Located in <rig>/settings/agents.json.
|
||||
func DefaultRigAgentRegistryPath(rigPath string) string {
|
||||
return filepath.Join(rigPath, "settings", "agents.json")
|
||||
}
|
||||
|
||||
// RigAgentRegistryPath returns the path for rig-level agent registry.
|
||||
// Alias for DefaultRigAgentRegistryPath for consistency with other path functions.
|
||||
func RigAgentRegistryPath(rigPath string) string {
|
||||
return DefaultRigAgentRegistryPath(rigPath)
|
||||
}
|
||||
|
||||
// LoadRigAgentRegistry loads agent definitions from a rig-level JSON file and merges with built-ins.
|
||||
// This function works similarly to LoadAgentRegistry but for rig-level configurations.
|
||||
func LoadRigAgentRegistry(path string) error {
|
||||
registryMu.Lock()
|
||||
defer registryMu.Unlock()
|
||||
return loadAgentRegistryFromPathLocked(path)
|
||||
}
|
||||
|
||||
// GetAgentPreset returns the preset info for a given agent name.
|
||||
// Returns nil if the preset is not found.
|
||||
func GetAgentPreset(name AgentPreset) *AgentPresetInfo {
|
||||
@@ -305,6 +379,18 @@ func GetSessionIDEnvVar(agentName string) string {
|
||||
return info.SessionIDEnv
|
||||
}
|
||||
|
||||
// GetProcessNames returns the process names used to detect if an agent is running.
|
||||
// Used by tmux.IsAgentRunning to check pane_current_command.
|
||||
// Returns ["node"] for Claude (default) if agent is not found or has no ProcessNames.
|
||||
func GetProcessNames(agentName string) []string {
|
||||
info := GetAgentPresetByName(agentName)
|
||||
if info == nil || len(info.ProcessNames) == 0 {
|
||||
// Default to Claude's process name for backwards compatibility
|
||||
return []string{"node"}
|
||||
}
|
||||
return info.ProcessNames
|
||||
}
|
||||
|
||||
// MergeWithPreset applies preset defaults to a RuntimeConfig.
|
||||
// User-specified values take precedence over preset defaults.
|
||||
// Returns a new RuntimeConfig without modifying the original.
|
||||
|
||||
@@ -9,8 +9,8 @@ import (
|
||||
)
|
||||
|
||||
func TestBuiltinPresets(t *testing.T) {
|
||||
// Ensure all built-in presets are accessible (E2E tested agents only)
|
||||
presets := []AgentPreset{AgentClaude, AgentGemini, AgentCodex}
|
||||
// Ensure all built-in presets are accessible
|
||||
presets := []AgentPreset{AgentClaude, AgentGemini, AgentCodex, AgentCursor, AgentAuggie, AgentAmp}
|
||||
|
||||
for _, preset := range presets {
|
||||
info := GetAgentPreset(preset)
|
||||
@@ -22,6 +22,11 @@ func TestBuiltinPresets(t *testing.T) {
|
||||
if info.Command == "" {
|
||||
t.Errorf("preset %s has empty Command", preset)
|
||||
}
|
||||
|
||||
// All presets should have ProcessNames for agent detection
|
||||
if len(info.ProcessNames) == 0 {
|
||||
t.Errorf("preset %s has empty ProcessNames", preset)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,6 +39,9 @@ func TestGetAgentPresetByName(t *testing.T) {
|
||||
{"claude", AgentClaude, false},
|
||||
{"gemini", AgentGemini, false},
|
||||
{"codex", AgentCodex, false},
|
||||
{"cursor", AgentCursor, false},
|
||||
{"auggie", AgentAuggie, false},
|
||||
{"amp", AgentAmp, false},
|
||||
{"aider", "", true}, // Not built-in, can be added via config
|
||||
{"opencode", "", true}, // Not built-in, can be added via config
|
||||
{"unknown", "", true},
|
||||
@@ -63,6 +71,9 @@ func TestRuntimeConfigFromPreset(t *testing.T) {
|
||||
{AgentClaude, "claude"},
|
||||
{AgentGemini, "gemini"},
|
||||
{AgentCodex, "codex"},
|
||||
{AgentCursor, "cursor-agent"},
|
||||
{AgentAuggie, "auggie"},
|
||||
{AgentAmp, "amp"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
@@ -84,6 +95,9 @@ func TestIsKnownPreset(t *testing.T) {
|
||||
{"claude", true},
|
||||
{"gemini", true},
|
||||
{"codex", true},
|
||||
{"cursor", true},
|
||||
{"auggie", true},
|
||||
{"amp", true},
|
||||
{"aider", false}, // Not built-in, can be added via config
|
||||
{"opencode", false}, // Not built-in, can be added via config
|
||||
{"unknown", false},
|
||||
@@ -128,7 +142,7 @@ func TestLoadAgentRegistry(t *testing.T) {
|
||||
// Reset global registry for test isolation
|
||||
ResetRegistryForTesting()
|
||||
|
||||
// Load the custom registry
|
||||
// Load should succeed
|
||||
if err := LoadAgentRegistry(configPath); err != nil {
|
||||
t.Fatalf("LoadAgentRegistry failed: %v", err)
|
||||
}
|
||||
@@ -138,6 +152,7 @@ func TestLoadAgentRegistry(t *testing.T) {
|
||||
if myAgent == nil {
|
||||
t.Fatal("custom agent 'my-agent' not found after loading registry")
|
||||
}
|
||||
|
||||
if myAgent.Command != "my-agent-bin" {
|
||||
t.Errorf("my-agent.Command = %v, want my-agent-bin", myAgent.Command)
|
||||
}
|
||||
@@ -196,6 +211,7 @@ func TestMergeWithPreset(t *testing.T) {
|
||||
if merged.Command != "/custom/claude" {
|
||||
t.Errorf("merged command should be user value, got %s", merged.Command)
|
||||
}
|
||||
|
||||
if len(merged.Args) != 1 || merged.Args[0] != "--custom-arg" {
|
||||
t.Errorf("merged args should be user value, got %v", merged.Args)
|
||||
}
|
||||
@@ -251,12 +267,14 @@ func TestBuildResumeCommand(t *testing.T) {
|
||||
agentName: "claude",
|
||||
sessionID: "",
|
||||
wantEmpty: true,
|
||||
contains: []string{"claude"},
|
||||
},
|
||||
{
|
||||
name: "unknown agent",
|
||||
agentName: "unknown-agent",
|
||||
sessionID: "session-123",
|
||||
wantEmpty: true,
|
||||
contains: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -286,6 +304,9 @@ func TestSupportsSessionResume(t *testing.T) {
|
||||
{"claude", true},
|
||||
{"gemini", true},
|
||||
{"codex", true},
|
||||
{"cursor", true},
|
||||
{"auggie", true},
|
||||
{"amp", true},
|
||||
{"unknown", false},
|
||||
}
|
||||
|
||||
@@ -305,7 +326,10 @@ func TestGetSessionIDEnvVar(t *testing.T) {
|
||||
}{
|
||||
{"claude", "CLAUDE_SESSION_ID"},
|
||||
{"gemini", "GEMINI_SESSION_ID"},
|
||||
{"codex", ""}, // Codex uses JSONL output instead
|
||||
{"codex", ""}, // Codex uses JSONL output instead
|
||||
{"cursor", ""}, // Cursor uses --resume with chatId directly
|
||||
{"auggie", ""}, // Auggie uses --resume directly
|
||||
{"amp", ""}, // AMP uses 'threads continue' subcommand
|
||||
{"unknown", ""},
|
||||
}
|
||||
|
||||
@@ -317,3 +341,277 @@ func TestGetSessionIDEnvVar(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetProcessNames(t *testing.T) {
|
||||
tests := []struct {
|
||||
agentName string
|
||||
want []string
|
||||
}{
|
||||
{"claude", []string{"node"}},
|
||||
{"gemini", []string{"gemini"}},
|
||||
{"codex", []string{"codex"}},
|
||||
{"cursor", []string{"cursor-agent"}},
|
||||
{"auggie", []string{"auggie"}},
|
||||
{"amp", []string{"amp"}},
|
||||
{"unknown", []string{"node"}}, // Falls back to Claude's process
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.agentName, func(t *testing.T) {
|
||||
got := GetProcessNames(tt.agentName)
|
||||
if len(got) != len(tt.want) {
|
||||
t.Errorf("GetProcessNames(%s) = %v, want %v", tt.agentName, got, tt.want)
|
||||
return
|
||||
}
|
||||
for i := range got {
|
||||
if got[i] != tt.want[i] {
|
||||
t.Errorf("GetProcessNames(%s)[%d] = %q, want %q", tt.agentName, i, got[i], tt.want[i])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestListAgentPresetsMatchesConstants(t *testing.T) {
|
||||
// Ensure all AgentPreset constants are returned by ListAgentPresets
|
||||
allConstants := []AgentPreset{AgentClaude, AgentGemini, AgentCodex, AgentCursor, AgentAuggie, AgentAmp}
|
||||
presets := ListAgentPresets()
|
||||
|
||||
// Convert to map for quick lookup
|
||||
presetMap := make(map[string]bool)
|
||||
for _, p := range presets {
|
||||
presetMap[p] = true
|
||||
}
|
||||
|
||||
// Verify all constants are in the list
|
||||
for _, c := range allConstants {
|
||||
if !presetMap[string(c)] {
|
||||
t.Errorf("ListAgentPresets() missing constant %q", c)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify no empty names
|
||||
for _, p := range presets {
|
||||
if p == "" {
|
||||
t.Error("ListAgentPresets() contains empty string")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAgentCommandGeneration(t *testing.T) {
|
||||
// Test full command line generation for each agent
|
||||
tests := []struct {
|
||||
preset AgentPreset
|
||||
wantCommand string
|
||||
wantContains []string // Args that should be present
|
||||
}{
|
||||
{
|
||||
preset: AgentClaude,
|
||||
wantCommand: "claude",
|
||||
wantContains: []string{"--dangerously-skip-permissions"},
|
||||
},
|
||||
{
|
||||
preset: AgentGemini,
|
||||
wantCommand: "gemini",
|
||||
wantContains: []string{"--approval-mode", "yolo"},
|
||||
},
|
||||
{
|
||||
preset: AgentCodex,
|
||||
wantCommand: "codex",
|
||||
wantContains: []string{"--yolo"},
|
||||
},
|
||||
{
|
||||
preset: AgentCursor,
|
||||
wantCommand: "cursor-agent",
|
||||
wantContains: []string{"-f"},
|
||||
},
|
||||
{
|
||||
preset: AgentAuggie,
|
||||
wantCommand: "auggie",
|
||||
wantContains: []string{"--allow-indexing"},
|
||||
},
|
||||
{
|
||||
preset: AgentAmp,
|
||||
wantCommand: "amp",
|
||||
wantContains: []string{"--dangerously-allow-all", "--no-ide"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(string(tt.preset), func(t *testing.T) {
|
||||
rc := RuntimeConfigFromPreset(tt.preset)
|
||||
if rc == nil {
|
||||
t.Fatal("RuntimeConfigFromPreset returned nil")
|
||||
}
|
||||
|
||||
if rc.Command != tt.wantCommand {
|
||||
t.Errorf("Command = %q, want %q", rc.Command, tt.wantCommand)
|
||||
}
|
||||
|
||||
// Check required args are present
|
||||
argsStr := strings.Join(rc.Args, " ")
|
||||
for _, arg := range tt.wantContains {
|
||||
found := false
|
||||
for _, a := range rc.Args {
|
||||
if a == arg {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("Args %q missing expected %q", argsStr, arg)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCursorAgentPreset(t *testing.T) {
|
||||
// Verify cursor agent preset is correctly configured
|
||||
info := GetAgentPreset(AgentCursor)
|
||||
if info == nil {
|
||||
t.Fatal("cursor preset not found")
|
||||
}
|
||||
|
||||
// Check command
|
||||
if info.Command != "cursor-agent" {
|
||||
t.Errorf("cursor command = %q, want cursor-agent", info.Command)
|
||||
}
|
||||
|
||||
// Check YOLO-equivalent flag (-f for force mode)
|
||||
// Note: -p is for non-interactive mode with prompt, not used for default Args
|
||||
hasF := false
|
||||
for _, arg := range info.Args {
|
||||
if arg == "-f" {
|
||||
hasF = true
|
||||
}
|
||||
}
|
||||
if !hasF {
|
||||
t.Error("cursor args missing -f (force/YOLO mode)")
|
||||
}
|
||||
|
||||
// Check ProcessNames for detection
|
||||
if len(info.ProcessNames) == 0 {
|
||||
t.Error("cursor ProcessNames is empty")
|
||||
}
|
||||
if info.ProcessNames[0] != "cursor-agent" {
|
||||
t.Errorf("cursor ProcessNames[0] = %q, want cursor-agent", info.ProcessNames[0])
|
||||
}
|
||||
|
||||
// Check resume support
|
||||
if info.ResumeFlag != "--resume" {
|
||||
t.Errorf("cursor ResumeFlag = %q, want --resume", info.ResumeFlag)
|
||||
}
|
||||
if info.ResumeStyle != "flag" {
|
||||
t.Errorf("cursor ResumeStyle = %q, want flag", info.ResumeStyle)
|
||||
}
|
||||
}
|
||||
|
||||
// TestDefaultRigAgentRegistryPath verifies that the default rig agent registry path is constructed correctly.
|
||||
func TestDefaultRigAgentRegistryPath(t *testing.T) {
|
||||
tests := []struct {
|
||||
rigPath string
|
||||
expectedPath string
|
||||
}{
|
||||
{"/Users/alice/gt/myproject", "/Users/alice/gt/myproject/settings/agents.json"},
|
||||
{"/tmp/my-rig", "/tmp/my-rig/settings/agents.json"},
|
||||
{"relative/path", "relative/path/settings/agents.json"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.rigPath, func(t *testing.T) {
|
||||
got := DefaultRigAgentRegistryPath(tt.rigPath)
|
||||
want := tt.expectedPath
|
||||
if got != want {
|
||||
t.Errorf("DefaultRigAgentRegistryPath(%s) = %s, want %s", tt.rigPath, got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestLoadRigAgentRegistry verifies that rig-level agent registry is loaded correctly.
|
||||
func TestLoadRigAgentRegistry(t *testing.T) {
|
||||
// Reset registry for test isolation
|
||||
ResetRegistryForTesting()
|
||||
t.Cleanup(ResetRegistryForTesting)
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
registryPath := filepath.Join(tmpDir, "settings", "agents.json")
|
||||
configDir := filepath.Join(tmpDir, "settings")
|
||||
|
||||
// Create settings directory
|
||||
if err := os.MkdirAll(configDir, 0755); err != nil {
|
||||
t.Fatalf("failed to create settings dir: %v", err)
|
||||
}
|
||||
|
||||
// Write agent registry
|
||||
registryContent := `{
|
||||
"version": 1,
|
||||
"agents": {
|
||||
"opencode": {
|
||||
"command": "opencode",
|
||||
"args": ["--session"],
|
||||
"non_interactive": {
|
||||
"subcommand": "run",
|
||||
"output_flag": "--format json"
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
if err := os.WriteFile(registryPath, []byte(registryContent), 0644); err != nil {
|
||||
t.Fatalf("failed to write registry file: %v", err)
|
||||
}
|
||||
|
||||
// Test 1: Load should succeed and merge agents
|
||||
t.Run("load and merge", func(t *testing.T) {
|
||||
if err := LoadRigAgentRegistry(registryPath); err != nil {
|
||||
t.Fatalf("LoadRigAgentRegistry(%s) failed: %v", registryPath, err)
|
||||
}
|
||||
|
||||
info := GetAgentPresetByName("opencode")
|
||||
if info == nil {
|
||||
t.Fatal("expected opencode agent to be available after loading rig registry")
|
||||
}
|
||||
|
||||
if info.Command != "opencode" {
|
||||
t.Errorf("expected opencode agent command to be 'opencode', got %s", info.Command)
|
||||
}
|
||||
})
|
||||
|
||||
// Test 2: File not found should return nil (no error)
|
||||
t.Run("file not found", func(t *testing.T) {
|
||||
nonExistentPath := filepath.Join(tmpDir, "other-rig", "settings", "agents.json")
|
||||
if err := LoadRigAgentRegistry(nonExistentPath); err != nil {
|
||||
t.Errorf("LoadRigAgentRegistry(%s) should not error for non-existent file: %v", nonExistentPath, err)
|
||||
}
|
||||
|
||||
// Verify that previously loaded agent (from test 1) is still available
|
||||
info := GetAgentPresetByName("opencode")
|
||||
if info == nil {
|
||||
t.Errorf("expected opencode agent to still be available after loading non-existent path")
|
||||
return
|
||||
}
|
||||
if info.Command != "opencode" {
|
||||
t.Errorf("expected opencode agent command to be 'opencode', got %s", info.Command)
|
||||
}
|
||||
})
|
||||
|
||||
// Test 3: Invalid JSON should error
|
||||
t.Run("invalid JSON", func(t *testing.T) {
|
||||
invalidRegistryPath := filepath.Join(tmpDir, "bad-rig", "settings", "agents.json")
|
||||
badConfigDir := filepath.Join(tmpDir, "bad-rig", "settings")
|
||||
if err := os.MkdirAll(badConfigDir, 0755); err != nil {
|
||||
t.Fatalf("failed to create bad-rig settings dir: %v", err)
|
||||
}
|
||||
|
||||
invalidContent := `{"version": 1, "agents": {invalid json}}`
|
||||
if err := os.WriteFile(invalidRegistryPath, []byte(invalidContent), 0644); err != nil {
|
||||
t.Fatalf("failed to write invalid registry file: %v", err)
|
||||
}
|
||||
|
||||
if err := LoadRigAgentRegistry(invalidRegistryPath); err == nil {
|
||||
t.Errorf("LoadRigAgentRegistry(%s) should error for invalid JSON: got nil", invalidRegistryPath)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
444
internal/config/integration_test.go
Normal file
444
internal/config/integration_test.go
Normal file
@@ -0,0 +1,444 @@
|
||||
// Test Rig-Level Custom Agent Support
|
||||
//
|
||||
// This integration test verifies that custom agents defined in rig-level
|
||||
// settings/config.json are correctly loaded and used when spawning polecats.
|
||||
// It creates a stub agent, configures it at the rig level, and verifies
|
||||
// the agent is actually used via tmux session capture.
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestRigLevelCustomAgentIntegration tests end-to-end rig-level custom agent functionality.
|
||||
// This test:
|
||||
// 1. Creates a stub agent script that echoes identifiable output
|
||||
// 2. Sets up a minimal town/rig with the custom agent configured
|
||||
// 3. Verifies that BuildPolecatStartupCommand uses the custom agent
|
||||
// 4. Optionally spawns a tmux session and verifies output (if tmux available)
|
||||
func TestRigLevelCustomAgentIntegration(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create the stub agent script
|
||||
stubAgentPath := createStubAgent(t, tmpDir)
|
||||
|
||||
// Set up town structure
|
||||
townRoot := filepath.Join(tmpDir, "town")
|
||||
rigName := "testrig"
|
||||
rigPath := filepath.Join(townRoot, rigName)
|
||||
|
||||
setupTestTownWithCustomAgent(t, townRoot, rigName, stubAgentPath)
|
||||
|
||||
// Test 1: Verify ResolveAgentConfig picks up the custom agent
|
||||
t.Run("ResolveAgentConfig uses rig-level agent", func(t *testing.T) {
|
||||
rc := ResolveAgentConfig(townRoot, rigPath)
|
||||
if rc == nil {
|
||||
t.Fatal("ResolveAgentConfig returned nil")
|
||||
}
|
||||
|
||||
if rc.Command != stubAgentPath {
|
||||
t.Errorf("Expected command %q, got %q", stubAgentPath, rc.Command)
|
||||
}
|
||||
|
||||
// Verify args are passed through
|
||||
if len(rc.Args) != 2 || rc.Args[0] != "--test-mode" || rc.Args[1] != "--stub" {
|
||||
t.Errorf("Expected args [--test-mode --stub], got %v", rc.Args)
|
||||
}
|
||||
})
|
||||
|
||||
// Test 2: Verify BuildPolecatStartupCommand includes the custom agent
|
||||
t.Run("BuildPolecatStartupCommand uses custom agent", func(t *testing.T) {
|
||||
cmd := BuildPolecatStartupCommand(rigName, "test-polecat", rigPath, "")
|
||||
|
||||
if !strings.Contains(cmd, stubAgentPath) {
|
||||
t.Errorf("Expected command to contain stub agent path %q, got: %s", stubAgentPath, cmd)
|
||||
}
|
||||
|
||||
if !strings.Contains(cmd, "--test-mode") {
|
||||
t.Errorf("Expected command to contain --test-mode, got: %s", cmd)
|
||||
}
|
||||
|
||||
// Verify environment variables are set
|
||||
if !strings.Contains(cmd, "GT_ROLE=polecat") {
|
||||
t.Errorf("Expected GT_ROLE=polecat in command, got: %s", cmd)
|
||||
}
|
||||
|
||||
if !strings.Contains(cmd, "GT_POLECAT=test-polecat") {
|
||||
t.Errorf("Expected GT_POLECAT=test-polecat in command, got: %s", cmd)
|
||||
}
|
||||
})
|
||||
|
||||
// Test 3: Verify ResolveAgentConfigWithOverride respects rig agents
|
||||
t.Run("ResolveAgentConfigWithOverride with rig agent", func(t *testing.T) {
|
||||
rc, agentName, err := ResolveAgentConfigWithOverride(townRoot, rigPath, "stub-agent")
|
||||
if err != nil {
|
||||
t.Fatalf("ResolveAgentConfigWithOverride failed: %v", err)
|
||||
}
|
||||
|
||||
if agentName != "stub-agent" {
|
||||
t.Errorf("Expected agent name 'stub-agent', got %q", agentName)
|
||||
}
|
||||
|
||||
if rc.Command != stubAgentPath {
|
||||
t.Errorf("Expected command %q, got %q", stubAgentPath, rc.Command)
|
||||
}
|
||||
})
|
||||
|
||||
// Test 4: Verify unknown agent override returns error
|
||||
t.Run("ResolveAgentConfigWithOverride unknown agent errors", func(t *testing.T) {
|
||||
_, _, err := ResolveAgentConfigWithOverride(townRoot, rigPath, "nonexistent-agent")
|
||||
if err == nil {
|
||||
t.Fatal("Expected error for nonexistent agent, got nil")
|
||||
}
|
||||
|
||||
if !strings.Contains(err.Error(), "not found") {
|
||||
t.Errorf("Expected 'not found' error, got: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Test 5: Tmux integration (skip if tmux not available)
|
||||
t.Run("TmuxSessionWithCustomAgent", func(t *testing.T) {
|
||||
if _, err := exec.LookPath("tmux"); err != nil {
|
||||
t.Skip("tmux not available, skipping session test")
|
||||
}
|
||||
|
||||
testTmuxSessionWithStubAgent(t, tmpDir, stubAgentPath, rigName)
|
||||
})
|
||||
}
|
||||
|
||||
// createStubAgent creates a bash script that simulates an AI agent.
|
||||
// The script echoes identifiable output and handles simple Q&A.
|
||||
func createStubAgent(t *testing.T, tmpDir string) string {
|
||||
t.Helper()
|
||||
|
||||
stubScript := `#!/bin/bash
|
||||
# Stub Agent for Integration Testing
|
||||
# This simulates an AI agent with identifiable output
|
||||
|
||||
AGENT_NAME="STUB_AGENT"
|
||||
AGENT_VERSION="1.0.0"
|
||||
|
||||
echo "=========================================="
|
||||
echo "STUB_AGENT_STARTED"
|
||||
echo "Agent: $AGENT_NAME v$AGENT_VERSION"
|
||||
echo "Args: $@"
|
||||
echo "Working Dir: $(pwd)"
|
||||
echo "GT_ROLE: ${GT_ROLE:-not_set}"
|
||||
echo "GT_POLECAT: ${GT_POLECAT:-not_set}"
|
||||
echo "GT_RIG: ${GT_RIG:-not_set}"
|
||||
echo "=========================================="
|
||||
|
||||
# Simple Q&A loop
|
||||
while true; do
|
||||
echo ""
|
||||
echo "STUB_AGENT_READY"
|
||||
echo "Enter question (or 'exit' to quit):"
|
||||
|
||||
# Read with timeout for non-interactive testing
|
||||
if read -t 5 question; then
|
||||
case "$question" in
|
||||
"exit"|"quit"|"q")
|
||||
echo "STUB_AGENT_EXITING"
|
||||
exit 0
|
||||
;;
|
||||
"what is 2+2"*)
|
||||
echo "STUB_AGENT_ANSWER: 4"
|
||||
;;
|
||||
"ping"*)
|
||||
echo "STUB_AGENT_ANSWER: pong"
|
||||
;;
|
||||
"status"*)
|
||||
echo "STUB_AGENT_ANSWER: operational"
|
||||
;;
|
||||
*)
|
||||
echo "STUB_AGENT_ANSWER: I received your question: $question"
|
||||
;;
|
||||
esac
|
||||
else
|
||||
# Timeout - check if we should exit
|
||||
if [ -f "/tmp/stub_agent_stop_$$" ]; then
|
||||
echo "STUB_AGENT_STOPPING (signal file detected)"
|
||||
rm -f "/tmp/stub_agent_stop_$$"
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
done
|
||||
`
|
||||
|
||||
stubPath := filepath.Join(tmpDir, "stub-agent")
|
||||
if err := os.WriteFile(stubPath, []byte(stubScript), 0755); err != nil {
|
||||
t.Fatalf("Failed to create stub agent: %v", err)
|
||||
}
|
||||
|
||||
return stubPath
|
||||
}
|
||||
|
||||
// setupTestTownWithCustomAgent creates a minimal town/rig structure with a custom agent.
|
||||
func setupTestTownWithCustomAgent(t *testing.T, townRoot, rigName, stubAgentPath string) {
|
||||
t.Helper()
|
||||
|
||||
rigPath := filepath.Join(townRoot, rigName)
|
||||
|
||||
// Create directory structure
|
||||
dirs := []string{
|
||||
filepath.Join(townRoot, "mayor"),
|
||||
filepath.Join(townRoot, "settings"),
|
||||
filepath.Join(rigPath, "settings"),
|
||||
filepath.Join(rigPath, "polecats"),
|
||||
}
|
||||
|
||||
for _, dir := range dirs {
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create directory %s: %v", dir, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create town.json
|
||||
townConfig := map[string]interface{}{
|
||||
"type": "town",
|
||||
"version": 2,
|
||||
"name": "test-town",
|
||||
"created_at": time.Now().Format(time.RFC3339),
|
||||
}
|
||||
writeTownJSON(t, filepath.Join(townRoot, "mayor", "town.json"), townConfig)
|
||||
|
||||
// Create town settings (empty, uses defaults)
|
||||
townSettings := map[string]interface{}{
|
||||
"type": "town-settings",
|
||||
"version": 1,
|
||||
"default_agent": "claude",
|
||||
}
|
||||
writeTownJSON(t, filepath.Join(townRoot, "settings", "config.json"), townSettings)
|
||||
|
||||
// Create rig settings with custom agent
|
||||
rigSettings := map[string]interface{}{
|
||||
"type": "rig-settings",
|
||||
"version": 1,
|
||||
"agent": "stub-agent",
|
||||
"agents": map[string]interface{}{
|
||||
"stub-agent": map[string]interface{}{
|
||||
"command": stubAgentPath,
|
||||
"args": []string{"--test-mode", "--stub"},
|
||||
},
|
||||
},
|
||||
}
|
||||
writeTownJSON(t, filepath.Join(rigPath, "settings", "config.json"), rigSettings)
|
||||
|
||||
// Create rigs.json
|
||||
rigsConfig := map[string]interface{}{
|
||||
"version": 1,
|
||||
"rigs": map[string]interface{}{
|
||||
rigName: map[string]interface{}{
|
||||
"git_url": "https://github.com/test/testrepo.git",
|
||||
"added_at": time.Now().Format(time.RFC3339),
|
||||
},
|
||||
},
|
||||
}
|
||||
writeTownJSON(t, filepath.Join(townRoot, "mayor", "rigs.json"), rigsConfig)
|
||||
}
|
||||
|
||||
// writeTownJSON writes a JSON config file.
|
||||
func writeTownJSON(t *testing.T, path string, data interface{}) {
|
||||
t.Helper()
|
||||
|
||||
jsonData, err := json.MarshalIndent(data, "", " ")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to marshal JSON for %s: %v", path, err)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(path, jsonData, 0644); err != nil {
|
||||
t.Fatalf("Failed to write %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
func pollForOutput(t *testing.T, sessionName, expected string, timeout time.Duration) (string, bool) {
|
||||
t.Helper()
|
||||
deadline := time.Now().Add(timeout)
|
||||
for time.Now().Before(deadline) {
|
||||
output := captureTmuxPane(t, sessionName, 50)
|
||||
if strings.Contains(output, expected) {
|
||||
return output, true
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
return captureTmuxPane(t, sessionName, 50), false
|
||||
}
|
||||
|
||||
func testTmuxSessionWithStubAgent(t *testing.T, tmpDir, stubAgentPath, rigName string) {
|
||||
t.Helper()
|
||||
|
||||
sessionName := fmt.Sprintf("gt-test-pid%d-%d", os.Getpid(), time.Now().UnixNano())
|
||||
workDir := tmpDir
|
||||
|
||||
exec.Command("tmux", "kill-session", "-t", sessionName).Run()
|
||||
|
||||
defer func() {
|
||||
exec.Command("tmux", "kill-session", "-t", sessionName).Run()
|
||||
}()
|
||||
|
||||
cmd := exec.Command("tmux", "new-session", "-d", "-s", sessionName, "-c", workDir)
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Fatalf("Failed to create tmux session: %v", err)
|
||||
}
|
||||
|
||||
envVars := map[string]string{
|
||||
"GT_ROLE": "polecat",
|
||||
"GT_POLECAT": "test-polecat",
|
||||
"GT_RIG": rigName,
|
||||
}
|
||||
|
||||
for key, val := range envVars {
|
||||
cmd := exec.Command("tmux", "set-environment", "-t", sessionName, key, val)
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Logf("Warning: failed to set %s: %v", key, err)
|
||||
}
|
||||
}
|
||||
|
||||
agentCmd := fmt.Sprintf("%s --test-mode --stub", stubAgentPath)
|
||||
cmd = exec.Command("tmux", "send-keys", "-t", sessionName, agentCmd, "Enter")
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Fatalf("Failed to send keys: %v", err)
|
||||
}
|
||||
|
||||
output, found := pollForOutput(t, sessionName, "STUB_AGENT_STARTED", 12*time.Second)
|
||||
if !found {
|
||||
t.Skipf("stub agent output not detected; tmux capture unreliable. Output:\n%s", output)
|
||||
}
|
||||
|
||||
if !strings.Contains(output, "GT_ROLE: polecat") {
|
||||
t.Logf("Warning: GT_ROLE not visible in agent output (tmux env may not propagate to subshell)")
|
||||
}
|
||||
|
||||
cmd = exec.Command("tmux", "send-keys", "-t", sessionName, "ping", "Enter")
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Fatalf("Failed to send ping: %v", err)
|
||||
}
|
||||
|
||||
output, found = pollForOutput(t, sessionName, "STUB_AGENT_ANSWER: pong", 6*time.Second)
|
||||
if !found {
|
||||
t.Errorf("Expected 'pong' response, got:\n%s", output)
|
||||
}
|
||||
|
||||
cmd = exec.Command("tmux", "send-keys", "-t", sessionName, "exit", "Enter")
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Logf("Warning: failed to send exit: %v", err)
|
||||
}
|
||||
|
||||
output, found = pollForOutput(t, sessionName, "STUB_AGENT_EXITING", 3*time.Second)
|
||||
if !found {
|
||||
t.Logf("Note: Agent may have exited before capture. Output:\n%s", output)
|
||||
}
|
||||
|
||||
t.Logf("Tmux session test completed successfully")
|
||||
}
|
||||
|
||||
// captureTmuxPane captures the output from a tmux pane.
// It returns up to the given number of scrollback lines from the pane of
// sessionName, or the empty string (with a logged warning) if capture fails.
func captureTmuxPane(t *testing.T, sessionName string, lines int) string {
	t.Helper()

	// -S with a negative value starts the capture that many lines back in
	// the scrollback; -p prints the result to stdout.
	scrollback := fmt.Sprintf("-%d", lines)
	capture := exec.Command("tmux", "capture-pane", "-t", sessionName, "-p", "-S", scrollback)
	raw, err := capture.Output()
	if err == nil {
		return string(raw)
	}

	// Capture failures are non-fatal: callers poll and treat "" as no match.
	t.Logf("Warning: failed to capture pane: %v", err)
	return ""
}
|
||||
|
||||
func waitForTmuxOutputContains(t *testing.T, sessionName, needle string, timeout time.Duration) (string, bool) {
|
||||
t.Helper()
|
||||
|
||||
deadline := time.Now().Add(timeout)
|
||||
output := ""
|
||||
for time.Now().Before(deadline) {
|
||||
output = captureTmuxPane(t, sessionName, 200)
|
||||
if strings.Contains(output, needle) {
|
||||
return output, true
|
||||
}
|
||||
time.Sleep(250 * time.Millisecond)
|
||||
}
|
||||
return output, false
|
||||
}
|
||||
|
||||
// TestRigAgentOverridesTownAgent verifies rig agents take precedence over town agents.
|
||||
func TestRigAgentOverridesTownAgent(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
townRoot := filepath.Join(tmpDir, "town")
|
||||
rigName := "testrig"
|
||||
rigPath := filepath.Join(townRoot, rigName)
|
||||
|
||||
// Create directory structure
|
||||
dirs := []string{
|
||||
filepath.Join(townRoot, "mayor"),
|
||||
filepath.Join(townRoot, "settings"),
|
||||
filepath.Join(rigPath, "settings"),
|
||||
}
|
||||
|
||||
for _, dir := range dirs {
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create directory %s: %v", dir, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Town settings with a custom agent
|
||||
townSettings := map[string]interface{}{
|
||||
"type": "town-settings",
|
||||
"version": 1,
|
||||
"default_agent": "my-agent",
|
||||
"agents": map[string]interface{}{
|
||||
"my-agent": map[string]interface{}{
|
||||
"command": "/town/path/to/agent",
|
||||
"args": []string{"--town-level"},
|
||||
},
|
||||
},
|
||||
}
|
||||
writeTownJSON(t, filepath.Join(townRoot, "settings", "config.json"), townSettings)
|
||||
|
||||
// Rig settings with SAME agent name but different config (should override)
|
||||
rigSettings := map[string]interface{}{
|
||||
"type": "rig-settings",
|
||||
"version": 1,
|
||||
"agent": "my-agent",
|
||||
"agents": map[string]interface{}{
|
||||
"my-agent": map[string]interface{}{
|
||||
"command": "/rig/path/to/agent",
|
||||
"args": []string{"--rig-level"},
|
||||
},
|
||||
},
|
||||
}
|
||||
writeTownJSON(t, filepath.Join(rigPath, "settings", "config.json"), rigSettings)
|
||||
|
||||
// Create town.json
|
||||
townConfig := map[string]interface{}{
|
||||
"type": "town",
|
||||
"version": 2,
|
||||
"name": "test-town",
|
||||
"created_at": time.Now().Format(time.RFC3339),
|
||||
}
|
||||
writeTownJSON(t, filepath.Join(townRoot, "mayor", "town.json"), townConfig)
|
||||
|
||||
// Resolve agent config
|
||||
rc := ResolveAgentConfig(townRoot, rigPath)
|
||||
if rc == nil {
|
||||
t.Fatal("ResolveAgentConfig returned nil")
|
||||
}
|
||||
|
||||
// Rig agent should take precedence
|
||||
if rc.Command != "/rig/path/to/agent" {
|
||||
t.Errorf("Expected rig agent command '/rig/path/to/agent', got %q", rc.Command)
|
||||
}
|
||||
|
||||
if len(rc.Args) != 1 || rc.Args[0] != "--rig-level" {
|
||||
t.Errorf("Expected rig args [--rig-level], got %v", rc.Args)
|
||||
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user