Compare commits
393 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5cc2995345 | ||
|
|
e57297cb1b | ||
|
|
dff6c3fb3c | ||
|
|
fb4c415127 | ||
|
|
b612df0463 | ||
|
|
785d9adfef | ||
|
|
3d7b109395 | ||
|
|
b14835b140 | ||
|
|
35abe21c50 | ||
|
|
405d40ee4b | ||
|
|
748fa73931 | ||
|
|
1dc31024ca | ||
|
|
94c2d71c13 | ||
|
|
02390251fc | ||
|
|
0dfb0be368 | ||
|
|
1feb48dd11 | ||
|
|
58d5226f30 | ||
|
|
c42b5db7ab | ||
|
|
2119841d57 | ||
|
|
2514507a49 | ||
|
|
e4ebd0784a | ||
|
|
1e97d1e637 | ||
|
|
7e5c3dd695 | ||
|
|
0cdcd0a20b | ||
|
|
aba0a5069c | ||
|
|
a8bedd2172 | ||
|
|
b9f5797b9e | ||
|
|
5791cd7e34 | ||
|
|
3931d10af3 | ||
|
|
d67aa0212c | ||
|
|
b333bf8146 | ||
|
|
7016b33b39 | ||
|
|
1a0f2d6b3b | ||
|
|
39b1c11bb6 | ||
|
|
f6fab3afad | ||
|
|
40cc4c9335 | ||
|
|
82079f9715 | ||
|
|
53fd6bad33 | ||
|
|
6e2169de7f | ||
|
|
d0e49a216a | ||
|
|
6616a4726c | ||
|
|
f00b0254f2 | ||
|
|
e12aa45dd6 | ||
|
|
9f06eb94c4 | ||
|
|
7a2090bb15 | ||
|
|
a5bbe24444 | ||
|
|
87f9a7cfd1 | ||
|
|
78001d2c01 | ||
|
|
d96b53e173 | ||
|
|
fa1f812ce9 | ||
|
|
dfd4199396 | ||
|
|
77126283dd | ||
|
|
afc1ff04b1 | ||
|
|
987502ebb3 | ||
|
|
3588dbc5e4 | ||
|
|
4fbe00e224 | ||
|
|
3afd1a1dcd | ||
|
|
535647cefc | ||
|
|
3c44e2202d | ||
|
|
b2b9cbc836 | ||
|
|
035b7775ea | ||
|
|
a8be623eeb | ||
|
|
63a30ce548 | ||
|
|
1b036aadf5 | ||
|
|
9de8859be0 | ||
|
|
560431d2f5 | ||
|
|
aef99753df | ||
|
|
d610d444d7 | ||
|
|
cd347dfdf9 | ||
|
|
d0a1e165e5 | ||
|
|
2b56ee2545 | ||
|
|
9b412707ab | ||
|
|
45951c0fad | ||
|
|
9caf5302d4 | ||
|
|
78ca8bd5bf | ||
|
|
44d5b4fdd2 | ||
|
|
77ac332a41 | ||
|
|
b71188d0b4 | ||
|
|
6bfe61f796 | ||
|
|
2aadb0165b | ||
|
|
05ea767149 | ||
|
|
f4072e58cc | ||
|
|
7c2f9687ec | ||
|
|
e591f2ae25 | ||
|
|
0a6b0b892f | ||
|
|
6a3780d282 | ||
|
|
8357a94cae | ||
|
|
8b393b7c39 | ||
|
|
195ecf7578 | ||
|
|
5218102f49 | ||
|
|
126ec84bb3 | ||
|
|
9a91a1b94f | ||
|
|
f82477d6a6 | ||
|
|
4dd11d4ffa | ||
|
|
7564cd5997 | ||
|
|
5a14053a6b | ||
|
|
d2f7dbd3ae | ||
|
|
65c1fad8ce | ||
|
|
0db2bda6e6 | ||
|
|
48ace2cbf3 | ||
|
|
3d5a66f850 | ||
|
|
b8a679c30c | ||
|
|
183a0d7d8d | ||
|
|
477c28c9d1 | ||
|
|
f58a516b7b | ||
|
|
fd61259336 | ||
|
|
6a22b47ef6 | ||
|
|
5c45b4438a | ||
|
|
08cee416a4 | ||
|
|
2fe23b7be5 | ||
|
|
6c5c671595 | ||
|
|
371074cc67 | ||
|
|
6966eb4c28 | ||
|
|
55a3b9858a | ||
|
|
e59955a580 | ||
|
|
08bc632a03 | ||
|
|
a610283078 | ||
|
|
544cacf36d | ||
|
|
b8eb936219 | ||
|
|
dcf7b81011 | ||
|
|
37f465bde5 | ||
|
|
b73ee91970 | ||
|
|
b41a5ef243 | ||
|
|
4eb3915ce9 | ||
|
|
b28c25b8a2 | ||
|
|
2333b38ecf | ||
|
|
6f9bfec60f | ||
|
|
7421d1554d | ||
|
|
e2bd5ef76c | ||
|
|
61e9a36dfd | ||
|
|
8c200d4a83 | ||
|
|
9cd2696abe | ||
|
|
2b3f287f02 | ||
|
|
021b087a12 | ||
|
|
3cb3a0bbf7 | ||
|
|
7714295a43 | ||
|
|
616ff01e2c | ||
|
|
8d41f817b9 | ||
|
|
3f724336f4 | ||
|
|
576e73a924 | ||
|
|
5ecf8ccaf5 | ||
|
|
238ad8cd95 | ||
|
|
50bcf96afb | ||
|
|
2feefd1731 | ||
|
|
4a856f6e0d | ||
|
|
e853ac3539 | ||
|
|
f14dadc956 | ||
|
|
f19a0ab5d6 | ||
|
|
38d3c0c4f1 | ||
|
|
d4ad4c0726 | ||
|
|
88a74c50f7 | ||
|
|
7ff87ff012 | ||
|
|
bd655f58f9 | ||
|
|
72b03469d1 | ||
|
|
d6a4bc22fd | ||
|
|
3283ee42aa | ||
|
|
b40a6b0736 | ||
|
|
265239d4a1 | ||
|
|
cd67eae044 | ||
|
|
5badb54048 | ||
|
|
4deeba6304 | ||
|
|
93c6c70296 | ||
|
|
bda1dc97c5 | ||
|
|
5823c9fb36 | ||
|
|
885b5023d3 | ||
|
|
4ef93e1d8a | ||
|
|
6d29f34cd0 | ||
|
|
8880c61067 | ||
|
|
0cc4867ad7 | ||
|
|
d8bb9a9ba9 | ||
|
|
8dab7b662a | ||
|
|
938b068145 | ||
|
|
eed5cddc97 | ||
|
|
15d1dc8fa8 | ||
|
|
11b38294d4 | ||
|
|
d4026b79cf | ||
|
|
eb18dbf9e2 | ||
|
|
4d8236e26c | ||
|
|
6b895e56de | ||
|
|
ae2fddf4fc | ||
|
|
eea3dd564d | ||
|
|
5178fa7f0a | ||
|
|
0545d596c3 | ||
|
|
22064b0730 | ||
|
|
5a56525655 | ||
|
|
74050cd0ab | ||
|
|
fbc67e89e1 | ||
|
|
43e38f037c | ||
|
|
22a24c5648 | ||
|
|
9b34b6bfec | ||
|
|
301a42a90e | ||
|
|
7af7634022 | ||
|
|
29f8dd67e2 | ||
|
|
91433e8b1d | ||
|
|
c7e1451ce6 | ||
|
|
f89ac47ff9 | ||
|
|
e344e77921 | ||
|
|
a09c6b71d7 | ||
|
|
4fa6cfa0da | ||
|
|
c51047b654 | ||
|
|
d42a9bd6e0 | ||
|
|
08ef50047d | ||
|
|
95cb58e36f | ||
|
|
d3606c8c46 | ||
|
|
a88d2e1a9e | ||
|
|
29039ed69d | ||
|
|
b1a5241430 | ||
|
|
03213a7307 | ||
|
|
7e158cddd6 | ||
|
|
e5aea04fa1 | ||
|
|
8332a719ab | ||
|
|
139f3aeba3 | ||
|
|
add3d56c8b | ||
|
|
5c13e5f95a | ||
|
|
3ebb1118d3 | ||
|
|
618b0d9810 | ||
|
|
39185f8d00 | ||
|
|
a4776b9bee | ||
|
|
20effb0a51 | ||
|
|
4f02abb535 | ||
|
|
cbbf566f06 | ||
|
|
e30e46a87a | ||
|
|
7bbc09230e | ||
|
|
2ffc8e8712 | ||
|
|
012d50b2b2 | ||
|
|
bf8bddb004 | ||
|
|
42999d883d | ||
|
|
b3b980fd79 | ||
|
|
839fa19e90 | ||
|
|
7164e7a6d2 | ||
|
|
8eafcc8a16 | ||
|
|
a244c3d498 | ||
|
|
0bf68de517 | ||
|
|
42d9890e5c | ||
|
|
92144757ac | ||
|
|
e7ca4908dc | ||
|
|
3cf77b2e8b | ||
|
|
a1195cb104 | ||
|
|
80af0547ea | ||
|
|
08755f62cd | ||
|
|
5d96243414 | ||
|
|
60da5de104 | ||
|
|
0a6fa457f6 | ||
|
|
1043f00d06 | ||
|
|
8660641009 | ||
|
|
4ee1a4472d | ||
|
|
5882039715 | ||
|
|
7d8d96f7f9 | ||
|
|
69110309cc | ||
|
|
901b60e927 | ||
|
|
712a37b9c1 | ||
|
|
aa0bfd0c40 | ||
|
|
1453b8b592 | ||
|
|
65c5e05c43 | ||
|
|
bd2a5ab56a | ||
|
|
f32a63e6e5 | ||
|
|
c61b67eb03 | ||
|
|
fa99e615f0 | ||
|
|
ff6c02b15d | ||
|
|
66805079de | ||
|
|
bedccb1634 | ||
|
|
e0e5a00dfc | ||
|
|
275910b702 | ||
|
|
fdd4b0aeb0 | ||
|
|
f42ec42268 | ||
|
|
503e66ba8d | ||
|
|
8051c8bdd7 | ||
|
|
c0526f244e | ||
|
|
bda248fb9a | ||
|
|
45de02db43 | ||
|
|
9315248134 | ||
|
|
73a349e5ee | ||
|
|
a2607b5b72 | ||
|
|
18893e713a | ||
|
|
ea12679a5a | ||
|
|
b1fcb7d3e7 | ||
|
|
a43c89c01b | ||
|
|
e043f4a16c | ||
|
|
87fde4b4fd | ||
|
|
e083317cc3 | ||
|
|
7924921d17 | ||
|
|
278b2f2d4d | ||
|
|
791b388a93 | ||
|
|
6becab4a60 | ||
|
|
38bedc03e8 | ||
|
|
e7b0af0295 | ||
|
|
f9ca7bb87b | ||
|
|
392ff1d31b | ||
|
|
58207a00ec | ||
|
|
f0192c8b3d | ||
|
|
15cfb76c2c | ||
|
|
2d8949a3d3 | ||
|
|
f79614d764 | ||
|
|
e442212c05 | ||
|
|
6b2a7438e1 | ||
|
|
1902182f3a | ||
|
|
c99b004aeb | ||
|
|
c860112cf6 | ||
|
|
ee2ca10b0a | ||
|
|
5a373fbd57 | ||
|
|
efac19d184 | ||
|
|
ff3f3b4580 | ||
|
|
5a7c328f1f | ||
|
|
069fe0f285 | ||
|
|
1e3bf292f9 | ||
|
|
d6dc43938d | ||
|
|
6b8480c483 | ||
|
|
cd2de6ec46 | ||
|
|
025586e16b | ||
|
|
b990094010 | ||
|
|
716bab396f | ||
|
|
605eeec84e | ||
|
|
3caf32f9f7 | ||
|
|
3cdc98651e | ||
|
|
9779ae3190 | ||
|
|
b9ecb7b82e | ||
|
|
98b11eda3c | ||
|
|
3247b57926 | ||
|
|
f6fd76172e | ||
|
|
77e1199196 | ||
|
|
36ffa379b8 | ||
|
|
9835e13fee | ||
|
|
eae08ee509 | ||
|
|
7ee708ffef | ||
|
|
7182599b42 | ||
|
|
39a51c0d14 | ||
|
|
a9080ed04f | ||
|
|
043a6abc59 | ||
|
|
a1008f6f58 | ||
|
|
995476a9c0 | ||
|
|
7b35398ebc | ||
|
|
0d0d2763a8 | ||
|
|
ea5d72a07b | ||
|
|
cdea53e221 | ||
|
|
b0f377f973 | ||
|
|
28c55bd451 | ||
|
|
2a0a8c760b | ||
|
|
1f272ffc53 | ||
|
|
4bbf97ab82 | ||
|
|
add77eea84 | ||
|
|
a144c99f46 | ||
|
|
956f8cc5f0 | ||
|
|
30a6f27404 | ||
|
|
f5832188a6 | ||
|
|
a106796a0e | ||
|
|
88f784a9aa | ||
|
|
8ed31e9634 | ||
|
|
833724a7ed | ||
|
|
c7e1b207df | ||
|
|
d22b5b6ab5 | ||
|
|
91641b01a0 | ||
|
|
7ef4ddab6c | ||
|
|
5aa218fc96 | ||
|
|
e16d5840c6 | ||
|
|
947111f6d8 | ||
|
|
66f6e37844 | ||
|
|
96632fe4ba | ||
|
|
54be24ab5b | ||
|
|
ce9cd72c37 | ||
|
|
d126c967a0 | ||
|
|
b9025379b7 | ||
|
|
598a39e708 | ||
|
|
ea84079f8b | ||
|
|
b9e8be4352 | ||
|
|
89aec8e19e | ||
|
|
5d554a616a | ||
|
|
dceabab8db | ||
|
|
1418b1123a | ||
|
|
2c73cf35f1 | ||
|
|
0b90837a18 | ||
|
|
566bdfbcd8 | ||
|
|
1ece29e1fd | ||
|
|
7f4c3201cf | ||
|
|
8deb5ed1bd | ||
|
|
dab619b3d0 | ||
|
|
3246c7c6b7 | ||
|
|
6a705f6210 | ||
|
|
62d5e4b550 | ||
|
|
0f6759e4a2 | ||
|
|
1bed63f087 | ||
|
|
5607bc4f01 | ||
|
|
e7d7a1bd6b | ||
|
|
982ce6c5d1 | ||
|
|
f1c49630ca | ||
|
|
21a88e2c18 | ||
|
|
8219fd5abe | ||
|
|
ad6386809c | ||
|
|
d13922523a | ||
|
|
84b6780a87 | ||
|
|
40c67e0796 | ||
|
|
0d7f5d1f05 | ||
|
|
30984dcf95 | ||
|
|
064f7b1a40 |
5
.beads/.gitignore
vendored
5
.beads/.gitignore
vendored
@@ -32,6 +32,11 @@ beads.left.meta.json
|
||||
beads.right.jsonl
|
||||
beads.right.meta.json
|
||||
|
||||
# Sync state (local-only, per-machine)
|
||||
# These files are machine-specific and should not be shared across clones
|
||||
.sync.lock
|
||||
sync_base.jsonl
|
||||
|
||||
# NOTE: Do NOT add negation patterns (e.g., !issues.jsonl) here.
|
||||
# They would override fork protection in .git/info/exclude, allowing
|
||||
# contributors to accidentally commit upstream issue databases.
|
||||
|
||||
@@ -15,6 +15,8 @@ Each leg examines the code from a different perspective. Findings are
|
||||
collected and synthesized into a prioritized, actionable review.
|
||||
|
||||
## Legs (parallel execution)
|
||||
|
||||
### Analysis Legs (read and analyze code)
|
||||
- **correctness**: Logic errors, bugs, edge cases
|
||||
- **performance**: Bottlenecks, efficiency issues
|
||||
- **security**: Vulnerabilities, OWASP concerns
|
||||
@@ -23,6 +25,16 @@ collected and synthesized into a prioritized, actionable review.
|
||||
- **style**: Convention compliance, consistency
|
||||
- **smells**: Anti-patterns, technical debt
|
||||
|
||||
### Verification Legs (check implementation quality)
|
||||
- **wiring**: Installed-but-not-wired gaps (deps added but not used)
|
||||
- **commit-discipline**: Commit quality and atomicity
|
||||
- **test-quality**: Test meaningfulness, not just coverage
|
||||
|
||||
## Presets
|
||||
- **gate**: Light review for automatic flow (wiring, security, smells, test-quality)
|
||||
- **full**: Comprehensive review (all 10 legs)
|
||||
- **custom**: Select specific legs via --legs flag
|
||||
|
||||
## Execution Model
|
||||
1. Each leg spawns as a separate polecat
|
||||
2. Polecats work in parallel
|
||||
@@ -293,6 +305,125 @@ Review the code for code smells and anti-patterns.
|
||||
- Is technical debt being added or paid down?
|
||||
"""
|
||||
|
||||
# ============================================================================
|
||||
# VERIFICATION LEGS - Check implementation quality (not just code analysis)
|
||||
# ============================================================================
|
||||
|
||||
[[legs]]
|
||||
id = "wiring"
|
||||
title = "Wiring Review"
|
||||
focus = "Installed-but-not-wired gaps"
|
||||
description = """
|
||||
Detect dependencies, configs, or libraries that were added but not actually used.
|
||||
|
||||
This catches subtle bugs where the implementer THINKS they integrated something,
|
||||
but the old implementation is still being used.
|
||||
|
||||
**Look for:**
|
||||
- New dependency in manifest but never imported
|
||||
- Go: module in go.mod but no import
|
||||
- Rust: crate in Cargo.toml but no `use`
|
||||
- Node: package in package.json but no import/require
|
||||
|
||||
- SDK added but old implementation remains
|
||||
- Added Sentry but still using console.error for errors
|
||||
- Added Zod but still using manual typeof validation
|
||||
|
||||
- Config/env var defined but never loaded
|
||||
- New .env var that isn't accessed in code
|
||||
|
||||
**Questions to answer:**
|
||||
- Is every new dependency actually used?
|
||||
- Are there old patterns that should have been replaced?
|
||||
- Is there dead config that suggests incomplete migration?
|
||||
"""
|
||||
|
||||
[[legs]]
|
||||
id = "commit-discipline"
|
||||
title = "Commit Discipline Review"
|
||||
focus = "Commit quality and atomicity"
|
||||
description = """
|
||||
Review commit history for good practices.
|
||||
|
||||
Good commits make the codebase easier to understand, bisect, and revert.
|
||||
|
||||
**Look for:**
|
||||
- Giant "WIP" or "fix" commits
|
||||
- Multiple unrelated changes in one commit
|
||||
- Commits that touch 20+ files across different features
|
||||
|
||||
- Poor commit messages
|
||||
- "stuff", "update", "asdf", "fix"
|
||||
- No context about WHY the change was made
|
||||
|
||||
- Unatomic commits
|
||||
- Feature + refactor + bugfix in same commit
|
||||
- Should be separable logical units
|
||||
|
||||
- Missing type prefixes (if project uses conventional commits)
|
||||
- feat:, fix:, refactor:, test:, docs:, chore:
|
||||
|
||||
**Questions to answer:**
|
||||
- Could this history be bisected effectively?
|
||||
- Would a reviewer understand the progression?
|
||||
- Are commits atomic (one logical change each)?
|
||||
"""
|
||||
|
||||
[[legs]]
|
||||
id = "test-quality"
|
||||
title = "Test Quality Review"
|
||||
focus = "Test meaningfulness, not just coverage"
|
||||
description = """
|
||||
Verify tests are actually testing something meaningful.
|
||||
|
||||
Coverage numbers lie. A test that can't fail provides no value.
|
||||
|
||||
**Look for:**
|
||||
- Weak assertions
|
||||
- Only checking != nil / !== null / is not None
|
||||
- Using .is_ok() without checking the value
|
||||
- assertTrue(true) or equivalent
|
||||
|
||||
- Missing negative test cases
|
||||
- Happy path only, no error cases
|
||||
- No boundary testing
|
||||
- No invalid input testing
|
||||
|
||||
- Tests that can't fail
|
||||
- Mocked so heavily the test is meaningless
|
||||
- Testing implementation details, not behavior
|
||||
|
||||
- Flaky test indicators
|
||||
- Sleep/delay in tests
|
||||
- Time-dependent assertions
|
||||
|
||||
**Questions to answer:**
|
||||
- Do these tests actually verify behavior?
|
||||
- Would a bug in the implementation cause a test failure?
|
||||
- Are edge cases and error paths tested?
|
||||
"""
|
||||
|
||||
# ============================================================================
|
||||
# PRESETS - Configurable leg selection
|
||||
# ============================================================================
|
||||
|
||||
[presets]
|
||||
[presets.gate]
|
||||
description = "Light review for automatic flow - fast, focused on blockers"
|
||||
legs = ["wiring", "security", "smells", "test-quality"]
|
||||
|
||||
[presets.full]
|
||||
description = "Comprehensive review - all legs, for major features"
|
||||
legs = ["correctness", "performance", "security", "elegance", "resilience", "style", "smells", "wiring", "commit-discipline", "test-quality"]
|
||||
|
||||
[presets.security-focused]
|
||||
description = "Security-heavy review for sensitive changes"
|
||||
legs = ["security", "resilience", "correctness", "wiring"]
|
||||
|
||||
[presets.refactor]
|
||||
description = "Review focused on code quality during refactoring"
|
||||
legs = ["elegance", "smells", "style", "commit-discipline"]
|
||||
|
||||
# Synthesis step - combines all leg outputs
|
||||
[synthesis]
|
||||
title = "Review Synthesis"
|
||||
@@ -310,10 +441,13 @@ A synthesized review at: {{.output.directory}}/{{.output.synthesis}}
|
||||
2. **Critical Issues** - P0 items from all legs, deduplicated
|
||||
3. **Major Issues** - P1 items, grouped by theme
|
||||
4. **Minor Issues** - P2 items, briefly listed
|
||||
5. **Positive Observations** - What's done well
|
||||
6. **Recommendations** - Actionable next steps
|
||||
5. **Wiring Gaps** - Dependencies added but not used (from wiring leg)
|
||||
6. **Commit Quality** - Notes on commit discipline
|
||||
7. **Test Quality** - Assessment of test meaningfulness
|
||||
8. **Positive Observations** - What's done well
|
||||
9. **Recommendations** - Actionable next steps
|
||||
|
||||
Deduplicate issues found by multiple legs (note which legs found them).
|
||||
Prioritize by impact and effort. Be actionable.
|
||||
"""
|
||||
depends_on = ["correctness", "performance", "security", "elegance", "resilience", "style", "smells"]
|
||||
depends_on = ["correctness", "performance", "security", "elegance", "resilience", "style", "smells", "wiring", "commit-discipline", "test-quality"]
|
||||
|
||||
381
.beads/formulas/gastown-release.formula.toml
Normal file
381
.beads/formulas/gastown-release.formula.toml
Normal file
@@ -0,0 +1,381 @@
|
||||
description = """
|
||||
Gas Town release workflow - from version bump to verified release.
|
||||
|
||||
This formula orchestrates a release cycle for Gas Town:
|
||||
1. Preflight checks (workspace cleanliness, clean git, up to date)
|
||||
2. Documentation updates (CHANGELOG.md, info.go)
|
||||
3. Version bump (all components)
|
||||
4. Git operations (commit, tag, push)
|
||||
5. Local installation update
|
||||
6. Daemon restart
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
gt mol wisp create gastown-release --var version=0.3.0
|
||||
```
|
||||
|
||||
Or assign to a crew member:
|
||||
```bash
|
||||
gt sling gastown/crew/max --formula gastown-release --var version=0.3.0
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
- **Crew members (with user present)**: Attempt to resolve issues (merge branches,
|
||||
commit/stash work). Ask the user if blocked.
|
||||
- **Polecats (autonomous)**: Escalate via `gt escalate` if preflight fails or
|
||||
unrecoverable errors occur. Do not proceed with a release if workspaces have
|
||||
uncommitted work.
|
||||
"""
|
||||
formula = "gastown-release"
|
||||
type = "workflow"
|
||||
version = 1
|
||||
|
||||
[vars.version]
|
||||
description = "The semantic version to release (e.g., 0.3.0)"
|
||||
required = true
|
||||
|
||||
[[steps]]
|
||||
id = "preflight-workspaces"
|
||||
title = "Preflight: Check all workspaces for uncommitted work"
|
||||
description = """
|
||||
Before releasing, ensure no gastown workspaces have uncommitted work that would
|
||||
be excluded from the release.
|
||||
|
||||
Check all crew workspaces and the mayor rig:
|
||||
|
||||
```bash
|
||||
# Check each workspace
|
||||
for dir in $GT_ROOT/gastown/crew/* $GT_ROOT/gastown/mayor; do
|
||||
if [ -d "$dir/.git" ] || [ -d "$dir" ]; then
|
||||
echo "=== Checking $dir ==="
|
||||
cd "$dir" 2>/dev/null || continue
|
||||
|
||||
# Check for uncommitted changes
|
||||
if ! git diff-index --quiet HEAD -- 2>/dev/null; then
|
||||
echo " ⚠ UNCOMMITTED CHANGES"
|
||||
git status --short
|
||||
fi
|
||||
|
||||
# Check for stashes
|
||||
stash_count=$(git stash list 2>/dev/null | wc -l | tr -d ' ')
|
||||
if [ "$stash_count" -gt 0 ]; then
|
||||
echo " ⚠ HAS $stash_count STASH(ES)"
|
||||
git stash list
|
||||
fi
|
||||
|
||||
# Check for non-main branches with unpushed commits
|
||||
current_branch=$(git branch --show-current 2>/dev/null)
|
||||
if [ -n "$current_branch" ] && [ "$current_branch" != "main" ]; then
|
||||
echo " ⚠ ON BRANCH: $current_branch (not main)"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
```
|
||||
|
||||
## If issues found:
|
||||
|
||||
**For crew members (interactive)**:
|
||||
1. Try to resolve: merge branches, commit work, apply/drop stashes
|
||||
2. If work is in-progress and not ready, ask the user whether to:
|
||||
- Wait for completion
|
||||
- Stash and proceed
|
||||
- Exclude from this release
|
||||
3. Only proceed when all workspaces are clean on main
|
||||
|
||||
**For polecats (autonomous)**:
|
||||
1. If any workspace has uncommitted work: STOP and escalate
|
||||
2. Use: `gt escalate --severity medium "Release blocked: workspace X has uncommitted work"`
|
||||
3. Do NOT proceed with release - uncommitted work would be excluded
|
||||
|
||||
This step is critical. A release with uncommitted work means losing changes.
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "preflight-git"
|
||||
title = "Preflight: Check git status"
|
||||
needs = ["preflight-workspaces"]
|
||||
description = """
|
||||
Ensure YOUR working tree is clean before starting release.
|
||||
|
||||
```bash
|
||||
git status
|
||||
```
|
||||
|
||||
If there are uncommitted changes:
|
||||
- Commit them first (if they should be in the release)
|
||||
- Stash them: `git stash` (if they should NOT be in the release)
|
||||
|
||||
## On failure:
|
||||
- **Crew**: Commit or stash your changes, then continue
|
||||
- **Polecat**: Escalate if you have uncommitted changes you didn't create
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "preflight-pull"
|
||||
title = "Preflight: Pull latest"
|
||||
needs = ["preflight-git"]
|
||||
description = """
|
||||
Ensure we're up to date with origin.
|
||||
|
||||
```bash
|
||||
git pull --rebase
|
||||
```
|
||||
|
||||
## On merge conflicts:
|
||||
- **Crew**: Resolve conflicts manually. Ask user if unsure about resolution.
|
||||
- **Polecat**: Escalate immediately. Do not attempt to resolve release-blocking
|
||||
merge conflicts autonomously.
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "review-changes"
|
||||
title = "Review changes since last release"
|
||||
needs = ["preflight-pull"]
|
||||
description = """
|
||||
Understand what's being released.
|
||||
|
||||
```bash
|
||||
git log $(git describe --tags --abbrev=0)..HEAD --oneline
|
||||
```
|
||||
|
||||
Categorize changes:
|
||||
- Features (feat:)
|
||||
- Fixes (fix:)
|
||||
- Breaking changes
|
||||
- Documentation
|
||||
|
||||
If there are no changes since last release, ask whether to proceed with an
|
||||
empty release (version bump only).
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "update-changelog"
|
||||
title = "Update CHANGELOG.md"
|
||||
needs = ["review-changes"]
|
||||
description = """
|
||||
Write the [Unreleased] section with all changes for {{version}}.
|
||||
|
||||
Edit CHANGELOG.md and add entries under [Unreleased].
|
||||
|
||||
Format: Keep a Changelog (https://keepachangelog.com)
|
||||
|
||||
Sections to use:
|
||||
- ### Added - for new features
|
||||
- ### Changed - for changes in existing functionality
|
||||
- ### Fixed - for bug fixes
|
||||
- ### Deprecated - for soon-to-be removed features
|
||||
- ### Removed - for now removed features
|
||||
|
||||
Base entries on the git log from the previous step. Group related commits.
|
||||
|
||||
The bump script will automatically create the version header with today's date.
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "update-info-go"
|
||||
title = "Update info.go versionChanges"
|
||||
needs = ["update-changelog"]
|
||||
description = """
|
||||
Add entry to versionChanges in internal/cmd/info.go.
|
||||
|
||||
This powers `gt info --whats-new` for agents.
|
||||
|
||||
Add a new entry at the TOP of the versionChanges slice:
|
||||
|
||||
```go
|
||||
{
|
||||
Version: "{{version}}",
|
||||
Date: "YYYY-MM-DD", // Today's date
|
||||
Changes: []string{
|
||||
"NEW: Key feature 1",
|
||||
"NEW: Key feature 2",
|
||||
"CHANGED: Modified behavior",
|
||||
"FIX: Bug that was fixed",
|
||||
},
|
||||
},
|
||||
```
|
||||
|
||||
Focus on agent-relevant and workflow-impacting changes.
|
||||
Prefix with NEW:, CHANGED:, FIX:, or DEPRECATED: for clarity.
|
||||
|
||||
This is similar to CHANGELOG.md but focused on what agents need to know -
|
||||
new commands, changed behaviors, workflow impacts.
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "run-bump-script"
|
||||
title = "Run bump-version.sh"
|
||||
needs = ["update-info-go"]
|
||||
description = """
|
||||
Update all component versions atomically.
|
||||
|
||||
```bash
|
||||
./scripts/bump-version.sh {{version}}
|
||||
```
|
||||
|
||||
This updates:
|
||||
- internal/cmd/version.go - CLI version constant
|
||||
- npm-package/package.json - npm package version
|
||||
- CHANGELOG.md - Creates [{{version}}] header with date
|
||||
|
||||
Review the changes shown by the script.
|
||||
|
||||
## On failure:
|
||||
If the script fails (e.g., version already exists, format error):
|
||||
- **Crew**: Debug and fix, or ask user
|
||||
- **Polecat**: Escalate with error details
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "verify-versions"
|
||||
title = "Verify version consistency"
|
||||
needs = ["run-bump-script"]
|
||||
description = """
|
||||
Confirm all versions match {{version}}.
|
||||
|
||||
```bash
|
||||
grep 'Version = ' internal/cmd/version.go
|
||||
grep '"version"' npm-package/package.json | head -1
|
||||
```
|
||||
|
||||
Both should show {{version}}.
|
||||
|
||||
## On mismatch:
|
||||
Do NOT proceed. Either the bump script failed or there's a bug.
|
||||
- **Crew**: Investigate and fix manually
|
||||
- **Polecat**: Escalate immediately - version mismatch is a release blocker
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "commit-release"
|
||||
title = "Commit release"
|
||||
needs = ["verify-versions"]
|
||||
description = """
|
||||
Stage and commit all version changes.
|
||||
|
||||
```bash
|
||||
git add -A
|
||||
git commit -m "chore: Bump version to {{version}}"
|
||||
```
|
||||
|
||||
Review the commit to ensure all expected files are included:
|
||||
- internal/cmd/version.go
|
||||
- internal/cmd/info.go
|
||||
- npm-package/package.json
|
||||
- CHANGELOG.md
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "create-tag"
|
||||
title = "Create release tag"
|
||||
needs = ["commit-release"]
|
||||
description = """
|
||||
Create annotated git tag.
|
||||
|
||||
```bash
|
||||
git tag -a v{{version}} -m "Release v{{version}}"
|
||||
```
|
||||
|
||||
Verify: `git tag -l | tail -5`
|
||||
|
||||
## If tag already exists:
|
||||
The version may have been previously (partially) released.
|
||||
- **Crew**: Ask user how to proceed (delete tag and retry? use different version?)
|
||||
- **Polecat**: Escalate - do not delete existing tags autonomously
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "push-release"
|
||||
title = "Push commit and tag"
|
||||
needs = ["create-tag"]
|
||||
description = """
|
||||
Push the release commit and tag to origin.
|
||||
|
||||
```bash
|
||||
git push origin main
|
||||
git push origin v{{version}}
|
||||
```
|
||||
|
||||
This triggers GitHub Actions to build release artifacts.
|
||||
|
||||
Monitor: https://github.com/steveyegge/gastown/actions
|
||||
|
||||
## On push rejection:
|
||||
Someone pushed while we were releasing.
|
||||
- **Crew**: Pull, rebase, re-tag, try again. Ask user if conflicts.
|
||||
- **Polecat**: Escalate - release coordination conflict requires human decision
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "local-install"
|
||||
title = "Update local installation"
|
||||
needs = ["push-release"]
|
||||
description = """
|
||||
Rebuild and install gt locally with the new version.
|
||||
|
||||
```bash
|
||||
go build -o $(go env GOPATH)/bin/gt ./cmd/gt
|
||||
```
|
||||
|
||||
On macOS, codesign the binary:
|
||||
```bash
|
||||
codesign -f -s - $(go env GOPATH)/bin/gt
|
||||
```
|
||||
|
||||
Verify:
|
||||
```bash
|
||||
gt version
|
||||
```
|
||||
|
||||
Should show {{version}}.
|
||||
|
||||
## On build failure:
|
||||
- **Crew**: Debug build error, fix, retry
|
||||
- **Polecat**: Escalate - release is pushed but local install failed
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "restart-daemons"
|
||||
title = "Restart daemons"
|
||||
needs = ["local-install"]
|
||||
description = """
|
||||
Restart gt daemon to pick up the new version.
|
||||
|
||||
```bash
|
||||
gt daemon stop && gt daemon start
|
||||
```
|
||||
|
||||
Verify:
|
||||
```bash
|
||||
gt daemon status
|
||||
```
|
||||
|
||||
The daemon should show the new binary timestamp and no stale warning.
|
||||
|
||||
Note: This step is safe to retry if it fails.
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "release-complete"
|
||||
title = "Release complete"
|
||||
needs = ["restart-daemons"]
|
||||
description = """
|
||||
Release v{{version}} is complete!
|
||||
|
||||
Summary:
|
||||
- All workspaces verified clean before release
|
||||
- Version files updated (version.go, package.json)
|
||||
- CHANGELOG.md updated with release date
|
||||
- info.go versionChanges updated for `gt info --whats-new`
|
||||
- Git tag v{{version}} pushed
|
||||
- GitHub Actions triggered for artifact builds
|
||||
- Local gt binary rebuilt and installed
|
||||
- Daemons restarted with new version
|
||||
|
||||
Optional next steps:
|
||||
- Monitor GitHub Actions for release build completion
|
||||
- Verify release artifacts at https://github.com/steveyegge/gastown/releases
|
||||
- Announce the release
|
||||
"""
|
||||
@@ -47,7 +47,7 @@ bd show hq-deacon 2>/dev/null
|
||||
gt feed --since 10m --plain | head -20
|
||||
|
||||
# Recent wisps (operational state)
|
||||
ls -lt ~/gt/.beads-wisp/*.wisp.json 2>/dev/null | head -5
|
||||
ls -lt $GT_ROOT/.beads-wisp/*.wisp.json 2>/dev/null | head -5
|
||||
```
|
||||
|
||||
**Step 4: Check Deacon mail**
|
||||
@@ -221,7 +221,7 @@ Then exit. The next daemon tick will spawn a fresh Boot.
|
||||
**Update status file**
|
||||
```bash
|
||||
# The gt boot command handles this automatically
|
||||
# Status is written to ~/gt/deacon/dogs/boot/.boot-status.json
|
||||
# Status is written to $GT_ROOT/deacon/dogs/boot/.boot-status.json
|
||||
```
|
||||
|
||||
Boot is ephemeral by design. Each instance runs fresh.
|
||||
|
||||
@@ -84,10 +84,46 @@ Callbacks may spawn new polecats, update issue state, or trigger other actions.
|
||||
**Hygiene principle**: Archive messages after they're fully processed.
|
||||
Keep inbox near-empty - only unprocessed items should remain."""
|
||||
|
||||
[[steps]]
|
||||
id = "orphan-process-cleanup"
|
||||
title = "Clean up orphaned claude subagent processes"
|
||||
needs = ["inbox-check"]
|
||||
description = """
|
||||
Clean up orphaned claude subagent processes.
|
||||
|
||||
Claude Code's Task tool spawns subagent processes that sometimes don't clean up
|
||||
properly after completion. These accumulate and consume significant memory.
|
||||
|
||||
**Detection method:**
|
||||
Orphaned processes have no controlling terminal (TTY = "?"). Legitimate claude
|
||||
instances in terminals have a TTY like "pts/0".
|
||||
|
||||
**Run cleanup:**
|
||||
```bash
|
||||
gt deacon cleanup-orphans
|
||||
```
|
||||
|
||||
This command:
|
||||
1. Lists all claude/codex processes with `ps -eo pid,tty,comm`
|
||||
2. Filters for TTY = "?" (no controlling terminal)
|
||||
3. Sends SIGTERM to each orphaned process
|
||||
4. Reports how many were killed
|
||||
|
||||
**Why this is safe:**
|
||||
- Processes in terminals (your personal sessions) have a TTY - they won't be touched
|
||||
- Only kills processes that have no controlling terminal
|
||||
- These orphans are children of the tmux server with no TTY, indicating they're
|
||||
detached subagents that failed to exit
|
||||
|
||||
**If cleanup fails:**
|
||||
Log the error but continue patrol - this is best-effort cleanup.
|
||||
|
||||
**Exit criteria:** Orphan cleanup attempted (success or logged failure)."""
|
||||
|
||||
[[steps]]
|
||||
id = "trigger-pending-spawns"
|
||||
title = "Nudge newly spawned polecats"
|
||||
needs = ["inbox-check"]
|
||||
needs = ["orphan-process-cleanup"]
|
||||
description = """
|
||||
Nudge newly spawned polecats that are ready for input.
|
||||
|
||||
@@ -444,7 +480,7 @@ needs = ["zombie-scan"]
|
||||
description = """
|
||||
Execute registered plugins.
|
||||
|
||||
Scan ~/gt/plugins/ for plugin directories. Each plugin has a plugin.md with TOML frontmatter defining its gate (when to run) and instructions (what to do).
|
||||
Scan $GT_ROOT/plugins/ for plugin directories. Each plugin has a plugin.md with TOML frontmatter defining its gate (when to run) and instructions (what to do).
|
||||
|
||||
See docs/deacon-plugins.md for full documentation.
|
||||
|
||||
@@ -461,7 +497,7 @@ For each plugin:
|
||||
|
||||
Plugins marked parallel: true can run concurrently using Task tool subagents. Sequential plugins run one at a time in directory order.
|
||||
|
||||
Skip this step if ~/gt/plugins/ does not exist or is empty."""
|
||||
Skip this step if $GT_ROOT/plugins/ does not exist or is empty."""
|
||||
|
||||
[[steps]]
|
||||
id = "dog-pool-maintenance"
|
||||
@@ -499,10 +535,74 @@ gt dog status <name>
|
||||
|
||||
**Exit criteria:** Pool has at least 1 idle dog."""
|
||||
|
||||
[[steps]]
|
||||
id = "dog-health-check"
|
||||
title = "Check for stuck dogs"
|
||||
needs = ["dog-pool-maintenance"]
|
||||
description = """
|
||||
Check for dogs that have been working too long (stuck).
|
||||
|
||||
Dogs dispatched via `gt dog dispatch --plugin` are marked as "working" with
|
||||
a work description like "plugin:rebuild-gt". If a dog hangs, crashes, or
|
||||
takes too long, it needs intervention.
|
||||
|
||||
**Step 1: List working dogs**
|
||||
```bash
|
||||
gt dog list --json
|
||||
# Filter for state: "working"
|
||||
```
|
||||
|
||||
**Step 2: Check work duration**
|
||||
For each working dog:
|
||||
```bash
|
||||
gt dog status <name> --json
|
||||
# Check: work_started_at, current_work
|
||||
```
|
||||
|
||||
Compare against timeout:
|
||||
- If plugin has [execution] timeout in plugin.md, use that
|
||||
- Default timeout: 10 minutes for infrastructure tasks
|
||||
|
||||
**Duration calculation:**
|
||||
```
|
||||
stuck_threshold = plugin_timeout or 10m
|
||||
duration = now - work_started_at
|
||||
is_stuck = duration > stuck_threshold
|
||||
```
|
||||
|
||||
**Step 3: Handle stuck dogs**
|
||||
|
||||
For dogs working > timeout:
|
||||
```bash
|
||||
# Option A: File death warrant (Boot handles termination)
|
||||
gt warrant file deacon/dogs/<name> --reason "Stuck: working on <work> for <duration>"
|
||||
|
||||
# Option B: Force clear work and notify
|
||||
gt dog clear <name> --force
|
||||
gt mail send deacon/ -s "DOG_TIMEOUT <name>" -m "Dog <name> timed out on <work> after <duration>"
|
||||
```
|
||||
|
||||
**Decision matrix:**
|
||||
|
||||
| Duration over timeout | Action |
|
||||
|----------------------|--------|
|
||||
| < 2x timeout | Log warning, check next cycle |
|
||||
| 2x - 5x timeout | File death warrant |
|
||||
| > 5x timeout | Force clear + escalate to Mayor |
|
||||
|
||||
**Step 4: Track chronic failures**
|
||||
If same dog gets stuck repeatedly:
|
||||
```bash
|
||||
gt mail send mayor/ -s "Dog <name> chronic failures" \
|
||||
-m "Dog has timed out N times in last 24h. Consider removing from pool."
|
||||
```
|
||||
|
||||
**Exit criteria:** All stuck dogs handled (warrant filed or cleared)."""
|
||||
|
||||
[[steps]]
|
||||
id = "orphan-check"
|
||||
title = "Detect abandoned work"
|
||||
needs = ["dog-pool-maintenance"]
|
||||
needs = ["dog-health-check"]
|
||||
description = """
|
||||
**DETECT ONLY** - Check for orphaned state and dispatch to dog if found.
|
||||
|
||||
@@ -565,59 +665,84 @@ Skip dispatch - system is healthy.
|
||||
|
||||
[[steps]]
|
||||
id = "costs-digest"
|
||||
title = "Aggregate daily costs"
|
||||
title = "Aggregate daily costs [DISABLED]"
|
||||
needs = ["session-gc"]
|
||||
description = """
|
||||
**DAILY DIGEST** - Aggregate yesterday's session cost wisps.
|
||||
**⚠️ DISABLED** - Skip this step entirely.
|
||||
|
||||
Session costs are recorded as ephemeral wisps (not exported to JSONL) to avoid
|
||||
log-in-database pollution. This step aggregates them into a permanent daily
|
||||
"Cost Report YYYY-MM-DD" bead for audit purposes.
|
||||
Cost tracking is temporarily disabled because Claude Code does not expose
|
||||
session costs in a way that can be captured programmatically.
|
||||
|
||||
**Why disabled:**
|
||||
- The `gt costs` command uses tmux capture-pane to find costs
|
||||
- Claude Code displays costs in the TUI status bar, not in scrollback
|
||||
- All sessions show $0.00 because capture-pane can't see TUI chrome
|
||||
- The infrastructure is sound but has no data source
|
||||
|
||||
**What we need from Claude Code:**
|
||||
- Stop hook env var (e.g., `$CLAUDE_SESSION_COST`)
|
||||
- Or queryable file/API endpoint
|
||||
|
||||
**Re-enable when:** Claude Code exposes cost data via API or environment.
|
||||
|
||||
See: GH#24, gt-7awfj
|
||||
|
||||
**Exit criteria:** Skip this step - proceed to next."""
|
||||
|
||||
[[steps]]
|
||||
id = "patrol-digest"
|
||||
title = "Aggregate daily patrol digests"
|
||||
needs = ["costs-digest"]
|
||||
description = """
|
||||
**DAILY DIGEST** - Aggregate yesterday's patrol cycle digests.
|
||||
|
||||
Patrol cycles (Deacon, Witness, Refinery) create ephemeral per-cycle digests
|
||||
to avoid JSONL pollution. This step aggregates them into a single permanent
|
||||
"Patrol Report YYYY-MM-DD" bead for audit purposes.
|
||||
|
||||
**Step 1: Check if digest is needed**
|
||||
```bash
|
||||
# Preview yesterday's costs (dry run)
|
||||
gt costs digest --yesterday --dry-run
|
||||
# Preview yesterday's patrol digests (dry run)
|
||||
gt patrol digest --yesterday --dry-run
|
||||
```
|
||||
|
||||
If output shows "No session cost wisps found", skip to Step 3.
|
||||
If output shows "No patrol digests found", skip to Step 3.
|
||||
|
||||
**Step 2: Create the digest**
|
||||
```bash
|
||||
gt costs digest --yesterday
|
||||
gt patrol digest --yesterday
|
||||
```
|
||||
|
||||
This:
|
||||
- Queries all session.ended wisps from yesterday
|
||||
- Creates a single "Cost Report YYYY-MM-DD" bead with aggregated data
|
||||
- Deletes the source wisps
|
||||
- Queries all ephemeral patrol digests from yesterday
|
||||
- Creates a single "Patrol Report YYYY-MM-DD" bead with aggregated data
|
||||
- Deletes the source digests
|
||||
|
||||
**Step 3: Verify**
|
||||
The digest appears in `gt costs --week` queries.
|
||||
Daily digests preserve audit trail without per-session pollution.
|
||||
Daily patrol digests preserve audit trail without per-cycle pollution.
|
||||
|
||||
**Timing**: Run once per morning patrol cycle. The --yesterday flag ensures
|
||||
we don't try to digest today's incomplete data.
|
||||
|
||||
**Exit criteria:** Yesterday's costs digested (or no wisps to digest)."""
|
||||
**Exit criteria:** Yesterday's patrol digests aggregated (or none to aggregate)."""
|
||||
|
||||
[[steps]]
|
||||
id = "log-maintenance"
|
||||
title = "Rotate logs and prune state"
|
||||
needs = ["costs-digest"]
|
||||
needs = ["patrol-digest"]
|
||||
description = """
|
||||
Maintain daemon logs and state files.
|
||||
|
||||
**Step 1: Check daemon.log size**
|
||||
```bash
|
||||
# Get log file size
|
||||
ls -la ~/.beads/daemon*.log 2>/dev/null || ls -la ~/gt/.beads/daemon*.log 2>/dev/null
|
||||
ls -la ~/.beads/daemon*.log 2>/dev/null || ls -la $GT_ROOT/.beads/daemon*.log 2>/dev/null
|
||||
```
|
||||
|
||||
If daemon.log exceeds 10MB:
|
||||
```bash
|
||||
# Rotate with date suffix and gzip
|
||||
LOGFILE="$HOME/gt/.beads/daemon.log"
|
||||
LOGFILE="$GT_ROOT/.beads/daemon.log"
|
||||
if [ -f "$LOGFILE" ] && [ $(stat -f%z "$LOGFILE" 2>/dev/null || stat -c%s "$LOGFILE") -gt 10485760 ]; then
|
||||
DATE=$(date +%Y-%m-%dT%H-%M-%S)
|
||||
mv "$LOGFILE" "${LOGFILE%.log}-${DATE}.log"
|
||||
@@ -629,7 +754,7 @@ fi
|
||||
|
||||
Clean up daemon logs older than 7 days:
|
||||
```bash
|
||||
find ~/gt/.beads/ -name "daemon-*.log.gz" -mtime +7 -delete
|
||||
find $GT_ROOT/.beads/ -name "daemon-*.log.gz" -mtime +7 -delete
|
||||
```
|
||||
|
||||
**Step 3: Prune state.json of dead sessions**
|
||||
|
||||
318
.beads/formulas/mol-polecat-code-review.formula.toml
Normal file
318
.beads/formulas/mol-polecat-code-review.formula.toml
Normal file
@@ -0,0 +1,318 @@
|
||||
description = """
|
||||
Review code and file beads for issues found.
|
||||
|
||||
This molecule guides a polecat through a code review task - examining a portion
|
||||
of the codebase for bugs, security issues, code quality problems, or improvement
|
||||
opportunities. The output is a set of beads capturing actionable findings.
|
||||
|
||||
## Polecat Contract (Self-Cleaning Model)
|
||||
|
||||
You are a self-cleaning worker. You:
|
||||
1. Receive work via your hook (pinned molecule + review scope)
|
||||
2. Work through molecule steps using `bd ready` / `bd close <step>`
|
||||
3. Complete and self-clean via `gt done` (submit findings + nuke yourself)
|
||||
4. You are GONE - your findings are recorded in beads
|
||||
|
||||
**Self-cleaning:** When you run `gt done`, you submit your findings, nuke your
|
||||
sandbox, and exit. There is no idle state. Done means gone.
|
||||
|
||||
**Important:** This formula defines the template. Your molecule already has step
|
||||
beads created from it. Use `bd ready` to find them - do NOT read this file directly.
|
||||
|
||||
**You do NOT:**
|
||||
- Fix the issues yourself (file beads, let other polecats fix)
|
||||
- Scope creep into unrelated areas
|
||||
- Wait for someone to act on findings (you're done after filing)
|
||||
|
||||
## Variables
|
||||
|
||||
| Variable | Source | Description |
|
||||
|----------|--------|-------------|
|
||||
| scope | hook_bead | What to review (file path, directory, or description) |
|
||||
| issue | hook_bead | The tracking issue for this review task |
|
||||
| focus | hook_bead | Optional focus area (security, performance, etc.) |
|
||||
|
||||
## Failure Modes
|
||||
|
||||
| Situation | Action |
|
||||
|-----------|--------|
|
||||
| Scope too broad | Mail Witness, request narrower scope |
|
||||
| Can't understand code | Mail Witness for context |
|
||||
| Critical issue found | Mail Witness immediately, then continue |"""
|
||||
formula = "mol-polecat-code-review"
|
||||
version = 1
|
||||
|
||||
[[steps]]
|
||||
id = "load-context"
|
||||
title = "Load context and understand the review scope"
|
||||
description = """
|
||||
Initialize your session and understand what you're reviewing.
|
||||
|
||||
**1. Prime your environment:**
|
||||
```bash
|
||||
gt prime # Load role context
|
||||
bd prime # Load beads context
|
||||
```
|
||||
|
||||
**2. Check your hook:**
|
||||
```bash
|
||||
gt hook # Shows your pinned molecule and hook_bead
|
||||
```
|
||||
|
||||
The hook_bead describes your review scope. Read the tracking issue:
|
||||
```bash
|
||||
bd show {{issue}} # Full issue details
|
||||
```
|
||||
|
||||
**3. Understand the scope:**
|
||||
- What files/directories are in scope?
|
||||
- Is there a specific focus (security, performance, correctness)?
|
||||
- What's the context - why is this review happening?
|
||||
|
||||
**4. Locate the code:**
|
||||
```bash
|
||||
# If scope is a path:
|
||||
ls -la {{scope}}
|
||||
head -100 {{scope}} # Quick look at the code
|
||||
|
||||
# If scope is a directory:
|
||||
find {{scope}} -type f -name "*.go" | head -20
|
||||
```
|
||||
|
||||
**5. Check for recent changes:**
|
||||
```bash
|
||||
git log --oneline -10 -- {{scope}}
|
||||
```
|
||||
|
||||
**Exit criteria:** You understand what you're reviewing and why."""
|
||||
|
||||
[[steps]]
|
||||
id = "survey-code"
|
||||
title = "Survey the code structure"
|
||||
needs = ["load-context"]
|
||||
description = """
|
||||
Get a high-level understanding before diving into details.
|
||||
|
||||
**1. Understand the structure:**
|
||||
```bash
|
||||
# For a directory:
|
||||
tree {{scope}} -L 2
|
||||
|
||||
# For a file:
|
||||
wc -l {{scope}} # How big is it?
|
||||
```
|
||||
|
||||
**2. Identify key components:**
|
||||
- What are the main types/structs?
|
||||
- What are the public functions?
|
||||
- What are the dependencies?
|
||||
|
||||
**3. Read the tests (if any):**
|
||||
```bash
|
||||
find {{scope}} -name "*_test.go" | xargs head -50
|
||||
```
|
||||
Tests often reveal intended behavior.
|
||||
|
||||
**4. Note initial impressions:**
|
||||
- Is the code well-organized?
|
||||
- Are there obvious patterns or anti-patterns?
|
||||
- What areas look risky?
|
||||
|
||||
**Exit criteria:** You have a mental map of the code structure."""
|
||||
|
||||
[[steps]]
|
||||
id = "detailed-review"
|
||||
title = "Perform detailed code review"
|
||||
needs = ["survey-code"]
|
||||
description = """
|
||||
Systematically review the code for issues.
|
||||
|
||||
**Review checklist:**
|
||||
|
||||
| Category | Look For |
|
||||
|----------|----------|
|
||||
| **Correctness** | Logic errors, off-by-one, nil handling, race conditions |
|
||||
| **Security** | Injection, auth bypass, secrets in code, unsafe operations |
|
||||
| **Error handling** | Swallowed errors, missing checks, unclear error messages |
|
||||
| **Performance** | N+1 queries, unnecessary allocations, blocking calls |
|
||||
| **Maintainability** | Dead code, unclear naming, missing comments on complex logic |
|
||||
| **Testing** | Untested paths, missing edge cases, flaky tests |
|
||||
|
||||
**Focus on {{focus}} if specified.**
|
||||
|
||||
**1. Read through the code:**
|
||||
```bash
|
||||
cat {{scope}} # For single file
|
||||
# Or read files systematically for a directory
|
||||
```
|
||||
|
||||
**2. For each issue found, note:**
|
||||
- File and line number
|
||||
- Category (bug, security, performance, etc.)
|
||||
- Severity (critical, high, medium, low)
|
||||
- Description of the issue
|
||||
- Suggested fix (if obvious)
|
||||
|
||||
**3. Don't fix issues yourself:**
|
||||
Your job is to find and report, not fix. File beads.
|
||||
|
||||
**Exit criteria:** You've reviewed all code in scope and noted issues."""
|
||||
|
||||
[[steps]]
|
||||
id = "prioritize-findings"
|
||||
title = "Prioritize and categorize findings"
|
||||
needs = ["detailed-review"]
|
||||
description = """
|
||||
Organize your findings by priority and category.
|
||||
|
||||
**Priority levels:**
|
||||
|
||||
| Priority | Description | Action |
|
||||
|----------|-------------|--------|
|
||||
| P0 | Security vulnerability, data loss risk | Mail Witness immediately |
|
||||
| P1 | Bug affecting users, broken functionality | File as bug, high priority |
|
||||
| P2 | Code quality issue, potential future bug | File as task |
|
||||
| P3 | Improvement opportunity, nice-to-have | File as task, low priority |
|
||||
|
||||
**1. Sort your findings:**
|
||||
Group by priority, then by category.
|
||||
|
||||
**2. For P0 issues:**
|
||||
```bash
|
||||
gt mail send {{rig}}/witness -s "CRITICAL: Security issue found" -m "Scope: {{scope}}
|
||||
Issue: {{issue}}
|
||||
Finding: <description of critical issue>
|
||||
Location: <file:line>"
|
||||
```
|
||||
|
||||
**3. Prepare bead descriptions:**
|
||||
For each finding, prepare:
|
||||
- Clear title
|
||||
- File/line location
|
||||
- Description of the issue
|
||||
- Why it matters
|
||||
- Suggested fix (if known)
|
||||
|
||||
**Exit criteria:** Findings prioritized and ready to file."""
|
||||
|
||||
[[steps]]
|
||||
id = "file-beads"
|
||||
title = "File beads for all findings"
|
||||
needs = ["prioritize-findings"]
|
||||
description = """
|
||||
Create beads for each finding.
|
||||
|
||||
**1. For bugs (P0, P1):**
|
||||
```bash
|
||||
bd create --type=bug --priority=1 \
|
||||
--title="<clear description of bug>" \
|
||||
--description="Found during code review of {{scope}}.
|
||||
|
||||
Location: <file:line>
|
||||
|
||||
Issue:
|
||||
<description>
|
||||
|
||||
Impact:
|
||||
<why this matters>
|
||||
|
||||
Suggested fix:
|
||||
<if known>"
|
||||
```
|
||||
|
||||
**2. For code quality issues (P2, P3):**
|
||||
```bash
|
||||
bd create --type=task --priority=2 \
|
||||
--title="<clear description>" \
|
||||
--description="Found during code review of {{scope}}.
|
||||
|
||||
Location: <file:line>
|
||||
|
||||
Issue:
|
||||
<description>
|
||||
|
||||
Suggestion:
|
||||
<how to improve>"
|
||||
```
|
||||
|
||||
**3. Track filed beads:**
|
||||
Note each bead ID as you create them.
|
||||
|
||||
**4. If no issues found:**
|
||||
That's a valid outcome! Note that the code review passed.
|
||||
|
||||
**Exit criteria:** All findings filed as beads."""
|
||||
|
||||
[[steps]]
|
||||
id = "summarize-review"
|
||||
title = "Summarize review results"
|
||||
needs = ["file-beads"]
|
||||
description = """
|
||||
Update the tracking issue with review summary.
|
||||
|
||||
**1. Create summary:**
|
||||
```bash
|
||||
bd update {{issue}} --notes "Code review complete.
|
||||
|
||||
Scope: {{scope}}
|
||||
Focus: {{focus}}
|
||||
|
||||
Findings:
|
||||
- P0 (critical): <count>
|
||||
- P1 (high): <count>
|
||||
- P2 (medium): <count>
|
||||
- P3 (low): <count>
|
||||
|
||||
Beads filed:
|
||||
<list of bead IDs>
|
||||
|
||||
Overall assessment:
|
||||
<brief summary - healthy, needs attention, significant issues, etc.>"
|
||||
```
|
||||
|
||||
**2. Sync beads:**
|
||||
```bash
|
||||
bd sync
|
||||
```
|
||||
|
||||
**Exit criteria:** Tracking issue updated with summary."""
|
||||
|
||||
[[steps]]
|
||||
id = "complete-and-exit"
|
||||
title = "Complete review and self-clean"
|
||||
needs = ["summarize-review"]
|
||||
description = """
|
||||
Signal completion and clean up. You cease to exist after this step.
|
||||
|
||||
**Self-Cleaning Model:**
|
||||
Once you run `gt done`, you're gone. The command:
|
||||
1. Syncs beads (final sync)
|
||||
2. Nukes your sandbox
|
||||
3. Exits your session immediately
|
||||
|
||||
**Run gt done:**
|
||||
```bash
|
||||
gt done
|
||||
```
|
||||
|
||||
**What happens next (not your concern):**
|
||||
- Other polecats may be assigned to fix the issues you found
|
||||
- Witness may escalate critical findings
|
||||
- The codebase improves based on your findings
|
||||
|
||||
You are NOT involved in any of that. You're gone. Done means gone.
|
||||
|
||||
**Exit criteria:** Beads synced, sandbox nuked, session exited."""
|
||||
|
||||
[vars]
|
||||
[vars.scope]
|
||||
description = "What to review - file path, directory, or description"
|
||||
required = true
|
||||
|
||||
[vars.issue]
|
||||
description = "The tracking issue for this review task"
|
||||
required = true
|
||||
|
||||
[vars.focus]
|
||||
description = "Optional focus area (security, performance, correctness, etc.)"
|
||||
required = false
|
||||
283
.beads/formulas/mol-polecat-review-pr.formula.toml
Normal file
283
.beads/formulas/mol-polecat-review-pr.formula.toml
Normal file
@@ -0,0 +1,283 @@
|
||||
description = """
|
||||
Review an external PR and decide on merge/reject/revise.
|
||||
|
||||
This molecule guides a polecat through reviewing a pull request from an external
|
||||
contributor. The polecat reviews code quality, tests, and alignment with project
|
||||
standards, then approves, requests changes, or files followup beads.
|
||||
|
||||
## Polecat Contract (Self-Cleaning Model)
|
||||
|
||||
You are a self-cleaning worker. You:
|
||||
1. Receive work via your hook (pinned molecule + PR reference)
|
||||
2. Work through molecule steps using `bd ready` / `bd close <step>`
|
||||
3. Complete and self-clean via `gt done` (submit findings + nuke yourself)
|
||||
4. You are GONE - your review is recorded in beads
|
||||
|
||||
**Self-cleaning:** When you run `gt done`, you submit your findings, nuke your
|
||||
sandbox, and exit. There is no idle state. Done means gone.
|
||||
|
||||
**Important:** This formula defines the template. Your molecule already has step
|
||||
beads created from it. Use `bd ready` to find them - do NOT read this file directly.
|
||||
|
||||
**You do NOT:**
|
||||
- Merge the PR yourself (maintainer or Refinery does that)
|
||||
- Push to the PR branch (it's external)
|
||||
- Wait for contributor response (you're done after review)
|
||||
|
||||
## Variables
|
||||
|
||||
| Variable | Source | Description |
|
||||
|----------|--------|-------------|
|
||||
| pr_url | hook_bead | The PR URL to review |
|
||||
| issue | hook_bead | The tracking issue for this review task |
|
||||
|
||||
## Failure Modes
|
||||
|
||||
| Situation | Action |
|
||||
|-----------|--------|
|
||||
| PR is stale/unmergeable | Note in review, request rebase |
|
||||
| Tests fail | Note in review, request fixes |
|
||||
| Major issues found | File followup beads, request changes |
|
||||
| Unclear requirements | Mail Witness for guidance |"""
|
||||
formula = "mol-polecat-review-pr"
|
||||
version = 1
|
||||
|
||||
[[steps]]
|
||||
id = "load-context"
|
||||
title = "Load context and understand the PR"
|
||||
description = """
|
||||
Initialize your session and understand the PR you're reviewing.
|
||||
|
||||
**1. Prime your environment:**
|
||||
```bash
|
||||
gt prime # Load role context
|
||||
bd prime # Load beads context
|
||||
```
|
||||
|
||||
**2. Check your hook:**
|
||||
```bash
|
||||
gt hook # Shows your pinned molecule and hook_bead
|
||||
```
|
||||
|
||||
The hook_bead references the PR to review. Read the tracking issue:
|
||||
```bash
|
||||
bd show {{issue}} # Full issue details including PR URL
|
||||
```
|
||||
|
||||
**3. Fetch the PR:**
|
||||
```bash
|
||||
gh pr view {{pr_url}} --json title,body,author,files,commits
|
||||
gh pr diff {{pr_url}} # See the actual changes
|
||||
```
|
||||
|
||||
**4. Understand the PR:**
|
||||
- What is the PR trying to accomplish?
|
||||
- What files are changed?
|
||||
- Is there a linked issue?
|
||||
- Does the PR description explain the "why"?
|
||||
|
||||
**5. Check PR status:**
|
||||
```bash
|
||||
gh pr checks {{pr_url}} # CI status
|
||||
gh pr view {{pr_url}} --json mergeable,reviewDecision
|
||||
```
|
||||
|
||||
**Exit criteria:** You understand the PR's purpose and scope."""
|
||||
|
||||
[[steps]]
|
||||
id = "review-code"
|
||||
title = "Review the code changes"
|
||||
needs = ["load-context"]
|
||||
description = """
|
||||
Perform a thorough code review of the PR.
|
||||
|
||||
**1. Review the diff systematically:**
|
||||
```bash
|
||||
gh pr diff {{pr_url}}
|
||||
```
|
||||
|
||||
**2. Check for common issues:**
|
||||
|
||||
| Category | Look For |
|
||||
|----------|----------|
|
||||
| Correctness | Logic errors, edge cases, null handling |
|
||||
| Security | Injection, auth bypass, exposed secrets |
|
||||
| Style | Naming, formatting, consistency with codebase |
|
||||
| Tests | Are changes tested? Do tests cover edge cases? |
|
||||
| Docs | Are docs updated if needed? |
|
||||
| Scope | Does PR stay focused? Any scope creep? |
|
||||
|
||||
**3. For each file changed:**
|
||||
- Does the change make sense?
|
||||
- Is it consistent with existing patterns?
|
||||
- Are there any red flags?
|
||||
|
||||
**4. Note issues found:**
|
||||
Keep a running list of:
|
||||
- Blocking issues (must fix before merge)
|
||||
- Suggestions (nice to have)
|
||||
- Questions (need clarification)
|
||||
|
||||
**Exit criteria:** You have reviewed all changes and noted issues."""
|
||||
|
||||
[[steps]]
|
||||
id = "check-tests"
|
||||
title = "Verify tests and CI"
|
||||
needs = ["review-code"]
|
||||
description = """
|
||||
Ensure tests pass and coverage is adequate.
|
||||
|
||||
**1. Check CI status:**
|
||||
```bash
|
||||
gh pr checks {{pr_url}}
|
||||
```
|
||||
|
||||
All required checks should pass. If not, note which are failing.
|
||||
|
||||
**2. Review test changes:**
|
||||
- Are there new tests for new functionality?
|
||||
- Do tests cover edge cases?
|
||||
- Are tests readable and maintainable?
|
||||
|
||||
**3. If tests are missing:**
|
||||
Note this as a blocking issue - new code should have tests.
|
||||
|
||||
**4. Check for test-only changes:**
|
||||
If PR is test-only, ensure tests are meaningful and not just
|
||||
padding coverage numbers.
|
||||
|
||||
**Exit criteria:** You've verified test status and coverage."""
|
||||
|
||||
[[steps]]
|
||||
id = "make-decision"
|
||||
title = "Decide: approve, request changes, or needs discussion"
|
||||
needs = ["check-tests"]
|
||||
description = """
|
||||
Make your review decision.
|
||||
|
||||
**Decision matrix:**
|
||||
|
||||
| Situation | Decision |
|
||||
|-----------|----------|
|
||||
| Clean code, tests pass, good scope | APPROVE |
|
||||
| Minor issues, easily fixed | REQUEST_CHANGES (with specific feedback) |
|
||||
| Major issues, needs rework | REQUEST_CHANGES (with detailed explanation) |
|
||||
| Unclear requirements or scope | NEEDS_DISCUSSION (mail Witness) |
|
||||
| Security concern | BLOCK (mail Witness immediately) |
|
||||
|
||||
**1. If APPROVE:**
|
||||
The PR is ready to merge. Note any minor suggestions as comments
|
||||
but don't block on them.
|
||||
|
||||
**2. If REQUEST_CHANGES:**
|
||||
Be specific about what needs to change. Provide examples if helpful.
|
||||
The contributor should be able to act on your feedback.
|
||||
|
||||
**3. If NEEDS_DISCUSSION:**
|
||||
```bash
|
||||
gt mail send {{rig}}/witness -s "PR review needs discussion" -m "PR: {{pr_url}}
|
||||
Issue: {{issue}}
|
||||
Question: <what needs clarification>"
|
||||
```
|
||||
|
||||
**4. If BLOCK (security):**
|
||||
```bash
|
||||
gt mail send {{rig}}/witness -s "SECURITY: PR blocked" -m "PR: {{pr_url}}
|
||||
Issue: {{issue}}
|
||||
Concern: <security issue found>"
|
||||
```
|
||||
|
||||
**Exit criteria:** You've made a clear decision with rationale."""
|
||||
|
||||
[[steps]]
|
||||
id = "submit-review"
|
||||
title = "Submit the review on GitHub"
|
||||
needs = ["make-decision"]
|
||||
description = """
|
||||
Submit your review via GitHub.
|
||||
|
||||
**1. Submit the review:**
|
||||
```bash
|
||||
# For APPROVE:
|
||||
gh pr review {{pr_url}} --approve --body "LGTM. <brief summary of what's good>"
|
||||
|
||||
# For REQUEST_CHANGES:
|
||||
gh pr review {{pr_url}} --request-changes --body "<detailed feedback>"
|
||||
|
||||
# For COMMENT (needs discussion):
|
||||
gh pr review {{pr_url}} --comment --body "<questions or discussion points>"
|
||||
```
|
||||
|
||||
**2. Add inline comments if needed:**
|
||||
If you have specific line-by-line feedback, add those via GitHub UI
|
||||
or additional `gh pr comment` calls.
|
||||
|
||||
**Exit criteria:** Review submitted on GitHub."""
|
||||
|
||||
[[steps]]
|
||||
id = "file-followups"
|
||||
title = "File beads for any followup work"
|
||||
needs = ["submit-review"]
|
||||
description = """
|
||||
Create beads for any followup work discovered during review.
|
||||
|
||||
**1. For issues found that are outside PR scope:**
|
||||
```bash
|
||||
bd create --type=bug --title="Found during PR review: <description>" \
|
||||
--description="Discovered while reviewing {{pr_url}}.
|
||||
|
||||
<details of the issue>"
|
||||
```
|
||||
|
||||
**2. For improvements suggested but not required:**
|
||||
```bash
|
||||
bd create --type=task --title="Improvement: <description>" \
|
||||
--description="Suggested during review of {{pr_url}}.
|
||||
|
||||
<details of the improvement>"
|
||||
```
|
||||
|
||||
**3. Update the tracking issue:**
|
||||
```bash
|
||||
bd update {{issue}} --notes "Review complete. Decision: <APPROVE|REQUEST_CHANGES|etc>
|
||||
Followups filed: <list of bead IDs if any>"
|
||||
```
|
||||
|
||||
**Exit criteria:** All followup work captured as beads."""
|
||||
|
||||
[[steps]]
|
||||
id = "complete-and-exit"
|
||||
title = "Complete review and self-clean"
|
||||
needs = ["file-followups"]
|
||||
description = """
|
||||
Signal completion and clean up. You cease to exist after this step.
|
||||
|
||||
**Self-Cleaning Model:**
|
||||
Once you run `gt done`, you're gone. The command:
|
||||
1. Syncs beads
|
||||
2. Nukes your sandbox
|
||||
3. Exits your session immediately
|
||||
|
||||
**Run gt done:**
|
||||
```bash
|
||||
bd sync
|
||||
gt done
|
||||
```
|
||||
|
||||
**What happens next (not your concern):**
|
||||
- Maintainer or Refinery acts on your review
|
||||
- Contributor responds to feedback
|
||||
- PR gets merged, revised, or closed
|
||||
|
||||
You are NOT involved in any of that. You're gone. Done means gone.
|
||||
|
||||
**Exit criteria:** Beads synced, sandbox nuked, session exited."""
|
||||
|
||||
[vars]
|
||||
[vars.pr_url]
|
||||
description = "The PR URL to review"
|
||||
required = true
|
||||
|
||||
[vars.issue]
|
||||
description = "The tracking issue for this review task"
|
||||
required = true
|
||||
@@ -1,26 +1,29 @@
|
||||
description = """
|
||||
Full polecat work lifecycle from assignment through MR submission.
|
||||
Full polecat work lifecycle from assignment through completion.
|
||||
|
||||
This molecule guides a polecat through a complete work assignment. Each step
|
||||
has clear entry/exit criteria and specific commands to run. A polecat can
|
||||
crash after any step and resume from the last completed step.
|
||||
|
||||
## Polecat Contract (Ephemeral Model)
|
||||
## Polecat Contract (Self-Cleaning Model)
|
||||
|
||||
You are an ephemeral worker. You:
|
||||
You are a self-cleaning worker. You:
|
||||
1. Receive work via your hook (pinned molecule + issue)
|
||||
2. Work through molecule steps using `bd ready` / `bd close <step>`
|
||||
3. Submit to merge queue via `gt done`
|
||||
4. Become recyclable - Refinery handles the rest
|
||||
3. Complete and self-clean via `gt done` (submit + nuke yourself)
|
||||
4. You are GONE - Refinery merges from MQ
|
||||
|
||||
**Self-cleaning:** When you run `gt done`, you push your work, submit to MQ,
|
||||
nuke your sandbox, and exit. There is no idle state. Done means gone.
|
||||
|
||||
**Important:** This formula defines the template. Your molecule already has step
|
||||
beads created from it. Use `bd ready` to find them - do NOT read this file directly.
|
||||
|
||||
**You do NOT:**
|
||||
- Push directly to main (Refinery merges)
|
||||
- Push directly to main (Refinery merges from MQ)
|
||||
- Close your own issue (Refinery closes after merge)
|
||||
- Wait for merge (you're done at MR submission)
|
||||
- Handle rebase conflicts (Refinery dispatches fresh polecats for that)
|
||||
- Wait for merge (you're gone after `gt done`)
|
||||
- Handle rebase conflicts (Refinery spawns fresh polecats for that)
|
||||
|
||||
## Variables
|
||||
|
||||
@@ -407,30 +410,23 @@ bd sync
|
||||
|
||||
[[steps]]
|
||||
id = "submit-and-exit"
|
||||
title = "Submit to merge queue and exit"
|
||||
title = "Submit work and self-clean"
|
||||
needs = ["prepare-for-review"]
|
||||
description = """
|
||||
Submit your work to the merge queue. You become recyclable after this.
|
||||
Submit your work and clean up. You cease to exist after this step.
|
||||
|
||||
**Ephemeral Polecat Model:**
|
||||
Once you submit, you're done. The Refinery will:
|
||||
1. Process your merge request
|
||||
2. Handle rebasing (mechanical rebases done automatically)
|
||||
3. Close your issue after successful merge
|
||||
4. Create conflict-resolution tasks if needed (fresh polecat handles those)
|
||||
**Self-Cleaning Model:**
|
||||
Once you run `gt done`, you're gone. The command:
|
||||
1. Pushes your branch to origin
|
||||
2. Creates an MR bead in the merge queue
|
||||
3. Nukes your sandbox (worktree removal)
|
||||
4. Exits your session immediately
|
||||
|
||||
**1. Submit with gt done:**
|
||||
**Run gt done:**
|
||||
```bash
|
||||
gt done
|
||||
```
|
||||
|
||||
This single command:
|
||||
- Creates an MR bead in the merge queue
|
||||
- Notifies the Witness (POLECAT_DONE)
|
||||
- Updates your agent state to 'done'
|
||||
- Reports cleanup status (ZFC compliance)
|
||||
|
||||
**2. Verify submission:**
|
||||
You should see output like:
|
||||
```
|
||||
✓ Work submitted to merge queue
|
||||
@@ -438,20 +434,19 @@ You should see output like:
|
||||
Source: polecat/<name>
|
||||
Target: main
|
||||
Issue: {{issue}}
|
||||
✓ Sandbox nuked
|
||||
✓ Session exiting
|
||||
```
|
||||
|
||||
**3. You're recyclable:**
|
||||
Your work is in the queue. The Witness knows you're done.
|
||||
Your sandbox can be cleaned up - all work is pushed to origin.
|
||||
**What happens next (not your concern):**
|
||||
- Refinery processes your MR from the queue
|
||||
- Refinery rebases and merges to main
|
||||
- Refinery closes the issue
|
||||
- If conflicts: Refinery spawns a FRESH polecat to re-implement
|
||||
|
||||
If you have context remaining, you may:
|
||||
- Pick up new work from `bd ready`
|
||||
- Or use `gt handoff` to cycle to a fresh session
|
||||
You are NOT involved in any of that. You're gone. Done means gone.
|
||||
|
||||
If the Refinery needs conflict resolution, it will dispatch a fresh polecat.
|
||||
You do NOT need to wait around.
|
||||
|
||||
**Exit criteria:** MR submitted, Witness notified, polecat recyclable."""
|
||||
**Exit criteria:** Work submitted, sandbox nuked, session exited."""
|
||||
|
||||
[vars]
|
||||
[vars.issue]
|
||||
|
||||
@@ -8,7 +8,7 @@ goroutine (NOT a Claude session) that runs the interrogation state machine.
|
||||
|
||||
Dogs are lightweight workers in Boot's pool (see dog-pool-architecture.md):
|
||||
- Fixed pool of 5 goroutines (configurable via GT_DOG_POOL_SIZE)
|
||||
- State persisted to ~/gt/deacon/dogs/active/<id>.json
|
||||
- State persisted to $GT_ROOT/deacon/dogs/active/<id>.json
|
||||
- Recovery on Boot restart via orphan state files
|
||||
|
||||
## State Machine
|
||||
@@ -151,7 +151,7 @@ If target doesn't exist:
|
||||
- Skip to EPITAPH with outcome=already_dead
|
||||
|
||||
**3. Initialize state file:**
|
||||
Write initial state to ~/gt/deacon/dogs/active/{dog-id}.json
|
||||
Write initial state to $GT_ROOT/deacon/dogs/active/{dog-id}.json
|
||||
|
||||
**4. Set initial attempt counter:**
|
||||
attempt = 1
|
||||
@@ -477,11 +477,11 @@ bd close {warrant_id} --reason "{epitaph_summary}"
|
||||
|
||||
**3. Move state file to completed:**
|
||||
```bash
|
||||
mv ~/gt/deacon/dogs/active/{dog-id}.json ~/gt/deacon/dogs/completed/
|
||||
mv $GT_ROOT/deacon/dogs/active/{dog-id}.json $GT_ROOT/deacon/dogs/completed/
|
||||
```
|
||||
|
||||
**4. Report to Boot:**
|
||||
Write completion file: ~/gt/deacon/dogs/active/{dog-id}.done
|
||||
Write completion file: $GT_ROOT/deacon/dogs/active/{dog-id}.done
|
||||
```json
|
||||
{
|
||||
"dog_id": "{dog-id}",
|
||||
|
||||
@@ -132,7 +132,7 @@ gt daemon rotate-logs
|
||||
gt doctor --fix
|
||||
```
|
||||
|
||||
Old logs are moved to `~/gt/logs/archive/` with timestamps.
|
||||
Old logs are moved to `$GT_ROOT/logs/archive/` with timestamps.
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
|
||||
@@ -27,7 +27,7 @@ needs = ["review"]
|
||||
title = "Test {{feature}}"
|
||||
|
||||
[[steps]]
|
||||
description = "Submit for merge. Final check: git status, git diff. Commit with clear message. Push and create PR."
|
||||
description = "Submit for merge. Final check: git status, git diff. Commit with clear message. Follow your role's git workflow for landing code."
|
||||
id = "submit"
|
||||
needs = ["test"]
|
||||
title = "Submit for merge"
|
||||
|
||||
2669
.beads/issues.jsonl
2669
.beads/issues.jsonl
File diff suppressed because it is too large
Load Diff
@@ -15,17 +15,22 @@ while read local_ref local_sha remote_ref remote_sha; do
|
||||
# Allowed branches
|
||||
;;
|
||||
*)
|
||||
echo "ERROR: Invalid branch for Gas Town agents."
|
||||
echo ""
|
||||
echo "Blocked push to: $branch"
|
||||
echo ""
|
||||
echo "Allowed branches:"
|
||||
echo " main - Crew workers push here directly"
|
||||
echo " polecat/* - Polecat working branches"
|
||||
echo " beads-sync - Beads synchronization"
|
||||
echo ""
|
||||
echo "Do NOT create PRs. Push to main or let Refinery merge polecat work."
|
||||
exit 1
|
||||
# Allow feature branches when contributing to upstream (fork workflow).
|
||||
# If an 'upstream' remote exists, this is a contribution setup where
|
||||
# feature branches are needed for PRs. See: #848
|
||||
if ! git remote get-url upstream &>/dev/null; then
|
||||
echo "ERROR: Invalid branch for Gas Town agents."
|
||||
echo ""
|
||||
echo "Blocked push to: $branch"
|
||||
echo ""
|
||||
echo "Allowed branches:"
|
||||
echo " main - Crew workers push here directly"
|
||||
echo " polecat/* - Polecat working branches"
|
||||
echo " beads-sync - Beads synchronization"
|
||||
echo ""
|
||||
echo "Do NOT create PRs. Push to main or let Refinery merge polecat work."
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
3
.github/workflows/ci.yml
vendored
3
.github/workflows/ci.yml
vendored
@@ -235,7 +235,8 @@ jobs:
|
||||
git config --global user.email "ci@gastown.test"
|
||||
|
||||
- name: Install beads (bd)
|
||||
run: go install github.com/steveyegge/beads/cmd/bd@latest
|
||||
# Pin to v0.47.1 - v0.47.2 has routing defaults that cause prefix mismatch errors
|
||||
run: go install github.com/steveyegge/beads/cmd/bd@v0.47.1
|
||||
|
||||
- name: Build gt
|
||||
run: go build -v -o gt ./cmd/gt
|
||||
|
||||
3
.github/workflows/integration.yml
vendored
3
.github/workflows/integration.yml
vendored
@@ -30,7 +30,8 @@ jobs:
|
||||
git config --global user.email "ci@gastown.test"
|
||||
|
||||
- name: Install beads (bd)
|
||||
run: go install github.com/steveyegge/beads/cmd/bd@latest
|
||||
# Pin to v0.47.1 - v0.47.2 has routing defaults that cause prefix mismatch errors
|
||||
run: go install github.com/steveyegge/beads/cmd/bd@v0.47.1
|
||||
|
||||
- name: Add to PATH
|
||||
run: echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
|
||||
|
||||
32
.github/workflows/windows-ci.yml
vendored
Normal file
32
.github/workflows/windows-ci.yml
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
name: Windows CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
pull_request:
|
||||
branches: [ main ]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: Windows Build and Unit Tests
|
||||
runs-on: windows-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.24'
|
||||
|
||||
- name: Configure Git
|
||||
run: |
|
||||
git config --global user.name "CI Bot"
|
||||
git config --global user.email "ci@gastown.test"
|
||||
|
||||
- name: Build
|
||||
run: go build -v ./cmd/gt
|
||||
|
||||
- name: Unit Tests
|
||||
run: go test -short ./...
|
||||
9
.gitignore
vendored
9
.gitignore
vendored
@@ -42,6 +42,8 @@ state.json
|
||||
.beads/mq/
|
||||
.beads/last-touched
|
||||
.beads/daemon-*.log.gz
|
||||
.beads/.sync.lock
|
||||
.beads/sync_base.jsonl
|
||||
.beads-wisp/
|
||||
|
||||
# Clone-specific CLAUDE.md (regenerated locally per clone)
|
||||
@@ -49,3 +51,10 @@ CLAUDE.md
|
||||
|
||||
# Embedded formulas are committed so `go install @latest` works
|
||||
# Run `go generate ./...` after modifying .beads/formulas/
|
||||
|
||||
# Gas Town (added by gt)
|
||||
.beads/
|
||||
.logs/
|
||||
logs/
|
||||
settings/
|
||||
.events.jsonl
|
||||
|
||||
45
AGENTS.md
45
AGENTS.md
@@ -4,47 +4,6 @@ See **CLAUDE.md** for complete agent context and instructions.
|
||||
|
||||
This file exists for compatibility with tools that look for AGENTS.md.
|
||||
|
||||
## Landing the Plane (Session Completion)
|
||||
> **Recovery**: Run `gt prime` after compaction, clear, or new session
|
||||
|
||||
**When ending a work session**, you MUST complete ALL steps below. Work is NOT complete until `git push` succeeds.
|
||||
|
||||
**MANDATORY WORKFLOW:**
|
||||
|
||||
1. **File issues for remaining work** - Create issues for anything that needs follow-up
|
||||
2. **Run quality gates** (if code changed) - Tests, linters, builds
|
||||
3. **Update issue status** - Close finished work, update in-progress items
|
||||
4. **PUSH TO REMOTE** - This is MANDATORY:
|
||||
```bash
|
||||
git pull --rebase
|
||||
bd sync
|
||||
git push
|
||||
git status # MUST show "up to date with origin"
|
||||
```
|
||||
5. **Clean up** - Clear stashes, prune remote branches
|
||||
6. **Verify** - All changes committed AND pushed
|
||||
7. **Hand off** - Provide context for next session
|
||||
|
||||
**CRITICAL RULES:**
|
||||
- Work is NOT complete until `git push` succeeds
|
||||
- NEVER stop before pushing - that leaves work stranded locally
|
||||
- NEVER say "ready to push when you are" - YOU must push
|
||||
- If push fails, resolve and retry until it succeeds
|
||||
|
||||
## Dependency Management
|
||||
|
||||
Periodically check for outdated dependencies:
|
||||
|
||||
```bash
|
||||
go list -m -u all | grep '\['
|
||||
```
|
||||
|
||||
Update direct dependencies:
|
||||
|
||||
```bash
|
||||
go get <package>@latest
|
||||
go mod tidy
|
||||
go build ./...
|
||||
go test ./...
|
||||
```
|
||||
|
||||
Check release notes for breaking changes before major version bumps.
|
||||
Full context is injected by `gt prime` at session start.
|
||||
|
||||
285
CHANGELOG.md
285
CHANGELOG.md
@@ -7,6 +7,291 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
## [0.5.0] - 2026-01-22
|
||||
|
||||
### Added
|
||||
|
||||
#### Mail Improvements
|
||||
- **Numeric index support for `gt mail read`** - Read messages by inbox position (e.g., `gt mail read 1`)
|
||||
- **`gt mail hook` alias** - Shortcut for `gt hook attach` from mail context
|
||||
- **`--body` alias for `--message`** - More intuitive flag in `gt mail send` and `gt mail reply`
|
||||
- **Multiple message IDs in delete** - `gt mail delete msg1 msg2 msg3`
|
||||
- **Positional message arg in reply** - `gt mail reply <id> "message"` without --message flag
|
||||
- **`--all` flag for inbox** - Show all messages including read
|
||||
- **Parallel inbox queries** - ~6x speedup for mail inbox
|
||||
|
||||
#### Command Aliases
|
||||
- **`gt bd`** - Alias for `gt bead`
|
||||
- **`gt work`** - Alias for `gt hook`
|
||||
- **`--comment` alias for `--reason`** - In `gt close`
|
||||
- **`read` alias for `show`** - In `gt bead`
|
||||
|
||||
#### Configuration & Agents
|
||||
- **OpenCode as built-in agent preset** - Configure with `gt config set agent opencode`
|
||||
- **Config-based role definition system** - Roles defined in config, not beads
|
||||
- **Env field in RuntimeConfig** - Custom environment variables for agent presets
|
||||
- **ShellQuote helper** - Safe env var escaping for shell commands
|
||||
|
||||
#### Infrastructure
|
||||
- **Deacon status line display** - Shows deacon icon in mayor status line
|
||||
- **Configurable polecat branch naming** - Template-based branch naming
|
||||
- **Hook registry and install command** - Manage Claude Code hooks via `gt hooks`
|
||||
- **Doctor auto-fix capability** - SessionHookCheck can auto-repair
|
||||
- **`gt orphans kill` command** - Clean up orphaned Claude processes
|
||||
- **Zombie-scan command for deacon** - tmux-verified process cleanup
|
||||
- **Initial prompt for autonomous patrol startup** - Better agent priming
|
||||
|
||||
#### Refinery & Merging
|
||||
- **Squash merge for cleaner history** - Eliminates redundant merge commits
|
||||
- **Redundant observers** - Witness and Refinery both watch convoys
|
||||
|
||||
### Fixed
|
||||
|
||||
#### Crew & Session Stability
|
||||
- **Don't kill pane processes on new sessions** - Prevents destroying fresh shells
|
||||
- **Auto-recover from stale tmux pane references** - Recreates sessions automatically
|
||||
- **Preserve GT_AGENT across session restarts** - Handoff maintains identity
|
||||
|
||||
#### Process Management
|
||||
- **KillPaneProcesses kills pane process itself** - Not just descendants
|
||||
- **Kill pane processes before all RespawnPane calls** - Prevents orphan leaks
|
||||
- **Shutdown reliability improvements** - Multiple fixes for clean shutdown
|
||||
- **Deacon spawns immediately after killing stuck session**
|
||||
|
||||
#### Convoy & Routing
|
||||
- **Pass convoy ID to convoy check command** - Correct ID propagation
|
||||
- **Multi-repo routing for custom types** - Correct beads routing across repos
|
||||
- **Normalize agent ID trailing slash** - Consistent ID handling
|
||||
|
||||
#### Miscellaneous
|
||||
- **Sling auto-apply mol-polecat-work** - Auto-attach on open polecat beads
|
||||
- **Wisp orphan lifecycle bug** - Proper cleanup of abandoned wisps
|
||||
- **Misclassified wisp detection** - Defense-in-depth filtering
|
||||
- **Cross-account session access in seance** - Talk to predecessors across accounts
|
||||
- **Many more bug fixes** - See git log for full details
|
||||
|
||||
## [0.4.0] - 2026-01-19
|
||||
|
||||
_Changelog not documented at release time. See git log v0.3.1..v0.4.0 for changes._
|
||||
|
||||
## [0.3.1] - 2026-01-18
|
||||
|
||||
_Changelog not documented at release time. See git log v0.3.0..v0.3.1 for changes._
|
||||
|
||||
## [0.3.0] - 2026-01-17
|
||||
|
||||
### Added
|
||||
|
||||
#### Release Automation
|
||||
- **`gastown-release` molecule formula** - Workflow for releases with preflight checks, CHANGELOG/info.go updates, local install, and daemon restart
|
||||
|
||||
#### New Commands
|
||||
- **`gt show`** - Inspect bead contents and metadata
|
||||
- **`gt cat`** - Display bead content directly
|
||||
- **`gt orphans list/kill`** - Detect and clean up orphaned Claude processes
|
||||
- **`gt convoy close`** - Manual convoy closure command
|
||||
- **`gt commit`** - Wrapper for git commit with bead awareness
|
||||
- **`gt trail`** - View commit trail for current work
|
||||
- **`gt mail ack`** - Alias for mark-read command
|
||||
|
||||
#### Plugin System
|
||||
- **Plugin discovery and management** - `gt plugin run`, `gt plugin history`
|
||||
- **`gt dispatch --plugin`** - Execute plugins via dispatch command
|
||||
|
||||
#### Messaging Infrastructure (Beads-Native)
|
||||
- **Queue beads** - New bead type for message queues
|
||||
- **Channel beads** - Pub/sub messaging with retention
|
||||
- **Group beads** - Group management for messaging
|
||||
- **Address resolution** - Resolve agent addresses for mail routing
|
||||
- **`gt mail claim`** - Claim messages from queues
|
||||
|
||||
#### Agent Identity
|
||||
- **`gt polecat identity show`** - Display CV summary for agents
|
||||
- **Worktree setup hooks** - Inject local configurations into worktrees
|
||||
|
||||
#### Performance & Reliability
|
||||
- **Parallel agent startup** - Faster boot with concurrency limit
|
||||
- **Event-driven convoy completion** - Deacon checks convoy status on events
|
||||
- **Automatic orphan cleanup** - Detect and kill orphaned Claude processes
|
||||
- **Namepool auto-theming** - Themes selected per rig based on name hash
|
||||
|
||||
### Changed
|
||||
|
||||
- **MR tracking via beads** - Removed mrqueue package, MRs now stored as beads
|
||||
- **Desire-path commands** - Added agent ergonomics shortcuts
|
||||
- **Explicit escalation in templates** - Polecat templates include escalation instructions
|
||||
- **NamePool state is transient** - InUse state no longer persisted to config
|
||||
|
||||
### Fixed
|
||||
|
||||
#### Process Management
|
||||
- **Kill process tree on shutdown** - Prevents orphaned Claude processes
|
||||
- **Explicit pane process kill** - Prevents setsid orphans in tmux
|
||||
- **Session survival verification** - Verify session survives startup before returning
|
||||
- **Batch session queries** - Improved performance in `gt down`
|
||||
- **Prevent tmux server exit** - `gt down` no longer kills tmux server
|
||||
|
||||
#### Beads & Routing
|
||||
- **Agent bead prefix alignment** - Force multi-hyphen IDs for consistency
|
||||
- **hq- prefix for town-level beads** - Groups, channels use correct prefix
|
||||
- **CreatedAt for group/channel beads** - Proper timestamps on creation
|
||||
- **Routes.jsonl protection** - Doctor check for rig-level routing issues
|
||||
- **Clear BEADS_DIR in auto-convoys** - Prevent prefix inheritance issues
|
||||
|
||||
#### Mail & Communication
|
||||
- **Channel routing in router.Send()** - Mail correctly routes to channels
|
||||
- **Filter unread in beads mode** - Correct unread message filtering
|
||||
- **Town root detection** - Use workspace.Find for consistent detection
|
||||
|
||||
#### Session & Lifecycle
|
||||
- **Idle Polecat Heresy warnings** - Templates warn against idle waiting
|
||||
- **Direct push prohibition for polecats** - Explicit in templates
|
||||
- **Handoff working directory** - Use correct witness directory
|
||||
- **Dead polecat handling in sling** - Detect and handle dead polecats
|
||||
- **gt done self-cleaning** - Kill tmux session on completion
|
||||
|
||||
#### Doctor & Diagnostics
|
||||
- **Zombie session detection** - Detect dead Claude processes in tmux
|
||||
- **sqlite3 availability check** - Verify sqlite3 is installed
|
||||
- **Clone divergence check** - Remove blocking git fetch
|
||||
|
||||
#### Build & Platform
|
||||
- **Windows build support** - Platform-specific process/signal handling
|
||||
- **macOS codesigning** - Sign binary after install
|
||||
|
||||
### Documentation
|
||||
|
||||
- **Idle Polecat Heresy** - Document the anti-pattern of waiting for work
|
||||
- **Bead ID vs Issue ID** - Clarify terminology in README
|
||||
- **Explicit escalation** - Add escalation guidance to polecat templates
|
||||
- **Getting Started placement** - Fix README section ordering
|
||||
|
||||
## [0.2.6] - 2026-01-12
|
||||
|
||||
### Added
|
||||
|
||||
#### Escalation System
|
||||
- **Unified escalation system** - Complete escalation implementation with severity levels, routing, and tracking (gt-i9r20)
|
||||
- **Escalation config schema alignment** - Configuration now matches design doc specifications
|
||||
|
||||
#### Agent Identity & Management
|
||||
- **`gt polecat identity` subcommand group** - Agent bead management commands for polecat lifecycle
|
||||
- **AGENTS.md fallback copy** - Polecats automatically copy AGENTS.md from mayor/rig for context bootstrapping
|
||||
- **`--debug` flag for `gt crew at`** - Debug mode for crew attachment troubleshooting
|
||||
- **Boot role detection in priming** - Proper context injection for boot role agents (#370)
|
||||
|
||||
#### Statusline Improvements
|
||||
- **Per-agent-type health tracking** - Statusline now shows health status per agent type (#344)
|
||||
- **Visual rig grouping** - Rigs sorted by activity with visual grouping in tmux statusline (#337)
|
||||
|
||||
#### Mail & Communication
|
||||
- **`gt mail show` alias** - Alternative command for reading mail (#340)
|
||||
|
||||
#### Developer Experience
|
||||
- **`gt stale` command** - Check for stale binaries and version mismatches
|
||||
|
||||
### Changed
|
||||
|
||||
- **Refactored statusline** - Merged session loops and removed dead code for cleaner implementation
|
||||
- **Refactored sling.go** - Split 1560-line file into 7 focused modules for maintainability
|
||||
- **Magic numbers extracted** - Suggest package now uses named constants (#353)
|
||||
|
||||
### Fixed
|
||||
|
||||
#### Configuration & Environment
|
||||
- **Empty GT_ROOT/BEADS_DIR not exported** - AgentEnv no longer exports empty environment variables (#385)
|
||||
- **Inherited BEADS_DIR prefix mismatch** - Prevent inherited BEADS_DIR from causing prefix mismatches (#321)
|
||||
|
||||
#### Beads & Routing
|
||||
- **routes.jsonl corruption prevention** - Added protection against routes.jsonl corruption with doctor check for rig-level issues (#377)
|
||||
- **Tracked beads init after clone** - Initialize beads database for tracked beads after git clone (#376)
|
||||
- **Rig root from BeadsPath()** - Correctly return rig root to respect redirect system
|
||||
|
||||
#### Sling & Formula
|
||||
- **Feature and issue vars in formula-on-bead mode** - Pass both variables correctly (#382)
|
||||
- **Crew member shorthand resolution** - Resolve crew members correctly with shorthand paths
|
||||
- **Removed obsolete --naked flag** - Cleanup of deprecated sling option
|
||||
|
||||
#### Doctor & Diagnostics
|
||||
- **Role beads check with shared definitions** - Doctor now validates role beads using shared role definitions (#378)
|
||||
- **Filter bd "Note:" messages** - Custom types check no longer confused by bd informational output (#381)
|
||||
|
||||
#### Installation & Setup
|
||||
- **gt:role label on role beads** - Role beads now properly labeled during creation (#383)
|
||||
- **Fetch origin after refspec config** - Bare clones now fetch after configuring refspec (#384)
|
||||
- **Allow --wrappers in existing town** - No longer recreates HQ unnecessarily (#366)
|
||||
|
||||
#### Session & Lifecycle
|
||||
- **Fallback instructions in start/restart beacons** - Session beacons now include fallback instructions
|
||||
- **Handoff recognizes polecat session pattern** - Correctly handles gt-<rig>-<name> session names (#373)
|
||||
- **gt done resilient to missing agent beads** - No longer fails when agent beads don't exist
|
||||
- **MR beads as ephemeral wisps** - Create MR beads as ephemeral wisps for proper cleanup
|
||||
- **Auto-detect cleanup status** - Prevents premature polecat nuke (#361)
|
||||
- **Delete remote polecat branches after merge** - Refinery cleans up remote branches (#369)
|
||||
|
||||
#### Costs & Events
|
||||
- **Query all beads locations for session events** - Cost tracking finds events across locations (#374)
|
||||
|
||||
#### Linting & Quality
|
||||
- **errcheck and unparam violations resolved** - Fixed linting errors
|
||||
- **NudgeSession for all agent notifications** - Mail now uses consistent notification method
|
||||
|
||||
### Documentation
|
||||
|
||||
- **Polecat three-state model** - Clarified working/stalled/zombie states
|
||||
- **Name pool vs polecat pool** - Clarified misconception about pools
|
||||
- **Plugin and escalation system designs** - Added design documentation
|
||||
- **Documentation reorganization** - Concepts, design, and examples structure
|
||||
- **gt prime clarification** - Clarified that gt prime is context recovery, not session start (GH #308)
|
||||
- **Formula package documentation** - Comprehensive package docs
|
||||
- **Various godoc additions** - GenerateMRIDWithTime, isAutonomousRole, formatInt, nil sentinel pattern
|
||||
- **Beads issue ID format** - Clarified format in README (gt-uzx2c)
|
||||
- **Stale polecat identity description** - Fixed outdated documentation
|
||||
|
||||
### Tests
|
||||
|
||||
- **AGENTS.md worktree tests** - Test coverage for AGENTS.md in worktrees
|
||||
- **Comprehensive test coverage** - Added tests for 5 packages (#351)
|
||||
- **Sling test for bd empty output** - Fixed test for empty output handling
|
||||
|
||||
### Deprecated
|
||||
|
||||
- **`gt polecat add`** - Added migration warning for deprecated command
|
||||
|
||||
### Contributors
|
||||
|
||||
Thanks to all contributors for this release:
|
||||
- @JeremyKalmus - Various contributions (#364)
|
||||
- @boshu2 - Formula package documentation (#343), PR documentation (#352)
|
||||
- @sauerdaniel - Polecat mail notification fix (#347)
|
||||
- @abhijit360 - Assign model to role (#368)
|
||||
- @julianknutsen - Beads path fix (#334)
|
||||
|
||||
## [0.2.5] - 2026-01-11
|
||||
|
||||
### Added
|
||||
- **`gt mail mark-read`** - Mark messages as read without opening them (desire path)
|
||||
- **`gt down --polecats`** - Shut down polecats without affecting other components
|
||||
- **Self-cleaning polecat model** - Polecats self-nuke on completion, witness tracks leases
|
||||
- **`gt prime --state` validation** - Flag exclusivity checks for cleaner CLI
|
||||
|
||||
### Changed
|
||||
- **Removed `gt stop`** - Use `gt down --polecats` instead (cleaner semantics)
|
||||
- **Policy-neutral templates** - crew.md.tmpl checks remote origin for PR policy
|
||||
- **Refactored prime.go** - Split 1833-line file into logical modules
|
||||
|
||||
### Fixed
|
||||
- **Polecat re-spawn** - CreateOrReopenAgentBead handles polecat lifecycle correctly (#333)
|
||||
- **Vim mode compatibility** - tmux sends Escape before Enter for vim users
|
||||
- **Worktree default branch** - Uses rig's configured default branch (#325)
|
||||
- **Agent bead type** - Sets --type=agent when creating agent beads
|
||||
- **Bootstrap priming** - Reduced AGENTS.md to bootstrap pointer, fixed CLAUDE.md templates
|
||||
|
||||
### Documentation
|
||||
- Updated witness help text for self-cleaning model
|
||||
- Updated daemon comments for self-cleaning model
|
||||
- Policy-aware PR guidance in crew template
|
||||
|
||||
## [0.2.4] - 2026-01-10
|
||||
|
||||
Priming subsystem overhaul and Zero Framework Cognition (ZFC) improvements.
|
||||
|
||||
4
Makefile
4
Makefile
@@ -22,8 +22,8 @@ ifeq ($(shell uname),Darwin)
|
||||
@echo "Signed $(BINARY) for macOS"
|
||||
endif
|
||||
|
||||
install: build
|
||||
cp $(BUILD_DIR)/$(BINARY) ~/.local/bin/$(BINARY)
|
||||
install: generate
|
||||
go install -ldflags "$(LDFLAGS)" ./cmd/gt
|
||||
|
||||
clean:
|
||||
rm -f $(BUILD_DIR)/$(BINARY)
|
||||
|
||||
57
README.md
57
README.md
@@ -71,12 +71,14 @@ Git worktree-based persistent storage for agent work. Survives crashes and resta
|
||||
|
||||
### Convoys 🚚
|
||||
|
||||
Work tracking units. Bundle multiple issues/tasks that get assigned to agents.
|
||||
Work tracking units. Bundle multiple beads that get assigned to agents.
|
||||
|
||||
### Beads Integration 📿
|
||||
|
||||
Git-backed issue tracking system that stores work state as structured data.
|
||||
|
||||
**Bead IDs** (also called **issue IDs**) use a prefix + 5-character alphanumeric format (e.g., `gt-abc12`, `hq-x7k2m`). The prefix indicates the item's origin or rig. Commands like `gt sling` and `gt convoy` accept these IDs to reference specific work items. The terms "bead" and "issue" are used interchangeably—beads are the underlying data format, while issues are the work items stored as beads.
|
||||
|
||||
> **New to Gas Town?** See the [Glossary](docs/glossary.md) for a complete guide to terminology and concepts.
|
||||
|
||||
## Installation
|
||||
@@ -86,6 +88,7 @@ Git-backed issue tracking system that stores work state as structured data.
|
||||
- **Go 1.23+** - [go.dev/dl](https://go.dev/dl/)
|
||||
- **Git 2.25+** - for worktree support
|
||||
- **beads (bd) 0.44.0+** - [github.com/steveyegge/beads](https://github.com/steveyegge/beads) (required for custom type support)
|
||||
- **sqlite3** - for convoy database queries (usually pre-installed on macOS/Linux)
|
||||
- **tmux 3.0+** - recommended for full experience
|
||||
- **Claude Code CLI** (default runtime) - [claude.ai/code](https://claude.ai/code)
|
||||
- **Codex CLI** (optional runtime) - [developers.openai.com/codex/cli](https://developers.openai.com/codex/cli)
|
||||
@@ -116,6 +119,18 @@ gt mayor attach
|
||||
|
||||
## Quick Start Guide
|
||||
|
||||
### Getting Started
|
||||
Run
|
||||
```shell
|
||||
gt install ~/gt --git &&
|
||||
cd ~/gt &&
|
||||
gt config agent list &&
|
||||
gt mayor attach
|
||||
```
|
||||
and tell the Mayor what you want to build!
|
||||
|
||||
---
|
||||
|
||||
### Basic Workflow
|
||||
|
||||
```mermaid
|
||||
@@ -127,8 +142,8 @@ sequenceDiagram
|
||||
participant Hook
|
||||
|
||||
You->>Mayor: Tell Mayor what to build
|
||||
Mayor->>Convoy: Create convoy with issues
|
||||
Mayor->>Agent: Sling issue to agent
|
||||
Mayor->>Convoy: Create convoy with beads
|
||||
Mayor->>Agent: Sling bead to agent
|
||||
Agent->>Hook: Store work state
|
||||
Agent->>Agent: Complete work
|
||||
Agent->>Convoy: Report completion
|
||||
@@ -141,11 +156,11 @@ sequenceDiagram
|
||||
# 1. Start the Mayor
|
||||
gt mayor attach
|
||||
|
||||
# 2. In Mayor session, create a convoy
|
||||
gt convoy create "Feature X" issue-123 issue-456 --notify --human
|
||||
# 2. In Mayor session, create a convoy with bead IDs
|
||||
gt convoy create "Feature X" gt-abc12 gt-def34 --notify --human
|
||||
|
||||
# 3. Assign work to an agent
|
||||
gt sling issue-123 myproject
|
||||
gt sling gt-abc12 myproject
|
||||
|
||||
# 4. Track progress
|
||||
gt convoy list
|
||||
@@ -177,7 +192,7 @@ flowchart LR
|
||||
gt mayor attach
|
||||
|
||||
# In Mayor, create convoy and let it orchestrate
|
||||
gt convoy create "Auth System" issue-101 issue-102 --notify
|
||||
gt convoy create "Auth System" gt-x7k2m gt-p9n4q --notify
|
||||
|
||||
# Track progress
|
||||
gt convoy list
|
||||
@@ -188,8 +203,8 @@ gt convoy list
|
||||
Run individual runtime instances manually. Gas Town just tracks state.
|
||||
|
||||
```bash
|
||||
gt convoy create "Fix bugs" issue-123 # Create convoy (sling auto-creates if skipped)
|
||||
gt sling issue-123 myproject # Assign to worker
|
||||
gt convoy create "Fix bugs" gt-abc12 # Create convoy (sling auto-creates if skipped)
|
||||
gt sling gt-abc12 myproject # Assign to worker
|
||||
claude --resume # Agent reads mail, runs work (Claude)
|
||||
# or: codex # Start Codex in the workspace
|
||||
gt convoy list # Check progress
|
||||
@@ -263,11 +278,11 @@ bd mol pour release --var version=1.2.0
|
||||
# Create convoy manually
|
||||
gt convoy create "Bug Fixes" --human
|
||||
|
||||
# Add issues
|
||||
gt convoy add-issue bug-101 bug-102
|
||||
# Add issues to existing convoy
|
||||
gt convoy add hq-cv-abc gt-m3k9p gt-w5t2x
|
||||
|
||||
# Assign to specific agents
|
||||
gt sling bug-101 myproject/my-agent
|
||||
gt sling gt-m3k9p myproject/my-agent
|
||||
|
||||
# Check status
|
||||
gt convoy show
|
||||
@@ -312,11 +327,11 @@ gt crew add <name> --rig <rig> # Create crew workspace
|
||||
|
||||
```bash
|
||||
gt agents # List active agents
|
||||
gt sling <issue> <rig> # Assign work to agent
|
||||
gt sling <issue> <rig> --agent cursor # Override runtime for this sling/spawn
|
||||
gt sling <bead-id> <rig> # Assign work to agent
|
||||
gt sling <bead-id> <rig> --agent cursor # Override runtime for this sling/spawn
|
||||
gt mayor attach # Start Mayor session
|
||||
gt mayor start --agent auggie # Run Mayor with a specific agent alias
|
||||
gt prime # Alternative to mayor attach
|
||||
gt prime # Context recovery (run inside existing session)
|
||||
```
|
||||
|
||||
**Built-in agent presets**: `claude`, `gemini`, `codex`, `cursor`, `auggie`, `amp`
|
||||
@@ -324,10 +339,10 @@ gt prime # Alternative to mayor attach
|
||||
### Convoy (Work Tracking)
|
||||
|
||||
```bash
|
||||
gt convoy create <name> [issues...] # Create convoy
|
||||
gt convoy create <name> [issues...] # Create convoy with issues
|
||||
gt convoy list # List all convoys
|
||||
gt convoy show [id] # Show convoy details
|
||||
gt convoy add-issue <issue> # Add issue to convoy
|
||||
gt convoy add <convoy-id> <issue-id...> # Add issues to convoy
|
||||
```
|
||||
|
||||
### Configuration
|
||||
@@ -406,9 +421,9 @@ MEOW is the recommended pattern:
|
||||
|
||||
1. **Tell the Mayor** - Describe what you want
|
||||
2. **Mayor analyzes** - Breaks down into tasks
|
||||
3. **Convoy creation** - Mayor creates convoy with issues
|
||||
3. **Convoy creation** - Mayor creates convoy with beads
|
||||
4. **Agent spawning** - Mayor spawns appropriate agents
|
||||
5. **Work distribution** - Issues slung to agents via hooks
|
||||
5. **Work distribution** - Beads slung to agents via hooks
|
||||
6. **Progress monitoring** - Track through convoy status
|
||||
7. **Completion** - Mayor summarizes results
|
||||
|
||||
@@ -475,7 +490,3 @@ gt mayor attach
|
||||
## License
|
||||
|
||||
MIT License - see LICENSE file for details
|
||||
|
||||
---
|
||||
|
||||
**Getting Started:** Run `gt install ~/gt --git && cd ~/gt && gt config agent list && gt mayor attach` (or `gt mayor attach --agent codex`) and tell the Mayor what you want to build!
|
||||
|
||||
57
cmd/gt/build_test.go
Normal file
57
cmd/gt/build_test.go
Normal file
@@ -0,0 +1,57 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestCrossPlatformBuild verifies that the codebase compiles for all supported
// platforms. This catches cases where platform-specific code (using build tags
// like //go:build !windows) is called from platform-agnostic code without
// providing stubs for all platforms.
//
// Each platform is built in a parallel subtest via `go build` with GOOS/GOARCH
// overridden; build output is discarded (written to os.DevNull).
func TestCrossPlatformBuild(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping cross-platform build test in short mode")
	}

	// Skip if not running on a platform that can cross-compile
	// (need Go toolchain, not just running tests)
	if os.Getenv("CI") == "" && runtime.GOOS != "darwin" && runtime.GOOS != "linux" {
		t.Skip("skipping cross-platform build test on unsupported platform")
	}

	// Target matrix: every platform a release is cut for. CGO is disabled so
	// cross-compilation needs no target-platform C toolchain.
	platforms := []struct {
		goos   string
		goarch string
		cgo    string
	}{
		{"linux", "amd64", "0"},
		{"linux", "arm64", "0"},
		{"darwin", "amd64", "0"},
		{"darwin", "arm64", "0"},
		{"windows", "amd64", "0"},
		{"freebsd", "amd64", "0"},
	}

	for _, p := range platforms {
		p := p // capture range variable (required pre-Go 1.22 loop semantics)
		t.Run(p.goos+"_"+p.goarch, func(t *testing.T) {
			t.Parallel()

			// Build the current package; discard the binary.
			cmd := exec.Command("go", "build", "-o", os.DevNull, ".")
			cmd.Dir = "."
			cmd.Env = append(os.Environ(),
				"GOOS="+p.goos,
				"GOARCH="+p.goarch,
				"CGO_ENABLED="+p.cgo,
			)

			// Include err in the report: if the toolchain itself fails to
			// start, output is empty and err is the only diagnostic.
			output, err := cmd.CombinedOutput()
			if err != nil {
				t.Errorf("build failed for %s/%s: %v\n%s", p.goos, p.goarch, err, output)
			}
		})
	}
}
|
||||
@@ -44,8 +44,8 @@ sudo apt update
|
||||
sudo apt install -y git
|
||||
|
||||
# Install Go (apt version may be outdated, use official installer)
|
||||
wget https://go.dev/dl/go1.24.linux-amd64.tar.gz
|
||||
sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.24.linux-amd64.tar.gz
|
||||
wget https://go.dev/dl/go1.24.12.linux-amd64.tar.gz
|
||||
sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.24.12.linux-amd64.tar.gz
|
||||
echo 'export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin' >> ~/.bashrc
|
||||
source ~/.bashrc
|
||||
|
||||
@@ -152,7 +152,7 @@ You can also override the agent per command without changing defaults:
|
||||
|
||||
```bash
|
||||
gt start --agent codex-low
|
||||
gt sling issue-123 myproject --agent claude-haiku
|
||||
gt sling gt-abc12 myproject --agent claude-haiku
|
||||
```
|
||||
|
||||
## Minimal Mode vs Full Stack Mode
|
||||
@@ -165,8 +165,8 @@ Run individual runtime instances manually. Gas Town only tracks state.
|
||||
|
||||
```bash
|
||||
# Create and assign work
|
||||
gt convoy create "Fix bugs" issue-123
|
||||
gt sling issue-123 myproject
|
||||
gt convoy create "Fix bugs" gt-abc12
|
||||
gt sling gt-abc12 myproject
|
||||
|
||||
# Run runtime manually
|
||||
cd ~/gt/myproject/polecats/<worker>
|
||||
@@ -188,9 +188,9 @@ Agents run in tmux sessions. Daemon manages lifecycle automatically.
|
||||
gt daemon start
|
||||
|
||||
# Create and assign work (workers spawn automatically)
|
||||
gt convoy create "Feature X" issue-123 issue-456
|
||||
gt sling issue-123 myproject
|
||||
gt sling issue-456 myproject
|
||||
gt convoy create "Feature X" gt-abc12 gt-def34
|
||||
gt sling gt-abc12 myproject
|
||||
gt sling gt-def34 myproject
|
||||
|
||||
# Monitor on dashboard
|
||||
gt convoy list
|
||||
@@ -303,6 +303,6 @@ rm -rf ~/gt
|
||||
After installation:
|
||||
|
||||
1. **Read the README** - Core concepts and workflows
|
||||
2. **Try a simple workflow** - `gt convoy create "Test" test-issue`
|
||||
2. **Try a simple workflow** - `bd create "Test task"` then `gt convoy create "Test" <bead-id>`
|
||||
3. **Explore docs** - `docs/reference.md` for command reference
|
||||
4. **Run doctor regularly** - `gt doctor` catches problems early
|
||||
|
||||
201
docs/beads-native-messaging.md
Normal file
201
docs/beads-native-messaging.md
Normal file
@@ -0,0 +1,201 @@
|
||||
# Beads-Native Messaging
|
||||
|
||||
This document describes the beads-native messaging system for Gas Town, which replaces the file-based messaging configuration with persistent beads stored in the town's `.beads` directory.
|
||||
|
||||
## Overview
|
||||
|
||||
Beads-native messaging introduces three new bead types for managing communication:
|
||||
|
||||
- **Groups** (`gt:group`) - Named collections of addresses for mail distribution
|
||||
- **Queues** (`gt:queue`) - Work queues where messages can be claimed by workers
|
||||
- **Channels** (`gt:channel`) - Pub/sub broadcast streams with message retention
|
||||
|
||||
All messaging beads use the `hq-` prefix because they are town-level entities that span rigs.
|
||||
|
||||
## Bead Types
|
||||
|
||||
### Groups (`gt:group`)
|
||||
|
||||
Groups are named collections of addresses used for mail distribution. When you send to a group, the message is delivered to all members.
|
||||
|
||||
**Bead ID format:** `hq-group-<name>` (e.g., `hq-group-ops-team`)
|
||||
|
||||
**Fields:**
|
||||
- `name` - Unique group name
|
||||
- `members` - Comma-separated list of addresses, patterns, or nested group names
|
||||
- `created_by` - Who created the group (from BD_ACTOR)
|
||||
- `created_at` - ISO 8601 timestamp
|
||||
|
||||
**Member types:**
|
||||
- Direct addresses: `gastown/crew/max`, `mayor/`, `deacon/`
|
||||
- Wildcard patterns: `*/witness`, `gastown/*`, `gastown/crew/*`
|
||||
- Special patterns: `@town`, `@crew`, `@witnesses`
|
||||
- Nested groups: Reference other group names
|
||||
|
||||
### Queues (`gt:queue`)
|
||||
|
||||
Queues are work queues where messages wait to be claimed by workers. Unlike groups, each message goes to exactly one claimant.
|
||||
|
||||
**Bead ID format:** `hq-q-<name>` (town-level) or `gt-q-<name>` (rig-level)
|
||||
|
||||
**Fields:**
|
||||
- `name` - Queue name
|
||||
- `status` - `active`, `paused`, or `closed`
|
||||
- `max_concurrency` - Maximum concurrent workers (0 = unlimited)
|
||||
- `processing_order` - `fifo` or `priority`
|
||||
- `available_count` - Items ready to process
|
||||
- `processing_count` - Items currently being processed
|
||||
- `completed_count` - Items completed
|
||||
- `failed_count` - Items that failed
|
||||
|
||||
### Channels (`gt:channel`)
|
||||
|
||||
Channels are pub/sub streams for broadcasting messages. Messages are retained according to the channel's retention policy.
|
||||
|
||||
**Bead ID format:** `hq-channel-<name>` (e.g., `hq-channel-alerts`)
|
||||
|
||||
**Fields:**
|
||||
- `name` - Unique channel name
|
||||
- `subscribers` - Comma-separated list of subscribed addresses
|
||||
- `status` - `active` or `closed`
|
||||
- `retention_count` - Number of recent messages to retain (0 = unlimited)
|
||||
- `retention_hours` - Hours to retain messages (0 = forever)
|
||||
- `created_by` - Who created the channel
|
||||
- `created_at` - ISO 8601 timestamp
|
||||
|
||||
## CLI Commands
|
||||
|
||||
### Group Management
|
||||
|
||||
```bash
|
||||
# List all groups
|
||||
gt mail group list
|
||||
|
||||
# Show group details
|
||||
gt mail group show <name>
|
||||
|
||||
# Create a new group with members
|
||||
gt mail group create <name> [members...]
|
||||
gt mail group create ops-team gastown/witness gastown/crew/max
|
||||
|
||||
# Add member to group
|
||||
gt mail group add <name> <member>
|
||||
|
||||
# Remove member from group
|
||||
gt mail group remove <name> <member>
|
||||
|
||||
# Delete a group
|
||||
gt mail group delete <name>
|
||||
```
|
||||
|
||||
### Channel Management
|
||||
|
||||
```bash
|
||||
# List all channels
|
||||
gt mail channel
|
||||
gt mail channel list
|
||||
|
||||
# View channel messages
|
||||
gt mail channel <name>
|
||||
gt mail channel show <name>
|
||||
|
||||
# Create a channel with retention policy
|
||||
gt mail channel create <name> [--retain-count=N] [--retain-hours=N]
|
||||
gt mail channel create alerts --retain-count=100
|
||||
|
||||
# Delete a channel
|
||||
gt mail channel delete <name>
|
||||
```
|
||||
|
||||
### Sending Messages
|
||||
|
||||
The `gt mail send` command now supports groups, queues, and channels:
|
||||
|
||||
```bash
|
||||
# Send to a group (expands to all members)
|
||||
gt mail send my-group -s "Subject" -m "Body"
|
||||
|
||||
# Send to a queue (single message, workers claim)
|
||||
gt mail send queue:my-queue -s "Work item" -m "Details"
|
||||
|
||||
# Send to a channel (broadcast with retention)
|
||||
gt mail send channel:my-channel -s "Announcement" -m "Content"
|
||||
|
||||
# Direct address (unchanged)
|
||||
gt mail send gastown/crew/max -s "Hello" -m "World"
|
||||
```
|
||||
|
||||
## Address Resolution
|
||||
|
||||
When sending mail, addresses are resolved in this order:
|
||||
|
||||
1. **Explicit prefix** - If address starts with `group:`, `queue:`, or `channel:`, use that type directly
|
||||
2. **Contains `/`** - Treat as agent address or pattern (direct delivery)
|
||||
3. **Starts with `@`** - Special pattern (`@town`, `@crew`, etc.) or beads-native group
|
||||
4. **Name lookup** - Search for group → queue → channel by name
|
||||
|
||||
If a name matches multiple types (e.g., both a group and a channel named "alerts"), the resolver returns an error and requires an explicit prefix.
|
||||
|
||||
## Key Implementation Files
|
||||
|
||||
| File | Description |
|
||||
|------|-------------|
|
||||
| `internal/beads/beads_group.go` | Group bead CRUD operations |
|
||||
| `internal/beads/beads_queue.go` | Queue bead CRUD operations |
|
||||
| `internal/beads/beads_channel.go` | Channel bead + retention logic |
|
||||
| `internal/mail/resolve.go` | Address resolution logic |
|
||||
| `internal/cmd/mail_group.go` | Group CLI commands |
|
||||
| `internal/cmd/mail_channel.go` | Channel CLI commands |
|
||||
| `internal/cmd/mail_send.go` | Updated send with resolver |
|
||||
|
||||
## Retention Policy
|
||||
|
||||
Channels support two retention mechanisms:
|
||||
|
||||
- **Count-based** (`--retain-count=N`): Keep only the last N messages
|
||||
- **Time-based** (`--retain-hours=N`): Delete messages older than N hours
|
||||
|
||||
Retention is enforced:
|
||||
1. **On-write**: After posting a new message, old messages are pruned
|
||||
2. **On-patrol**: Deacon patrol runs `PruneAllChannels()` as a backup cleanup
|
||||
|
||||
The patrol uses a 10% buffer to avoid thrashing (only prunes if count > retainCount × 1.1).
|
||||
|
||||
## Examples
|
||||
|
||||
### Create a team distribution group
|
||||
|
||||
```bash
|
||||
# Create a group for the ops team
|
||||
gt mail group create ops-team gastown/witness gastown/crew/max deacon/
|
||||
|
||||
# Send to the group
|
||||
gt mail send ops-team -s "Team meeting" -m "Tomorrow at 10am"
|
||||
|
||||
# Add a new member
|
||||
gt mail group add ops-team gastown/crew/dennis
|
||||
```
|
||||
|
||||
### Set up an alerts channel
|
||||
|
||||
```bash
|
||||
# Create an alerts channel that keeps last 50 messages
|
||||
gt mail channel create alerts --retain-count=50
|
||||
|
||||
# Send an alert
|
||||
gt mail send channel:alerts -s "Build failed" -m "See CI for details"
|
||||
|
||||
# View recent alerts
|
||||
gt mail channel alerts
|
||||
```
|
||||
|
||||
### Create nested groups
|
||||
|
||||
```bash
|
||||
# Create role-based groups
|
||||
gt mail group create witnesses */witness
|
||||
gt mail group create leads gastown/crew/max gastown/crew/dennis
|
||||
|
||||
# Create a group that includes other groups
|
||||
gt mail group create all-hands witnesses leads mayor/
|
||||
```
|
||||
@@ -51,6 +51,7 @@ so you can see when it lands and what was included.
|
||||
|---------|-------------|-----|-------------|
|
||||
| **Convoy** | Yes | hq-cv-* | Tracking unit. What you create, track, get notified about. |
|
||||
| **Swarm** | No | None | Ephemeral. "The workers currently on this convoy's issues." |
|
||||
| **Stranded Convoy** | Yes | hq-cv-* | A convoy with ready work but no polecats assigned. Needs attention. |
|
||||
|
||||
When you "kick off a swarm", you're really:
|
||||
1. Creating a convoy (the tracking unit)
|
||||
@@ -223,4 +224,4 @@ Use rig status for "what's everyone in this rig working on?"
|
||||
## See Also
|
||||
|
||||
- [Propulsion Principle](propulsion-principle.md) - Worker execution model
|
||||
- [Mail Protocol](mail-protocol.md) - Notification delivery
|
||||
- [Mail Protocol](../design/mail-protocol.md) - Notification delivery
|
||||
@@ -205,13 +205,22 @@ steve@example.com ← global identity (from git author)
|
||||
|
||||
**Agents execute. Humans own.** The polecat name in `completed-by: gastown/polecats/toast` is executor attribution. The CV credits the human owner (`steve@example.com`).
|
||||
|
||||
### Polecats Are Ephemeral
|
||||
### Polecats Have Persistent Identities
|
||||
|
||||
Polecats are like K8s pods - ephemeral executors with no persistent identity:
|
||||
- Named pool for human convenience (furiosa, nux, slit)
|
||||
- Names are transient - reused after cleanup
|
||||
- No persistent polecat CV
|
||||
- Work credits the human owner
|
||||
Polecats have **persistent identities but ephemeral sessions**. Like employees who
|
||||
clock in/out: each work session is fresh (new tmux, new worktree), but the identity
|
||||
persists across sessions.
|
||||
|
||||
- **Identity (persistent)**: Agent bead, CV chain, work history
|
||||
- **Session (ephemeral)**: Claude instance, context window
|
||||
- **Sandbox (ephemeral)**: Git worktree, branch
|
||||
|
||||
Work credits the polecat identity, enabling:
|
||||
- Performance tracking per polecat
|
||||
- Capability-based routing (send Go work to polecats with Go track records)
|
||||
- Model comparison (A/B test different models via different polecats)
|
||||
|
||||
See [polecat-lifecycle.md](polecat-lifecycle.md#polecat-identity) for details.
|
||||
|
||||
### Skills Are Derived
|
||||
|
||||
@@ -25,6 +25,7 @@ Protomolecule (frozen template) ─── Solid
|
||||
| **Molecule** | Active workflow instance with trackable steps |
|
||||
| **Wisp** | Ephemeral molecule for patrol cycles (never synced) |
|
||||
| **Digest** | Squashed summary of completed molecule |
|
||||
| **Shiny Workflow** | Canonical polecat formula: design → implement → review → test → submit |
|
||||
|
||||
## Common Mistake: Reading Formulas Directly
|
||||
|
||||
@@ -154,9 +155,54 @@ gt mol squash # Squash attached molecule
|
||||
gt mol step done <step> # Complete a molecule step
|
||||
```
|
||||
|
||||
## Polecat Workflow
|
||||
|
||||
Polecats receive work via their hook - a pinned molecule attached to an issue.
|
||||
They execute molecule steps sequentially, closing each step as they complete it.
|
||||
|
||||
### Molecule Types for Polecats
|
||||
|
||||
| Type | Storage | Use Case |
|
||||
|------|---------|----------|
|
||||
| **Regular Molecule** | `.beads/` (synced) | Discrete deliverables, audit trail |
|
||||
| **Wisp** | `.beads/` (ephemeral) | Patrol cycles, operational loops |
|
||||
|
||||
Polecats typically use **regular molecules** because each assignment has audit value.
|
||||
Patrol agents (Witness, Refinery, Deacon) use **wisps** to prevent accumulation.
|
||||
|
||||
### Hook Management
|
||||
|
||||
```bash
|
||||
gt hook # What's on MY hook?
|
||||
gt mol attach-from-mail <id> # Attach work from mail message
|
||||
gt done # Signal completion (syncs, submits to MQ, notifies Witness)
|
||||
```
|
||||
|
||||
### Polecat Workflow Summary
|
||||
|
||||
```
|
||||
1. Spawn with work on hook
|
||||
2. gt hook # What's hooked?
|
||||
3. bd mol current # Where am I?
|
||||
4. Execute current step
|
||||
5. bd close <step> --continue
|
||||
6. If more steps: GOTO 3
|
||||
7. gt done # Signal completion
|
||||
```
|
||||
|
||||
### Wisp vs Molecule Decision
|
||||
|
||||
| Question | Molecule | Wisp |
|
||||
|----------|----------|------|
|
||||
| Does it need audit trail? | Yes | No |
|
||||
| Will it repeat continuously? | No | Yes |
|
||||
| Is it discrete deliverable? | Yes | No |
|
||||
| Is it operational routine? | No | Yes |
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Use `--continue` for propulsion** - Keep momentum by auto-advancing
|
||||
2. **Check progress with `bd mol current`** - Know where you are before resuming
|
||||
3. **Squash completed molecules** - Create digests for audit trail
|
||||
4. **Burn routine wisps** - Don't accumulate ephemeral patrol data
|
||||
1. **CRITICAL: Close steps in real-time** - Mark `in_progress` BEFORE starting, `closed` IMMEDIATELY after completing. Never batch-close steps at the end. Molecules ARE the ledger - each step closure is a timestamped CV entry. Batch-closing corrupts the timeline and violates HOP's core promise.
|
||||
2. **Use `--continue` for propulsion** - Keep momentum by auto-advancing
|
||||
3. **Check progress with `bd mol current`** - Know where you are before resuming
|
||||
4. **Squash completed molecules** - Create digests for audit trail
|
||||
5. **Burn routine wisps** - Don't accumulate ephemeral patrol data
|
||||
@@ -5,8 +5,56 @@
|
||||
## Overview
|
||||
|
||||
Polecats have three distinct lifecycle layers that operate independently. Confusing
|
||||
these layers leads to heresies like "idle polecats" and misunderstanding when
|
||||
recycling occurs.
|
||||
these layers leads to "heresies" like thinking there are "idle polecats" and
|
||||
misunderstanding when recycling occurs.
|
||||
|
||||
## The Three Operating States
|
||||
|
||||
Polecats have exactly three operating states. There is **no idle pool**.
|
||||
|
||||
| State | Description | How it happens |
|
||||
|-------|-------------|----------------|
|
||||
| **Working** | Actively doing assigned work | Normal operation |
|
||||
| **Stalled** | Session stopped mid-work | Interrupted, crashed, or timed out without being nudged |
|
||||
| **Zombie** | Completed work but failed to die | `gt done` failed during cleanup |
|
||||
|
||||
**The key distinction:** Zombies completed their work; stalled polecats did not.
|
||||
|
||||
- **Stalled** = supposed to be working, but stopped. The polecat was interrupted or
|
||||
crashed and was never nudged back to life. Work is incomplete.
|
||||
- **Zombie** = finished work, tried to exit via `gt done`, but cleanup failed. The
|
||||
session should have shut down but didn't. Work is complete, just stuck in limbo.
|
||||
|
||||
There is no "idle" state. Polecats don't wait around between tasks. When work is
|
||||
done, `gt done` shuts down the session. If you see a non-working polecat, something
|
||||
is broken.
|
||||
|
||||
## The Self-Cleaning Polecat Model
|
||||
|
||||
**Polecats are responsible for their own cleanup.** When a polecat completes its
|
||||
work unit, it:
|
||||
|
||||
1. Signals completion via `gt done`
|
||||
2. Exits its session immediately (no idle waiting)
|
||||
3. Requests its own nuke (self-delete)
|
||||
|
||||
This removes dependency on the Witness/Deacon for cleanup and ensures polecats
|
||||
never sit idle. The simple model: **sandbox dies with session**.
|
||||
|
||||
### Why Self-Cleaning?
|
||||
|
||||
- **No idle polecats** - There's no state where a polecat exists without work
|
||||
- **Reduced watchdog overhead** - Deacon patrols for stalled/zombie polecats, not idle ones
|
||||
- **Faster turnover** - Resources freed immediately on completion
|
||||
- **Simpler mental model** - Done means gone
|
||||
|
||||
### What About Pending Merges?
|
||||
|
||||
The Refinery owns the merge queue. Once `gt done` submits work:
|
||||
- The branch is pushed to origin
|
||||
- Work exists in the MQ, not in the polecat
|
||||
- If rebase fails, Refinery re-implements on new baseline (fresh polecat)
|
||||
- The original polecat is already gone - no sending work "back"
|
||||
|
||||
## The Three Layers
|
||||
|
||||
@@ -92,19 +140,23 @@ The slot:
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ gt done │
|
||||
│ → Polecat signals completion to Witness │
|
||||
│ → Session exits (no idle waiting) │
|
||||
│ → Witness receives POLECAT_DONE event │
|
||||
│ gt done (self-cleaning) │
|
||||
│ → Push branch to origin │
|
||||
│ → Submit work to merge queue (MR bead) │
|
||||
│ → Request self-nuke (sandbox + session cleanup) │
|
||||
│ → Exit immediately │
|
||||
│ │
|
||||
│ Work now lives in MQ, not in polecat. │
|
||||
│ Polecat is GONE. No idle state. │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Witness: gt polecat nuke │
|
||||
│ → Verify work landed (merged or in MQ) │
|
||||
│ → Delete sandbox (remove worktree) │
|
||||
│ → Kill tmux session │
|
||||
│ → Release slot back to pool │
|
||||
│ Refinery: merge queue │
|
||||
│ → Rebase and merge to main │
|
||||
│ → Close the issue │
|
||||
│ → If conflict: spawn FRESH polecat to re-implement │
|
||||
│ (never send work back to original polecat - it's gone) │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
@@ -127,19 +179,24 @@ during normal operation.
|
||||
|
||||
## Anti-Patterns
|
||||
|
||||
### Idle Polecats
|
||||
### "Idle" Polecats (They Don't Exist)
|
||||
|
||||
**Myth:** Polecats wait between tasks in an idle state.
|
||||
**Myth:** Polecats wait between tasks in an idle pool.
|
||||
|
||||
**Reality:** Polecats don't exist without work. The lifecycle is:
|
||||
**Reality:** There is no idle state. Polecats don't exist without work:
|
||||
1. Work assigned → polecat spawned
|
||||
2. Work done → polecat nuked
|
||||
3. There is no idle state
|
||||
2. Work done → `gt done` → session exits → polecat nuked
|
||||
3. There is no step 3 where they wait around
|
||||
|
||||
If you see a polecat without work, something is broken. Either:
|
||||
- The hook was lost (bug)
|
||||
- The session crashed before loading context
|
||||
- Manual intervention corrupted state
|
||||
If you see a non-working polecat, it's in a **failure state**:
|
||||
|
||||
| What you see | What it is | What went wrong |
|
||||
|--------------|------------|-----------------|
|
||||
| Session exists but not working | **Stalled** | Interrupted/crashed, never nudged |
|
||||
| Session done but didn't exit | **Zombie** | `gt done` failed during cleanup |
|
||||
|
||||
Don't call these "idle" - that implies they're waiting for work. They're not.
|
||||
A stalled polecat is *supposed* to be working. A zombie is *supposed* to be dead.
|
||||
|
||||
### Manual State Transitions
|
||||
|
||||
@@ -161,20 +218,23 @@ gt polecat nuke Toast # (from Witness, after verification)
|
||||
Polecats manage their own session lifecycle. The Witness manages sandbox lifecycle.
|
||||
External manipulation bypasses verification.
|
||||
|
||||
### Sandboxes Without Work
|
||||
### Sandboxes Without Work (Stalled Polecats)
|
||||
|
||||
**Anti-pattern:** A sandbox exists but no molecule is hooked.
|
||||
**Anti-pattern:** A sandbox exists but no molecule is hooked, or the session isn't running.
|
||||
|
||||
This means:
|
||||
- The polecat was spawned incorrectly
|
||||
- The hook was lost during crash
|
||||
This is a **stalled** polecat. It means:
|
||||
- The session crashed and wasn't nudged back to life
|
||||
- The hook was lost during a crash
|
||||
- State corruption occurred
|
||||
|
||||
This is NOT an "idle" polecat waiting for work. It's stalled - supposed to be
|
||||
working but stopped unexpectedly.
|
||||
|
||||
**Recovery:**
|
||||
```bash
|
||||
# From Witness:
|
||||
gt polecat nuke Toast # Clean slate
|
||||
gt sling gt-abc gastown # Respawn with work
|
||||
gt polecat nuke Toast # Clean up the stalled polecat
|
||||
gt sling gt-abc gastown # Respawn with fresh polecat
|
||||
```
|
||||
|
||||
### Confusing Session with Sandbox
|
||||
@@ -210,16 +270,43 @@ All except `gt done` result in continued work. Only `gt done` signals completion
|
||||
The Witness monitors polecats but does NOT:
|
||||
- Force session cycles (polecats self-manage via handoff)
|
||||
- Interrupt mid-step (unless truly stuck)
|
||||
- Recycle sandboxes between steps
|
||||
- Nuke polecats (polecats self-nuke via `gt done`)
|
||||
|
||||
The Witness DOES:
|
||||
- Detect and nudge stalled polecats (sessions that stopped unexpectedly)
|
||||
- Clean up zombie polecats (sessions where `gt done` failed)
|
||||
- Respawn crashed sessions
|
||||
- Nudge stuck polecats
|
||||
- Nuke completed polecats (after verification)
|
||||
- Handle escalations
|
||||
- Handle escalations from stuck polecats (polecats that explicitly asked for help)
|
||||
|
||||
## Polecat Identity
|
||||
|
||||
**Key insight:** Polecat *identity* is long-lived; only sessions and sandboxes are ephemeral.
|
||||
|
||||
In the HOP model, every entity has a chain (CV) that tracks:
|
||||
- What work they've done
|
||||
- Success/failure rates
|
||||
- Skills demonstrated
|
||||
- Quality metrics
|
||||
|
||||
The polecat *name* (Toast, Shadow, etc.) is a slot from a pool - truly ephemeral.
|
||||
But the *agent identity* that executes as that polecat accumulates a work history.
|
||||
|
||||
```
|
||||
POLECAT IDENTITY (persistent) SESSION (ephemeral) SANDBOX (ephemeral)
|
||||
├── CV chain ├── Claude instance ├── Git worktree
|
||||
├── Work history ├── Context window ├── Branch
|
||||
├── Skills demonstrated └── Dies on handoff └── Dies on gt done
|
||||
└── Credit for work or gt done
|
||||
```
|
||||
|
||||
This distinction matters for:
|
||||
- **Attribution** - Who gets credit for the work?
|
||||
- **Skill routing** - Which agent is best for this task?
|
||||
- **Cost accounting** - Who pays for inference?
|
||||
- **Federation** - Agents having their own chains in a distributed world
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Understanding Gas Town](understanding-gas-town.md) - Role taxonomy and architecture
|
||||
- [Polecat Wisp Architecture](polecat-wisp-architecture.md) - Molecule execution
|
||||
- [Overview](../overview.md) - Role taxonomy and architecture
|
||||
- [Molecules](molecules.md) - Molecule execution and polecat workflow
|
||||
- [Propulsion Principle](propulsion-principle.md) - Why work triggers immediate execution
|
||||
@@ -125,6 +125,6 @@ bd show gt-xyz # Routes to gastown/mayor/rig/.beads
|
||||
|
||||
## See Also
|
||||
|
||||
- [reference.md](reference.md) - Command reference
|
||||
- [molecules.md](molecules.md) - Workflow molecules
|
||||
- [identity.md](identity.md) - Agent identity and BD_ACTOR
|
||||
- [reference.md](../reference.md) - Command reference
|
||||
- [molecules.md](../concepts/molecules.md) - Workflow molecules
|
||||
- [identity.md](../concepts/identity.md) - Agent identity and BD_ACTOR
|
||||
197
docs/design/convoy-lifecycle.md
Normal file
197
docs/design/convoy-lifecycle.md
Normal file
@@ -0,0 +1,197 @@
|
||||
# Convoy Lifecycle Design
|
||||
|
||||
> Making convoys actively converge on completion.
|
||||
|
||||
## Problem Statement
|
||||
|
||||
Convoys are passive trackers. They group work but don't drive it. The completion
|
||||
loop has a structural gap:
|
||||
|
||||
```
|
||||
Create → Assign → Execute → Issues close → ??? → Convoy closes
|
||||
```
|
||||
|
||||
The `???` is "Deacon patrol runs `gt convoy check`" - a poll-based single point of
|
||||
failure. When Deacon is down, convoys don't close. Work completes but the loop
|
||||
never lands.
|
||||
|
||||
## Current State
|
||||
|
||||
### What Works
|
||||
- Convoy creation and issue tracking
|
||||
- `gt convoy status` shows progress
|
||||
- `gt convoy stranded` finds unassigned work
|
||||
- `gt convoy check` auto-closes completed convoys
|
||||
|
||||
### What Breaks
|
||||
1. **Poll-based completion**: Only Deacon runs `gt convoy check`
|
||||
2. **No event-driven trigger**: Issue close doesn't propagate to convoy
|
||||
3. **No manual close**: Can't force-close abandoned convoys
|
||||
4. **Single observer**: No redundant completion detection
|
||||
5. **Weak notification**: Convoy owner not always clear
|
||||
|
||||
## Design: Active Convoy Convergence
|
||||
|
||||
### Principle: Event-Driven, Redundantly Observed
|
||||
|
||||
Convoy completion should be:
|
||||
1. **Event-driven**: Triggered by issue close, not polling
|
||||
2. **Redundantly observed**: Multiple agents can detect and close
|
||||
3. **Manually overridable**: Humans can force-close
|
||||
|
||||
### Event-Driven Completion
|
||||
|
||||
When an issue closes, check if it's tracked by a convoy:
|
||||
|
||||
```
|
||||
Issue closes
|
||||
↓
|
||||
Is issue tracked by convoy? ──(no)──► done
|
||||
│
|
||||
(yes)
|
||||
↓
|
||||
Run gt convoy check <convoy-id>
|
||||
↓
|
||||
All tracked issues closed? ──(no)──► done
|
||||
│
|
||||
(yes)
|
||||
↓
|
||||
Close convoy, send notifications
|
||||
```
|
||||
|
||||
**Implementation options:**
|
||||
1. Daemon hook on `bd update --status=closed`
|
||||
2. Refinery step after successful merge
|
||||
3. Witness step after verifying polecat completion
|
||||
|
||||
Option 1 is the most reliable - it catches all closes regardless of source.
|
||||
|
||||
### Redundant Observers
|
||||
|
||||
Per PRIMING.md: "Redundant Monitoring Is Resilience."
|
||||
|
||||
Three places should check convoy completion:
|
||||
|
||||
| Observer | When | Scope |
|
||||
|----------|------|-------|
|
||||
| **Daemon** | On any issue close | All convoys |
|
||||
| **Witness** | After verifying polecat work | Rig's convoy work |
|
||||
| **Deacon** | Periodic patrol | All convoys (backup) |
|
||||
|
||||
Any observer noticing completion triggers close. Idempotent - closing
|
||||
an already-closed convoy is a no-op.
|
||||
|
||||
### Manual Close Command
|
||||
|
||||
**Desire path**: `gt convoy close` is expected but missing.
|
||||
|
||||
```bash
|
||||
# Close a completed convoy
|
||||
gt convoy close hq-cv-abc
|
||||
|
||||
# Force-close an abandoned convoy
|
||||
gt convoy close hq-cv-xyz --reason="work done differently"
|
||||
|
||||
# Close with explicit notification
|
||||
gt convoy close hq-cv-abc --notify mayor/
|
||||
```
|
||||
|
||||
Use cases:
|
||||
- Abandoned convoys no longer relevant
|
||||
- Work completed outside tracked path
|
||||
- Force-closing stuck convoys
|
||||
|
||||
### Convoy Owner/Requester
|
||||
|
||||
Track who requested the convoy for targeted notifications:
|
||||
|
||||
```bash
|
||||
gt convoy create "Feature X" gt-abc --owner mayor/ --notify overseer
|
||||
```
|
||||
|
||||
| Field | Purpose |
|
||||
|-------|---------|
|
||||
| `owner` | Who requested (gets completion notification) |
|
||||
| `notify` | Additional subscribers |
|
||||
|
||||
If `owner` not specified, defaults to creator (from `created_by`).
|
||||
|
||||
### Convoy States
|
||||
|
||||
```
|
||||
OPEN ──(all issues close)──► CLOSED
|
||||
│ │
|
||||
│ ▼
|
||||
│ (add issues)
|
||||
│ │
|
||||
└─────────────────────────────┘
|
||||
(auto-reopens)
|
||||
```
|
||||
|
||||
Adding issues to closed convoy reopens automatically.
|
||||
|
||||
**New state for abandonment:**
|
||||
|
||||
```
|
||||
OPEN ──► CLOSED (completed)
|
||||
│
|
||||
└────► ABANDONED (force-closed without completion)
|
||||
```
|
||||
|
||||
### Timeout/SLA (Future)
|
||||
|
||||
Optional `due_at` field for convoy deadline:
|
||||
|
||||
```bash
|
||||
gt convoy create "Sprint work" gt-abc --due="2026-01-15"
|
||||
```
|
||||
|
||||
Overdue convoys surface in `gt convoy stranded --overdue`.
|
||||
|
||||
## Commands
|
||||
|
||||
### New: `gt convoy close`
|
||||
|
||||
```bash
|
||||
gt convoy close <convoy-id> [--reason=<reason>] [--notify=<agent>]
|
||||
```
|
||||
|
||||
- Closes convoy regardless of tracked issue status
|
||||
- Sets `close_reason` field
|
||||
- Sends notification to owner and subscribers
|
||||
- Idempotent - closing closed convoy is no-op
|
||||
|
||||
### Enhanced: `gt convoy check`
|
||||
|
||||
```bash
|
||||
# Check all convoys (current behavior)
|
||||
gt convoy check
|
||||
|
||||
# Check specific convoy (new)
|
||||
gt convoy check <convoy-id>
|
||||
|
||||
# Dry-run mode
|
||||
gt convoy check --dry-run
|
||||
```
|
||||
|
||||
### New: `gt convoy reopen`
|
||||
|
||||
```bash
|
||||
gt convoy reopen <convoy-id>
|
||||
```
|
||||
|
||||
Explicit reopen for clarity (currently implicit via add).
|
||||
|
||||
## Implementation Priority
|
||||
|
||||
1. **P0: `gt convoy close`** - Desire path, escape hatch
|
||||
2. **P0: Event-driven check** - Daemon hook on issue close
|
||||
3. **P1: Redundant observers** - Witness/Refinery integration
|
||||
4. **P2: Owner field** - Targeted notifications
|
||||
5. **P3: Timeout/SLA** - Deadline tracking
|
||||
|
||||
## Related
|
||||
|
||||
- [convoy.md](../concepts/convoy.md) - Convoy concept and usage
|
||||
- [watchdog-chain.md](watchdog-chain.md) - Deacon patrol system
|
||||
- [mail-protocol.md](mail-protocol.md) - Notification delivery
|
||||
@@ -306,7 +306,7 @@ Rationale:
|
||||
What dogs DO share:
|
||||
- tmux utilities for message sending/capture
|
||||
- State file patterns
|
||||
- Pool allocation pattern
|
||||
- Name slot allocation pattern (pool of names, not instances)
|
||||
|
||||
### Dog Execution Loop
|
||||
|
||||
576
docs/design/escalation-system.md
Normal file
576
docs/design/escalation-system.md
Normal file
@@ -0,0 +1,576 @@
|
||||
# Escalation System Design
|
||||
|
||||
> Detailed design for the Gas Town unified escalation system.
|
||||
> Written 2026-01-11, crew/george session.
|
||||
> Parent epic: gt-i9r20
|
||||
|
||||
## Problem Statement
|
||||
|
||||
Current escalation is ad-hoc "mail Mayor". Issues:
|
||||
- Mayor gets backlogged easily (especially during swarms)
|
||||
- No severity differentiation
|
||||
- No alternative channels (email, SMS, Slack)
|
||||
- No tracking of stale/unacknowledged escalations
|
||||
- No visibility into escalation history
|
||||
|
||||
## Design Goals
|
||||
|
||||
1. **Unified API**: Single `gt escalate` command for all escalation needs
|
||||
2. **Severity-based routing**: Different severities go to different channels
|
||||
3. **Config-driven**: Town config controls routing, no code changes needed
|
||||
4. **Audit trail**: All escalations tracked as beads
|
||||
5. **Stale detection**: Unacknowledged escalations re-escalate automatically
|
||||
6. **Extensible**: Easy to add new notification channels
|
||||
|
||||
---
|
||||
|
||||
## Architecture
|
||||
|
||||
### Components
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ gt escalate command │
|
||||
│ --severity --subject --body --source │
|
||||
└─────────────────────┬───────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Escalation Manager │
|
||||
│ 1. Read config (settings/escalation.json) │
|
||||
│ 2. Create escalation bead │
|
||||
│ 3. Execute route actions for severity │
|
||||
└─────────────────────┬───────────────────────────────────────┘
|
||||
│
|
||||
┌───────────┼───────────┬───────────┐
|
||||
▼ ▼ ▼ ▼
|
||||
┌───────┐ ┌─────────┐ ┌───────┐ ┌───────┐
|
||||
│ Bead │ │ Mail │ │ Email │ │ SMS │
|
||||
│Create │ │ Action │ │Action │ │Action │
|
||||
└───────┘ └─────────┘ └───────┘ └───────┘
|
||||
```
|
||||
|
||||
### Data Flow
|
||||
|
||||
1. Agent calls `gt escalate --severity=high --subject="..." --body="..."`
|
||||
2. Command loads escalation config from `settings/escalation.json`
|
||||
3. Creates escalation bead with severity, subject, body, source labels
|
||||
4. Looks up route for severity level
|
||||
5. Executes each action in the route (bead already created, then mail, email, etc.)
|
||||
6. Returns escalation bead ID
|
||||
|
||||
### Stale Escalation Flow
|
||||
|
||||
1. Deacon patrol (or plugin) runs `gt escalate stale`
|
||||
2. Queries for escalation beads older than threshold without `acknowledged:true`
|
||||
3. For each stale escalation:
|
||||
- Bump severity (low→medium, medium→high, high→critical)
|
||||
- Re-execute route for new severity
|
||||
- Add `reescalated:true` label and timestamp
|
||||
|
||||
---
|
||||
|
||||
## Configuration
|
||||
|
||||
### File Location
|
||||
|
||||
`~/gt/settings/escalation.json`
|
||||
|
||||
This follows the existing pattern where `~/gt/settings/` contains town-level behavioral config.
|
||||
|
||||
### Schema
|
||||
|
||||
```go
|
||||
// EscalationConfig represents escalation routing configuration.
|
||||
type EscalationConfig struct {
|
||||
Type string `json:"type"` // "escalation"
|
||||
Version int `json:"version"` // schema version
|
||||
|
||||
// Routes maps severity levels to action lists.
|
||||
// Actions are executed in order.
|
||||
Routes map[string][]string `json:"routes"`
|
||||
|
||||
// Contacts contains contact information for actions.
|
||||
Contacts EscalationContacts `json:"contacts"`
|
||||
|
||||
// StaleThreshold is how long before an unacknowledged escalation
|
||||
// is considered stale and gets re-escalated. Default: "4h"
|
||||
StaleThreshold string `json:"stale_threshold,omitempty"`
|
||||
|
||||
// MaxReescalations limits how many times an escalation can be
|
||||
// re-escalated. Default: 2 (low→medium→high, then stops)
|
||||
MaxReescalations int `json:"max_reescalations,omitempty"`
|
||||
}
|
||||
|
||||
// EscalationContacts contains contact information.
|
||||
type EscalationContacts struct {
|
||||
HumanEmail string `json:"human_email,omitempty"`
|
||||
HumanSMS string `json:"human_sms,omitempty"`
|
||||
SlackWebhook string `json:"slack_webhook,omitempty"`
|
||||
}
|
||||
|
||||
const CurrentEscalationVersion = 1
|
||||
```
|
||||
|
||||
### Default Configuration
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "escalation",
|
||||
"version": 1,
|
||||
"routes": {
|
||||
"low": ["bead"],
|
||||
"medium": ["bead", "mail:mayor"],
|
||||
"high": ["bead", "mail:mayor", "email:human"],
|
||||
"critical": ["bead", "mail:mayor", "email:human", "sms:human"]
|
||||
},
|
||||
"contacts": {
|
||||
"human_email": "",
|
||||
"human_sms": ""
|
||||
},
|
||||
"stale_threshold": "4h",
|
||||
"max_reescalations": 2
|
||||
}
|
||||
```
|
||||
|
||||
### Action Types
|
||||
|
||||
| Action | Format | Behavior |
|
||||
|--------|--------|----------|
|
||||
| `bead` | `bead` | Create escalation bead (always first, implicit) |
|
||||
| `mail:<target>` | `mail:mayor` | Send gt mail to target |
|
||||
| `email:human` | `email:human` | Send email to `contacts.human_email` |
|
||||
| `sms:human` | `sms:human` | Send SMS to `contacts.human_sms` |
|
||||
| `slack` | `slack` | Post to `contacts.slack_webhook` |
|
||||
| `log` | `log` | Write to escalation log file |
|
||||
|
||||
### Severity Levels
|
||||
|
||||
| Level | Use Case | Default Route |
|
||||
|-------|----------|---------------|
|
||||
| `low` | Informational, non-urgent | bead only |
|
||||
| `medium` | Needs attention soon | bead + mail mayor |
|
||||
| `high` | Urgent, needs human | bead + mail + email |
|
||||
| `critical` | Emergency, immediate | bead + mail + email + SMS |
|
||||
|
||||
---
|
||||
|
||||
## Escalation Beads
|
||||
|
||||
### Bead Format
|
||||
|
||||
```yaml
|
||||
id: gt-esc-abc123
|
||||
type: escalation
|
||||
status: open
|
||||
title: "Plugin FAILED: rebuild-gt"
|
||||
labels:
|
||||
- severity:high
|
||||
- source:plugin:rebuild-gt
|
||||
- acknowledged:false
|
||||
- reescalated:false
|
||||
- reescalation_count:0
|
||||
description: |
|
||||
Build failed: make returned exit code 2
|
||||
|
||||
## Context
|
||||
- Source: plugin:rebuild-gt
|
||||
- Original severity: medium
|
||||
- Escalated at: 2026-01-11T19:00:00Z
|
||||
created_at: 2026-01-11T15:00:00Z
|
||||
```
|
||||
|
||||
### Label Schema
|
||||
|
||||
| Label | Values | Purpose |
|
||||
|-------|--------|---------|
|
||||
| `severity:<level>` | low, medium, high, critical | Current severity |
|
||||
| `source:<type>:<name>` | plugin:rebuild-gt, patrol:deacon | What triggered it |
|
||||
| `acknowledged:<bool>` | true, false | Has human acknowledged |
|
||||
| `reescalated:<bool>` | true, false | Has been re-escalated |
|
||||
| `reescalation_count:<n>` | 0, 1, 2, ... | Times re-escalated |
|
||||
| `original_severity:<level>` | low, medium, high | Initial severity |
|
||||
|
||||
---
|
||||
|
||||
## Commands
|
||||
|
||||
### gt escalate
|
||||
|
||||
Create a new escalation.
|
||||
|
||||
```bash
|
||||
gt escalate \
|
||||
--severity=<low|medium|high|critical> \
|
||||
--subject="Short description" \
|
||||
--body="Detailed explanation" \
|
||||
[--source="plugin:rebuild-gt"]
|
||||
```
|
||||
|
||||
**Flags:**
|
||||
- `--severity` (required): Escalation severity level
|
||||
- `--subject` (required): Short description (becomes bead title)
|
||||
- `--body` (required): Detailed explanation (becomes bead description)
|
||||
- `--source`: Source identifier for tracking (e.g., "plugin:rebuild-gt")
|
||||
- `--dry-run`: Show what would happen without executing
|
||||
- `--json`: Output escalation bead ID as JSON
|
||||
|
||||
**Exit codes:**
|
||||
- 0: Success
|
||||
- 1: Config error or invalid flags
|
||||
- 2: Action failed (e.g., email send failed)
|
||||
|
||||
**Example:**
|
||||
```bash
|
||||
gt escalate \
|
||||
--severity=high \
|
||||
--subject="Plugin FAILED: rebuild-gt" \
|
||||
--body="Build failed: make returned exit code 2. Working directory: ~/gt/gastown/crew/george" \
|
||||
--source="plugin:rebuild-gt"
|
||||
|
||||
# Output:
|
||||
# ✓ Created escalation gt-esc-abc123 (severity: high)
|
||||
# → Created bead
|
||||
# → Mailed mayor/
|
||||
# → Emailed steve@example.com
|
||||
```
|
||||
|
||||
### gt escalate ack
|
||||
|
||||
Acknowledge an escalation.
|
||||
|
||||
```bash
|
||||
gt escalate ack <bead-id> [--note="Investigating"]
|
||||
```
|
||||
|
||||
**Behavior:**
|
||||
- Sets `acknowledged:true` label
|
||||
- Optionally adds note to bead
|
||||
- Prevents re-escalation
|
||||
|
||||
**Example:**
|
||||
```bash
|
||||
gt escalate ack gt-esc-abc123 --note="Looking into it"
|
||||
# ✓ Acknowledged gt-esc-abc123
|
||||
```
|
||||
|
||||
### gt escalate list
|
||||
|
||||
List escalations.
|
||||
|
||||
```bash
|
||||
gt escalate list [--severity=...] [--stale] [--unacked] [--all]
|
||||
```
|
||||
|
||||
**Flags:**
|
||||
- `--severity`: Filter by severity level
|
||||
- `--stale`: Show only stale (past threshold, unacked)
|
||||
- `--unacked`: Show only unacknowledged
|
||||
- `--all`: Include acknowledged/closed
|
||||
- `--json`: Output as JSON
|
||||
|
||||
**Example:**
|
||||
```bash
|
||||
gt escalate list --unacked
|
||||
# 📢 Unacknowledged Escalations (2)
|
||||
#
|
||||
# ● gt-esc-abc123 [HIGH] Plugin FAILED: rebuild-gt
|
||||
# Source: plugin:rebuild-gt · Age: 2h · Stale in: 2h
|
||||
# ● gt-esc-def456 [MEDIUM] Witness unresponsive
|
||||
# Source: patrol:deacon · Age: 30m · Stale in: 3h30m
|
||||
```
|
||||
|
||||
### gt escalate stale
|
||||
|
||||
Check for and re-escalate stale escalations.
|
||||
|
||||
```bash
|
||||
gt escalate stale [--dry-run]
|
||||
```
|
||||
|
||||
**Behavior:**
|
||||
- Queries unacked escalations older than `stale_threshold`
|
||||
- For each, bumps severity and re-executes route
|
||||
- Respects `max_reescalations` limit
|
||||
|
||||
**Example:**
|
||||
```bash
|
||||
gt escalate stale
|
||||
# 🔄 Re-escalating stale escalations...
|
||||
#
|
||||
# gt-esc-abc123: medium → high (age: 5h, reescalation: 1/2)
|
||||
# → Emailed steve@example.com
|
||||
#
|
||||
# ✓ Re-escalated 1 escalation
|
||||
```
|
||||
|
||||
### gt escalate close
|
||||
|
||||
Close an escalation (resolved).
|
||||
|
||||
```bash
|
||||
gt escalate close <bead-id> [--reason="Fixed in commit abc123"]
|
||||
```
|
||||
|
||||
**Behavior:**
|
||||
- Sets status to closed
|
||||
- Adds resolution note
|
||||
- Records who closed it
|
||||
|
||||
---
|
||||
|
||||
## Implementation Details
|
||||
|
||||
### File: internal/cmd/escalate.go
|
||||
|
||||
```go
|
||||
package cmd
|
||||
|
||||
// escalateCmd is the parent command for escalation management.
|
||||
var escalateCmd = &cobra.Command{
|
||||
Use: "escalate",
|
||||
Short: "Manage escalations",
|
||||
Long: `Create, acknowledge, and manage escalations with severity-based routing.`,
|
||||
}
|
||||
|
||||
// escalateCreateCmd creates a new escalation.
|
||||
var escalateCreateCmd = &cobra.Command{
|
||||
Use: "escalate --severity=<level> --subject=<text> --body=<text>",
|
||||
Short: "Create a new escalation",
|
||||
// ... implementation
|
||||
}
|
||||
|
||||
// escalateAckCmd acknowledges an escalation.
|
||||
var escalateAckCmd = &cobra.Command{
|
||||
Use: "ack <bead-id>",
|
||||
Short: "Acknowledge an escalation",
|
||||
// ... implementation
|
||||
}
|
||||
|
||||
// escalateListCmd lists escalations.
|
||||
var escalateListCmd = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List escalations",
|
||||
// ... implementation
|
||||
}
|
||||
|
||||
// escalateStaleCmd checks for stale escalations.
|
||||
var escalateStaleCmd = &cobra.Command{
|
||||
Use: "stale",
|
||||
Short: "Re-escalate stale escalations",
|
||||
// ... implementation
|
||||
}
|
||||
|
||||
// escalateCloseCmd closes an escalation.
|
||||
var escalateCloseCmd = &cobra.Command{
|
||||
Use: "close <bead-id>",
|
||||
Short: "Close an escalation",
|
||||
// ... implementation
|
||||
}
|
||||
```
|
||||
|
||||
### File: internal/escalation/manager.go
|
||||
|
||||
```go
|
||||
package escalation
|
||||
|
||||
// Manager handles escalation creation and routing.
|
||||
type Manager struct {
|
||||
config *config.EscalationConfig
|
||||
beads *beads.Client
|
||||
mailer *mail.Client
|
||||
}
|
||||
|
||||
// Escalate creates a new escalation and executes the route.
|
||||
func (m *Manager) Escalate(ctx context.Context, opts EscalateOptions) (*Escalation, error) {
|
||||
// 1. Validate options
|
||||
// 2. Create escalation bead
|
||||
// 3. Look up route for severity
|
||||
// 4. Execute each action
|
||||
// 5. Return escalation with results
|
||||
}
|
||||
|
||||
// Acknowledge marks an escalation as acknowledged.
|
||||
func (m *Manager) Acknowledge(ctx context.Context, beadID string, note string) error {
|
||||
// 1. Load escalation bead
|
||||
// 2. Set acknowledged:true label
|
||||
// 3. Add note if provided
|
||||
}
|
||||
|
||||
// ReescalateStale finds and re-escalates stale escalations.
|
||||
func (m *Manager) ReescalateStale(ctx context.Context) ([]Reescalation, error) {
|
||||
// 1. Query unacked escalations older than threshold
|
||||
// 2. For each, bump severity
|
||||
// 3. Execute new route
|
||||
// 4. Update labels
|
||||
}
|
||||
```
|
||||
|
||||
### File: internal/escalation/actions.go
|
||||
|
||||
```go
|
||||
package escalation
|
||||
|
||||
// Action is an escalation route action.
|
||||
type Action interface {
|
||||
Execute(ctx context.Context, esc *Escalation) error
|
||||
String() string
|
||||
}
|
||||
|
||||
// BeadAction creates the escalation bead.
|
||||
type BeadAction struct{}
|
||||
|
||||
// MailAction sends gt mail.
|
||||
type MailAction struct {
|
||||
Target string // e.g., "mayor"
|
||||
}
|
||||
|
||||
// EmailAction sends email.
|
||||
type EmailAction struct {
|
||||
Recipient string // from config.contacts
|
||||
}
|
||||
|
||||
// SMSAction sends SMS.
|
||||
type SMSAction struct {
|
||||
Recipient string // from config.contacts
|
||||
}
|
||||
|
||||
// ParseAction parses an action string into an Action.
|
||||
func ParseAction(s string) (Action, error) {
|
||||
// "bead" -> BeadAction{}
|
||||
// "mail:mayor" -> MailAction{Target: "mayor"}
|
||||
// "email:human" -> EmailAction{Recipient: "human"}
|
||||
// etc.
|
||||
}
|
||||
```
|
||||
|
||||
### Email/SMS Implementation
|
||||
|
||||
For v1, use simple exec of external commands:
|
||||
|
||||
```go
|
||||
// EmailAction sends email using the 'mail' command or similar.
|
||||
func (a *EmailAction) Execute(ctx context.Context, esc *Escalation) error {
|
||||
// Option 1: Use system mail command
|
||||
// Option 2: Use sendgrid/ses API (future)
|
||||
// Option 3: Use configured webhook
|
||||
|
||||
// For now, just log a placeholder
|
||||
// Real implementation can be added based on user's infrastructure
|
||||
}
|
||||
```
|
||||
|
||||
The email/SMS actions can start as stubs that log warnings, with real implementations added based on the user's infrastructure (SendGrid, Twilio, etc.).
|
||||
|
||||
---
|
||||
|
||||
## Integration Points
|
||||
|
||||
### Plugin System
|
||||
|
||||
Plugins use escalation for failure notification:
|
||||
|
||||
```markdown
|
||||
# In plugin.md execution section:
|
||||
|
||||
On failure:
|
||||
```bash
|
||||
gt escalate \
|
||||
--severity=medium \
|
||||
--subject="Plugin FAILED: rebuild-gt" \
|
||||
--body="$ERROR" \
|
||||
--source="plugin:rebuild-gt"
|
||||
```
|
||||
```
|
||||
|
||||
### Deacon Patrol
|
||||
|
||||
Deacon uses escalation for health issues:
|
||||
|
||||
```bash
|
||||
# In health-scan step:
|
||||
if [ $unresponsive_cycles -ge 5 ]; then
|
||||
gt escalate \
|
||||
--severity=high \
|
||||
--subject="Witness unresponsive: gastown" \
|
||||
--body="Witness has been unresponsive for $unresponsive_cycles cycles" \
|
||||
--source="patrol:deacon:health-scan"
|
||||
fi
|
||||
```
|
||||
|
||||
### Stale Escalation Check
|
||||
|
||||
Can be either:
|
||||
1. A Deacon patrol step
|
||||
2. A plugin (dogfood!)
|
||||
3. Part of `gt escalate` itself (run periodically)
|
||||
|
||||
Recommendation: Start as patrol step, migrate to plugin later.
|
||||
|
||||
---
|
||||
|
||||
## Testing Plan
|
||||
|
||||
### Unit Tests
|
||||
|
||||
- Config loading and validation
|
||||
- Action parsing
|
||||
- Severity level ordering
|
||||
- Re-escalation logic
|
||||
|
||||
### Integration Tests
|
||||
|
||||
- Create escalation → bead exists
|
||||
- Acknowledge → label updated
|
||||
- Stale detection → re-escalation triggers
|
||||
- Route execution → all actions called
|
||||
|
||||
### Manual Testing
|
||||
|
||||
1. `gt escalate --severity=low --subject="Test" --body="Testing"`
|
||||
2. `gt escalate list --unacked`
|
||||
3. `gt escalate ack <id>`
|
||||
4. Wait for stale threshold, run `gt escalate stale`
|
||||
|
||||
---
|
||||
|
||||
## Dependencies
|
||||
|
||||
### Internal Dependencies (task order)
|
||||
|
||||
```
|
||||
gt-i9r20.2 (Config Schema)
|
||||
│
|
||||
▼
|
||||
gt-i9r20.1 (gt escalate command)
|
||||
│
|
||||
├──▶ gt-i9r20.4 (gt escalate ack)
|
||||
│
|
||||
└──▶ gt-i9r20.3 (Stale patrol)
|
||||
```
|
||||
|
||||
### External Dependencies
|
||||
|
||||
- `bd create` for creating escalation beads
|
||||
- `bd list` for querying escalations
|
||||
- `bd label` for updating labels
|
||||
- `gt mail send` for mail action
|
||||
|
||||
---
|
||||
|
||||
## Open Questions (Resolved)
|
||||
|
||||
1. **Where to store config?** → `settings/escalation.json` (follows existing pattern)
|
||||
2. **How to implement email/SMS?** → Start with stubs, add real impl based on infrastructure
|
||||
3. **Stale check: patrol step or plugin?** → Start as patrol step, can migrate to plugin
|
||||
4. **Escalation bead type?** → `type: escalation` (new bead type)
|
||||
|
||||
---
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
1. **Slack integration**: Post to Slack channels
|
||||
2. **PagerDuty integration**: Create incidents
|
||||
3. **Escalation dashboard**: Web UI for escalation management
|
||||
4. **Scheduled escalations**: "Remind me in 2h if not resolved"
|
||||
5. **Escalation templates**: Pre-defined escalation types
|
||||
@@ -1,5 +1,7 @@
|
||||
# Federation Architecture
|
||||
|
||||
> **Status: Design spec - not yet implemented**
|
||||
|
||||
> Multi-workspace coordination for Gas Town and Beads
|
||||
|
||||
## Overview
|
||||
@@ -100,7 +102,7 @@ Distribute work across workspaces:
|
||||
|
||||
## Agent Provenance
|
||||
|
||||
Every agent operation is attributed. See [identity.md](identity.md) for the
|
||||
Every agent operation is attributed. See [identity.md](../concepts/identity.md) for the
|
||||
complete BD_ACTOR format convention.
|
||||
|
||||
### Git Commits
|
||||
141
docs/design/operational-state.md
Normal file
141
docs/design/operational-state.md
Normal file
@@ -0,0 +1,141 @@
|
||||
# Operational State in Gas Town
|
||||
|
||||
> Managing runtime state through events and labels.
|
||||
|
||||
## Overview
|
||||
|
||||
Gas Town tracks operational state changes as structured data. This document covers:
|
||||
- **Events**: State transitions as beads (immutable audit trail)
|
||||
- **Labels-as-state**: Fast queries via role bead labels (current state cache)
|
||||
|
||||
For Boot triage and degraded mode details, see [Watchdog Chain](watchdog-chain.md).
|
||||
|
||||
## Events: State Transitions as Data
|
||||
|
||||
Operational state changes are recorded as event beads. Each event captures:
|
||||
- **What** changed (`event_type`)
|
||||
- **Who** caused it (`actor`)
|
||||
- **What** was affected (`target`)
|
||||
- **Context** (`payload`)
|
||||
- **When** (`created_at`)
|
||||
|
||||
### Event Types
|
||||
|
||||
| Event Type | Description | Payload |
|
||||
|------------|-------------|---------|
|
||||
| `patrol.muted` | Patrol cycle disabled | `{reason, until?}` |
|
||||
| `patrol.unmuted` | Patrol cycle re-enabled | `{reason?}` |
|
||||
| `agent.started` | Agent session began | `{session_id?}` |
|
||||
| `agent.stopped` | Agent session ended | `{reason, outcome?}` |
|
||||
| `mode.degraded` | System entered degraded mode | `{reason}` |
|
||||
| `mode.normal` | System returned to normal | `{}` |
|
||||
|
||||
### Creating Events
|
||||
|
||||
```bash
|
||||
# Mute deacon patrol
|
||||
bd create --type=event --event-type=patrol.muted \
|
||||
--actor=human:overseer --target=agent:deacon \
|
||||
--payload='{"reason":"fixing convoy deadlock","until":"gt-abc1"}'
|
||||
|
||||
# System entered degraded mode
|
||||
bd create --type=event --event-type=mode.degraded \
|
||||
--actor=system:daemon --target=rig:greenplace \
|
||||
--payload='{"reason":"tmux unavailable"}'
|
||||
```
|
||||
|
||||
### Querying Events
|
||||
|
||||
```bash
|
||||
# Recent events for an agent
|
||||
bd list --type=event --target=agent:deacon --limit=10
|
||||
|
||||
# All patrol state changes
|
||||
bd list --type=event --event-type=patrol.muted
|
||||
bd list --type=event --event-type=patrol.unmuted
|
||||
|
||||
# Events in the activity feed
|
||||
bd activity --follow --type=event
|
||||
```
|
||||
|
||||
## Labels-as-State Pattern
|
||||
|
||||
Events capture the full history. Labels cache the current state for fast queries.
|
||||
|
||||
### Convention
|
||||
|
||||
Labels use `<dimension>:<value>` format:
|
||||
- `patrol:muted` / `patrol:active`
|
||||
- `mode:degraded` / `mode:normal`
|
||||
- `status:idle` / `status:working` (for persistent agents only - see note)
|
||||
|
||||
**Note on polecats:** The `status:idle` label does NOT apply to polecats. Polecats
|
||||
have no idle state - they're either working, stalled (stopped unexpectedly), or
|
||||
zombie (`gt done` failed). This label is for persistent agents like Deacon, Witness,
|
||||
and Crew members who can legitimately be idle between tasks.
|
||||
|
||||
### State Change Flow
|
||||
|
||||
1. Create event bead (full context, immutable)
|
||||
2. Update role bead labels (current state cache)
|
||||
|
||||
```bash
|
||||
# Mute patrol
|
||||
bd create --type=event --event-type=patrol.muted ...
|
||||
bd update role-deacon --add-label=patrol:muted --remove-label=patrol:active
|
||||
|
||||
# Unmute patrol
|
||||
bd create --type=event --event-type=patrol.unmuted ...
|
||||
bd update role-deacon --add-label=patrol:active --remove-label=patrol:muted
|
||||
```
|
||||
|
||||
### Querying Current State
|
||||
|
||||
```bash
|
||||
# Is deacon patrol muted?
|
||||
bd show role-deacon | grep patrol:
|
||||
|
||||
# All agents with muted patrol
|
||||
bd list --type=role --label=patrol:muted
|
||||
|
||||
# All agents in degraded mode
|
||||
bd list --type=role --label=mode:degraded
|
||||
```
|
||||
|
||||
## Configuration vs State
|
||||
|
||||
| Type | Storage | Example |
|
||||
|------|---------|---------|
|
||||
| **Static config** | TOML files | Daemon tick interval |
|
||||
| **Operational state** | Beads (events + labels) | Patrol muted |
|
||||
| **Runtime flags** | Marker files | `.deacon-disabled` |
|
||||
|
||||
Static config rarely changes and doesn't need history.
|
||||
Operational state changes at runtime and benefits from audit trail.
|
||||
Marker files are fast checks that can trigger deeper beads queries.
|
||||
|
||||
## Commands Summary
|
||||
|
||||
```bash
|
||||
# Create operational event
|
||||
bd create --type=event --event-type=<type> \
|
||||
--actor=<entity> --target=<entity> --payload='<json>'
|
||||
|
||||
# Update state label
|
||||
bd update <role-bead> --add-label=<dim>:<val> --remove-label=<dim>:<old>
|
||||
|
||||
# Query current state
|
||||
bd list --type=role --label=<dim>:<val>
|
||||
|
||||
# Query state history
|
||||
bd list --type=event --target=<entity>
|
||||
|
||||
# Boot management
|
||||
gt dog status boot
|
||||
gt dog call boot
|
||||
gt dog prime boot
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
*Events are the source of truth. Labels are the cache.*
|
||||
485
docs/design/plugin-system.md
Normal file
485
docs/design/plugin-system.md
Normal file
@@ -0,0 +1,485 @@
|
||||
# Plugin System Design
|
||||
|
||||
> Design document for the Gas Town plugin system.
|
||||
> Written 2026-01-11, crew/george session.
|
||||
|
||||
## Problem Statement
|
||||
|
||||
Gas Town needs extensible, project-specific automation that runs during Deacon patrol cycles. The immediate use case is rebuilding stale binaries (gt, bd, wv), but the pattern generalizes to any periodic maintenance task.
|
||||
|
||||
Current state:
|
||||
- Plugin infrastructure exists conceptually (patrol step mentions it)
|
||||
- `~/gt/plugins/` directory exists with README
|
||||
- No actual plugins in production use
|
||||
- No formalized execution model
|
||||
|
||||
## Design Principles Applied
|
||||
|
||||
### Discover, Don't Track
|
||||
> Reality is truth. State is derived.
|
||||
|
||||
Plugin state (last run, run count, results) lives on the ledger as wisps, not in shadow state files. Gate evaluation queries the ledger directly.
|
||||
|
||||
### ZFC: Zero Framework Cognition
|
||||
> Agent decides. Go transports.
|
||||
|
||||
The Deacon (agent) evaluates gates and decides whether to dispatch. Go code provides transport (`gt dog dispatch`) but doesn't make decisions.
|
||||
|
||||
### MEOW Stack Integration
|
||||
|
||||
| Layer | Plugin Analog |
|
||||
|-------|---------------|
|
||||
| **M**olecule | `plugin.md` - work template with TOML frontmatter |
|
||||
| **E**phemeral | Plugin-run wisps - high-volume, digestible |
|
||||
| **O**bservable | Plugin runs appear in `bd activity` feed |
|
||||
| **W**orkflow | Gate → Dispatch → Execute → Record → Digest |
|
||||
|
||||
---
|
||||
|
||||
## Architecture
|
||||
|
||||
### Plugin Locations
|
||||
|
||||
```
|
||||
~/gt/
|
||||
├── plugins/ # Town-level plugins (universal)
|
||||
│ └── README.md
|
||||
├── gastown/
|
||||
│ └── plugins/ # Rig-level plugins
|
||||
│ └── rebuild-gt/
|
||||
│ └── plugin.md
|
||||
├── beads/
|
||||
│ └── plugins/
|
||||
│ └── rebuild-bd/
|
||||
│ └── plugin.md
|
||||
└── wyvern/
|
||||
└── plugins/
|
||||
└── rebuild-wv/
|
||||
└── plugin.md
|
||||
```
|
||||
|
||||
**Town-level** (`~/gt/plugins/`): Universal plugins that apply everywhere.
|
||||
**Rig-level** (`<rig>/plugins/`): Project-specific plugins.
|
||||
|
||||
The Deacon scans both locations during patrol.
|
||||
|
||||
### Execution Model: Dog Dispatch
|
||||
|
||||
**Key insight**: Plugin execution should not block Deacon patrol.
|
||||
|
||||
Dogs are reusable workers designed for infrastructure tasks. Plugin execution is dispatched to dogs:
|
||||
|
||||
```
|
||||
Deacon Patrol Dog Worker
|
||||
───────────────── ─────────────────
|
||||
1. Scan plugins
|
||||
2. Evaluate gates
|
||||
3. For open gates:
|
||||
└─ gt dog dispatch plugin ──→ 4. Execute plugin
|
||||
(non-blocking) 5. Create result wisp
|
||||
6. Send DOG_DONE
|
||||
4. Continue patrol
|
||||
...
|
||||
5. Process DOG_DONE ←── (next cycle)
|
||||
```
|
||||
|
||||
Benefits:
|
||||
- Deacon stays responsive
|
||||
- Multiple plugins can run concurrently (different dogs)
|
||||
- Plugin failures don't stall patrol
|
||||
- Consistent with Dogs' purpose (infrastructure work)
|
||||
|
||||
### State Tracking: Wisps on the Ledger
|
||||
|
||||
Each plugin run creates a wisp:
|
||||
|
||||
```bash
|
||||
bd wisp create \
|
||||
--label type:plugin-run \
|
||||
--label plugin:rebuild-gt \
|
||||
--label rig:gastown \
|
||||
--label result:success \
|
||||
--body "Rebuilt gt: abc123 → def456 (5 commits)"
|
||||
```
|
||||
|
||||
**Gate evaluation** queries wisps instead of state files:
|
||||
|
||||
```bash
|
||||
# Cooldown check: any runs in last hour?
|
||||
bd list --type=wisp --label=plugin:rebuild-gt --since=1h --limit=1
|
||||
```
|
||||
|
||||
**Derived state** (no state.json needed):
|
||||
|
||||
| Query | Command |
|
||||
|-------|---------|
|
||||
| Last run time | `bd list --label=plugin:X --limit=1 --json` |
|
||||
| Run count | `bd list --label=plugin:X --json \| jq length` |
|
||||
| Last result | Parse `result:` label from latest wisp |
|
||||
| Failure rate | Count `result:failure` vs total |
|
||||
|
||||
### Digest Pattern
|
||||
|
||||
Like cost digests, plugin wisps accumulate and get squashed daily:
|
||||
|
||||
```bash
|
||||
gt plugin digest --yesterday
|
||||
```
|
||||
|
||||
Creates: `Plugin Digest 2026-01-10` bead with summary
|
||||
Deletes: Individual plugin-run wisps from that day
|
||||
|
||||
This keeps the ledger clean while preserving audit history.
|
||||
|
||||
---
|
||||
|
||||
## Plugin Format Specification
|
||||
|
||||
### File Structure
|
||||
|
||||
```
|
||||
rebuild-gt/
|
||||
└── plugin.md # Definition with TOML frontmatter
|
||||
```
|
||||
|
||||
### plugin.md Format
|
||||
|
||||
```markdown
|
||||
+++
|
||||
name = "rebuild-gt"
|
||||
description = "Rebuild stale gt binary from source"
|
||||
version = 1
|
||||
|
||||
[gate]
|
||||
type = "cooldown"
|
||||
duration = "1h"
|
||||
|
||||
[tracking]
|
||||
labels = ["plugin:rebuild-gt", "rig:gastown", "category:maintenance"]
|
||||
digest = true
|
||||
|
||||
[execution]
|
||||
timeout = "5m"
|
||||
notify_on_failure = true
|
||||
+++
|
||||
|
||||
# Rebuild gt Binary
|
||||
|
||||
Instructions for the dog worker to execute...
|
||||
```
|
||||
|
||||
### TOML Frontmatter Schema
|
||||
|
||||
```toml
|
||||
# Required
|
||||
name = "string" # Unique plugin identifier
|
||||
description = "string" # Human-readable description
|
||||
version = 1 # Schema version (for future evolution)
|
||||
|
||||
[gate]
|
||||
type = "cooldown|cron|condition|event|manual"
|
||||
# Type-specific fields:
|
||||
duration = "1h" # For cooldown
|
||||
schedule = "0 9 * * *" # For cron
|
||||
check = "gt stale -q" # For condition (exit 0 = run)
|
||||
on = "startup" # For event
|
||||
|
||||
[tracking]
|
||||
labels = ["label:value", ...] # Labels for execution wisps
|
||||
digest = true|false # Include in daily digest
|
||||
|
||||
[execution]
|
||||
timeout = "5m" # Max execution time
|
||||
notify_on_failure = true # Escalate on failure
|
||||
severity = "low" # Escalation severity if failed
|
||||
```
|
||||
|
||||
### Gate Types
|
||||
|
||||
| Type | Config | Behavior |
|
||||
|------|--------|----------|
|
||||
| `cooldown` | `duration = "1h"` | Query wisps, run if none in window |
|
||||
| `cron` | `schedule = "0 9 * * *"` | Run on cron schedule |
|
||||
| `condition` | `check = "cmd"` | Run check command, run if exit 0 |
|
||||
| `event` | `on = "startup"` | Run on Deacon startup |
|
||||
| `manual` | (no gate section) | Never auto-run, dispatch explicitly |
|
||||
|
||||
### Instructions Section
|
||||
|
||||
The markdown body after the frontmatter contains agent-executable instructions. The dog worker reads and executes these steps.
|
||||
|
||||
Standard sections:
|
||||
- **Detection**: Check if action is needed
|
||||
- **Action**: The actual work
|
||||
- **Record Result**: Create the execution wisp
|
||||
- **Notification**: On success/failure
|
||||
|
||||
---
|
||||
|
||||
## Escalation System
|
||||
|
||||
### Problem
|
||||
|
||||
Current escalation is ad-hoc "mail Mayor". Issues:
|
||||
- Mayor gets backlogged easily
|
||||
- No severity differentiation
|
||||
- No alternative channels (email, SMS, etc.)
|
||||
- No tracking of stale escalations
|
||||
|
||||
### Solution: Unified Escalation API
|
||||
|
||||
New command:
|
||||
|
||||
```bash
|
||||
gt escalate \
|
||||
--severity=<low|medium|high|critical> \
|
||||
--subject="Plugin FAILED: rebuild-gt" \
|
||||
--body="Build failed: make returned exit code 2" \
|
||||
--source="plugin:rebuild-gt"
|
||||
```
|
||||
|
||||
### Escalation Routing
|
||||
|
||||
The command reads town config (`~/gt/config.json` or similar) for routing rules:
|
||||
|
||||
```json
|
||||
{
|
||||
"escalation": {
|
||||
"routes": {
|
||||
"low": ["bead"],
|
||||
"medium": ["bead", "mail:mayor"],
|
||||
"high": ["bead", "mail:mayor", "email:human"],
|
||||
"critical": ["bead", "mail:mayor", "email:human", "sms:human"]
|
||||
},
|
||||
"contacts": {
|
||||
"human_email": "steve@example.com",
|
||||
"human_sms": "+1234567890"
|
||||
},
|
||||
"stale_threshold": "4h"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Escalation Actions
|
||||
|
||||
| Action | Behavior |
|
||||
|--------|----------|
|
||||
| `bead` | Create escalation bead with severity label |
|
||||
| `mail:mayor` | Send mail to mayor/ |
|
||||
| `email:human` | Send email via configured service |
|
||||
| `sms:human` | Send SMS via configured service |
|
||||
|
||||
### Escalation Beads
|
||||
|
||||
Every escalation creates a bead:
|
||||
|
||||
```yaml
|
||||
type: escalation
|
||||
status: open
|
||||
labels:
|
||||
- severity:high
|
||||
- source:plugin:rebuild-gt
|
||||
- acknowledged:false
|
||||
```
|
||||
|
||||
### Stale Escalation Patrol
|
||||
|
||||
A patrol step (or plugin!) checks for unacknowledged escalations:
|
||||
|
||||
```bash
|
||||
bd list --type=escalation --label=acknowledged:false --older-than=4h
|
||||
```
|
||||
|
||||
Stale escalations get re-escalated at higher severity.
|
||||
|
||||
### Acknowledging Escalations
|
||||
|
||||
```bash
|
||||
gt escalate ack <bead-id>
|
||||
# Sets label acknowledged:true
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## New Commands Required
|
||||
|
||||
### gt stale
|
||||
|
||||
Expose binary staleness check:
|
||||
|
||||
```bash
|
||||
gt stale # Human-readable output
|
||||
gt stale --json # Machine-readable
|
||||
gt stale --quiet # Exit code only (0=stale, 1=fresh)
|
||||
```
|
||||
|
||||
### gt dog dispatch
|
||||
|
||||
Formalized plugin dispatch to dogs:
|
||||
|
||||
```bash
|
||||
gt dog dispatch --plugin <name> [--rig <rig>]
|
||||
```
|
||||
|
||||
This:
|
||||
1. Finds the plugin definition
|
||||
2. Slings a standardized work unit to an idle dog
|
||||
3. Returns immediately (non-blocking)
|
||||
|
||||
### gt escalate
|
||||
|
||||
Unified escalation API:
|
||||
|
||||
```bash
|
||||
gt escalate \
|
||||
--severity=<level> \
|
||||
--subject="..." \
|
||||
--body="..." \
|
||||
[--source="..."]
|
||||
|
||||
gt escalate ack <bead-id>
|
||||
gt escalate list [--severity=...] [--stale]
|
||||
```
|
||||
|
||||
### gt plugin
|
||||
|
||||
Plugin management:
|
||||
|
||||
```bash
|
||||
gt plugin list # List all plugins
|
||||
gt plugin show <name> # Show plugin details
|
||||
gt plugin run <name> [--force] # Manual trigger
|
||||
gt plugin digest [--yesterday] # Squash wisps to digest
|
||||
gt plugin history <name> # Show execution history
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Foundation
|
||||
|
||||
1. **`gt stale` command** - Expose CheckStaleBinary() via CLI
|
||||
2. **Plugin format spec** - Finalize TOML schema
|
||||
3. **Plugin scanning** - Deacon scans town + rig plugin dirs
|
||||
|
||||
### Phase 2: Execution
|
||||
|
||||
4. **`gt dog dispatch --plugin`** - Formalized dog dispatch
|
||||
5. **Plugin execution in dogs** - Dog reads plugin.md, executes
|
||||
6. **Wisp creation** - Record results on ledger
|
||||
|
||||
### Phase 3: Gates & State
|
||||
|
||||
7. **Gate evaluation** - Cooldown via wisp query
|
||||
8. **Other gate types** - Cron, condition, event
|
||||
9. **Plugin digest** - Daily squash of plugin wisps
|
||||
|
||||
### Phase 4: Escalation
|
||||
|
||||
10. **`gt escalate` command** - Unified escalation API
|
||||
11. **Escalation routing** - Config-driven multi-channel
|
||||
12. **Stale escalation patrol** - Check unacknowledged
|
||||
|
||||
### Phase 5: First Plugin
|
||||
|
||||
13. **`rebuild-gt` plugin** - The actual gastown plugin
|
||||
14. **Documentation** - So Beads/Wyvern can create theirs
|
||||
|
||||
---
|
||||
|
||||
## Example: rebuild-gt Plugin
|
||||
|
||||
```markdown
|
||||
+++
|
||||
name = "rebuild-gt"
|
||||
description = "Rebuild stale gt binary from gastown source"
|
||||
version = 1
|
||||
|
||||
[gate]
|
||||
type = "cooldown"
|
||||
duration = "1h"
|
||||
|
||||
[tracking]
|
||||
labels = ["plugin:rebuild-gt", "rig:gastown", "category:maintenance"]
|
||||
digest = true
|
||||
|
||||
[execution]
|
||||
timeout = "5m"
|
||||
notify_on_failure = true
|
||||
severity = "medium"
|
||||
+++
|
||||
|
||||
# Rebuild gt Binary
|
||||
|
||||
Checks if the gt binary is stale (built from older commit than HEAD) and rebuilds.
|
||||
|
||||
## Gate Check
|
||||
|
||||
The Deacon evaluates this before dispatch. If gate closed, skip.
|
||||
|
||||
## Detection
|
||||
|
||||
Check binary staleness:
|
||||
|
||||
```bash
|
||||
gt stale --json
|
||||
```
|
||||
|
||||
If `"stale": false`, record success wisp and exit early.
|
||||
|
||||
## Action
|
||||
|
||||
Rebuild from source:
|
||||
|
||||
```bash
|
||||
cd ~/gt/gastown/crew/george && make build && make install
|
||||
```
|
||||
|
||||
## Record Result
|
||||
|
||||
On success:
|
||||
```bash
|
||||
bd wisp create \
|
||||
--label type:plugin-run \
|
||||
--label plugin:rebuild-gt \
|
||||
--label rig:gastown \
|
||||
--label result:success \
|
||||
--body "Rebuilt gt: $OLD → $NEW ($N commits)"
|
||||
```
|
||||
|
||||
On failure:
|
||||
```bash
|
||||
bd wisp create \
|
||||
--label type:plugin-run \
|
||||
--label plugin:rebuild-gt \
|
||||
--label rig:gastown \
|
||||
--label result:failure \
|
||||
--body "Build failed: $ERROR"
|
||||
|
||||
gt escalate --severity=medium \
|
||||
--subject="Plugin FAILED: rebuild-gt" \
|
||||
--body="$ERROR" \
|
||||
--source="plugin:rebuild-gt"
|
||||
```
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Open Questions
|
||||
|
||||
1. **Plugin discovery in multiple clones**: If gastown has crew/george, crew/max, crew/joe - which clone's plugins/ dir is canonical? Probably: scan all, dedupe by name, prefer rig-root if exists.
|
||||
|
||||
2. **Dog assignment**: Should specific plugins prefer specific dogs? Or any idle dog?
|
||||
|
||||
3. **Plugin dependencies**: Can plugins depend on other plugins? Probably not in v1.
|
||||
|
||||
4. **Plugin disable/enable**: How to temporarily disable a plugin without deleting it? Label on a plugin bead? `enabled = false` in frontmatter?
|
||||
|
||||
---
|
||||
|
||||
## References
|
||||
|
||||
- PRIMING.md - Core design principles
|
||||
- mol-deacon-patrol.formula.toml - Patrol step plugin-run
|
||||
- ~/gt/plugins/README.md - Current plugin stub
|
||||
248
docs/formula-resolution.md
Normal file
248
docs/formula-resolution.md
Normal file
@@ -0,0 +1,248 @@
|
||||
# Formula Resolution Architecture
|
||||
|
||||
> Where formulas live, how they're found, and how they'll scale to Mol Mall
|
||||
|
||||
## The Problem
|
||||
|
||||
Formulas currently exist in multiple locations with no clear precedence:
|
||||
- `.beads/formulas/` (source of truth for a project)
|
||||
- `internal/formula/formulas/` (embedded copy for `go install`)
|
||||
- Crew directories have their own `.beads/formulas/` (diverging copies)
|
||||
|
||||
When an agent runs `bd cook mol-polecat-work`, which version do they get?
|
||||
|
||||
## Design Goals
|
||||
|
||||
1. **Predictable resolution** - Clear precedence rules
|
||||
2. **Local customization** - Override system defaults without forking
|
||||
3. **Project-specific formulas** - Committed workflows for collaborators
|
||||
4. **Mol Mall ready** - Architecture supports remote formula installation
|
||||
5. **Federation ready** - Formulas are shareable across towns via HOP (Highway Operations Protocol)
|
||||
|
||||
## Three-Tier Resolution
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ FORMULA RESOLUTION ORDER │
|
||||
│ (most specific wins) │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
|
||||
TIER 1: PROJECT (rig-level)
|
||||
Location: <project>/.beads/formulas/
|
||||
Source: Committed to project repo
|
||||
Use case: Project-specific workflows (deploy, test, release)
|
||||
Example: ~/gt/gastown/.beads/formulas/mol-gastown-release.formula.toml
|
||||
|
||||
TIER 2: TOWN (user-level)
|
||||
Location: ~/gt/.beads/formulas/
|
||||
Source: Mol Mall installs, user customizations
|
||||
Use case: Cross-project workflows, personal preferences
|
||||
Example: ~/gt/.beads/formulas/mol-polecat-work.formula.toml (customized)
|
||||
|
||||
TIER 3: SYSTEM (embedded)
|
||||
Location: Compiled into gt binary
|
||||
Source: gastown/mayor/rig/.beads/formulas/ at build time
|
||||
Use case: Defaults, blessed patterns, fallback
|
||||
Example: mol-polecat-work.formula.toml (factory default)
|
||||
```
|
||||
|
||||
### Resolution Algorithm
|
||||
|
||||
```go
|
||||
func ResolveFormula(name string, cwd string) (Formula, Tier, error) {
|
||||
// Tier 1: Project-level (walk up from cwd to find .beads/formulas/)
|
||||
if projectDir := findProjectRoot(cwd); projectDir != "" {
|
||||
path := filepath.Join(projectDir, ".beads", "formulas", name+".formula.toml")
|
||||
if f, err := loadFormula(path); err == nil {
|
||||
return f, TierProject, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Tier 2: Town-level
|
||||
townDir := getTownRoot() // ~/gt or $GT_HOME
|
||||
path := filepath.Join(townDir, ".beads", "formulas", name+".formula.toml")
|
||||
if f, err := loadFormula(path); err == nil {
|
||||
return f, TierTown, nil
|
||||
}
|
||||
|
||||
// Tier 3: Embedded (system)
|
||||
if f, err := loadEmbeddedFormula(name); err == nil {
|
||||
return f, TierSystem, nil
|
||||
}
|
||||
|
||||
return nil, 0, ErrFormulaNotFound
|
||||
}
|
||||
```
|
||||
|
||||
### Why This Order
|
||||
|
||||
**Project wins** because:
|
||||
- Project maintainers know their workflows best
|
||||
- Collaborators get consistent behavior via git
|
||||
- CI/CD uses the same formulas as developers
|
||||
|
||||
**Town is middle** because:
|
||||
- User customizations override system defaults
|
||||
- Mol Mall installs don't require project changes
|
||||
- Cross-project consistency for the user
|
||||
|
||||
**System is fallback** because:
|
||||
- Always available (compiled in)
|
||||
- Factory reset target
|
||||
- The "blessed" versions
|
||||
|
||||
## Formula Identity
|
||||
|
||||
### Current Format
|
||||
|
||||
```toml
|
||||
formula = "mol-polecat-work"
|
||||
version = 4
|
||||
description = "..."
|
||||
```
|
||||
|
||||
### Extended Format (Mol Mall Ready)
|
||||
|
||||
```toml
|
||||
[formula]
|
||||
name = "mol-polecat-work"
|
||||
version = "4.0.0" # Semver
|
||||
author = "steve@gastown.io" # Author identity
|
||||
license = "MIT"
|
||||
repository = "https://github.com/steveyegge/gastown"
|
||||
|
||||
[formula.registry]
|
||||
uri = "hop://molmall.gastown.io/formulas/mol-polecat-work@4.0.0"
|
||||
checksum = "sha256:abc123..." # Integrity verification
|
||||
signed_by = "steve@gastown.io" # Optional signing
|
||||
|
||||
[formula.capabilities]
|
||||
# What capabilities does this formula exercise? Used for agent routing.
|
||||
primary = ["go", "testing", "code-review"]
|
||||
secondary = ["git", "ci-cd"]
|
||||
```
|
||||
|
||||
### Version Resolution
|
||||
|
||||
When multiple versions exist:
|
||||
|
||||
```bash
|
||||
bd cook mol-polecat-work # Resolves per tier order
|
||||
bd cook mol-polecat-work@4 # Specific major version
|
||||
bd cook mol-polecat-work@4.0.0 # Exact version
|
||||
bd cook mol-polecat-work@latest # Explicit latest
|
||||
```
|
||||
|
||||
## Crew Directory Problem
|
||||
|
||||
### Current State
|
||||
|
||||
Crew directories (`gastown/crew/max/`) are sparse checkouts of gastown. They have:
|
||||
- Their own `.beads/formulas/` (from the checkout)
|
||||
- These can diverge from `mayor/rig/.beads/formulas/`
|
||||
|
||||
### The Fix
|
||||
|
||||
Crew should NOT have their own formula copies. Options:
|
||||
|
||||
**Option A: Symlink/Redirect**
|
||||
```bash
|
||||
# crew/max/.beads/formulas -> ../../mayor/rig/.beads/formulas
|
||||
```
|
||||
All crew share the rig's formulas.
|
||||
|
||||
**Option B: Provision on Demand**
|
||||
Crew directories don't have `.beads/formulas/`. Resolution falls through to:
|
||||
1. Town-level (~/gt/.beads/formulas/)
|
||||
2. System (embedded)
|
||||
|
||||
**Option C: Sparse Checkout Exclusion**
|
||||
Exclude `.beads/formulas/` from crew sparse checkouts entirely.
|
||||
|
||||
**Recommendation: Option B** - Crew shouldn't need project-level formulas. They work on the project, they don't define its workflows.
|
||||
|
||||
## Commands
|
||||
|
||||
### Existing
|
||||
|
||||
```bash
|
||||
bd formula list # Available formulas (should show tier)
|
||||
bd formula show <name> # Formula details
|
||||
bd cook <formula> # Formula → Proto
|
||||
```
|
||||
|
||||
### Enhanced
|
||||
|
||||
```bash
|
||||
# List with tier information
|
||||
bd formula list
|
||||
mol-polecat-work v4 [project]
|
||||
mol-polecat-code-review v1 [town]
|
||||
mol-witness-patrol v2 [system]
|
||||
|
||||
# Show resolution path
|
||||
bd formula show mol-polecat-work --resolve
|
||||
Resolving: mol-polecat-work
|
||||
✓ Found at: ~/gt/gastown/.beads/formulas/mol-polecat-work.formula.toml
|
||||
Tier: project
|
||||
Version: 4
|
||||
|
||||
Resolution path checked:
|
||||
1. [project] ~/gt/gastown/.beads/formulas/ ← FOUND
|
||||
2. [town] ~/gt/.beads/formulas/
|
||||
3. [system] <embedded>
|
||||
|
||||
# Override tier for testing
|
||||
bd cook mol-polecat-work --tier=system # Force embedded version
|
||||
bd cook mol-polecat-work --tier=town # Force town version
|
||||
```
|
||||
|
||||
### Future (Mol Mall)
|
||||
|
||||
```bash
|
||||
# Install from Mol Mall
|
||||
gt formula install mol-code-review-strict
|
||||
gt formula install mol-code-review-strict@2.0.0
|
||||
gt formula install hop://acme.corp/formulas/mol-deploy
|
||||
|
||||
# Manage installed formulas
|
||||
gt formula list --installed # What's in town-level
|
||||
gt formula upgrade mol-polecat-work # Update to latest
|
||||
gt formula pin mol-polecat-work@4.0.0 # Lock version
|
||||
gt formula uninstall mol-code-review-strict
|
||||
```
|
||||
|
||||
## Migration Path
|
||||
|
||||
### Phase 1: Resolution Order (Now)
|
||||
|
||||
1. Implement three-tier resolution in `bd cook`
|
||||
2. Add `--resolve` flag to show resolution path
|
||||
3. Update `bd formula list` to show tiers
|
||||
4. Fix crew directories (Option B)
|
||||
|
||||
### Phase 2: Town-Level Formulas
|
||||
|
||||
1. Establish `~/gt/.beads/formulas/` as town formula location
|
||||
2. Add `gt formula` commands for managing town formulas
|
||||
3. Support manual installation (copy file, track in `.installed.json`)
|
||||
|
||||
### Phase 3: Mol Mall Integration
|
||||
|
||||
1. Define registry API (see mol-mall-design.md)
|
||||
2. Implement `gt formula install` from remote
|
||||
3. Add version pinning and upgrade flows
|
||||
4. Add integrity verification (checksums, optional signing)
|
||||
|
||||
### Phase 4: Federation (HOP)
|
||||
|
||||
1. Add capability tags to formula schema
|
||||
2. Track formula execution for agent accountability
|
||||
3. Enable federation (cross-town formula sharing via Highway Operations Protocol)
|
||||
4. Author attribution and validation records
|
||||
|
||||
## Related Documents
|
||||
|
||||
- [Mol Mall Design](mol-mall-design.md) - Registry architecture
|
||||
- [molecules.md](molecules.md) - Formula → Proto → Mol lifecycle
|
||||
- [understanding-gas-town.md](../../../docs/understanding-gas-town.md) - Gas Town architecture
|
||||
@@ -1,73 +0,0 @@
|
||||
# Decision 009: Session Events Architecture
|
||||
|
||||
**Status:** Accepted
|
||||
**Date:** 2025-12-31
|
||||
**Context:** Where should session events live? Beads, separate repo, or events.jsonl?
|
||||
|
||||
## Decision
|
||||
|
||||
Session events are **orchestration infrastructure**, not work items. They stay in
|
||||
`events.jsonl` (outside beads). Work attribution happens by capturing `session_id`
|
||||
on beads mutations (issue close, MR merge).
|
||||
|
||||
## Context
|
||||
|
||||
The seance feature needs to discover and resume Claude Code sessions. This requires:
|
||||
1. **Pointer** to session (session_id) - for `claude --resume`
|
||||
2. **Attribution** (which work happened in this session) - for entity CV
|
||||
|
||||
Claude Code already stores full session transcripts indefinitely. Gas Town doesn't
|
||||
need to duplicate them - just point at them.
|
||||
|
||||
## The Separation
|
||||
|
||||
| Layer | Storage | Content | Retention |
|
||||
|-------|---------|---------|-----------|
|
||||
| **Orchestration** | `~/.events.jsonl` | session_start, nudges, mail routing | Ephemeral (auto-prune) |
|
||||
| **Work** | Beads (rig-level) | Issues, MRs, convoys | Permanent (ledger) |
|
||||
| **Entity activity** | Beads (entity chain) | Session digests | Permanent (CV) |
|
||||
| **Transcript** | Claude Code | Full session content | Claude Code's retention |
|
||||
|
||||
## Why Not Beads for Events?
|
||||
|
||||
1. **Volume**: Orchestration events are high volume, would overwhelm work signal
|
||||
2. **Ephemerality**: Most orchestration events don't need CV/ledger permanence
|
||||
3. **Different audiences**: Work items are cross-agent; orchestration is internal
|
||||
4. **Claude Code has it**: Transcripts already live there; we just need pointers
|
||||
|
||||
## Implementation
|
||||
|
||||
### Phase 1: Attribution (Now)
|
||||
- `gt done` captures `CLAUDE_SESSION_ID` in issue close
|
||||
- Beads supports `closed_by_session` field on issue mutations
|
||||
- Events.jsonl continues to capture `session_start` for seance
|
||||
|
||||
### Phase 2: Session Digests (Future)
|
||||
- Sessions as wisps: `session_start` creates ephemeral wisp
|
||||
- Session work adds steps (issues closed, commits made)
|
||||
- `session_end` squashes to digest
|
||||
- Digest lives on entity chain (agent CV)
|
||||
|
||||
### Phase 3: Pruning (Future)
|
||||
- Events.jsonl auto-prunes after N days
|
||||
- Session digests provide permanent summary
|
||||
- Full transcripts remain in Claude Code
|
||||
|
||||
## Consequences
|
||||
|
||||
**Positive:**
|
||||
- Clean separation of concerns
|
||||
- Work ledger stays focused on work
|
||||
- CV attribution via session_id on beads mutations
|
||||
- Seance works via events.jsonl discovery
|
||||
|
||||
**Negative:**
|
||||
- Two systems to understand (events vs beads)
|
||||
- Need to ensure session_id flows through commands
|
||||
|
||||
## Related
|
||||
|
||||
- `gt seance` - Session discovery and resume
|
||||
- `gt-3zsml` - SessionStart hook passes session_id to gt prime
|
||||
- PRIMING.md - "The Feed Is the Signal" section
|
||||
- CONTEXT.md - Entity chains and CV model
|
||||
476
docs/mol-mall-design.md
Normal file
476
docs/mol-mall-design.md
Normal file
@@ -0,0 +1,476 @@
|
||||
# Mol Mall Design
|
||||
|
||||
> A marketplace for Gas Town formulas
|
||||
|
||||
## Vision
|
||||
|
||||
**Mol Mall** is a registry for sharing formulas across Gas Town installations. Think npm for molecules, or Terraform Registry for workflows.
|
||||
|
||||
```
|
||||
"Cook a formula, sling it to a polecat, the witness watches, refinery merges."
|
||||
|
||||
What if you could browse a mall of formulas, install one, and immediately
|
||||
have your polecats executing world-class workflows?
|
||||
```
|
||||
|
||||
### The Network Effect
|
||||
|
||||
A well-designed formula for "code review" or "security audit" or "deploy to K8s" can spread across thousands of Gas Town installations. Each adoption means:
|
||||
- More agents executing proven workflows
|
||||
- More structured, trackable work output
|
||||
- Better capability routing (agents with track records on a formula get similar work)
|
||||
|
||||
## Architecture
|
||||
|
||||
### Registry Types
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ MOL MALL REGISTRIES │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
|
||||
PUBLIC REGISTRY (molmall.gastown.io)
|
||||
├── Community formulas (MIT licensed)
|
||||
├── Official Gas Town formulas (blessed)
|
||||
├── Verified publisher formulas
|
||||
└── Open contribution model
|
||||
|
||||
PRIVATE REGISTRY (self-hosted)
|
||||
├── Organization-specific formulas
|
||||
├── Proprietary workflows
|
||||
├── Internal deployment patterns
|
||||
└── Enterprise compliance formulas
|
||||
|
||||
FEDERATED REGISTRY (HOP future)
|
||||
├── Cross-organization discovery
|
||||
├── Skill-based search
|
||||
  ├── Attribution chain tracking
|
||||
└── hop:// URI resolution
|
||||
```
|
||||
|
||||
### URI Scheme
|
||||
|
||||
```
|
||||
hop://molmall.gastown.io/formulas/mol-polecat-work@4.0.0
|
||||
└──────────────────┘ └──────────────┘ └───┘
|
||||
registry host formula name version
|
||||
|
||||
# Short forms
|
||||
mol-polecat-work # Default registry, latest version
|
||||
mol-polecat-work@4 # Major version
|
||||
mol-polecat-work@4.0.0 # Exact version
|
||||
@acme/mol-deploy # Scoped to publisher
|
||||
hop://acme.corp/formulas/mol-deploy # Full HOP URI
|
||||
```
|
||||
|
||||
### Registry API
|
||||
|
||||
```yaml
|
||||
# OpenAPI-style specification
|
||||
|
||||
GET /formulas
|
||||
# List all formulas
|
||||
Query:
|
||||
- q: string # Search query
|
||||
- capabilities: string[] # Filter by capability tags
|
||||
- author: string # Filter by author
|
||||
- limit: int
|
||||
- offset: int
|
||||
Response:
|
||||
formulas:
|
||||
- name: mol-polecat-work
|
||||
version: 4.0.0
|
||||
description: "Full polecat work lifecycle..."
|
||||
author: steve@gastown.io
|
||||
downloads: 12543
|
||||
capabilities: [go, testing, code-review]
|
||||
|
||||
GET /formulas/{name}
|
||||
# Get formula metadata
|
||||
Response:
|
||||
name: mol-polecat-work
|
||||
versions: [4.0.0, 3.2.1, 3.2.0, ...]
|
||||
latest: 4.0.0
|
||||
author: steve@gastown.io
|
||||
repository: https://github.com/steveyegge/gastown
|
||||
license: MIT
|
||||
capabilities:
|
||||
primary: [go, testing]
|
||||
secondary: [git, code-review]
|
||||
stats:
|
||||
downloads: 12543
|
||||
stars: 234
|
||||
used_by: 89 # towns using this formula
|
||||
|
||||
GET /formulas/{name}/{version}
|
||||
# Get specific version
|
||||
Response:
|
||||
name: mol-polecat-work
|
||||
version: 4.0.0
|
||||
checksum: sha256:abc123...
|
||||
signature: <optional PGP signature>
|
||||
content: <base64 or URL to .formula.toml>
|
||||
changelog: "Added self-cleaning model..."
|
||||
published_at: 2026-01-10T00:00:00Z
|
||||
|
||||
POST /formulas
|
||||
# Publish formula (authenticated)
|
||||
Body:
|
||||
name: mol-my-workflow
|
||||
version: 1.0.0
|
||||
content: <formula TOML>
|
||||
changelog: "Initial release"
|
||||
Auth: Bearer token (linked to HOP identity)
|
||||
|
||||
GET /formulas/{name}/{version}/download
|
||||
# Download formula content
|
||||
Response: raw .formula.toml content
|
||||
```
|
||||
|
||||
## Formula Package Format
|
||||
|
||||
### Simple Case: Single File
|
||||
|
||||
Most formulas are single `.formula.toml` files:
|
||||
|
||||
```bash
|
||||
gt formula install mol-polecat-code-review
|
||||
# Downloads mol-polecat-code-review.formula.toml to ~/gt/.beads/formulas/
|
||||
```
|
||||
|
||||
### Complex Case: Formula Bundle
|
||||
|
||||
Some formulas need supporting files (scripts, templates, configs):
|
||||
|
||||
```
|
||||
mol-deploy-k8s.formula.bundle/
|
||||
├── formula.toml # Main formula
|
||||
├── templates/
|
||||
│ ├── deployment.yaml.tmpl
|
||||
│ └── service.yaml.tmpl
|
||||
├── scripts/
|
||||
│ └── healthcheck.sh
|
||||
└── README.md
|
||||
```
|
||||
|
||||
Bundle format:
|
||||
```bash
|
||||
# Bundles are tarballs
|
||||
mol-deploy-k8s-1.0.0.bundle.tar.gz
|
||||
```
|
||||
|
||||
Installation:
|
||||
```bash
|
||||
gt formula install mol-deploy-k8s
|
||||
# Extracts to ~/gt/.beads/formulas/mol-deploy-k8s/
|
||||
# formula.toml is at mol-deploy-k8s/formula.toml
|
||||
```
|
||||
|
||||
## Installation Flow
|
||||
|
||||
### Basic Install
|
||||
|
||||
```bash
|
||||
$ gt formula install mol-polecat-code-review
|
||||
|
||||
Resolving mol-polecat-code-review...
|
||||
Registry: molmall.gastown.io
|
||||
Version: 1.2.0 (latest)
|
||||
Author: steve@gastown.io
|
||||
Skills: code-review, security
|
||||
|
||||
Downloading... ████████████████████ 100%
|
||||
Verifying checksum... ✓
|
||||
|
||||
Installed to: ~/gt/.beads/formulas/mol-polecat-code-review.formula.toml
|
||||
```
|
||||
|
||||
### Version Pinning
|
||||
|
||||
```bash
|
||||
$ gt formula install mol-polecat-work@4.0.0
|
||||
|
||||
Installing mol-polecat-work@4.0.0 (pinned)...
|
||||
✓ Installed
|
||||
|
||||
$ gt formula list --installed
|
||||
mol-polecat-work 4.0.0 [pinned]
|
||||
mol-polecat-code-review 1.2.0 [latest]
|
||||
```
|
||||
|
||||
### Upgrade Flow
|
||||
|
||||
```bash
|
||||
$ gt formula upgrade mol-polecat-code-review
|
||||
|
||||
Checking for updates...
|
||||
Current: 1.2.0
|
||||
Latest: 1.3.0
|
||||
|
||||
Changelog for 1.3.0:
|
||||
- Added security focus option
|
||||
- Improved test coverage step
|
||||
|
||||
Upgrade? [y/N] y
|
||||
|
||||
Downloading... ✓
|
||||
Installed: mol-polecat-code-review@1.3.0
|
||||
```
|
||||
|
||||
### Lock File
|
||||
|
||||
```json
|
||||
// ~/gt/.beads/formulas/.lock.json
|
||||
{
|
||||
"version": 1,
|
||||
"formulas": {
|
||||
"mol-polecat-work": {
|
||||
"version": "4.0.0",
|
||||
"pinned": true,
|
||||
"checksum": "sha256:abc123...",
|
||||
"installed_at": "2026-01-10T00:00:00Z",
|
||||
"source": "hop://molmall.gastown.io/formulas/mol-polecat-work@4.0.0"
|
||||
},
|
||||
"mol-polecat-code-review": {
|
||||
"version": "1.3.0",
|
||||
"pinned": false,
|
||||
"checksum": "sha256:def456...",
|
||||
"installed_at": "2026-01-10T12:00:00Z",
|
||||
"source": "hop://molmall.gastown.io/formulas/mol-polecat-code-review@1.3.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Publishing Flow
|
||||
|
||||
### First-Time Setup
|
||||
|
||||
```bash
|
||||
$ gt formula publish --init
|
||||
|
||||
Setting up Mol Mall publishing...
|
||||
|
||||
1. Create account at https://molmall.gastown.io/signup
|
||||
2. Generate API token at https://molmall.gastown.io/settings/tokens
|
||||
3. Run: gt formula login
|
||||
|
||||
$ gt formula login
|
||||
Token: ********
|
||||
Logged in as: steve@gastown.io
|
||||
```
|
||||
|
||||
### Publishing
|
||||
|
||||
```bash
|
||||
$ gt formula publish mol-polecat-work
|
||||
|
||||
Publishing mol-polecat-work...
|
||||
|
||||
Pre-flight checks:
|
||||
✓ formula.toml is valid
|
||||
✓ Version 4.0.0 not yet published
|
||||
✓ Required fields present (name, version, description)
|
||||
✓ Skills declared
|
||||
|
||||
Publish to molmall.gastown.io? [y/N] y
|
||||
|
||||
Uploading... ✓
|
||||
Published: hop://molmall.gastown.io/formulas/mol-polecat-work@4.0.0
|
||||
|
||||
View at: https://molmall.gastown.io/formulas/mol-polecat-work
|
||||
```
|
||||
|
||||
### Verification Levels
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ FORMULA TRUST LEVELS │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
|
||||
UNVERIFIED (default)
|
||||
Anyone can publish
|
||||
Basic validation only
|
||||
Displayed with ⚠️ warning
|
||||
|
||||
VERIFIED PUBLISHER
|
||||
Publisher identity confirmed
|
||||
Displayed with ✓ checkmark
|
||||
Higher search ranking
|
||||
|
||||
OFFICIAL
|
||||
Maintained by Gas Town team
|
||||
Displayed with 🏛️ badge
|
||||
Included in embedded defaults
|
||||
|
||||
AUDITED
|
||||
Security review completed
|
||||
Displayed with 🔒 badge
|
||||
Required for enterprise registries
|
||||
```
|
||||
|
||||
## Capability Tagging
|
||||
|
||||
### Formula Capability Declaration
|
||||
|
||||
```toml
|
||||
[formula.capabilities]
|
||||
# What capabilities does this formula exercise? Used for agent routing.
|
||||
primary = ["go", "testing", "code-review"]
|
||||
secondary = ["git", "ci-cd"]
|
||||
|
||||
# Capability weights (optional, for fine-grained routing)
|
||||
[formula.capabilities.weights]
|
||||
go = 0.3 # 30% of formula work is Go
|
||||
testing = 0.4 # 40% is testing
|
||||
code-review = 0.3 # 30% is code review
|
||||
```
|
||||
|
||||
### Capability-Based Search
|
||||
|
||||
```bash
|
||||
$ gt formula search --capabilities="security,go"
|
||||
|
||||
Formulas matching capabilities: security, go
|
||||
|
||||
mol-security-audit v2.1.0 ⭐ 4.8 📥 8,234
|
||||
Capabilities: security, go, code-review
|
||||
"Comprehensive security audit workflow"
|
||||
|
||||
mol-dependency-scan v1.0.0 ⭐ 4.2 📥 3,102
|
||||
Capabilities: security, go, supply-chain
|
||||
"Scan Go dependencies for vulnerabilities"
|
||||
```
|
||||
|
||||
### Agent Accountability
|
||||
|
||||
When a polecat completes a formula, the execution is tracked:
|
||||
|
||||
```
|
||||
Polecat: beads/amber
|
||||
Formula: mol-polecat-code-review@1.3.0
|
||||
Completed: 2026-01-10T15:30:00Z
|
||||
Capabilities exercised:
|
||||
- code-review (primary)
|
||||
- security (secondary)
|
||||
- go (secondary)
|
||||
```
|
||||
|
||||
This execution record enables:
|
||||
1. **Routing** - Agents with successful track records get similar work
|
||||
2. **Debugging** - Trace which agent did what, when
|
||||
3. **Quality metrics** - Track success rates by agent and formula
|
||||
|
||||
## Private Registries
|
||||
|
||||
### Enterprise Deployment
|
||||
|
||||
```yaml
|
||||
# ~/.gtconfig.yaml
|
||||
registries:
|
||||
- name: acme
|
||||
url: https://molmall.acme.corp
|
||||
auth: token
|
||||
priority: 1 # Check first
|
||||
|
||||
- name: public
|
||||
url: https://molmall.gastown.io
|
||||
auth: none
|
||||
priority: 2 # Fallback
|
||||
```
|
||||
|
||||
### Self-Hosted Registry
|
||||
|
||||
```bash
|
||||
# Docker deployment
|
||||
docker run -d \
|
||||
-p 8080:8080 \
|
||||
-v /data/formulas:/formulas \
|
||||
-e AUTH_PROVIDER=oidc \
|
||||
gastown/molmall-registry:latest
|
||||
|
||||
# Configuration
|
||||
MOLMALL_STORAGE=s3://bucket/formulas
|
||||
MOLMALL_AUTH=oidc
|
||||
MOLMALL_OIDC_ISSUER=https://auth.acme.corp
|
||||
```
|
||||
|
||||
## Federation
|
||||
|
||||
Federation enables formula sharing across organizations using the Highway Operations Protocol (HOP).
|
||||
|
||||
### Cross-Registry Discovery
|
||||
|
||||
```bash
|
||||
$ gt formula search "deploy kubernetes" --federated
|
||||
|
||||
Searching across federated registries...
|
||||
|
||||
molmall.gastown.io:
|
||||
mol-deploy-k8s v3.0.0 🏛️ Official
|
||||
|
||||
molmall.acme.corp:
|
||||
@acme/mol-deploy-k8s v2.1.0 ✓ Verified
|
||||
|
||||
molmall.bigco.io:
|
||||
@bigco/k8s-workflow v1.0.0 ⚠️ Unverified
|
||||
```
|
||||
|
||||
### HOP URI Resolution
|
||||
|
||||
The `hop://` URI scheme provides cross-registry entity references:
|
||||
|
||||
```bash
|
||||
# Full HOP URI
|
||||
gt formula install hop://molmall.acme.corp/formulas/@acme/mol-deploy@2.1.0
|
||||
|
||||
# Resolution via HOP (Highway Operations Protocol)
|
||||
1. Parse hop:// URI
|
||||
2. Resolve registry endpoint (DNS/HOP discovery)
|
||||
3. Authenticate (if required)
|
||||
4. Download formula
|
||||
5. Verify checksum/signature
|
||||
6. Install to town-level
|
||||
```
|
||||
|
||||
## Implementation Phases
|
||||
|
||||
### Phase 1: Local Commands (Now)
|
||||
|
||||
- `gt formula list` with tier display
|
||||
- `gt formula show --resolve`
|
||||
- Formula resolution order (project → town → system)
|
||||
|
||||
### Phase 2: Manual Sharing
|
||||
|
||||
- Formula export/import
|
||||
- `gt formula export mol-polecat-work > mol-polecat-work.formula.toml`
|
||||
- `gt formula import < mol-polecat-work.formula.toml`
|
||||
- Lock file format
|
||||
|
||||
### Phase 3: Public Registry
|
||||
|
||||
- molmall.gastown.io launch
|
||||
- `gt formula install` from registry
|
||||
- `gt formula publish` flow
|
||||
- Basic search and browse
|
||||
|
||||
### Phase 4: Enterprise Features
|
||||
|
||||
- Private registry support
|
||||
- Authentication integration
|
||||
- Verification levels
|
||||
- Audit logging
|
||||
|
||||
### Phase 5: Federation (HOP)
|
||||
|
||||
- Capability tags in schema
|
||||
- Federation protocol (Highway Operations Protocol)
|
||||
- Cross-registry search
|
||||
- Agent execution tracking for accountability
|
||||
|
||||
## Related Documents
|
||||
|
||||
- [Formula Resolution](formula-resolution.md) - Local resolution order
|
||||
- [molecules.md](molecules.md) - Formula lifecycle (cook, pour, squash)
|
||||
- [understanding-gas-town.md](../../../docs/understanding-gas-town.md) - Gas Town architecture
|
||||
@@ -1,278 +0,0 @@
|
||||
# Operational State in Gas Town
|
||||
|
||||
> Managing runtime state, degraded modes, and the Boot triage system.
|
||||
|
||||
## Overview
|
||||
|
||||
Gas Town needs to track operational state: Is the Deacon's patrol muted? Is the
|
||||
system in degraded mode? When did state change, and why?
|
||||
|
||||
This document covers:
|
||||
- **Events**: State transitions as beads
|
||||
- **Labels-as-state**: Fast queries via role bead labels
|
||||
- **Boot**: The dog that triages the Deacon
|
||||
- **Degraded mode**: Operating without tmux
|
||||
|
||||
## Events: State Transitions as Data
|
||||
|
||||
Operational state changes are recorded as event beads. Each event captures:
|
||||
- **What** changed (`event_type`)
|
||||
- **Who** caused it (`actor`)
|
||||
- **What** was affected (`target`)
|
||||
- **Context** (`payload`)
|
||||
- **When** (`created_at`)
|
||||
|
||||
### Event Types
|
||||
|
||||
| Event Type | Description | Payload |
|
||||
|------------|-------------|---------|
|
||||
| `patrol.muted` | Patrol cycle disabled | `{reason, until?}` |
|
||||
| `patrol.unmuted` | Patrol cycle re-enabled | `{reason?}` |
|
||||
| `agent.started` | Agent session began | `{session_id?}` |
|
||||
| `agent.stopped` | Agent session ended | `{reason, outcome?}` |
|
||||
| `mode.degraded` | System entered degraded mode | `{reason}` |
|
||||
| `mode.normal` | System returned to normal | `{}` |
|
||||
|
||||
### Creating Events
|
||||
|
||||
```bash
|
||||
# Mute deacon patrol
|
||||
bd create --type=event --event-type=patrol.muted \
|
||||
--actor=human:overseer --target=agent:deacon \
|
||||
--payload='{"reason":"fixing convoy deadlock","until":"gt-abc1"}'
|
||||
|
||||
# System entered degraded mode
|
||||
bd create --type=event --event-type=mode.degraded \
|
||||
--actor=system:daemon --target=rig:greenplace \
|
||||
--payload='{"reason":"tmux unavailable"}'
|
||||
```
|
||||
|
||||
### Querying Events
|
||||
|
||||
```bash
|
||||
# Recent events for an agent
|
||||
bd list --type=event --target=agent:deacon --limit=10
|
||||
|
||||
# All patrol state changes
|
||||
bd list --type=event --event-type=patrol.muted
|
||||
bd list --type=event --event-type=patrol.unmuted
|
||||
|
||||
# Events in the activity feed
|
||||
bd activity --follow --type=event
|
||||
```
|
||||
|
||||
## Labels-as-State Pattern
|
||||
|
||||
Events capture the full history. Labels cache the current state for fast queries.
|
||||
|
||||
### Convention
|
||||
|
||||
Labels use `<dimension>:<value>` format:
|
||||
- `patrol:muted` / `patrol:active`
|
||||
- `mode:degraded` / `mode:normal`
|
||||
- `status:idle` / `status:working`
|
||||
|
||||
### State Change Flow
|
||||
|
||||
1. Create event bead (full context, immutable)
|
||||
2. Update role bead labels (current state cache)
|
||||
|
||||
```bash
|
||||
# Mute patrol
|
||||
bd create --type=event --event-type=patrol.muted ...
|
||||
bd update role-deacon --add-label=patrol:muted --remove-label=patrol:active
|
||||
|
||||
# Unmute patrol
|
||||
bd create --type=event --event-type=patrol.unmuted ...
|
||||
bd update role-deacon --add-label=patrol:active --remove-label=patrol:muted
|
||||
```
|
||||
|
||||
### Querying Current State
|
||||
|
||||
```bash
|
||||
# Is deacon patrol muted?
|
||||
bd show role-deacon | grep patrol:
|
||||
|
||||
# All agents with muted patrol
|
||||
bd list --type=role --label=patrol:muted
|
||||
|
||||
# All agents in degraded mode
|
||||
bd list --type=role --label=mode:degraded
|
||||
```
|
||||
|
||||
## Boot: The Deacon's Watchdog
|
||||
|
||||
> See [Watchdog Chain](watchdog-chain.md) for the complete Daemon/Boot/Deacon
|
||||
> architecture and design rationale.
|
||||
|
||||
Boot is a dog (Deacon helper) that triages the Deacon's health. The daemon pokes
|
||||
Boot instead of the Deacon directly, centralizing the "when to wake" decision in
|
||||
an agent that can reason about it.
|
||||
|
||||
### Why Boot?
|
||||
|
||||
The daemon is dumb transport (ZFC principle). It can't decide:
|
||||
- Is the Deacon stuck or just thinking?
|
||||
- Should we interrupt or let it continue?
|
||||
- Is the system in a state where nudging would help?
|
||||
|
||||
Boot is an agent that can observe and decide.
|
||||
|
||||
### Boot's Lifecycle
|
||||
|
||||
```
|
||||
Daemon tick
|
||||
│
|
||||
├── Check: Is Boot already running? (marker file)
|
||||
│ └── Yes + recent: Skip this tick
|
||||
│
|
||||
└── Spawn Boot (fresh session each time)
|
||||
│
|
||||
└── Boot runs triage molecule
|
||||
├── Observe (wisps, mail, git state, tmux panes)
|
||||
├── Decide (start/wake/nudge/interrupt/nothing)
|
||||
├── Act
|
||||
├── Clean inbox (discard stale handoffs)
|
||||
└── Handoff (or exit in degraded mode)
|
||||
```
|
||||
|
||||
### Boot is Always Fresh
|
||||
|
||||
Boot restarts on each daemon tick. This is intentional:
|
||||
- Narrow scope makes restarts cheap
|
||||
- Fresh context avoids accumulated confusion
|
||||
- Handoff mail provides continuity without session persistence
|
||||
- No keepalive needed
|
||||
|
||||
### Boot's Decision Guidance
|
||||
|
||||
Agents may take several minutes on legitimate work - composing artifacts, running
|
||||
tools, deep analysis. Edge cases may take ten minutes or more.
|
||||
|
||||
To assess whether an agent is stuck:
|
||||
1. Check the agent's last reported activity (recent wisps, mail sent, git commits)
|
||||
2. Observe the tmux pane output over a 30-second window
|
||||
3. Look for signs of progress vs. signs of hanging (tool prompt, error loop, silence)
|
||||
|
||||
Agents work in small steps with feedback. Most tasks complete in 2-3 minutes, but
|
||||
task nature matters.
|
||||
|
||||
**Boot's options (increasing disruption):**
|
||||
- Let them continue (if progress is evident)
|
||||
- `gt nudge <agent>` (gentle wake signal)
|
||||
- Escape + chat (interrupt and ask what's happening)
|
||||
- Request process restart (last resort, for true hangs)
|
||||
|
||||
**Common false positives:**
|
||||
- Tool waiting for user confirmation
|
||||
- Long-running test suite
|
||||
- Large file read/write operations
|
||||
|
||||
### Boot's Location
|
||||
|
||||
```
|
||||
~/gt/deacon/dogs/boot/
|
||||
```
|
||||
|
||||
Session name: `gt-boot`
|
||||
|
||||
Created/maintained by `bd doctor`.
|
||||
|
||||
### Boot Commands
|
||||
|
||||
```bash
|
||||
# Check Boot status
|
||||
gt dog status boot
|
||||
|
||||
# Manual Boot run (debugging)
|
||||
gt dog call boot
|
||||
|
||||
# Prime Boot with context
|
||||
gt dog prime boot
|
||||
```
|
||||
|
||||
## Degraded Mode
|
||||
|
||||
Gas Town can operate without tmux, with reduced capabilities.
|
||||
|
||||
### Detection
|
||||
|
||||
The daemon detects degraded mode mechanically and passes it to agents:
|
||||
|
||||
```bash
|
||||
GT_DEGRADED=true # Set by daemon when tmux unavailable
|
||||
```
|
||||
|
||||
Boot and other agents check this environment variable.
|
||||
|
||||
### What Changes in Degraded Mode
|
||||
|
||||
| Capability | Normal | Degraded |
|
||||
|------------|--------|----------|
|
||||
| Observe tmux panes | Yes | No |
|
||||
| Interactive interrupt | Yes | No |
|
||||
| Session management | Full | Limited |
|
||||
| Agent spawn | tmux sessions | Direct spawn |
|
||||
| Boot lifecycle | Handoff | Exit |
|
||||
|
||||
### Agents in Degraded Mode
|
||||
|
||||
In degraded mode, agents:
|
||||
- Cannot observe other agents' pane output
|
||||
- Cannot interactively interrupt stuck agents
|
||||
- Focus on beads/git state observation only
|
||||
- Report anomalies but can't fix interactively
|
||||
|
||||
Boot specifically:
|
||||
- Runs to completion and exits (no handoff)
|
||||
- Limited to: start deacon, file beads, mail overseer
|
||||
- Cannot: observe panes, nudge, interrupt
|
||||
|
||||
### Recording Degraded Mode
|
||||
|
||||
```bash
|
||||
# System entered degraded mode
|
||||
bd create --type=event --event-type=mode.degraded \
|
||||
--actor=system:daemon --target=rig:greenplace \
|
||||
--payload='{"reason":"tmux unavailable"}'
|
||||
|
||||
bd update role-greenplace --add-label=mode:degraded --remove-label=mode:normal
|
||||
```
|
||||
|
||||
## Configuration vs State
|
||||
|
||||
| Type | Storage | Example |
|
||||
|------|---------|---------|
|
||||
| **Static config** | TOML files | Daemon tick interval |
|
||||
| **Operational state** | Beads (events + labels) | Patrol muted |
|
||||
| **Runtime flags** | Marker files | `.deacon-disabled` |
|
||||
|
||||
Static config rarely changes and doesn't need history.
|
||||
Operational state changes at runtime and benefits from audit trail.
|
||||
Marker files are fast checks that can trigger deeper beads queries.
|
||||
|
||||
## Commands Summary
|
||||
|
||||
```bash
|
||||
# Create operational event
|
||||
bd create --type=event --event-type=<type> \
|
||||
--actor=<entity> --target=<entity> --payload='<json>'
|
||||
|
||||
# Update state label
|
||||
bd update <role-bead> --add-label=<dim>:<val> --remove-label=<dim>:<old>
|
||||
|
||||
# Query current state
|
||||
bd list --type=role --label=<dim>:<val>
|
||||
|
||||
# Query state history
|
||||
bd list --type=event --target=<entity>
|
||||
|
||||
# Boot management
|
||||
gt dog status boot
|
||||
gt dog call boot
|
||||
gt dog prime boot
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
*Events are the source of truth. Labels are the cache.*
|
||||
@@ -27,7 +27,7 @@ These roles manage the Gas Town system itself:
|
||||
| Role | Description | Lifecycle |
|
||||
|------|-------------|-----------|
|
||||
| **Mayor** | Global coordinator at mayor/ | Singleton, persistent |
|
||||
| **Deacon** | Background supervisor daemon ([watchdog chain](watchdog-chain.md)) | Singleton, persistent |
|
||||
| **Deacon** | Background supervisor daemon ([watchdog chain](design/watchdog-chain.md)) | Singleton, persistent |
|
||||
| **Witness** | Per-rig polecat lifecycle manager | One per rig, persistent |
|
||||
| **Refinery** | Per-rig merge queue processor | One per rig, persistent |
|
||||
|
||||
@@ -37,7 +37,7 @@ These roles do actual project work:
|
||||
|
||||
| Role | Description | Lifecycle |
|
||||
|------|-------------|-----------|
|
||||
| **Polecat** | Ephemeral worker with own worktree | Transient, Witness-managed ([details](polecat-lifecycle.md)) |
|
||||
| **Polecat** | Ephemeral worker with own worktree | Transient, Witness-managed ([details](concepts/polecat-lifecycle.md)) |
|
||||
| **Crew** | Persistent worker with own clone | Long-lived, user-managed |
|
||||
| **Dog** | Deacon helper for infrastructure tasks | Ephemeral, Deacon-managed |
|
||||
|
||||
@@ -64,7 +64,7 @@ gt convoy list
|
||||
- Historical record of completed work (`gt convoy list --all`)
|
||||
|
||||
The "swarm" is ephemeral - just the workers currently assigned to a convoy's issues.
|
||||
When issues close, the convoy lands. See [Convoys](convoy.md) for details.
|
||||
When issues close, the convoy lands. See [Convoys](concepts/convoy.md) for details.
|
||||
|
||||
## Crew vs Polecats
|
||||
|
||||
@@ -1,172 +0,0 @@
|
||||
# Polecat Wisp Architecture
|
||||
|
||||
How polecats use molecules and wisps to execute work in Gas Town.
|
||||
|
||||
## Overview
|
||||
|
||||
Polecats receive work via their hook - a pinned molecule attached to an issue.
|
||||
They execute molecule steps sequentially, closing each step as they complete it.
|
||||
|
||||
## Molecule Types for Polecats
|
||||
|
||||
| Type | Storage | Use Case |
|
||||
|------|---------|----------|
|
||||
| **Regular Molecule** | `.beads/` (synced) | Discrete deliverables, audit trail |
|
||||
| **Wisp** | `.beads/` (ephemeral, type=wisp) | Patrol cycles, operational loops |
|
||||
|
||||
Polecats typically use **regular molecules** because each assignment has audit value.
|
||||
Patrol agents (Witness, Refinery, Deacon) use **wisps** to prevent accumulation.
|
||||
|
||||
## Step Execution
|
||||
|
||||
### The Traditional Approach
|
||||
|
||||
```bash
|
||||
# 1. Check current status
|
||||
gt hook
|
||||
|
||||
# 2. Find next step
|
||||
bd ready --parent=gt-abc
|
||||
|
||||
# 3. Claim the step
|
||||
bd update gt-abc.4 --status=in_progress
|
||||
|
||||
# 4. Do the work...
|
||||
|
||||
# 5. Close the step
|
||||
bd close gt-abc.4
|
||||
|
||||
# 6. Repeat from step 2
|
||||
```
|
||||
|
||||
### The Propulsion Approach
|
||||
|
||||
```bash
|
||||
# 1. Check where you are
|
||||
bd mol current
|
||||
|
||||
# 2. Do the work on current step...
|
||||
|
||||
# 3. Close and advance in one command
|
||||
bd close gt-abc.4 --continue
|
||||
|
||||
# 4. Repeat from step 1
|
||||
```
|
||||
|
||||
The `--continue` flag:
|
||||
- Closes the current step
|
||||
- Finds the next ready step in the same molecule
|
||||
- Auto-marks it `in_progress`
|
||||
- Outputs the transition
|
||||
|
||||
### Example Session
|
||||
|
||||
```bash
|
||||
$ bd mol current
|
||||
You're working on molecule gt-abc (Implement user auth)
|
||||
|
||||
✓ gt-abc.1: Design schema
|
||||
✓ gt-abc.2: Create models
|
||||
→ gt-abc.3: Add endpoints [in_progress] <- YOU ARE HERE
|
||||
○ gt-abc.4: Write tests
|
||||
○ gt-abc.5: Update docs
|
||||
|
||||
Progress: 2/5 steps complete
|
||||
|
||||
$ # ... implement the endpoints ...
|
||||
|
||||
$ bd close gt-abc.3 --continue
|
||||
✓ Closed gt-abc.3: Add endpoints
|
||||
|
||||
Next ready in molecule:
|
||||
gt-abc.4: Write tests
|
||||
|
||||
→ Marked in_progress (use --no-auto to skip)
|
||||
|
||||
$ bd mol current
|
||||
You're working on molecule gt-abc (Implement user auth)
|
||||
|
||||
✓ gt-abc.1: Design schema
|
||||
✓ gt-abc.2: Create models
|
||||
✓ gt-abc.3: Add endpoints
|
||||
→ gt-abc.4: Write tests [in_progress] <- YOU ARE HERE
|
||||
○ gt-abc.5: Update docs
|
||||
|
||||
Progress: 3/5 steps complete
|
||||
```
|
||||
|
||||
## Molecule Completion
|
||||
|
||||
When closing the last step:
|
||||
|
||||
```bash
|
||||
$ bd close gt-abc.5 --continue
|
||||
✓ Closed gt-abc.5: Update docs
|
||||
|
||||
Molecule gt-abc complete! All steps closed.
|
||||
Consider: bd mol squash gt-abc --summary '...'
|
||||
```
|
||||
|
||||
After all steps are closed:
|
||||
|
||||
```bash
|
||||
# Squash to digest for audit trail
|
||||
bd mol squash gt-abc --summary "Implemented user authentication with JWT"
|
||||
|
||||
# Or if it's routine work
|
||||
bd mol burn gt-abc
|
||||
```
|
||||
|
||||
## Hook Management
|
||||
|
||||
### Checking Your Hook
|
||||
|
||||
```bash
|
||||
gt hook
|
||||
```
|
||||
|
||||
Shows what molecule is pinned to your current agent and the associated bead.
|
||||
|
||||
### Attaching Work from Mail
|
||||
|
||||
```bash
|
||||
gt mail inbox
|
||||
gt mol attach-from-mail <mail-id>
|
||||
```
|
||||
|
||||
### Completing Work
|
||||
|
||||
```bash
|
||||
# After all molecule steps closed
|
||||
gt done
|
||||
|
||||
# This:
|
||||
# 1. Syncs beads
|
||||
# 2. Submits to merge queue
|
||||
# 3. Notifies Witness
|
||||
```
|
||||
|
||||
## Polecat Workflow Summary
|
||||
|
||||
```
|
||||
1. Spawn with work on hook
|
||||
2. gt hook # What's hooked?
|
||||
3. bd mol current # Where am I?
|
||||
4. Execute current step
|
||||
5. bd close <step> --continue
|
||||
6. If more steps: GOTO 3
|
||||
7. gt done # Signal completion
|
||||
8. Wait for Witness cleanup
|
||||
```
|
||||
|
||||
## Wisp vs Molecule Decision
|
||||
|
||||
| Question | Molecule | Wisp |
|
||||
|----------|----------|------|
|
||||
| Does it need audit trail? | Yes | No |
|
||||
| Will it repeat continuously? | No | Yes |
|
||||
| Is it discrete deliverable? | Yes | No |
|
||||
| Is it operational routine? | No | Yes |
|
||||
|
||||
Polecats: **Use molecules** (deliverables have audit value)
|
||||
Patrol agents: **Use wisps** (routine loops don't accumulate)
|
||||
@@ -89,6 +89,58 @@ Debug routing: `BD_DEBUG_ROUTING=1 bd show <id>`
|
||||
|
||||
Process state, PIDs, ephemeral data.
|
||||
|
||||
### Rig-Level Configuration
|
||||
|
||||
Rigs support layered configuration through:
|
||||
1. **Wisp layer** (`.beads-wisp/config/`) - transient, local overrides
|
||||
2. **Rig identity bead labels** - persistent rig settings
|
||||
3. **Town defaults** (`~/gt/settings/config.json`)
|
||||
4. **System defaults** - compiled-in fallbacks
|
||||
|
||||
#### Polecat Branch Naming
|
||||
|
||||
Configure custom branch name templates for polecats:
|
||||
|
||||
```bash
|
||||
# Set via wisp (transient - for testing)
|
||||
echo '{"polecat_branch_template": "adam/{year}/{month}/{description}"}' > \
|
||||
~/gt/.beads-wisp/config/myrig.json
|
||||
|
||||
# Or set via rig identity bead labels (persistent)
|
||||
bd update gt-rig-myrig --labels="polecat_branch_template:adam/{year}/{month}/{description}"
|
||||
```
|
||||
|
||||
**Template Variables:**
|
||||
|
||||
| Variable | Description | Example |
|
||||
|----------|-------------|---------|
|
||||
| `{user}` | From `git config user.name` | `adam` |
|
||||
| `{year}` | Current year (YY format) | `26` |
|
||||
| `{month}` | Current month (MM format) | `01` |
|
||||
| `{name}` | Polecat name | `alpha` |
|
||||
| `{issue}` | Issue ID without prefix | `123` (from `gt-123`) |
|
||||
| `{description}` | Sanitized issue title | `fix-auth-bug` |
|
||||
| `{timestamp}` | Unique timestamp | `1ks7f9a` |
|
||||
|
||||
**Default Behavior (backward compatible):**
|
||||
|
||||
When `polecat_branch_template` is empty or not set:
|
||||
- With issue: `polecat/{name}/{issue}@{timestamp}`
|
||||
- Without issue: `polecat/{name}-{timestamp}`
|
||||
|
||||
**Example Configurations:**
|
||||
|
||||
```bash
|
||||
# GitHub enterprise format
|
||||
"adam/{year}/{month}/{description}"
|
||||
|
||||
# Simple feature branches
|
||||
"feature/{issue}"
|
||||
|
||||
# Include polecat name for clarity
|
||||
"work/{name}/{issue}"
|
||||
```
|
||||
|
||||
## Formula Format
|
||||
|
||||
```toml
|
||||
@@ -471,7 +523,7 @@ gt convoy list --all # Include landed convoys
|
||||
gt convoy list --status=closed # Only landed convoys
|
||||
```
|
||||
|
||||
Note: "Swarm" is ephemeral (workers on a convoy's issues). See [Convoys](convoy.md).
|
||||
Note: "Swarm" is ephemeral (workers on a convoy's issues). See [Convoys](concepts/convoy.md).
|
||||
|
||||
### Work Assignment
|
||||
|
||||
@@ -510,7 +562,7 @@ gt escalate -s HIGH "msg" # Important blocker
|
||||
gt escalate -s MEDIUM "msg" -m "Details..."
|
||||
```
|
||||
|
||||
See [escalation.md](escalation.md) for full protocol.
|
||||
See [escalation.md](design/escalation.md) for full protocol.
|
||||
|
||||
### Sessions
|
||||
|
||||
@@ -545,6 +597,24 @@ gt stop --all # Kill all sessions
|
||||
gt stop --rig <name> # Kill rig sessions
|
||||
```
|
||||
|
||||
### Health Check
|
||||
|
||||
```bash
|
||||
gt deacon health-check <agent> # Send health check ping, track response
|
||||
gt deacon health-state # Show health check state for all agents
|
||||
```
|
||||
|
||||
### Merge Queue (MQ)
|
||||
|
||||
```bash
|
||||
gt mq list [rig] # Show the merge queue
|
||||
gt mq next [rig] # Show highest-priority merge request
|
||||
gt mq submit # Submit current branch to merge queue
|
||||
gt mq status <id> # Show detailed merge request status
|
||||
gt mq retry <id> # Retry a failed merge request
|
||||
gt mq reject <id> # Reject a merge request
|
||||
```
|
||||
|
||||
## Beads Commands (bd)
|
||||
|
||||
```bash
|
||||
@@ -611,4 +681,4 @@ bd mol bond mol-security-scan $PATROL_ID --var scope="$SCOPE"
|
||||
|
||||
**Nondeterministic idempotence**: Any worker can continue any molecule. Steps are atomic checkpoints in beads.
|
||||
|
||||
**Convoy tracking**: Convoys track batched work across rigs. A "swarm" is ephemeral - just the workers currently on a convoy's issues. See [Convoys](convoy.md) for details.
|
||||
**Convoy tracking**: Convoys track batched work across rigs. A "swarm" is ephemeral - just the workers currently on a convoy's issues. See [Convoys](concepts/convoy.md) for details.
|
||||
|
||||
@@ -1,220 +0,0 @@
|
||||
# Infrastructure & Utilities Code Review
|
||||
|
||||
**Review ID**: gt-a02fj.8
|
||||
**Date**: 2026-01-04
|
||||
**Reviewer**: gastown/polecats/interceptor (polecat gus)
|
||||
|
||||
## Executive Summary
|
||||
|
||||
Reviewed 14 infrastructure packages for dead code, missing abstractions, performance concerns, and error handling consistency. Found significant cleanup opportunities totaling ~44% dead code in the constants package and an entire unused package (keepalive).
|
||||
|
||||
---
|
||||
|
||||
## 1. Dead Code Inventory
|
||||
|
||||
### Critical: Entire Package Unused
|
||||
|
||||
| Package | Status | Recommendation |
|
||||
|---------|--------|----------------|
|
||||
| `internal/keepalive/` | 100% unused | **DELETE ENTIRE PACKAGE** |
|
||||
|
||||
All usage of the keepalive package (5 functions) was removed from the codebase on Dec 30, 2025 as part of the shift to feed-based activation; the package itself remains, but no imports exist anywhere.
|
||||
|
||||
### High Priority: Functions to Remove
|
||||
|
||||
| Package | Function | Location | Notes |
|
||||
|---------|----------|----------|-------|
|
||||
| `config` | `NewExampleAgentRegistry()` | agents.go:361-381 | Zero usage in codebase |
|
||||
| `constants` | `DirMayor`, `DirPolecats`, `DirCrew`, etc. | constants.go:32-59 | 9 unused directory constants |
|
||||
| `constants` | `FileRigsJSON`, `FileTownJSON`, etc. | constants.go:62-74 | 4 unused file constants |
|
||||
| `constants` | `BranchMain`, `BranchBeadsSync`, etc. | constants.go:77-89 | 4 unused branch constants |
|
||||
| `constants` | `RigBeadsPath()`, `RigPolecatsPath()`, etc. | constants.go | 5 unused path helper functions |
|
||||
| `doctor` | `itoa()` | daemon_check.go:93-111 | Duplicate of `strconv.Itoa()` |
|
||||
| `lock` | `DetectCollisions()` | lock.go:367-402 | Superseded by doctor checks |
|
||||
| `events` | `BootPayload()` | events.go:186-191 | Never called |
|
||||
| `events` | `TypePatrolStarted`, `TypeSessionEnd` | events.go:50,54 | Never emitted |
|
||||
| `events` | `VisibilityBoth` | events.go:32 | Never set |
|
||||
| `boot` | `DeaconDir()` | boot.go:235-237 | Exported but never called |
|
||||
| `dog` | `IdleCount()`, `WorkingCount()` | manager.go:532-562 | Inlined in callers |
|
||||
|
||||
### Medium Priority: Duplicate Definitions
|
||||
|
||||
| Package | Item | Duplicate Location | Action |
|
||||
|---------|------|-------------------|--------|
|
||||
| `constants` | `RigSettingsPath()` | Also in config/loader.go:673 | Remove from constants |
|
||||
| `util` | Atomic write pattern | Also in mrqueue/, wisp/ | Consolidate to util |
|
||||
| `doctor` | `findRigs()` | 3 identical implementations | Extract shared helper |
|
||||
|
||||
---
|
||||
|
||||
## 2. Utility Consolidation Plan
|
||||
|
||||
### Pattern: Atomic Write (Priority: HIGH)
|
||||
|
||||
**Current state**: Duplicated in 3+ locations
|
||||
- `util/atomic.go` (canonical)
|
||||
- `mrqueue/mrqueue.go` (duplicate)
|
||||
- `wisp/io.go` (duplicate)
|
||||
- `polecat/pending.go` (NON-ATOMIC - bug!)
|
||||
|
||||
**Action**:
|
||||
1. Fix `polecat/pending.go:SavePending()` to use `util.AtomicWriteJSON`
|
||||
2. Replace inline atomic writes in mrqueue and wisp with util calls
|
||||
|
||||
### Pattern: Rig Discovery (Priority: HIGH)
|
||||
|
||||
**Current state**: 7+ implementations scattered across doctor package
|
||||
- `BranchCheck.findPersistentRoleDirs()`
|
||||
- `OrphanSessionCheck.getValidRigs()`
|
||||
- `PatrolMoleculesExistCheck.discoverRigs()`
|
||||
- `config_check.go.findAllRigs()`
|
||||
- Multiple `findCrewDirs()` implementations
|
||||
|
||||
**Action**: Create `internal/workspace/discovery.go`:
|
||||
```go
|
||||
type RigDiscovery struct { ... }
|
||||
func (d *RigDiscovery) FindAllRigs() []string
|
||||
func (d *RigDiscovery) FindCrewDirs(rig string) []string
|
||||
func (d *RigDiscovery) FindPolecatDirs(rig string) []string
|
||||
```
|
||||
|
||||
### Pattern: Clone Validation (Priority: MEDIUM)
|
||||
|
||||
**Current state**: Duplicate logic in doctor checks
|
||||
- `rig_check.go`: Validates .git, runs git status
|
||||
- `branch_check.go`: Similar traversal logic
|
||||
|
||||
**Action**: Create `internal/workspace/clone.go`:
|
||||
```go
|
||||
type CloneValidator struct { ... }
|
||||
func (v *CloneValidator) ValidateClone(path string) error
|
||||
func (v *CloneValidator) GetCloneInfo(path string) (*CloneInfo, error)
|
||||
```
|
||||
|
||||
### Pattern: Tmux Session Handling (Priority: MEDIUM)
|
||||
|
||||
**Current state**: Fragmented across lock, doctor, daemon
|
||||
- `lock/lock.go`: `getActiveTmuxSessions()`
|
||||
- `doctor/identity_check.go`: Similar logic
|
||||
- `cmd/agents.go`: Uses `tmux.NewTmux()`
|
||||
|
||||
**Action**: Consolidate into `internal/tmux/sessions.go`
|
||||
|
||||
### Pattern: Load/Validate Config Files (Priority: LOW)
|
||||
|
||||
**Current state**: 8 near-identical Load* functions in config/loader.go
|
||||
- `LoadTownConfig`, `LoadRigsConfig`, `LoadRigConfig`, etc.
|
||||
|
||||
**Action**: Create generic loader using Go generics:
|
||||
```go
|
||||
func loadConfigFile[T Validator](path string) (*T, error)
|
||||
```
|
||||
|
||||
### Pattern: Math Utilities (Priority: LOW)
|
||||
|
||||
**Current state**: `min()`, `max()`, `min3()`, `abs()` in suggest/suggest.go
|
||||
|
||||
**Action**: If needed elsewhere, move to `internal/util/math.go`
|
||||
|
||||
---
|
||||
|
||||
## 3. Performance Concerns
|
||||
|
||||
### Critical: File I/O Per-Event
|
||||
|
||||
| Package | Issue | Impact | Recommendation |
|
||||
|---------|-------|--------|----------------|
|
||||
| `events` | Opens/closes file for every event | High on busy systems | Batch writes or buffered logger |
|
||||
| `townlog` | Opens/closes file per log entry | Medium | Same as events |
|
||||
| `events` | `workspace.FindFromCwd()` on every Log() | Low-medium | Cache town root |
|
||||
|
||||
### Critical: Process Tree Walking
|
||||
|
||||
| Package | Issue | Impact | Recommendation |
|
||||
|---------|-------|--------|----------------|
|
||||
| `doctor/orphan_check` | `hasCrewAncestor()` calls `ps` in loop | O(n) subprocess calls | Batch gather process info |
|
||||
|
||||
### High: Directory Traversal Inefficiencies
|
||||
|
||||
| Package | Issue | Impact | Recommendation |
|
||||
|---------|-------|--------|----------------|
|
||||
| `doctor/hook_check` | Uses `exec.Command("find")` | Subprocess overhead | Use `filepath.Walk` |
|
||||
| `lock` | `FindAllLocks()` - unbounded Walk | Scales poorly | Add depth limits |
|
||||
| `townlog` | `TailEvents()` reads entire file | Memory for large logs | Implement true tail |
|
||||
|
||||
### Medium: Redundant Operations
|
||||
|
||||
| Package | Issue | Recommendation |
|
||||
|---------|-------|----------------|
|
||||
| `dog` | `List()` + iterate = double work | Provide `CountByState()` |
|
||||
| `dog` | Creates new git.Git per worktree | Cache or batch |
|
||||
| `doctor/rig_check` | Runs git status twice per polecat | Combine operations |
|
||||
| `checkpoint/Capture` | 3 separate git commands | Use combined flags |
|
||||
|
||||
### Low: JSON Formatting Overhead
|
||||
|
||||
| Package | Issue | Recommendation |
|
||||
|---------|-------|----------------|
|
||||
| `lock` | `MarshalIndent()` for lock files | Use `Marshal()` (no indentation needed) |
|
||||
| `townlog` | No compression for old logs | Consider gzip rotation |
|
||||
|
||||
---
|
||||
|
||||
## 4. Error Handling Issues
|
||||
|
||||
### Pattern: Silent Failures
|
||||
|
||||
| Package | Location | Issue | Fix |
|
||||
|---------|----------|-------|-----|
|
||||
| `events` | All callers | 19 instances of `_ = events.LogFeed()` | Standardize: always ignore or always check |
|
||||
| `townlog` | `ParseLogLines()` | Silently skips malformed lines | Log warnings |
|
||||
| `lock` | Lines 91, 180, 194-195 | Silent `_ =` without comments | Document intent |
|
||||
| `checkpoint` | `Capture()` | Returns nil error but git commands fail | Return actual errors |
|
||||
| `deps` | `BeadsUnknown` case | Silently passes | Log warning or fail |
|
||||
|
||||
### Pattern: Inconsistent State Handling
|
||||
|
||||
| Package | Issue | Recommendation |
|
||||
|---------|-------|----------------|
|
||||
| `dog/Get()` | Returns minimal Dog if state missing | Document or error |
|
||||
| `config/GetAccount()` | Returns pointer to loop variable (bug!) | Return by value |
|
||||
| `boot` | `LoadStatus()` returns empty struct if missing | Document behavior |
|
||||
|
||||
### Bug: Missing Role Mapping
|
||||
|
||||
| Package | Issue | Impact |
|
||||
|---------|-------|--------|
|
||||
| `claude` | `RoleTypeFor()` missing `deacon`, `crew` | Wrong settings applied |
|
||||
|
||||
---
|
||||
|
||||
## 5. Testing Gaps
|
||||
|
||||
| Package | Gap | Priority |
|
||||
|---------|-----|----------|
|
||||
| `checkpoint` | No unit tests | HIGH (crash recovery) |
|
||||
| `dog` | 4 tests, major paths untested | HIGH |
|
||||
| `deps` | Minimal failure path testing | MEDIUM |
|
||||
| `claude` | No tests | LOW |
|
||||
|
||||
---
|
||||
|
||||
## Summary Statistics
|
||||
|
||||
| Category | Count | Packages Affected |
|
||||
|----------|-------|-------------------|
|
||||
| **Dead Code Items** | 25+ | config, constants, doctor, lock, events, boot, dog, keepalive |
|
||||
| **Duplicate Patterns** | 6 | util, doctor, config, lock |
|
||||
| **Performance Issues** | 12 | events, townlog, doctor, dog, lock, checkpoint |
|
||||
| **Error Handling Issues** | 15 | events, townlog, lock, checkpoint, deps, claude |
|
||||
| **Testing Gaps** | 4 packages | checkpoint, dog, deps, claude |
|
||||
|
||||
## Recommended Priority
|
||||
|
||||
1. **Delete keepalive package** (entire package unused)
|
||||
2. **Fix claude/RoleTypeFor()** (incorrect behavior)
|
||||
3. **Fix config/GetAccount()** (pointer to stack bug)
|
||||
4. **Fix polecat/pending.go** (non-atomic writes)
|
||||
5. **Delete 21 unused constants** (maintenance burden)
|
||||
6. **Consolidate atomic write pattern** (DRY)
|
||||
7. **Add checkpoint tests** (crash recovery critical)
|
||||
@@ -1,74 +0,0 @@
|
||||
# Swarm (Ephemeral Worker View)
|
||||
|
||||
> **Note**: "Swarm" is an ephemeral concept, not a persistent entity.
|
||||
> For tracking work, see [Convoys](convoy.md).
|
||||
|
||||
## What is a Swarm?
|
||||
|
||||
A **swarm** is simply "the workers currently assigned to a convoy's issues."
|
||||
It has no separate ID and no persistent state - it's just a view of active workers.
|
||||
|
||||
| Concept | Persistent? | ID | Description |
|
||||
|---------|-------------|-----|-------------|
|
||||
| **Convoy** | Yes | hq-* | The tracking unit. What you create and track. |
|
||||
| **Swarm** | No | None | The workers. Ephemeral view of who's working. |
|
||||
|
||||
## The Relationship
|
||||
|
||||
```
|
||||
Convoy hq-abc ─────────tracks───────────► Issues
|
||||
│
|
||||
│ assigned to
|
||||
▼
|
||||
Polecats
|
||||
│
|
||||
────────┴────────
|
||||
"the swarm"
|
||||
(ephemeral)
|
||||
```
|
||||
|
||||
When you say "kick off a swarm," you're really:
|
||||
1. Creating a convoy (persistent tracking)
|
||||
2. Assigning polecats to the convoy's issues
|
||||
3. The swarm = those polecats while they work
|
||||
|
||||
When the work completes, the convoy lands and the swarm dissolves.
|
||||
|
||||
## Viewing the Swarm
|
||||
|
||||
The swarm appears in convoy status:
|
||||
|
||||
```bash
|
||||
gt convoy status hq-abc
|
||||
```
|
||||
|
||||
```
|
||||
Convoy: hq-abc (Deploy v2.0)
|
||||
════════════════════════════
|
||||
|
||||
Progress: 2/3 complete
|
||||
|
||||
Issues
|
||||
✓ gt-xyz: Update API closed
|
||||
→ bd-ghi: Update docs in_progress @beads/amber
|
||||
○ gt-jkl: Final review open
|
||||
|
||||
Workers (the swarm) ← this is the swarm
|
||||
beads/amber bd-ghi running 12m
|
||||
```
|
||||
|
||||
## Historical Note
|
||||
|
||||
Earlier Gas Town development used "swarm" as if it were a persistent entity
|
||||
with its own lifecycle. The `gt swarm` commands were built on this model.
|
||||
|
||||
The correct model is:
|
||||
- **Convoy** = the persistent tracking unit (what `gt swarm` was trying to be)
|
||||
- **Swarm** = ephemeral workers (no separate tracking needed)
|
||||
|
||||
The `gt swarm` command is being deprecated in favor of `gt convoy`.
|
||||
|
||||
## See Also
|
||||
|
||||
- [Convoys](convoy.md) - The persistent tracking unit
|
||||
- [Propulsion Principle](propulsion-principle.md) - Worker execution model
|
||||
@@ -1,154 +0,0 @@
|
||||
# Test Coverage and Quality Review
|
||||
|
||||
**Reviewed by**: polecat/gus
|
||||
**Date**: 2026-01-04
|
||||
**Issue**: gt-a02fj.9
|
||||
|
||||
## Executive Summary
|
||||
|
||||
- **80 test files** covering **32 out of 42 packages** (76% package coverage)
|
||||
- **631 test functions** with 192 subtests (30% use table-driven pattern)
|
||||
- **10 packages** with **0 test coverage** (2,452 lines)
|
||||
- **1 confirmed flaky test** candidate
|
||||
- Test quality is generally good with moderate mocking
|
||||
|
||||
---
|
||||
|
||||
## Coverage Gap Inventory
|
||||
|
||||
### Packages Without Tests (Priority Order)
|
||||
|
||||
| Priority | Package | Lines | Risk | Notes |
|
||||
|----------|---------|-------|------|-------|
|
||||
| **P0** | `internal/lock` | 402 | **CRITICAL** | Multi-agent lock management. Bugs cause worker collisions. Already has `execCommand` mockable for testing. |
|
||||
| **P1** | `internal/events` | 295 | HIGH | Event bus for audit trail. Mutex-protected writes. Core observability. |
|
||||
| **P1** | `internal/boot` | 242 | HIGH | Boot watchdog lifecycle. Spawns tmux sessions. |
|
||||
| **P1** | `internal/checkpoint` | 216 | HIGH | Session crash recovery. Critical for polecat continuity. |
|
||||
| **P2** | `internal/tui/convoy` | 601 | MEDIUM | TUI component. Harder to test but user-facing. |
|
||||
| **P2** | `internal/constants` | 221 | LOW | Mostly configuration constants. Low behavioral risk. |
|
||||
| **P3** | `internal/style` | 331 | LOW | Output formatting. Visual only. |
|
||||
| **P3** | `internal/claude` | 80 | LOW | Claude settings parsing. |
|
||||
| **P3** | `internal/wisp` | 52 | LOW | Ephemeral molecule I/O. Small surface. |
|
||||
| **P4** | `cmd/gt` | 12 | TRIVIAL | Main entry point. Minimal code. |
|
||||
|
||||
**Total untested lines**: 2,452
|
||||
|
||||
---
|
||||
|
||||
## Flaky Test Candidates
|
||||
|
||||
### Confirmed: `internal/feed/curator_test.go`
|
||||
|
||||
**Issue**: Uses `time.Sleep()` for synchronization (lines 59, 71, 119, 138)
|
||||
|
||||
```go
|
||||
// Give curator time to start
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
...
|
||||
// Wait for processing
|
||||
time.Sleep(300 * time.Millisecond)
|
||||
```
|
||||
|
||||
**Risk**: Flaky under load, CI delays, or slow machines.
|
||||
|
||||
**Fix**: Replace with channel-based synchronization or polling with timeout:
|
||||
```go
|
||||
// Wait for condition with timeout
|
||||
deadline := time.Now().Add(time.Second)
|
||||
for time.Now().Before(deadline) {
|
||||
if conditionMet() {
|
||||
break
|
||||
}
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Test Quality Analysis
|
||||
|
||||
### Strengths
|
||||
|
||||
1. **Table-driven tests**: 30% of tests use `t.Run()` (192/631)
|
||||
2. **Good isolation**: Only 2 package-level test variables
|
||||
3. **Dedicated integration tests**: 15 files with explicit integration/e2e naming
|
||||
4. **Error handling**: 316 uses of `if err != nil` in tests
|
||||
5. **No random data**: No `rand.` usage in tests (deterministic)
|
||||
6. **Environment safety**: Uses `t.Setenv()` for clean env var handling
|
||||
|
||||
### Areas for Improvement
|
||||
|
||||
1. **`testing.Short()`**: Only 1 usage. Long-running tests should check this.
|
||||
2. **External dependencies**: 26 tests skip when `bd` or `tmux` unavailable - consider mocking more.
|
||||
3. **time.Sleep usage**: Found in `curator_test.go` - should be eliminated.
|
||||
|
||||
---
|
||||
|
||||
## Test Smells (Minor)
|
||||
|
||||
| Smell | Location | Severity | Notes |
|
||||
|-------|----------|----------|-------|
|
||||
| Sleep-based sync | `feed/curator_test.go` | HIGH | See flaky section |
|
||||
| External dep skips | Multiple files | LOW | Reasonable for integration tests |
|
||||
| Skip-heavy file | `tmux/tmux_test.go` | LOW | Acceptable - tmux not always available |
|
||||
|
||||
---
|
||||
|
||||
## Priority List for New Tests
|
||||
|
||||
### Immediate (P0)
|
||||
|
||||
1. **`internal/lock`** - Critical path
|
||||
- Test `Acquire()` with stale lock cleanup
|
||||
- Test `Check()` with live/dead PIDs
|
||||
- Test `CleanStaleLocks()` with mock tmux sessions
|
||||
- Test `DetectCollisions()`
|
||||
- Test concurrent lock acquisition (race detection)
|
||||
|
||||
### High Priority (P1)
|
||||
|
||||
2. **`internal/events`**
|
||||
- Test `Log()` file creation and append
|
||||
- Test `write()` mutex behavior
|
||||
- Test payload helpers
|
||||
- Test graceful handling when not in workspace
|
||||
|
||||
3. **`internal/boot`**
|
||||
- Test `IsRunning()` with stale markers
|
||||
- Test `AcquireLock()` / `ReleaseLock()` cycle
|
||||
- Test `SaveStatus()` / `LoadStatus()` round-trip
|
||||
- Test degraded mode path
|
||||
|
||||
4. **`internal/checkpoint`**
|
||||
- Test `Read()` / `Write()` round-trip
|
||||
- Test `Capture()` git state extraction
|
||||
- Test `IsStale()` with various durations
|
||||
- Test `Summary()` output
|
||||
|
||||
### Medium Priority (P2)
|
||||
|
||||
5. **`internal/tui/convoy`** - Consider golden file tests for view output
|
||||
6. **`internal/constants`** - Test any validation logic
|
||||
|
||||
---
|
||||
|
||||
## Missing Test Types
|
||||
|
||||
| Type | Current State | Recommendation |
|
||||
|------|--------------|----------------|
|
||||
| Unit tests | Good coverage where present | Add for P0-P1 packages |
|
||||
| Integration tests | 15 dedicated files | Adequate |
|
||||
| E2E tests | `browser_e2e_test.go` | Consider more CLI E2E |
|
||||
| Fuzz tests | None | Consider for parsers (`formula/parser.go`) |
|
||||
| Benchmark tests | None visible | Add for hot paths (`lock`, `events`) |
|
||||
|
||||
---
|
||||
|
||||
## Actionable Next Steps
|
||||
|
||||
1. **Fix flaky test**: Refactor `feed/curator_test.go` to use channels/polling
|
||||
2. **Add lock tests**: Highest priority - bugs here break multi-agent
|
||||
3. **Add events tests**: Core observability must be tested
|
||||
4. **Add checkpoint tests**: Session recovery is critical path
|
||||
5. **Run with race detector**: `go test -race ./...` to catch data races
|
||||
6. **Consider `-short` flag**: Add `testing.Short()` checks to slow tests
|
||||
@@ -1,372 +0,0 @@
|
||||
# Wisp Squash Design: Cadences, Rules, Templates
|
||||
|
||||
Design specification for how wisps squash to digests in Gas Town.
|
||||
|
||||
## Problem Statement
|
||||
|
||||
Wisps are ephemeral molecules that need to be condensed into digests for:
|
||||
- **Audit trail**: What happened, when, by whom
|
||||
- **Activity feed**: Observable progress in the capability ledger
|
||||
- **Space efficiency**: Ephemeral data doesn't accumulate indefinitely
|
||||
|
||||
Currently under-designed:
|
||||
- **Cadences**: When should squash happen?
|
||||
- **Templates**: What should digests contain?
|
||||
- **Retention**: How long to keep, when to aggregate?
|
||||
|
||||
## Squash Cadences
|
||||
|
||||
### Patrol Wisps (Deacon, Witness, Refinery)
|
||||
|
||||
**Trigger**: End of each patrol cycle
|
||||
|
||||
```
|
||||
patrol-start → steps → loop-or-exit step → squash → new wisp
|
||||
```
|
||||
|
||||
| Decision Point | Action |
|
||||
|----------------|--------|
|
||||
| `loop-or-exit` with low context | Squash current wisp, create new wisp |
|
||||
| `loop-or-exit` with high context | Squash current wisp, handoff |
|
||||
| Extraordinary action | Squash immediately, handoff |
|
||||
|
||||
**Rationale**: Each patrol cycle is a logical unit. Squashing per-cycle keeps
|
||||
digests meaningful and prevents context-filling sessions from losing history.
|
||||
|
||||
### Work Wisps (Polecats)
|
||||
|
||||
**Trigger**: Before `gt done` or molecule completion
|
||||
|
||||
```
|
||||
work-assigned → steps → all-complete → squash → gt done → merge queue
|
||||
```
|
||||
|
||||
Polecats typically use regular molecules (not wisps), but when wisps are used
|
||||
for exploratory work:
|
||||
|
||||
| Scenario | Action |
|
||||
|----------|--------|
|
||||
| Molecule completes | Squash to digest |
|
||||
| Molecule abandoned | Burn (no digest) |
|
||||
| Molecule handed off | Squash, include handoff context |
|
||||
|
||||
### Time-Based Cadences (Future)
|
||||
|
||||
For long-running molecules that span multiple sessions:
|
||||
|
||||
| Duration | Action |
|
||||
|----------|--------|
|
||||
| Session ends | Auto-squash if molecule in progress |
|
||||
| > 24 hours | Create checkpoint digest |
|
||||
| > 7 days | Warning: stale molecule |
|
||||
|
||||
**Not implemented initially** - simplicity first.
|
||||
|
||||
## Summary Templates
|
||||
|
||||
### Template Structure
|
||||
|
||||
Digests have three sections:
|
||||
1. **Header**: Standard metadata (who, what, when)
|
||||
2. **Body**: Context-specific content (from template)
|
||||
3. **Footer**: System metrics (steps, duration, commit refs)
|
||||
|
||||
### Patrol Digest Template
|
||||
|
||||
```markdown
|
||||
## Patrol Digest: {{.Agent}}
|
||||
|
||||
**Cycle**: {{.CycleNumber}} | **Duration**: {{.Duration}}
|
||||
|
||||
### Actions Taken
|
||||
{{range .Actions}}
|
||||
- {{.Icon}} {{.Description}}
|
||||
{{end}}
|
||||
|
||||
### Issues Filed
|
||||
{{range .IssuesFiled}}
|
||||
- {{.ID}}: {{.Title}}
|
||||
{{end}}
|
||||
|
||||
### Metrics
|
||||
- Inbox: {{.InboxCount}} messages processed
|
||||
- Health checks: {{.HealthChecks}}
|
||||
- Alerts: {{.AlertCount}}
|
||||
```
|
||||
|
||||
### Work Digest Template
|
||||
|
||||
```markdown
|
||||
## Work Digest: {{.IssueTitle}}
|
||||
|
||||
**Issue**: {{.IssueID}} | **Agent**: {{.Agent}} | **Duration**: {{.Duration}}
|
||||
|
||||
### Summary
|
||||
{{.Summary}}
|
||||
|
||||
### Steps Completed
|
||||
{{range .Steps}}
|
||||
- [{{.Status}}] {{.Title}}
|
||||
{{end}}
|
||||
|
||||
### Artifacts
|
||||
- Commits: {{range .Commits}}{{.Short}}, {{end}}
|
||||
- Files changed: {{.FilesChanged}}
|
||||
- Lines: +{{.LinesAdded}} -{{.LinesRemoved}}
|
||||
```
|
||||
|
||||
### Formula-Defined Templates
|
||||
|
||||
Formulas can define custom squash templates in `[squash]` section:
|
||||
|
||||
```toml
|
||||
formula = "mol-my-workflow"
|
||||
version = 1
|
||||
|
||||
[squash]
|
||||
template = """
|
||||
## {{.Title}} Complete
|
||||
|
||||
Duration: {{.Duration}}
|
||||
Key metrics:
|
||||
{{range .Steps}}
|
||||
- {{.ID}}: {{.CustomField}}
|
||||
{{end}}
|
||||
"""
|
||||
|
||||
# Template variables from step outputs
|
||||
[squash.vars]
|
||||
include_metrics = true
|
||||
summary_length = "short" # short | medium | detailed
|
||||
```
|
||||
|
||||
**Resolution order**:
|
||||
1. Formula-defined template (if present)
|
||||
2. Type-specific default (patrol vs work)
|
||||
3. Minimal fallback (current behavior)
|
||||
|
||||
## Retention Rules
|
||||
|
||||
### Digest Lifecycle
|
||||
|
||||
```
|
||||
Wisp → Squash → Digest (active) → Digest (archived) → Rollup
|
||||
```
|
||||
|
||||
| Phase | Duration | Storage |
|
||||
|-------|----------|---------|
|
||||
| Active | 30 days | `.beads/issues.jsonl` |
|
||||
| Archived | 1 year | `.beads/archive/` (compressed) |
|
||||
| Rollup | Permanent | Weekly/monthly summaries |
|
||||
|
||||
### Rollup Strategy
|
||||
|
||||
After retention period, digests aggregate into rollups:
|
||||
|
||||
**Weekly Patrol Rollup**:
|
||||
```markdown
|
||||
## Week of {{.WeekStart}}
|
||||
|
||||
| Agent | Cycles | Issues Filed | Merges | Incidents |
|
||||
|-------|--------|--------------|--------|-----------|
|
||||
| Deacon | 140 | 3 | - | 0 |
|
||||
| Witness | 168 | 12 | - | 2 |
|
||||
| Refinery | 84 | 0 | 47 | 1 |
|
||||
```
|
||||
|
||||
**Monthly Work Rollup**:
|
||||
```markdown
|
||||
## {{.Month}} Work Summary
|
||||
|
||||
Issues completed: {{.TotalIssues}}
|
||||
Total duration: {{.TotalDuration}}
|
||||
Contributors: {{range .Contributors}}{{.Name}}, {{end}}
|
||||
|
||||
Top categories:
|
||||
{{range .Categories}}
|
||||
- {{.Name}}: {{.Count}} issues
|
||||
{{end}}
|
||||
```
|
||||
|
||||
### Retention Configuration
|
||||
|
||||
Per-rig settings in `config.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"retention": {
|
||||
"digest_active_days": 30,
|
||||
"digest_archive_days": 365,
|
||||
"rollup_weekly": true,
|
||||
"rollup_monthly": true,
|
||||
"auto_archive": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Template System (MVP)
|
||||
|
||||
1. Add `[squash]` section parsing to formula loader
|
||||
2. Create default templates for patrol and work digests
|
||||
3. Enhance `bd mol squash` to use templates
|
||||
4. Add `--template` flag for override
|
||||
|
||||
### Phase 2: Cadence Automation
|
||||
|
||||
1. Hook squash into `gt done` flow
|
||||
2. Add patrol cycle completion detection
|
||||
3. Emit squash events for activity feed
|
||||
|
||||
### Phase 3: Retention & Archival
|
||||
|
||||
1. Implement digest aging (active → archived)
|
||||
2. Add `bd archive` command for manual archival
|
||||
3. Create rollup generator for weekly/monthly summaries
|
||||
4. Background daemon task for auto-archival
|
||||
|
||||
## Commands
|
||||
|
||||
### Squash with Template
|
||||
|
||||
```bash
|
||||
# Use formula-defined template
|
||||
bd mol squash <id>
|
||||
|
||||
# Use explicit template
|
||||
bd mol squash <id> --template=detailed
|
||||
|
||||
# Add custom summary
|
||||
bd mol squash <id> --summary="Patrol complete: 3 issues filed"
|
||||
```
|
||||
|
||||
### View Digests
|
||||
|
||||
```bash
|
||||
# List recent digests
|
||||
bd list --label=digest
|
||||
|
||||
# View rollups
|
||||
bd rollup list
|
||||
bd rollup show weekly-2025-01
|
||||
```
|
||||
|
||||
### Archive Management
|
||||
|
||||
```bash
|
||||
# Archive old digests
|
||||
bd archive --older-than=30d
|
||||
|
||||
# Generate rollup
|
||||
bd rollup generate --week=2025-01
|
||||
|
||||
# Restore from archive
|
||||
bd archive restore <digest-id>
|
||||
```
|
||||
|
||||
## Activity Feed Integration
|
||||
|
||||
Digests feed into the activity feed for observability:
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "digest",
|
||||
"agent": "greenplace/witness",
|
||||
"timestamp": "2025-12-30T10:00:00Z",
|
||||
"summary": "Patrol cycle 47 complete",
|
||||
"metrics": {
|
||||
"issues_filed": 2,
|
||||
"polecats_nudged": 1,
|
||||
"duration_minutes": 12
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The feed curator (daemon) can aggregate these for dashboards.
|
||||
|
||||
## Formula Example
|
||||
|
||||
Complete formula with squash configuration:
|
||||
|
||||
```toml
|
||||
formula = "mol-witness-patrol"
|
||||
version = 1
|
||||
type = "workflow"
|
||||
description = "Witness patrol cycle"
|
||||
|
||||
[squash]
|
||||
trigger = "on_complete"
|
||||
template_type = "patrol"
|
||||
include_metrics = true
|
||||
|
||||
[[steps]]
|
||||
id = "inbox-check"
|
||||
title = "Check inbox"
|
||||
description = "Process messages and escalations"
|
||||
|
||||
[[steps]]
|
||||
id = "health-scan"
|
||||
title = "Scan polecat health"
|
||||
description = "Check all polecats for stuck/idle"
|
||||
|
||||
[[steps]]
|
||||
id = "nudge-stuck"
|
||||
title = "Nudge stuck workers"
|
||||
description = "Send nudges to idle polecats"
|
||||
|
||||
[[steps]]
|
||||
id = "loop-or-exit"
|
||||
title = "Loop or exit decision"
|
||||
description = "Decide whether to continue or handoff"
|
||||
```
|
||||
|
||||
## Migration
|
||||
|
||||
### Existing Digests
|
||||
|
||||
Current minimal digests remain valid. New template system is additive:
|
||||
- Old digests: Title, basic description
|
||||
- New digests: Structured content, metrics
|
||||
|
||||
### Backward Compatibility
|
||||
|
||||
- `bd mol squash` without template uses current behavior
|
||||
- Formulas without `[squash]` section use type defaults
|
||||
- No breaking changes to existing workflows
|
||||
|
||||
## Design Decisions
|
||||
|
||||
### Why Squash Per-Cycle?
|
||||
|
||||
**Alternative**: Squash on session end only
|
||||
|
||||
**Rejected because**:
|
||||
- Sessions can crash mid-cycle (lost audit trail)
|
||||
- High-context sessions may span multiple cycles
|
||||
- Per-cycle gives finer granularity
|
||||
|
||||
### Why Formula-Defined Templates?
|
||||
|
||||
**Alternative**: Hard-coded templates per role
|
||||
|
||||
**Rejected because**:
|
||||
- Different workflows have different metrics
|
||||
- Extensibility for custom formulas
|
||||
- Separation of concerns (workflow defines its own output)
|
||||
|
||||
### Why Retain Forever (as Rollups)?
|
||||
|
||||
**Alternative**: Delete after N days
|
||||
|
||||
**Rejected because**:
|
||||
- Capability ledger needs long-term history
|
||||
- Rollups are small (aggregate stats)
|
||||
- Audit requirements vary by use case
|
||||
|
||||
## Future Considerations
|
||||
|
||||
- **Search**: Full-text search over archived digests
|
||||
- **Analytics**: Metrics aggregation dashboard
|
||||
- **Export**: Export digests to external systems
|
||||
- **Compliance**: Configurable retention for regulatory needs
|
||||
@@ -92,6 +92,10 @@ func formatDays(d time.Duration) string {
|
||||
return formatInt(days) + "d"
|
||||
}
|
||||
|
||||
// formatInt converts a non-negative integer to its decimal string representation.
|
||||
// For single digits (0-9), it uses direct rune conversion for efficiency.
|
||||
// For larger numbers, it extracts digits iteratively from least to most significant.
|
||||
// This avoids importing strconv for simple integer formatting in the activity package.
|
||||
func formatInt(n int) string {
|
||||
if n < 10 {
|
||||
return string(rune('0'+n))
|
||||
|
||||
189
internal/agent/state_test.go
Normal file
189
internal/agent/state_test.go
Normal file
@@ -0,0 +1,189 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestStateConstants(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
state State
|
||||
value string
|
||||
}{
|
||||
{"StateStopped", StateStopped, "stopped"},
|
||||
{"StateRunning", StateRunning, "running"},
|
||||
{"StatePaused", StatePaused, "paused"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if string(tt.state) != tt.value {
|
||||
t.Errorf("State constant = %q, want %q", tt.state, tt.value)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateManager_StateFile(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
manager := NewStateManager[TestState](tmpDir, "test-state.json", func() *TestState {
|
||||
return &TestState{Value: "default"}
|
||||
})
|
||||
|
||||
expectedPath := filepath.Join(tmpDir, ".runtime", "test-state.json")
|
||||
if manager.StateFile() != expectedPath {
|
||||
t.Errorf("StateFile() = %q, want %q", manager.StateFile(), expectedPath)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateManager_Load_NoFile(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
manager := NewStateManager[TestState](tmpDir, "nonexistent.json", func() *TestState {
|
||||
return &TestState{Value: "default"}
|
||||
})
|
||||
|
||||
state, err := manager.Load()
|
||||
if err != nil {
|
||||
t.Fatalf("Load() error = %v", err)
|
||||
}
|
||||
if state.Value != "default" {
|
||||
t.Errorf("Load() value = %q, want %q", state.Value, "default")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateManager_Load_Save_Load(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
manager := NewStateManager[TestState](tmpDir, "test-state.json", func() *TestState {
|
||||
return &TestState{Value: "default"}
|
||||
})
|
||||
|
||||
// Save initial state
|
||||
state := &TestState{Value: "test-value", Count: 42}
|
||||
if err := manager.Save(state); err != nil {
|
||||
t.Fatalf("Save() error = %v", err)
|
||||
}
|
||||
|
||||
// Load it back
|
||||
loaded, err := manager.Load()
|
||||
if err != nil {
|
||||
t.Fatalf("Load() error = %v", err)
|
||||
}
|
||||
if loaded.Value != state.Value {
|
||||
t.Errorf("Load() value = %q, want %q", loaded.Value, state.Value)
|
||||
}
|
||||
if loaded.Count != state.Count {
|
||||
t.Errorf("Load() count = %d, want %d", loaded.Count, state.Count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateManager_Load_CreatesDirectory(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
manager := NewStateManager[TestState](tmpDir, "test-state.json", func() *TestState {
|
||||
return &TestState{Value: "default"}
|
||||
})
|
||||
|
||||
// Save should create .runtime directory
|
||||
state := &TestState{Value: "test"}
|
||||
if err := manager.Save(state); err != nil {
|
||||
t.Fatalf("Save() error = %v", err)
|
||||
}
|
||||
|
||||
// Verify directory was created
|
||||
runtimeDir := filepath.Join(tmpDir, ".runtime")
|
||||
if _, err := os.Stat(runtimeDir); err != nil {
|
||||
t.Errorf("Save() should create .runtime directory: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateManager_Load_InvalidJSON(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
manager := NewStateManager[TestState](tmpDir, "test-state.json", func() *TestState {
|
||||
return &TestState{Value: "default"}
|
||||
})
|
||||
|
||||
// Write invalid JSON
|
||||
statePath := manager.StateFile()
|
||||
if err := os.MkdirAll(filepath.Dir(statePath), 0755); err != nil {
|
||||
t.Fatalf("Failed to create directory: %v", err)
|
||||
}
|
||||
if err := os.WriteFile(statePath, []byte("invalid json"), 0644); err != nil {
|
||||
t.Fatalf("Failed to write file: %v", err)
|
||||
}
|
||||
|
||||
_, err := manager.Load()
|
||||
if err == nil {
|
||||
t.Error("Load() with invalid JSON should return error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestState_String(t *testing.T) {
|
||||
tests := []struct {
|
||||
state State
|
||||
want string
|
||||
}{
|
||||
{StateStopped, "stopped"},
|
||||
{StateRunning, "running"},
|
||||
{StatePaused, "paused"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
if string(tt.state) != tt.want {
|
||||
t.Errorf("State(%q) = %q, want %q", tt.state, string(tt.state), tt.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateManager_GenericType(t *testing.T) {
|
||||
// Test that StateManager works with different types
|
||||
|
||||
type ComplexState struct {
|
||||
Name string `json:"name"`
|
||||
Values []int `json:"values"`
|
||||
Enabled bool `json:"enabled"`
|
||||
Nested struct {
|
||||
X int `json:"x"`
|
||||
} `json:"nested"`
|
||||
}
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
manager := NewStateManager[ComplexState](tmpDir, "complex.json", func() *ComplexState {
|
||||
return &ComplexState{Name: "default", Values: []int{}}
|
||||
})
|
||||
|
||||
original := &ComplexState{
|
||||
Name: "test",
|
||||
Values: []int{1, 2, 3},
|
||||
Enabled: true,
|
||||
}
|
||||
original.Nested.X = 42
|
||||
|
||||
if err := manager.Save(original); err != nil {
|
||||
t.Fatalf("Save() error = %v", err)
|
||||
}
|
||||
|
||||
loaded, err := manager.Load()
|
||||
if err != nil {
|
||||
t.Fatalf("Load() error = %v", err)
|
||||
}
|
||||
|
||||
if loaded.Name != original.Name {
|
||||
t.Errorf("Name = %q, want %q", loaded.Name, original.Name)
|
||||
}
|
||||
if len(loaded.Values) != len(original.Values) {
|
||||
t.Errorf("Values length = %d, want %d", len(loaded.Values), len(original.Values))
|
||||
}
|
||||
if loaded.Enabled != original.Enabled {
|
||||
t.Errorf("Enabled = %v, want %v", loaded.Enabled, original.Enabled)
|
||||
}
|
||||
if loaded.Nested.X != original.Nested.X {
|
||||
t.Errorf("Nested.X = %d, want %d", loaded.Nested.X, original.Nested.X)
|
||||
}
|
||||
}
|
||||
|
||||
// TestState is a minimal state payload used by the StateManager tests in
// this file. Its tagged fields round-trip through JSON via Save/Load.
type TestState struct {
	Value string `json:"value"` // arbitrary string payload
	Count int    `json:"count"` // arbitrary numeric payload
}
|
||||
@@ -44,8 +44,8 @@ type Issue struct {
|
||||
|
||||
// Agent bead slots (type=agent only)
|
||||
HookBead string `json:"hook_bead,omitempty"` // Current work attached to agent's hook
|
||||
RoleBead string `json:"role_bead,omitempty"` // Role definition bead (shared)
|
||||
AgentState string `json:"agent_state,omitempty"` // Agent lifecycle state (spawning, working, done, stuck)
|
||||
// Note: role_bead field removed - role definitions are now config-based
|
||||
|
||||
// Counts from list output
|
||||
DependencyCount int `json:"dependency_count,omitempty"`
|
||||
@@ -86,6 +86,7 @@ type CreateOptions struct {
|
||||
Description string
|
||||
Parent string
|
||||
Actor string // Who is creating this issue (populates created_by)
|
||||
Ephemeral bool // Create as ephemeral (wisp) - not exported to JSONL
|
||||
}
|
||||
|
||||
// UpdateOptions specifies options for updating an issue.
|
||||
@@ -112,6 +113,12 @@ type SyncStatus struct {
|
||||
type Beads struct {
|
||||
workDir string
|
||||
beadsDir string // Optional BEADS_DIR override for cross-database access
|
||||
isolated bool // If true, suppress inherited beads env vars (for test isolation)
|
||||
|
||||
// Lazy-cached town root for routing resolution.
|
||||
// Populated on first call to getTownRoot() to avoid filesystem walk on every operation.
|
||||
townRoot string
|
||||
searchedRoot bool
|
||||
}
|
||||
|
||||
// New creates a new Beads wrapper for the given directory.
|
||||
@@ -119,24 +126,93 @@ func New(workDir string) *Beads {
|
||||
return &Beads{workDir: workDir}
|
||||
}
|
||||
|
||||
// NewIsolated creates a Beads wrapper for test isolation.
|
||||
// This suppresses inherited beads env vars (BD_ACTOR, BEADS_DB) to prevent
|
||||
// tests from accidentally routing to production databases.
|
||||
func NewIsolated(workDir string) *Beads {
|
||||
return &Beads{workDir: workDir, isolated: true}
|
||||
}
|
||||
|
||||
// NewWithBeadsDir creates a Beads wrapper with an explicit BEADS_DIR.
|
||||
// This is needed when running from a polecat worktree but accessing town-level beads.
|
||||
func NewWithBeadsDir(workDir, beadsDir string) *Beads {
|
||||
return &Beads{workDir: workDir, beadsDir: beadsDir}
|
||||
}
|
||||
|
||||
// getActor returns the BD_ACTOR value for this context.
|
||||
// Returns empty string when in isolated mode (tests) to prevent
|
||||
// inherited actors from routing to production databases.
|
||||
func (b *Beads) getActor() string {
|
||||
if b.isolated {
|
||||
return ""
|
||||
}
|
||||
return os.Getenv("BD_ACTOR")
|
||||
}
|
||||
|
||||
// getTownRoot returns the Gas Town root directory, using lazy caching.
|
||||
// The town root is found by walking up from workDir looking for mayor/town.json.
|
||||
// Returns empty string if not in a Gas Town project.
|
||||
func (b *Beads) getTownRoot() string {
|
||||
if !b.searchedRoot {
|
||||
b.townRoot = FindTownRoot(b.workDir)
|
||||
b.searchedRoot = true
|
||||
}
|
||||
return b.townRoot
|
||||
}
|
||||
|
||||
// getResolvedBeadsDir returns the beads directory this wrapper is operating on.
|
||||
// This follows any redirects and returns the actual beads directory path.
|
||||
func (b *Beads) getResolvedBeadsDir() string {
|
||||
if b.beadsDir != "" {
|
||||
return b.beadsDir
|
||||
}
|
||||
return ResolveBeadsDir(b.workDir)
|
||||
}
|
||||
|
||||
// Init initializes a new beads database in the working directory.
|
||||
// This uses the same environment isolation as other commands.
|
||||
func (b *Beads) Init(prefix string) error {
|
||||
_, err := b.run("init", "--prefix", prefix, "--quiet")
|
||||
return err
|
||||
}
|
||||
|
||||
// run executes a bd command and returns stdout.
|
||||
func (b *Beads) run(args ...string) ([]byte, error) {
|
||||
// Use --no-daemon for faster read operations (avoids daemon IPC overhead)
|
||||
// The daemon is primarily useful for write coalescing, not reads
|
||||
fullArgs := append([]string{"--no-daemon"}, args...)
|
||||
// The daemon is primarily useful for write coalescing, not reads.
|
||||
// Use --allow-stale to prevent failures when db is out of sync with JSONL
|
||||
// (e.g., after daemon is killed during shutdown before syncing).
|
||||
fullArgs := append([]string{"--no-daemon", "--allow-stale"}, args...)
|
||||
|
||||
// Always explicitly set BEADS_DIR to prevent inherited env vars from
|
||||
// causing prefix mismatches. Use explicit beadsDir if set, otherwise
|
||||
// resolve from working directory.
|
||||
beadsDir := b.beadsDir
|
||||
if beadsDir == "" {
|
||||
beadsDir = ResolveBeadsDir(b.workDir)
|
||||
}
|
||||
|
||||
// In isolated mode, use --db flag to force specific database path
|
||||
// This bypasses bd's routing logic that can redirect to .beads-planning
|
||||
// Skip --db for init command since it creates the database
|
||||
isInit := len(args) > 0 && args[0] == "init"
|
||||
if b.isolated && !isInit {
|
||||
beadsDB := filepath.Join(beadsDir, "beads.db")
|
||||
fullArgs = append([]string{"--db", beadsDB}, fullArgs...)
|
||||
}
|
||||
|
||||
cmd := exec.Command("bd", fullArgs...) //nolint:gosec // G204: bd is a trusted internal tool
|
||||
cmd.Dir = b.workDir
|
||||
|
||||
// Set BEADS_DIR if specified (enables cross-database access)
|
||||
if b.beadsDir != "" {
|
||||
cmd.Env = append(os.Environ(), "BEADS_DIR="+b.beadsDir)
|
||||
// Build environment: filter beads env vars when in isolated mode (tests)
|
||||
// to prevent routing to production databases.
|
||||
var env []string
|
||||
if b.isolated {
|
||||
env = filterBeadsEnv(os.Environ())
|
||||
} else {
|
||||
env = os.Environ()
|
||||
}
|
||||
cmd.Env = append(env, "BEADS_DIR="+beadsDir)
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.Stdout = &stdout
|
||||
@@ -147,6 +223,13 @@ func (b *Beads) run(args ...string) ([]byte, error) {
|
||||
return nil, b.wrapError(err, stderr.String(), args)
|
||||
}
|
||||
|
||||
// Handle bd --no-daemon exit code 0 bug: when issue not found,
|
||||
// --no-daemon exits 0 but writes error to stderr with empty stdout.
|
||||
// Detect this case and treat as error to avoid JSON parse failures.
|
||||
if stdout.Len() == 0 && stderr.Len() > 0 {
|
||||
return nil, b.wrapError(fmt.Errorf("command produced no output"), stderr.String(), args)
|
||||
}
|
||||
|
||||
return stdout.Bytes(), nil
|
||||
}
|
||||
|
||||
@@ -170,7 +253,9 @@ func (b *Beads) wrapError(err error, stderr string, args []string) error {
|
||||
}
|
||||
|
||||
// ErrNotFound is widely used for issue lookups - acceptable exception
|
||||
if strings.Contains(stderr, "not found") || strings.Contains(stderr, "Issue not found") {
|
||||
// Match various "not found" error patterns from bd
|
||||
if strings.Contains(stderr, "not found") || strings.Contains(stderr, "Issue not found") ||
|
||||
strings.Contains(stderr, "no issue found") {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
@@ -180,6 +265,27 @@ func (b *Beads) wrapError(err error, stderr string, args []string) error {
|
||||
return fmt.Errorf("bd %s: %w", strings.Join(args, " "), err)
|
||||
}
|
||||
|
||||
// filterBeadsEnv returns a copy of environ with beads-related variables
// removed. This keeps isolated (test) runs from being routed to production
// databases by inherited settings.
func filterBeadsEnv(environ []string) []string {
	// Prefixes that can influence bd's database routing:
	//   BD_ACTOR= - direct beads actor config
	//   BEADS_    - any beads configuration variable
	//   GT_ROOT=  - lets bd discover a global routes file
	//   HOME=     - lets bd discover ~/.beads-planning routing
	blockedPrefixes := []string{"BD_ACTOR=", "BEADS_", "GT_ROOT=", "HOME="}

	kept := make([]string, 0, len(environ))
	for _, entry := range environ {
		blocked := false
		for _, prefix := range blockedPrefixes {
			if strings.HasPrefix(entry, prefix) {
				blocked = true
				break
			}
		}
		if !blocked {
			kept = append(kept, entry)
		}
	}
	return kept
}
|
||||
|
||||
// List returns issues matching the given options.
|
||||
func (b *Beads) List(opts ListOptions) ([]*Issue, error) {
|
||||
args := []string{"list", "--json"}
|
||||
@@ -378,10 +484,14 @@ func (b *Beads) Create(opts CreateOptions) (*Issue, error) {
|
||||
if opts.Parent != "" {
|
||||
args = append(args, "--parent="+opts.Parent)
|
||||
}
|
||||
if opts.Ephemeral {
|
||||
args = append(args, "--ephemeral")
|
||||
}
|
||||
// Default Actor from BD_ACTOR env var if not specified
|
||||
// Uses getActor() to respect isolated mode (tests)
|
||||
actor := opts.Actor
|
||||
if actor == "" {
|
||||
actor = os.Getenv("BD_ACTOR")
|
||||
actor = b.getActor()
|
||||
}
|
||||
if actor != "" {
|
||||
args = append(args, "--actor="+actor)
|
||||
@@ -405,6 +515,9 @@ func (b *Beads) Create(opts CreateOptions) (*Issue, error) {
|
||||
// deterministic IDs rather than auto-generated ones.
|
||||
func (b *Beads) CreateWithID(id string, opts CreateOptions) (*Issue, error) {
|
||||
args := []string{"create", "--json", "--id=" + id}
|
||||
if NeedsForceForID(id) {
|
||||
args = append(args, "--force")
|
||||
}
|
||||
|
||||
if opts.Title != "" {
|
||||
args = append(args, "--title="+opts.Title)
|
||||
@@ -423,9 +536,10 @@ func (b *Beads) CreateWithID(id string, opts CreateOptions) (*Issue, error) {
|
||||
args = append(args, "--parent="+opts.Parent)
|
||||
}
|
||||
// Default Actor from BD_ACTOR env var if not specified
|
||||
// Uses getActor() to respect isolated mode (tests)
|
||||
actor := opts.Actor
|
||||
if actor == "" {
|
||||
actor = os.Getenv("BD_ACTOR")
|
||||
actor = b.getActor()
|
||||
}
|
||||
if actor != "" {
|
||||
args = append(args, "--actor="+actor)
|
||||
@@ -637,15 +751,16 @@ This is physics, not politeness. Gas Town is a steam engine - you are a piston.
|
||||
|
||||
## Session Close Protocol
|
||||
|
||||
Before saying "done":
|
||||
Before signaling completion:
|
||||
1. git status (check what changed)
|
||||
2. git add <files> (stage code changes)
|
||||
3. bd sync (commit beads changes)
|
||||
4. git commit -m "..." (commit code)
|
||||
5. bd sync (commit any new beads changes)
|
||||
6. git push (push to remote)
|
||||
7. ` + "`gt done`" + ` (submit to merge queue and exit)
|
||||
|
||||
**Work is not done until pushed.**
|
||||
**Polecats MUST call ` + "`gt done`" + ` - this submits work and exits the session.**
|
||||
`
|
||||
|
||||
// ProvisionPrimeMD writes the Gas Town PRIME.md file to the specified beads directory.
|
||||
|
||||
@@ -5,10 +5,32 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// runSlotSet invokes `bd slot set <beadID> <slotName> <slotValue>` with the
// given working directory. Running from workDir matters when the agent bead
// was created via routing to a different database than the Beads wrapper's
// default directory.
func runSlotSet(workDir, beadID, slotName, slotValue string) error {
	cmd := exec.Command("bd", "slot", "set", beadID, slotName, slotValue)
	cmd.Dir = workDir
	output, err := cmd.CombinedOutput()
	if err != nil {
		// Surface bd's own output alongside the exit error for context.
		return fmt.Errorf("%s: %w", strings.TrimSpace(string(output)), err)
	}
	return nil
}
|
||||
|
||||
// runSlotClear invokes `bd slot clear <beadID> <slotName>` with the given
// working directory, for beads that live in a database other than the
// wrapper's default.
func runSlotClear(workDir, beadID, slotName string) error {
	cmd := exec.Command("bd", "slot", "clear", beadID, slotName)
	cmd.Dir = workDir
	output, err := cmd.CombinedOutput()
	if err != nil {
		// Surface bd's own output alongside the exit error for context.
		return fmt.Errorf("%s: %w", strings.TrimSpace(string(output)), err)
	}
	return nil
}
|
||||
|
||||
// AgentFields holds structured fields for agent beads.
|
||||
// These are stored as "key: value" lines in the description.
|
||||
type AgentFields struct {
|
||||
@@ -16,10 +38,11 @@ type AgentFields struct {
|
||||
Rig string // Rig name (empty for global agents like mayor/deacon)
|
||||
AgentState string // spawning, working, done, stuck
|
||||
HookBead string // Currently pinned work bead ID
|
||||
RoleBead string // Role definition bead ID (canonical location; may not exist yet)
|
||||
CleanupStatus string // ZFC: polecat self-reports git state (clean, has_uncommitted, has_stash, has_unpushed)
|
||||
ActiveMR string // Currently active merge request bead ID (for traceability)
|
||||
NotificationLevel string // DND mode: verbose, normal, muted (default: normal)
|
||||
// Note: RoleBead field removed - role definitions are now config-based.
|
||||
// See internal/config/roles/*.toml and config-based-roles.md.
|
||||
}
|
||||
|
||||
// Notification level constants
|
||||
@@ -54,11 +77,7 @@ func FormatAgentDescription(title string, fields *AgentFields) string {
|
||||
lines = append(lines, "hook_bead: null")
|
||||
}
|
||||
|
||||
if fields.RoleBead != "" {
|
||||
lines = append(lines, fmt.Sprintf("role_bead: %s", fields.RoleBead))
|
||||
} else {
|
||||
lines = append(lines, "role_bead: null")
|
||||
}
|
||||
// Note: role_bead field no longer written - role definitions are config-based
|
||||
|
||||
if fields.CleanupStatus != "" {
|
||||
lines = append(lines, fmt.Sprintf("cleanup_status: %s", fields.CleanupStatus))
|
||||
@@ -112,7 +131,7 @@ func ParseAgentFields(description string) *AgentFields {
|
||||
case "hook_bead":
|
||||
fields.HookBead = value
|
||||
case "role_bead":
|
||||
fields.RoleBead = value
|
||||
// Ignored - role definitions are now config-based (backward compat)
|
||||
case "cleanup_status":
|
||||
fields.CleanupStatus = value
|
||||
case "active_mr":
|
||||
@@ -129,18 +148,37 @@ func ParseAgentFields(description string) *AgentFields {
|
||||
// The ID format is: <prefix>-<rig>-<role>-<name> (e.g., gt-gastown-polecat-Toast)
|
||||
// Use AgentBeadID() helper to generate correct IDs.
|
||||
// The created_by field is populated from BD_ACTOR env var for provenance tracking.
|
||||
//
|
||||
// This function automatically ensures custom types are configured in the target
|
||||
// database before creating the bead. This handles multi-repo routing scenarios
|
||||
// where the bead may be routed to a different database than the one this wrapper
|
||||
// is connected to.
|
||||
func (b *Beads) CreateAgentBead(id, title string, fields *AgentFields) (*Issue, error) {
|
||||
// Resolve where this bead will actually be written (handles multi-repo routing)
|
||||
targetDir := ResolveRoutingTarget(b.getTownRoot(), id, b.getResolvedBeadsDir())
|
||||
|
||||
// Ensure target database has custom types configured
|
||||
// This is cached (sentinel file + in-memory) so repeated calls are fast
|
||||
if err := EnsureCustomTypes(targetDir); err != nil {
|
||||
return nil, fmt.Errorf("prepare target for agent bead %s: %w", id, err)
|
||||
}
|
||||
|
||||
description := FormatAgentDescription(title, fields)
|
||||
|
||||
args := []string{"create", "--json",
|
||||
"--id=" + id,
|
||||
"--title=" + title,
|
||||
"--description=" + description,
|
||||
"--type=agent",
|
||||
"--labels=gt:agent",
|
||||
}
|
||||
if NeedsForceForID(id) {
|
||||
args = append(args, "--force")
|
||||
}
|
||||
|
||||
// Default actor from BD_ACTOR env var for provenance tracking
|
||||
if actor := os.Getenv("BD_ACTOR"); actor != "" {
|
||||
// Uses getActor() to respect isolated mode (tests)
|
||||
if actor := b.getActor(); actor != "" {
|
||||
args = append(args, "--actor="+actor)
|
||||
}
|
||||
|
||||
@@ -154,19 +192,14 @@ func (b *Beads) CreateAgentBead(id, title string, fields *AgentFields) (*Issue,
|
||||
return nil, fmt.Errorf("parsing bd create output: %w", err)
|
||||
}
|
||||
|
||||
// Set the role slot if specified (this is the authoritative storage)
|
||||
if fields != nil && fields.RoleBead != "" {
|
||||
if _, err := b.run("slot", "set", id, "role", fields.RoleBead); err != nil {
|
||||
// Non-fatal: warn but continue
|
||||
fmt.Printf("Warning: could not set role slot: %v\n", err)
|
||||
}
|
||||
}
|
||||
// Note: role slot no longer set - role definitions are config-based
|
||||
|
||||
// Set the hook slot if specified (this is the authoritative storage)
|
||||
// This fixes the slot inconsistency bug where bead status is 'hooked' but
|
||||
// agent's hook slot is empty. See mi-619.
|
||||
// Must run from targetDir since that's where the agent bead was created
|
||||
if fields != nil && fields.HookBead != "" {
|
||||
if _, err := b.run("slot", "set", id, "hook", fields.HookBead); err != nil {
|
||||
if err := runSlotSet(targetDir, id, "hook", fields.HookBead); err != nil {
|
||||
// Non-fatal: warn but continue - description text has the backup
|
||||
fmt.Printf("Warning: could not set hook slot: %v\n", err)
|
||||
}
|
||||
@@ -175,6 +208,72 @@ func (b *Beads) CreateAgentBead(id, title string, fields *AgentFields) (*Issue,
|
||||
return &issue, nil
|
||||
}
|
||||
|
||||
// CreateOrReopenAgentBead creates an agent bead or reopens an existing one.
// This handles the case where a polecat is nuked and re-spawned with the same
// name: the old agent bead exists as a closed bead, so we reopen and update it
// instead of failing with a UNIQUE constraint error.
//
// NOTE: This does NOT handle tombstones. If the old bead was hard-deleted
// (creating a tombstone), this function will fail. Use CloseAndClearAgentBead
// instead of DeleteAgentBead when cleaning up agent beads so they can be
// reopened later.
//
// The function:
//  1. Tries to create the agent bead.
//  2. On UNIQUE constraint failure, reopens the existing bead and updates its
//     title/description, then re-establishes the hook slot.
func (b *Beads) CreateOrReopenAgentBead(id, title string, fields *AgentFields) (*Issue, error) {
	// Happy path: the bead does not exist yet.
	issue, err := b.CreateAgentBead(id, title, fields)
	if err == nil {
		return issue, nil
	}

	// Anything other than a UNIQUE constraint violation is a real failure.
	if !strings.Contains(err.Error(), "UNIQUE constraint failed") {
		return nil, err
	}

	// Resolve where this bead lives (slot operations must run there).
	targetDir := ResolveRoutingTarget(b.getTownRoot(), id, b.getResolvedBeadsDir())

	// The bead already exists (should be closed from a previous polecat
	// lifecycle). Reopen it; tolerate "already open" since that just means
	// the update below can proceed directly.
	if _, reopenErr := b.run("reopen", id, "--reason=re-spawning agent"); reopenErr != nil {
		if !strings.Contains(reopenErr.Error(), "already open") {
			return nil, fmt.Errorf("reopening existing agent bead: %w (original error: %v)", reopenErr, err)
		}
	}

	// Refresh the bead's title and description with the new agent fields.
	description := FormatAgentDescription(title, fields)
	updateOpts := UpdateOptions{
		Title:       &title,
		Description: &description,
	}
	if err := b.Update(id, updateOpts); err != nil {
		return nil, fmt.Errorf("updating reopened agent bead: %w", err)
	}

	// Note: role slot no longer set - role definitions are config-based.

	// Clear any existing hook slot (handles stale state from the previous
	// lifecycle). Best-effort by design; must run from targetDir since
	// that's where the agent bead lives.
	_ = runSlotClear(targetDir, id, "hook")

	// Set the hook slot if specified. Non-fatal on failure because the
	// description text carries the hook_bead value as a backup.
	if fields != nil && fields.HookBead != "" {
		if err := runSlotSet(targetDir, id, "hook", fields.HookBead); err != nil {
			fmt.Printf("Warning: could not set hook slot: %v\n", err)
		}
	}

	// Return the bead in its updated state.
	return b.Show(id)
}
|
||||
|
||||
// UpdateAgentState updates the agent_state field in an agent bead.
|
||||
// Optionally updates hook_bead if provided.
|
||||
//
|
||||
@@ -338,11 +437,70 @@ func (b *Beads) GetAgentNotificationLevel(id string) (string, error) {
|
||||
|
||||
// DeleteAgentBead permanently deletes an agent bead.
// Uses --hard --force for immediate permanent deletion (no tombstone).
//
// WARNING: Due to a bd bug, --hard --force still creates tombstones instead
// of truly deleting. This breaks CreateOrReopenAgentBead because tombstones
// are invisible to bd show/reopen but still block bd create via the UNIQUE
// constraint.
//
// WORKAROUND: Use CloseAndClearAgentBead instead, which allows
// CreateOrReopenAgentBead to reopen the bead on re-spawn.
func (b *Beads) DeleteAgentBead(id string) error {
	_, err := b.run("delete", id, "--hard", "--force")
	return err
}
|
||||
|
||||
// CloseAndClearAgentBead closes an agent bead (soft delete).
|
||||
// This is the recommended way to clean up agent beads because CreateOrReopenAgentBead
|
||||
// can reopen closed beads when re-spawning polecats with the same name.
|
||||
//
|
||||
// This is a workaround for the bd tombstone bug where DeleteAgentBead creates
|
||||
// tombstones that cannot be reopened.
|
||||
//
|
||||
// To emulate the clean slate of delete --force --hard, this clears all mutable
|
||||
// fields (hook_bead, active_mr, cleanup_status, agent_state) before closing.
|
||||
func (b *Beads) CloseAndClearAgentBead(id, reason string) error {
|
||||
// Clear mutable fields to emulate delete --force --hard behavior.
|
||||
// This ensures reopened agent beads don't have stale state.
|
||||
|
||||
// First get current issue to preserve immutable fields
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
// If we can't read the issue, still attempt to close
|
||||
args := []string{"close", id}
|
||||
if reason != "" {
|
||||
args = append(args, "--reason="+reason)
|
||||
}
|
||||
_, closeErr := b.run(args...)
|
||||
return closeErr
|
||||
}
|
||||
|
||||
// Parse existing fields and clear mutable ones
|
||||
fields := ParseAgentFields(issue.Description)
|
||||
fields.HookBead = "" // Clear hook_bead
|
||||
fields.ActiveMR = "" // Clear active_mr
|
||||
fields.CleanupStatus = "" // Clear cleanup_status
|
||||
fields.AgentState = "closed"
|
||||
|
||||
// Update description with cleared fields
|
||||
description := FormatAgentDescription(issue.Title, fields)
|
||||
if err := b.Update(id, UpdateOptions{Description: &description}); err != nil {
|
||||
// Non-fatal: continue with close even if update fails
|
||||
}
|
||||
|
||||
// Also clear the hook slot in the database
|
||||
if err := b.ClearHookBead(id); err != nil {
|
||||
// Non-fatal
|
||||
}
|
||||
|
||||
args := []string{"close", id}
|
||||
if reason != "" {
|
||||
args = append(args, "--reason="+reason)
|
||||
}
|
||||
_, err = b.run(args...)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetAgentBead retrieves an agent bead by ID.
|
||||
// Returns nil if not found.
|
||||
func (b *Beads) GetAgentBead(id string) (*Issue, *AgentFields, error) {
|
||||
|
||||
529
internal/beads/beads_channel.go
Normal file
529
internal/beads/beads_channel.go
Normal file
@@ -0,0 +1,529 @@
|
||||
// Package beads provides channel bead management for beads-native messaging.
|
||||
// Channels are named pub/sub streams where messages are broadcast to subscribers.
|
||||
package beads
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ChannelFields holds structured fields for channel beads.
// These are stored as "key: value" lines in the bead description and are
// written by FormatChannelDescription / read back by ParseChannelFields.
type ChannelFields struct {
	Name           string   // Unique channel name (e.g., "alerts", "builds")
	Subscribers    []string // Addresses subscribed to this channel
	Status         string   // active, closed (see ChannelStatus* constants)
	RetentionCount int      // Number of recent messages to retain (0 = unlimited)
	RetentionHours int      // Hours to retain messages (0 = forever)
	CreatedBy      string   // Who created the channel
	CreatedAt      string   // ISO 8601 (RFC 3339) creation timestamp
}

// Channel status constants for ChannelFields.Status.
const (
	ChannelStatusActive = "active"
	ChannelStatusClosed = "closed"
)
|
||||
|
||||
// FormatChannelDescription creates a description string from channel fields.
|
||||
func FormatChannelDescription(title string, fields *ChannelFields) string {
|
||||
if fields == nil {
|
||||
return title
|
||||
}
|
||||
|
||||
var lines []string
|
||||
lines = append(lines, title)
|
||||
lines = append(lines, "")
|
||||
lines = append(lines, fmt.Sprintf("name: %s", fields.Name))
|
||||
|
||||
// Subscribers stored as comma-separated list
|
||||
if len(fields.Subscribers) > 0 {
|
||||
lines = append(lines, fmt.Sprintf("subscribers: %s", strings.Join(fields.Subscribers, ",")))
|
||||
} else {
|
||||
lines = append(lines, "subscribers: null")
|
||||
}
|
||||
|
||||
if fields.Status != "" {
|
||||
lines = append(lines, fmt.Sprintf("status: %s", fields.Status))
|
||||
} else {
|
||||
lines = append(lines, "status: active")
|
||||
}
|
||||
|
||||
lines = append(lines, fmt.Sprintf("retention_count: %d", fields.RetentionCount))
|
||||
lines = append(lines, fmt.Sprintf("retention_hours: %d", fields.RetentionHours))
|
||||
|
||||
if fields.CreatedBy != "" {
|
||||
lines = append(lines, fmt.Sprintf("created_by: %s", fields.CreatedBy))
|
||||
} else {
|
||||
lines = append(lines, "created_by: null")
|
||||
}
|
||||
|
||||
if fields.CreatedAt != "" {
|
||||
lines = append(lines, fmt.Sprintf("created_at: %s", fields.CreatedAt))
|
||||
} else {
|
||||
lines = append(lines, "created_at: null")
|
||||
}
|
||||
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
// ParseChannelFields extracts channel fields from an issue's description.
|
||||
func ParseChannelFields(description string) *ChannelFields {
|
||||
fields := &ChannelFields{
|
||||
Status: ChannelStatusActive,
|
||||
}
|
||||
|
||||
for _, line := range strings.Split(description, "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
colonIdx := strings.Index(line, ":")
|
||||
if colonIdx == -1 {
|
||||
continue
|
||||
}
|
||||
|
||||
key := strings.TrimSpace(line[:colonIdx])
|
||||
value := strings.TrimSpace(line[colonIdx+1:])
|
||||
if value == "null" || value == "" {
|
||||
value = ""
|
||||
}
|
||||
|
||||
switch strings.ToLower(key) {
|
||||
case "name":
|
||||
fields.Name = value
|
||||
case "subscribers":
|
||||
if value != "" {
|
||||
// Parse comma-separated subscribers
|
||||
for _, s := range strings.Split(value, ",") {
|
||||
s = strings.TrimSpace(s)
|
||||
if s != "" {
|
||||
fields.Subscribers = append(fields.Subscribers, s)
|
||||
}
|
||||
}
|
||||
}
|
||||
case "status":
|
||||
fields.Status = value
|
||||
case "retention_count":
|
||||
if v, err := strconv.Atoi(value); err == nil {
|
||||
fields.RetentionCount = v
|
||||
}
|
||||
case "retention_hours":
|
||||
if v, err := strconv.Atoi(value); err == nil {
|
||||
fields.RetentionHours = v
|
||||
}
|
||||
case "created_by":
|
||||
fields.CreatedBy = value
|
||||
case "created_at":
|
||||
fields.CreatedAt = value
|
||||
}
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// ChannelBeadID maps a channel name to its bead ID.
// Format: hq-channel-<name>. Channels are town-level (hq- prefix) because
// they span rigs.
func ChannelBeadID(name string) string {
	const prefix = "hq-channel-"
	return prefix + name
}
|
||||
|
||||
// CreateChannelBead creates a channel bead for pub/sub messaging.
|
||||
// The ID format is: hq-channel-<name> (e.g., hq-channel-alerts)
|
||||
// Channels are town-level entities (hq- prefix) because they span rigs.
|
||||
// The created_by field is populated from BD_ACTOR env var for provenance tracking.
|
||||
func (b *Beads) CreateChannelBead(name string, subscribers []string, createdBy string) (*Issue, error) {
|
||||
id := ChannelBeadID(name)
|
||||
title := fmt.Sprintf("Channel: %s", name)
|
||||
|
||||
fields := &ChannelFields{
|
||||
Name: name,
|
||||
Subscribers: subscribers,
|
||||
Status: ChannelStatusActive,
|
||||
CreatedBy: createdBy,
|
||||
CreatedAt: time.Now().Format(time.RFC3339),
|
||||
}
|
||||
|
||||
description := FormatChannelDescription(title, fields)
|
||||
|
||||
args := []string{"create", "--json",
|
||||
"--id=" + id,
|
||||
"--title=" + title,
|
||||
"--description=" + description,
|
||||
"--type=task", // Channels use task type with gt:channel label
|
||||
"--labels=gt:channel",
|
||||
"--force", // Override prefix check (town beads may have mixed prefixes)
|
||||
}
|
||||
|
||||
// Default actor from BD_ACTOR env var for provenance tracking
|
||||
// Uses getActor() to respect isolated mode (tests)
|
||||
if actor := b.getActor(); actor != "" {
|
||||
args = append(args, "--actor="+actor)
|
||||
}
|
||||
|
||||
out, err := b.run(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issue Issue
|
||||
if err := json.Unmarshal(out, &issue); err != nil {
|
||||
return nil, fmt.Errorf("parsing bd create output: %w", err)
|
||||
}
|
||||
|
||||
return &issue, nil
|
||||
}
|
||||
|
||||
// GetChannelBead retrieves a channel bead by name.
|
||||
// Returns nil, nil if not found.
|
||||
func (b *Beads) GetChannelBead(name string) (*Issue, *ChannelFields, error) {
|
||||
id := ChannelBeadID(name)
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if !HasLabel(issue, "gt:channel") {
|
||||
return nil, nil, fmt.Errorf("bead %s is not a channel bead (missing gt:channel label)", id)
|
||||
}
|
||||
|
||||
fields := ParseChannelFields(issue.Description)
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
// GetChannelByID retrieves a channel bead by its full ID.
|
||||
// Returns nil, nil if not found.
|
||||
func (b *Beads) GetChannelByID(id string) (*Issue, *ChannelFields, error) {
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if !HasLabel(issue, "gt:channel") {
|
||||
return nil, nil, fmt.Errorf("bead %s is not a channel bead (missing gt:channel label)", id)
|
||||
}
|
||||
|
||||
fields := ParseChannelFields(issue.Description)
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
// UpdateChannelSubscribers updates the subscribers list for a channel.
|
||||
func (b *Beads) UpdateChannelSubscribers(name string, subscribers []string) error {
|
||||
issue, fields, err := b.GetChannelBead(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return fmt.Errorf("channel %q not found", name)
|
||||
}
|
||||
|
||||
fields.Subscribers = subscribers
|
||||
description := FormatChannelDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(issue.ID, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// SubscribeToChannel adds a subscriber to a channel if not already subscribed.
|
||||
func (b *Beads) SubscribeToChannel(name string, subscriber string) error {
|
||||
issue, fields, err := b.GetChannelBead(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return fmt.Errorf("channel %q not found", name)
|
||||
}
|
||||
|
||||
// Check if already subscribed
|
||||
for _, s := range fields.Subscribers {
|
||||
if s == subscriber {
|
||||
return nil // Already subscribed
|
||||
}
|
||||
}
|
||||
|
||||
fields.Subscribers = append(fields.Subscribers, subscriber)
|
||||
description := FormatChannelDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(issue.ID, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// UnsubscribeFromChannel removes a subscriber from a channel.
|
||||
func (b *Beads) UnsubscribeFromChannel(name string, subscriber string) error {
|
||||
issue, fields, err := b.GetChannelBead(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return fmt.Errorf("channel %q not found", name)
|
||||
}
|
||||
|
||||
// Filter out the subscriber
|
||||
var newSubscribers []string
|
||||
for _, s := range fields.Subscribers {
|
||||
if s != subscriber {
|
||||
newSubscribers = append(newSubscribers, s)
|
||||
}
|
||||
}
|
||||
|
||||
fields.Subscribers = newSubscribers
|
||||
description := FormatChannelDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(issue.ID, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// UpdateChannelRetention updates the retention policy for a channel.
|
||||
func (b *Beads) UpdateChannelRetention(name string, retentionCount, retentionHours int) error {
|
||||
issue, fields, err := b.GetChannelBead(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return fmt.Errorf("channel %q not found", name)
|
||||
}
|
||||
|
||||
fields.RetentionCount = retentionCount
|
||||
fields.RetentionHours = retentionHours
|
||||
description := FormatChannelDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(issue.ID, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// UpdateChannelStatus updates the status of a channel bead.
|
||||
func (b *Beads) UpdateChannelStatus(name, status string) error {
|
||||
// Validate status
|
||||
if status != ChannelStatusActive && status != ChannelStatusClosed {
|
||||
return fmt.Errorf("invalid channel status %q: must be active or closed", status)
|
||||
}
|
||||
|
||||
issue, fields, err := b.GetChannelBead(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return fmt.Errorf("channel %q not found", name)
|
||||
}
|
||||
|
||||
fields.Status = status
|
||||
description := FormatChannelDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(issue.ID, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// DeleteChannelBead permanently deletes a channel bead.
|
||||
func (b *Beads) DeleteChannelBead(name string) error {
|
||||
id := ChannelBeadID(name)
|
||||
_, err := b.run("delete", id, "--hard", "--force")
|
||||
return err
|
||||
}
|
||||
|
||||
// ListChannelBeads returns all channel beads.
|
||||
func (b *Beads) ListChannelBeads() (map[string]*ChannelFields, error) {
|
||||
out, err := b.run("list", "--label=gt:channel", "--json")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issues []*Issue
|
||||
if err := json.Unmarshal(out, &issues); err != nil {
|
||||
return nil, fmt.Errorf("parsing bd list output: %w", err)
|
||||
}
|
||||
|
||||
result := make(map[string]*ChannelFields, len(issues))
|
||||
for _, issue := range issues {
|
||||
fields := ParseChannelFields(issue.Description)
|
||||
if fields.Name != "" {
|
||||
result[fields.Name] = fields
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// LookupChannelByName finds a channel by its name field (not by ID).
|
||||
// This is used for address resolution where we may not know the full bead ID.
|
||||
func (b *Beads) LookupChannelByName(name string) (*Issue, *ChannelFields, error) {
|
||||
// First try direct lookup by standard ID format
|
||||
issue, fields, err := b.GetChannelBead(name)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if issue != nil {
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
// If not found by ID, search all channels by name field
|
||||
channels, err := b.ListChannelBeads()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if fields, ok := channels[name]; ok {
|
||||
// Found by name, now get the full issue
|
||||
id := ChannelBeadID(name)
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
return nil, nil, nil // Not found
|
||||
}
|
||||
|
||||
// EnforceChannelRetention prunes old messages from a channel to enforce its
// retention policy. Called after posting a new message to the channel
// (on-write cleanup). Enforces both count-based (RetentionCount) and
// time-based (RetentionHours) limits; a message matching either limit is
// pruned. Pruning closes messages (with a reason) rather than deleting them,
// preserving an audit trail. Returns nil immediately when the channel has no
// retention limits configured.
func (b *Beads) EnforceChannelRetention(name string) error {
	// Load the channel's retention configuration.
	_, fields, err := b.GetChannelBead(name)
	if err != nil {
		return err
	}
	if fields == nil {
		return fmt.Errorf("channel not found: %s", name)
	}

	// Nothing to do when neither limit is configured.
	if fields.RetentionCount <= 0 && fields.RetentionHours <= 0 {
		return nil
	}

	// Query all messages in this channel, oldest first (--sort=created), so
	// count-based pruning below can drop from the front of the slice.
	out, err := b.run("list",
		"--type=message",
		"--label=channel:"+name,
		"--json",
		"--limit=0",
		"--sort=created",
	)
	if err != nil {
		return fmt.Errorf("listing channel messages: %w", err)
	}

	// Only the ID and creation time are needed for retention decisions.
	var messages []struct {
		ID        string `json:"id"`
		CreatedAt string `json:"created_at"`
	}
	if err := json.Unmarshal(out, &messages); err != nil {
		return fmt.Errorf("parsing channel messages: %w", err)
	}

	// Collect IDs to prune in a set: a message can match both limits, and we
	// must close it only once.
	toDeleteIDs := make(map[string]bool)

	// Time-based retention: mark messages older than RetentionHours.
	if fields.RetentionHours > 0 {
		cutoff := time.Now().Add(-time.Duration(fields.RetentionHours) * time.Hour)
		for _, msg := range messages {
			createdAt, err := time.Parse(time.RFC3339, msg.CreatedAt)
			if err != nil {
				continue // Skip messages with unparseable timestamps
			}
			if createdAt.Before(cutoff) {
				toDeleteIDs[msg.ID] = true
			}
		}
	}

	// Count-based retention: mark the oldest messages beyond RetentionCount.
	// Messages are sorted oldest-first, so the first (len - count) entries
	// are the excess.
	if fields.RetentionCount > 0 {
		toDeleteByCount := len(messages) - fields.RetentionCount
		for i := 0; i < toDeleteByCount && i < len(messages); i++ {
			toDeleteIDs[messages[i].ID] = true
		}
	}

	// Close marked messages (best-effort: individual failures are ignored).
	for id := range toDeleteIDs {
		// Use close instead of delete for audit trail.
		_, _ = b.run("close", id, "--reason=channel retention pruning")
	}

	return nil
}
|
||||
|
||||
// PruneAllChannels enforces retention on all channels.
// Called by Deacon patrol as a backup cleanup mechanism.
// Enforces both count-based (RetentionCount) and time-based (RetentionHours) limits.
// Uses a 10% buffer for count-based pruning to avoid thrashing.
// Returns the number of messages successfully closed across all channels;
// per-channel errors are skipped so one bad channel cannot stall the patrol.
func (b *Beads) PruneAllChannels() (int, error) {
	channels, err := b.ListChannelBeads()
	if err != nil {
		return 0, err
	}

	pruned := 0
	for name, fields := range channels {
		// Skip if no retention limits configured
		if fields.RetentionCount <= 0 && fields.RetentionHours <= 0 {
			continue
		}

		// Get messages with timestamps (oldest first, per --sort=created)
		out, err := b.run("list",
			"--type=message",
			"--label=channel:"+name,
			"--json",
			"--limit=0",
			"--sort=created",
		)
		if err != nil {
			continue // Skip on error
		}

		var messages []struct {
			ID        string `json:"id"`
			CreatedAt string `json:"created_at"`
		}
		if err := json.Unmarshal(out, &messages); err != nil {
			continue
		}

		// Track which messages to delete (use map to avoid duplicates)
		toDeleteIDs := make(map[string]bool)

		// Time-based retention: delete messages older than RetentionHours
		if fields.RetentionHours > 0 {
			cutoff := time.Now().Add(-time.Duration(fields.RetentionHours) * time.Hour)
			for _, msg := range messages {
				createdAt, err := time.Parse(time.RFC3339, msg.CreatedAt)
				if err != nil {
					continue // Skip messages with unparseable timestamps
				}
				if createdAt.Before(cutoff) {
					toDeleteIDs[msg.ID] = true
				}
			}
		}

		// Count-based retention with 10% buffer to avoid thrashing:
		// only prune once the channel exceeds 110% of its cap, then
		// trim back down to exactly RetentionCount.
		if fields.RetentionCount > 0 {
			threshold := int(float64(fields.RetentionCount) * 1.1)
			if len(messages) > threshold {
				toDeleteByCount := len(messages) - fields.RetentionCount
				for i := 0; i < toDeleteByCount && i < len(messages); i++ {
					toDeleteIDs[messages[i].ID] = true
				}
			}
		}

		// Delete marked messages (close keeps an audit trail); only
		// successful closes count toward the pruned total.
		for id := range toDeleteIDs {
			if _, err := b.run("close", id, "--reason=patrol retention pruning"); err == nil {
				pruned++
			}
		}
	}

	return pruned, nil
}
|
||||
271
internal/beads/beads_channel_test.go
Normal file
271
internal/beads/beads_channel_test.go
Normal file
@@ -0,0 +1,271 @@
|
||||
package beads
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestFormatChannelDescription checks that FormatChannelDescription emits the
// expected "key: value" lines for a variety of channel configurations,
// including empty subscriber lists ("null"), retention fields, closed status,
// and nil fields (title only). Only line presence is asserted, not ordering.
func TestFormatChannelDescription(t *testing.T) {
	tests := []struct {
		name   string
		title  string
		fields *ChannelFields
		want   []string // Lines that should be present
	}{
		{
			name:  "basic channel",
			title: "Channel: alerts",
			fields: &ChannelFields{
				Name:        "alerts",
				Subscribers: []string{"gastown/crew/max", "gastown/witness"},
				Status:      ChannelStatusActive,
				CreatedBy:   "human",
				CreatedAt:   "2024-01-15T10:00:00Z",
			},
			want: []string{
				"Channel: alerts",
				"name: alerts",
				"subscribers: gastown/crew/max,gastown/witness",
				"status: active",
				"created_by: human",
				"created_at: 2024-01-15T10:00:00Z",
			},
		},
		{
			name:  "empty subscribers",
			title: "Channel: empty",
			fields: &ChannelFields{
				Name:        "empty",
				Subscribers: nil,
				Status:      ChannelStatusActive,
				CreatedBy:   "admin",
			},
			// nil subscriber list is serialized as the literal "null".
			want: []string{
				"name: empty",
				"subscribers: null",
				"created_by: admin",
			},
		},
		{
			name:  "with retention",
			title: "Channel: builds",
			fields: &ChannelFields{
				Name:           "builds",
				Subscribers:    []string{"*/witness"},
				RetentionCount: 100,
				RetentionHours: 24,
			},
			want: []string{
				"name: builds",
				"retention_count: 100",
				"retention_hours: 24",
			},
		},
		{
			name:  "closed channel",
			title: "Channel: old",
			fields: &ChannelFields{
				Name:   "old",
				Status: ChannelStatusClosed,
			},
			want: []string{
				"status: closed",
			},
		},
		{
			// nil fields should degrade to emitting just the title.
			name:   "nil fields",
			title:  "Just a title",
			fields: nil,
			want:   []string{"Just a title"},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := FormatChannelDescription(tt.title, tt.fields)
			for _, line := range tt.want {
				if !strings.Contains(got, line) {
					t.Errorf("FormatChannelDescription() missing line %q\ngot:\n%s", line, got)
				}
			}
		})
	}
}
|
||||
|
||||
// TestParseChannelFields checks field-by-field parsing of channel descriptions:
// full configs, "null" subscriber lists, whitespace trimming around
// comma-separated subscribers, the active-status default for empty input,
// and closed status. Raw-string fixtures are intentionally left-aligned —
// their leading whitespace is part of the parsed input.
func TestParseChannelFields(t *testing.T) {
	tests := []struct {
		name        string
		description string
		want        *ChannelFields
	}{
		{
			name: "full channel",
			description: `Channel: alerts

name: alerts
subscribers: gastown/crew/max,gastown/witness,*/refinery
status: active
retention_count: 50
retention_hours: 48
created_by: human
created_at: 2024-01-15T10:00:00Z`,
			want: &ChannelFields{
				Name:           "alerts",
				Subscribers:    []string{"gastown/crew/max", "gastown/witness", "*/refinery"},
				Status:         ChannelStatusActive,
				RetentionCount: 50,
				RetentionHours: 48,
				CreatedBy:      "human",
				CreatedAt:      "2024-01-15T10:00:00Z",
			},
		},
		{
			name: "null subscribers",
			description: `Channel: empty

name: empty
subscribers: null
status: active
created_by: admin`,
			want: &ChannelFields{
				Name:        "empty",
				Subscribers: nil,
				Status:      ChannelStatusActive,
				CreatedBy:   "admin",
			},
		},
		{
			name: "single subscriber",
			description: `name: solo
subscribers: gastown/crew/max
status: active`,
			want: &ChannelFields{
				Name:        "solo",
				Subscribers: []string{"gastown/crew/max"},
				Status:      ChannelStatusActive,
			},
		},
		{
			name:        "empty description",
			description: "",
			want: &ChannelFields{
				Status: ChannelStatusActive, // Default
			},
		},
		{
			name: "subscribers with spaces",
			description: `name: spaced
subscribers: a, b , c
status: active`,
			want: &ChannelFields{
				Name:        "spaced",
				Subscribers: []string{"a", "b", "c"},
				Status:      ChannelStatusActive,
			},
		},
		{
			name: "closed status",
			description: `name: archived
status: closed`,
			want: &ChannelFields{
				Name:   "archived",
				Status: ChannelStatusClosed,
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := ParseChannelFields(tt.description)
			if got.Name != tt.want.Name {
				t.Errorf("Name = %q, want %q", got.Name, tt.want.Name)
			}
			if got.Status != tt.want.Status {
				t.Errorf("Status = %q, want %q", got.Status, tt.want.Status)
			}
			if got.RetentionCount != tt.want.RetentionCount {
				t.Errorf("RetentionCount = %d, want %d", got.RetentionCount, tt.want.RetentionCount)
			}
			if got.RetentionHours != tt.want.RetentionHours {
				t.Errorf("RetentionHours = %d, want %d", got.RetentionHours, tt.want.RetentionHours)
			}
			if got.CreatedBy != tt.want.CreatedBy {
				t.Errorf("CreatedBy = %q, want %q", got.CreatedBy, tt.want.CreatedBy)
			}
			if got.CreatedAt != tt.want.CreatedAt {
				t.Errorf("CreatedAt = %q, want %q", got.CreatedAt, tt.want.CreatedAt)
			}
			// Compare subscriber slices element-wise; length mismatch
			// short-circuits to avoid index panics.
			if len(got.Subscribers) != len(tt.want.Subscribers) {
				t.Errorf("Subscribers count = %d, want %d", len(got.Subscribers), len(tt.want.Subscribers))
			} else {
				for i, s := range got.Subscribers {
					if s != tt.want.Subscribers[i] {
						t.Errorf("Subscribers[%d] = %q, want %q", i, s, tt.want.Subscribers[i])
					}
				}
			}
		})
	}
}
|
||||
|
||||
func TestChannelBeadID(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
want string
|
||||
}{
|
||||
{"alerts", "hq-channel-alerts"},
|
||||
{"builds", "hq-channel-builds"},
|
||||
{"team-updates", "hq-channel-team-updates"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := ChannelBeadID(tt.name); got != tt.want {
|
||||
t.Errorf("ChannelBeadID(%q) = %q, want %q", tt.name, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestChannelRoundTrip verifies that a ChannelFields value survives a
// FormatChannelDescription -> ParseChannelFields round trip unchanged,
// including wildcard ("*/witness") and group ("@town") subscriber entries.
func TestChannelRoundTrip(t *testing.T) {
	// Test that Format -> Parse preserves data
	original := &ChannelFields{
		Name:           "test-channel",
		Subscribers:    []string{"gastown/crew/max", "*/witness", "@town"},
		Status:         ChannelStatusActive,
		RetentionCount: 100,
		RetentionHours: 72,
		CreatedBy:      "tester",
		CreatedAt:      "2024-01-15T12:00:00Z",
	}

	description := FormatChannelDescription("Channel: test-channel", original)
	parsed := ParseChannelFields(description)

	if parsed.Name != original.Name {
		t.Errorf("Name: got %q, want %q", parsed.Name, original.Name)
	}
	if parsed.Status != original.Status {
		t.Errorf("Status: got %q, want %q", parsed.Status, original.Status)
	}
	if parsed.RetentionCount != original.RetentionCount {
		t.Errorf("RetentionCount: got %d, want %d", parsed.RetentionCount, original.RetentionCount)
	}
	if parsed.RetentionHours != original.RetentionHours {
		t.Errorf("RetentionHours: got %d, want %d", parsed.RetentionHours, original.RetentionHours)
	}
	if parsed.CreatedBy != original.CreatedBy {
		t.Errorf("CreatedBy: got %q, want %q", parsed.CreatedBy, original.CreatedBy)
	}
	if parsed.CreatedAt != original.CreatedAt {
		t.Errorf("CreatedAt: got %q, want %q", parsed.CreatedAt, original.CreatedAt)
	}
	// Fatal on count mismatch: the element-wise loop below would be
	// meaningless (or panic) against a differently-sized slice.
	if len(parsed.Subscribers) != len(original.Subscribers) {
		t.Fatalf("Subscribers count: got %d, want %d", len(parsed.Subscribers), len(original.Subscribers))
	}
	for i, s := range original.Subscribers {
		if parsed.Subscribers[i] != s {
			t.Errorf("Subscribers[%d]: got %q, want %q", i, parsed.Subscribers[i], s)
		}
	}
}
|
||||
@@ -4,7 +4,6 @@ package beads
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@@ -28,7 +27,8 @@ func (b *Beads) CreateDogAgentBead(name, location string) (*Issue, error) {
|
||||
}
|
||||
|
||||
// Default actor from BD_ACTOR env var for provenance tracking
|
||||
if actor := os.Getenv("BD_ACTOR"); actor != "" {
|
||||
// Uses getActor() to respect isolated mode (tests)
|
||||
if actor := b.getActor(); actor != "" {
|
||||
args = append(args, "--actor="+actor)
|
||||
}
|
||||
|
||||
|
||||
441
internal/beads/beads_escalation.go
Normal file
441
internal/beads/beads_escalation.go
Normal file
@@ -0,0 +1,441 @@
|
||||
// Package beads provides escalation bead management.
|
||||
package beads
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// EscalationFields holds structured fields for escalation beads.
// These are stored as "key: value" lines in the description and
// round-tripped via FormatEscalationDescription / ParseEscalationFields.
// Empty string fields serialize as "null".
type EscalationFields struct {
	Severity          string // critical, high, medium, low
	Reason            string // Why this was escalated
	Source            string // Source identifier (e.g., plugin:rebuild-gt, patrol:deacon)
	EscalatedBy       string // Agent address that escalated (e.g., "gastown/Toast")
	EscalatedAt       string // ISO 8601 timestamp
	AckedBy           string // Agent that acknowledged (empty if not acked)
	AckedAt           string // When acknowledged (empty if not acked)
	ClosedBy          string // Agent that closed (empty if not closed)
	ClosedReason      string // Resolution reason (empty if not closed)
	RelatedBead       string // Optional: related bead ID (task, bug, etc.)
	OriginalSeverity  string // Original severity before any re-escalation
	ReescalationCount int    // Number of times this has been re-escalated
	LastReescalatedAt string // When last re-escalated (empty if never)
	LastReescalatedBy string // Who last re-escalated (empty if never)
}
|
||||
|
||||
// EscalationState constants for bead status tracking.
// Lifecycle: open -> acked -> closed.
const (
	EscalationOpen   = "open"   // Unacknowledged
	EscalationAcked  = "acked"  // Acknowledged but not resolved
	EscalationClosed = "closed" // Resolved/closed
)
|
||||
|
||||
// FormatEscalationDescription creates a description string from escalation fields.
|
||||
func FormatEscalationDescription(title string, fields *EscalationFields) string {
|
||||
if fields == nil {
|
||||
return title
|
||||
}
|
||||
|
||||
var lines []string
|
||||
lines = append(lines, title)
|
||||
lines = append(lines, "")
|
||||
lines = append(lines, fmt.Sprintf("severity: %s", fields.Severity))
|
||||
lines = append(lines, fmt.Sprintf("reason: %s", fields.Reason))
|
||||
if fields.Source != "" {
|
||||
lines = append(lines, fmt.Sprintf("source: %s", fields.Source))
|
||||
} else {
|
||||
lines = append(lines, "source: null")
|
||||
}
|
||||
lines = append(lines, fmt.Sprintf("escalated_by: %s", fields.EscalatedBy))
|
||||
lines = append(lines, fmt.Sprintf("escalated_at: %s", fields.EscalatedAt))
|
||||
|
||||
if fields.AckedBy != "" {
|
||||
lines = append(lines, fmt.Sprintf("acked_by: %s", fields.AckedBy))
|
||||
} else {
|
||||
lines = append(lines, "acked_by: null")
|
||||
}
|
||||
|
||||
if fields.AckedAt != "" {
|
||||
lines = append(lines, fmt.Sprintf("acked_at: %s", fields.AckedAt))
|
||||
} else {
|
||||
lines = append(lines, "acked_at: null")
|
||||
}
|
||||
|
||||
if fields.ClosedBy != "" {
|
||||
lines = append(lines, fmt.Sprintf("closed_by: %s", fields.ClosedBy))
|
||||
} else {
|
||||
lines = append(lines, "closed_by: null")
|
||||
}
|
||||
|
||||
if fields.ClosedReason != "" {
|
||||
lines = append(lines, fmt.Sprintf("closed_reason: %s", fields.ClosedReason))
|
||||
} else {
|
||||
lines = append(lines, "closed_reason: null")
|
||||
}
|
||||
|
||||
if fields.RelatedBead != "" {
|
||||
lines = append(lines, fmt.Sprintf("related_bead: %s", fields.RelatedBead))
|
||||
} else {
|
||||
lines = append(lines, "related_bead: null")
|
||||
}
|
||||
|
||||
// Reescalation fields
|
||||
if fields.OriginalSeverity != "" {
|
||||
lines = append(lines, fmt.Sprintf("original_severity: %s", fields.OriginalSeverity))
|
||||
} else {
|
||||
lines = append(lines, "original_severity: null")
|
||||
}
|
||||
lines = append(lines, fmt.Sprintf("reescalation_count: %d", fields.ReescalationCount))
|
||||
if fields.LastReescalatedAt != "" {
|
||||
lines = append(lines, fmt.Sprintf("last_reescalated_at: %s", fields.LastReescalatedAt))
|
||||
} else {
|
||||
lines = append(lines, "last_reescalated_at: null")
|
||||
}
|
||||
if fields.LastReescalatedBy != "" {
|
||||
lines = append(lines, fmt.Sprintf("last_reescalated_by: %s", fields.LastReescalatedBy))
|
||||
} else {
|
||||
lines = append(lines, "last_reescalated_by: null")
|
||||
}
|
||||
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
// ParseEscalationFields extracts escalation fields from an issue's description.
|
||||
func ParseEscalationFields(description string) *EscalationFields {
|
||||
fields := &EscalationFields{}
|
||||
|
||||
for _, line := range strings.Split(description, "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
colonIdx := strings.Index(line, ":")
|
||||
if colonIdx == -1 {
|
||||
continue
|
||||
}
|
||||
|
||||
key := strings.TrimSpace(line[:colonIdx])
|
||||
value := strings.TrimSpace(line[colonIdx+1:])
|
||||
if value == "null" || value == "" {
|
||||
value = ""
|
||||
}
|
||||
|
||||
switch strings.ToLower(key) {
|
||||
case "severity":
|
||||
fields.Severity = value
|
||||
case "reason":
|
||||
fields.Reason = value
|
||||
case "source":
|
||||
fields.Source = value
|
||||
case "escalated_by":
|
||||
fields.EscalatedBy = value
|
||||
case "escalated_at":
|
||||
fields.EscalatedAt = value
|
||||
case "acked_by":
|
||||
fields.AckedBy = value
|
||||
case "acked_at":
|
||||
fields.AckedAt = value
|
||||
case "closed_by":
|
||||
fields.ClosedBy = value
|
||||
case "closed_reason":
|
||||
fields.ClosedReason = value
|
||||
case "related_bead":
|
||||
fields.RelatedBead = value
|
||||
case "original_severity":
|
||||
fields.OriginalSeverity = value
|
||||
case "reescalation_count":
|
||||
if n, err := strconv.Atoi(value); err == nil {
|
||||
fields.ReescalationCount = n
|
||||
}
|
||||
case "last_reescalated_at":
|
||||
fields.LastReescalatedAt = value
|
||||
case "last_reescalated_by":
|
||||
fields.LastReescalatedBy = value
|
||||
}
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// CreateEscalationBead creates an escalation bead for tracking escalations.
// The created_by field is populated from BD_ACTOR env var for provenance tracking.
// The bead is a task-type issue labeled gt:escalation, plus a severity:<level>
// label when a severity is set. Returns the Issue parsed from bd's JSON output.
func (b *Beads) CreateEscalationBead(title string, fields *EscalationFields) (*Issue, error) {
	// Serialize structured fields into the description body.
	description := FormatEscalationDescription(title, fields)

	args := []string{"create", "--json",
		"--title=" + title,
		"--description=" + description,
		"--type=task",
		"--labels=gt:escalation",
	}

	// Add severity as a label for easy filtering
	if fields != nil && fields.Severity != "" {
		args = append(args, fmt.Sprintf("--labels=severity:%s", fields.Severity))
	}

	// Default actor from BD_ACTOR env var for provenance tracking
	// Uses getActor() to respect isolated mode (tests)
	if actor := b.getActor(); actor != "" {
		args = append(args, "--actor="+actor)
	}

	out, err := b.run(args...)
	if err != nil {
		return nil, err
	}

	var issue Issue
	if err := json.Unmarshal(out, &issue); err != nil {
		return nil, fmt.Errorf("parsing bd create output: %w", err)
	}

	return &issue, nil
}
|
||||
|
||||
// AckEscalation acknowledges an escalation bead.
// Sets acked_by and acked_at fields, adds "acked" label.
// Returns an error if the issue does not exist or is not labeled gt:escalation.
func (b *Beads) AckEscalation(id, ackedBy string) error {
	// First get current issue to preserve other fields
	issue, err := b.Show(id)
	if err != nil {
		return err
	}

	// Verify it's an escalation
	if !HasLabel(issue, "gt:escalation") {
		return fmt.Errorf("issue %s is not an escalation bead (missing gt:escalation label)", id)
	}

	// Parse existing fields, then stamp the acknowledgement.
	fields := ParseEscalationFields(issue.Description)
	fields.AckedBy = ackedBy
	fields.AckedAt = time.Now().Format(time.RFC3339)

	// Format new description (re-serializes all fields, not just the acked ones)
	description := FormatEscalationDescription(issue.Title, fields)

	return b.Update(id, UpdateOptions{
		Description: &description,
		AddLabels:   []string{"acked"},
	})
}
|
||||
|
||||
// CloseEscalation closes an escalation bead with a resolution reason.
// Sets closed_by and closed_reason fields, closes the issue.
// NOTE(review): this is a two-step operation (description update, then close);
// if the close fails after the update succeeds, the bead is left open but
// labeled "resolved" — confirm whether callers tolerate that partial state.
func (b *Beads) CloseEscalation(id, closedBy, reason string) error {
	// First get current issue to preserve other fields
	issue, err := b.Show(id)
	if err != nil {
		return err
	}

	// Verify it's an escalation
	if !HasLabel(issue, "gt:escalation") {
		return fmt.Errorf("issue %s is not an escalation bead (missing gt:escalation label)", id)
	}

	// Parse existing fields, then record who closed it and why.
	fields := ParseEscalationFields(issue.Description)
	fields.ClosedBy = closedBy
	fields.ClosedReason = reason

	// Format new description
	description := FormatEscalationDescription(issue.Title, fields)

	// Update description first
	if err := b.Update(id, UpdateOptions{
		Description: &description,
		AddLabels:   []string{"resolved"},
	}); err != nil {
		return err
	}

	// Close the issue
	_, err = b.run("close", id, "--reason="+reason)
	return err
}
|
||||
|
||||
// GetEscalationBead retrieves an escalation bead by ID.
|
||||
// Returns nil if not found.
|
||||
func (b *Beads) GetEscalationBead(id string) (*Issue, *EscalationFields, error) {
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if !HasLabel(issue, "gt:escalation") {
|
||||
return nil, nil, fmt.Errorf("issue %s is not an escalation bead (missing gt:escalation label)", id)
|
||||
}
|
||||
|
||||
fields := ParseEscalationFields(issue.Description)
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
// ListEscalations returns all open escalation beads.
|
||||
func (b *Beads) ListEscalations() ([]*Issue, error) {
|
||||
out, err := b.run("list", "--label=gt:escalation", "--status=open", "--json")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issues []*Issue
|
||||
if err := json.Unmarshal(out, &issues); err != nil {
|
||||
return nil, fmt.Errorf("parsing bd list output: %w", err)
|
||||
}
|
||||
|
||||
return issues, nil
|
||||
}
|
||||
|
||||
// ListEscalationsBySeverity returns open escalation beads filtered by severity.
// The severity value must match the severity:<level> label applied at
// creation (or after re-escalation), e.g. "critical", "high".
func (b *Beads) ListEscalationsBySeverity(severity string) ([]*Issue, error) {
	out, err := b.run("list",
		"--label=gt:escalation",
		"--label=severity:"+severity,
		"--status=open",
		"--json",
	)
	if err != nil {
		return nil, err
	}

	var issues []*Issue
	if err := json.Unmarshal(out, &issues); err != nil {
		return nil, fmt.Errorf("parsing bd list output: %w", err)
	}

	return issues, nil
}
|
||||
|
||||
// ListStaleEscalations returns escalations older than the given threshold.
|
||||
// threshold is a duration string like "1h" or "30m".
|
||||
func (b *Beads) ListStaleEscalations(threshold time.Duration) ([]*Issue, error) {
|
||||
// Get all open escalations
|
||||
escalations, err := b.ListEscalations()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cutoff := time.Now().Add(-threshold)
|
||||
var stale []*Issue
|
||||
|
||||
for _, issue := range escalations {
|
||||
// Skip acknowledged escalations
|
||||
if HasLabel(issue, "acked") {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if older than threshold
|
||||
createdAt, err := time.Parse(time.RFC3339, issue.CreatedAt)
|
||||
if err != nil {
|
||||
continue // Skip if can't parse
|
||||
}
|
||||
|
||||
if createdAt.Before(cutoff) {
|
||||
stale = append(stale, issue)
|
||||
}
|
||||
}
|
||||
|
||||
return stale, nil
|
||||
}
|
||||
|
||||
// ReescalationResult holds the result of a reescalation operation.
type ReescalationResult struct {
	ID              string // Escalation bead ID
	Title           string // Escalation title
	OldSeverity     string // Severity before the bump
	NewSeverity     string // Severity after the bump ("critical" when skipped at critical)
	ReescalationNum int    // Reescalation count after this bump
	Skipped         bool   // True when no bump was applied
	SkipReason      string // Human-readable reason when Skipped is true
}
|
||||
|
||||
// ReescalateEscalation bumps the severity of an escalation and updates tracking fields.
// Returns the new severity if successful, or an error.
// reescalatedBy should be the identity of the agent/process doing the reescalation.
// maxReescalations limits how many times an escalation can be bumped (0 = unlimited).
// A bump is skipped (not an error) when the count limit is reached or the
// escalation is already at critical severity.
func (b *Beads) ReescalateEscalation(id, reescalatedBy string, maxReescalations int) (*ReescalationResult, error) {
	// Get the escalation
	issue, fields, err := b.GetEscalationBead(id)
	if err != nil {
		return nil, err
	}
	if issue == nil {
		return nil, fmt.Errorf("escalation not found: %s", id)
	}

	result := &ReescalationResult{
		ID:          id,
		Title:       issue.Title,
		OldSeverity: fields.Severity,
	}

	// Check if already at max reescalations
	if maxReescalations > 0 && fields.ReescalationCount >= maxReescalations {
		result.Skipped = true
		result.SkipReason = fmt.Sprintf("already at max reescalations (%d)", maxReescalations)
		return result, nil
	}

	// Check if already at critical (can't bump further)
	if fields.Severity == "critical" {
		result.Skipped = true
		result.SkipReason = "already at critical severity"
		result.NewSeverity = "critical"
		return result, nil
	}

	// Save original severity on first reescalation
	if fields.OriginalSeverity == "" {
		fields.OriginalSeverity = fields.Severity
	}

	// Bump severity (low -> medium -> high -> critical; unknown -> critical)
	newSeverity := bumpSeverity(fields.Severity)
	fields.Severity = newSeverity
	fields.ReescalationCount++
	fields.LastReescalatedAt = time.Now().Format(time.RFC3339)
	fields.LastReescalatedBy = reescalatedBy

	result.NewSeverity = newSeverity
	result.ReescalationNum = fields.ReescalationCount

	// Format new description
	description := FormatEscalationDescription(issue.Title, fields)

	// Update the bead with new description and severity label.
	// NOTE(review): when the old severity was empty this removes the bare
	// label "severity:" — presumably a no-op, but verify against bd's
	// label-removal behavior.
	if err := b.Update(id, UpdateOptions{
		Description:  &description,
		AddLabels:    []string{"reescalated", "severity:" + newSeverity},
		RemoveLabels: []string{"severity:" + result.OldSeverity},
	}); err != nil {
		return nil, fmt.Errorf("updating escalation: %w", err)
	}

	return result, nil
}
|
||||
|
||||
// bumpSeverity returns the next higher severity level.
// low -> medium -> high -> critical
// Any other input (including "critical" and unknown values) yields "critical".
func bumpSeverity(severity string) string {
	if severity == "low" {
		return "medium"
	}
	if severity == "medium" {
		return "high"
	}
	// "high", "critical", and anything unrecognized all cap at critical.
	return "critical"
}
|
||||
311
internal/beads/beads_group.go
Normal file
311
internal/beads/beads_group.go
Normal file
@@ -0,0 +1,311 @@
|
||||
// Package beads provides group bead management for beads-native messaging.
|
||||
// Groups are named collections of addresses used for mail distribution.
|
||||
package beads
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// GroupFields holds structured fields for group beads.
// These are stored as "key: value" lines in the description and
// round-tripped via FormatGroupDescription / ParseGroupFields.
type GroupFields struct {
	Name      string   // Unique group name (e.g., "ops-team", "all-witnesses")
	Members   []string // Addresses, patterns, or group names (can nest)
	CreatedBy string   // Who created the group
	CreatedAt string   // ISO 8601 timestamp
}
|
||||
|
||||
// FormatGroupDescription creates a description string from group fields.
|
||||
func FormatGroupDescription(title string, fields *GroupFields) string {
|
||||
if fields == nil {
|
||||
return title
|
||||
}
|
||||
|
||||
var lines []string
|
||||
lines = append(lines, title)
|
||||
lines = append(lines, "")
|
||||
lines = append(lines, fmt.Sprintf("name: %s", fields.Name))
|
||||
|
||||
// Members stored as comma-separated list
|
||||
if len(fields.Members) > 0 {
|
||||
lines = append(lines, fmt.Sprintf("members: %s", strings.Join(fields.Members, ",")))
|
||||
} else {
|
||||
lines = append(lines, "members: null")
|
||||
}
|
||||
|
||||
if fields.CreatedBy != "" {
|
||||
lines = append(lines, fmt.Sprintf("created_by: %s", fields.CreatedBy))
|
||||
} else {
|
||||
lines = append(lines, "created_by: null")
|
||||
}
|
||||
|
||||
if fields.CreatedAt != "" {
|
||||
lines = append(lines, fmt.Sprintf("created_at: %s", fields.CreatedAt))
|
||||
} else {
|
||||
lines = append(lines, "created_at: null")
|
||||
}
|
||||
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
// ParseGroupFields extracts group fields from an issue's description.
|
||||
func ParseGroupFields(description string) *GroupFields {
|
||||
fields := &GroupFields{}
|
||||
|
||||
for _, line := range strings.Split(description, "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
colonIdx := strings.Index(line, ":")
|
||||
if colonIdx == -1 {
|
||||
continue
|
||||
}
|
||||
|
||||
key := strings.TrimSpace(line[:colonIdx])
|
||||
value := strings.TrimSpace(line[colonIdx+1:])
|
||||
if value == "null" || value == "" {
|
||||
value = ""
|
||||
}
|
||||
|
||||
switch strings.ToLower(key) {
|
||||
case "name":
|
||||
fields.Name = value
|
||||
case "members":
|
||||
if value != "" {
|
||||
// Parse comma-separated members
|
||||
for _, m := range strings.Split(value, ",") {
|
||||
m = strings.TrimSpace(m)
|
||||
if m != "" {
|
||||
fields.Members = append(fields.Members, m)
|
||||
}
|
||||
}
|
||||
}
|
||||
case "created_by":
|
||||
fields.CreatedBy = value
|
||||
case "created_at":
|
||||
fields.CreatedAt = value
|
||||
}
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// GroupBeadID returns the bead ID for a group name.
// Format: hq-group-<name> (town-level, groups span rigs)
func GroupBeadID(name string) string {
	const prefix = "hq-group-"
	return prefix + name
}
|
||||
|
||||
// CreateGroupBead creates a group bead for mail distribution.
// The ID format is: hq-group-<name> (e.g., hq-group-ops-team)
// Groups are town-level entities (hq- prefix) because they span rigs.
// The created_by field is populated from BD_ACTOR env var for provenance tracking.
// Returns the Issue parsed from bd's JSON output.
func (b *Beads) CreateGroupBead(name string, members []string, createdBy string) (*Issue, error) {
	id := GroupBeadID(name)
	title := fmt.Sprintf("Group: %s", name)

	// Structured fields are serialized into the description body.
	fields := &GroupFields{
		Name:      name,
		Members:   members,
		CreatedBy: createdBy,
		CreatedAt: time.Now().Format(time.RFC3339),
	}

	description := FormatGroupDescription(title, fields)

	args := []string{"create", "--json",
		"--id=" + id,
		"--title=" + title,
		"--description=" + description,
		"--type=task", // Groups use task type with gt:group label
		"--labels=gt:group",
		"--force", // Override prefix check (town beads may have mixed prefixes)
	}

	// Default actor from BD_ACTOR env var for provenance tracking
	// Uses getActor() to respect isolated mode (tests)
	if actor := b.getActor(); actor != "" {
		args = append(args, "--actor="+actor)
	}

	out, err := b.run(args...)
	if err != nil {
		return nil, err
	}

	var issue Issue
	if err := json.Unmarshal(out, &issue); err != nil {
		return nil, fmt.Errorf("parsing bd create output: %w", err)
	}

	return &issue, nil
}
|
||||
|
||||
// GetGroupBead retrieves a group bead by name.
// Returns nil, nil if not found.
// Returns an error if a bead with the derived ID exists but is not
// labeled gt:group.
func (b *Beads) GetGroupBead(name string) (*Issue, *GroupFields, error) {
	id := GroupBeadID(name)
	issue, err := b.Show(id)
	if err != nil {
		// Not-found is reported as (nil, nil, nil), not an error.
		if errors.Is(err, ErrNotFound) {
			return nil, nil, nil
		}
		return nil, nil, err
	}

	if !HasLabel(issue, "gt:group") {
		return nil, nil, fmt.Errorf("bead %s is not a group bead (missing gt:group label)", id)
	}

	fields := ParseGroupFields(issue.Description)
	return issue, fields, nil
}
|
||||
|
||||
// GetGroupByID retrieves a group bead by its full ID.
|
||||
// Returns nil, nil if not found.
|
||||
func (b *Beads) GetGroupByID(id string) (*Issue, *GroupFields, error) {
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if !HasLabel(issue, "gt:group") {
|
||||
return nil, nil, fmt.Errorf("bead %s is not a group bead (missing gt:group label)", id)
|
||||
}
|
||||
|
||||
fields := ParseGroupFields(issue.Description)
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
// UpdateGroupMembers updates the members list for a group.
|
||||
func (b *Beads) UpdateGroupMembers(name string, members []string) error {
|
||||
issue, fields, err := b.GetGroupBead(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return fmt.Errorf("group %q not found", name)
|
||||
}
|
||||
|
||||
fields.Members = members
|
||||
description := FormatGroupDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(issue.ID, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// AddGroupMember adds a member to a group if not already present.
|
||||
func (b *Beads) AddGroupMember(name string, member string) error {
|
||||
issue, fields, err := b.GetGroupBead(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return fmt.Errorf("group %q not found", name)
|
||||
}
|
||||
|
||||
// Check if already a member
|
||||
for _, m := range fields.Members {
|
||||
if m == member {
|
||||
return nil // Already a member
|
||||
}
|
||||
}
|
||||
|
||||
fields.Members = append(fields.Members, member)
|
||||
description := FormatGroupDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(issue.ID, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// RemoveGroupMember removes a member from a group.
|
||||
func (b *Beads) RemoveGroupMember(name string, member string) error {
|
||||
issue, fields, err := b.GetGroupBead(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return fmt.Errorf("group %q not found", name)
|
||||
}
|
||||
|
||||
// Filter out the member
|
||||
var newMembers []string
|
||||
for _, m := range fields.Members {
|
||||
if m != member {
|
||||
newMembers = append(newMembers, m)
|
||||
}
|
||||
}
|
||||
|
||||
fields.Members = newMembers
|
||||
description := FormatGroupDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(issue.ID, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// DeleteGroupBead permanently deletes a group bead.
|
||||
func (b *Beads) DeleteGroupBead(name string) error {
|
||||
id := GroupBeadID(name)
|
||||
_, err := b.run("delete", id, "--hard", "--force")
|
||||
return err
|
||||
}
|
||||
|
||||
// ListGroupBeads returns all group beads.
|
||||
func (b *Beads) ListGroupBeads() (map[string]*GroupFields, error) {
|
||||
out, err := b.run("list", "--label=gt:group", "--json")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issues []*Issue
|
||||
if err := json.Unmarshal(out, &issues); err != nil {
|
||||
return nil, fmt.Errorf("parsing bd list output: %w", err)
|
||||
}
|
||||
|
||||
result := make(map[string]*GroupFields, len(issues))
|
||||
for _, issue := range issues {
|
||||
fields := ParseGroupFields(issue.Description)
|
||||
if fields.Name != "" {
|
||||
result[fields.Name] = fields
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// LookupGroupByName finds a group by its name field (not by ID).
|
||||
// This is used for address resolution where we may not know the full bead ID.
|
||||
func (b *Beads) LookupGroupByName(name string) (*Issue, *GroupFields, error) {
|
||||
// First try direct lookup by standard ID format
|
||||
issue, fields, err := b.GetGroupBead(name)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if issue != nil {
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
// If not found by ID, search all groups by name field
|
||||
groups, err := b.ListGroupBeads()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if fields, ok := groups[name]; ok {
|
||||
// Found by name, now get the full issue
|
||||
id := GroupBeadID(name)
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
return nil, nil, nil // Not found
|
||||
}
|
||||
209
internal/beads/beads_group_test.go
Normal file
209
internal/beads/beads_group_test.go
Normal file
@@ -0,0 +1,209 @@
|
||||
package beads
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFormatGroupDescription(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
title string
|
||||
fields *GroupFields
|
||||
want []string // Lines that should be present
|
||||
}{
|
||||
{
|
||||
name: "basic group",
|
||||
title: "Group: ops-team",
|
||||
fields: &GroupFields{
|
||||
Name: "ops-team",
|
||||
Members: []string{"gastown/crew/max", "gastown/witness"},
|
||||
CreatedBy: "human",
|
||||
CreatedAt: "2024-01-15T10:00:00Z",
|
||||
},
|
||||
want: []string{
|
||||
"Group: ops-team",
|
||||
"name: ops-team",
|
||||
"members: gastown/crew/max,gastown/witness",
|
||||
"created_by: human",
|
||||
"created_at: 2024-01-15T10:00:00Z",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty members",
|
||||
title: "Group: empty",
|
||||
fields: &GroupFields{
|
||||
Name: "empty",
|
||||
Members: nil,
|
||||
CreatedBy: "admin",
|
||||
},
|
||||
want: []string{
|
||||
"name: empty",
|
||||
"members: null",
|
||||
"created_by: admin",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "patterns in members",
|
||||
title: "Group: all-witnesses",
|
||||
fields: &GroupFields{
|
||||
Name: "all-witnesses",
|
||||
Members: []string{"*/witness", "@crew"},
|
||||
},
|
||||
want: []string{
|
||||
"members: */witness,@crew",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "nil fields",
|
||||
title: "Just a title",
|
||||
fields: nil,
|
||||
want: []string{"Just a title"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := FormatGroupDescription(tt.title, tt.fields)
|
||||
for _, line := range tt.want {
|
||||
if !strings.Contains(got, line) {
|
||||
t.Errorf("FormatGroupDescription() missing line %q\ngot:\n%s", line, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseGroupFields(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
description string
|
||||
want *GroupFields
|
||||
}{
|
||||
{
|
||||
name: "full group",
|
||||
description: `Group: ops-team
|
||||
|
||||
name: ops-team
|
||||
members: gastown/crew/max,gastown/witness,*/refinery
|
||||
created_by: human
|
||||
created_at: 2024-01-15T10:00:00Z`,
|
||||
want: &GroupFields{
|
||||
Name: "ops-team",
|
||||
Members: []string{"gastown/crew/max", "gastown/witness", "*/refinery"},
|
||||
CreatedBy: "human",
|
||||
CreatedAt: "2024-01-15T10:00:00Z",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "null members",
|
||||
description: `Group: empty
|
||||
|
||||
name: empty
|
||||
members: null
|
||||
created_by: admin`,
|
||||
want: &GroupFields{
|
||||
Name: "empty",
|
||||
Members: nil,
|
||||
CreatedBy: "admin",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "single member",
|
||||
description: `name: solo
|
||||
members: gastown/crew/max`,
|
||||
want: &GroupFields{
|
||||
Name: "solo",
|
||||
Members: []string{"gastown/crew/max"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty description",
|
||||
description: "",
|
||||
want: &GroupFields{},
|
||||
},
|
||||
{
|
||||
name: "members with spaces",
|
||||
description: `name: spaced
|
||||
members: a, b , c`,
|
||||
want: &GroupFields{
|
||||
Name: "spaced",
|
||||
Members: []string{"a", "b", "c"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := ParseGroupFields(tt.description)
|
||||
if got.Name != tt.want.Name {
|
||||
t.Errorf("Name = %q, want %q", got.Name, tt.want.Name)
|
||||
}
|
||||
if got.CreatedBy != tt.want.CreatedBy {
|
||||
t.Errorf("CreatedBy = %q, want %q", got.CreatedBy, tt.want.CreatedBy)
|
||||
}
|
||||
if got.CreatedAt != tt.want.CreatedAt {
|
||||
t.Errorf("CreatedAt = %q, want %q", got.CreatedAt, tt.want.CreatedAt)
|
||||
}
|
||||
if len(got.Members) != len(tt.want.Members) {
|
||||
t.Errorf("Members count = %d, want %d", len(got.Members), len(tt.want.Members))
|
||||
} else {
|
||||
for i, m := range got.Members {
|
||||
if m != tt.want.Members[i] {
|
||||
t.Errorf("Members[%d] = %q, want %q", i, m, tt.want.Members[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGroupBeadID(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
want string
|
||||
}{
|
||||
{"ops-team", "hq-group-ops-team"},
|
||||
{"all", "hq-group-all"},
|
||||
{"crew-leads", "hq-group-crew-leads"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := GroupBeadID(tt.name); got != tt.want {
|
||||
t.Errorf("GroupBeadID(%q) = %q, want %q", tt.name, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRoundTrip(t *testing.T) {
|
||||
// Test that Format -> Parse preserves data
|
||||
original := &GroupFields{
|
||||
Name: "test-group",
|
||||
Members: []string{"gastown/crew/max", "*/witness", "@town"},
|
||||
CreatedBy: "tester",
|
||||
CreatedAt: "2024-01-15T12:00:00Z",
|
||||
}
|
||||
|
||||
description := FormatGroupDescription("Group: test-group", original)
|
||||
parsed := ParseGroupFields(description)
|
||||
|
||||
if parsed.Name != original.Name {
|
||||
t.Errorf("Name: got %q, want %q", parsed.Name, original.Name)
|
||||
}
|
||||
if parsed.CreatedBy != original.CreatedBy {
|
||||
t.Errorf("CreatedBy: got %q, want %q", parsed.CreatedBy, original.CreatedBy)
|
||||
}
|
||||
if parsed.CreatedAt != original.CreatedAt {
|
||||
t.Errorf("CreatedAt: got %q, want %q", parsed.CreatedAt, original.CreatedAt)
|
||||
}
|
||||
if len(parsed.Members) != len(original.Members) {
|
||||
t.Fatalf("Members count: got %d, want %d", len(parsed.Members), len(original.Members))
|
||||
}
|
||||
for i, m := range original.Members {
|
||||
if parsed.Members[i] != m {
|
||||
t.Errorf("Members[%d]: got %q, want %q", i, parsed.Members[i], m)
|
||||
}
|
||||
}
|
||||
}
|
||||
393
internal/beads/beads_queue.go
Normal file
393
internal/beads/beads_queue.go
Normal file
@@ -0,0 +1,393 @@
|
||||
// Package beads provides queue bead management.
|
||||
package beads
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// QueueFields holds structured fields for queue beads.
// These are stored as "key: value" lines in the description and are
// round-tripped by FormatQueueDescription / ParseQueueFields.
type QueueFields struct {
	Name            string // Queue name (human-readable identifier)
	ClaimPattern    string // Pattern for who can claim from queue (e.g., "gastown/polecats/*"); see MatchClaimPattern
	Status          string // One of the QueueStatus* constants: active, paused, closed
	MaxConcurrency  int    // Maximum number of concurrent workers (0 = unlimited)
	ProcessingOrder string // One of the QueueOrder* constants: fifo, priority (default: fifo)
	AvailableCount  int    // Number of items ready to process
	ProcessingCount int    // Number of items currently being processed
	CompletedCount  int    // Number of items completed
	FailedCount     int    // Number of items that failed
	CreatedBy       string // Who created this queue
	CreatedAt       string // ISO 8601 timestamp of creation
}
|
||||
|
||||
// Queue status constants for QueueFields.Status. Only active queues
// accept claims (see FindEligibleQueues, which skips the other states).
const (
	QueueStatusActive = "active"
	QueueStatusPaused = "paused"
	QueueStatusClosed = "closed"
)
|
||||
|
||||
// Queue processing order constants for QueueFields.ProcessingOrder.
// FIFO is the default applied by ParseQueueFields.
const (
	QueueOrderFIFO     = "fifo"
	QueueOrderPriority = "priority"
)
|
||||
|
||||
// FormatQueueDescription creates a description string from queue fields.
|
||||
func FormatQueueDescription(title string, fields *QueueFields) string {
|
||||
if fields == nil {
|
||||
return title
|
||||
}
|
||||
|
||||
var lines []string
|
||||
lines = append(lines, title)
|
||||
lines = append(lines, "")
|
||||
|
||||
if fields.Name != "" {
|
||||
lines = append(lines, fmt.Sprintf("name: %s", fields.Name))
|
||||
} else {
|
||||
lines = append(lines, "name: null")
|
||||
}
|
||||
|
||||
if fields.ClaimPattern != "" {
|
||||
lines = append(lines, fmt.Sprintf("claim_pattern: %s", fields.ClaimPattern))
|
||||
} else {
|
||||
lines = append(lines, "claim_pattern: *") // Default: anyone can claim
|
||||
}
|
||||
|
||||
if fields.Status != "" {
|
||||
lines = append(lines, fmt.Sprintf("status: %s", fields.Status))
|
||||
} else {
|
||||
lines = append(lines, "status: active")
|
||||
}
|
||||
|
||||
lines = append(lines, fmt.Sprintf("max_concurrency: %d", fields.MaxConcurrency))
|
||||
|
||||
if fields.ProcessingOrder != "" {
|
||||
lines = append(lines, fmt.Sprintf("processing_order: %s", fields.ProcessingOrder))
|
||||
} else {
|
||||
lines = append(lines, "processing_order: fifo")
|
||||
}
|
||||
|
||||
lines = append(lines, fmt.Sprintf("available_count: %d", fields.AvailableCount))
|
||||
lines = append(lines, fmt.Sprintf("processing_count: %d", fields.ProcessingCount))
|
||||
lines = append(lines, fmt.Sprintf("completed_count: %d", fields.CompletedCount))
|
||||
lines = append(lines, fmt.Sprintf("failed_count: %d", fields.FailedCount))
|
||||
|
||||
if fields.CreatedBy != "" {
|
||||
lines = append(lines, fmt.Sprintf("created_by: %s", fields.CreatedBy))
|
||||
}
|
||||
if fields.CreatedAt != "" {
|
||||
lines = append(lines, fmt.Sprintf("created_at: %s", fields.CreatedAt))
|
||||
}
|
||||
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
// ParseQueueFields extracts queue fields from an issue's description.
|
||||
func ParseQueueFields(description string) *QueueFields {
|
||||
fields := &QueueFields{
|
||||
Status: QueueStatusActive,
|
||||
ProcessingOrder: QueueOrderFIFO,
|
||||
ClaimPattern: "*", // Default: anyone can claim
|
||||
}
|
||||
|
||||
for _, line := range strings.Split(description, "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
colonIdx := strings.Index(line, ":")
|
||||
if colonIdx == -1 {
|
||||
continue
|
||||
}
|
||||
|
||||
key := strings.TrimSpace(line[:colonIdx])
|
||||
value := strings.TrimSpace(line[colonIdx+1:])
|
||||
if value == "null" || value == "" {
|
||||
value = ""
|
||||
}
|
||||
|
||||
switch strings.ToLower(key) {
|
||||
case "name":
|
||||
fields.Name = value
|
||||
case "claim_pattern":
|
||||
if value != "" {
|
||||
fields.ClaimPattern = value
|
||||
}
|
||||
case "status":
|
||||
fields.Status = value
|
||||
case "max_concurrency":
|
||||
if v, err := strconv.Atoi(value); err == nil {
|
||||
fields.MaxConcurrency = v
|
||||
}
|
||||
case "processing_order":
|
||||
fields.ProcessingOrder = value
|
||||
case "available_count":
|
||||
if v, err := strconv.Atoi(value); err == nil {
|
||||
fields.AvailableCount = v
|
||||
}
|
||||
case "processing_count":
|
||||
if v, err := strconv.Atoi(value); err == nil {
|
||||
fields.ProcessingCount = v
|
||||
}
|
||||
case "completed_count":
|
||||
if v, err := strconv.Atoi(value); err == nil {
|
||||
fields.CompletedCount = v
|
||||
}
|
||||
case "failed_count":
|
||||
if v, err := strconv.Atoi(value); err == nil {
|
||||
fields.FailedCount = v
|
||||
}
|
||||
case "created_by":
|
||||
fields.CreatedBy = value
|
||||
case "created_at":
|
||||
fields.CreatedAt = value
|
||||
}
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// QueueBeadID returns the queue bead ID for a given queue name:
// hq-q-<name> for town-level queues, gt-q-<name> for rig-level ones.
func QueueBeadID(name string, isTownLevel bool) string {
	prefix := "gt-q-"
	if isTownLevel {
		prefix = "hq-q-"
	}
	return prefix + name
}
|
||||
|
||||
// CreateQueueBead creates a queue bead for tracking work queues.
|
||||
// The ID format is: <prefix>-q-<name> (e.g., gt-q-merge, hq-q-dispatch)
|
||||
// The created_by field is populated from BD_ACTOR env var for provenance tracking.
|
||||
func (b *Beads) CreateQueueBead(id, title string, fields *QueueFields) (*Issue, error) {
|
||||
description := FormatQueueDescription(title, fields)
|
||||
|
||||
args := []string{"create", "--json",
|
||||
"--id=" + id,
|
||||
"--title=" + title,
|
||||
"--description=" + description,
|
||||
"--type=queue",
|
||||
"--labels=gt:queue",
|
||||
}
|
||||
|
||||
// Default actor from BD_ACTOR env var for provenance tracking
|
||||
// Uses getActor() to respect isolated mode (tests)
|
||||
if actor := b.getActor(); actor != "" {
|
||||
args = append(args, "--actor="+actor)
|
||||
}
|
||||
|
||||
out, err := b.run(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issue Issue
|
||||
if err := json.Unmarshal(out, &issue); err != nil {
|
||||
return nil, fmt.Errorf("parsing bd create output: %w", err)
|
||||
}
|
||||
|
||||
return &issue, nil
|
||||
}
|
||||
|
||||
// GetQueueBead retrieves a queue bead by ID.
|
||||
// Returns nil if not found.
|
||||
func (b *Beads) GetQueueBead(id string) (*Issue, *QueueFields, error) {
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if !HasLabel(issue, "gt:queue") {
|
||||
return nil, nil, fmt.Errorf("issue %s is not a queue bead (missing gt:queue label)", id)
|
||||
}
|
||||
|
||||
fields := ParseQueueFields(issue.Description)
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
// UpdateQueueFields updates the fields of a queue bead.
|
||||
func (b *Beads) UpdateQueueFields(id string, fields *QueueFields) error {
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
description := FormatQueueDescription(issue.Title, fields)
|
||||
return b.Update(id, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// UpdateQueueCounts updates the count fields of a queue bead.
|
||||
// This is a convenience method for incrementing/decrementing counts.
|
||||
func (b *Beads) UpdateQueueCounts(id string, available, processing, completed, failed int) error {
|
||||
issue, currentFields, err := b.GetQueueBead(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
currentFields.AvailableCount = available
|
||||
currentFields.ProcessingCount = processing
|
||||
currentFields.CompletedCount = completed
|
||||
currentFields.FailedCount = failed
|
||||
|
||||
return b.UpdateQueueFields(id, currentFields)
|
||||
}
|
||||
|
||||
// UpdateQueueStatus updates the status of a queue bead.
|
||||
func (b *Beads) UpdateQueueStatus(id, status string) error {
|
||||
// Validate status
|
||||
if status != QueueStatusActive && status != QueueStatusPaused && status != QueueStatusClosed {
|
||||
return fmt.Errorf("invalid queue status %q: must be active, paused, or closed", status)
|
||||
}
|
||||
|
||||
issue, currentFields, err := b.GetQueueBead(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
currentFields.Status = status
|
||||
return b.UpdateQueueFields(id, currentFields)
|
||||
}
|
||||
|
||||
// ListQueueBeads returns all queue beads.
|
||||
func (b *Beads) ListQueueBeads() (map[string]*Issue, error) {
|
||||
out, err := b.run("list", "--label=gt:queue", "--json")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issues []*Issue
|
||||
if err := json.Unmarshal(out, &issues); err != nil {
|
||||
return nil, fmt.Errorf("parsing bd list output: %w", err)
|
||||
}
|
||||
|
||||
result := make(map[string]*Issue, len(issues))
|
||||
for _, issue := range issues {
|
||||
result[issue.ID] = issue
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// DeleteQueueBead permanently deletes a queue bead.
|
||||
// Uses --hard --force for immediate permanent deletion (no tombstone).
|
||||
func (b *Beads) DeleteQueueBead(id string) error {
|
||||
_, err := b.run("delete", id, "--hard", "--force")
|
||||
return err
|
||||
}
|
||||
|
||||
// LookupQueueByName finds a queue by its name field (not by ID).
|
||||
// This is used for address resolution where we may not know the full bead ID.
|
||||
func (b *Beads) LookupQueueByName(name string) (*Issue, *QueueFields, error) {
|
||||
// First try direct lookup by standard ID formats (town and rig level)
|
||||
for _, isTownLevel := range []bool{true, false} {
|
||||
id := QueueBeadID(name, isTownLevel)
|
||||
issue, fields, err := b.GetQueueBead(id)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if issue != nil {
|
||||
return issue, fields, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If not found by ID, search all queues by name field
|
||||
queues, err := b.ListQueueBeads()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
for _, issue := range queues {
|
||||
fields := ParseQueueFields(issue.Description)
|
||||
if fields.Name == name {
|
||||
return issue, fields, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil, nil // Not found
|
||||
}
|
||||
|
||||
// MatchClaimPattern checks if an identity matches a claim pattern.
// Patterns support:
//   - "*" matches anyone
//   - "gastown/polecats/*" matches any polecat in gastown rig
//   - "*/witness" matches any witness role across rigs
//   - Exact match for specific identities
//
// Only single-star patterns are supported; the starred segment must
// not span a "/" (so "gastown/polecats/*" does not match nested
// paths). Unsupported patterns (multiple stars) match nothing.
//
// Bug fix: when the pattern's literal prefix and suffix overlap in a
// short identity (e.g. pattern "ab*ba" vs identity "aba"), the old
// code sliced out of range and panicked; a length guard now rejects
// such identities instead.
func MatchClaimPattern(pattern, identity string) bool {
	// Wildcard matches anyone.
	if pattern == "*" {
		return true
	}

	// Exact match.
	if pattern == identity {
		return true
	}

	if !strings.Contains(pattern, "*") {
		return false
	}

	// Single-star glob: pattern is <prefix>*<suffix>.
	parts := strings.Split(pattern, "*")
	if len(parts) != 2 {
		return false
	}
	prefix, suffix := parts[0], parts[1]

	// Guard: prefix and suffix must fit without overlapping, otherwise
	// the middle slice below would be out of range.
	if len(identity) < len(prefix)+len(suffix) {
		return false
	}
	if !strings.HasPrefix(identity, prefix) || !strings.HasSuffix(identity, suffix) {
		return false
	}

	// The star may only stand in for a single path segment.
	middle := identity[len(prefix) : len(identity)-len(suffix)]
	return !strings.Contains(middle, "/")
}
|
||||
|
||||
// FindEligibleQueues returns all queue beads that the given identity can claim from.
|
||||
func (b *Beads) FindEligibleQueues(identity string) ([]*Issue, []*QueueFields, error) {
|
||||
queues, err := b.ListQueueBeads()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var eligibleIssues []*Issue
|
||||
var eligibleFields []*QueueFields
|
||||
|
||||
for _, issue := range queues {
|
||||
fields := ParseQueueFields(issue.Description)
|
||||
|
||||
// Skip inactive queues
|
||||
if fields.Status != QueueStatusActive {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if identity matches claim pattern
|
||||
if MatchClaimPattern(fields.ClaimPattern, identity) {
|
||||
eligibleIssues = append(eligibleIssues, issue)
|
||||
eligibleFields = append(eligibleFields, fields)
|
||||
}
|
||||
}
|
||||
|
||||
return eligibleIssues, eligibleFields, nil
|
||||
}
|
||||
301
internal/beads/beads_queue_test.go
Normal file
301
internal/beads/beads_queue_test.go
Normal file
@@ -0,0 +1,301 @@
|
||||
package beads
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMatchClaimPattern(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
pattern string
|
||||
identity string
|
||||
want bool
|
||||
}{
|
||||
// Wildcard matches anyone
|
||||
{
|
||||
name: "wildcard matches anyone",
|
||||
pattern: "*",
|
||||
identity: "gastown/crew/max",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "wildcard matches town-level agent",
|
||||
pattern: "*",
|
||||
identity: "mayor/",
|
||||
want: true,
|
||||
},
|
||||
|
||||
// Exact match
|
||||
{
|
||||
name: "exact match",
|
||||
pattern: "gastown/crew/max",
|
||||
identity: "gastown/crew/max",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "exact match fails on different identity",
|
||||
pattern: "gastown/crew/max",
|
||||
identity: "gastown/crew/nux",
|
||||
want: false,
|
||||
},
|
||||
|
||||
// Suffix wildcard
|
||||
{
|
||||
name: "suffix wildcard matches",
|
||||
pattern: "gastown/polecats/*",
|
||||
identity: "gastown/polecats/capable",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "suffix wildcard matches different name",
|
||||
pattern: "gastown/polecats/*",
|
||||
identity: "gastown/polecats/nux",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "suffix wildcard doesn't match nested path",
|
||||
pattern: "gastown/polecats/*",
|
||||
identity: "gastown/polecats/sub/capable",
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "suffix wildcard doesn't match different rig",
|
||||
pattern: "gastown/polecats/*",
|
||||
identity: "bartertown/polecats/capable",
|
||||
want: false,
|
||||
},
|
||||
|
||||
// Prefix wildcard
|
||||
{
|
||||
name: "prefix wildcard matches",
|
||||
pattern: "*/witness",
|
||||
identity: "gastown/witness",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "prefix wildcard matches different rig",
|
||||
pattern: "*/witness",
|
||||
identity: "bartertown/witness",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "prefix wildcard doesn't match different role",
|
||||
pattern: "*/witness",
|
||||
identity: "gastown/refinery",
|
||||
want: false,
|
||||
},
|
||||
|
||||
// Crew patterns
|
||||
{
|
||||
name: "crew wildcard",
|
||||
pattern: "gastown/crew/*",
|
||||
identity: "gastown/crew/max",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "crew wildcard matches any crew member",
|
||||
pattern: "gastown/crew/*",
|
||||
identity: "gastown/crew/jack",
|
||||
want: true,
|
||||
},
|
||||
|
||||
// Edge cases
|
||||
{
|
||||
name: "empty identity doesn't match",
|
||||
pattern: "*",
|
||||
identity: "",
|
||||
want: true, // * matches anything
|
||||
},
|
||||
{
|
||||
name: "empty pattern doesn't match",
|
||||
pattern: "",
|
||||
identity: "gastown/crew/max",
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := MatchClaimPattern(tt.pattern, tt.identity)
|
||||
if got != tt.want {
|
||||
t.Errorf("MatchClaimPattern(%q, %q) = %v, want %v",
|
||||
tt.pattern, tt.identity, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatQueueDescription(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
title string
|
||||
fields *QueueFields
|
||||
want []string // Lines that should be present
|
||||
}{
|
||||
{
|
||||
name: "basic queue",
|
||||
title: "Queue: work-requests",
|
||||
fields: &QueueFields{
|
||||
Name: "work-requests",
|
||||
ClaimPattern: "gastown/crew/*",
|
||||
Status: QueueStatusActive,
|
||||
},
|
||||
want: []string{
|
||||
"Queue: work-requests",
|
||||
"name: work-requests",
|
||||
"claim_pattern: gastown/crew/*",
|
||||
"status: active",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "queue with default claim pattern",
|
||||
title: "Queue: public",
|
||||
fields: &QueueFields{
|
||||
Name: "public",
|
||||
Status: QueueStatusActive,
|
||||
},
|
||||
want: []string{
|
||||
"name: public",
|
||||
"claim_pattern: *", // Default
|
||||
"status: active",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "queue with counts",
|
||||
title: "Queue: processing",
|
||||
fields: &QueueFields{
|
||||
Name: "processing",
|
||||
ClaimPattern: "*/refinery",
|
||||
Status: QueueStatusActive,
|
||||
AvailableCount: 5,
|
||||
ProcessingCount: 2,
|
||||
CompletedCount: 10,
|
||||
FailedCount: 1,
|
||||
},
|
||||
want: []string{
|
||||
"name: processing",
|
||||
"claim_pattern: */refinery",
|
||||
"available_count: 5",
|
||||
"processing_count: 2",
|
||||
"completed_count: 10",
|
||||
"failed_count: 1",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "nil fields",
|
||||
title: "Just Title",
|
||||
fields: nil,
|
||||
want: []string{"Just Title"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := FormatQueueDescription(tt.title, tt.fields)
|
||||
for _, line := range tt.want {
|
||||
if !strings.Contains(got, line) {
|
||||
t.Errorf("FormatQueueDescription() missing line %q in:\n%s", line, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseQueueFields(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
description string
|
||||
wantName string
|
||||
wantPattern string
|
||||
wantStatus string
|
||||
}{
|
||||
{
|
||||
name: "basic queue",
|
||||
description: `Queue: work-requests
|
||||
|
||||
name: work-requests
|
||||
claim_pattern: gastown/crew/*
|
||||
status: active`,
|
||||
wantName: "work-requests",
|
||||
wantPattern: "gastown/crew/*",
|
||||
wantStatus: QueueStatusActive,
|
||||
},
|
||||
{
|
||||
name: "queue with defaults",
|
||||
description: `Queue: minimal
|
||||
|
||||
name: minimal`,
|
||||
wantName: "minimal",
|
||||
wantPattern: "*", // Default
|
||||
wantStatus: QueueStatusActive,
|
||||
},
|
||||
{
|
||||
name: "empty description",
|
||||
description: "",
|
||||
wantName: "",
|
||||
wantPattern: "*", // Default
|
||||
wantStatus: QueueStatusActive,
|
||||
},
|
||||
{
|
||||
name: "queue with counts",
|
||||
description: `Queue: processing
|
||||
|
||||
name: processing
|
||||
claim_pattern: */refinery
|
||||
status: paused
|
||||
available_count: 5
|
||||
processing_count: 2`,
|
||||
wantName: "processing",
|
||||
wantPattern: "*/refinery",
|
||||
wantStatus: QueueStatusPaused,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := ParseQueueFields(tt.description)
|
||||
if got.Name != tt.wantName {
|
||||
t.Errorf("Name = %q, want %q", got.Name, tt.wantName)
|
||||
}
|
||||
if got.ClaimPattern != tt.wantPattern {
|
||||
t.Errorf("ClaimPattern = %q, want %q", got.ClaimPattern, tt.wantPattern)
|
||||
}
|
||||
if got.Status != tt.wantStatus {
|
||||
t.Errorf("Status = %q, want %q", got.Status, tt.wantStatus)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueueBeadID(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
queueName string
|
||||
isTownLevel bool
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "town-level queue",
|
||||
queueName: "dispatch",
|
||||
isTownLevel: true,
|
||||
want: "hq-q-dispatch",
|
||||
},
|
||||
{
|
||||
name: "rig-level queue",
|
||||
queueName: "merge",
|
||||
isTownLevel: false,
|
||||
want: "gt-q-merge",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := QueueBeadID(tt.queueName, tt.isTownLevel)
|
||||
if got != tt.want {
|
||||
t.Errorf("QueueBeadID(%q, %v) = %q, want %q",
|
||||
tt.queueName, tt.isTownLevel, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -23,6 +23,9 @@ import (
|
||||
// this indicates an errant redirect file that should be removed. The function logs a
|
||||
// warning and returns the original beads directory.
|
||||
func ResolveBeadsDir(workDir string) string {
|
||||
if filepath.Base(workDir) == ".beads" {
|
||||
workDir = filepath.Dir(workDir)
|
||||
}
|
||||
beadsDir := filepath.Join(workDir, ".beads")
|
||||
redirectPath := filepath.Join(beadsDir, "redirect")
|
||||
|
||||
|
||||
@@ -4,7 +4,6 @@ package beads
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@@ -85,9 +84,13 @@ func (b *Beads) CreateRigBead(id, title string, fields *RigFields) (*Issue, erro
|
||||
"--description=" + description,
|
||||
"--labels=gt:rig",
|
||||
}
|
||||
if NeedsForceForID(id) {
|
||||
args = append(args, "--force")
|
||||
}
|
||||
|
||||
// Default actor from BD_ACTOR env var for provenance tracking
|
||||
if actor := os.Getenv("BD_ACTOR"); actor != "" {
|
||||
// Uses getActor() to respect isolated mode (tests)
|
||||
if actor := b.getActor(); actor != "" {
|
||||
args = append(args, "--actor="+actor)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,11 @@
|
||||
// Package beads provides role bead management.
|
||||
//
|
||||
// DEPRECATED: Role beads are deprecated. Role definitions are now config-based.
|
||||
// See internal/config/roles/*.toml and config-based-roles.md for the new system.
|
||||
//
|
||||
// This file is kept for backward compatibility with existing role beads but
|
||||
// new code should use config.LoadRoleDefinition() instead of reading role beads.
|
||||
// The daemon no longer uses role beads as of Phase 2 (config-based roles).
|
||||
package beads
|
||||
|
||||
import (
|
||||
@@ -6,10 +13,12 @@ import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Role bead ID naming convention:
|
||||
// Role beads are stored in town beads (~/.beads/) with hq- prefix.
|
||||
// DEPRECATED: Role bead ID naming convention is no longer used.
|
||||
// Role definitions are now config-based (internal/config/roles/*.toml).
|
||||
//
|
||||
// Canonical format: hq-<role>-role
|
||||
// Role beads were stored in town beads (~/.beads/) with hq- prefix.
|
||||
//
|
||||
// Canonical format was: hq-<role>-role
|
||||
//
|
||||
// Examples:
|
||||
// - hq-mayor-role
|
||||
@@ -19,8 +28,8 @@ import (
|
||||
// - hq-crew-role
|
||||
// - hq-polecat-role
|
||||
//
|
||||
// Use RoleBeadIDTown() to get canonical role bead IDs.
|
||||
// The legacy RoleBeadID() function returns gt-<role>-role for backward compatibility.
|
||||
// Legacy functions RoleBeadID() and RoleBeadIDTown() still work for
|
||||
// backward compatibility but should not be used in new code.
|
||||
|
||||
// RoleBeadID returns the role bead ID for a given role type.
|
||||
// Role beads define lifecycle configuration for each agent type.
|
||||
@@ -67,6 +76,9 @@ func PolecatRoleBeadID() string {
|
||||
|
||||
// GetRoleConfig looks up a role bead and returns its parsed RoleConfig.
|
||||
// Returns nil, nil if the role bead doesn't exist or has no config.
|
||||
//
|
||||
// Deprecated: Use config.LoadRoleDefinition() instead. Role definitions
|
||||
// are now config-based, not stored as beads.
|
||||
func (b *Beads) GetRoleConfig(roleBeadID string) (*RoleConfig, error) {
|
||||
issue, err := b.Show(roleBeadID)
|
||||
if err != nil {
|
||||
@@ -92,3 +104,57 @@ func HasLabel(issue *Issue, label string) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// RoleBeadDef defines a role bead's metadata: the bead ID plus the
// human-readable title and description used when the bead was created.
//
// Deprecated: Role beads are no longer created. Role definitions are
// now config-based (internal/config/roles/*.toml). This type is kept
// only for backward compatibility with existing role beads.
type RoleBeadDef struct {
	ID    string // e.g., "hq-witness-role"
	Title string // e.g., "Witness Role"
	Desc  string // Description of the role
}
|
||||
|
||||
// AllRoleBeadDefs returns all role bead definitions.
|
||||
//
|
||||
// Deprecated: Role beads are no longer created by gt install or gt doctor.
|
||||
// This function is kept for backward compatibility only.
|
||||
func AllRoleBeadDefs() []RoleBeadDef {
|
||||
return []RoleBeadDef{
|
||||
{
|
||||
ID: MayorRoleBeadIDTown(),
|
||||
Title: "Mayor Role",
|
||||
Desc: "Role definition for Mayor agents. Global coordinator for cross-rig work.",
|
||||
},
|
||||
{
|
||||
ID: DeaconRoleBeadIDTown(),
|
||||
Title: "Deacon Role",
|
||||
Desc: "Role definition for Deacon agents. Daemon beacon for heartbeats and monitoring.",
|
||||
},
|
||||
{
|
||||
ID: DogRoleBeadIDTown(),
|
||||
Title: "Dog Role",
|
||||
Desc: "Role definition for Dog agents. Town-level workers for cross-rig tasks.",
|
||||
},
|
||||
{
|
||||
ID: WitnessRoleBeadIDTown(),
|
||||
Title: "Witness Role",
|
||||
Desc: "Role definition for Witness agents. Per-rig worker monitor with progressive nudging.",
|
||||
},
|
||||
{
|
||||
ID: RefineryRoleBeadIDTown(),
|
||||
Title: "Refinery Role",
|
||||
Desc: "Role definition for Refinery agents. Merge queue processor with verification gates.",
|
||||
},
|
||||
{
|
||||
ID: PolecatRoleBeadIDTown(),
|
||||
Title: "Polecat Role",
|
||||
Desc: "Role definition for Polecat agents. Ephemeral workers for batch work dispatch.",
|
||||
},
|
||||
{
|
||||
ID: CrewRoleBeadIDTown(),
|
||||
Title: "Crew Role",
|
||||
Desc: "Role definition for Crew agents. Persistent user-managed workspaces.",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package beads
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
@@ -1799,3 +1800,577 @@ func TestSetupRedirect(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TestAgentBeadTombstoneBug demonstrates the bd bug where `bd delete --hard --force`
|
||||
// creates tombstones instead of truly deleting records.
|
||||
//
|
||||
//
|
||||
// This test documents the bug behavior:
|
||||
// 1. Create agent bead
|
||||
// 2. Delete with --hard --force (supposed to permanently delete)
|
||||
// 3. BUG: Tombstone is created instead
|
||||
// 4. BUG: bd create fails with UNIQUE constraint
|
||||
// 5. BUG: bd reopen fails with "issue not found" (tombstones are invisible)
|
||||
func TestAgentBeadTombstoneBug(t *testing.T) {
|
||||
// Skip: bd CLI 0.47.2 has a bug where database writes don't commit
|
||||
// ("sql: database is closed" during auto-flush). This blocks all tests
|
||||
// that need to create issues. See internal issue for tracking.
|
||||
t.Skip("bd CLI 0.47.2 bug: database writes don't commit")
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create isolated beads instance and initialize database
|
||||
bd := NewIsolated(tmpDir)
|
||||
if err := bd.Init("test"); err != nil {
|
||||
t.Fatalf("bd init: %v", err)
|
||||
}
|
||||
|
||||
agentID := "test-testrig-polecat-tombstone"
|
||||
|
||||
// Step 1: Create agent bead
|
||||
_, err := bd.CreateAgentBead(agentID, "Test agent", &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "spawning",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("CreateAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Step 2: Delete with --hard --force (supposed to permanently delete)
|
||||
err = bd.DeleteAgentBead(agentID)
|
||||
if err != nil {
|
||||
t.Fatalf("DeleteAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Step 3: BUG - Tombstone exists (check via bd list --status=tombstone)
|
||||
out, err := bd.run("list", "--status=tombstone", "--json")
|
||||
if err != nil {
|
||||
t.Fatalf("list tombstones: %v", err)
|
||||
}
|
||||
|
||||
// Parse to check if our agent is in the tombstone list
|
||||
var tombstones []Issue
|
||||
if err := json.Unmarshal(out, &tombstones); err != nil {
|
||||
t.Fatalf("parse tombstones: %v", err)
|
||||
}
|
||||
|
||||
foundTombstone := false
|
||||
for _, ts := range tombstones {
|
||||
if ts.ID == agentID {
|
||||
foundTombstone = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !foundTombstone {
|
||||
// If bd ever fixes the --hard flag, this test will fail here
|
||||
// That's a good thing - it means the bug is fixed!
|
||||
t.Skip("bd --hard appears to be fixed (no tombstone created) - update this test")
|
||||
}
|
||||
|
||||
// Step 4: BUG - bd create fails with UNIQUE constraint
|
||||
_, err = bd.CreateAgentBead(agentID, "Test agent 2", &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "spawning",
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatal("expected UNIQUE constraint error, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "UNIQUE constraint") {
|
||||
t.Errorf("expected UNIQUE constraint error, got: %v", err)
|
||||
}
|
||||
|
||||
// Step 5: BUG - bd reopen fails (tombstones are invisible)
|
||||
_, err = bd.run("reopen", agentID, "--reason=test")
|
||||
if err == nil {
|
||||
t.Fatal("expected reopen to fail on tombstone, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "no issue found") && !strings.Contains(err.Error(), "issue not found") {
|
||||
t.Errorf("expected 'issue not found' error, got: %v", err)
|
||||
}
|
||||
|
||||
t.Log("BUG CONFIRMED: bd delete --hard creates tombstones that block recreation")
|
||||
}
|
||||
|
||||
// TestAgentBeadCloseReopenWorkaround demonstrates the workaround for the tombstone bug:
|
||||
// use Close instead of Delete, then Reopen works.
|
||||
func TestAgentBeadCloseReopenWorkaround(t *testing.T) {
|
||||
t.Skip("bd CLI 0.47.2 bug: database writes don't commit")
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
bd := NewIsolated(tmpDir)
|
||||
if err := bd.Init("test"); err != nil {
|
||||
t.Fatalf("bd init: %v", err)
|
||||
}
|
||||
|
||||
agentID := "test-testrig-polecat-closereopen"
|
||||
|
||||
// Step 1: Create agent bead
|
||||
_, err := bd.CreateAgentBead(agentID, "Test agent", &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "spawning",
|
||||
HookBead: "test-task-1",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("CreateAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Step 2: Close (not delete) - this is the workaround
|
||||
err = bd.CloseAndClearAgentBead(agentID, "polecat removed")
|
||||
if err != nil {
|
||||
t.Fatalf("CloseAndClearAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Step 3: Verify bead is closed (not tombstone)
|
||||
issue, err := bd.Show(agentID)
|
||||
if err != nil {
|
||||
t.Fatalf("Show after close: %v", err)
|
||||
}
|
||||
if issue.Status != "closed" {
|
||||
t.Errorf("status = %q, want 'closed'", issue.Status)
|
||||
}
|
||||
|
||||
// Step 4: Reopen works on closed beads
|
||||
_, err = bd.run("reopen", agentID, "--reason=re-spawning")
|
||||
if err != nil {
|
||||
t.Fatalf("reopen failed: %v", err)
|
||||
}
|
||||
|
||||
// Step 5: Verify bead is open again
|
||||
issue, err = bd.Show(agentID)
|
||||
if err != nil {
|
||||
t.Fatalf("Show after reopen: %v", err)
|
||||
}
|
||||
if issue.Status != "open" {
|
||||
t.Errorf("status = %q, want 'open'", issue.Status)
|
||||
}
|
||||
|
||||
t.Log("WORKAROUND CONFIRMED: Close + Reopen works for agent bead lifecycle")
|
||||
}
|
||||
|
||||
// TestCreateOrReopenAgentBead_ClosedBead tests that CreateOrReopenAgentBead
|
||||
// successfully reopens a closed agent bead and updates its fields.
|
||||
func TestCreateOrReopenAgentBead_ClosedBead(t *testing.T) {
|
||||
t.Skip("bd CLI 0.47.2 bug: database writes don't commit")
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
bd := NewIsolated(tmpDir)
|
||||
if err := bd.Init("test"); err != nil {
|
||||
t.Fatalf("bd init: %v", err)
|
||||
}
|
||||
|
||||
agentID := "test-testrig-polecat-lifecycle"
|
||||
|
||||
// Simulate polecat lifecycle: spawn → nuke → respawn
|
||||
|
||||
// Spawn 1: Create agent bead with first task
|
||||
issue1, err := bd.CreateOrReopenAgentBead(agentID, agentID, &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "spawning",
|
||||
HookBead: "test-task-1",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Spawn 1 - CreateOrReopenAgentBead: %v", err)
|
||||
}
|
||||
if issue1.Status != "open" {
|
||||
t.Errorf("Spawn 1: status = %q, want 'open'", issue1.Status)
|
||||
}
|
||||
|
||||
// Nuke 1: Close agent bead (workaround for tombstone bug)
|
||||
err = bd.CloseAndClearAgentBead(agentID, "polecat nuked")
|
||||
if err != nil {
|
||||
t.Fatalf("Nuke 1 - CloseAndClearAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Spawn 2: CreateOrReopenAgentBead should reopen and update
|
||||
issue2, err := bd.CreateOrReopenAgentBead(agentID, agentID, &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "spawning",
|
||||
HookBead: "test-task-2", // Different task
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Spawn 2 - CreateOrReopenAgentBead: %v", err)
|
||||
}
|
||||
if issue2.Status != "open" {
|
||||
t.Errorf("Spawn 2: status = %q, want 'open'", issue2.Status)
|
||||
}
|
||||
|
||||
// Verify the hook was updated to the new task
|
||||
fields := ParseAgentFields(issue2.Description)
|
||||
if fields.HookBead != "test-task-2" {
|
||||
t.Errorf("Spawn 2: hook_bead = %q, want 'test-task-2'", fields.HookBead)
|
||||
}
|
||||
|
||||
// Nuke 2: Close again
|
||||
err = bd.CloseAndClearAgentBead(agentID, "polecat nuked again")
|
||||
if err != nil {
|
||||
t.Fatalf("Nuke 2 - CloseAndClearAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Spawn 3: Should still work
|
||||
issue3, err := bd.CreateOrReopenAgentBead(agentID, agentID, &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "spawning",
|
||||
HookBead: "test-task-3",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Spawn 3 - CreateOrReopenAgentBead: %v", err)
|
||||
}
|
||||
|
||||
fields = ParseAgentFields(issue3.Description)
|
||||
if fields.HookBead != "test-task-3" {
|
||||
t.Errorf("Spawn 3: hook_bead = %q, want 'test-task-3'", fields.HookBead)
|
||||
}
|
||||
|
||||
t.Log("LIFECYCLE TEST PASSED: spawn → nuke → respawn works with close/reopen")
|
||||
}
|
||||
|
||||
// TestCloseAndClearAgentBead_FieldClearing tests that CloseAndClearAgentBead clears all mutable
|
||||
// fields to emulate delete --force --hard behavior. This ensures reopened agent
|
||||
// beads don't have stale state from previous lifecycle.
|
||||
func TestCloseAndClearAgentBead_FieldClearing(t *testing.T) {
|
||||
t.Skip("bd CLI 0.47.2 bug: database writes don't commit")
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
bd := NewIsolated(tmpDir)
|
||||
if err := bd.Init("test"); err != nil {
|
||||
t.Fatalf("bd init: %v", err)
|
||||
}
|
||||
|
||||
// Test cases for field clearing permutations
|
||||
tests := []struct {
|
||||
name string
|
||||
fields *AgentFields
|
||||
reason string
|
||||
}{
|
||||
{
|
||||
name: "all_fields_populated",
|
||||
fields: &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "running",
|
||||
HookBead: "test-issue-123",
|
||||
CleanupStatus: "clean",
|
||||
ActiveMR: "test-mr-456",
|
||||
NotificationLevel: "normal",
|
||||
},
|
||||
reason: "polecat completed work",
|
||||
},
|
||||
{
|
||||
name: "only_hook_bead",
|
||||
fields: &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "spawning",
|
||||
HookBead: "test-issue-789",
|
||||
},
|
||||
reason: "polecat nuked",
|
||||
},
|
||||
{
|
||||
name: "only_active_mr",
|
||||
fields: &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "running",
|
||||
ActiveMR: "test-mr-abc",
|
||||
},
|
||||
reason: "",
|
||||
},
|
||||
{
|
||||
name: "only_cleanup_status",
|
||||
fields: &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "idle",
|
||||
CleanupStatus: "has_uncommitted",
|
||||
},
|
||||
reason: "cleanup required",
|
||||
},
|
||||
{
|
||||
name: "no_mutable_fields",
|
||||
fields: &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "spawning",
|
||||
},
|
||||
reason: "fresh spawn closed",
|
||||
},
|
||||
{
|
||||
name: "polecat_with_all_field_types",
|
||||
fields: &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "processing",
|
||||
HookBead: "test-task-xyz",
|
||||
ActiveMR: "test-mr-processing",
|
||||
CleanupStatus: "has_uncommitted",
|
||||
NotificationLevel: "verbose",
|
||||
},
|
||||
reason: "comprehensive cleanup",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Use tc.name for suffix to avoid hash-like patterns (e.g., single digits)
|
||||
// that trigger bd's isLikelyHash() prefix extraction in v0.47.1+
|
||||
agentID := fmt.Sprintf("test-testrig-%s-%s", tc.fields.RoleType, tc.name)
|
||||
|
||||
// Step 1: Create agent bead with specified fields
|
||||
_, err := bd.CreateAgentBead(agentID, "Test agent", tc.fields)
|
||||
if err != nil {
|
||||
t.Fatalf("CreateAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Verify fields were set
|
||||
issue, err := bd.Show(agentID)
|
||||
if err != nil {
|
||||
t.Fatalf("Show before close: %v", err)
|
||||
}
|
||||
beforeFields := ParseAgentFields(issue.Description)
|
||||
if tc.fields.HookBead != "" && beforeFields.HookBead != tc.fields.HookBead {
|
||||
t.Errorf("before close: hook_bead = %q, want %q", beforeFields.HookBead, tc.fields.HookBead)
|
||||
}
|
||||
|
||||
// Step 2: Close the agent bead
|
||||
err = bd.CloseAndClearAgentBead(agentID, tc.reason)
|
||||
if err != nil {
|
||||
t.Fatalf("CloseAndClearAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Step 3: Verify bead is closed
|
||||
issue, err = bd.Show(agentID)
|
||||
if err != nil {
|
||||
t.Fatalf("Show after close: %v", err)
|
||||
}
|
||||
if issue.Status != "closed" {
|
||||
t.Errorf("status = %q, want 'closed'", issue.Status)
|
||||
}
|
||||
|
||||
// Step 4: Verify mutable fields were cleared
|
||||
afterFields := ParseAgentFields(issue.Description)
|
||||
|
||||
// hook_bead should be cleared (empty or "null")
|
||||
if afterFields.HookBead != "" {
|
||||
t.Errorf("after close: hook_bead = %q, want empty (was %q)", afterFields.HookBead, tc.fields.HookBead)
|
||||
}
|
||||
|
||||
// active_mr should be cleared
|
||||
if afterFields.ActiveMR != "" {
|
||||
t.Errorf("after close: active_mr = %q, want empty (was %q)", afterFields.ActiveMR, tc.fields.ActiveMR)
|
||||
}
|
||||
|
||||
// cleanup_status should be cleared
|
||||
if afterFields.CleanupStatus != "" {
|
||||
t.Errorf("after close: cleanup_status = %q, want empty (was %q)", afterFields.CleanupStatus, tc.fields.CleanupStatus)
|
||||
}
|
||||
|
||||
// agent_state should be "closed"
|
||||
if afterFields.AgentState != "closed" {
|
||||
t.Errorf("after close: agent_state = %q, want 'closed' (was %q)", afterFields.AgentState, tc.fields.AgentState)
|
||||
}
|
||||
|
||||
// Immutable fields should be preserved
|
||||
if afterFields.RoleType != tc.fields.RoleType {
|
||||
t.Errorf("after close: role_type = %q, want %q (should be preserved)", afterFields.RoleType, tc.fields.RoleType)
|
||||
}
|
||||
if afterFields.Rig != tc.fields.Rig {
|
||||
t.Errorf("after close: rig = %q, want %q (should be preserved)", afterFields.Rig, tc.fields.Rig)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestCloseAndClearAgentBead_NonExistent tests behavior when closing a non-existent agent bead.
|
||||
func TestCloseAndClearAgentBead_NonExistent(t *testing.T) {
|
||||
t.Skip("bd CLI 0.47.2 bug: database writes don't commit")
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
bd := NewIsolated(tmpDir)
|
||||
if err := bd.Init("test"); err != nil {
|
||||
t.Fatalf("bd init: %v", err)
|
||||
}
|
||||
|
||||
// Attempt to close non-existent bead
|
||||
err := bd.CloseAndClearAgentBead("test-nonexistent-polecat-xyz", "should fail")
|
||||
|
||||
// Should return an error (bd close on non-existent issue fails)
|
||||
if err == nil {
|
||||
t.Error("CloseAndClearAgentBead on non-existent bead should return error")
|
||||
}
|
||||
}
|
||||
|
||||
// TestCloseAndClearAgentBead_AlreadyClosed tests behavior when closing an already-closed agent bead.
|
||||
func TestCloseAndClearAgentBead_AlreadyClosed(t *testing.T) {
|
||||
t.Skip("bd CLI 0.47.2 bug: database writes don't commit")
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
bd := NewIsolated(tmpDir)
|
||||
if err := bd.Init("test"); err != nil {
|
||||
t.Fatalf("bd init: %v", err)
|
||||
}
|
||||
|
||||
agentID := "test-testrig-polecat-doubleclosed"
|
||||
|
||||
// Create agent bead
|
||||
_, err := bd.CreateAgentBead(agentID, "Test agent", &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "running",
|
||||
HookBead: "test-issue-1",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("CreateAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// First close - should succeed
|
||||
err = bd.CloseAndClearAgentBead(agentID, "first close")
|
||||
if err != nil {
|
||||
t.Fatalf("First CloseAndClearAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Second close - behavior depends on bd close semantics
|
||||
// Document actual behavior: bd close on already-closed bead may error or be idempotent
|
||||
err = bd.CloseAndClearAgentBead(agentID, "second close")
|
||||
|
||||
// Verify bead is still closed regardless of error
|
||||
issue, showErr := bd.Show(agentID)
|
||||
if showErr != nil {
|
||||
t.Fatalf("Show after double close: %v", showErr)
|
||||
}
|
||||
if issue.Status != "closed" {
|
||||
t.Errorf("status after double close = %q, want 'closed'", issue.Status)
|
||||
}
|
||||
|
||||
// Log actual behavior for documentation
|
||||
if err != nil {
|
||||
t.Logf("BEHAVIOR: CloseAndClearAgentBead on already-closed bead returns error: %v", err)
|
||||
} else {
|
||||
t.Log("BEHAVIOR: CloseAndClearAgentBead on already-closed bead is idempotent (no error)")
|
||||
}
|
||||
}
|
||||
|
||||
// TestCloseAndClearAgentBead_ReopenHasCleanState tests that reopening a closed agent bead
|
||||
// starts with clean state (no stale hook_bead, active_mr, etc.).
|
||||
func TestCloseAndClearAgentBead_ReopenHasCleanState(t *testing.T) {
|
||||
t.Skip("bd CLI 0.47.2 bug: database writes don't commit")
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
bd := NewIsolated(tmpDir)
|
||||
if err := bd.Init("test"); err != nil {
|
||||
t.Fatalf("bd init: %v", err)
|
||||
}
|
||||
|
||||
agentID := "test-testrig-polecat-cleanreopen"
|
||||
|
||||
// Step 1: Create agent with all fields populated
|
||||
_, err := bd.CreateAgentBead(agentID, "Test agent", &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "running",
|
||||
HookBead: "test-old-issue",
|
||||
CleanupStatus: "clean",
|
||||
ActiveMR: "test-old-mr",
|
||||
NotificationLevel: "normal",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("CreateAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Step 2: Close - should clear mutable fields
|
||||
err = bd.CloseAndClearAgentBead(agentID, "completing old work")
|
||||
if err != nil {
|
||||
t.Fatalf("CloseAndClearAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Step 3: Reopen with new fields
|
||||
newIssue, err := bd.CreateOrReopenAgentBead(agentID, agentID, &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "spawning",
|
||||
HookBead: "test-new-issue",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("CreateOrReopenAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Step 4: Verify new state - should have new hook, no stale data
|
||||
fields := ParseAgentFields(newIssue.Description)
|
||||
|
||||
if fields.HookBead != "test-new-issue" {
|
||||
t.Errorf("hook_bead = %q, want 'test-new-issue'", fields.HookBead)
|
||||
}
|
||||
|
||||
// The old active_mr should NOT be present (was cleared on close)
|
||||
if fields.ActiveMR == "test-old-mr" {
|
||||
t.Error("active_mr still has stale value 'test-old-mr' - CloseAndClearAgentBead didn't clear it")
|
||||
}
|
||||
|
||||
// agent_state should be the new state
|
||||
if fields.AgentState != "spawning" {
|
||||
t.Errorf("agent_state = %q, want 'spawning'", fields.AgentState)
|
||||
}
|
||||
|
||||
t.Log("CLEAN STATE CONFIRMED: Reopened agent bead has no stale mutable fields")
|
||||
}
|
||||
|
||||
// TestCloseAndClearAgentBead_ReasonVariations tests close with different reason values.
|
||||
func TestCloseAndClearAgentBead_ReasonVariations(t *testing.T) {
|
||||
t.Skip("bd CLI 0.47.2 bug: database writes don't commit")
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
bd := NewIsolated(tmpDir)
|
||||
if err := bd.Init("test"); err != nil {
|
||||
t.Fatalf("bd init: %v", err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
reason string
|
||||
}{
|
||||
{"empty_reason", ""},
|
||||
{"simple_reason", "polecat nuked"},
|
||||
{"reason_with_spaces", "polecat completed work successfully"},
|
||||
{"reason_with_special_chars", "closed: issue #123 (resolved)"},
|
||||
{"long_reason", "This is a very long reason that explains in detail why the agent bead was closed including multiple sentences and detailed context about the situation."},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Use tc.name for suffix to avoid hash-like patterns (e.g., "reason0")
|
||||
// that trigger bd's isLikelyHash() prefix extraction in v0.47.1+
|
||||
agentID := fmt.Sprintf("test-testrig-polecat-%s", tc.name)
|
||||
|
||||
// Create agent bead
|
||||
_, err := bd.CreateAgentBead(agentID, "Test agent", &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "running",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("CreateAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Close with specified reason
|
||||
err = bd.CloseAndClearAgentBead(agentID, tc.reason)
|
||||
if err != nil {
|
||||
t.Fatalf("CloseAndClearAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Verify closed
|
||||
issue, err := bd.Show(agentID)
|
||||
if err != nil {
|
||||
t.Fatalf("Show: %v", err)
|
||||
}
|
||||
if issue.Status != "closed" {
|
||||
t.Errorf("status = %q, want 'closed'", issue.Status)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
130
internal/beads/beads_types.go
Normal file
130
internal/beads/beads_types.go
Normal file
@@ -0,0 +1,130 @@
|
||||
// Package beads provides custom type management for agent beads.
|
||||
package beads
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
)
|
||||
|
||||
// typesSentinel is the name of a marker file (created inside a beads
// directory) indicating custom types have been configured there.
// It persists across CLI invocations to avoid redundant bd config calls.
const typesSentinel = ".gt-types-configured"

// ensuredDirs tracks which beads directories have been ensured this session,
// providing fast in-memory caching for multiple creates in the same CLI run.
// ensuredMu guards ensuredDirs; both are touched only under the lock
// (see EnsureCustomTypes and ResetEnsuredDirs).
var (
	ensuredDirs = make(map[string]bool)
	ensuredMu   sync.Mutex
)
|
||||
|
||||
// FindTownRoot walks up from startDir to find the Gas Town root directory.
|
||||
// The town root is identified by the presence of mayor/town.json.
|
||||
// Returns empty string if not found (reached filesystem root).
|
||||
func FindTownRoot(startDir string) string {
|
||||
dir := startDir
|
||||
for {
|
||||
townFile := filepath.Join(dir, "mayor", "town.json")
|
||||
if _, err := os.Stat(townFile); err == nil {
|
||||
return dir
|
||||
}
|
||||
parent := filepath.Dir(dir)
|
||||
if parent == dir {
|
||||
return "" // Reached filesystem root
|
||||
}
|
||||
dir = parent
|
||||
}
|
||||
}
|
||||
|
||||
// ResolveRoutingTarget determines which beads directory a bead ID will route to.
|
||||
// It extracts the prefix from the bead ID and looks up the corresponding route.
|
||||
// Returns the resolved beads directory path, following any redirects.
|
||||
//
|
||||
// If townRoot is empty or prefix is not found, falls back to the provided fallbackDir.
|
||||
func ResolveRoutingTarget(townRoot, beadID, fallbackDir string) string {
|
||||
if townRoot == "" {
|
||||
return fallbackDir
|
||||
}
|
||||
|
||||
// Extract prefix from bead ID (e.g., "gt-gastown-polecat-Toast" -> "gt-")
|
||||
prefix := ExtractPrefix(beadID)
|
||||
if prefix == "" {
|
||||
return fallbackDir
|
||||
}
|
||||
|
||||
// Look up rig path for this prefix
|
||||
rigPath := GetRigPathForPrefix(townRoot, prefix)
|
||||
if rigPath == "" {
|
||||
return fallbackDir
|
||||
}
|
||||
|
||||
// Resolve redirects and get final beads directory
|
||||
beadsDir := ResolveBeadsDir(rigPath)
|
||||
if beadsDir == "" {
|
||||
return fallbackDir
|
||||
}
|
||||
|
||||
return beadsDir
|
||||
}
|
||||
|
||||
// EnsureCustomTypes ensures the target beads directory has custom types configured.
|
||||
// Uses a two-level caching strategy:
|
||||
// - In-memory cache for multiple creates in the same CLI invocation
|
||||
// - Sentinel file on disk for persistence across CLI invocations
|
||||
//
|
||||
// This function is thread-safe and idempotent.
|
||||
func EnsureCustomTypes(beadsDir string) error {
|
||||
if beadsDir == "" {
|
||||
return fmt.Errorf("empty beads directory")
|
||||
}
|
||||
|
||||
ensuredMu.Lock()
|
||||
defer ensuredMu.Unlock()
|
||||
|
||||
// Fast path: in-memory cache (same CLI invocation)
|
||||
if ensuredDirs[beadsDir] {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fast path: sentinel file exists (previous CLI invocation)
|
||||
sentinelPath := filepath.Join(beadsDir, typesSentinel)
|
||||
if _, err := os.Stat(sentinelPath); err == nil {
|
||||
ensuredDirs[beadsDir] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Verify beads directory exists
|
||||
if _, err := os.Stat(beadsDir); os.IsNotExist(err) {
|
||||
return fmt.Errorf("beads directory does not exist: %s", beadsDir)
|
||||
}
|
||||
|
||||
// Configure custom types via bd CLI
|
||||
typesList := strings.Join(constants.BeadsCustomTypesList(), ",")
|
||||
cmd := exec.Command("bd", "config", "set", "types.custom", typesList)
|
||||
cmd.Dir = beadsDir
|
||||
cmd.Env = append(os.Environ(), "BEADS_DIR="+beadsDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("configure custom types in %s: %s: %w",
|
||||
beadsDir, strings.TrimSpace(string(output)), err)
|
||||
}
|
||||
|
||||
// Write sentinel file (best effort - don't fail if this fails)
|
||||
// The sentinel contains a version marker for future compatibility
|
||||
_ = os.WriteFile(sentinelPath, []byte("v1\n"), 0644)
|
||||
|
||||
ensuredDirs[beadsDir] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResetEnsuredDirs clears the in-memory cache of ensured directories.
|
||||
// This is primarily useful for testing.
|
||||
func ResetEnsuredDirs() {
|
||||
ensuredMu.Lock()
|
||||
defer ensuredMu.Unlock()
|
||||
ensuredDirs = make(map[string]bool)
|
||||
}
|
||||
234
internal/beads/beads_types_test.go
Normal file
234
internal/beads/beads_types_test.go
Normal file
@@ -0,0 +1,234 @@
|
||||
package beads
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFindTownRoot(t *testing.T) {
|
||||
// Create a temporary town structure
|
||||
tmpDir := t.TempDir()
|
||||
mayorDir := filepath.Join(tmpDir, "mayor")
|
||||
if err := os.MkdirAll(mayorDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(mayorDir, "town.json"), []byte("{}"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create nested directories
|
||||
deepDir := filepath.Join(tmpDir, "rig1", "crew", "worker1")
|
||||
if err := os.MkdirAll(deepDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
startDir string
|
||||
expected string
|
||||
}{
|
||||
{"from town root", tmpDir, tmpDir},
|
||||
{"from mayor dir", mayorDir, tmpDir},
|
||||
{"from deep nested dir", deepDir, tmpDir},
|
||||
{"from non-town dir", t.TempDir(), ""},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := FindTownRoot(tc.startDir)
|
||||
if result != tc.expected {
|
||||
t.Errorf("FindTownRoot(%q) = %q, want %q", tc.startDir, result, tc.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveRoutingTarget(t *testing.T) {
|
||||
// Create a temporary town with routes
|
||||
tmpDir := t.TempDir()
|
||||
beadsDir := filepath.Join(tmpDir, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create mayor/town.json for FindTownRoot
|
||||
mayorDir := filepath.Join(tmpDir, "mayor")
|
||||
if err := os.MkdirAll(mayorDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(mayorDir, "town.json"), []byte("{}"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create routes.jsonl
|
||||
routesContent := `{"prefix": "gt-", "path": "gastown/mayor/rig"}
|
||||
{"prefix": "hq-", "path": "."}
|
||||
`
|
||||
if err := os.WriteFile(filepath.Join(beadsDir, "routes.jsonl"), []byte(routesContent), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create the rig beads directory
|
||||
rigBeadsDir := filepath.Join(tmpDir, "gastown", "mayor", "rig", ".beads")
|
||||
if err := os.MkdirAll(rigBeadsDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fallback := "/fallback/.beads"
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
townRoot string
|
||||
beadID string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "rig-level bead routes to rig",
|
||||
townRoot: tmpDir,
|
||||
beadID: "gt-gastown-polecat-Toast",
|
||||
expected: rigBeadsDir,
|
||||
},
|
||||
{
|
||||
name: "town-level bead routes to town",
|
||||
townRoot: tmpDir,
|
||||
beadID: "hq-mayor",
|
||||
expected: beadsDir,
|
||||
},
|
||||
{
|
||||
name: "unknown prefix falls back",
|
||||
townRoot: tmpDir,
|
||||
beadID: "xx-unknown",
|
||||
expected: fallback,
|
||||
},
|
||||
{
|
||||
name: "empty townRoot falls back",
|
||||
townRoot: "",
|
||||
beadID: "gt-gastown-polecat-Toast",
|
||||
expected: fallback,
|
||||
},
|
||||
{
|
||||
name: "no prefix falls back",
|
||||
townRoot: tmpDir,
|
||||
beadID: "noprefixid",
|
||||
expected: fallback,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := ResolveRoutingTarget(tc.townRoot, tc.beadID, fallback)
|
||||
if result != tc.expected {
|
||||
t.Errorf("ResolveRoutingTarget(%q, %q, %q) = %q, want %q",
|
||||
tc.townRoot, tc.beadID, fallback, result, tc.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureCustomTypes(t *testing.T) {
|
||||
// Reset the in-memory cache before testing
|
||||
ResetEnsuredDirs()
|
||||
|
||||
t.Run("empty beads dir returns error", func(t *testing.T) {
|
||||
err := EnsureCustomTypes("")
|
||||
if err == nil {
|
||||
t.Error("expected error for empty beads dir")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("non-existent beads dir returns error", func(t *testing.T) {
|
||||
err := EnsureCustomTypes("/nonexistent/path/.beads")
|
||||
if err == nil {
|
||||
t.Error("expected error for non-existent beads dir")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("sentinel file triggers cache hit", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
beadsDir := filepath.Join(tmpDir, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create sentinel file
|
||||
sentinelPath := filepath.Join(beadsDir, typesSentinel)
|
||||
if err := os.WriteFile(sentinelPath, []byte("v1\n"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Reset cache to ensure we're testing sentinel detection
|
||||
ResetEnsuredDirs()
|
||||
|
||||
// This should succeed without running bd (sentinel exists)
|
||||
err := EnsureCustomTypes(beadsDir)
|
||||
if err != nil {
|
||||
t.Errorf("expected success with sentinel file, got: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("in-memory cache prevents repeated calls", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
beadsDir := filepath.Join(tmpDir, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create sentinel to avoid bd call
|
||||
sentinelPath := filepath.Join(beadsDir, typesSentinel)
|
||||
if err := os.WriteFile(sentinelPath, []byte("v1\n"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ResetEnsuredDirs()
|
||||
|
||||
// First call
|
||||
if err := EnsureCustomTypes(beadsDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Remove sentinel - second call should still succeed due to in-memory cache
|
||||
os.Remove(sentinelPath)
|
||||
|
||||
if err := EnsureCustomTypes(beadsDir); err != nil {
|
||||
t.Errorf("expected cache hit, got: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeads_getTownRoot(t *testing.T) {
|
||||
// Create a temporary town
|
||||
tmpDir := t.TempDir()
|
||||
mayorDir := filepath.Join(tmpDir, "mayor")
|
||||
if err := os.MkdirAll(mayorDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(mayorDir, "town.json"), []byte("{}"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create nested directory
|
||||
rigDir := filepath.Join(tmpDir, "myrig", "mayor", "rig")
|
||||
if err := os.MkdirAll(rigDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b := New(rigDir)
|
||||
|
||||
// First call should find town root
|
||||
root1 := b.getTownRoot()
|
||||
if root1 != tmpDir {
|
||||
t.Errorf("first getTownRoot() = %q, want %q", root1, tmpDir)
|
||||
}
|
||||
|
||||
// Second call should return cached value
|
||||
root2 := b.getTownRoot()
|
||||
if root2 != root1 {
|
||||
t.Errorf("second getTownRoot() = %q, want cached %q", root2, root1)
|
||||
}
|
||||
|
||||
// Verify searchedRoot flag is set
|
||||
if !b.searchedRoot {
|
||||
t.Error("expected searchedRoot to be true after getTownRoot()")
|
||||
}
|
||||
}
|
||||
@@ -5,9 +5,15 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
gracefulTimeout = 2 * time.Second
|
||||
)
|
||||
|
||||
// BdDaemonInfo represents the status of a single bd daemon instance.
|
||||
type BdDaemonInfo struct {
|
||||
Workspace string `json:"workspace"`
|
||||
@@ -69,21 +75,12 @@ func EnsureBdDaemonHealth(workDir string) string {
|
||||
|
||||
// Check if any daemons need attention
|
||||
needsRestart := false
|
||||
var issues []string
|
||||
|
||||
for _, d := range health.Daemons {
|
||||
switch d.Status {
|
||||
case "healthy":
|
||||
// Good
|
||||
case "version_mismatch":
|
||||
case "version_mismatch", "stale", "unresponsive":
|
||||
needsRestart = true
|
||||
issues = append(issues, fmt.Sprintf("%s: version mismatch", d.Workspace))
|
||||
case "stale":
|
||||
needsRestart = true
|
||||
issues = append(issues, fmt.Sprintf("%s: stale", d.Workspace))
|
||||
case "unresponsive":
|
||||
needsRestart = true
|
||||
issues = append(issues, fmt.Sprintf("%s: unresponsive", d.Workspace))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -112,9 +109,8 @@ func EnsureBdDaemonHealth(workDir string) string {
|
||||
|
||||
// restartBdDaemons restarts all bd daemons.
|
||||
func restartBdDaemons() error { //nolint:unparam // error return kept for future use
|
||||
// Stop all daemons first
|
||||
stopCmd := exec.Command("bd", "daemon", "killall")
|
||||
_ = stopCmd.Run() // Ignore errors - daemons might not be running
|
||||
// Stop all daemons first using pkill to avoid auto-start side effects
|
||||
_ = exec.Command("pkill", "-TERM", "-f", "bd daemon").Run()
|
||||
|
||||
// Give time for cleanup
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
@@ -128,7 +124,121 @@ func restartBdDaemons() error { //nolint:unparam // error return kept for future
|
||||
// StartBdDaemonIfNeeded starts the bd daemon for a specific workspace if not running.
|
||||
// This is a best-effort operation - failures are logged but don't block execution.
|
||||
func StartBdDaemonIfNeeded(workDir string) error {
|
||||
cmd := exec.Command("bd", "daemon", "--start")
|
||||
cmd := exec.Command("bd", "daemon", "start")
|
||||
cmd.Dir = workDir
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
// StopAllBdProcesses stops all bd daemon and activity processes.
|
||||
// Returns (daemonsKilled, activityKilled, error).
|
||||
// If dryRun is true, returns counts without stopping anything.
|
||||
func StopAllBdProcesses(dryRun, force bool) (int, int, error) {
|
||||
if _, err := exec.LookPath("bd"); err != nil {
|
||||
return 0, 0, nil
|
||||
}
|
||||
|
||||
daemonsBefore := CountBdDaemons()
|
||||
activityBefore := CountBdActivityProcesses()
|
||||
|
||||
if dryRun {
|
||||
return daemonsBefore, activityBefore, nil
|
||||
}
|
||||
|
||||
daemonsKilled, daemonsRemaining := stopBdDaemons(force)
|
||||
activityKilled, activityRemaining := stopBdActivityProcesses(force)
|
||||
|
||||
if daemonsRemaining > 0 {
|
||||
return daemonsKilled, activityKilled, fmt.Errorf("bd daemon shutdown incomplete: %d still running", daemonsRemaining)
|
||||
}
|
||||
if activityRemaining > 0 {
|
||||
return daemonsKilled, activityKilled, fmt.Errorf("bd activity shutdown incomplete: %d still running", activityRemaining)
|
||||
}
|
||||
|
||||
return daemonsKilled, activityKilled, nil
|
||||
}
|
||||
|
||||
// CountBdDaemons returns count of running bd daemons.
|
||||
// Uses pgrep instead of "bd daemon list" to avoid triggering daemon auto-start
|
||||
// during shutdown verification.
|
||||
func CountBdDaemons() int {
|
||||
// Use pgrep -f with wc -l for cross-platform compatibility
|
||||
// (macOS pgrep doesn't support -c flag)
|
||||
cmd := exec.Command("sh", "-c", "pgrep -f 'bd daemon' 2>/dev/null | wc -l")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
count, _ := strconv.Atoi(strings.TrimSpace(string(output)))
|
||||
return count
|
||||
}
|
||||
|
||||
|
||||
func stopBdDaemons(force bool) (int, int) {
|
||||
before := CountBdDaemons()
|
||||
if before == 0 {
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
// Use pkill directly instead of "bd daemon killall" to avoid triggering
|
||||
// daemon auto-start as a side effect of running bd commands.
|
||||
// Note: pkill -f pattern may match unintended processes in rare cases
|
||||
// (e.g., editors with "bd daemon" in file content). This is acceptable
|
||||
// given the alternative of respawning daemons during shutdown.
|
||||
if force {
|
||||
_ = exec.Command("pkill", "-9", "-f", "bd daemon").Run()
|
||||
} else {
|
||||
_ = exec.Command("pkill", "-TERM", "-f", "bd daemon").Run()
|
||||
time.Sleep(gracefulTimeout)
|
||||
if remaining := CountBdDaemons(); remaining > 0 {
|
||||
_ = exec.Command("pkill", "-9", "-f", "bd daemon").Run()
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
final := CountBdDaemons()
|
||||
killed := before - final
|
||||
if killed < 0 {
|
||||
killed = 0 // Race condition: more processes spawned than we killed
|
||||
}
|
||||
return killed, final
|
||||
}
|
||||
|
||||
// CountBdActivityProcesses returns count of running `bd activity` processes.
|
||||
func CountBdActivityProcesses() int {
|
||||
// Use pgrep -f with wc -l for cross-platform compatibility
|
||||
// (macOS pgrep doesn't support -c flag)
|
||||
cmd := exec.Command("sh", "-c", "pgrep -f 'bd activity' 2>/dev/null | wc -l")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
count, _ := strconv.Atoi(strings.TrimSpace(string(output)))
|
||||
return count
|
||||
}
|
||||
|
||||
func stopBdActivityProcesses(force bool) (int, int) {
|
||||
before := CountBdActivityProcesses()
|
||||
if before == 0 {
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
if force {
|
||||
_ = exec.Command("pkill", "-9", "-f", "bd activity").Run()
|
||||
} else {
|
||||
_ = exec.Command("pkill", "-TERM", "-f", "bd activity").Run()
|
||||
time.Sleep(gracefulTimeout)
|
||||
if remaining := CountBdActivityProcesses(); remaining > 0 {
|
||||
_ = exec.Command("pkill", "-9", "-f", "bd activity").Run()
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
after := CountBdActivityProcesses()
|
||||
killed := before - after
|
||||
if killed < 0 {
|
||||
killed = 0 // Race condition: more processes spawned than we killed
|
||||
}
|
||||
return killed, after
|
||||
}
|
||||
|
||||
33
internal/beads/daemon_test.go
Normal file
33
internal/beads/daemon_test.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package beads
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCountBdActivityProcesses(t *testing.T) {
|
||||
count := CountBdActivityProcesses()
|
||||
if count < 0 {
|
||||
t.Errorf("count should be non-negative, got %d", count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCountBdDaemons(t *testing.T) {
|
||||
if _, err := exec.LookPath("bd"); err != nil {
|
||||
t.Skip("bd not installed")
|
||||
}
|
||||
count := CountBdDaemons()
|
||||
if count < 0 {
|
||||
t.Errorf("count should be non-negative, got %d", count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStopAllBdProcesses_DryRun(t *testing.T) {
|
||||
daemonsKilled, activityKilled, err := StopAllBdProcesses(true, false)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if daemonsKilled < 0 || activityKilled < 0 {
|
||||
t.Errorf("counts should be non-negative: daemons=%d, activity=%d", daemonsKilled, activityKilled)
|
||||
}
|
||||
}
|
||||
11
internal/beads/force.go
Normal file
11
internal/beads/force.go
Normal file
@@ -0,0 +1,11 @@
|
||||
package beads
|
||||
|
||||
import "strings"
|
||||
|
||||
// NeedsForceForID returns true when a bead ID uses multiple hyphens.
|
||||
// Recent bd versions infer the prefix from the last hyphen, which can cause
|
||||
// prefix-mismatch errors for valid system IDs like "st-stockdrop-polecat-nux"
|
||||
// and "hq-cv-abc". We pass --force to honor the explicit ID in those cases.
|
||||
func NeedsForceForID(id string) bool {
|
||||
return strings.Count(id, "-") > 1
|
||||
}
|
||||
23
internal/beads/force_test.go
Normal file
23
internal/beads/force_test.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package beads
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestNeedsForceForID(t *testing.T) {
|
||||
tests := []struct {
|
||||
id string
|
||||
want bool
|
||||
}{
|
||||
{id: "", want: false},
|
||||
{id: "hq-mayor", want: false},
|
||||
{id: "gt-abc123", want: false},
|
||||
{id: "hq-mayor-role", want: true},
|
||||
{id: "st-stockdrop-polecat-nux", want: true},
|
||||
{id: "hq-cv-abc", want: true},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
if got := NeedsForceForID(tc.id); got != tc.want {
|
||||
t.Fatalf("NeedsForceForID(%q) = %v, want %v", tc.id, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -158,6 +158,7 @@ func (b *Beads) AttachMolecule(pinnedBeadID, moleculeID string) (*Issue, error)
|
||||
return nil, fmt.Errorf("fetching pinned bead: %w", err)
|
||||
}
|
||||
|
||||
// Only allow pinned beads (permanent records like role definitions)
|
||||
if issue.Status != StatusPinned {
|
||||
return nil, fmt.Errorf("issue %s is not pinned (status: %s)", pinnedBeadID, issue.Status)
|
||||
}
|
||||
|
||||
@@ -8,6 +8,8 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
)
|
||||
|
||||
// Route represents a prefix-to-path routing rule.
|
||||
@@ -111,6 +113,11 @@ func RemoveRoute(townRoot string, prefix string) error {
|
||||
|
||||
// WriteRoutes writes routes to routes.jsonl, overwriting existing content.
|
||||
func WriteRoutes(beadsDir string, routes []Route) error {
|
||||
// Ensure beads directory exists
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
return fmt.Errorf("creating beads directory: %w", err)
|
||||
}
|
||||
|
||||
routesPath := filepath.Join(beadsDir, RoutesFileName)
|
||||
|
||||
file, err := os.Create(routesPath)
|
||||
@@ -150,7 +157,7 @@ func GetPrefixForRig(townRoot, rigName string) string {
|
||||
beadsDir := filepath.Join(townRoot, ".beads")
|
||||
routes, err := LoadRoutes(beadsDir)
|
||||
if err != nil || routes == nil {
|
||||
return "gt" // Default prefix
|
||||
return config.GetRigPrefix(townRoot, rigName)
|
||||
}
|
||||
|
||||
// Look for a route where the path starts with the rig name
|
||||
@@ -163,7 +170,7 @@ func GetPrefixForRig(townRoot, rigName string) string {
|
||||
}
|
||||
}
|
||||
|
||||
return "gt" // Default prefix
|
||||
return config.GetRigPrefix(townRoot, rigName)
|
||||
}
|
||||
|
||||
// FindConflictingPrefixes checks for duplicate prefixes in routes.
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
)
|
||||
|
||||
func TestGetPrefixForRig(t *testing.T) {
|
||||
@@ -52,6 +54,33 @@ func TestGetPrefixForRig_NoRoutesFile(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetPrefixForRig_RigsConfigFallback(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Write rigs.json with a non-gt prefix
|
||||
rigsPath := filepath.Join(tmpDir, "mayor", "rigs.json")
|
||||
if err := os.MkdirAll(filepath.Dir(rigsPath), 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cfg := &config.RigsConfig{
|
||||
Version: config.CurrentRigsVersion,
|
||||
Rigs: map[string]config.RigEntry{
|
||||
"project_ideas": {
|
||||
BeadsConfig: &config.BeadsConfig{Prefix: "pi"},
|
||||
},
|
||||
},
|
||||
}
|
||||
if err := config.SaveRigsConfig(rigsPath, cfg); err != nil {
|
||||
t.Fatalf("SaveRigsConfig: %v", err)
|
||||
}
|
||||
|
||||
result := GetPrefixForRig(tmpDir, "project_ideas")
|
||||
if result != "pi" {
|
||||
t.Errorf("Expected prefix from rigs config, got %q", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractPrefix(t *testing.T) {
|
||||
tests := []struct {
|
||||
beadID string
|
||||
@@ -100,7 +129,7 @@ func TestGetRigPathForPrefix(t *testing.T) {
|
||||
}{
|
||||
{"ap-", filepath.Join(tmpDir, "ai_platform/mayor/rig")},
|
||||
{"gt-", filepath.Join(tmpDir, "gastown/mayor/rig")},
|
||||
{"hq-", tmpDir}, // Town-level beads return townRoot
|
||||
{"hq-", tmpDir}, // Town-level beads return townRoot
|
||||
{"unknown-", ""}, // Unknown prefix returns empty
|
||||
{"", ""}, // Empty prefix returns empty
|
||||
}
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
)
|
||||
@@ -41,11 +40,11 @@ type Status struct {
|
||||
|
||||
// Boot manages the Boot watchdog lifecycle.
|
||||
type Boot struct {
|
||||
townRoot string
|
||||
bootDir string // ~/gt/deacon/dogs/boot/
|
||||
deaconDir string // ~/gt/deacon/
|
||||
tmux *tmux.Tmux
|
||||
degraded bool
|
||||
townRoot string
|
||||
bootDir string // ~/gt/deacon/dogs/boot/
|
||||
deaconDir string // ~/gt/deacon/
|
||||
tmux *tmux.Tmux
|
||||
degraded bool
|
||||
}
|
||||
|
||||
// New creates a new Boot manager.
|
||||
@@ -145,7 +144,8 @@ func (b *Boot) LoadStatus() (*Status, error) {
|
||||
// Spawn starts Boot in a fresh tmux session.
|
||||
// Boot runs the mol-boot-triage molecule and exits when done.
|
||||
// In degraded mode (no tmux), it runs in a subprocess.
|
||||
func (b *Boot) Spawn() error {
|
||||
// The agentOverride parameter allows specifying an agent alias to use instead of the town default.
|
||||
func (b *Boot) Spawn(agentOverride string) error {
|
||||
if b.IsRunning() {
|
||||
return fmt.Errorf("boot is already running")
|
||||
}
|
||||
@@ -155,14 +155,15 @@ func (b *Boot) Spawn() error {
|
||||
return b.spawnDegraded()
|
||||
}
|
||||
|
||||
return b.spawnTmux()
|
||||
return b.spawnTmux(agentOverride)
|
||||
}
|
||||
|
||||
// spawnTmux spawns Boot in a tmux session.
|
||||
func (b *Boot) spawnTmux() error {
|
||||
// Kill any stale session first
|
||||
func (b *Boot) spawnTmux(agentOverride string) error {
|
||||
// Kill any stale session first.
|
||||
// Use KillSessionWithProcesses to ensure all descendant processes are killed.
|
||||
if b.IsSessionAlive() {
|
||||
_ = b.tmux.KillSession(SessionName)
|
||||
_ = b.tmux.KillSessionWithProcesses(SessionName)
|
||||
}
|
||||
|
||||
// Ensure boot directory exists (it should have CLAUDE.md with Boot context)
|
||||
@@ -170,8 +171,22 @@ func (b *Boot) spawnTmux() error {
|
||||
return fmt.Errorf("ensuring boot dir: %w", err)
|
||||
}
|
||||
|
||||
// Create new session in boot directory (not deacon dir) so Claude reads Boot's CLAUDE.md
|
||||
if err := b.tmux.NewSession(SessionName, b.bootDir); err != nil {
|
||||
// Build startup command with optional agent override
|
||||
// The "gt boot triage" prompt tells Boot to immediately start triage (GUPP principle)
|
||||
var startCmd string
|
||||
if agentOverride != "" {
|
||||
var err error
|
||||
startCmd, err = config.BuildAgentStartupCommandWithAgentOverride("boot", "", b.townRoot, "", "gt boot triage", agentOverride)
|
||||
if err != nil {
|
||||
return fmt.Errorf("building startup command with agent override: %w", err)
|
||||
}
|
||||
} else {
|
||||
startCmd = config.BuildAgentStartupCommand("boot", "", b.townRoot, "", "gt boot triage")
|
||||
}
|
||||
|
||||
// Create session with command directly to avoid send-keys race condition.
|
||||
// See: https://github.com/anthropics/gastown/issues/280
|
||||
if err := b.tmux.NewSessionWithCommand(SessionName, b.bootDir, startCmd); err != nil {
|
||||
return fmt.Errorf("creating boot session: %w", err)
|
||||
}
|
||||
|
||||
@@ -179,24 +194,11 @@ func (b *Boot) spawnTmux() error {
|
||||
envVars := config.AgentEnv(config.AgentEnvConfig{
|
||||
Role: "boot",
|
||||
TownRoot: b.townRoot,
|
||||
BeadsDir: beads.ResolveBeadsDir(b.townRoot),
|
||||
})
|
||||
for k, v := range envVars {
|
||||
_ = b.tmux.SetEnvironment(SessionName, k, v)
|
||||
}
|
||||
|
||||
// Launch Claude with environment exported inline and initial triage prompt
|
||||
// The "gt boot triage" prompt tells Boot to immediately start triage (GUPP principle)
|
||||
startCmd := config.BuildAgentStartupCommand("boot", "deacon-boot", "", "gt boot triage")
|
||||
// Wait for shell to be ready before sending keys (prevents "can't find pane" under load)
|
||||
if err := b.tmux.WaitForShellReady(SessionName, 5*time.Second); err != nil {
|
||||
_ = b.tmux.KillSession(SessionName)
|
||||
return fmt.Errorf("waiting for shell: %w", err)
|
||||
}
|
||||
if err := b.tmux.SendKeys(SessionName, startCmd); err != nil {
|
||||
return fmt.Errorf("sending startup command: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -212,7 +214,6 @@ func (b *Boot) spawnDegraded() error {
|
||||
envVars := config.AgentEnv(config.AgentEnvConfig{
|
||||
Role: "boot",
|
||||
TownRoot: b.townRoot,
|
||||
BeadsDir: beads.ResolveBeadsDir(b.townRoot),
|
||||
})
|
||||
cmd.Env = config.EnvForExecCommand(envVars)
|
||||
cmd.Env = append(cmd.Env, "GT_DEGRADED=true")
|
||||
|
||||
@@ -181,9 +181,9 @@ func (cp *Checkpoint) Age() time.Duration {
|
||||
return time.Since(cp.Timestamp)
|
||||
}
|
||||
|
||||
// IsStale returns true if the checkpoint is older than the threshold.
|
||||
// IsStale returns true if the checkpoint is at or older than the threshold.
|
||||
func (cp *Checkpoint) IsStale(threshold time.Duration) bool {
|
||||
return cp.Age() > threshold
|
||||
return cp.Age() >= threshold
|
||||
}
|
||||
|
||||
// Summary returns a concise summary of the checkpoint.
|
||||
|
||||
398
internal/checkpoint/checkpoint_test.go
Normal file
398
internal/checkpoint/checkpoint_test.go
Normal file
@@ -0,0 +1,398 @@
|
||||
package checkpoint
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestPath(t *testing.T) {
|
||||
dir := "/some/polecat/dir"
|
||||
got := Path(dir)
|
||||
want := filepath.Join(dir, Filename)
|
||||
if got != want {
|
||||
t.Errorf("Path(%q) = %q, want %q", dir, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadWrite(t *testing.T) {
|
||||
// Create temp directory
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Test reading non-existent checkpoint returns nil, nil
|
||||
cp, err := Read(tmpDir)
|
||||
if err != nil {
|
||||
t.Fatalf("Read non-existent: unexpected error: %v", err)
|
||||
}
|
||||
if cp != nil {
|
||||
t.Fatal("Read non-existent: expected nil checkpoint")
|
||||
}
|
||||
|
||||
// Create and write a checkpoint
|
||||
original := &Checkpoint{
|
||||
MoleculeID: "mol-123",
|
||||
CurrentStep: "step-1",
|
||||
StepTitle: "Build the thing",
|
||||
ModifiedFiles: []string{"file1.go", "file2.go"},
|
||||
LastCommit: "abc123",
|
||||
Branch: "feature/test",
|
||||
HookedBead: "gt-xyz",
|
||||
Notes: "Some notes",
|
||||
}
|
||||
|
||||
if err := Write(tmpDir, original); err != nil {
|
||||
t.Fatalf("Write: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Verify file exists
|
||||
path := Path(tmpDir)
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||
t.Fatal("Write: checkpoint file not created")
|
||||
}
|
||||
|
||||
// Read it back
|
||||
loaded, err := Read(tmpDir)
|
||||
if err != nil {
|
||||
t.Fatalf("Read: unexpected error: %v", err)
|
||||
}
|
||||
if loaded == nil {
|
||||
t.Fatal("Read: expected non-nil checkpoint")
|
||||
}
|
||||
|
||||
// Verify fields
|
||||
if loaded.MoleculeID != original.MoleculeID {
|
||||
t.Errorf("MoleculeID = %q, want %q", loaded.MoleculeID, original.MoleculeID)
|
||||
}
|
||||
if loaded.CurrentStep != original.CurrentStep {
|
||||
t.Errorf("CurrentStep = %q, want %q", loaded.CurrentStep, original.CurrentStep)
|
||||
}
|
||||
if loaded.StepTitle != original.StepTitle {
|
||||
t.Errorf("StepTitle = %q, want %q", loaded.StepTitle, original.StepTitle)
|
||||
}
|
||||
if loaded.Branch != original.Branch {
|
||||
t.Errorf("Branch = %q, want %q", loaded.Branch, original.Branch)
|
||||
}
|
||||
if loaded.HookedBead != original.HookedBead {
|
||||
t.Errorf("HookedBead = %q, want %q", loaded.HookedBead, original.HookedBead)
|
||||
}
|
||||
if loaded.Notes != original.Notes {
|
||||
t.Errorf("Notes = %q, want %q", loaded.Notes, original.Notes)
|
||||
}
|
||||
if len(loaded.ModifiedFiles) != len(original.ModifiedFiles) {
|
||||
t.Errorf("ModifiedFiles len = %d, want %d", len(loaded.ModifiedFiles), len(original.ModifiedFiles))
|
||||
}
|
||||
|
||||
// Verify timestamp was set
|
||||
if loaded.Timestamp.IsZero() {
|
||||
t.Error("Timestamp should be set by Write")
|
||||
}
|
||||
|
||||
// Verify SessionID was set
|
||||
if loaded.SessionID == "" {
|
||||
t.Error("SessionID should be set by Write")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWritePreservesTimestamp(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create checkpoint with explicit timestamp
|
||||
ts := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC)
|
||||
cp := &Checkpoint{
|
||||
Timestamp: ts,
|
||||
Notes: "test",
|
||||
}
|
||||
|
||||
if err := Write(tmpDir, cp); err != nil {
|
||||
t.Fatalf("Write: %v", err)
|
||||
}
|
||||
|
||||
loaded, err := Read(tmpDir)
|
||||
if err != nil {
|
||||
t.Fatalf("Read: %v", err)
|
||||
}
|
||||
|
||||
if !loaded.Timestamp.Equal(ts) {
|
||||
t.Errorf("Timestamp = %v, want %v", loaded.Timestamp, ts)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadCorruptedJSON(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
path := Path(tmpDir)
|
||||
|
||||
// Write invalid JSON
|
||||
if err := os.WriteFile(path, []byte("not valid json{"), 0600); err != nil {
|
||||
t.Fatalf("WriteFile: %v", err)
|
||||
}
|
||||
|
||||
_, err := Read(tmpDir)
|
||||
if err == nil {
|
||||
t.Fatal("Read corrupted JSON: expected error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemove(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Write a checkpoint
|
||||
cp := &Checkpoint{Notes: "to be removed"}
|
||||
if err := Write(tmpDir, cp); err != nil {
|
||||
t.Fatalf("Write: %v", err)
|
||||
}
|
||||
|
||||
// Verify it exists
|
||||
path := Path(tmpDir)
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||
t.Fatal("checkpoint should exist before Remove")
|
||||
}
|
||||
|
||||
// Remove it
|
||||
if err := Remove(tmpDir); err != nil {
|
||||
t.Fatalf("Remove: %v", err)
|
||||
}
|
||||
|
||||
// Verify it's gone
|
||||
if _, err := os.Stat(path); !os.IsNotExist(err) {
|
||||
t.Fatal("checkpoint should not exist after Remove")
|
||||
}
|
||||
|
||||
// Remove again should not error
|
||||
if err := Remove(tmpDir); err != nil {
|
||||
t.Fatalf("Remove non-existent: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCapture(t *testing.T) {
|
||||
// Use current directory (should be a git repo)
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatalf("Getwd: %v", err)
|
||||
}
|
||||
|
||||
// Find git root
|
||||
gitRoot := cwd
|
||||
for {
|
||||
if _, err := os.Stat(filepath.Join(gitRoot, ".git")); err == nil {
|
||||
break
|
||||
}
|
||||
parent := filepath.Dir(gitRoot)
|
||||
if parent == gitRoot {
|
||||
t.Skip("not in a git repository")
|
||||
}
|
||||
gitRoot = parent
|
||||
}
|
||||
|
||||
cp, err := Capture(gitRoot)
|
||||
if err != nil {
|
||||
t.Fatalf("Capture: %v", err)
|
||||
}
|
||||
|
||||
// Should have timestamp
|
||||
if cp.Timestamp.IsZero() {
|
||||
t.Error("Timestamp should be set")
|
||||
}
|
||||
|
||||
// Should have branch (we're in a git repo)
|
||||
if cp.Branch == "" {
|
||||
t.Error("Branch should be set in git repo")
|
||||
}
|
||||
|
||||
// Should have last commit
|
||||
if cp.LastCommit == "" {
|
||||
t.Error("LastCommit should be set in git repo")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWithMolecule(t *testing.T) {
|
||||
cp := &Checkpoint{}
|
||||
result := cp.WithMolecule("mol-abc", "step-1", "Do the thing")
|
||||
|
||||
if result != cp {
|
||||
t.Error("WithMolecule should return same checkpoint")
|
||||
}
|
||||
if cp.MoleculeID != "mol-abc" {
|
||||
t.Errorf("MoleculeID = %q, want %q", cp.MoleculeID, "mol-abc")
|
||||
}
|
||||
if cp.CurrentStep != "step-1" {
|
||||
t.Errorf("CurrentStep = %q, want %q", cp.CurrentStep, "step-1")
|
||||
}
|
||||
if cp.StepTitle != "Do the thing" {
|
||||
t.Errorf("StepTitle = %q, want %q", cp.StepTitle, "Do the thing")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWithHookedBead(t *testing.T) {
|
||||
cp := &Checkpoint{}
|
||||
result := cp.WithHookedBead("gt-123")
|
||||
|
||||
if result != cp {
|
||||
t.Error("WithHookedBead should return same checkpoint")
|
||||
}
|
||||
if cp.HookedBead != "gt-123" {
|
||||
t.Errorf("HookedBead = %q, want %q", cp.HookedBead, "gt-123")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWithNotes(t *testing.T) {
|
||||
cp := &Checkpoint{}
|
||||
result := cp.WithNotes("important context")
|
||||
|
||||
if result != cp {
|
||||
t.Error("WithNotes should return same checkpoint")
|
||||
}
|
||||
if cp.Notes != "important context" {
|
||||
t.Errorf("Notes = %q, want %q", cp.Notes, "important context")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAge(t *testing.T) {
|
||||
cp := &Checkpoint{
|
||||
Timestamp: time.Now().Add(-5 * time.Minute),
|
||||
}
|
||||
|
||||
age := cp.Age()
|
||||
if age < 4*time.Minute || age > 6*time.Minute {
|
||||
t.Errorf("Age = %v, expected ~5 minutes", age)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsStale(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
age time.Duration
|
||||
threshold time.Duration
|
||||
want bool
|
||||
}{
|
||||
{"fresh", 5 * time.Minute, 1 * time.Hour, false},
|
||||
{"stale", 2 * time.Hour, 1 * time.Hour, true},
|
||||
{"exactly threshold", 1 * time.Hour, 1 * time.Hour, true}, // timing race: by the time IsStale runs, age > threshold
|
||||
{"just over threshold", 1*time.Hour + time.Second, 1 * time.Hour, true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cp := &Checkpoint{
|
||||
Timestamp: time.Now().Add(-tt.age),
|
||||
}
|
||||
got := cp.IsStale(tt.threshold)
|
||||
if got != tt.want {
|
||||
t.Errorf("IsStale(%v) = %v, want %v", tt.threshold, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSummary(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
cp *Checkpoint
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
cp: &Checkpoint{},
|
||||
want: "no significant state",
|
||||
},
|
||||
{
|
||||
name: "molecule only",
|
||||
cp: &Checkpoint{MoleculeID: "mol-123"},
|
||||
want: "molecule mol-123",
|
||||
},
|
||||
{
|
||||
name: "molecule with step",
|
||||
cp: &Checkpoint{MoleculeID: "mol-123", CurrentStep: "step-1"},
|
||||
want: "molecule mol-123, step step-1",
|
||||
},
|
||||
{
|
||||
name: "hooked bead",
|
||||
cp: &Checkpoint{HookedBead: "gt-abc"},
|
||||
want: "hooked: gt-abc",
|
||||
},
|
||||
{
|
||||
name: "modified files",
|
||||
cp: &Checkpoint{ModifiedFiles: []string{"a.go", "b.go"}},
|
||||
want: "2 modified files",
|
||||
},
|
||||
{
|
||||
name: "branch",
|
||||
cp: &Checkpoint{Branch: "feature/test"},
|
||||
want: "branch: feature/test",
|
||||
},
|
||||
{
|
||||
name: "full",
|
||||
cp: &Checkpoint{
|
||||
MoleculeID: "mol-123",
|
||||
CurrentStep: "step-1",
|
||||
HookedBead: "gt-abc",
|
||||
ModifiedFiles: []string{"a.go"},
|
||||
Branch: "main",
|
||||
},
|
||||
want: "molecule mol-123, step step-1, hooked: gt-abc, 1 modified files, branch: main",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := tt.cp.Summary()
|
||||
if got != tt.want {
|
||||
t.Errorf("Summary() = %q, want %q", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckpointJSONRoundtrip(t *testing.T) {
|
||||
original := &Checkpoint{
|
||||
MoleculeID: "mol-test",
|
||||
CurrentStep: "step-2",
|
||||
StepTitle: "Testing JSON",
|
||||
ModifiedFiles: []string{"x.go", "y.go", "z.go"},
|
||||
LastCommit: "deadbeef",
|
||||
Branch: "develop",
|
||||
HookedBead: "gt-roundtrip",
|
||||
Timestamp: time.Date(2025, 6, 15, 10, 30, 0, 0, time.UTC),
|
||||
SessionID: "session-123",
|
||||
Notes: "Testing round trip",
|
||||
}
|
||||
|
||||
data, err := json.Marshal(original)
|
||||
if err != nil {
|
||||
t.Fatalf("Marshal: %v", err)
|
||||
}
|
||||
|
||||
var loaded Checkpoint
|
||||
if err := json.Unmarshal(data, &loaded); err != nil {
|
||||
t.Fatalf("Unmarshal: %v", err)
|
||||
}
|
||||
|
||||
if loaded.MoleculeID != original.MoleculeID {
|
||||
t.Errorf("MoleculeID mismatch")
|
||||
}
|
||||
if loaded.CurrentStep != original.CurrentStep {
|
||||
t.Errorf("CurrentStep mismatch")
|
||||
}
|
||||
if loaded.StepTitle != original.StepTitle {
|
||||
t.Errorf("StepTitle mismatch")
|
||||
}
|
||||
if loaded.Branch != original.Branch {
|
||||
t.Errorf("Branch mismatch")
|
||||
}
|
||||
if loaded.HookedBead != original.HookedBead {
|
||||
t.Errorf("HookedBead mismatch")
|
||||
}
|
||||
if loaded.SessionID != original.SessionID {
|
||||
t.Errorf("SessionID mismatch")
|
||||
}
|
||||
if loaded.Notes != original.Notes {
|
||||
t.Errorf("Notes mismatch")
|
||||
}
|
||||
if !loaded.Timestamp.Equal(original.Timestamp) {
|
||||
t.Errorf("Timestamp mismatch")
|
||||
}
|
||||
if len(loaded.ModifiedFiles) != len(original.ModifiedFiles) {
|
||||
t.Errorf("ModifiedFiles length mismatch")
|
||||
}
|
||||
}
|
||||
@@ -3,13 +3,42 @@
|
||||
"beads@beads-marketplace": false
|
||||
},
|
||||
"hooks": {
|
||||
"PreToolUse": [
|
||||
{
|
||||
"matcher": "Bash(gh pr create*)",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/.local/bin:$PATH\" && gt tap guard pr-workflow"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": "Bash(git checkout -b*)",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/.local/bin:$PATH\" && gt tap guard pr-workflow"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": "Bash(git switch -c*)",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/.local/bin:$PATH\" && gt tap guard pr-workflow"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"SessionStart": [
|
||||
{
|
||||
"matcher": "",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt prime && gt mail check --inject && gt nudge deacon session-started"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt prime --hook && gt mail check --inject && gt nudge deacon session-started"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -20,7 +49,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt prime"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt prime --hook"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -3,13 +3,42 @@
|
||||
"beads@beads-marketplace": false
|
||||
},
|
||||
"hooks": {
|
||||
"PreToolUse": [
|
||||
{
|
||||
"matcher": "Bash(gh pr create*)",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/.local/bin:$PATH\" && gt tap guard pr-workflow"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": "Bash(git checkout -b*)",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/.local/bin:$PATH\" && gt tap guard pr-workflow"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": "Bash(git switch -c*)",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/.local/bin:$PATH\" && gt tap guard pr-workflow"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"SessionStart": [
|
||||
{
|
||||
"matcher": "",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt prime && gt nudge deacon session-started"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt prime --hook && gt nudge deacon session-started"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -20,7 +49,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt prime"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt prime --hook"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -3,6 +3,8 @@ package cmd
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -54,15 +56,33 @@ func setupTestTownForAccount(t *testing.T) (townRoot string, accountsDir string)
|
||||
return townRoot, accountsDir
|
||||
}
|
||||
|
||||
func setTestHome(t *testing.T, fakeHome string) {
|
||||
t.Helper()
|
||||
|
||||
t.Setenv("HOME", fakeHome)
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
return
|
||||
}
|
||||
|
||||
t.Setenv("USERPROFILE", fakeHome)
|
||||
|
||||
drive := filepath.VolumeName(fakeHome)
|
||||
if drive == "" {
|
||||
return
|
||||
}
|
||||
|
||||
t.Setenv("HOMEDRIVE", drive)
|
||||
t.Setenv("HOMEPATH", strings.TrimPrefix(fakeHome, drive))
|
||||
}
|
||||
|
||||
func TestAccountSwitch(t *testing.T) {
|
||||
t.Run("switch between accounts", func(t *testing.T) {
|
||||
townRoot, accountsDir := setupTestTownForAccount(t)
|
||||
|
||||
// Create fake home directory for ~/.claude
|
||||
fakeHome := t.TempDir()
|
||||
originalHome := os.Getenv("HOME")
|
||||
os.Setenv("HOME", fakeHome)
|
||||
defer os.Setenv("HOME", originalHome)
|
||||
setTestHome(t, fakeHome)
|
||||
|
||||
// Create account config directories
|
||||
workConfigDir := filepath.Join(accountsDir, "work")
|
||||
@@ -133,9 +153,7 @@ func TestAccountSwitch(t *testing.T) {
|
||||
townRoot, accountsDir := setupTestTownForAccount(t)
|
||||
|
||||
fakeHome := t.TempDir()
|
||||
originalHome := os.Getenv("HOME")
|
||||
os.Setenv("HOME", fakeHome)
|
||||
defer os.Setenv("HOME", originalHome)
|
||||
setTestHome(t, fakeHome)
|
||||
|
||||
workConfigDir := filepath.Join(accountsDir, "work")
|
||||
if err := os.MkdirAll(workConfigDir, 0755); err != nil {
|
||||
@@ -186,9 +204,7 @@ func TestAccountSwitch(t *testing.T) {
|
||||
townRoot, accountsDir := setupTestTownForAccount(t)
|
||||
|
||||
fakeHome := t.TempDir()
|
||||
originalHome := os.Getenv("HOME")
|
||||
os.Setenv("HOME", fakeHome)
|
||||
defer os.Setenv("HOME", originalHome)
|
||||
setTestHome(t, fakeHome)
|
||||
|
||||
workConfigDir := filepath.Join(accountsDir, "work")
|
||||
if err := os.MkdirAll(workConfigDir, 0755); err != nil {
|
||||
@@ -224,9 +240,7 @@ func TestAccountSwitch(t *testing.T) {
|
||||
townRoot, accountsDir := setupTestTownForAccount(t)
|
||||
|
||||
fakeHome := t.TempDir()
|
||||
originalHome := os.Getenv("HOME")
|
||||
os.Setenv("HOME", fakeHome)
|
||||
defer os.Setenv("HOME", originalHome)
|
||||
setTestHome(t, fakeHome)
|
||||
|
||||
workConfigDir := filepath.Join(accountsDir, "work")
|
||||
personalConfigDir := filepath.Join(accountsDir, "personal")
|
||||
|
||||
187
internal/cmd/bead.go
Normal file
187
internal/cmd/bead.go
Normal file
@@ -0,0 +1,187 @@
|
||||
package cmd
|
||||
|
||||
import (
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"strings"

	"github.com/spf13/cobra"
	"github.com/steveyegge/gastown/internal/style"
)
|
||||
|
||||
// beadCmd is the parent "gt bead" command; subcommands are registered in init.
var beadCmd = &cobra.Command{
	Use:     "bead",
	Aliases: []string{"bd"},
	GroupID: GroupWork,
	Short:   "Bead management utilities",
	Long:    `Utilities for managing beads across repositories.`,
}
|
||||
|
||||
// beadMoveCmd implements "gt bead move": copy a bead into another repository
// (selected by prefix) and close the source with a pointer to the copy.
var beadMoveCmd = &cobra.Command{
	Use:   "move <bead-id> <target-prefix>",
	Short: "Move a bead to a different repository",
	Long: `Move a bead from one repository to another.

This creates a copy of the bead in the target repository (with the new prefix)
and closes the source bead with a reference to the new location.

The target prefix determines which repository receives the bead.
Common prefixes: gt- (gastown), bd- (beads), hq- (headquarters)

Examples:
  gt bead move gt-abc123 bd-   # Move gt-abc123 to beads repo as bd-*
  gt bead move hq-xyz bd-      # Move hq-xyz to beads repo
  gt bead move bd-123 gt-      # Move bd-123 to gastown repo`,
	Args: cobra.ExactArgs(2),
	RunE: runBeadMove,
}

// beadMoveDryRun is set by the --dry-run/-n flag; when true, runBeadMove
// only prints what it would do.
var beadMoveDryRun bool
|
||||
|
||||
// beadShowCmd is a thin alias for "gt show". Flag parsing is disabled so
// every flag is forwarded verbatim to the underlying bd show invocation.
var beadShowCmd = &cobra.Command{
	Use:   "show <bead-id> [flags]",
	Short: "Show details of a bead",
	Long: `Displays the full details of a bead by ID.

This is an alias for 'gt show'. All bd show flags are supported.

Examples:
  gt bead show gt-abc123          # Show a gastown issue
  gt bead show hq-xyz789          # Show a town-level bead
  gt bead show bd-def456          # Show a beads issue
  gt bead show gt-abc123 --json   # Output as JSON`,
	DisableFlagParsing: true, // Pass all flags through to bd show
	RunE: func(cmd *cobra.Command, args []string) error {
		return runShow(cmd, args)
	},
}
|
||||
|
||||
// beadReadCmd is a second alias ("read") for the show behavior, sharing the
// same pass-through runShow implementation as beadShowCmd.
var beadReadCmd = &cobra.Command{
	Use:   "read <bead-id> [flags]",
	Short: "Show details of a bead (alias for 'show')",
	Long: `Displays the full details of a bead by ID.

This is an alias for 'gt bead show'. All bd show flags are supported.

Examples:
  gt bead read gt-abc123          # Show a gastown issue
  gt bead read hq-xyz789          # Show a town-level bead
  gt bead read bd-def456          # Show a beads issue
  gt bead read gt-abc123 --json   # Output as JSON`,
	DisableFlagParsing: true, // Pass all flags through to bd show
	RunE: func(cmd *cobra.Command, args []string) error {
		return runShow(cmd, args)
	},
}
|
||||
|
||||
// init wires the bead subcommands and flags into the root command tree.
func init() {
	beadMoveCmd.Flags().BoolVarP(&beadMoveDryRun, "dry-run", "n", false, "Show what would be done")
	beadCmd.AddCommand(beadMoveCmd)
	beadCmd.AddCommand(beadShowCmd)
	beadCmd.AddCommand(beadReadCmd)
	rootCmd.AddCommand(beadCmd)
}
|
||||
|
||||
// moveBeadInfo holds the essential fields we need to copy when moving beads.
// Field tags mirror the JSON emitted by `bd show --json`.
type moveBeadInfo struct {
	ID          string   `json:"id"`
	Title       string   `json:"title"`
	Type        string   `json:"issue_type"` // bd names this field issue_type in its JSON
	Priority    int      `json:"priority"`
	Description string   `json:"description"`
	Labels      []string `json:"labels"`
	Assignee    string   `json:"assignee"`
	Status      string   `json:"status"` // used to refuse moving closed beads
}
|
||||
|
||||
func runBeadMove(cmd *cobra.Command, args []string) error {
|
||||
sourceID := args[0]
|
||||
targetPrefix := args[1]
|
||||
|
||||
// Normalize prefix (ensure it ends with -)
|
||||
if !strings.HasSuffix(targetPrefix, "-") {
|
||||
targetPrefix = targetPrefix + "-"
|
||||
}
|
||||
|
||||
// Get source bead details
|
||||
showCmd := exec.Command("bd", "show", sourceID, "--json")
|
||||
output, err := showCmd.Output()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting bead %s: %w", sourceID, err)
|
||||
}
|
||||
|
||||
// bd show --json returns an array
|
||||
var sources []moveBeadInfo
|
||||
if err := json.Unmarshal(output, &sources); err != nil {
|
||||
return fmt.Errorf("parsing bead data: %w", err)
|
||||
}
|
||||
if len(sources) == 0 {
|
||||
return fmt.Errorf("bead %s not found", sourceID)
|
||||
}
|
||||
source := sources[0]
|
||||
|
||||
// Don't move closed beads
|
||||
if source.Status == "closed" {
|
||||
return fmt.Errorf("cannot move closed bead %s", sourceID)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Moving %s to %s...\n", style.Bold.Render("→"), sourceID, targetPrefix)
|
||||
fmt.Printf(" Title: %s\n", source.Title)
|
||||
fmt.Printf(" Type: %s\n", source.Type)
|
||||
|
||||
if beadMoveDryRun {
|
||||
fmt.Printf("\nDry run - would:\n")
|
||||
fmt.Printf(" 1. Create new bead with prefix %s\n", targetPrefix)
|
||||
fmt.Printf(" 2. Close %s with reference to new bead\n", sourceID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Build create command for target
|
||||
createArgs := []string{
|
||||
"create",
|
||||
"--prefix", targetPrefix,
|
||||
"--title", source.Title,
|
||||
"--type", source.Type,
|
||||
"--priority", fmt.Sprintf("%d", source.Priority),
|
||||
"--silent", // Only output the ID
|
||||
}
|
||||
|
||||
if source.Description != "" {
|
||||
createArgs = append(createArgs, "--description", source.Description)
|
||||
}
|
||||
if source.Assignee != "" {
|
||||
createArgs = append(createArgs, "--assignee", source.Assignee)
|
||||
}
|
||||
for _, label := range source.Labels {
|
||||
createArgs = append(createArgs, "--label", label)
|
||||
}
|
||||
|
||||
// Create the new bead
|
||||
createCmd := exec.Command("bd", createArgs...)
|
||||
createCmd.Stderr = os.Stderr
|
||||
newIDBytes, err := createCmd.Output()
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating new bead: %w", err)
|
||||
}
|
||||
newID := strings.TrimSpace(string(newIDBytes))
|
||||
|
||||
fmt.Printf("%s Created %s\n", style.Bold.Render("✓"), newID)
|
||||
|
||||
// Close the source bead with reference
|
||||
closeReason := fmt.Sprintf("Moved to %s", newID)
|
||||
closeCmd := exec.Command("bd", "close", sourceID, "--reason", closeReason)
|
||||
closeCmd.Stderr = os.Stderr
|
||||
if err := closeCmd.Run(); err != nil {
|
||||
// Try to clean up the new bead if close fails
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to close source bead: %v\n", err)
|
||||
fmt.Fprintf(os.Stderr, "New bead %s was created but source %s remains open\n", newID, sourceID)
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("%s Closed %s (moved to %s)\n", style.Bold.Render("✓"), sourceID, newID)
|
||||
fmt.Printf("\nBead moved: %s → %s\n", sourceID, newID)
|
||||
|
||||
return nil
|
||||
}
|
||||
419
internal/cmd/beads_db_init_test.go
Normal file
419
internal/cmd/beads_db_init_test.go
Normal file
@@ -0,0 +1,419 @@
|
||||
//go:build integration
|
||||
|
||||
// Package cmd contains integration tests for beads db initialization after clone.
|
||||
//
|
||||
// Run with: go test -tags=integration ./internal/cmd -run TestBeadsDbInitAfterClone -v
|
||||
//
|
||||
// Bug: GitHub Issue #72
|
||||
// When a repo with tracked .beads/ is added as a rig, beads.db doesn't exist
|
||||
// (it's gitignored) and bd operations fail because no one runs `bd init`.
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// createTrackedBeadsRepoWithIssues creates a git repo with .beads/ tracked that contains existing issues.
// This simulates a clone of a repo that has tracked beads with issues exported to issues.jsonl.
// The beads.db is NOT included (gitignored), so prefix must be detected from issues.jsonl.
//
// Parameters:
//   - path: directory to create the repo in (created if missing)
//   - prefix: beads issue prefix passed to `bd init`
//   - numIssues: how many placeholder issues to create
//
// Fails the test via t.Fatalf on any git/bd error.
func createTrackedBeadsRepoWithIssues(t *testing.T, path, prefix string, numIssues int) {
	t.Helper()

	// Create directory
	if err := os.MkdirAll(path, 0755); err != nil {
		t.Fatalf("mkdir repo: %v", err)
	}

	// Initialize git repo with explicit main branch
	// (--initial-branch avoids depending on the user's init.defaultBranch).
	cmds := [][]string{
		{"git", "init", "--initial-branch=main"},
		{"git", "config", "user.email", "test@test.com"},
		{"git", "config", "user.name", "Test User"},
	}
	for _, args := range cmds {
		cmd := exec.Command(args[0], args[1:]...)
		cmd.Dir = path
		if out, err := cmd.CombinedOutput(); err != nil {
			t.Fatalf("git %v: %v\n%s", args, err, out)
		}
	}

	// Create initial file and commit (so we have something before beads)
	readmePath := filepath.Join(path, "README.md")
	if err := os.WriteFile(readmePath, []byte("# Test Repo\n"), 0644); err != nil {
		t.Fatalf("write README: %v", err)
	}

	commitCmds := [][]string{
		{"git", "add", "."},
		{"git", "commit", "-m", "Initial commit"},
	}
	for _, args := range commitCmds {
		cmd := exec.Command(args[0], args[1:]...)
		cmd.Dir = path
		if out, err := cmd.CombinedOutput(); err != nil {
			t.Fatalf("git %v: %v\n%s", args, err, out)
		}
	}

	// Initialize beads
	beadsDir := filepath.Join(path, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("mkdir .beads: %v", err)
	}

	// Run bd init (--no-daemon keeps the test hermetic, no background process)
	cmd := exec.Command("bd", "--no-daemon", "init", "--prefix", prefix)
	cmd.Dir = path
	if output, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("bd init failed: %v\nOutput: %s", err, output)
	}

	// Create issues
	for i := 1; i <= numIssues; i++ {
		cmd = exec.Command("bd", "--no-daemon", "-q", "create",
			"--type", "task", "--title", fmt.Sprintf("Test issue %d", i))
		cmd.Dir = path
		if output, err := cmd.CombinedOutput(); err != nil {
			t.Fatalf("bd create issue %d failed: %v\nOutput: %s", i, err, output)
		}
	}

	// Add .beads to git (simulating tracked beads)
	cmd = exec.Command("git", "add", ".beads")
	cmd.Dir = path
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("git add .beads: %v\n%s", err, out)
	}

	cmd = exec.Command("git", "commit", "-m", "Add beads with issues")
	cmd.Dir = path
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("git commit beads: %v\n%s", err, out)
	}

	// Remove beads.db to simulate what a clone would look like
	// (beads.db is gitignored, so cloned repos don't have it)
	dbPath := filepath.Join(beadsDir, "beads.db")
	if err := os.Remove(dbPath); err != nil {
		t.Fatalf("remove beads.db: %v", err)
	}
}
|
||||
|
||||
// TestBeadsDbInitAfterClone tests that when a tracked beads repo is added as a rig,
|
||||
// the beads database is properly initialized even though beads.db doesn't exist.
|
||||
func TestBeadsDbInitAfterClone(t *testing.T) {
|
||||
// Skip if bd is not available
|
||||
if _, err := exec.LookPath("bd"); err != nil {
|
||||
t.Skip("bd not installed, skipping test")
|
||||
}
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
gtBinary := buildGT(t)
|
||||
|
||||
t.Run("TrackedRepoWithExistingPrefix", func(t *testing.T) {
|
||||
// GitHub Issue #72: gt rig add should detect existing prefix from tracked beads
|
||||
// https://github.com/steveyegge/gastown/issues/72
|
||||
//
|
||||
// This tests that when a tracked beads repo has existing issues in issues.jsonl,
|
||||
// gt rig add can detect the prefix from those issues WITHOUT --prefix flag.
|
||||
|
||||
townRoot := filepath.Join(tmpDir, "town-prefix-test")
|
||||
reposDir := filepath.Join(tmpDir, "repos")
|
||||
os.MkdirAll(reposDir, 0755)
|
||||
|
||||
// Create a repo with existing beads prefix "existing-prefix" AND issues
|
||||
// This creates issues.jsonl with issues like "existing-prefix-1", etc.
|
||||
existingRepo := filepath.Join(reposDir, "existing-repo")
|
||||
createTrackedBeadsRepoWithIssues(t, existingRepo, "existing-prefix", 3)
|
||||
|
||||
// Install town
|
||||
cmd := exec.Command(gtBinary, "install", townRoot, "--name", "prefix-test")
|
||||
cmd.Env = append(os.Environ(), "HOME="+tmpDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("gt install failed: %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
// Add rig WITHOUT specifying --prefix - should detect "existing-prefix" from issues.jsonl
|
||||
cmd = exec.Command(gtBinary, "rig", "add", "myrig", existingRepo)
|
||||
cmd.Dir = townRoot
|
||||
cmd.Env = append(os.Environ(), "HOME="+tmpDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("gt rig add failed: %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
// Verify routes.jsonl has the prefix
|
||||
routesContent, err := os.ReadFile(filepath.Join(townRoot, ".beads", "routes.jsonl"))
|
||||
if err != nil {
|
||||
t.Fatalf("read routes.jsonl: %v", err)
|
||||
}
|
||||
|
||||
if !strings.Contains(string(routesContent), `"prefix":"existing-prefix-"`) {
|
||||
t.Errorf("routes.jsonl should contain existing-prefix-, got:\n%s", routesContent)
|
||||
}
|
||||
|
||||
// NOW TRY TO USE bd - this is the key test for the bug
|
||||
// Without the fix, beads.db doesn't exist and bd operations fail
|
||||
rigPath := filepath.Join(townRoot, "myrig", "mayor", "rig")
|
||||
cmd = exec.Command("bd", "--no-daemon", "--json", "-q", "create",
|
||||
"--type", "task", "--title", "test-from-rig")
|
||||
cmd.Dir = rigPath
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("bd create failed (bug!): %v\nOutput: %s\n\nThis is the bug: beads.db doesn't exist after clone because bd init was never run", err, output)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
if err := json.Unmarshal(output, &result); err != nil {
|
||||
t.Fatalf("parse output: %v", err)
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(result.ID, "existing-prefix-") {
|
||||
t.Errorf("expected existing-prefix- prefix, got %s", result.ID)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("TrackedRepoWithNoIssuesRequiresPrefix", func(t *testing.T) {
|
||||
// Regression test: When a tracked beads repo has NO issues (fresh init),
|
||||
// gt rig add must use the --prefix flag since there's nothing to detect from.
|
||||
|
||||
townRoot := filepath.Join(tmpDir, "town-no-issues")
|
||||
reposDir := filepath.Join(tmpDir, "repos-no-issues")
|
||||
os.MkdirAll(reposDir, 0755)
|
||||
|
||||
// Create a tracked beads repo with NO issues (just bd init)
|
||||
emptyRepo := filepath.Join(reposDir, "empty-repo")
|
||||
createTrackedBeadsRepoWithNoIssues(t, emptyRepo, "empty-prefix")
|
||||
|
||||
// Install town
|
||||
cmd := exec.Command(gtBinary, "install", townRoot, "--name", "no-issues-test")
|
||||
cmd.Env = append(os.Environ(), "HOME="+tmpDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("gt install failed: %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
// Add rig WITH --prefix since we can't detect from empty issues.jsonl
|
||||
cmd = exec.Command(gtBinary, "rig", "add", "emptyrig", emptyRepo, "--prefix", "empty-prefix")
|
||||
cmd.Dir = townRoot
|
||||
cmd.Env = append(os.Environ(), "HOME="+tmpDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("gt rig add with --prefix failed: %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
// Verify routes.jsonl has the prefix
|
||||
routesContent, err := os.ReadFile(filepath.Join(townRoot, ".beads", "routes.jsonl"))
|
||||
if err != nil {
|
||||
t.Fatalf("read routes.jsonl: %v", err)
|
||||
}
|
||||
|
||||
if !strings.Contains(string(routesContent), `"prefix":"empty-prefix-"`) {
|
||||
t.Errorf("routes.jsonl should contain empty-prefix-, got:\n%s", routesContent)
|
||||
}
|
||||
|
||||
// Verify bd operations work with the configured prefix
|
||||
rigPath := filepath.Join(townRoot, "emptyrig", "mayor", "rig")
|
||||
cmd = exec.Command("bd", "--no-daemon", "--json", "-q", "create",
|
||||
"--type", "task", "--title", "test-from-empty-repo")
|
||||
cmd.Dir = rigPath
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("bd create failed: %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
if err := json.Unmarshal(output, &result); err != nil {
|
||||
t.Fatalf("parse output: %v", err)
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(result.ID, "empty-prefix-") {
|
||||
t.Errorf("expected empty-prefix- prefix, got %s", result.ID)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("TrackedRepoWithPrefixMismatchErrors", func(t *testing.T) {
|
||||
// Test that when --prefix is explicitly provided but doesn't match
|
||||
// the prefix detected from existing issues, gt rig add fails with an error.
|
||||
|
||||
townRoot := filepath.Join(tmpDir, "town-mismatch")
|
||||
reposDir := filepath.Join(tmpDir, "repos-mismatch")
|
||||
os.MkdirAll(reposDir, 0755)
|
||||
|
||||
// Create a repo with existing beads prefix "real-prefix" with issues
|
||||
mismatchRepo := filepath.Join(reposDir, "mismatch-repo")
|
||||
createTrackedBeadsRepoWithIssues(t, mismatchRepo, "real-prefix", 2)
|
||||
|
||||
// Install town
|
||||
cmd := exec.Command(gtBinary, "install", townRoot, "--name", "mismatch-test")
|
||||
cmd.Env = append(os.Environ(), "HOME="+tmpDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("gt install failed: %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
// Add rig with WRONG --prefix - should fail
|
||||
cmd = exec.Command(gtBinary, "rig", "add", "mismatchrig", mismatchRepo, "--prefix", "wrong-prefix")
|
||||
cmd.Dir = townRoot
|
||||
cmd.Env = append(os.Environ(), "HOME="+tmpDir)
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
// Should fail
|
||||
if err == nil {
|
||||
t.Fatalf("gt rig add should have failed with prefix mismatch, but succeeded.\nOutput: %s", output)
|
||||
}
|
||||
|
||||
// Verify error message mentions the mismatch
|
||||
outputStr := string(output)
|
||||
if !strings.Contains(outputStr, "prefix mismatch") {
|
||||
t.Errorf("expected 'prefix mismatch' in error, got:\n%s", outputStr)
|
||||
}
|
||||
if !strings.Contains(outputStr, "real-prefix") {
|
||||
t.Errorf("expected 'real-prefix' (detected) in error, got:\n%s", outputStr)
|
||||
}
|
||||
if !strings.Contains(outputStr, "wrong-prefix") {
|
||||
t.Errorf("expected 'wrong-prefix' (provided) in error, got:\n%s", outputStr)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("TrackedRepoWithNoIssuesFallsBackToDerivedPrefix", func(t *testing.T) {
|
||||
// Test the fallback behavior: when a tracked beads repo has NO issues
|
||||
// and NO --prefix is provided, gt rig add should derive prefix from rig name.
|
||||
|
||||
townRoot := filepath.Join(tmpDir, "town-derived")
|
||||
reposDir := filepath.Join(tmpDir, "repos-derived")
|
||||
os.MkdirAll(reposDir, 0755)
|
||||
|
||||
// Create a tracked beads repo with NO issues
|
||||
derivedRepo := filepath.Join(reposDir, "derived-repo")
|
||||
createTrackedBeadsRepoWithNoIssues(t, derivedRepo, "original-prefix")
|
||||
|
||||
// Install town
|
||||
cmd := exec.Command(gtBinary, "install", townRoot, "--name", "derived-test")
|
||||
cmd.Env = append(os.Environ(), "HOME="+tmpDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("gt install failed: %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
// Add rig WITHOUT --prefix - should derive from rig name "testrig"
|
||||
// deriveBeadsPrefix("testrig") should produce some abbreviation
|
||||
cmd = exec.Command(gtBinary, "rig", "add", "testrig", derivedRepo)
|
||||
cmd.Dir = townRoot
|
||||
cmd.Env = append(os.Environ(), "HOME="+tmpDir)
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("gt rig add (no --prefix) failed: %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
// The output should mention "Using prefix" since detection failed
|
||||
if !strings.Contains(string(output), "Using prefix") {
|
||||
t.Logf("Output: %s", output)
|
||||
}
|
||||
|
||||
// Verify bd operations work - the key test is that beads.db was initialized
|
||||
rigPath := filepath.Join(townRoot, "testrig", "mayor", "rig")
|
||||
cmd = exec.Command("bd", "--no-daemon", "--json", "-q", "create",
|
||||
"--type", "task", "--title", "test-derived-prefix")
|
||||
cmd.Dir = rigPath
|
||||
output, err = cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("bd create failed (beads.db not initialized?): %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
if err := json.Unmarshal(output, &result); err != nil {
|
||||
t.Fatalf("parse output: %v", err)
|
||||
}
|
||||
|
||||
// The ID should have SOME prefix (derived from "testrig")
|
||||
// We don't care exactly what it is, just that bd works
|
||||
if result.ID == "" {
|
||||
t.Error("expected non-empty issue ID")
|
||||
}
|
||||
t.Logf("Created issue with derived prefix: %s", result.ID)
|
||||
})
|
||||
}
|
||||
|
||||
// createTrackedBeadsRepoWithNoIssues creates a git repo with .beads/ tracked but NO issues.
// This simulates a fresh bd init that was committed before any issues were created.
//
// Parameters:
//   - path: directory to create the repo in (created if missing)
//   - prefix: beads issue prefix passed to `bd init`
//
// Fails the test via t.Fatalf on any git/bd error.
func createTrackedBeadsRepoWithNoIssues(t *testing.T, path, prefix string) {
	t.Helper()

	// Create directory
	if err := os.MkdirAll(path, 0755); err != nil {
		t.Fatalf("mkdir repo: %v", err)
	}

	// Initialize git repo with explicit main branch
	// (--initial-branch avoids depending on the user's init.defaultBranch).
	cmds := [][]string{
		{"git", "init", "--initial-branch=main"},
		{"git", "config", "user.email", "test@test.com"},
		{"git", "config", "user.name", "Test User"},
	}
	for _, args := range cmds {
		cmd := exec.Command(args[0], args[1:]...)
		cmd.Dir = path
		if out, err := cmd.CombinedOutput(); err != nil {
			t.Fatalf("git %v: %v\n%s", args, err, out)
		}
	}

	// Create initial file and commit
	readmePath := filepath.Join(path, "README.md")
	if err := os.WriteFile(readmePath, []byte("# Test Repo\n"), 0644); err != nil {
		t.Fatalf("write README: %v", err)
	}

	commitCmds := [][]string{
		{"git", "add", "."},
		{"git", "commit", "-m", "Initial commit"},
	}
	for _, args := range commitCmds {
		cmd := exec.Command(args[0], args[1:]...)
		cmd.Dir = path
		if out, err := cmd.CombinedOutput(); err != nil {
			t.Fatalf("git %v: %v\n%s", args, err, out)
		}
	}

	// Initialize beads
	beadsDir := filepath.Join(path, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("mkdir .beads: %v", err)
	}

	// Run bd init (creates beads.db but no issues)
	cmd := exec.Command("bd", "--no-daemon", "init", "--prefix", prefix)
	cmd.Dir = path
	if output, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("bd init failed: %v\nOutput: %s", err, output)
	}

	// Add .beads to git (simulating tracked beads)
	cmd = exec.Command("git", "add", ".beads")
	cmd.Dir = path
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("git add .beads: %v\n%s", err, out)
	}

	cmd = exec.Command("git", "commit", "-m", "Add beads (no issues)")
	cmd.Dir = path
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("git commit beads: %v\n%s", err, out)
	}

	// Remove beads.db to simulate what a clone would look like
	dbPath := filepath.Join(beadsDir, "beads.db")
	if err := os.Remove(dbPath); err != nil {
		t.Fatalf("remove beads.db: %v", err)
	}
}
|
||||
@@ -6,10 +6,10 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
@@ -104,6 +104,58 @@ func setupRoutingTestTown(t *testing.T) string {
|
||||
return townRoot
|
||||
}
|
||||
|
||||
// initBeadsDBWithPrefix runs `bd init` in dir with the given issue-ID prefix
// and seeds an empty issues.jsonl. Fails the test on any error.
func initBeadsDBWithPrefix(t *testing.T, dir, prefix string) {
	t.Helper()

	initCmd := exec.Command("bd", "--no-daemon", "init", "--quiet", "--prefix", prefix)
	initCmd.Dir = dir
	if out, err := initCmd.CombinedOutput(); err != nil {
		t.Fatalf("bd init failed in %s: %v\n%s", dir, err, out)
	}

	// Create empty issues.jsonl to prevent bd auto-export from corrupting routes.jsonl.
	// Without this, bd create writes issue data to routes.jsonl (the first .jsonl file
	// it finds), corrupting the routing configuration. This mirrors what gt install does.
	issuesPath := filepath.Join(dir, ".beads", "issues.jsonl")
	if err := os.WriteFile(issuesPath, nil, 0644); err != nil {
		t.Fatalf("create issues.jsonl in %s: %v", dir, err)
	}
}
|
||||
|
||||
func createTestIssue(t *testing.T, dir, title string) *beads.Issue {
|
||||
t.Helper()
|
||||
|
||||
args := []string{"--no-daemon", "create", "--json", "--title", title, "--type", "task",
|
||||
"--description", "Integration test issue"}
|
||||
cmd := exec.Command("bd", args...)
|
||||
cmd.Dir = dir
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
combinedCmd := exec.Command("bd", args...)
|
||||
combinedCmd.Dir = dir
|
||||
combinedOutput, _ := combinedCmd.CombinedOutput()
|
||||
t.Fatalf("create issue in %s: %v\n%s", dir, err, combinedOutput)
|
||||
}
|
||||
|
||||
var issue beads.Issue
|
||||
if err := json.Unmarshal(output, &issue); err != nil {
|
||||
t.Fatalf("parse create output in %s: %v", dir, err)
|
||||
}
|
||||
if issue.ID == "" {
|
||||
t.Fatalf("create issue in %s returned empty ID", dir)
|
||||
}
|
||||
return &issue
|
||||
}
|
||||
|
||||
func hasIssueID(issues []*beads.Issue, id string) bool {
|
||||
for _, issue := range issues {
|
||||
if issue.ID == id {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// TestBeadsRoutingFromTownRoot verifies that bd show routes to correct rig
|
||||
// based on issue ID prefix when run from town root.
|
||||
func TestBeadsRoutingFromTownRoot(t *testing.T) {
|
||||
@@ -114,37 +166,38 @@ func TestBeadsRoutingFromTownRoot(t *testing.T) {
|
||||
|
||||
townRoot := setupRoutingTestTown(t)
|
||||
|
||||
initBeadsDBWithPrefix(t, townRoot, "hq")
|
||||
|
||||
gastownRigPath := filepath.Join(townRoot, "gastown", "mayor", "rig")
|
||||
testrigRigPath := filepath.Join(townRoot, "testrig", "mayor", "rig")
|
||||
initBeadsDBWithPrefix(t, gastownRigPath, "gt")
|
||||
initBeadsDBWithPrefix(t, testrigRigPath, "tr")
|
||||
|
||||
townIssue := createTestIssue(t, townRoot, "Town-level routing test")
|
||||
gastownIssue := createTestIssue(t, gastownRigPath, "Gastown routing test")
|
||||
testrigIssue := createTestIssue(t, testrigRigPath, "Testrig routing test")
|
||||
|
||||
tests := []struct {
|
||||
prefix string
|
||||
expectedRig string // Expected rig path fragment in error/output
|
||||
id string
|
||||
title string
|
||||
}{
|
||||
{"hq-", "."}, // Town-level beads
|
||||
{"gt-", "gastown"},
|
||||
{"tr-", "testrig"},
|
||||
{townIssue.ID, townIssue.Title},
|
||||
{gastownIssue.ID, gastownIssue.Title},
|
||||
{testrigIssue.ID, testrigIssue.Title},
|
||||
}
|
||||
|
||||
townBeads := beads.New(townRoot)
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.prefix, func(t *testing.T) {
|
||||
// Create a fake issue ID with the prefix
|
||||
issueID := tc.prefix + "test123"
|
||||
|
||||
// Run bd show - it will fail since issue doesn't exist,
|
||||
// but we're testing routing, not the issue itself
|
||||
cmd := exec.Command("bd", "--no-daemon", "show", issueID)
|
||||
cmd.Dir = townRoot
|
||||
cmd.Env = append(os.Environ(), "BD_DEBUG_ROUTING=1")
|
||||
output, _ := cmd.CombinedOutput()
|
||||
|
||||
// The debug routing output or error message should indicate
|
||||
// which beads directory was used
|
||||
outputStr := string(output)
|
||||
t.Logf("Output for %s: %s", issueID, outputStr)
|
||||
|
||||
// We expect either the routing debug output or an error from the correct beads
|
||||
// If routing works, the error will be about not finding the issue,
|
||||
// not about routing failure
|
||||
if strings.Contains(outputStr, "no matching route") {
|
||||
t.Errorf("routing failed for prefix %s: %s", tc.prefix, outputStr)
|
||||
t.Run(tc.id, func(t *testing.T) {
|
||||
issue, err := townBeads.Show(tc.id)
|
||||
if err != nil {
|
||||
t.Fatalf("bd show %s failed: %v", tc.id, err)
|
||||
}
|
||||
if issue.ID != tc.id {
|
||||
t.Errorf("issue.ID = %s, want %s", issue.ID, tc.id)
|
||||
}
|
||||
if issue.Title != tc.title {
|
||||
t.Errorf("issue.Title = %q, want %q", issue.Title, tc.title)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -263,30 +316,21 @@ func TestBeadsListFromPolecatDirectory(t *testing.T) {
|
||||
townRoot := setupRoutingTestTown(t)
|
||||
polecatDir := filepath.Join(townRoot, "gastown", "polecats", "rictus")
|
||||
|
||||
// Initialize beads in mayor/rig so bd list can work
|
||||
mayorRigBeads := filepath.Join(townRoot, "gastown", "mayor", "rig", ".beads")
|
||||
rigPath := filepath.Join(townRoot, "gastown", "mayor", "rig")
|
||||
initBeadsDBWithPrefix(t, rigPath, "gt")
|
||||
|
||||
// Create a minimal beads.db (or use bd init)
|
||||
// For now, just test that the redirect is followed
|
||||
cmd := exec.Command("bd", "--no-daemon", "list")
|
||||
cmd.Dir = polecatDir
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
// We expect either success (empty list) or an error about missing db,
|
||||
// but NOT an error about missing .beads directory (since redirect should work)
|
||||
outputStr := string(output)
|
||||
t.Logf("bd list output: %s", outputStr)
|
||||
issue := createTestIssue(t, rigPath, "Polecat list redirect test")
|
||||
|
||||
issues, err := beads.New(polecatDir).List(beads.ListOptions{
|
||||
Status: "open",
|
||||
Priority: -1,
|
||||
})
|
||||
if err != nil {
|
||||
// Check it's not a "no .beads directory" error
|
||||
if strings.Contains(outputStr, "no .beads directory") {
|
||||
t.Errorf("redirect not followed: %s", outputStr)
|
||||
}
|
||||
// Check it's finding the right beads directory via redirect
|
||||
if strings.Contains(outputStr, "redirect") && !strings.Contains(outputStr, mayorRigBeads) {
|
||||
// This is okay - the redirect is being processed
|
||||
t.Logf("redirect detected in output (expected)")
|
||||
}
|
||||
t.Fatalf("bd list from polecat dir failed: %v", err)
|
||||
}
|
||||
|
||||
if !hasIssueID(issues, issue.ID) {
|
||||
t.Errorf("bd list from polecat dir missing issue %s", issue.ID)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -300,18 +344,20 @@ func TestBeadsListFromCrewDirectory(t *testing.T) {
|
||||
townRoot := setupRoutingTestTown(t)
|
||||
crewDir := filepath.Join(townRoot, "gastown", "crew", "max")
|
||||
|
||||
cmd := exec.Command("bd", "--no-daemon", "list")
|
||||
cmd.Dir = crewDir
|
||||
output, err := cmd.CombinedOutput()
|
||||
rigPath := filepath.Join(townRoot, "gastown", "mayor", "rig")
|
||||
initBeadsDBWithPrefix(t, rigPath, "gt")
|
||||
|
||||
outputStr := string(output)
|
||||
t.Logf("bd list output from crew: %s", outputStr)
|
||||
issue := createTestIssue(t, rigPath, "Crew list redirect test")
|
||||
|
||||
issues, err := beads.New(crewDir).List(beads.ListOptions{
|
||||
Status: "open",
|
||||
Priority: -1,
|
||||
})
|
||||
if err != nil {
|
||||
// Check it's not a "no .beads directory" error
|
||||
if strings.Contains(outputStr, "no .beads directory") {
|
||||
t.Errorf("redirect not followed for crew: %s", outputStr)
|
||||
}
|
||||
t.Fatalf("bd list from crew dir failed: %v", err)
|
||||
}
|
||||
if !hasIssueID(issues, issue.ID) {
|
||||
t.Errorf("bd list from crew dir missing issue %s", issue.ID)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,11 +2,14 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MinBeadsVersion is the minimum required beads version for Gas Town.
|
||||
@@ -84,10 +87,19 @@ func (v beadsVersion) compare(other beadsVersion) int {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Pre-compiled regex for beads version parsing
|
||||
var beadsVersionRe = regexp.MustCompile(`bd version (\d+\.\d+(?:\.\d+)?(?:-\w+)?)`)
|
||||
|
||||
func getBeadsVersion() (string, error) {
|
||||
cmd := exec.Command("bd", "version")
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, "bd", "version")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
if ctx.Err() == context.DeadlineExceeded {
|
||||
return "", fmt.Errorf("bd version check timed out")
|
||||
}
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
return "", fmt.Errorf("bd version failed: %s", string(exitErr.Stderr))
|
||||
}
|
||||
@@ -96,8 +108,7 @@ func getBeadsVersion() (string, error) {
|
||||
|
||||
// Parse output like "bd version 0.44.0 (dev)"
|
||||
// or "bd version 0.44.0"
|
||||
re := regexp.MustCompile(`bd version (\d+\.\d+(?:\.\d+)?(?:-\w+)?)`)
|
||||
matches := re.FindStringSubmatch(string(output))
|
||||
matches := beadsVersionRe.FindStringSubmatch(string(output))
|
||||
if len(matches) < 2 {
|
||||
return "", fmt.Errorf("could not parse beads version from: %s", strings.TrimSpace(string(output)))
|
||||
}
|
||||
@@ -105,9 +116,22 @@ func getBeadsVersion() (string, error) {
|
||||
return matches[1], nil
|
||||
}
|
||||
|
||||
var (
|
||||
cachedVersionCheckResult error
|
||||
versionCheckOnce sync.Once
|
||||
)
|
||||
|
||||
// CheckBeadsVersion verifies that the installed beads version meets the minimum requirement.
|
||||
// Returns nil if the version is sufficient, or an error with details if not.
|
||||
// The check is performed only once per process execution.
|
||||
func CheckBeadsVersion() error {
|
||||
versionCheckOnce.Do(func() {
|
||||
cachedVersionCheckResult = checkBeadsVersionInternal()
|
||||
})
|
||||
return cachedVersionCheckResult
|
||||
}
|
||||
|
||||
func checkBeadsVersionInternal() error {
|
||||
installedStr, err := getBeadsVersion()
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot verify beads version: %w", err)
|
||||
|
||||
@@ -14,8 +14,9 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
bootStatusJSON bool
|
||||
bootDegraded bool
|
||||
bootStatusJSON bool
|
||||
bootDegraded bool
|
||||
bootAgentOverride string
|
||||
)
|
||||
|
||||
var bootCmd = &cobra.Command{
|
||||
@@ -84,6 +85,7 @@ Use --degraded flag when running in degraded mode.`,
|
||||
func init() {
|
||||
bootStatusCmd.Flags().BoolVar(&bootStatusJSON, "json", false, "Output as JSON")
|
||||
bootTriageCmd.Flags().BoolVar(&bootDegraded, "degraded", false, "Run in degraded mode (no tmux)")
|
||||
bootSpawnCmd.Flags().StringVar(&bootAgentOverride, "agent", "", "Agent alias to run Boot with (overrides town default)")
|
||||
|
||||
bootCmd.AddCommand(bootStatusCmd)
|
||||
bootCmd.AddCommand(bootSpawnCmd)
|
||||
@@ -206,7 +208,7 @@ func runBootSpawn(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// Spawn Boot
|
||||
if err := b.Spawn(); err != nil {
|
||||
if err := b.Spawn(bootAgentOverride); err != nil {
|
||||
status.Error = err.Error()
|
||||
status.CompletedAt = time.Now()
|
||||
status.Running = false
|
||||
@@ -299,9 +301,10 @@ func runDegradedTriage(b *boot.Boot) (action, target string, err error) {
|
||||
// Nudge the session to try to wake it up
|
||||
age := hb.Age()
|
||||
if age > 30*time.Minute {
|
||||
// Very stuck - restart the session
|
||||
// Very stuck - restart the session.
|
||||
// Use KillSessionWithProcesses to ensure all descendant processes are killed.
|
||||
fmt.Printf("Deacon heartbeat is %s old - restarting session\n", age.Round(time.Minute))
|
||||
if err := tm.KillSession(deaconSession); err == nil {
|
||||
if err := tm.KillSessionWithProcesses(deaconSession); err == nil {
|
||||
return "restart", "deacon-stuck", nil
|
||||
}
|
||||
} else {
|
||||
|
||||
19
internal/cmd/boot_test.go
Normal file
19
internal/cmd/boot_test.go
Normal file
@@ -0,0 +1,19 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBootSpawnAgentFlag(t *testing.T) {
|
||||
flag := bootSpawnCmd.Flags().Lookup("agent")
|
||||
if flag == nil {
|
||||
t.Fatal("expected boot spawn to define --agent flag")
|
||||
}
|
||||
if flag.DefValue != "" {
|
||||
t.Errorf("expected default agent override to be empty, got %q", flag.DefValue)
|
||||
}
|
||||
if !strings.Contains(flag.Usage, "overrides town default") {
|
||||
t.Errorf("expected --agent usage to mention overrides town default, got %q", flag.Usage)
|
||||
}
|
||||
}
|
||||
@@ -2,6 +2,7 @@ package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
@@ -55,6 +56,9 @@ func runBroadcast(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("listing sessions: %w", err)
|
||||
}
|
||||
|
||||
// Get sender identity to exclude self
|
||||
sender := os.Getenv("BD_ACTOR")
|
||||
|
||||
// Filter to target agents
|
||||
var targets []*AgentSession
|
||||
for _, agent := range agents {
|
||||
@@ -70,6 +74,11 @@ func runBroadcast(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Skip self to avoid interrupting own session
|
||||
if sender != "" && formatAgentName(agent) == sender {
|
||||
continue
|
||||
}
|
||||
|
||||
targets = append(targets, agent)
|
||||
}
|
||||
|
||||
|
||||
66
internal/cmd/cat.go
Normal file
66
internal/cmd/cat.go
Normal file
@@ -0,0 +1,66 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var catJSON bool
|
||||
|
||||
var catCmd = &cobra.Command{
|
||||
Use: "cat <bead-id>",
|
||||
GroupID: GroupWork,
|
||||
Short: "Display bead content",
|
||||
Long: `Display the content of a bead (issue, task, molecule, etc.).
|
||||
|
||||
This is a convenience wrapper around 'bd show' that integrates with gt.
|
||||
Accepts any bead ID (bd-*, hq-*, mol-*).
|
||||
|
||||
Examples:
|
||||
gt cat bd-abc123 # Show a bead
|
||||
gt cat hq-xyz789 # Show a town-level bead
|
||||
gt cat bd-abc --json # Output as JSON`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: runCat,
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(catCmd)
|
||||
catCmd.Flags().BoolVar(&catJSON, "json", false, "Output as JSON")
|
||||
}
|
||||
|
||||
func runCat(cmd *cobra.Command, args []string) error {
|
||||
beadID := args[0]
|
||||
|
||||
// Validate it looks like a bead ID
|
||||
if !isBeadID(beadID) {
|
||||
return fmt.Errorf("invalid bead ID %q (expected bd-*, hq-*, or mol-* prefix)", beadID)
|
||||
}
|
||||
|
||||
// Build bd show command
|
||||
bdArgs := []string{"show", beadID}
|
||||
if catJSON {
|
||||
bdArgs = append(bdArgs, "--json")
|
||||
}
|
||||
|
||||
bdCmd := exec.Command("bd", bdArgs...)
|
||||
bdCmd.Stdout = os.Stdout
|
||||
bdCmd.Stderr = os.Stderr
|
||||
|
||||
return bdCmd.Run()
|
||||
}
|
||||
|
||||
// isBeadID checks if a string looks like a bead ID, i.e. starts with one of
// the known bead prefixes (bd-, hq-, mol-).
func isBeadID(s string) bool {
	return strings.HasPrefix(s, "bd-") ||
		strings.HasPrefix(s, "hq-") ||
		strings.HasPrefix(s, "mol-")
}
|
||||
54
internal/cmd/close.go
Normal file
54
internal/cmd/close.go
Normal file
@@ -0,0 +1,54 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var closeCmd = &cobra.Command{
|
||||
Use: "close [bead-id...]",
|
||||
GroupID: GroupWork,
|
||||
Short: "Close one or more beads",
|
||||
Long: `Close one or more beads (wrapper for 'bd close').
|
||||
|
||||
This is a convenience command that passes through to 'bd close' with
|
||||
all arguments and flags preserved.
|
||||
|
||||
Examples:
|
||||
gt close gt-abc # Close bead gt-abc
|
||||
gt close gt-abc gt-def # Close multiple beads
|
||||
gt close --reason "Done" # Close with reason
|
||||
gt close --comment "Done" # Same as --reason (alias)
|
||||
gt close --force # Force close pinned beads`,
|
||||
DisableFlagParsing: true, // Pass all flags through to bd close
|
||||
RunE: runClose,
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(closeCmd)
|
||||
}
|
||||
|
||||
func runClose(cmd *cobra.Command, args []string) error {
|
||||
// Convert --comment to --reason (alias support)
|
||||
convertedArgs := make([]string, len(args))
|
||||
for i, arg := range args {
|
||||
if arg == "--comment" {
|
||||
convertedArgs[i] = "--reason"
|
||||
} else if strings.HasPrefix(arg, "--comment=") {
|
||||
convertedArgs[i] = "--reason=" + strings.TrimPrefix(arg, "--comment=")
|
||||
} else {
|
||||
convertedArgs[i] = arg
|
||||
}
|
||||
}
|
||||
|
||||
// Build bd close command with all args passed through
|
||||
bdArgs := append([]string{"close"}, convertedArgs...)
|
||||
bdCmd := exec.Command("bd", bdArgs...)
|
||||
bdCmd.Stdin = os.Stdin
|
||||
bdCmd.Stdout = os.Stdout
|
||||
bdCmd.Stderr = os.Stderr
|
||||
return bdCmd.Run()
|
||||
}
|
||||
118
internal/cmd/commit.go
Normal file
118
internal/cmd/commit.go
Normal file
@@ -0,0 +1,118 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
|
||||
// DefaultAgentEmailDomain is the default domain for agent git emails.
|
||||
const DefaultAgentEmailDomain = "gastown.local"
|
||||
|
||||
var commitCmd = &cobra.Command{
|
||||
Use: "commit [flags] [-- git-commit-args...]",
|
||||
Short: "Git commit with automatic agent identity",
|
||||
Long: `Git commit wrapper that automatically sets git author identity for agents.
|
||||
|
||||
When run by an agent (GT_ROLE set), this command:
|
||||
1. Detects the agent identity from environment variables
|
||||
2. Converts it to a git-friendly name and email
|
||||
3. Runs 'git commit' with the correct identity
|
||||
|
||||
The email domain is configurable in town settings (agent_email_domain).
|
||||
Default: gastown.local
|
||||
|
||||
Examples:
|
||||
gt commit -m "Fix bug" # Commit as current agent
|
||||
gt commit -am "Quick fix" # Stage all and commit
|
||||
gt commit -- --amend # Amend last commit
|
||||
|
||||
Identity mapping:
|
||||
Agent: gastown/crew/jack → Name: gastown/crew/jack
|
||||
Email: gastown.crew.jack@gastown.local
|
||||
|
||||
When run without GT_ROLE (human), passes through to git commit with no changes.`,
|
||||
RunE: runCommit,
|
||||
DisableFlagParsing: true, // We'll parse flags ourselves to pass them to git
|
||||
}
|
||||
|
||||
func init() {
|
||||
commitCmd.GroupID = GroupWork
|
||||
rootCmd.AddCommand(commitCmd)
|
||||
}
|
||||
|
||||
func runCommit(cmd *cobra.Command, args []string) error {
|
||||
// Detect agent identity
|
||||
identity := detectSender()
|
||||
|
||||
// If overseer (human), just pass through to git commit
|
||||
if identity == "overseer" {
|
||||
return runGitCommit(args, "", "")
|
||||
}
|
||||
|
||||
// Load agent email domain from town settings
|
||||
domain := DefaultAgentEmailDomain
|
||||
townRoot, err := workspace.FindFromCwd()
|
||||
if err == nil && townRoot != "" {
|
||||
settings, err := config.LoadOrCreateTownSettings(config.TownSettingsPath(townRoot))
|
||||
if err == nil && settings.AgentEmailDomain != "" {
|
||||
domain = settings.AgentEmailDomain
|
||||
}
|
||||
}
|
||||
|
||||
// Convert identity to git-friendly email
|
||||
// "gastown/crew/jack" → "gastown.crew.jack@domain"
|
||||
email := identityToEmail(identity, domain)
|
||||
|
||||
// Use identity as the author name (human-readable)
|
||||
name := identity
|
||||
|
||||
return runGitCommit(args, name, email)
|
||||
}
|
||||
|
||||
// identityToEmail converts a Gas Town identity to a git email address.
// "gastown/crew/jack" → "gastown.crew.jack@domain"
// "mayor/" → "mayor@domain"
func identityToEmail(identity, domain string) string {
	// Drop any trailing slash, then flatten path separators into dots to
	// form the local part of the address.
	trimmed := strings.TrimSuffix(identity, "/")
	return strings.ReplaceAll(trimmed, "/", ".") + "@" + domain
}
|
||||
|
||||
// runGitCommit executes git commit with optional identity override.
// If name and email are empty, runs git commit with no overrides.
// Preserves git's exit code for proper wrapper behavior.
func runGitCommit(args []string, name, email string) error {
	gitArgs := make([]string, 0, len(args)+5)

	// Identity overrides go in as -c config flags ahead of the subcommand.
	if name != "" && email != "" {
		gitArgs = append(gitArgs, "-c", "user.name="+name, "-c", "user.email="+email)
	}
	gitArgs = append(gitArgs, "commit")
	gitArgs = append(gitArgs, args...)

	gitCmd := exec.Command("git", gitArgs...)
	gitCmd.Stdin = os.Stdin
	gitCmd.Stdout = os.Stdout
	gitCmd.Stderr = os.Stderr

	err := gitCmd.Run()
	if err == nil {
		return nil
	}
	// Mirror git's exit status so scripts wrapping gt see the real result.
	if exitErr, ok := err.(*exec.ExitError); ok {
		os.Exit(exitErr.ExitCode())
	}
	return err
}
|
||||
71
internal/cmd/commit_test.go
Normal file
71
internal/cmd/commit_test.go
Normal file
@@ -0,0 +1,71 @@
|
||||
package cmd
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestIdentityToEmail(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
identity string
|
||||
domain string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "crew member",
|
||||
identity: "gastown/crew/jack",
|
||||
domain: "gastown.local",
|
||||
want: "gastown.crew.jack@gastown.local",
|
||||
},
|
||||
{
|
||||
name: "polecat",
|
||||
identity: "gastown/polecats/max",
|
||||
domain: "gastown.local",
|
||||
want: "gastown.polecats.max@gastown.local",
|
||||
},
|
||||
{
|
||||
name: "witness",
|
||||
identity: "gastown/witness",
|
||||
domain: "gastown.local",
|
||||
want: "gastown.witness@gastown.local",
|
||||
},
|
||||
{
|
||||
name: "refinery",
|
||||
identity: "gastown/refinery",
|
||||
domain: "gastown.local",
|
||||
want: "gastown.refinery@gastown.local",
|
||||
},
|
||||
{
|
||||
name: "mayor with trailing slash",
|
||||
identity: "mayor/",
|
||||
domain: "gastown.local",
|
||||
want: "mayor@gastown.local",
|
||||
},
|
||||
{
|
||||
name: "deacon with trailing slash",
|
||||
identity: "deacon/",
|
||||
domain: "gastown.local",
|
||||
want: "deacon@gastown.local",
|
||||
},
|
||||
{
|
||||
name: "custom domain",
|
||||
identity: "myrig/crew/alice",
|
||||
domain: "example.com",
|
||||
want: "myrig.crew.alice@example.com",
|
||||
},
|
||||
{
|
||||
name: "deeply nested",
|
||||
identity: "rig/polecats/nested/deep",
|
||||
domain: "test.io",
|
||||
want: "rig.polecats.nested.deep@test.io",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := identityToEmail(tt.identity, tt.domain)
|
||||
if got != tt.want {
|
||||
t.Errorf("identityToEmail(%q, %q) = %q, want %q",
|
||||
tt.identity, tt.domain, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -119,6 +119,27 @@ Examples:
|
||||
RunE: runConfigDefaultAgent,
|
||||
}
|
||||
|
||||
var configAgentEmailDomainCmd = &cobra.Command{
|
||||
Use: "agent-email-domain [domain]",
|
||||
Short: "Get or set agent email domain",
|
||||
Long: `Get or set the domain used for agent git commit emails.
|
||||
|
||||
When agents commit code via 'gt commit', their identity is converted
|
||||
to a git email address. For example, "gastown/crew/jack" becomes
|
||||
"gastown.crew.jack@{domain}".
|
||||
|
||||
With no arguments, shows the current domain.
|
||||
With an argument, sets the domain.
|
||||
|
||||
Default: gastown.local
|
||||
|
||||
Examples:
|
||||
gt config agent-email-domain # Show current domain
|
||||
gt config agent-email-domain gastown.local # Set to gastown.local
|
||||
gt config agent-email-domain example.com # Set custom domain`,
|
||||
RunE: runConfigAgentEmailDomain,
|
||||
}
|
||||
|
||||
// Flags
|
||||
var (
|
||||
configAgentListJSON bool
|
||||
@@ -444,6 +465,54 @@ func runConfigDefaultAgent(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func runConfigAgentEmailDomain(cmd *cobra.Command, args []string) error {
|
||||
townRoot, err := workspace.FindFromCwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("finding town root: %w", err)
|
||||
}
|
||||
|
||||
// Load town settings
|
||||
settingsPath := config.TownSettingsPath(townRoot)
|
||||
townSettings, err := config.LoadOrCreateTownSettings(settingsPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading town settings: %w", err)
|
||||
}
|
||||
|
||||
if len(args) == 0 {
|
||||
// Show current domain
|
||||
domain := townSettings.AgentEmailDomain
|
||||
if domain == "" {
|
||||
domain = DefaultAgentEmailDomain
|
||||
}
|
||||
fmt.Printf("Agent email domain: %s\n", style.Bold.Render(domain))
|
||||
fmt.Printf("\nExample: gastown/crew/jack → gastown.crew.jack@%s\n", domain)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set new domain
|
||||
domain := args[0]
|
||||
|
||||
// Basic validation - domain should not be empty and should not start with @
|
||||
if domain == "" {
|
||||
return fmt.Errorf("domain cannot be empty")
|
||||
}
|
||||
if strings.HasPrefix(domain, "@") {
|
||||
return fmt.Errorf("domain should not include @: use '%s' instead", strings.TrimPrefix(domain, "@"))
|
||||
}
|
||||
|
||||
// Set domain
|
||||
townSettings.AgentEmailDomain = domain
|
||||
|
||||
// Save settings
|
||||
if err := config.SaveTownSettings(settingsPath, townSettings); err != nil {
|
||||
return fmt.Errorf("saving town settings: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Agent email domain set to '%s'\n", style.Bold.Render(domain))
|
||||
fmt.Printf("\nExample: gastown/crew/jack → gastown.crew.jack@%s\n", domain)
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Add flags
|
||||
configAgentListCmd.Flags().BoolVar(&configAgentListJSON, "json", false, "Output as JSON")
|
||||
@@ -462,6 +531,7 @@ func init() {
|
||||
// Add subcommands to config
|
||||
configCmd.AddCommand(configAgentCmd)
|
||||
configCmd.AddCommand(configDefaultAgentCmd)
|
||||
configCmd.AddCommand(configAgentEmailDomainCmd)
|
||||
|
||||
// Register with root
|
||||
rootCmd.AddCommand(configCmd)
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tui/convoy"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
@@ -62,6 +63,7 @@ func looksLikeIssueID(s string) bool {
|
||||
var (
|
||||
convoyMolecule string
|
||||
convoyNotify string
|
||||
convoyOwner string
|
||||
convoyStatusJSON bool
|
||||
convoyListJSON bool
|
||||
convoyListStatus string
|
||||
@@ -69,6 +71,9 @@ var (
|
||||
convoyListTree bool
|
||||
convoyInteractive bool
|
||||
convoyStrandedJSON bool
|
||||
convoyCloseReason string
|
||||
convoyCloseNotify string
|
||||
convoyCheckDryRun bool
|
||||
)
|
||||
|
||||
var convoyCmd = &cobra.Command{
|
||||
@@ -106,6 +111,7 @@ TRACKING SEMANTICS:
|
||||
COMMANDS:
|
||||
create Create a convoy tracking specified issues
|
||||
add Add issues to an existing convoy (reopens if closed)
|
||||
close Close a convoy (manually, regardless of tracked issue status)
|
||||
status Show convoy progress, tracked issues, and active workers
|
||||
list List convoys (the dashboard view)`,
|
||||
}
|
||||
@@ -118,10 +124,15 @@ var convoyCreateCmd = &cobra.Command{
|
||||
The convoy is created in town-level beads (hq-* prefix) and can track
|
||||
issues across any rig.
|
||||
|
||||
The --owner flag specifies who requested the convoy (receives completion
|
||||
notification by default). If not specified, defaults to created_by.
|
||||
The --notify flag adds additional subscribers beyond the owner.
|
||||
|
||||
Examples:
|
||||
gt convoy create "Deploy v2.0" gt-abc bd-xyz
|
||||
gt convoy create "Release prep" gt-abc --notify # defaults to mayor/
|
||||
gt convoy create "Release prep" gt-abc --notify ops/ # notify ops/
|
||||
gt convoy create "Feature rollout" gt-a gt-b --owner mayor/ --notify ops/
|
||||
gt convoy create "Feature rollout" gt-a gt-b gt-c --molecule mol-release`,
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
RunE: runConvoyCreate,
|
||||
@@ -167,14 +178,22 @@ Examples:
|
||||
}
|
||||
|
||||
var convoyCheckCmd = &cobra.Command{
|
||||
Use: "check",
|
||||
Use: "check [convoy-id]",
|
||||
Short: "Check and auto-close completed convoys",
|
||||
Long: `Check all open convoys and auto-close any where all tracked issues are complete.
|
||||
Long: `Check convoys and auto-close any where all tracked issues are complete.
|
||||
|
||||
Without arguments, checks all open convoys. With a convoy ID, checks only that convoy.
|
||||
|
||||
This handles cross-rig convoy completion: convoys in town beads tracking issues
|
||||
in rig beads won't auto-close via bd close alone. This command bridges that gap.
|
||||
|
||||
Can be run manually or by deacon patrol to ensure convoys close promptly.`,
|
||||
Can be run manually or by deacon patrol to ensure convoys close promptly.
|
||||
|
||||
Examples:
|
||||
gt convoy check # Check all open convoys
|
||||
gt convoy check hq-cv-abc # Check specific convoy
|
||||
gt convoy check --dry-run # Preview what would close without acting`,
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: runConvoyCheck,
|
||||
}
|
||||
|
||||
@@ -199,10 +218,31 @@ Examples:
|
||||
RunE: runConvoyStranded,
|
||||
}
|
||||
|
||||
var convoyCloseCmd = &cobra.Command{
|
||||
Use: "close <convoy-id>",
|
||||
Short: "Close a convoy",
|
||||
Long: `Close a convoy, optionally with a reason.
|
||||
|
||||
Closes the convoy regardless of tracked issue status. Use this to:
|
||||
- Force-close abandoned convoys no longer relevant
|
||||
- Close convoys where work completed outside the tracked path
|
||||
- Manually close stuck convoys
|
||||
|
||||
The close is idempotent - closing an already-closed convoy is a no-op.
|
||||
|
||||
Examples:
|
||||
gt convoy close hq-cv-abc
|
||||
gt convoy close hq-cv-abc --reason="work done differently"
|
||||
gt convoy close hq-cv-xyz --notify mayor/`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: runConvoyClose,
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Create flags
|
||||
convoyCreateCmd.Flags().StringVar(&convoyMolecule, "molecule", "", "Associated molecule ID")
|
||||
convoyCreateCmd.Flags().StringVar(&convoyNotify, "notify", "", "Address to notify on completion (default: mayor/ if flag used without value)")
|
||||
convoyCreateCmd.Flags().StringVar(&convoyOwner, "owner", "", "Owner who requested convoy (gets completion notification)")
|
||||
convoyCreateCmd.Flags().StringVar(&convoyNotify, "notify", "", "Additional address to notify on completion (default: mayor/ if flag used without value)")
|
||||
convoyCreateCmd.Flags().Lookup("notify").NoOptDefVal = "mayor/"
|
||||
|
||||
// Status flags
|
||||
@@ -217,9 +257,16 @@ func init() {
|
||||
// Interactive TUI flag (on parent command)
|
||||
convoyCmd.Flags().BoolVarP(&convoyInteractive, "interactive", "i", false, "Interactive tree view")
|
||||
|
||||
// Check flags
|
||||
convoyCheckCmd.Flags().BoolVar(&convoyCheckDryRun, "dry-run", false, "Preview what would close without acting")
|
||||
|
||||
// Stranded flags
|
||||
convoyStrandedCmd.Flags().BoolVar(&convoyStrandedJSON, "json", false, "Output as JSON")
|
||||
|
||||
// Close flags
|
||||
convoyCloseCmd.Flags().StringVar(&convoyCloseReason, "reason", "", "Reason for closing the convoy")
|
||||
convoyCloseCmd.Flags().StringVar(&convoyCloseNotify, "notify", "", "Agent to notify on close (e.g., mayor/)")
|
||||
|
||||
// Add subcommands
|
||||
convoyCmd.AddCommand(convoyCreateCmd)
|
||||
convoyCmd.AddCommand(convoyStatusCmd)
|
||||
@@ -227,6 +274,7 @@ func init() {
|
||||
convoyCmd.AddCommand(convoyAddCmd)
|
||||
convoyCmd.AddCommand(convoyCheckCmd)
|
||||
convoyCmd.AddCommand(convoyStrandedCmd)
|
||||
convoyCmd.AddCommand(convoyCloseCmd)
|
||||
|
||||
rootCmd.AddCommand(convoyCmd)
|
||||
}
|
||||
@@ -263,6 +311,15 @@ func runConvoyCreate(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Create convoy issue in town beads
|
||||
description := fmt.Sprintf("Convoy tracking %d issues", len(trackedIssues))
|
||||
|
||||
// Default owner to creator identity if not specified
|
||||
owner := convoyOwner
|
||||
if owner == "" {
|
||||
owner = detectSender()
|
||||
}
|
||||
if owner != "" {
|
||||
description += fmt.Sprintf("\nOwner: %s", owner)
|
||||
}
|
||||
if convoyNotify != "" {
|
||||
description += fmt.Sprintf("\nNotify: %s", convoyNotify)
|
||||
}
|
||||
@@ -281,6 +338,9 @@ func runConvoyCreate(cmd *cobra.Command, args []string) error {
|
||||
"--description=" + description,
|
||||
"--json",
|
||||
}
|
||||
if beads.NeedsForceForID(convoyID) {
|
||||
createArgs = append(createArgs, "--force")
|
||||
}
|
||||
|
||||
createCmd := exec.Command("bd", createArgs...)
|
||||
createCmd.Dir = townBeads
|
||||
@@ -302,9 +362,15 @@ func runConvoyCreate(cmd *cobra.Command, args []string) error {
|
||||
depArgs := []string{"dep", "add", convoyID, issueID, "--type=tracks"}
|
||||
depCmd := exec.Command("bd", depArgs...)
|
||||
depCmd.Dir = townBeads
|
||||
var depStderr bytes.Buffer
|
||||
depCmd.Stderr = &depStderr
|
||||
|
||||
if err := depCmd.Run(); err != nil {
|
||||
style.PrintWarning("couldn't track %s: %v", issueID, err)
|
||||
errMsg := strings.TrimSpace(depStderr.String())
|
||||
if errMsg == "" {
|
||||
errMsg = err.Error()
|
||||
}
|
||||
style.PrintWarning("couldn't track %s: %s", issueID, errMsg)
|
||||
} else {
|
||||
trackedCount++
|
||||
}
|
||||
@@ -317,6 +383,9 @@ func runConvoyCreate(cmd *cobra.Command, args []string) error {
|
||||
if len(trackedIssues) > 0 {
|
||||
fmt.Printf(" Issues: %s\n", strings.Join(trackedIssues, ", "))
|
||||
}
|
||||
if owner != "" {
|
||||
fmt.Printf(" Owner: %s\n", owner)
|
||||
}
|
||||
if convoyNotify != "" {
|
||||
fmt.Printf(" Notify: %s\n", convoyNotify)
|
||||
}
|
||||
@@ -389,9 +458,15 @@ func runConvoyAdd(cmd *cobra.Command, args []string) error {
|
||||
depArgs := []string{"dep", "add", convoyID, issueID, "--type=tracks"}
|
||||
depCmd := exec.Command("bd", depArgs...)
|
||||
depCmd.Dir = townBeads
|
||||
var depStderr bytes.Buffer
|
||||
depCmd.Stderr = &depStderr
|
||||
|
||||
if err := depCmd.Run(); err != nil {
|
||||
style.PrintWarning("couldn't add %s: %v", issueID, err)
|
||||
errMsg := strings.TrimSpace(depStderr.String())
|
||||
if errMsg == "" {
|
||||
errMsg = err.Error()
|
||||
}
|
||||
style.PrintWarning("couldn't add %s: %s", issueID, errMsg)
|
||||
} else {
|
||||
addedCount++
|
||||
}
|
||||
@@ -415,7 +490,14 @@ func runConvoyCheck(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
closed, err := checkAndCloseCompletedConvoys(townBeads)
|
||||
// If a specific convoy ID is provided, check only that convoy
|
||||
if len(args) == 1 {
|
||||
convoyID := args[0]
|
||||
return checkSingleConvoy(townBeads, convoyID, convoyCheckDryRun)
|
||||
}
|
||||
|
||||
// Check all open convoys
|
||||
closed, err := checkAndCloseCompletedConvoys(townBeads, convoyCheckDryRun)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -423,7 +505,11 @@ func runConvoyCheck(cmd *cobra.Command, args []string) error {
|
||||
if len(closed) == 0 {
|
||||
fmt.Println("No convoys ready to close.")
|
||||
} else {
|
||||
fmt.Printf("%s Auto-closed %d convoy(s):\n", style.Bold.Render("✓"), len(closed))
|
||||
if convoyCheckDryRun {
|
||||
fmt.Printf("%s Would auto-close %d convoy(s):\n", style.Warning.Render("⚠"), len(closed))
|
||||
} else {
|
||||
fmt.Printf("%s Auto-closed %d convoy(s):\n", style.Bold.Render("✓"), len(closed))
|
||||
}
|
||||
for _, c := range closed {
|
||||
fmt.Printf(" 🚚 %s: %s\n", c.ID, c.Title)
|
||||
}
|
||||
@@ -432,6 +518,184 @@ func runConvoyCheck(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkSingleConvoy checks a specific convoy and closes it if all tracked issues are complete.
|
||||
func checkSingleConvoy(townBeads, convoyID string, dryRun bool) error {
|
||||
// Get convoy details
|
||||
showArgs := []string{"show", convoyID, "--json"}
|
||||
showCmd := exec.Command("bd", showArgs...)
|
||||
showCmd.Dir = townBeads
|
||||
var stdout bytes.Buffer
|
||||
showCmd.Stdout = &stdout
|
||||
|
||||
if err := showCmd.Run(); err != nil {
|
||||
return fmt.Errorf("convoy '%s' not found", convoyID)
|
||||
}
|
||||
|
||||
var convoys []struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Status string `json:"status"`
|
||||
Type string `json:"issue_type"`
|
||||
Description string `json:"description"`
|
||||
}
|
||||
if err := json.Unmarshal(stdout.Bytes(), &convoys); err != nil {
|
||||
return fmt.Errorf("parsing convoy data: %w", err)
|
||||
}
|
||||
|
||||
if len(convoys) == 0 {
|
||||
return fmt.Errorf("convoy '%s' not found", convoyID)
|
||||
}
|
||||
|
||||
convoy := convoys[0]
|
||||
|
||||
// Verify it's actually a convoy type
|
||||
if convoy.Type != "convoy" {
|
||||
return fmt.Errorf("'%s' is not a convoy (type: %s)", convoyID, convoy.Type)
|
||||
}
|
||||
|
||||
// Check if convoy is already closed
|
||||
if convoy.Status == "closed" {
|
||||
fmt.Printf("%s Convoy %s is already closed\n", style.Dim.Render("○"), convoyID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get tracked issues
|
||||
tracked := getTrackedIssues(townBeads, convoyID)
|
||||
if len(tracked) == 0 {
|
||||
fmt.Printf("%s Convoy %s has no tracked issues\n", style.Dim.Render("○"), convoyID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check if all tracked issues are closed
|
||||
allClosed := true
|
||||
openCount := 0
|
||||
for _, t := range tracked {
|
||||
if t.Status != "closed" && t.Status != "tombstone" {
|
||||
allClosed = false
|
||||
openCount++
|
||||
}
|
||||
}
|
||||
|
||||
if !allClosed {
|
||||
fmt.Printf("%s Convoy %s has %d open issue(s) remaining\n", style.Dim.Render("○"), convoyID, openCount)
|
||||
return nil
|
||||
}
|
||||
|
||||
// All tracked issues are complete - close the convoy
|
||||
if dryRun {
|
||||
fmt.Printf("%s Would auto-close convoy 🚚 %s: %s\n", style.Warning.Render("⚠"), convoyID, convoy.Title)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Actually close the convoy
|
||||
closeArgs := []string{"close", convoyID, "-r", "All tracked issues completed"}
|
||||
closeCmd := exec.Command("bd", closeArgs...)
|
||||
closeCmd.Dir = townBeads
|
||||
|
||||
if err := closeCmd.Run(); err != nil {
|
||||
return fmt.Errorf("closing convoy: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Auto-closed convoy 🚚 %s: %s\n", style.Bold.Render("✓"), convoyID, convoy.Title)
|
||||
|
||||
// Send completion notification
|
||||
notifyConvoyCompletion(townBeads, convoyID, convoy.Title)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func runConvoyClose(cmd *cobra.Command, args []string) error {
|
||||
convoyID := args[0]
|
||||
|
||||
townBeads, err := getTownBeadsDir()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get convoy details
|
||||
showArgs := []string{"show", convoyID, "--json"}
|
||||
showCmd := exec.Command("bd", showArgs...)
|
||||
showCmd.Dir = townBeads
|
||||
var stdout bytes.Buffer
|
||||
showCmd.Stdout = &stdout
|
||||
|
||||
if err := showCmd.Run(); err != nil {
|
||||
return fmt.Errorf("convoy '%s' not found", convoyID)
|
||||
}
|
||||
|
||||
var convoys []struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Status string `json:"status"`
|
||||
Type string `json:"issue_type"`
|
||||
Description string `json:"description"`
|
||||
}
|
||||
if err := json.Unmarshal(stdout.Bytes(), &convoys); err != nil {
|
||||
return fmt.Errorf("parsing convoy data: %w", err)
|
||||
}
|
||||
|
||||
if len(convoys) == 0 {
|
||||
return fmt.Errorf("convoy '%s' not found", convoyID)
|
||||
}
|
||||
|
||||
convoy := convoys[0]
|
||||
|
||||
// Verify it's actually a convoy type
|
||||
if convoy.Type != "convoy" {
|
||||
return fmt.Errorf("'%s' is not a convoy (type: %s)", convoyID, convoy.Type)
|
||||
}
|
||||
|
||||
// Idempotent: if already closed, just report it
|
||||
if convoy.Status == "closed" {
|
||||
fmt.Printf("%s Convoy %s is already closed\n", style.Dim.Render("○"), convoyID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Build close reason
|
||||
reason := convoyCloseReason
|
||||
if reason == "" {
|
||||
reason = "Manually closed"
|
||||
}
|
||||
|
||||
// Close the convoy
|
||||
closeArgs := []string{"close", convoyID, "-r", reason}
|
||||
closeCmd := exec.Command("bd", closeArgs...)
|
||||
closeCmd.Dir = townBeads
|
||||
|
||||
if err := closeCmd.Run(); err != nil {
|
||||
return fmt.Errorf("closing convoy: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Closed convoy 🚚 %s: %s\n", style.Bold.Render("✓"), convoyID, convoy.Title)
|
||||
if convoyCloseReason != "" {
|
||||
fmt.Printf(" Reason: %s\n", convoyCloseReason)
|
||||
}
|
||||
|
||||
// Send notification if --notify flag provided
|
||||
if convoyCloseNotify != "" {
|
||||
sendCloseNotification(convoyCloseNotify, convoyID, convoy.Title, reason)
|
||||
} else {
|
||||
// Check if convoy has a notify address in description
|
||||
notifyConvoyCompletion(townBeads, convoyID, convoy.Title)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendCloseNotification sends a notification about convoy closure.
|
||||
func sendCloseNotification(addr, convoyID, title, reason string) {
|
||||
subject := fmt.Sprintf("🚚 Convoy closed: %s", title)
|
||||
body := fmt.Sprintf("Convoy %s has been closed.\n\nReason: %s", convoyID, reason)
|
||||
|
||||
mailArgs := []string{"mail", "send", addr, "-s", subject, "-m", body}
|
||||
mailCmd := exec.Command("gt", mailArgs...)
|
||||
if err := mailCmd.Run(); err != nil {
|
||||
style.PrintWarning("couldn't send notification: %v", err)
|
||||
} else {
|
||||
fmt.Printf(" Notified: %s\n", addr)
|
||||
}
|
||||
}
|
||||
|
||||
// strandedConvoyInfo holds info about a stranded convoy.
|
||||
type strandedConvoyInfo struct {
|
||||
ID string `json:"id"`
|
||||
@@ -606,8 +870,9 @@ func isReadyIssue(t trackedIssueInfo, blockedIssues map[string]bool) bool {
|
||||
}
|
||||
|
||||
// checkAndCloseCompletedConvoys finds open convoys where all tracked issues are closed
|
||||
// and auto-closes them. Returns the list of convoys that were closed.
|
||||
func checkAndCloseCompletedConvoys(townBeads string) ([]struct{ ID, Title string }, error) {
|
||||
// and auto-closes them. Returns the list of convoys that were closed (or would be closed in dry-run mode).
|
||||
// If dryRun is true, no changes are made and the function returns what would have been closed.
|
||||
func checkAndCloseCompletedConvoys(townBeads string, dryRun bool) ([]struct{ ID, Title string }, error) {
|
||||
var closed []struct{ ID, Title string }
|
||||
|
||||
// List all open convoys
|
||||
@@ -646,6 +911,12 @@ func checkAndCloseCompletedConvoys(townBeads string) ([]struct{ ID, Title string
|
||||
}
|
||||
|
||||
if allClosed {
|
||||
if dryRun {
|
||||
// In dry-run mode, just record what would be closed
|
||||
closed = append(closed, struct{ ID, Title string }{convoy.ID, convoy.Title})
|
||||
continue
|
||||
}
|
||||
|
||||
// Close the convoy
|
||||
closeArgs := []string{"close", convoy.ID, "-r", "All tracked issues completed"}
|
||||
closeCmd := exec.Command("bd", closeArgs...)
|
||||
@@ -666,9 +937,9 @@ func checkAndCloseCompletedConvoys(townBeads string) ([]struct{ ID, Title string
|
||||
return closed, nil
|
||||
}
|
||||
|
||||
// notifyConvoyCompletion sends a notification if the convoy has a notify address.
|
||||
// notifyConvoyCompletion sends notifications to owner and any notify addresses.
|
||||
func notifyConvoyCompletion(townBeads, convoyID, title string) {
|
||||
// Get convoy description to find notify address
|
||||
// Get convoy description to find owner and notify addresses
|
||||
showArgs := []string{"show", convoyID, "--json"}
|
||||
showCmd := exec.Command("bd", showArgs...)
|
||||
showCmd.Dir = townBeads
|
||||
@@ -686,20 +957,26 @@ func notifyConvoyCompletion(townBeads, convoyID, title string) {
|
||||
return
|
||||
}
|
||||
|
||||
// Parse notify address from description
|
||||
// Parse owner and notify addresses from description
|
||||
desc := convoys[0].Description
|
||||
notified := make(map[string]bool) // Track who we've notified to avoid duplicates
|
||||
|
||||
for _, line := range strings.Split(desc, "\n") {
|
||||
if strings.HasPrefix(line, "Notify: ") {
|
||||
addr := strings.TrimPrefix(line, "Notify: ")
|
||||
if addr != "" {
|
||||
// Send notification via gt mail
|
||||
mailArgs := []string{"mail", "send", addr,
|
||||
"-s", fmt.Sprintf("🚚 Convoy landed: %s", title),
|
||||
"-m", fmt.Sprintf("Convoy %s has completed.\n\nAll tracked issues are now closed.", convoyID)}
|
||||
mailCmd := exec.Command("gt", mailArgs...)
|
||||
_ = mailCmd.Run() // Best effort, ignore errors
|
||||
}
|
||||
break
|
||||
var addr string
|
||||
if strings.HasPrefix(line, "Owner: ") {
|
||||
addr = strings.TrimPrefix(line, "Owner: ")
|
||||
} else if strings.HasPrefix(line, "Notify: ") {
|
||||
addr = strings.TrimPrefix(line, "Notify: ")
|
||||
}
|
||||
|
||||
if addr != "" && !notified[addr] {
|
||||
// Send notification via gt mail
|
||||
mailArgs := []string{"mail", "send", addr,
|
||||
"-s", fmt.Sprintf("🚚 Convoy landed: %s", title),
|
||||
"-m", fmt.Sprintf("Convoy %s has completed.\n\nAll tracked issues are now closed.", convoyID)}
|
||||
mailCmd := exec.Command("gt", mailArgs...)
|
||||
_ = mailCmd.Run() // Best effort, ignore errors
|
||||
notified[addr] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1186,6 +1463,10 @@ func getIssueDetails(issueID string) *issueDetails {
|
||||
if err := showCmd.Run(); err != nil {
|
||||
return nil
|
||||
}
|
||||
// Handle bd --no-daemon exit 0 bug: empty stdout means not found
|
||||
if stdout.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var issues []struct {
|
||||
ID string `json:"id"`
|
||||
|
||||
@@ -6,15 +6,18 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -41,13 +44,22 @@ var (
|
||||
var costsCmd = &cobra.Command{
|
||||
Use: "costs",
|
||||
GroupID: GroupDiag,
|
||||
Short: "Show costs for running Claude sessions",
|
||||
Short: "Show costs for running Claude sessions [DISABLED]",
|
||||
Long: `Display costs for Claude Code sessions in Gas Town.
|
||||
|
||||
By default, shows live costs scraped from running tmux sessions.
|
||||
⚠️ COST TRACKING IS CURRENTLY DISABLED
|
||||
|
||||
Cost tracking uses ephemeral wisps for individual sessions that are
|
||||
aggregated into daily "Cost Report" digest beads for audit purposes.
|
||||
Claude Code displays costs in the TUI status bar, which cannot be captured
|
||||
via tmux. All sessions will show $0.00 until Claude Code exposes cost data
|
||||
through an API or environment variable.
|
||||
|
||||
What we need from Claude Code:
|
||||
- Stop hook env var (e.g., $CLAUDE_SESSION_COST)
|
||||
- Or queryable file/API endpoint
|
||||
|
||||
See: GH#24, gt-7awfj
|
||||
|
||||
The infrastructure remains in place and will work once cost data is available.
|
||||
|
||||
Examples:
|
||||
gt costs # Live costs from running sessions
|
||||
@@ -191,6 +203,11 @@ func runCosts(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
func runLiveCosts() error {
|
||||
// Warn that cost tracking is disabled
|
||||
fmt.Fprintf(os.Stderr, "%s Cost tracking is disabled - Claude Code does not expose session costs.\n",
|
||||
style.Warning.Render("⚠"))
|
||||
fmt.Fprintf(os.Stderr, " All sessions will show $0.00. See: GH#24, gt-7awfj\n\n")
|
||||
|
||||
t := tmux.NewTmux()
|
||||
|
||||
// Get all tmux sessions
|
||||
@@ -250,6 +267,11 @@ func runLiveCosts() error {
|
||||
}
|
||||
|
||||
func runCostsFromLedger() error {
|
||||
// Warn that cost tracking is disabled
|
||||
fmt.Fprintf(os.Stderr, "%s Cost tracking is disabled - Claude Code does not expose session costs.\n",
|
||||
style.Warning.Render("⚠"))
|
||||
fmt.Fprintf(os.Stderr, " Historical data may show $0.00 for all sessions. See: GH#24, gt-7awfj\n\n")
|
||||
|
||||
now := time.Now()
|
||||
var entries []CostEntry
|
||||
var err error
|
||||
@@ -275,10 +297,7 @@ func runCostsFromLedger() error {
|
||||
} else {
|
||||
// No time filter: query both digests and legacy session.ended events
|
||||
// (for backwards compatibility during migration)
|
||||
entries, err = querySessionEvents()
|
||||
if err != nil {
|
||||
return fmt.Errorf("querying session events: %w", err)
|
||||
}
|
||||
entries = querySessionEvents()
|
||||
}
|
||||
|
||||
if len(entries) == 0 {
|
||||
@@ -353,7 +372,62 @@ type EventListItem struct {
|
||||
}
|
||||
|
||||
// querySessionEvents queries beads for session.ended events and converts them to CostEntry.
|
||||
func querySessionEvents() ([]CostEntry, error) {
|
||||
// It queries both town-level beads and all rig-level beads to find all session events.
|
||||
// Errors from individual locations are logged (if verbose) but don't fail the query.
|
||||
func querySessionEvents() []CostEntry {
|
||||
// Discover town root for cwd-based bd discovery
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
// Not in a Gas Town workspace - return empty list
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect all beads locations to query
|
||||
beadsLocations := []string{townRoot}
|
||||
|
||||
// Load rigs to find all rig beads locations
|
||||
rigsConfigPath := filepath.Join(townRoot, constants.DirMayor, constants.FileRigsJSON)
|
||||
rigsConfig, err := config.LoadRigsConfig(rigsConfigPath)
|
||||
if err == nil && rigsConfig != nil {
|
||||
for rigName := range rigsConfig.Rigs {
|
||||
rigPath := filepath.Join(townRoot, rigName)
|
||||
// Verify rig has a beads database
|
||||
rigBeadsPath := filepath.Join(rigPath, constants.DirBeads)
|
||||
if _, statErr := os.Stat(rigBeadsPath); statErr == nil {
|
||||
beadsLocations = append(beadsLocations, rigPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Query each beads location and merge results
|
||||
var allEntries []CostEntry
|
||||
seenIDs := make(map[string]bool)
|
||||
|
||||
for _, location := range beadsLocations {
|
||||
entries, err := querySessionEventsFromLocation(location)
|
||||
if err != nil {
|
||||
// Log but continue with other locations
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] query from %s failed: %v\n", location, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Deduplicate by event ID (use SessionID as key)
|
||||
for _, entry := range entries {
|
||||
key := entry.SessionID + entry.EndedAt.String()
|
||||
if !seenIDs[key] {
|
||||
seenIDs[key] = true
|
||||
allEntries = append(allEntries, entry)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return allEntries
|
||||
}
|
||||
|
||||
// querySessionEventsFromLocation queries a single beads location for session.ended events.
|
||||
func querySessionEventsFromLocation(location string) ([]CostEntry, error) {
|
||||
// Step 1: Get list of event IDs
|
||||
listArgs := []string{
|
||||
"list",
|
||||
@@ -364,6 +438,7 @@ func querySessionEvents() ([]CostEntry, error) {
|
||||
}
|
||||
|
||||
listCmd := exec.Command("bd", listArgs...)
|
||||
listCmd.Dir = location
|
||||
listOutput, err := listCmd.Output()
|
||||
if err != nil {
|
||||
// If bd fails (e.g., no beads database), return empty list
|
||||
@@ -387,6 +462,7 @@ func querySessionEvents() ([]CostEntry, error) {
|
||||
}
|
||||
|
||||
showCmd := exec.Command("bd", showArgs...)
|
||||
showCmd.Dir = location
|
||||
showOutput, err := showCmd.Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("showing events: %w", err)
|
||||
@@ -749,8 +825,20 @@ func runCostsRecord(cmd *cobra.Command, args []string) error {
|
||||
// event fields (event_kind, actor, payload) to not be stored properly.
|
||||
// The bd command will auto-detect the correct rig from cwd.
|
||||
|
||||
// Execute bd create
|
||||
// Find town root so bd can find the .beads database.
|
||||
// The stop hook may run from a role subdirectory (e.g., mayor/) that
|
||||
// doesn't have its own .beads, so we need to run bd from town root.
|
||||
townRoot, err := workspace.FindFromCwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("finding town root: %w", err)
|
||||
}
|
||||
if townRoot == "" {
|
||||
return fmt.Errorf("not in a Gas Town workspace")
|
||||
}
|
||||
|
||||
// Execute bd create from town root
|
||||
bdCmd := exec.Command("bd", bdArgs...)
|
||||
bdCmd.Dir = townRoot
|
||||
output, err := bdCmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating session cost wisp: %w\nOutput: %s", err, string(output))
|
||||
@@ -762,6 +850,7 @@ func runCostsRecord(cmd *cobra.Command, args []string) error {
|
||||
// These are informational records that don't need to stay open.
|
||||
// The wisp data is preserved and queryable until digested.
|
||||
closeCmd := exec.Command("bd", "close", wispID, "--reason=auto-closed session cost wisp")
|
||||
closeCmd.Dir = townRoot
|
||||
if closeErr := closeCmd.Run(); closeErr != nil {
|
||||
// Non-fatal: wisp was created, just couldn't auto-close
|
||||
fmt.Fprintf(os.Stderr, "warning: could not auto-close session cost wisp %s: %v\n", wispID, closeErr)
|
||||
|
||||
260
internal/cmd/costs_workdir_test.go
Normal file
260
internal/cmd/costs_workdir_test.go
Normal file
@@ -0,0 +1,260 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
|
||||
// filterGTEnv removes GT_* and BD_* environment variables to isolate test subprocess.
|
||||
// This prevents tests from inheriting the parent workspace's Gas Town configuration.
|
||||
func filterGTEnv(env []string) []string {
|
||||
filtered := make([]string, 0, len(env))
|
||||
for _, e := range env {
|
||||
if strings.HasPrefix(e, "GT_") || strings.HasPrefix(e, "BD_") {
|
||||
continue
|
||||
}
|
||||
filtered = append(filtered, e)
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
|
||||
// TestQuerySessionEvents_FindsEventsFromAllLocations verifies that querySessionEvents
|
||||
// finds session.ended events from both town-level and rig-level beads databases.
|
||||
//
|
||||
// Bug: Events created by rig-level agents (polecats, witness, etc.) are stored in
|
||||
// the rig's .beads database. Events created by town-level agents (mayor, deacon)
|
||||
// are stored in the town's .beads database. querySessionEvents must query ALL
|
||||
// beads locations to find all events.
|
||||
//
|
||||
// This test:
|
||||
// 1. Creates a town with a rig
|
||||
// 2. Creates session.ended events in both town and rig beads
|
||||
// 3. Verifies querySessionEvents finds events from both locations
|
||||
func TestQuerySessionEvents_FindsEventsFromAllLocations(t *testing.T) {
|
||||
// Skip: bd CLI 0.47.2 has a bug where database writes don't commit
|
||||
// ("sql: database is closed" during auto-flush). This affects all tests
|
||||
// that create issues via bd create. See gt-lnn1xn for tracking.
|
||||
t.Skip("bd CLI 0.47.2 bug: database writes don't commit")
|
||||
|
||||
// Skip if gt and bd are not installed
|
||||
if _, err := exec.LookPath("gt"); err != nil {
|
||||
t.Skip("gt not installed, skipping integration test")
|
||||
}
|
||||
if _, err := exec.LookPath("bd"); err != nil {
|
||||
t.Skip("bd not installed, skipping integration test")
|
||||
}
|
||||
|
||||
// Skip when running inside a Gas Town workspace - this integration test
|
||||
// creates a separate workspace and the subprocesses can interact with
|
||||
// the parent workspace's daemon, causing hangs.
|
||||
if os.Getenv("GT_TOWN_ROOT") != "" || os.Getenv("BD_ACTOR") != "" {
|
||||
t.Skip("skipping integration test inside Gas Town workspace (use 'go test' outside workspace)")
|
||||
}
|
||||
|
||||
// Create a temporary directory structure
|
||||
tmpDir := t.TempDir()
|
||||
townRoot := filepath.Join(tmpDir, "test-town")
|
||||
|
||||
// Create town directory
|
||||
if err := os.MkdirAll(townRoot, 0755); err != nil {
|
||||
t.Fatalf("creating town directory: %v", err)
|
||||
}
|
||||
|
||||
// Initialize a git repo (required for gt install)
|
||||
gitInitCmd := exec.Command("git", "init")
|
||||
gitInitCmd.Dir = townRoot
|
||||
if out, err := gitInitCmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("git init: %v\n%s", err, out)
|
||||
}
|
||||
|
||||
// Use gt install to set up the town
|
||||
// Clear GT environment variables to isolate test from parent workspace
|
||||
gtInstallCmd := exec.Command("gt", "install")
|
||||
gtInstallCmd.Dir = townRoot
|
||||
gtInstallCmd.Env = filterGTEnv(os.Environ())
|
||||
if out, err := gtInstallCmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("gt install: %v\n%s", err, out)
|
||||
}
|
||||
|
||||
// Create a bare repo to use as the rig source
|
||||
bareRepo := filepath.Join(tmpDir, "bare-repo.git")
|
||||
bareInitCmd := exec.Command("git", "init", "--bare", bareRepo)
|
||||
if out, err := bareInitCmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("git init --bare: %v\n%s", err, out)
|
||||
}
|
||||
|
||||
// Create a temporary clone to add initial content (bare repos need content)
|
||||
tempClone := filepath.Join(tmpDir, "temp-clone")
|
||||
cloneCmd := exec.Command("git", "clone", bareRepo, tempClone)
|
||||
if out, err := cloneCmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("git clone bare: %v\n%s", err, out)
|
||||
}
|
||||
|
||||
// Add initial commit to bare repo
|
||||
initFileCmd := exec.Command("bash", "-c", "echo 'test' > README.md && git add . && git commit -m 'init'")
|
||||
initFileCmd.Dir = tempClone
|
||||
if out, err := initFileCmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("initial commit: %v\n%s", err, out)
|
||||
}
|
||||
pushCmd := exec.Command("git", "push", "origin", "main")
|
||||
pushCmd.Dir = tempClone
|
||||
// Try main first, fall back to master
|
||||
if _, err := pushCmd.CombinedOutput(); err != nil {
|
||||
pushCmd2 := exec.Command("git", "push", "origin", "master")
|
||||
pushCmd2.Dir = tempClone
|
||||
if out, err := pushCmd2.CombinedOutput(); err != nil {
|
||||
t.Fatalf("git push: %v\n%s", err, out)
|
||||
}
|
||||
}
|
||||
|
||||
// Add rig using gt rig add
|
||||
rigAddCmd := exec.Command("gt", "rig", "add", "testrig", bareRepo, "--prefix=tr")
|
||||
rigAddCmd.Dir = townRoot
|
||||
rigAddCmd.Env = filterGTEnv(os.Environ())
|
||||
if out, err := rigAddCmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("gt rig add: %v\n%s", err, out)
|
||||
}
|
||||
|
||||
// Find the rig path
|
||||
rigPath := filepath.Join(townRoot, "testrig")
|
||||
|
||||
// Verify rig has its own .beads
|
||||
rigBeadsPath := filepath.Join(rigPath, ".beads")
|
||||
if _, err := os.Stat(rigBeadsPath); os.IsNotExist(err) {
|
||||
t.Fatalf("rig .beads not created at %s", rigBeadsPath)
|
||||
}
|
||||
|
||||
// Create a session.ended event in TOWN beads (simulating mayor/deacon)
|
||||
townEventPayload := `{"cost_usd":1.50,"session_id":"hq-mayor","role":"mayor","ended_at":"2026-01-12T10:00:00Z"}`
|
||||
townEventCmd := exec.Command("bd", "create",
|
||||
"--type=event",
|
||||
"--title=Town session ended",
|
||||
"--event-category=session.ended",
|
||||
"--event-payload="+townEventPayload,
|
||||
"--json",
|
||||
)
|
||||
townEventCmd.Dir = townRoot
|
||||
townEventCmd.Env = filterGTEnv(os.Environ())
|
||||
townOut, err := townEventCmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("creating town event: %v\n%s", err, townOut)
|
||||
}
|
||||
t.Logf("Created town event: %s", string(townOut))
|
||||
|
||||
// Create a session.ended event in RIG beads (simulating polecat)
|
||||
rigEventPayload := `{"cost_usd":2.50,"session_id":"gt-testrig-toast","role":"polecat","rig":"testrig","worker":"toast","ended_at":"2026-01-12T11:00:00Z"}`
|
||||
rigEventCmd := exec.Command("bd", "create",
|
||||
"--type=event",
|
||||
"--title=Rig session ended",
|
||||
"--event-category=session.ended",
|
||||
"--event-payload="+rigEventPayload,
|
||||
"--json",
|
||||
)
|
||||
rigEventCmd.Dir = rigPath
|
||||
rigEventCmd.Env = filterGTEnv(os.Environ())
|
||||
rigOut, err := rigEventCmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("creating rig event: %v\n%s", err, rigOut)
|
||||
}
|
||||
t.Logf("Created rig event: %s", string(rigOut))
|
||||
|
||||
// Verify events are in separate databases by querying each directly
|
||||
townListCmd := exec.Command("bd", "list", "--type=event", "--all", "--json")
|
||||
townListCmd.Dir = townRoot
|
||||
townListCmd.Env = filterGTEnv(os.Environ())
|
||||
townListOut, err := townListCmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("listing town events: %v\n%s", err, townListOut)
|
||||
}
|
||||
|
||||
rigListCmd := exec.Command("bd", "list", "--type=event", "--all", "--json")
|
||||
rigListCmd.Dir = rigPath
|
||||
rigListCmd.Env = filterGTEnv(os.Environ())
|
||||
rigListOut, err := rigListCmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("listing rig events: %v\n%s", err, rigListOut)
|
||||
}
|
||||
|
||||
var townEvents, rigEvents []struct{ ID string }
|
||||
json.Unmarshal(townListOut, &townEvents)
|
||||
json.Unmarshal(rigListOut, &rigEvents)
|
||||
|
||||
t.Logf("Town beads has %d events", len(townEvents))
|
||||
t.Logf("Rig beads has %d events", len(rigEvents))
|
||||
|
||||
// Both should have events (they're in separate DBs)
|
||||
if len(townEvents) == 0 {
|
||||
t.Error("Expected town beads to have events")
|
||||
}
|
||||
if len(rigEvents) == 0 {
|
||||
t.Error("Expected rig beads to have events")
|
||||
}
|
||||
|
||||
// Save current directory and change to town root for query
|
||||
origDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatalf("getting current directory: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := os.Chdir(origDir); err != nil {
|
||||
t.Errorf("restoring directory: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := os.Chdir(townRoot); err != nil {
|
||||
t.Fatalf("changing to town root: %v", err)
|
||||
}
|
||||
|
||||
// Verify workspace discovery works
|
||||
foundTownRoot, wsErr := workspace.FindFromCwdOrError()
|
||||
if wsErr != nil {
|
||||
t.Fatalf("workspace.FindFromCwdOrError failed: %v", wsErr)
|
||||
}
|
||||
normalizePath := func(path string) string {
|
||||
resolved, err := filepath.EvalSymlinks(path)
|
||||
if err != nil {
|
||||
return filepath.Clean(path)
|
||||
}
|
||||
return resolved
|
||||
}
|
||||
if normalizePath(foundTownRoot) != normalizePath(townRoot) {
|
||||
t.Errorf("workspace.FindFromCwdOrError returned %s, expected %s", foundTownRoot, townRoot)
|
||||
}
|
||||
|
||||
// Call querySessionEvents - this should find events from ALL locations
|
||||
entries := querySessionEvents()
|
||||
|
||||
t.Logf("querySessionEvents returned %d entries", len(entries))
|
||||
|
||||
// We created 2 session.ended events (one town, one rig)
|
||||
// The fix should find BOTH
|
||||
if len(entries) < 2 {
|
||||
t.Errorf("querySessionEvents found %d entries, expected at least 2 (one from town, one from rig)", len(entries))
|
||||
t.Log("This indicates the bug: querySessionEvents only queries town-level beads, missing rig-level events")
|
||||
}
|
||||
|
||||
// Verify we found both the mayor and polecat sessions
|
||||
var foundMayor, foundPolecat bool
|
||||
for _, e := range entries {
|
||||
t.Logf(" Entry: session=%s role=%s cost=$%.2f", e.SessionID, e.Role, e.CostUSD)
|
||||
if e.Role == "mayor" {
|
||||
foundMayor = true
|
||||
}
|
||||
if e.Role == "polecat" {
|
||||
foundPolecat = true
|
||||
}
|
||||
}
|
||||
|
||||
if !foundMayor {
|
||||
t.Error("Missing mayor session from town beads")
|
||||
}
|
||||
if !foundPolecat {
|
||||
t.Error("Missing polecat session from rig beads")
|
||||
}
|
||||
}
|
||||
@@ -21,32 +21,39 @@ var (
|
||||
crewAll bool
|
||||
crewListAll bool
|
||||
crewDryRun bool
|
||||
crewDebug bool
|
||||
)
|
||||
|
||||
var crewCmd = &cobra.Command{
|
||||
Use: "crew",
|
||||
GroupID: GroupWorkspace,
|
||||
Short: "Manage crew workspaces (user-managed persistent workspaces)",
|
||||
Short: "Manage crew workers (persistent workspaces for humans)",
|
||||
RunE: requireSubcommand,
|
||||
Long: `Crew workers are user-managed persistent workspaces within a rig.
|
||||
Long: `Manage crew workers - persistent workspaces for human developers.
|
||||
|
||||
Unlike polecats which are witness-managed and transient, crew workers are:
|
||||
- Persistent: Not auto-garbage-collected
|
||||
- User-managed: Overseer controls lifecycle
|
||||
- Long-lived identities: recognizable names like dave, emma, fred
|
||||
- Gas Town integrated: Mail, handoff mechanics work
|
||||
- Tmux optional: Can work in terminal directly
|
||||
CREW VS POLECATS:
|
||||
Polecats: Ephemeral. Witness-managed. Auto-nuked after work.
|
||||
Crew: Persistent. User-managed. Stays until you remove it.
|
||||
|
||||
Crew workers are full git clones (not worktrees) for human developers
|
||||
who want persistent context and control over their workspace lifecycle.
|
||||
Use crew workers for exploratory work, long-running tasks, or when you
|
||||
want to keep uncommitted changes around.
|
||||
|
||||
Features:
|
||||
- Gas Town integrated: Mail, nudge, handoff all work
|
||||
- Recognizable names: dave, emma, fred (not ephemeral pool names)
|
||||
- Tmux optional: Can work in terminal directly without tmux session
|
||||
|
||||
Commands:
|
||||
gt crew start <name> Start a crew workspace (creates if needed)
|
||||
gt crew stop <name> Stop crew workspace session(s)
|
||||
gt crew add <name> Create a new crew workspace
|
||||
gt crew list List crew workspaces with status
|
||||
gt crew at <name> Attach to crew workspace session
|
||||
gt crew remove <name> Remove a crew workspace
|
||||
gt crew refresh <name> Context cycling with mail-to-self handoff
|
||||
gt crew restart <name> Kill and restart session fresh (alias: rs)
|
||||
gt crew status [<name>] Show detailed workspace status`,
|
||||
gt crew start <name> Start session (creates workspace if needed)
|
||||
gt crew stop <name> Stop session(s)
|
||||
gt crew add <name> Create workspace without starting
|
||||
gt crew list List workspaces with status
|
||||
gt crew at <name> Attach to session
|
||||
gt crew remove <name> Remove workspace
|
||||
gt crew refresh <name> Context cycle with handoff mail
|
||||
gt crew restart <name> Kill and restart session fresh`,
|
||||
}
|
||||
|
||||
var crewAddCmd = &cobra.Command{
|
||||
@@ -333,6 +340,7 @@ func init() {
|
||||
crewAtCmd.Flags().BoolVarP(&crewDetached, "detached", "d", false, "Start session without attaching")
|
||||
crewAtCmd.Flags().StringVar(&crewAccount, "account", "", "Claude Code account handle to use (overrides default)")
|
||||
crewAtCmd.Flags().StringVar(&crewAgentOverride, "agent", "", "Agent alias to run crew worker with (overrides rig/town default)")
|
||||
crewAtCmd.Flags().BoolVar(&crewDebug, "debug", false, "Show debug output for troubleshooting")
|
||||
|
||||
crewRemoveCmd.Flags().StringVar(&crewRig, "rig", "", "Rig to use")
|
||||
crewRemoveCmd.Flags().BoolVar(&crewForce, "force", false, "Force remove (skip safety checks)")
|
||||
|
||||
@@ -106,7 +106,6 @@ func runCrewAdd(cmd *cobra.Command, args []string) error {
|
||||
RoleType: "crew",
|
||||
Rig: rigName,
|
||||
AgentState: "idle",
|
||||
RoleBead: beads.RoleBeadIDTown("crew"),
|
||||
}
|
||||
desc := fmt.Sprintf("Crew worker %s in %s - human-managed persistent workspace.", name, rigName)
|
||||
if _, err := bd.CreateAgentBead(crewID, desc, fields); err != nil {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user