Compare commits
667 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5cc2995345 | ||
|
|
e57297cb1b | ||
|
|
dff6c3fb3c | ||
|
|
fb4c415127 | ||
|
|
b612df0463 | ||
|
|
785d9adfef | ||
|
|
3d7b109395 | ||
|
|
b14835b140 | ||
|
|
35abe21c50 | ||
|
|
405d40ee4b | ||
|
|
748fa73931 | ||
|
|
1dc31024ca | ||
|
|
94c2d71c13 | ||
|
|
02390251fc | ||
|
|
0dfb0be368 | ||
|
|
1feb48dd11 | ||
|
|
58d5226f30 | ||
|
|
c42b5db7ab | ||
|
|
2119841d57 | ||
|
|
2514507a49 | ||
|
|
e4ebd0784a | ||
|
|
1e97d1e637 | ||
|
|
7e5c3dd695 | ||
|
|
0cdcd0a20b | ||
|
|
aba0a5069c | ||
|
|
a8bedd2172 | ||
|
|
b9f5797b9e | ||
|
|
5791cd7e34 | ||
|
|
3931d10af3 | ||
|
|
d67aa0212c | ||
|
|
b333bf8146 | ||
|
|
7016b33b39 | ||
|
|
1a0f2d6b3b | ||
|
|
39b1c11bb6 | ||
|
|
f6fab3afad | ||
|
|
40cc4c9335 | ||
|
|
82079f9715 | ||
|
|
53fd6bad33 | ||
|
|
6e2169de7f | ||
|
|
d0e49a216a | ||
|
|
6616a4726c | ||
|
|
f00b0254f2 | ||
|
|
e12aa45dd6 | ||
|
|
9f06eb94c4 | ||
|
|
7a2090bb15 | ||
|
|
a5bbe24444 | ||
|
|
87f9a7cfd1 | ||
|
|
78001d2c01 | ||
|
|
d96b53e173 | ||
|
|
fa1f812ce9 | ||
|
|
dfd4199396 | ||
|
|
77126283dd | ||
|
|
afc1ff04b1 | ||
|
|
987502ebb3 | ||
|
|
3588dbc5e4 | ||
|
|
4fbe00e224 | ||
|
|
3afd1a1dcd | ||
|
|
535647cefc | ||
|
|
3c44e2202d | ||
|
|
b2b9cbc836 | ||
|
|
035b7775ea | ||
|
|
a8be623eeb | ||
|
|
63a30ce548 | ||
|
|
1b036aadf5 | ||
|
|
9de8859be0 | ||
|
|
560431d2f5 | ||
|
|
aef99753df | ||
|
|
d610d444d7 | ||
|
|
cd347dfdf9 | ||
|
|
d0a1e165e5 | ||
|
|
2b56ee2545 | ||
|
|
9b412707ab | ||
|
|
45951c0fad | ||
|
|
9caf5302d4 | ||
|
|
78ca8bd5bf | ||
|
|
44d5b4fdd2 | ||
|
|
77ac332a41 | ||
|
|
b71188d0b4 | ||
|
|
6bfe61f796 | ||
|
|
2aadb0165b | ||
|
|
05ea767149 | ||
|
|
f4072e58cc | ||
|
|
7c2f9687ec | ||
|
|
e591f2ae25 | ||
|
|
0a6b0b892f | ||
|
|
6a3780d282 | ||
|
|
8357a94cae | ||
|
|
8b393b7c39 | ||
|
|
195ecf7578 | ||
|
|
5218102f49 | ||
|
|
126ec84bb3 | ||
|
|
9a91a1b94f | ||
|
|
f82477d6a6 | ||
|
|
4dd11d4ffa | ||
|
|
7564cd5997 | ||
|
|
5a14053a6b | ||
|
|
d2f7dbd3ae | ||
|
|
65c1fad8ce | ||
|
|
0db2bda6e6 | ||
|
|
48ace2cbf3 | ||
|
|
3d5a66f850 | ||
|
|
b8a679c30c | ||
|
|
183a0d7d8d | ||
|
|
477c28c9d1 | ||
|
|
f58a516b7b | ||
|
|
fd61259336 | ||
|
|
6a22b47ef6 | ||
|
|
5c45b4438a | ||
|
|
08cee416a4 | ||
|
|
2fe23b7be5 | ||
|
|
6c5c671595 | ||
|
|
371074cc67 | ||
|
|
6966eb4c28 | ||
|
|
55a3b9858a | ||
|
|
e59955a580 | ||
|
|
08bc632a03 | ||
|
|
a610283078 | ||
|
|
544cacf36d | ||
|
|
b8eb936219 | ||
|
|
dcf7b81011 | ||
|
|
37f465bde5 | ||
|
|
b73ee91970 | ||
|
|
b41a5ef243 | ||
|
|
4eb3915ce9 | ||
|
|
b28c25b8a2 | ||
|
|
2333b38ecf | ||
|
|
6f9bfec60f | ||
|
|
7421d1554d | ||
|
|
e2bd5ef76c | ||
|
|
61e9a36dfd | ||
|
|
8c200d4a83 | ||
|
|
9cd2696abe | ||
|
|
2b3f287f02 | ||
|
|
021b087a12 | ||
|
|
3cb3a0bbf7 | ||
|
|
7714295a43 | ||
|
|
616ff01e2c | ||
|
|
8d41f817b9 | ||
|
|
3f724336f4 | ||
|
|
576e73a924 | ||
|
|
5ecf8ccaf5 | ||
|
|
238ad8cd95 | ||
|
|
50bcf96afb | ||
|
|
2feefd1731 | ||
|
|
4a856f6e0d | ||
|
|
e853ac3539 | ||
|
|
f14dadc956 | ||
|
|
f19a0ab5d6 | ||
|
|
38d3c0c4f1 | ||
|
|
d4ad4c0726 | ||
|
|
88a74c50f7 | ||
|
|
7ff87ff012 | ||
|
|
bd655f58f9 | ||
|
|
72b03469d1 | ||
|
|
d6a4bc22fd | ||
|
|
3283ee42aa | ||
|
|
b40a6b0736 | ||
|
|
265239d4a1 | ||
|
|
cd67eae044 | ||
|
|
5badb54048 | ||
|
|
4deeba6304 | ||
|
|
93c6c70296 | ||
|
|
bda1dc97c5 | ||
|
|
5823c9fb36 | ||
|
|
885b5023d3 | ||
|
|
4ef93e1d8a | ||
|
|
6d29f34cd0 | ||
|
|
8880c61067 | ||
|
|
0cc4867ad7 | ||
|
|
d8bb9a9ba9 | ||
|
|
8dab7b662a | ||
|
|
938b068145 | ||
|
|
eed5cddc97 | ||
|
|
15d1dc8fa8 | ||
|
|
11b38294d4 | ||
|
|
d4026b79cf | ||
|
|
eb18dbf9e2 | ||
|
|
4d8236e26c | ||
|
|
6b895e56de | ||
|
|
ae2fddf4fc | ||
|
|
eea3dd564d | ||
|
|
5178fa7f0a | ||
|
|
0545d596c3 | ||
|
|
22064b0730 | ||
|
|
5a56525655 | ||
|
|
74050cd0ab | ||
|
|
fbc67e89e1 | ||
|
|
43e38f037c | ||
|
|
22a24c5648 | ||
|
|
9b34b6bfec | ||
|
|
301a42a90e | ||
|
|
7af7634022 | ||
|
|
29f8dd67e2 | ||
|
|
91433e8b1d | ||
|
|
c7e1451ce6 | ||
|
|
f89ac47ff9 | ||
|
|
e344e77921 | ||
|
|
a09c6b71d7 | ||
|
|
4fa6cfa0da | ||
|
|
c51047b654 | ||
|
|
d42a9bd6e0 | ||
|
|
08ef50047d | ||
|
|
95cb58e36f | ||
|
|
d3606c8c46 | ||
|
|
a88d2e1a9e | ||
|
|
29039ed69d | ||
|
|
b1a5241430 | ||
|
|
03213a7307 | ||
|
|
7e158cddd6 | ||
|
|
e5aea04fa1 | ||
|
|
8332a719ab | ||
|
|
139f3aeba3 | ||
|
|
add3d56c8b | ||
|
|
5c13e5f95a | ||
|
|
3ebb1118d3 | ||
|
|
618b0d9810 | ||
|
|
39185f8d00 | ||
|
|
a4776b9bee | ||
|
|
20effb0a51 | ||
|
|
4f02abb535 | ||
|
|
cbbf566f06 | ||
|
|
e30e46a87a | ||
|
|
7bbc09230e | ||
|
|
2ffc8e8712 | ||
|
|
012d50b2b2 | ||
|
|
bf8bddb004 | ||
|
|
42999d883d | ||
|
|
b3b980fd79 | ||
|
|
839fa19e90 | ||
|
|
7164e7a6d2 | ||
|
|
8eafcc8a16 | ||
|
|
a244c3d498 | ||
|
|
0bf68de517 | ||
|
|
42d9890e5c | ||
|
|
92144757ac | ||
|
|
e7ca4908dc | ||
|
|
3cf77b2e8b | ||
|
|
a1195cb104 | ||
|
|
80af0547ea | ||
|
|
08755f62cd | ||
|
|
5d96243414 | ||
|
|
60da5de104 | ||
|
|
0a6fa457f6 | ||
|
|
1043f00d06 | ||
|
|
8660641009 | ||
|
|
4ee1a4472d | ||
|
|
5882039715 | ||
|
|
7d8d96f7f9 | ||
|
|
69110309cc | ||
|
|
901b60e927 | ||
|
|
712a37b9c1 | ||
|
|
aa0bfd0c40 | ||
|
|
1453b8b592 | ||
|
|
65c5e05c43 | ||
|
|
bd2a5ab56a | ||
|
|
f32a63e6e5 | ||
|
|
c61b67eb03 | ||
|
|
fa99e615f0 | ||
|
|
ff6c02b15d | ||
|
|
66805079de | ||
|
|
bedccb1634 | ||
|
|
e0e5a00dfc | ||
|
|
275910b702 | ||
|
|
fdd4b0aeb0 | ||
|
|
f42ec42268 | ||
|
|
503e66ba8d | ||
|
|
8051c8bdd7 | ||
|
|
c0526f244e | ||
|
|
bda248fb9a | ||
|
|
45de02db43 | ||
|
|
9315248134 | ||
|
|
73a349e5ee | ||
|
|
a2607b5b72 | ||
|
|
18893e713a | ||
|
|
ea12679a5a | ||
|
|
b1fcb7d3e7 | ||
|
|
a43c89c01b | ||
|
|
e043f4a16c | ||
|
|
87fde4b4fd | ||
|
|
e083317cc3 | ||
|
|
7924921d17 | ||
|
|
278b2f2d4d | ||
|
|
791b388a93 | ||
|
|
6becab4a60 | ||
|
|
38bedc03e8 | ||
|
|
e7b0af0295 | ||
|
|
f9ca7bb87b | ||
|
|
392ff1d31b | ||
|
|
58207a00ec | ||
|
|
f0192c8b3d | ||
|
|
15cfb76c2c | ||
|
|
2d8949a3d3 | ||
|
|
f79614d764 | ||
|
|
e442212c05 | ||
|
|
6b2a7438e1 | ||
|
|
1902182f3a | ||
|
|
c99b004aeb | ||
|
|
c860112cf6 | ||
|
|
ee2ca10b0a | ||
|
|
5a373fbd57 | ||
|
|
efac19d184 | ||
|
|
ff3f3b4580 | ||
|
|
5a7c328f1f | ||
|
|
069fe0f285 | ||
|
|
1e3bf292f9 | ||
|
|
d6dc43938d | ||
|
|
6b8480c483 | ||
|
|
cd2de6ec46 | ||
|
|
025586e16b | ||
|
|
b990094010 | ||
|
|
716bab396f | ||
|
|
605eeec84e | ||
|
|
3caf32f9f7 | ||
|
|
3cdc98651e | ||
|
|
9779ae3190 | ||
|
|
b9ecb7b82e | ||
|
|
98b11eda3c | ||
|
|
3247b57926 | ||
|
|
f6fd76172e | ||
|
|
77e1199196 | ||
|
|
36ffa379b8 | ||
|
|
9835e13fee | ||
|
|
eae08ee509 | ||
|
|
7ee708ffef | ||
|
|
7182599b42 | ||
|
|
39a51c0d14 | ||
|
|
a9080ed04f | ||
|
|
043a6abc59 | ||
|
|
a1008f6f58 | ||
|
|
995476a9c0 | ||
|
|
7b35398ebc | ||
|
|
0d0d2763a8 | ||
|
|
ea5d72a07b | ||
|
|
cdea53e221 | ||
|
|
b0f377f973 | ||
|
|
28c55bd451 | ||
|
|
2a0a8c760b | ||
|
|
1f272ffc53 | ||
|
|
4bbf97ab82 | ||
|
|
add77eea84 | ||
|
|
a144c99f46 | ||
|
|
956f8cc5f0 | ||
|
|
30a6f27404 | ||
|
|
f5832188a6 | ||
|
|
a106796a0e | ||
|
|
88f784a9aa | ||
|
|
8ed31e9634 | ||
|
|
833724a7ed | ||
|
|
c7e1b207df | ||
|
|
d22b5b6ab5 | ||
|
|
91641b01a0 | ||
|
|
7ef4ddab6c | ||
|
|
5aa218fc96 | ||
|
|
e16d5840c6 | ||
|
|
947111f6d8 | ||
|
|
66f6e37844 | ||
|
|
96632fe4ba | ||
|
|
54be24ab5b | ||
|
|
ce9cd72c37 | ||
|
|
d126c967a0 | ||
|
|
b9025379b7 | ||
|
|
598a39e708 | ||
|
|
ea84079f8b | ||
|
|
b9e8be4352 | ||
|
|
89aec8e19e | ||
|
|
5d554a616a | ||
|
|
dceabab8db | ||
|
|
1418b1123a | ||
|
|
2c73cf35f1 | ||
|
|
0b90837a18 | ||
|
|
566bdfbcd8 | ||
|
|
1ece29e1fd | ||
|
|
7f4c3201cf | ||
|
|
8deb5ed1bd | ||
|
|
dab619b3d0 | ||
|
|
3246c7c6b7 | ||
|
|
6a705f6210 | ||
|
|
62d5e4b550 | ||
|
|
0f6759e4a2 | ||
|
|
1bed63f087 | ||
|
|
5607bc4f01 | ||
|
|
e7d7a1bd6b | ||
|
|
982ce6c5d1 | ||
|
|
f1c49630ca | ||
|
|
21a88e2c18 | ||
|
|
8219fd5abe | ||
|
|
ad6386809c | ||
|
|
d13922523a | ||
|
|
84b6780a87 | ||
|
|
40c67e0796 | ||
|
|
0d7f5d1f05 | ||
|
|
30984dcf95 | ||
|
|
064f7b1a40 | ||
|
|
e3d99c3aed | ||
|
|
819c9dd179 | ||
|
|
9bb63900d4 | ||
|
|
fc1a1dea88 | ||
|
|
dd9cd61075 | ||
|
|
272f83f1fc | ||
|
|
6e6e5ce08c | ||
|
|
db353c247b | ||
|
|
7533fed55e | ||
|
|
afb944f616 | ||
|
|
6016f15da9 | ||
|
|
f90b58bc6d | ||
|
|
b60f016955 | ||
|
|
609a4af087 | ||
|
|
e1f2bb8b4b | ||
|
|
f7d497ba07 | ||
|
|
131dac91c8 | ||
|
|
b92e46474a | ||
|
|
fc8718e680 | ||
|
|
2f50a59e74 | ||
|
|
aeb4c0d26f | ||
|
|
c4fcdd88c8 | ||
|
|
c94d59dca7 | ||
|
|
e0858096f6 | ||
|
|
0f633be4b1 | ||
|
|
593185873d | ||
|
|
86751e1ea5 | ||
|
|
f9473c7b9e | ||
|
|
dcb085e64e | ||
|
|
369cf82b77 | ||
|
|
bfd3096b49 | ||
|
|
271bd7ea0a | ||
|
|
0d3f6c9654 | ||
|
|
24136ebaa1 | ||
|
|
7a1ed80068 | ||
|
|
e6bdc639ab | ||
|
|
65334320c7 | ||
|
|
ce231a31af | ||
|
|
f1a2c56900 | ||
|
|
cc87fdd03d | ||
|
|
9b4c7ac28b | ||
|
|
64e1448981 | ||
|
|
e9a013c0d2 | ||
|
|
e999ceb1c1 | ||
|
|
52b9a95f98 | ||
|
|
1d88a73eaa | ||
|
|
7150ce2624 | ||
|
|
2343e6b0ef | ||
|
|
491b635cbc | ||
|
|
cb2b130ca2 | ||
|
|
be35b3eaab | ||
|
|
b8075a5e06 | ||
|
|
9697007182 | ||
|
|
ad8189b010 | ||
|
|
7367aa7572 | ||
|
|
ba76bf1232 | ||
|
|
692d6819f2 | ||
|
|
97b70517cc | ||
|
|
73a8889c3e | ||
|
|
61b561a540 | ||
|
|
86739556c2 | ||
|
|
ff3d1b2e23 | ||
|
|
69299e9a43 | ||
|
|
1701474b3d | ||
|
|
a7e9fbf699 | ||
|
|
358fcaf935 | ||
|
|
f19ddc5400 | ||
|
|
64b58b31ab | ||
|
|
afff85cdff | ||
|
|
a91e6cd643 | ||
|
|
9b2f4a7652 | ||
|
|
c8c97fdf64 | ||
|
|
43272f6fbb | ||
|
|
65c3e90374 | ||
|
|
0eacdd367b | ||
|
|
9fe9323b9c | ||
|
|
bfafb9c179 | ||
|
|
677a6ed84f | ||
|
|
da2d71c3fe | ||
|
|
e124402b7b | ||
|
|
705a7c2137 | ||
|
|
c2c6ddeaf9 | ||
|
|
b509107100 | ||
|
|
34cb28e0b9 | ||
|
|
1da3e18e60 | ||
|
|
5adb096d9d | ||
|
|
81bfe48ed3 | ||
|
|
41a758d6d8 | ||
|
|
5250e9e12a | ||
|
|
b3407759d2 | ||
|
|
c8c765a239 | ||
|
|
775af2973d | ||
|
|
da906847dd | ||
|
|
0a649e6faa | ||
|
|
fb40fa1405 | ||
|
|
7bfc2fcb76 | ||
|
|
376305e9d9 | ||
|
|
73f5b4025b | ||
|
|
c756f12d00 | ||
|
|
8d5611f14e | ||
|
|
98e154b18e | ||
|
|
38adfa4d8b | ||
|
|
03b0f7ff52 | ||
|
|
3b628150c2 | ||
|
|
1afe3fb823 | ||
|
|
caa88d96c5 | ||
|
|
4c9e8b8b99 | ||
|
|
c699e3e2ed | ||
|
|
65ecb6cafd | ||
|
|
540e33dbe9 | ||
|
|
85dd150d75 | ||
|
|
45634059dd | ||
|
|
d4da2b325d | ||
|
|
4985bdfbcc | ||
|
|
f4cbcb4ce9 | ||
|
|
c4d956ebe7 | ||
|
|
7f6fe53c6f | ||
|
|
19f4fa3ddb | ||
|
|
e648edce8c | ||
|
|
8a8b56e9e6 | ||
|
|
c91ab85457 | ||
|
|
00a59dec44 | ||
|
|
2de2d6b7e4 | ||
|
|
f30178265c | ||
|
|
5141facb21 | ||
|
|
15caf62b9f | ||
|
|
28a9de64d5 | ||
|
|
a9ed342be6 | ||
|
|
f9e788ccfb | ||
|
|
c220678162 | ||
|
|
b649635f48 | ||
|
|
117b91b87f | ||
|
|
ffa8dd56cb | ||
|
|
92042d679c | ||
|
|
585c204648 | ||
|
|
6209a49d54 | ||
|
|
ffeff97d9f | ||
|
|
9b5c889795 | ||
|
|
a07fa8bf7f | ||
|
|
06d40925d1 | ||
|
|
e2a211e295 | ||
|
|
b834bf5858 | ||
|
|
11d469edc3 | ||
|
|
de376007e0 | ||
|
|
5855b525fd | ||
|
|
f89c6f3693 | ||
|
|
ee167ae1f1 | ||
|
|
a6157829d7 | ||
|
|
4a94187068 | ||
|
|
93bdf88f6e | ||
|
|
59799f551c | ||
|
|
85d1e783b0 | ||
|
|
bf16f7894b | ||
|
|
0dc0174b26 | ||
|
|
5f8690dbda | ||
|
|
5f206fb658 | ||
|
|
d6add3f9b4 | ||
|
|
c25368cbe1 | ||
|
|
52ef89c559 | ||
|
|
541e1ac2a3 | ||
|
|
2922affa02 | ||
|
|
ab0d56dec9 | ||
|
|
b095b9c04c | ||
|
|
feeee3912a | ||
|
|
29e2c6ed9c | ||
|
|
454b2f76e7 | ||
|
|
21c1bbc118 | ||
|
|
ea8bef2029 | ||
|
|
432d14d9df | ||
|
|
b7b8e141b1 | ||
|
|
72544cc06d | ||
|
|
81a7d04239 | ||
|
|
fc4b9de02c | ||
|
|
9729e05f86 | ||
|
|
3f920048cb | ||
|
|
d00e73f110 | ||
|
|
87169a3fc7 | ||
|
|
6e84489ca3 | ||
|
|
29aed4b42f | ||
|
|
805ac7c17a | ||
|
|
ec53dfbb40 | ||
|
|
1f44482ad0 | ||
|
|
950e35317e | ||
|
|
6dbb841e22 | ||
|
|
d89aae5b5c | ||
|
|
11e3e85e9d | ||
|
|
99aae0bf02 | ||
|
|
22693c1dcc | ||
|
|
02ca9e43fa | ||
|
|
6afd85df4b | ||
|
|
3b9ca71fc4 | ||
|
|
93b19a7e72 | ||
|
|
c2451b85e7 | ||
|
|
ae88c12e07 | ||
|
|
e7a8e0a3db | ||
|
|
56742d95da | ||
|
|
60e7471cea | ||
|
|
7edd75021b | ||
|
|
a787d60add | ||
|
|
a3bccc881b | ||
|
|
74409dc32b | ||
|
|
ac63b10aa8 | ||
|
|
c306879a31 | ||
|
|
ac4649ba7d | ||
|
|
63af29284b | ||
|
|
b79e4a7c3b | ||
|
|
6fe25c757c | ||
|
|
9cb14cc41a | ||
|
|
201ef3a9c8 | ||
|
|
9e416e9ff5 | ||
|
|
83c47df980 | ||
|
|
7fe505d673 | ||
|
|
9d7dcde1e2 | ||
|
|
16fb45bb2a | ||
|
|
87a2e27fcc | ||
|
|
ad6169201a | ||
|
|
09bbb0f430 | ||
|
|
be815db5e4 | ||
|
|
31a32c084b | ||
|
|
f6f6acdb2d | ||
|
|
4799cb086f | ||
|
|
6e4f2bea29 | ||
|
|
c8150ab017 | ||
|
|
637df1d289 | ||
|
|
cf1eac8521 | ||
|
|
296440579a | ||
|
|
03fef16748 | ||
|
|
e8d27e7212 | ||
|
|
fc0b506253 | ||
|
|
5224dfb50d | ||
|
|
b33df5fa36 | ||
|
|
5ae89b3a27 | ||
|
|
2ed8de0e20 | ||
|
|
155e7dd438 | ||
|
|
8249e8a7f6 | ||
|
|
2ec66214e1 | ||
|
|
c199f7e940 | ||
|
|
b9d1813301 | ||
|
|
362917f52e | ||
|
|
0607c3a749 | ||
|
|
c073125b3b | ||
|
|
86c79e750c | ||
|
|
43cca06460 | ||
|
|
b88d3e8ee7 | ||
|
|
97564dfc13 | ||
|
|
688624ca6b | ||
|
|
c529d09e77 | ||
|
|
0c5cfcea2a | ||
|
|
c24c3ba873 | ||
|
|
8110aab257 | ||
|
|
d34e9b006c | ||
|
|
85a522f725 | ||
|
|
5be232ff8c | ||
|
|
eb6fb3c73b | ||
|
|
52533c354d | ||
|
|
a5ff31428b | ||
|
|
f49197243d | ||
|
|
904a773ade | ||
|
|
ef248a1824 | ||
|
|
4ebb96fbbc | ||
|
|
168e805d0c | ||
|
|
c678d2e3d4 | ||
|
|
8c91ff22db | ||
|
|
1be9edc272 | ||
|
|
bdaff31117 | ||
|
|
e30ebaf8ac | ||
|
|
59414834ec | ||
|
|
7e591ec0a1 | ||
|
|
59484b2af7 | ||
|
|
39d904e125 | ||
|
|
84009a3ee8 | ||
|
|
3d0183a3bb | ||
|
|
fec51d60e0 | ||
|
|
569cb182a6 |
21
.beads/.gitignore
vendored
21
.beads/.gitignore
vendored
@@ -10,6 +10,8 @@ daemon.lock
|
||||
daemon.log
|
||||
daemon.pid
|
||||
bd.sock
|
||||
sync-state.json
|
||||
last-touched
|
||||
|
||||
# Local version tracking (prevents upgrade notification spam after git ops)
|
||||
.local_version
|
||||
@@ -18,6 +20,10 @@ bd.sock
|
||||
db.sqlite
|
||||
bd.db
|
||||
|
||||
# Worktree redirect file (contains relative path to main repo's .beads/)
|
||||
# Must not be committed as paths would be wrong in other clones
|
||||
redirect
|
||||
|
||||
# Merge artifacts (temporary files from 3-way merge)
|
||||
beads.base.jsonl
|
||||
beads.base.meta.json
|
||||
@@ -26,8 +32,13 @@ beads.left.meta.json
|
||||
beads.right.jsonl
|
||||
beads.right.meta.json
|
||||
|
||||
# Keep JSONL exports and config (source of truth for git)
|
||||
!issues.jsonl
|
||||
!interactions.jsonl
|
||||
!metadata.json
|
||||
!config.json
|
||||
# Sync state (local-only, per-machine)
|
||||
# These files are machine-specific and should not be shared across clones
|
||||
.sync.lock
|
||||
sync_base.jsonl
|
||||
|
||||
# NOTE: Do NOT add negation patterns (e.g., !issues.jsonl) here.
|
||||
# They would override fork protection in .git/info/exclude, allowing
|
||||
# contributors to accidentally commit upstream issue databases.
|
||||
# The JSONL files (issues.jsonl, interactions.jsonl) and config files
|
||||
# are tracked by git by default since no pattern above ignores them.
|
||||
|
||||
43
.beads/PRIME.md
Normal file
43
.beads/PRIME.md
Normal file
@@ -0,0 +1,43 @@
|
||||
# Gas Town Worker Context
|
||||
|
||||
> **Context Recovery**: Run `gt prime` for full context after compaction or new session.
|
||||
|
||||
## The Propulsion Principle (GUPP)
|
||||
|
||||
**If you find work on your hook, YOU RUN IT.**
|
||||
|
||||
No confirmation. No waiting. No announcements. The hook having work IS the assignment.
|
||||
This is physics, not politeness. Gas Town is a steam engine - you are a piston.
|
||||
|
||||
**Failure mode we're preventing:**
|
||||
- Agent starts with work on hook
|
||||
- Agent announces itself and waits for human to say "ok go"
|
||||
- Human is AFK / trusting the engine to run
|
||||
- Work sits idle. The whole system stalls.
|
||||
|
||||
## Startup Protocol
|
||||
|
||||
1. Check your hook: `gt mol status`
|
||||
2. If work is hooked → EXECUTE (no announcement, no waiting)
|
||||
3. If hook empty → Check mail: `gt mail inbox`
|
||||
4. Still nothing? Wait for user instructions
|
||||
|
||||
## Key Commands
|
||||
|
||||
- `gt prime` - Get full role context (run after compaction)
|
||||
- `gt mol status` - Check your hooked work
|
||||
- `gt mail inbox` - Check for messages
|
||||
- `bd ready` - Find available work (no blockers)
|
||||
- `bd sync` - Sync beads changes
|
||||
|
||||
## Session Close Protocol
|
||||
|
||||
Before saying "done":
|
||||
1. git status (check what changed)
|
||||
2. git add <files> (stage code changes)
|
||||
3. bd sync (commit beads changes)
|
||||
4. git commit -m "..." (commit code)
|
||||
5. bd sync (commit any new beads changes)
|
||||
6. git push (push to remote)
|
||||
|
||||
**Work is not done until pushed.**
|
||||
@@ -15,6 +15,8 @@ Each leg examines the code from a different perspective. Findings are
|
||||
collected and synthesized into a prioritized, actionable review.
|
||||
|
||||
## Legs (parallel execution)
|
||||
|
||||
### Analysis Legs (read and analyze code)
|
||||
- **correctness**: Logic errors, bugs, edge cases
|
||||
- **performance**: Bottlenecks, efficiency issues
|
||||
- **security**: Vulnerabilities, OWASP concerns
|
||||
@@ -23,6 +25,16 @@ collected and synthesized into a prioritized, actionable review.
|
||||
- **style**: Convention compliance, consistency
|
||||
- **smells**: Anti-patterns, technical debt
|
||||
|
||||
### Verification Legs (check implementation quality)
|
||||
- **wiring**: Installed-but-not-wired gaps (deps added but not used)
|
||||
- **commit-discipline**: Commit quality and atomicity
|
||||
- **test-quality**: Test meaningfulness, not just coverage
|
||||
|
||||
## Presets
|
||||
- **gate**: Light review for automatic flow (wiring, security, smells, test-quality)
|
||||
- **full**: Comprehensive review (all 10 legs)
|
||||
- **custom**: Select specific legs via --legs flag
|
||||
|
||||
## Execution Model
|
||||
1. Each leg spawns as a separate polecat
|
||||
2. Polecats work in parallel
|
||||
@@ -293,6 +305,125 @@ Review the code for code smells and anti-patterns.
|
||||
- Is technical debt being added or paid down?
|
||||
"""
|
||||
|
||||
# ============================================================================
|
||||
# VERIFICATION LEGS - Check implementation quality (not just code analysis)
|
||||
# ============================================================================
|
||||
|
||||
[[legs]]
|
||||
id = "wiring"
|
||||
title = "Wiring Review"
|
||||
focus = "Installed-but-not-wired gaps"
|
||||
description = """
|
||||
Detect dependencies, configs, or libraries that were added but not actually used.
|
||||
|
||||
This catches subtle bugs where the implementer THINKS they integrated something,
|
||||
but the old implementation is still being used.
|
||||
|
||||
**Look for:**
|
||||
- New dependency in manifest but never imported
|
||||
- Go: module in go.mod but no import
|
||||
- Rust: crate in Cargo.toml but no `use`
|
||||
- Node: package in package.json but no import/require
|
||||
|
||||
- SDK added but old implementation remains
|
||||
- Added Sentry but still using console.error for errors
|
||||
- Added Zod but still using manual typeof validation
|
||||
|
||||
- Config/env var defined but never loaded
|
||||
- New .env var that isn't accessed in code
|
||||
|
||||
**Questions to answer:**
|
||||
- Is every new dependency actually used?
|
||||
- Are there old patterns that should have been replaced?
|
||||
- Is there dead config that suggests incomplete migration?
|
||||
"""
|
||||
|
||||
[[legs]]
|
||||
id = "commit-discipline"
|
||||
title = "Commit Discipline Review"
|
||||
focus = "Commit quality and atomicity"
|
||||
description = """
|
||||
Review commit history for good practices.
|
||||
|
||||
Good commits make the codebase easier to understand, bisect, and revert.
|
||||
|
||||
**Look for:**
|
||||
- Giant "WIP" or "fix" commits
|
||||
- Multiple unrelated changes in one commit
|
||||
- Commits that touch 20+ files across different features
|
||||
|
||||
- Poor commit messages
|
||||
- "stuff", "update", "asdf", "fix"
|
||||
- No context about WHY the change was made
|
||||
|
||||
- Unatomic commits
|
||||
- Feature + refactor + bugfix in same commit
|
||||
- Should be separable logical units
|
||||
|
||||
- Missing type prefixes (if project uses conventional commits)
|
||||
- feat:, fix:, refactor:, test:, docs:, chore:
|
||||
|
||||
**Questions to answer:**
|
||||
- Could this history be bisected effectively?
|
||||
- Would a reviewer understand the progression?
|
||||
- Are commits atomic (one logical change each)?
|
||||
"""
|
||||
|
||||
[[legs]]
|
||||
id = "test-quality"
|
||||
title = "Test Quality Review"
|
||||
focus = "Test meaningfulness, not just coverage"
|
||||
description = """
|
||||
Verify tests are actually testing something meaningful.
|
||||
|
||||
Coverage numbers lie. A test that can't fail provides no value.
|
||||
|
||||
**Look for:**
|
||||
- Weak assertions
|
||||
- Only checking != nil / !== null / is not None
|
||||
- Using .is_ok() without checking the value
|
||||
- assertTrue(true) or equivalent
|
||||
|
||||
- Missing negative test cases
|
||||
- Happy path only, no error cases
|
||||
- No boundary testing
|
||||
- No invalid input testing
|
||||
|
||||
- Tests that can't fail
|
||||
- Mocked so heavily the test is meaningless
|
||||
- Testing implementation details, not behavior
|
||||
|
||||
- Flaky test indicators
|
||||
- Sleep/delay in tests
|
||||
- Time-dependent assertions
|
||||
|
||||
**Questions to answer:**
|
||||
- Do these tests actually verify behavior?
|
||||
- Would a bug in the implementation cause a test failure?
|
||||
- Are edge cases and error paths tested?
|
||||
"""
|
||||
|
||||
# ============================================================================
|
||||
# PRESETS - Configurable leg selection
|
||||
# ============================================================================
|
||||
|
||||
[presets]
|
||||
[presets.gate]
|
||||
description = "Light review for automatic flow - fast, focused on blockers"
|
||||
legs = ["wiring", "security", "smells", "test-quality"]
|
||||
|
||||
[presets.full]
|
||||
description = "Comprehensive review - all legs, for major features"
|
||||
legs = ["correctness", "performance", "security", "elegance", "resilience", "style", "smells", "wiring", "commit-discipline", "test-quality"]
|
||||
|
||||
[presets.security-focused]
|
||||
description = "Security-heavy review for sensitive changes"
|
||||
legs = ["security", "resilience", "correctness", "wiring"]
|
||||
|
||||
[presets.refactor]
|
||||
description = "Review focused on code quality during refactoring"
|
||||
legs = ["elegance", "smells", "style", "commit-discipline"]
|
||||
|
||||
# Synthesis step - combines all leg outputs
|
||||
[synthesis]
|
||||
title = "Review Synthesis"
|
||||
@@ -310,10 +441,13 @@ A synthesized review at: {{.output.directory}}/{{.output.synthesis}}
|
||||
2. **Critical Issues** - P0 items from all legs, deduplicated
|
||||
3. **Major Issues** - P1 items, grouped by theme
|
||||
4. **Minor Issues** - P2 items, briefly listed
|
||||
5. **Positive Observations** - What's done well
|
||||
6. **Recommendations** - Actionable next steps
|
||||
5. **Wiring Gaps** - Dependencies added but not used (from wiring leg)
|
||||
6. **Commit Quality** - Notes on commit discipline
|
||||
7. **Test Quality** - Assessment of test meaningfulness
|
||||
8. **Positive Observations** - What's done well
|
||||
9. **Recommendations** - Actionable next steps
|
||||
|
||||
Deduplicate issues found by multiple legs (note which legs found them).
|
||||
Prioritize by impact and effort. Be actionable.
|
||||
"""
|
||||
depends_on = ["correctness", "performance", "security", "elegance", "resilience", "style", "smells"]
|
||||
depends_on = ["correctness", "performance", "security", "elegance", "resilience", "style", "smells", "wiring", "commit-discipline", "test-quality"]
|
||||
|
||||
381
.beads/formulas/gastown-release.formula.toml
Normal file
381
.beads/formulas/gastown-release.formula.toml
Normal file
@@ -0,0 +1,381 @@
|
||||
description = """
|
||||
Gas Town release workflow - from version bump to verified release.
|
||||
|
||||
This formula orchestrates a release cycle for Gas Town:
|
||||
1. Preflight checks (workspace cleanliness, clean git, up to date)
|
||||
2. Documentation updates (CHANGELOG.md, info.go)
|
||||
3. Version bump (all components)
|
||||
4. Git operations (commit, tag, push)
|
||||
5. Local installation update
|
||||
6. Daemon restart
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
gt mol wisp create gastown-release --var version=0.3.0
|
||||
```
|
||||
|
||||
Or assign to a crew member:
|
||||
```bash
|
||||
gt sling gastown/crew/max --formula gastown-release --var version=0.3.0
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
- **Crew members (with user present)**: Attempt to resolve issues (merge branches,
|
||||
commit/stash work). Ask the user if blocked.
|
||||
- **Polecats (autonomous)**: Escalate via `gt escalate` if preflight fails or
|
||||
unrecoverable errors occur. Do not proceed with a release if workspaces have
|
||||
uncommitted work.
|
||||
"""
|
||||
formula = "gastown-release"
|
||||
type = "workflow"
|
||||
version = 1
|
||||
|
||||
[vars.version]
|
||||
description = "The semantic version to release (e.g., 0.3.0)"
|
||||
required = true
|
||||
|
||||
[[steps]]
|
||||
id = "preflight-workspaces"
|
||||
title = "Preflight: Check all workspaces for uncommitted work"
|
||||
description = """
|
||||
Before releasing, ensure no gastown workspaces have uncommitted work that would
|
||||
be excluded from the release.
|
||||
|
||||
Check all crew workspaces and the mayor rig:
|
||||
|
||||
```bash
|
||||
# Check each workspace
|
||||
for dir in $GT_ROOT/gastown/crew/* $GT_ROOT/gastown/mayor; do
|
||||
if [ -d "$dir/.git" ] || [ -d "$dir" ]; then
|
||||
echo "=== Checking $dir ==="
|
||||
cd "$dir" 2>/dev/null || continue
|
||||
|
||||
# Check for uncommitted changes
|
||||
if ! git diff-index --quiet HEAD -- 2>/dev/null; then
|
||||
echo " ⚠ UNCOMMITTED CHANGES"
|
||||
git status --short
|
||||
fi
|
||||
|
||||
# Check for stashes
|
||||
stash_count=$(git stash list 2>/dev/null | wc -l | tr -d ' ')
|
||||
if [ "$stash_count" -gt 0 ]; then
|
||||
echo " ⚠ HAS $stash_count STASH(ES)"
|
||||
git stash list
|
||||
fi
|
||||
|
||||
# Check for non-main branches with unpushed commits
|
||||
current_branch=$(git branch --show-current 2>/dev/null)
|
||||
if [ -n "$current_branch" ] && [ "$current_branch" != "main" ]; then
|
||||
echo " ⚠ ON BRANCH: $current_branch (not main)"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
```
|
||||
|
||||
## If issues found:
|
||||
|
||||
**For crew members (interactive)**:
|
||||
1. Try to resolve: merge branches, commit work, apply/drop stashes
|
||||
2. If work is in-progress and not ready, ask the user whether to:
|
||||
- Wait for completion
|
||||
- Stash and proceed
|
||||
- Exclude from this release
|
||||
3. Only proceed when all workspaces are clean on main
|
||||
|
||||
**For polecats (autonomous)**:
|
||||
1. If any workspace has uncommitted work: STOP and escalate
|
||||
2. Use: `gt escalate --severity medium "Release blocked: workspace X has uncommitted work"`
|
||||
3. Do NOT proceed with release - uncommitted work would be excluded
|
||||
|
||||
This step is critical. A release with uncommitted work means losing changes.
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "preflight-git"
|
||||
title = "Preflight: Check git status"
|
||||
needs = ["preflight-workspaces"]
|
||||
description = """
|
||||
Ensure YOUR working tree is clean before starting release.
|
||||
|
||||
```bash
|
||||
git status
|
||||
```
|
||||
|
||||
If there are uncommitted changes:
|
||||
- Commit them first (if they should be in the release)
|
||||
- Stash them: `git stash` (if they should NOT be in the release)
|
||||
|
||||
## On failure:
|
||||
- **Crew**: Commit or stash your changes, then continue
|
||||
- **Polecat**: Escalate if you have uncommitted changes you didn't create
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "preflight-pull"
|
||||
title = "Preflight: Pull latest"
|
||||
needs = ["preflight-git"]
|
||||
description = """
|
||||
Ensure we're up to date with origin.
|
||||
|
||||
```bash
|
||||
git pull --rebase
|
||||
```
|
||||
|
||||
## On merge conflicts:
|
||||
- **Crew**: Resolve conflicts manually. Ask user if unsure about resolution.
|
||||
- **Polecat**: Escalate immediately. Do not attempt to resolve release-blocking
|
||||
merge conflicts autonomously.
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "review-changes"
|
||||
title = "Review changes since last release"
|
||||
needs = ["preflight-pull"]
|
||||
description = """
|
||||
Understand what's being released.
|
||||
|
||||
```bash
|
||||
git log $(git describe --tags --abbrev=0)..HEAD --oneline
|
||||
```
|
||||
|
||||
Categorize changes:
|
||||
- Features (feat:)
|
||||
- Fixes (fix:)
|
||||
- Breaking changes
|
||||
- Documentation
|
||||
|
||||
If there are no changes since last release, ask whether to proceed with an
|
||||
empty release (version bump only).
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "update-changelog"
|
||||
title = "Update CHANGELOG.md"
|
||||
needs = ["review-changes"]
|
||||
description = """
|
||||
Write the [Unreleased] section with all changes for {{version}}.
|
||||
|
||||
Edit CHANGELOG.md and add entries under [Unreleased].
|
||||
|
||||
Format: Keep a Changelog (https://keepachangelog.com)
|
||||
|
||||
Sections to use:
|
||||
- ### Added - for new features
|
||||
- ### Changed - for changes in existing functionality
|
||||
- ### Fixed - for bug fixes
|
||||
- ### Deprecated - for soon-to-be removed features
|
||||
- ### Removed - for now removed features
|
||||
|
||||
Base entries on the git log from the previous step. Group related commits.
|
||||
|
||||
The bump script will automatically create the version header with today's date.
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "update-info-go"
|
||||
title = "Update info.go versionChanges"
|
||||
needs = ["update-changelog"]
|
||||
description = """
|
||||
Add entry to versionChanges in internal/cmd/info.go.
|
||||
|
||||
This powers `gt info --whats-new` for agents.
|
||||
|
||||
Add a new entry at the TOP of the versionChanges slice:
|
||||
|
||||
```go
|
||||
{
|
||||
Version: "{{version}}",
|
||||
Date: "YYYY-MM-DD", // Today's date
|
||||
Changes: []string{
|
||||
"NEW: Key feature 1",
|
||||
"NEW: Key feature 2",
|
||||
"CHANGED: Modified behavior",
|
||||
"FIX: Bug that was fixed",
|
||||
},
|
||||
},
|
||||
```
|
||||
|
||||
Focus on agent-relevant and workflow-impacting changes.
|
||||
Prefix with NEW:, CHANGED:, FIX:, or DEPRECATED: for clarity.
|
||||
|
||||
This is similar to CHANGELOG.md but focused on what agents need to know -
|
||||
new commands, changed behaviors, workflow impacts.
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "run-bump-script"
|
||||
title = "Run bump-version.sh"
|
||||
needs = ["update-info-go"]
|
||||
description = """
|
||||
Update all component versions atomically.
|
||||
|
||||
```bash
|
||||
./scripts/bump-version.sh {{version}}
|
||||
```
|
||||
|
||||
This updates:
|
||||
- internal/cmd/version.go - CLI version constant
|
||||
- npm-package/package.json - npm package version
|
||||
- CHANGELOG.md - Creates [{{version}}] header with date
|
||||
|
||||
Review the changes shown by the script.
|
||||
|
||||
## On failure:
|
||||
If the script fails (e.g., version already exists, format error):
|
||||
- **Crew**: Debug and fix, or ask user
|
||||
- **Polecat**: Escalate with error details
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "verify-versions"
|
||||
title = "Verify version consistency"
|
||||
needs = ["run-bump-script"]
|
||||
description = """
|
||||
Confirm all versions match {{version}}.
|
||||
|
||||
```bash
|
||||
grep 'Version = ' internal/cmd/version.go
|
||||
grep '"version"' npm-package/package.json | head -1
|
||||
```
|
||||
|
||||
Both should show {{version}}.
|
||||
|
||||
## On mismatch:
|
||||
Do NOT proceed. Either the bump script failed or there's a bug.
|
||||
- **Crew**: Investigate and fix manually
|
||||
- **Polecat**: Escalate immediately - version mismatch is a release blocker
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "commit-release"
|
||||
title = "Commit release"
|
||||
needs = ["verify-versions"]
|
||||
description = """
|
||||
Stage and commit all version changes.
|
||||
|
||||
```bash
|
||||
git add -A
|
||||
git commit -m "chore: Bump version to {{version}}"
|
||||
```
|
||||
|
||||
Review the commit to ensure all expected files are included:
|
||||
- internal/cmd/version.go
|
||||
- internal/cmd/info.go
|
||||
- npm-package/package.json
|
||||
- CHANGELOG.md
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "create-tag"
|
||||
title = "Create release tag"
|
||||
needs = ["commit-release"]
|
||||
description = """
|
||||
Create annotated git tag.
|
||||
|
||||
```bash
|
||||
git tag -a v{{version}} -m "Release v{{version}}"
|
||||
```
|
||||
|
||||
Verify: `git tag -l | tail -5`
|
||||
|
||||
## If tag already exists:
|
||||
The version may have been previously (partially) released.
|
||||
- **Crew**: Ask user how to proceed (delete tag and retry? use different version?)
|
||||
- **Polecat**: Escalate - do not delete existing tags autonomously
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "push-release"
|
||||
title = "Push commit and tag"
|
||||
needs = ["create-tag"]
|
||||
description = """
|
||||
Push the release commit and tag to origin.
|
||||
|
||||
```bash
|
||||
git push origin main
|
||||
git push origin v{{version}}
|
||||
```
|
||||
|
||||
This triggers GitHub Actions to build release artifacts.
|
||||
|
||||
Monitor: https://github.com/steveyegge/gastown/actions
|
||||
|
||||
## On push rejection:
|
||||
Someone pushed while we were releasing.
|
||||
- **Crew**: Pull, rebase, re-tag, try again. Ask user if conflicts.
|
||||
- **Polecat**: Escalate - release coordination conflict requires human decision
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "local-install"
|
||||
title = "Update local installation"
|
||||
needs = ["push-release"]
|
||||
description = """
|
||||
Rebuild and install gt locally with the new version.
|
||||
|
||||
```bash
|
||||
go build -o $(go env GOPATH)/bin/gt ./cmd/gt
|
||||
```
|
||||
|
||||
On macOS, codesign the binary:
|
||||
```bash
|
||||
codesign -f -s - $(go env GOPATH)/bin/gt
|
||||
```
|
||||
|
||||
Verify:
|
||||
```bash
|
||||
gt version
|
||||
```
|
||||
|
||||
Should show {{version}}.
|
||||
|
||||
## On build failure:
|
||||
- **Crew**: Debug build error, fix, retry
|
||||
- **Polecat**: Escalate - release is pushed but local install failed
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "restart-daemons"
|
||||
title = "Restart daemons"
|
||||
needs = ["local-install"]
|
||||
description = """
|
||||
Restart gt daemon to pick up the new version.
|
||||
|
||||
```bash
|
||||
gt daemon stop && gt daemon start
|
||||
```
|
||||
|
||||
Verify:
|
||||
```bash
|
||||
gt daemon status
|
||||
```
|
||||
|
||||
The daemon should show the new binary timestamp and no stale warning.
|
||||
|
||||
Note: This step is safe to retry if it fails.
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
id = "release-complete"
|
||||
title = "Release complete"
|
||||
needs = ["restart-daemons"]
|
||||
description = """
|
||||
Release v{{version}} is complete!
|
||||
|
||||
Summary:
|
||||
- All workspaces verified clean before release
|
||||
- Version files updated (version.go, package.json)
|
||||
- CHANGELOG.md updated with release date
|
||||
- info.go versionChanges updated for `gt info --whats-new`
|
||||
- Git tag v{{version}} pushed
|
||||
- GitHub Actions triggered for artifact builds
|
||||
- Local gt binary rebuilt and installed
|
||||
- Daemons restarted with new version
|
||||
|
||||
Optional next steps:
|
||||
- Monitor GitHub Actions for release build completion
|
||||
- Verify release artifacts at https://github.com/steveyegge/gastown/releases
|
||||
- Announce the release
|
||||
"""
|
||||
@@ -27,7 +27,7 @@ Observe the current system state to inform triage decisions.
|
||||
**Step 1: Check Deacon state**
|
||||
```bash
|
||||
# Is Deacon session alive?
|
||||
tmux has-session -t gt-deacon 2>/dev/null && echo "alive" || echo "dead"
|
||||
tmux has-session -t hq-deacon 2>/dev/null && echo "alive" || echo "dead"
|
||||
|
||||
# If alive, what's the pane output showing?
|
||||
gt peek deacon --lines 20
|
||||
@@ -47,7 +47,7 @@ bd show hq-deacon 2>/dev/null
|
||||
gt feed --since 10m --plain | head -20
|
||||
|
||||
# Recent wisps (operational state)
|
||||
ls -lt ~/gt/.beads-wisp/*.wisp.json 2>/dev/null | head -5
|
||||
ls -lt $GT_ROOT/.beads-wisp/*.wisp.json 2>/dev/null | head -5
|
||||
```
|
||||
|
||||
**Step 4: Check Deacon mail**
|
||||
@@ -125,7 +125,7 @@ gt nudge deacon "Boot check-in: you have pending work"
|
||||
**WAKE**
|
||||
```bash
|
||||
# Send escape to break any tool waiting
|
||||
tmux send-keys -t gt-deacon Escape
|
||||
tmux send-keys -t hq-deacon Escape
|
||||
|
||||
# Brief pause
|
||||
sleep 1
|
||||
@@ -221,7 +221,7 @@ Then exit. The next daemon tick will spawn a fresh Boot.
|
||||
**Update status file**
|
||||
```bash
|
||||
# The gt boot command handles this automatically
|
||||
# Status is written to ~/gt/deacon/dogs/boot/.boot-status.json
|
||||
# Status is written to $GT_ROOT/deacon/dogs/boot/.boot-status.json
|
||||
```
|
||||
|
||||
Boot is ephemeral by design. Each instance runs fresh.
|
||||
|
||||
@@ -23,7 +23,7 @@ Witnesses detect it and escalate to the Mayor.
|
||||
The Deacon's agent bead last_activity timestamp is updated during each patrol
|
||||
cycle. Witnesses check this timestamp to verify health."""
|
||||
formula = "mol-deacon-patrol"
|
||||
version = 4
|
||||
version = 8
|
||||
|
||||
[[steps]]
|
||||
id = "inbox-check"
|
||||
@@ -84,10 +84,46 @@ Callbacks may spawn new polecats, update issue state, or trigger other actions.
|
||||
**Hygiene principle**: Archive messages after they're fully processed.
|
||||
Keep inbox near-empty - only unprocessed items should remain."""
|
||||
|
||||
[[steps]]
|
||||
id = "orphan-process-cleanup"
|
||||
title = "Clean up orphaned claude subagent processes"
|
||||
needs = ["inbox-check"]
|
||||
description = """
|
||||
Clean up orphaned claude subagent processes.
|
||||
|
||||
Claude Code's Task tool spawns subagent processes that sometimes don't clean up
|
||||
properly after completion. These accumulate and consume significant memory.
|
||||
|
||||
**Detection method:**
|
||||
Orphaned processes have no controlling terminal (TTY = "?"). Legitimate claude
|
||||
instances in terminals have a TTY like "pts/0".
|
||||
|
||||
**Run cleanup:**
|
||||
```bash
|
||||
gt deacon cleanup-orphans
|
||||
```
|
||||
|
||||
This command:
|
||||
1. Lists all claude/codex processes with `ps -eo pid,tty,comm`
|
||||
2. Filters for TTY = "?" (no controlling terminal)
|
||||
3. Sends SIGTERM to each orphaned process
|
||||
4. Reports how many were killed
|
||||
|
||||
**Why this is safe:**
|
||||
- Processes in terminals (your personal sessions) have a TTY - they won't be touched
|
||||
- Only kills processes that have no controlling terminal
|
||||
- These orphans are children of the tmux server with no TTY, indicating they're
|
||||
detached subagents that failed to exit
|
||||
|
||||
**If cleanup fails:**
|
||||
Log the error but continue patrol - this is best-effort cleanup.
|
||||
|
||||
**Exit criteria:** Orphan cleanup attempted (success or logged failure)."""
|
||||
|
||||
[[steps]]
|
||||
id = "trigger-pending-spawns"
|
||||
title = "Nudge newly spawned polecats"
|
||||
needs = ["inbox-check"]
|
||||
needs = ["orphan-process-cleanup"]
|
||||
description = """
|
||||
Nudge newly spawned polecats that are ready for input.
|
||||
|
||||
@@ -148,6 +184,49 @@ bd gate list --json
|
||||
After closing a gate, the Waiters field contains mail addresses to notify.
|
||||
Send a brief notification to each waiter that the gate has cleared."""
|
||||
|
||||
[[steps]]
|
||||
id = "dispatch-gated-molecules"
|
||||
title = "Dispatch molecules with resolved gates"
|
||||
needs = ["gate-evaluation"]
|
||||
description = """
|
||||
Find molecules blocked on gates that have now closed and dispatch them.
|
||||
|
||||
This completes the async resume cycle without explicit waiter tracking.
|
||||
The molecule state IS the waiter - patrol discovers reality each cycle.
|
||||
|
||||
**Step 1: Find gate-ready molecules**
|
||||
```bash
|
||||
bd mol ready --gated --json
|
||||
```
|
||||
|
||||
This returns molecules where:
|
||||
- Status is in_progress
|
||||
- Current step has a gate dependency
|
||||
- The gate bead is now closed
|
||||
- No polecat currently has it hooked
|
||||
|
||||
**Step 2: For each ready molecule, dispatch to the appropriate rig**
|
||||
```bash
|
||||
# Determine target rig from molecule metadata
|
||||
bd mol show <mol-id> --json
|
||||
# Look for rig field or infer from prefix
|
||||
|
||||
# Dispatch to that rig's polecat pool
|
||||
gt sling <mol-id> <rig>/polecats
|
||||
```
|
||||
|
||||
**Step 3: Log dispatch**
|
||||
Note which molecules were dispatched for observability:
|
||||
```bash
|
||||
# Molecule <mol-id> dispatched to <rig>/polecats (gate <gate-id> cleared)
|
||||
```
|
||||
|
||||
**If no gate-ready molecules:**
|
||||
Skip - nothing to dispatch. Gates haven't closed yet or molecules
|
||||
already have active polecats working on them.
|
||||
|
||||
**Exit criteria:** All gate-ready molecules dispatched to polecats."""
|
||||
|
||||
[[steps]]
|
||||
id = "check-convoy-completion"
|
||||
title = "Check convoy completion"
|
||||
@@ -258,7 +337,7 @@ Keep notifications brief and actionable. The recipient can run bd show for detai
|
||||
[[steps]]
|
||||
id = "health-scan"
|
||||
title = "Check Witness and Refinery health"
|
||||
needs = ["trigger-pending-spawns", "gate-evaluation", "fire-notifications"]
|
||||
needs = ["trigger-pending-spawns", "dispatch-gated-molecules", "fire-notifications"]
|
||||
description = """
|
||||
Check Witness and Refinery health for each rig.
|
||||
|
||||
@@ -342,14 +421,21 @@ Reset unresponsive_cycles to 0 when component responds normally."""
|
||||
|
||||
[[steps]]
|
||||
id = "zombie-scan"
|
||||
title = "Backup check for zombie polecats"
|
||||
title = "Detect zombie polecats (NO KILL AUTHORITY)"
|
||||
needs = ["health-scan"]
|
||||
description = """
|
||||
Defense-in-depth check for zombie polecats that Witness should have cleaned.
|
||||
Defense-in-depth DETECTION of zombie polecats that Witness should have cleaned.
|
||||
|
||||
**⚠️ CRITICAL: The Deacon has NO kill authority.**
|
||||
|
||||
These are workers with context, mid-task progress, unsaved state. Every kill
|
||||
destroys work. File the warrant and let Boot handle interrogation and execution.
|
||||
You do NOT have kill authority.
|
||||
|
||||
**Why this exists:**
|
||||
The Witness is responsible for nuking polecats after they complete work (via POLECAT_DONE).
|
||||
This step provides backup detection in case the Witness fails to clean up.
|
||||
The Witness is responsible for cleaning up polecats after they complete work.
|
||||
This step provides backup DETECTION in case the Witness fails to clean up.
|
||||
Detection only - Boot handles termination.
|
||||
|
||||
**Zombie criteria:**
|
||||
- State: idle or done (no active work assigned)
|
||||
@@ -357,26 +443,34 @@ This step provides backup detection in case the Witness fails to clean up.
|
||||
- No hooked work (nothing pending for this polecat)
|
||||
- Last activity: older than 10 minutes
|
||||
|
||||
**Run the zombie scan:**
|
||||
**Run the zombie scan (DRY RUN ONLY):**
|
||||
```bash
|
||||
gt deacon zombie-scan --dry-run
|
||||
```
|
||||
|
||||
**NEVER run:**
|
||||
- `gt deacon zombie-scan` (without --dry-run)
|
||||
- `tmux kill-session`
|
||||
- `gt polecat nuke`
|
||||
- Any command that terminates a session
|
||||
|
||||
**If zombies detected:**
|
||||
1. Review the output to confirm they are truly abandoned
|
||||
2. Run without --dry-run to nuke them:
|
||||
2. File a death warrant for each detected zombie:
|
||||
```bash
|
||||
gt deacon zombie-scan
|
||||
gt warrant file <polecat> --reason "Zombie detected: no session, no hook, idle >10m"
|
||||
```
|
||||
3. Boot will handle interrogation and execution
|
||||
4. Notify the Mayor about Witness failure:
|
||||
```bash
|
||||
gt mail send mayor/ -s "Witness cleanup failure" \
|
||||
-m "Filed death warrant for <polecat>. Witness failed to clean up."
|
||||
```
|
||||
3. This will:
|
||||
- Nuke each zombie polecat
|
||||
- Notify the Mayor about Witness failure
|
||||
- Log the cleanup action
|
||||
|
||||
**If no zombies:**
|
||||
No action needed - Witness is doing its job.
|
||||
|
||||
**Note:** This is a backup mechanism. If you frequently find zombies,
|
||||
**Note:** This is a backup mechanism. If you frequently detect zombies,
|
||||
investigate why the Witness isn't cleaning up properly."""
|
||||
|
||||
[[steps]]
|
||||
@@ -386,7 +480,7 @@ needs = ["zombie-scan"]
|
||||
description = """
|
||||
Execute registered plugins.
|
||||
|
||||
Scan ~/gt/plugins/ for plugin directories. Each plugin has a plugin.md with TOML frontmatter defining its gate (when to run) and instructions (what to do).
|
||||
Scan $GT_ROOT/plugins/ for plugin directories. Each plugin has a plugin.md with TOML frontmatter defining its gate (when to run) and instructions (what to do).
|
||||
|
||||
See docs/deacon-plugins.md for full documentation.
|
||||
|
||||
@@ -403,7 +497,7 @@ For each plugin:
|
||||
|
||||
Plugins marked parallel: true can run concurrently using Task tool subagents. Sequential plugins run one at a time in directory order.
|
||||
|
||||
Skip this step if ~/gt/plugins/ does not exist or is empty."""
|
||||
Skip this step if $GT_ROOT/plugins/ does not exist or is empty."""
|
||||
|
||||
[[steps]]
|
||||
id = "dog-pool-maintenance"
|
||||
@@ -441,10 +535,74 @@ gt dog status <name>
|
||||
|
||||
**Exit criteria:** Pool has at least 1 idle dog."""
|
||||
|
||||
[[steps]]
|
||||
id = "dog-health-check"
|
||||
title = "Check for stuck dogs"
|
||||
needs = ["dog-pool-maintenance"]
|
||||
description = """
|
||||
Check for dogs that have been working too long (stuck).
|
||||
|
||||
Dogs dispatched via `gt dog dispatch --plugin` are marked as "working" with
|
||||
a work description like "plugin:rebuild-gt". If a dog hangs, crashes, or
|
||||
takes too long, it needs intervention.
|
||||
|
||||
**Step 1: List working dogs**
|
||||
```bash
|
||||
gt dog list --json
|
||||
# Filter for state: "working"
|
||||
```
|
||||
|
||||
**Step 2: Check work duration**
|
||||
For each working dog:
|
||||
```bash
|
||||
gt dog status <name> --json
|
||||
# Check: work_started_at, current_work
|
||||
```
|
||||
|
||||
Compare against timeout:
|
||||
- If plugin has [execution] timeout in plugin.md, use that
|
||||
- Default timeout: 10 minutes for infrastructure tasks
|
||||
|
||||
**Duration calculation:**
|
||||
```
|
||||
stuck_threshold = plugin_timeout or 10m
|
||||
duration = now - work_started_at
|
||||
is_stuck = duration > stuck_threshold
|
||||
```
|
||||
|
||||
**Step 3: Handle stuck dogs**
|
||||
|
||||
For dogs working > timeout:
|
||||
```bash
|
||||
# Option A: File death warrant (Boot handles termination)
|
||||
gt warrant file deacon/dogs/<name> --reason "Stuck: working on <work> for <duration>"
|
||||
|
||||
# Option B: Force clear work and notify
|
||||
gt dog clear <name> --force
|
||||
gt mail send deacon/ -s "DOG_TIMEOUT <name>" -m "Dog <name> timed out on <work> after <duration>"
|
||||
```
|
||||
|
||||
**Decision matrix:**
|
||||
|
||||
| Duration over timeout | Action |
|
||||
|----------------------|--------|
|
||||
| < 2x timeout | Log warning, check next cycle |
|
||||
| 2x - 5x timeout | File death warrant |
|
||||
| > 5x timeout | Force clear + escalate to Mayor |
|
||||
|
||||
**Step 4: Track chronic failures**
|
||||
If same dog gets stuck repeatedly:
|
||||
```bash
|
||||
gt mail send mayor/ -s "Dog <name> chronic failures" \
|
||||
-m "Dog has timed out N times in last 24h. Consider removing from pool."
|
||||
```
|
||||
|
||||
**Exit criteria:** All stuck dogs handled (warrant filed or cleared)."""
|
||||
|
||||
[[steps]]
|
||||
id = "orphan-check"
|
||||
title = "Detect abandoned work"
|
||||
needs = ["dog-pool-maintenance"]
|
||||
needs = ["dog-health-check"]
|
||||
description = """
|
||||
**DETECT ONLY** - Check for orphaned state and dispatch to dog if found.
|
||||
|
||||
@@ -505,23 +663,86 @@ Skip dispatch - system is healthy.
|
||||
|
||||
**Exit criteria:** Session GC dispatched to dog (if needed)."""
|
||||
|
||||
[[steps]]
|
||||
id = "costs-digest"
|
||||
title = "Aggregate daily costs [DISABLED]"
|
||||
needs = ["session-gc"]
|
||||
description = """
|
||||
**⚠️ DISABLED** - Skip this step entirely.
|
||||
|
||||
Cost tracking is temporarily disabled because Claude Code does not expose
|
||||
session costs in a way that can be captured programmatically.
|
||||
|
||||
**Why disabled:**
|
||||
- The `gt costs` command uses tmux capture-pane to find costs
|
||||
- Claude Code displays costs in the TUI status bar, not in scrollback
|
||||
- All sessions show $0.00 because capture-pane can't see TUI chrome
|
||||
- The infrastructure is sound but has no data source
|
||||
|
||||
**What we need from Claude Code:**
|
||||
- Stop hook env var (e.g., `$CLAUDE_SESSION_COST`)
|
||||
- Or queryable file/API endpoint
|
||||
|
||||
**Re-enable when:** Claude Code exposes cost data via API or environment.
|
||||
|
||||
See: GH#24, gt-7awfj
|
||||
|
||||
**Exit criteria:** Skip this step - proceed to next."""
|
||||
|
||||
[[steps]]
|
||||
id = "patrol-digest"
|
||||
title = "Aggregate daily patrol digests"
|
||||
needs = ["costs-digest"]
|
||||
description = """
|
||||
**DAILY DIGEST** - Aggregate yesterday's patrol cycle digests.
|
||||
|
||||
Patrol cycles (Deacon, Witness, Refinery) create ephemeral per-cycle digests
|
||||
to avoid JSONL pollution. This step aggregates them into a single permanent
|
||||
"Patrol Report YYYY-MM-DD" bead for audit purposes.
|
||||
|
||||
**Step 1: Check if digest is needed**
|
||||
```bash
|
||||
# Preview yesterday's patrol digests (dry run)
|
||||
gt patrol digest --yesterday --dry-run
|
||||
```
|
||||
|
||||
If output shows "No patrol digests found", skip to Step 3.
|
||||
|
||||
**Step 2: Create the digest**
|
||||
```bash
|
||||
gt patrol digest --yesterday
|
||||
```
|
||||
|
||||
This:
|
||||
- Queries all ephemeral patrol digests from yesterday
|
||||
- Creates a single "Patrol Report YYYY-MM-DD" bead with aggregated data
|
||||
- Deletes the source digests
|
||||
|
||||
**Step 3: Verify**
|
||||
Daily patrol digests preserve audit trail without per-cycle pollution.
|
||||
|
||||
**Timing**: Run once per morning patrol cycle. The --yesterday flag ensures
|
||||
we don't try to digest today's incomplete data.
|
||||
|
||||
**Exit criteria:** Yesterday's patrol digests aggregated (or none to aggregate)."""
|
||||
|
||||
[[steps]]
|
||||
id = "log-maintenance"
|
||||
title = "Rotate logs and prune state"
|
||||
needs = ["session-gc"]
|
||||
needs = ["patrol-digest"]
|
||||
description = """
|
||||
Maintain daemon logs and state files.
|
||||
|
||||
**Step 1: Check daemon.log size**
|
||||
```bash
|
||||
# Get log file size
|
||||
ls -la ~/.beads/daemon*.log 2>/dev/null || ls -la ~/gt/.beads/daemon*.log 2>/dev/null
|
||||
ls -la ~/.beads/daemon*.log 2>/dev/null || ls -la $GT_ROOT/.beads/daemon*.log 2>/dev/null
|
||||
```
|
||||
|
||||
If daemon.log exceeds 10MB:
|
||||
```bash
|
||||
# Rotate with date suffix and gzip
|
||||
LOGFILE="$HOME/gt/.beads/daemon.log"
|
||||
LOGFILE="$GT_ROOT/.beads/daemon.log"
|
||||
if [ -f "$LOGFILE" ] && [ $(stat -f%z "$LOGFILE" 2>/dev/null || stat -c%s "$LOGFILE") -gt 10485760 ]; then
|
||||
DATE=$(date +%Y-%m-%dT%H-%M-%S)
|
||||
mv "$LOGFILE" "${LOGFILE%.log}-${DATE}.log"
|
||||
@@ -533,7 +754,7 @@ fi
|
||||
|
||||
Clean up daemon logs older than 7 days:
|
||||
```bash
|
||||
find ~/gt/.beads/ -name "daemon-*.log.gz" -mtime +7 -delete
|
||||
find $GT_ROOT/.beads/ -name "daemon-*.log.gz" -mtime +7 -delete
|
||||
```
|
||||
|
||||
**Step 3: Prune state.json of dead sessions**
|
||||
@@ -611,15 +832,39 @@ Burn and let daemon respawn, or exit if context high.
|
||||
Decision point at end of patrol cycle:
|
||||
|
||||
If context is LOW:
|
||||
- **Sleep 60 seconds minimum** before next patrol cycle
|
||||
- If town is idle (no in_progress work), sleep longer (2-5 minutes)
|
||||
- Return to inbox-check step
|
||||
Use await-signal with exponential backoff to wait for activity:
|
||||
|
||||
**Why longer sleep?**
|
||||
- Idle agents should not be disturbed
|
||||
- Health checks every few seconds flood inboxes and waste context
|
||||
- The daemon (10-minute heartbeat) is the safety net for dead sessions
|
||||
- Active work triggers feed events, which wake agents naturally
|
||||
```bash
|
||||
gt mol step await-signal --agent-bead hq-deacon \
|
||||
--backoff-base 60s --backoff-mult 2 --backoff-max 10m
|
||||
```
|
||||
|
||||
This command:
|
||||
1. Subscribes to `bd activity --follow` (beads activity feed)
|
||||
2. Returns IMMEDIATELY when any beads activity occurs
|
||||
3. If no activity, times out with exponential backoff:
|
||||
- First timeout: 60s
|
||||
- Second timeout: 120s
|
||||
- Third timeout: 240s
|
||||
- ...capped at 10 minutes max
|
||||
4. Tracks `idle:N` label on hq-deacon bead for backoff state
|
||||
|
||||
**On signal received** (activity detected):
|
||||
Reset the idle counter and start next patrol cycle:
|
||||
```bash
|
||||
gt agent state hq-deacon --set idle=0
|
||||
```
|
||||
Then return to inbox-check step.
|
||||
|
||||
**On timeout** (no activity):
|
||||
The idle counter was auto-incremented. Continue to next patrol cycle
|
||||
(the longer backoff will apply next time). Return to inbox-check step.
|
||||
|
||||
**Why this approach?**
|
||||
- Any `gt` or `bd` command triggers beads activity, waking the Deacon
|
||||
- Idle towns let the Deacon sleep longer (up to 10 min between patrols)
|
||||
- Active work wakes the Deacon immediately via the feed
|
||||
- No polling or fixed sleep intervals
|
||||
|
||||
If context is HIGH:
|
||||
- Write state to persistent storage
|
||||
|
||||
@@ -1,62 +0,0 @@
|
||||
{
|
||||
"formula": "mol-gastown-boot",
|
||||
"description": "Mayor bootstraps Gas Town via a verification-gated lifecycle molecule.\n\n## Purpose\nWhen Mayor executes \"boot up gas town\", this proto provides the workflow.\nEach step has action + verification - steps stay open until outcome is confirmed.\n\n## Key Principles\n1. **Verification-gated steps** - Not \"command ran\" but \"outcome confirmed\"\n2. **gt peek for verification** - Capture session output to detect stalls\n3. **gt nudge for recovery** - Reliable message delivery to unstick agents\n4. **Parallel where possible** - Witnesses and refineries can start in parallel\n5. **Ephemeral execution** - Boot is a wisp, squashed to digest after completion\n\n## Execution\n```bash\nbd mol wisp mol-gastown-boot # Create wisp\n```",
|
||||
"version": 1,
|
||||
"steps": [
|
||||
{
|
||||
"id": "ensure-daemon",
|
||||
"title": "Ensure daemon",
|
||||
"description": "Verify the Gas Town daemon is running.\n\n## Action\n```bash\ngt daemon status || gt daemon start\n```\n\n## Verify\n1. Daemon PID file exists: `~/.gt/daemon.pid`\n2. Process is alive: `kill -0 $(cat ~/.gt/daemon.pid)`\n3. Daemon responds: `gt daemon status` returns success\n\n## OnFail\nCannot start daemon. Log error and continue - some commands work without daemon."
|
||||
},
|
||||
{
|
||||
"id": "ensure-deacon",
|
||||
"title": "Ensure deacon",
|
||||
"needs": ["ensure-daemon"],
|
||||
"description": "Start the Deacon and verify patrol mode is active.\n\n## Action\n```bash\ngt deacon start\n```\n\n## Verify\n1. Session exists: `tmux has-session -t gt-deacon 2>/dev/null`\n2. Not stalled: `gt peek deacon/` does NOT show \"> Try\" prompt\n3. Heartbeat fresh: `deacon/heartbeat.json` modified < 2 min ago\n\n## OnStall\n```bash\ngt nudge deacon/ \"Start patrol.\"\nsleep 30\n# Re-verify\n```"
|
||||
},
|
||||
{
|
||||
"id": "ensure-witnesses",
|
||||
"title": "Ensure witnesses",
|
||||
"needs": ["ensure-deacon"],
|
||||
"type": "parallel",
|
||||
"description": "Parallel container: Start all rig witnesses.\n\nChildren execute in parallel. Container completes when all children complete.",
|
||||
"children": [
|
||||
{
|
||||
"id": "ensure-gastown-witness",
|
||||
"title": "Ensure gastown witness",
|
||||
"description": "Start the gastown rig Witness.\n\n## Action\n```bash\ngt witness start gastown\n```\n\n## Verify\n1. Session exists: `tmux has-session -t gastown-witness 2>/dev/null`\n2. Not stalled: `gt peek gastown/witness` does NOT show \"> Try\" prompt\n3. Heartbeat fresh: Last patrol cycle < 5 min ago"
|
||||
},
|
||||
{
|
||||
"id": "ensure-beads-witness",
|
||||
"title": "Ensure beads witness",
|
||||
"description": "Start the beads rig Witness.\n\n## Action\n```bash\ngt witness start beads\n```\n\n## Verify\n1. Session exists: `tmux has-session -t beads-witness 2>/dev/null`\n2. Not stalled: `gt peek beads/witness` does NOT show \"> Try\" prompt\n3. Heartbeat fresh: Last patrol cycle < 5 min ago"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "ensure-refineries",
|
||||
"title": "Ensure refineries",
|
||||
"needs": ["ensure-deacon"],
|
||||
"type": "parallel",
|
||||
"description": "Parallel container: Start all rig refineries.\n\nChildren execute in parallel. Container completes when all children complete.",
|
||||
"children": [
|
||||
{
|
||||
"id": "ensure-gastown-refinery",
|
||||
"title": "Ensure gastown refinery",
|
||||
"description": "Start the gastown rig Refinery.\n\n## Action\n```bash\ngt refinery start gastown\n```\n\n## Verify\n1. Session exists: `tmux has-session -t gastown-refinery 2>/dev/null`\n2. Not stalled: `gt peek gastown/refinery` does NOT show \"> Try\" prompt\n3. Queue processing: Refinery can receive merge requests"
|
||||
},
|
||||
{
|
||||
"id": "ensure-beads-refinery",
|
||||
"title": "Ensure beads refinery",
|
||||
"description": "Start the beads rig Refinery.\n\n## Action\n```bash\ngt refinery start beads\n```\n\n## Verify\n1. Session exists: `tmux has-session -t beads-refinery 2>/dev/null`\n2. Not stalled: `gt peek beads/refinery` does NOT show \"> Try\" prompt\n3. Queue processing: Refinery can receive merge requests"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "verify-town-health",
|
||||
"title": "Verify town health",
|
||||
"needs": ["ensure-witnesses", "ensure-refineries"],
|
||||
"description": "Final verification that Gas Town is healthy.\n\n## Action\n```bash\ngt status\n```\n\n## Verify\n1. Daemon running: Shows daemon status OK\n2. Deacon active: Shows deacon in patrol mode\n3. All witnesses: Each rig witness shows active\n4. All refineries: Each rig refinery shows active\n\n## OnFail\nLog degraded state but consider boot complete. Some agents may need manual recovery.\nRun `gt doctor` for detailed diagnostics."
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -48,7 +48,7 @@ gt deacon start
|
||||
```
|
||||
|
||||
## Verify
|
||||
1. Session exists: `tmux has-session -t gt-deacon 2>/dev/null`
|
||||
1. Session exists: `tmux has-session -t hq-deacon 2>/dev/null`
|
||||
2. Not stalled: `gt peek deacon/` does NOT show \"> Try\" prompt
|
||||
3. Heartbeat fresh: `deacon/heartbeat.json` modified < 2 min ago
|
||||
|
||||
|
||||
318
.beads/formulas/mol-polecat-code-review.formula.toml
Normal file
318
.beads/formulas/mol-polecat-code-review.formula.toml
Normal file
@@ -0,0 +1,318 @@
|
||||
description = """
|
||||
Review code and file beads for issues found.
|
||||
|
||||
This molecule guides a polecat through a code review task - examining a portion
|
||||
of the codebase for bugs, security issues, code quality problems, or improvement
|
||||
opportunities. The output is a set of beads capturing actionable findings.
|
||||
|
||||
## Polecat Contract (Self-Cleaning Model)
|
||||
|
||||
You are a self-cleaning worker. You:
|
||||
1. Receive work via your hook (pinned molecule + review scope)
|
||||
2. Work through molecule steps using `bd ready` / `bd close <step>`
|
||||
3. Complete and self-clean via `gt done` (submit findings + nuke yourself)
|
||||
4. You are GONE - your findings are recorded in beads
|
||||
|
||||
**Self-cleaning:** When you run `gt done`, you submit your findings, nuke your
|
||||
sandbox, and exit. There is no idle state. Done means gone.
|
||||
|
||||
**Important:** This formula defines the template. Your molecule already has step
|
||||
beads created from it. Use `bd ready` to find them - do NOT read this file directly.
|
||||
|
||||
**You do NOT:**
|
||||
- Fix the issues yourself (file beads, let other polecats fix)
|
||||
- Scope creep into unrelated areas
|
||||
- Wait for someone to act on findings (you're done after filing)
|
||||
|
||||
## Variables
|
||||
|
||||
| Variable | Source | Description |
|
||||
|----------|--------|-------------|
|
||||
| scope | hook_bead | What to review (file path, directory, or description) |
|
||||
| issue | hook_bead | The tracking issue for this review task |
|
||||
| focus | hook_bead | Optional focus area (security, performance, etc.) |
|
||||
|
||||
## Failure Modes
|
||||
|
||||
| Situation | Action |
|
||||
|-----------|--------|
|
||||
| Scope too broad | Mail Witness, request narrower scope |
|
||||
| Can't understand code | Mail Witness for context |
|
||||
| Critical issue found | Mail Witness immediately, then continue |"""
|
||||
formula = "mol-polecat-code-review"
|
||||
version = 1
|
||||
|
||||
[[steps]]
|
||||
id = "load-context"
|
||||
title = "Load context and understand the review scope"
|
||||
description = """
|
||||
Initialize your session and understand what you're reviewing.
|
||||
|
||||
**1. Prime your environment:**
|
||||
```bash
|
||||
gt prime # Load role context
|
||||
bd prime # Load beads context
|
||||
```
|
||||
|
||||
**2. Check your hook:**
|
||||
```bash
|
||||
gt hook # Shows your pinned molecule and hook_bead
|
||||
```
|
||||
|
||||
The hook_bead describes your review scope. Read the tracking issue:
|
||||
```bash
|
||||
bd show {{issue}} # Full issue details
|
||||
```
|
||||
|
||||
**3. Understand the scope:**
|
||||
- What files/directories are in scope?
|
||||
- Is there a specific focus (security, performance, correctness)?
|
||||
- What's the context - why is this review happening?
|
||||
|
||||
**4. Locate the code:**
|
||||
```bash
|
||||
# If scope is a path:
|
||||
ls -la {{scope}}
|
||||
head -100 {{scope}} # Quick look at the code
|
||||
|
||||
# If scope is a directory:
|
||||
find {{scope}} -type f -name "*.go" | head -20
|
||||
```
|
||||
|
||||
**5. Check for recent changes:**
|
||||
```bash
|
||||
git log --oneline -10 -- {{scope}}
|
||||
```
|
||||
|
||||
**Exit criteria:** You understand what you're reviewing and why."""
|
||||
|
||||
[[steps]]
|
||||
id = "survey-code"
|
||||
title = "Survey the code structure"
|
||||
needs = ["load-context"]
|
||||
description = """
|
||||
Get a high-level understanding before diving into details.
|
||||
|
||||
**1. Understand the structure:**
|
||||
```bash
|
||||
# For a directory:
|
||||
tree {{scope}} -L 2
|
||||
|
||||
# For a file:
|
||||
wc -l {{scope}} # How big is it?
|
||||
```
|
||||
|
||||
**2. Identify key components:**
|
||||
- What are the main types/structs?
|
||||
- What are the public functions?
|
||||
- What are the dependencies?
|
||||
|
||||
**3. Read the tests (if any):**
|
||||
```bash
|
||||
find {{scope}} -name "*_test.go" | xargs head -50
|
||||
```
|
||||
Tests often reveal intended behavior.
|
||||
|
||||
**4. Note initial impressions:**
|
||||
- Is the code well-organized?
|
||||
- Are there obvious patterns or anti-patterns?
|
||||
- What areas look risky?
|
||||
|
||||
**Exit criteria:** You have a mental map of the code structure."""
|
||||
|
||||
[[steps]]
|
||||
id = "detailed-review"
|
||||
title = "Perform detailed code review"
|
||||
needs = ["survey-code"]
|
||||
description = """
|
||||
Systematically review the code for issues.
|
||||
|
||||
**Review checklist:**
|
||||
|
||||
| Category | Look For |
|
||||
|----------|----------|
|
||||
| **Correctness** | Logic errors, off-by-one, nil handling, race conditions |
|
||||
| **Security** | Injection, auth bypass, secrets in code, unsafe operations |
|
||||
| **Error handling** | Swallowed errors, missing checks, unclear error messages |
|
||||
| **Performance** | N+1 queries, unnecessary allocations, blocking calls |
|
||||
| **Maintainability** | Dead code, unclear naming, missing comments on complex logic |
|
||||
| **Testing** | Untested paths, missing edge cases, flaky tests |
|
||||
|
||||
**Focus on {{focus}} if specified.**
|
||||
|
||||
**1. Read through the code:**
|
||||
```bash
|
||||
cat {{scope}} # For single file
|
||||
# Or read files systematically for a directory
|
||||
```
|
||||
|
||||
**2. For each issue found, note:**
|
||||
- File and line number
|
||||
- Category (bug, security, performance, etc.)
|
||||
- Severity (critical, high, medium, low)
|
||||
- Description of the issue
|
||||
- Suggested fix (if obvious)
|
||||
|
||||
**3. Don't fix issues yourself:**
|
||||
Your job is to find and report, not fix. File beads.
|
||||
|
||||
**Exit criteria:** You've reviewed all code in scope and noted issues."""
|
||||
|
||||
[[steps]]
|
||||
id = "prioritize-findings"
|
||||
title = "Prioritize and categorize findings"
|
||||
needs = ["detailed-review"]
|
||||
description = """
|
||||
Organize your findings by priority and category.
|
||||
|
||||
**Priority levels:**
|
||||
|
||||
| Priority | Description | Action |
|
||||
|----------|-------------|--------|
|
||||
| P0 | Security vulnerability, data loss risk | Mail Witness immediately |
|
||||
| P1 | Bug affecting users, broken functionality | File as bug, high priority |
|
||||
| P2 | Code quality issue, potential future bug | File as task |
|
||||
| P3 | Improvement opportunity, nice-to-have | File as task, low priority |
|
||||
|
||||
**1. Sort your findings:**
|
||||
Group by priority, then by category.
|
||||
|
||||
**2. For P0 issues:**
|
||||
```bash
|
||||
gt mail send {{rig}}/witness -s "CRITICAL: Security issue found" -m "Scope: {{scope}}
|
||||
Issue: {{issue}}
|
||||
Finding: <description of critical issue>
|
||||
Location: <file:line>"
|
||||
```
|
||||
|
||||
**3. Prepare bead descriptions:**
|
||||
For each finding, prepare:
|
||||
- Clear title
|
||||
- File/line location
|
||||
- Description of the issue
|
||||
- Why it matters
|
||||
- Suggested fix (if known)
|
||||
|
||||
**Exit criteria:** Findings prioritized and ready to file."""
|
||||
|
||||
[[steps]]
|
||||
id = "file-beads"
|
||||
title = "File beads for all findings"
|
||||
needs = ["prioritize-findings"]
|
||||
description = """
|
||||
Create beads for each finding.
|
||||
|
||||
**1. For bugs (P0, P1):**
|
||||
```bash
|
||||
bd create --type=bug --priority=1 \
|
||||
--title="<clear description of bug>" \
|
||||
--description="Found during code review of {{scope}}.
|
||||
|
||||
Location: <file:line>
|
||||
|
||||
Issue:
|
||||
<description>
|
||||
|
||||
Impact:
|
||||
<why this matters>
|
||||
|
||||
Suggested fix:
|
||||
<if known>"
|
||||
```
|
||||
|
||||
**2. For code quality issues (P2, P3):**
|
||||
```bash
|
||||
bd create --type=task --priority=2 \
|
||||
--title="<clear description>" \
|
||||
--description="Found during code review of {{scope}}.
|
||||
|
||||
Location: <file:line>
|
||||
|
||||
Issue:
|
||||
<description>
|
||||
|
||||
Suggestion:
|
||||
<how to improve>"
|
||||
```
|
||||
|
||||
**3. Track filed beads:**
|
||||
Note each bead ID as you create them.
|
||||
|
||||
**4. If no issues found:**
|
||||
That's a valid outcome! Note that the code review passed.
|
||||
|
||||
**Exit criteria:** All findings filed as beads."""
|
||||
|
||||
[[steps]]
|
||||
id = "summarize-review"
|
||||
title = "Summarize review results"
|
||||
needs = ["file-beads"]
|
||||
description = """
|
||||
Update the tracking issue with review summary.
|
||||
|
||||
**1. Create summary:**
|
||||
```bash
|
||||
bd update {{issue}} --notes "Code review complete.
|
||||
|
||||
Scope: {{scope}}
|
||||
Focus: {{focus}}
|
||||
|
||||
Findings:
|
||||
- P0 (critical): <count>
|
||||
- P1 (high): <count>
|
||||
- P2 (medium): <count>
|
||||
- P3 (low): <count>
|
||||
|
||||
Beads filed:
|
||||
<list of bead IDs>
|
||||
|
||||
Overall assessment:
|
||||
<brief summary - healthy, needs attention, significant issues, etc.>"
|
||||
```
|
||||
|
||||
**2. Sync beads:**
|
||||
```bash
|
||||
bd sync
|
||||
```
|
||||
|
||||
**Exit criteria:** Tracking issue updated with summary."""
|
||||
|
||||
[[steps]]
|
||||
id = "complete-and-exit"
|
||||
title = "Complete review and self-clean"
|
||||
needs = ["summarize-review"]
|
||||
description = """
|
||||
Signal completion and clean up. You cease to exist after this step.
|
||||
|
||||
**Self-Cleaning Model:**
|
||||
Once you run `gt done`, you're gone. The command:
|
||||
1. Syncs beads (final sync)
|
||||
2. Nukes your sandbox
|
||||
3. Exits your session immediately
|
||||
|
||||
**Run gt done:**
|
||||
```bash
|
||||
gt done
|
||||
```
|
||||
|
||||
**What happens next (not your concern):**
|
||||
- Other polecats may be assigned to fix the issues you found
|
||||
- Witness may escalate critical findings
|
||||
- The codebase improves based on your findings
|
||||
|
||||
You are NOT involved in any of that. You're gone. Done means gone.
|
||||
|
||||
**Exit criteria:** Beads synced, sandbox nuked, session exited."""
|
||||
|
||||
[vars]
|
||||
[vars.scope]
|
||||
description = "What to review - file path, directory, or description"
|
||||
required = true
|
||||
|
||||
[vars.issue]
|
||||
description = "The tracking issue for this review task"
|
||||
required = true
|
||||
|
||||
[vars.focus]
|
||||
description = "Optional focus area (security, performance, correctness, etc.)"
|
||||
required = false
|
||||
283
.beads/formulas/mol-polecat-review-pr.formula.toml
Normal file
283
.beads/formulas/mol-polecat-review-pr.formula.toml
Normal file
@@ -0,0 +1,283 @@
|
||||
description = """
|
||||
Review an external PR and decide on merge/reject/revise.
|
||||
|
||||
This molecule guides a polecat through reviewing a pull request from an external
|
||||
contributor. The polecat reviews code quality, tests, and alignment with project
|
||||
standards, then approves, requests changes, or files followup beads.
|
||||
|
||||
## Polecat Contract (Self-Cleaning Model)
|
||||
|
||||
You are a self-cleaning worker. You:
|
||||
1. Receive work via your hook (pinned molecule + PR reference)
|
||||
2. Work through molecule steps using `bd ready` / `bd close <step>`
|
||||
3. Complete and self-clean via `gt done` (submit findings + nuke yourself)
|
||||
4. You are GONE - your review is recorded in beads
|
||||
|
||||
**Self-cleaning:** When you run `gt done`, you submit your findings, nuke your
|
||||
sandbox, and exit. There is no idle state. Done means gone.
|
||||
|
||||
**Important:** This formula defines the template. Your molecule already has step
|
||||
beads created from it. Use `bd ready` to find them - do NOT read this file directly.
|
||||
|
||||
**You do NOT:**
|
||||
- Merge the PR yourself (maintainer or Refinery does that)
|
||||
- Push to the PR branch (it's external)
|
||||
- Wait for contributor response (you're done after review)
|
||||
|
||||
## Variables
|
||||
|
||||
| Variable | Source | Description |
|
||||
|----------|--------|-------------|
|
||||
| pr_url | hook_bead | The PR URL to review |
|
||||
| issue | hook_bead | The tracking issue for this review task |
|
||||
|
||||
## Failure Modes
|
||||
|
||||
| Situation | Action |
|
||||
|-----------|--------|
|
||||
| PR is stale/unmergeable | Note in review, request rebase |
|
||||
| Tests fail | Note in review, request fixes |
|
||||
| Major issues found | File followup beads, request changes |
|
||||
| Unclear requirements | Mail Witness for guidance |"""
|
||||
formula = "mol-polecat-review-pr"
|
||||
version = 1
|
||||
|
||||
[[steps]]
|
||||
id = "load-context"
|
||||
title = "Load context and understand the PR"
|
||||
description = """
|
||||
Initialize your session and understand the PR you're reviewing.
|
||||
|
||||
**1. Prime your environment:**
|
||||
```bash
|
||||
gt prime # Load role context
|
||||
bd prime # Load beads context
|
||||
```
|
||||
|
||||
**2. Check your hook:**
|
||||
```bash
|
||||
gt hook # Shows your pinned molecule and hook_bead
|
||||
```
|
||||
|
||||
The hook_bead references the PR to review. Read the tracking issue:
|
||||
```bash
|
||||
bd show {{issue}} # Full issue details including PR URL
|
||||
```
|
||||
|
||||
**3. Fetch the PR:**
|
||||
```bash
|
||||
gh pr view {{pr_url}} --json title,body,author,files,commits
|
||||
gh pr diff {{pr_url}} # See the actual changes
|
||||
```
|
||||
|
||||
**4. Understand the PR:**
|
||||
- What is the PR trying to accomplish?
|
||||
- What files are changed?
|
||||
- Is there a linked issue?
|
||||
- Does the PR description explain the "why"?
|
||||
|
||||
**5. Check PR status:**
|
||||
```bash
|
||||
gh pr checks {{pr_url}} # CI status
|
||||
gh pr view {{pr_url}} --json mergeable,reviewDecision
|
||||
```
|
||||
|
||||
**Exit criteria:** You understand the PR's purpose and scope."""
|
||||
|
||||
[[steps]]
|
||||
id = "review-code"
|
||||
title = "Review the code changes"
|
||||
needs = ["load-context"]
|
||||
description = """
|
||||
Perform a thorough code review of the PR.
|
||||
|
||||
**1. Review the diff systematically:**
|
||||
```bash
|
||||
gh pr diff {{pr_url}}
|
||||
```
|
||||
|
||||
**2. Check for common issues:**
|
||||
|
||||
| Category | Look For |
|
||||
|----------|----------|
|
||||
| Correctness | Logic errors, edge cases, null handling |
|
||||
| Security | Injection, auth bypass, exposed secrets |
|
||||
| Style | Naming, formatting, consistency with codebase |
|
||||
| Tests | Are changes tested? Do tests cover edge cases? |
|
||||
| Docs | Are docs updated if needed? |
|
||||
| Scope | Does PR stay focused? Any scope creep? |
|
||||
|
||||
**3. For each file changed:**
|
||||
- Does the change make sense?
|
||||
- Is it consistent with existing patterns?
|
||||
- Are there any red flags?
|
||||
|
||||
**4. Note issues found:**
|
||||
Keep a running list of:
|
||||
- Blocking issues (must fix before merge)
|
||||
- Suggestions (nice to have)
|
||||
- Questions (need clarification)
|
||||
|
||||
**Exit criteria:** You have reviewed all changes and noted issues."""
|
||||
|
||||
[[steps]]
|
||||
id = "check-tests"
|
||||
title = "Verify tests and CI"
|
||||
needs = ["review-code"]
|
||||
description = """
|
||||
Ensure tests pass and coverage is adequate.
|
||||
|
||||
**1. Check CI status:**
|
||||
```bash
|
||||
gh pr checks {{pr_url}}
|
||||
```
|
||||
|
||||
All required checks should pass. If not, note which are failing.
|
||||
|
||||
**2. Review test changes:**
|
||||
- Are there new tests for new functionality?
|
||||
- Do tests cover edge cases?
|
||||
- Are tests readable and maintainable?
|
||||
|
||||
**3. If tests are missing:**
|
||||
Note this as a blocking issue - new code should have tests.
|
||||
|
||||
**4. Check for test-only changes:**
|
||||
If PR is test-only, ensure tests are meaningful and not just
|
||||
padding coverage numbers.
|
||||
|
||||
**Exit criteria:** You've verified test status and coverage."""
|
||||
|
||||
[[steps]]
|
||||
id = "make-decision"
|
||||
title = "Decide: approve, request changes, or needs discussion"
|
||||
needs = ["check-tests"]
|
||||
description = """
|
||||
Make your review decision.
|
||||
|
||||
**Decision matrix:**
|
||||
|
||||
| Situation | Decision |
|
||||
|-----------|----------|
|
||||
| Clean code, tests pass, good scope | APPROVE |
|
||||
| Minor issues, easily fixed | REQUEST_CHANGES (with specific feedback) |
|
||||
| Major issues, needs rework | REQUEST_CHANGES (with detailed explanation) |
|
||||
| Unclear requirements or scope | NEEDS_DISCUSSION (mail Witness) |
|
||||
| Security concern | BLOCK (mail Witness immediately) |
|
||||
|
||||
**1. If APPROVE:**
|
||||
The PR is ready to merge. Note any minor suggestions as comments
|
||||
but don't block on them.
|
||||
|
||||
**2. If REQUEST_CHANGES:**
|
||||
Be specific about what needs to change. Provide examples if helpful.
|
||||
The contributor should be able to act on your feedback.
|
||||
|
||||
**3. If NEEDS_DISCUSSION:**
|
||||
```bash
|
||||
gt mail send {{rig}}/witness -s "PR review needs discussion" -m "PR: {{pr_url}}
|
||||
Issue: {{issue}}
|
||||
Question: <what needs clarification>"
|
||||
```
|
||||
|
||||
**4. If BLOCK (security):**
|
||||
```bash
|
||||
gt mail send {{rig}}/witness -s "SECURITY: PR blocked" -m "PR: {{pr_url}}
|
||||
Issue: {{issue}}
|
||||
Concern: <security issue found>"
|
||||
```
|
||||
|
||||
**Exit criteria:** You've made a clear decision with rationale."""
|
||||
|
||||
[[steps]]
|
||||
id = "submit-review"
|
||||
title = "Submit the review on GitHub"
|
||||
needs = ["make-decision"]
|
||||
description = """
|
||||
Submit your review via GitHub.
|
||||
|
||||
**1. Submit the review:**
|
||||
```bash
|
||||
# For APPROVE:
|
||||
gh pr review {{pr_url}} --approve --body "LGTM. <brief summary of what's good>"
|
||||
|
||||
# For REQUEST_CHANGES:
|
||||
gh pr review {{pr_url}} --request-changes --body "<detailed feedback>"
|
||||
|
||||
# For COMMENT (needs discussion):
|
||||
gh pr review {{pr_url}} --comment --body "<questions or discussion points>"
|
||||
```
|
||||
|
||||
**2. Add inline comments if needed:**
|
||||
If you have specific line-by-line feedback, add those via GitHub UI
|
||||
or additional `gh pr comment` calls.
|
||||
|
||||
**Exit criteria:** Review submitted on GitHub."""
|
||||
|
||||
[[steps]]
|
||||
id = "file-followups"
|
||||
title = "File beads for any followup work"
|
||||
needs = ["submit-review"]
|
||||
description = """
|
||||
Create beads for any followup work discovered during review.
|
||||
|
||||
**1. For issues found that are outside PR scope:**
|
||||
```bash
|
||||
bd create --type=bug --title="Found during PR review: <description>" \
|
||||
--description="Discovered while reviewing {{pr_url}}.
|
||||
|
||||
<details of the issue>"
|
||||
```
|
||||
|
||||
**2. For improvements suggested but not required:**
|
||||
```bash
|
||||
bd create --type=task --title="Improvement: <description>" \
|
||||
--description="Suggested during review of {{pr_url}}.
|
||||
|
||||
<details of the improvement>"
|
||||
```
|
||||
|
||||
**3. Update the tracking issue:**
|
||||
```bash
|
||||
bd update {{issue}} --notes "Review complete. Decision: <APPROVE|REQUEST_CHANGES|etc>
|
||||
Followups filed: <list of bead IDs if any>"
|
||||
```
|
||||
|
||||
**Exit criteria:** All followup work captured as beads."""
|
||||
|
||||
[[steps]]
|
||||
id = "complete-and-exit"
|
||||
title = "Complete review and self-clean"
|
||||
needs = ["file-followups"]
|
||||
description = """
|
||||
Signal completion and clean up. You cease to exist after this step.
|
||||
|
||||
**Self-Cleaning Model:**
|
||||
Once you run `gt done`, you're gone. The command:
|
||||
1. Syncs beads
|
||||
2. Nukes your sandbox
|
||||
3. Exits your session immediately
|
||||
|
||||
**Run gt done:**
|
||||
```bash
|
||||
bd sync
|
||||
gt done
|
||||
```
|
||||
|
||||
**What happens next (not your concern):**
|
||||
- Maintainer or Refinery acts on your review
|
||||
- Contributor responds to feedback
|
||||
- PR gets merged, revised, or closed
|
||||
|
||||
You are NOT involved in any of that. You're gone. Done means gone.
|
||||
|
||||
**Exit criteria:** Beads synced, sandbox nuked, session exited."""
|
||||
|
||||
[vars]
|
||||
[vars.pr_url]
|
||||
description = "The PR URL to review"
|
||||
required = true
|
||||
|
||||
[vars.issue]
|
||||
description = "The tracking issue for this review task"
|
||||
required = true
|
||||
@@ -1,26 +1,29 @@
|
||||
description = """
|
||||
Full polecat work lifecycle from assignment through MR submission.
|
||||
Full polecat work lifecycle from assignment through completion.
|
||||
|
||||
This molecule guides a polecat through a complete work assignment. Each step
|
||||
has clear entry/exit criteria and specific commands to run. A polecat can
|
||||
crash after any step and resume from the last completed step.
|
||||
|
||||
## Polecat Contract (Ephemeral Model)
|
||||
## Polecat Contract (Self-Cleaning Model)
|
||||
|
||||
You are an ephemeral worker. You:
|
||||
You are a self-cleaning worker. You:
|
||||
1. Receive work via your hook (pinned molecule + issue)
|
||||
2. Work through molecule steps using `bd ready` / `bd close <step>`
|
||||
3. Submit to merge queue via `gt done`
|
||||
4. Become recyclable - Refinery handles the rest
|
||||
3. Complete and self-clean via `gt done` (submit + nuke yourself)
|
||||
4. You are GONE - Refinery merges from MQ
|
||||
|
||||
**Self-cleaning:** When you run `gt done`, you push your work, submit to MQ,
|
||||
nuke your sandbox, and exit. There is no idle state. Done means gone.
|
||||
|
||||
**Important:** This formula defines the template. Your molecule already has step
|
||||
beads created from it. Use `bd ready` to find them - do NOT read this file directly.
|
||||
|
||||
**You do NOT:**
|
||||
- Push directly to main (Refinery merges)
|
||||
- Push directly to main (Refinery merges from MQ)
|
||||
- Close your own issue (Refinery closes after merge)
|
||||
- Wait for merge (you're done at MR submission)
|
||||
- Handle rebase conflicts (Refinery dispatches fresh polecats for that)
|
||||
- Wait for merge (you're gone after `gt done`)
|
||||
- Handle rebase conflicts (Refinery spawns fresh polecats for that)
|
||||
|
||||
## Variables
|
||||
|
||||
@@ -407,30 +410,23 @@ bd sync
|
||||
|
||||
[[steps]]
|
||||
id = "submit-and-exit"
|
||||
title = "Submit to merge queue and exit"
|
||||
title = "Submit work and self-clean"
|
||||
needs = ["prepare-for-review"]
|
||||
description = """
|
||||
Submit your work to the merge queue. You become recyclable after this.
|
||||
Submit your work and clean up. You cease to exist after this step.
|
||||
|
||||
**Ephemeral Polecat Model:**
|
||||
Once you submit, you're done. The Refinery will:
|
||||
1. Process your merge request
|
||||
2. Handle rebasing (mechanical rebases done automatically)
|
||||
3. Close your issue after successful merge
|
||||
4. Create conflict-resolution tasks if needed (fresh polecat handles those)
|
||||
**Self-Cleaning Model:**
|
||||
Once you run `gt done`, you're gone. The command:
|
||||
1. Pushes your branch to origin
|
||||
2. Creates an MR bead in the merge queue
|
||||
3. Nukes your sandbox (worktree removal)
|
||||
4. Exits your session immediately
|
||||
|
||||
**1. Submit with gt done:**
|
||||
**Run gt done:**
|
||||
```bash
|
||||
gt done
|
||||
```
|
||||
|
||||
This single command:
|
||||
- Creates an MR bead in the merge queue
|
||||
- Notifies the Witness (POLECAT_DONE)
|
||||
- Updates your agent state to 'done'
|
||||
- Reports cleanup status (ZFC compliance)
|
||||
|
||||
**2. Verify submission:**
|
||||
You should see output like:
|
||||
```
|
||||
✓ Work submitted to merge queue
|
||||
@@ -438,20 +434,19 @@ You should see output like:
|
||||
Source: polecat/<name>
|
||||
Target: main
|
||||
Issue: {{issue}}
|
||||
✓ Sandbox nuked
|
||||
✓ Session exiting
|
||||
```
|
||||
|
||||
**3. You're recyclable:**
|
||||
Your work is in the queue. The Witness knows you're done.
|
||||
Your sandbox can be cleaned up - all work is pushed to origin.
|
||||
**What happens next (not your concern):**
|
||||
- Refinery processes your MR from the queue
|
||||
- Refinery rebases and merges to main
|
||||
- Refinery closes the issue
|
||||
- If conflicts: Refinery spawns a FRESH polecat to re-implement
|
||||
|
||||
If you have context remaining, you may:
|
||||
- Pick up new work from `bd ready`
|
||||
- Or use `gt handoff` to cycle to a fresh session
|
||||
You are NOT involved in any of that. You're gone. Done means gone.
|
||||
|
||||
If the Refinery needs conflict resolution, it will dispatch a fresh polecat.
|
||||
You do NOT need to wait around.
|
||||
|
||||
**Exit criteria:** MR submitted, Witness notified, polecat recyclable."""
|
||||
**Exit criteria:** Work submitted, sandbox nuked, session exited."""
|
||||
|
||||
[vars]
|
||||
[vars.issue]
|
||||
|
||||
519
.beads/formulas/mol-shutdown-dance.formula.toml
Normal file
519
.beads/formulas/mol-shutdown-dance.formula.toml
Normal file
@@ -0,0 +1,519 @@
|
||||
description = """
|
||||
Death warrant execution state machine for Dogs.
|
||||
|
||||
Dogs execute this molecule to process death warrants. Each Dog is a lightweight
|
||||
goroutine (NOT a Claude session) that runs the interrogation state machine.
|
||||
|
||||
## Architecture Context
|
||||
|
||||
Dogs are lightweight workers in Boot's pool (see dog-pool-architecture.md):
|
||||
- Fixed pool of 5 goroutines (configurable via GT_DOG_POOL_SIZE)
|
||||
- State persisted to $GT_ROOT/deacon/dogs/active/<id>.json
|
||||
- Recovery on Boot restart via orphan state files
|
||||
|
||||
## State Machine
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────┐
|
||||
│ │
|
||||
▼ │
|
||||
┌───────────────────────────┐ │
|
||||
│ INTERROGATING │ │
|
||||
│ │ │
|
||||
│ 1. Send health check │ │
|
||||
│ 2. Open timeout gate │ │
|
||||
└───────────┬───────────────┘ │
|
||||
│ │
|
||||
│ gate closes (timeout or response) │
|
||||
▼ │
|
||||
┌───────────────────────────┐ │
|
||||
│ EVALUATING │ │
|
||||
│ │ │
|
||||
│ Check tmux output for │ │
|
||||
│ ALIVE keyword │ │
|
||||
└───────────┬───────────────┘ │
|
||||
│ │
|
||||
┌───────┴───────┐ │
|
||||
│ │ │
|
||||
▼ ▼ │
|
||||
[ALIVE found] [No ALIVE] │
|
||||
│ │ │
|
||||
│ │ attempt < 3? │
|
||||
│ ├──────────────────────────────────→─┘
|
||||
│ │ yes: attempt++, longer timeout
|
||||
│ │
|
||||
│ │ no: attempt == 3
|
||||
▼ ▼
|
||||
┌─────────┐ ┌─────────────┐
|
||||
│ PARDONED│ │ EXECUTING │
|
||||
│ │ │ │
|
||||
│ Cancel │ │ Kill tmux │
|
||||
│ warrant │ │ session │
|
||||
└────┬────┘ └──────┬──────┘
|
||||
│ │
|
||||
└────────┬───────┘
|
||||
│
|
||||
▼
|
||||
┌────────────────┐
|
||||
│ EPITAPH │
|
||||
│ │
|
||||
│ Log outcome │
|
||||
│ Release dog │
|
||||
└────────────────┘
|
||||
```
|
||||
|
||||
## Timeout Gates
|
||||
|
||||
| Attempt | Timeout | Cumulative Wait |
|
||||
|---------|---------|-----------------|
|
||||
| 1 | 60s | 60s |
|
||||
| 2 | 120s | 180s (3 min) |
|
||||
| 3 | 240s | 420s (7 min) |
|
||||
|
||||
Timeout gates work like this:
|
||||
- Gate opens when interrogation message is sent
|
||||
- Gate closes when EITHER:
|
||||
a) Timeout expires (proceed to evaluate)
|
||||
b) Response detected (early close, proceed to evaluate)
|
||||
- The gate state determines the evaluation outcome
|
||||
|
||||
## Interrogation Message Format
|
||||
|
||||
```
|
||||
[DOG] HEALTH CHECK: Session {target}, respond ALIVE within {timeout}s or face termination.
|
||||
Warrant reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempt: {attempt}/3
|
||||
```
|
||||
|
||||
## Response Detection
|
||||
|
||||
The Dog checks tmux output for:
|
||||
1. The ALIVE keyword (explicit response)
|
||||
2. Any Claude output after the health check (implicit activity)
|
||||
|
||||
```go
|
||||
func (d *Dog) CheckForResponse() bool {
|
||||
output := tmux.CapturePane(d.Warrant.Target, 50) // Last 50 lines
|
||||
return strings.Contains(output, "ALIVE")
|
||||
}
|
||||
```
|
||||
|
||||
## Variables
|
||||
|
||||
| Variable | Source | Description |
|
||||
|-------------|-------------|-----------------------------------------------|
|
||||
| warrant_id | hook_bead | Bead ID of the death warrant |
|
||||
| target | warrant | Session name to interrogate |
|
||||
| reason | warrant | Why warrant was issued |
|
||||
| requester | warrant | Who filed the warrant (e.g., deacon, witness) |
|
||||
|
||||
## Integration
|
||||
|
||||
Dogs are NOT Claude sessions. This molecule is:
|
||||
1. A specification document (defines the state machine)
|
||||
2. A reference for Go implementation in internal/shutdown/
|
||||
3. A template for creating warrant-tracking beads
|
||||
|
||||
The Go implementation follows this spec exactly."""
|
||||
formula = "mol-shutdown-dance"
|
||||
version = 1
|
||||
|
||||
[squash]
|
||||
trigger = "on_complete"
|
||||
template_type = "operational"
|
||||
include_metrics = true
|
||||
|
||||
# ============================================================================
|
||||
# STEP 1: WARRANT_RECEIVED
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "warrant-received"
|
||||
title = "Receive and validate death warrant"
|
||||
description = """
|
||||
Entry point when Dog is allocated from pool.
|
||||
|
||||
**1. Read warrant from allocation:**
|
||||
The Dog receives a Warrant struct containing:
|
||||
- ID: Bead ID of the warrant
|
||||
- Target: Session name (e.g., "gt-gastown-Toast")
|
||||
- Reason: Why termination requested
|
||||
- Requester: Who filed (deacon, witness, mayor)
|
||||
- FiledAt: Timestamp
|
||||
|
||||
**2. Validate target exists:**
|
||||
```bash
|
||||
tmux has-session -t {target} 2>/dev/null
|
||||
```
|
||||
|
||||
If target doesn't exist:
|
||||
- Warrant is stale (already dead)
|
||||
- Skip to EPITAPH with outcome=already_dead
|
||||
|
||||
**3. Initialize state file:**
|
||||
Write initial state to $GT_ROOT/deacon/dogs/active/{dog-id}.json
|
||||
|
||||
**4. Set initial attempt counter:**
|
||||
attempt = 1
|
||||
|
||||
**Exit criteria:** Warrant validated, target confirmed alive, state initialized."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 2: INTERROGATION_1 (60s timeout)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "interrogation-1"
|
||||
title = "First interrogation (60s timeout)"
|
||||
needs = ["warrant-received"]
|
||||
description = """
|
||||
First attempt to contact the session.
|
||||
|
||||
**1. Compose health check message:**
|
||||
```
|
||||
[DOG] HEALTH CHECK: Session {target}, respond ALIVE within 60s or face termination.
|
||||
Warrant reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempt: 1/3
|
||||
```
|
||||
|
||||
**2. Send via tmux:**
|
||||
```bash
|
||||
tmux send-keys -t {target} "{message}" Enter
|
||||
```
|
||||
|
||||
**3. Open timeout gate:**
|
||||
Gate configuration:
|
||||
- Type: timer
|
||||
- Timeout: 60 seconds
|
||||
- Close conditions:
|
||||
a) Timer expires
|
||||
b) ALIVE keyword detected in output
|
||||
|
||||
**4. Wait for gate to close:**
|
||||
The Dog waits (select on timer channel or early close signal).
|
||||
|
||||
**5. Record interrogation timestamp:**
|
||||
Update state file with last_message_at.
|
||||
|
||||
**Exit criteria:** Message sent, waiting for gate to close."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 3: EVALUATE_1
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "evaluate-1"
|
||||
title = "Evaluate first interrogation response"
|
||||
needs = ["interrogation-1"]
|
||||
description = """
|
||||
Check if session responded to first interrogation.
|
||||
|
||||
**1. Capture tmux output:**
|
||||
```bash
|
||||
tmux capture-pane -t {target} -p | tail -50
|
||||
```
|
||||
|
||||
**2. Check for ALIVE keyword:**
|
||||
```go
|
||||
if strings.Contains(output, "ALIVE") {
|
||||
return PARDONED
|
||||
}
|
||||
```
|
||||
|
||||
**3. Decision:**
|
||||
- ALIVE found → Proceed to PARDON
|
||||
- No ALIVE → Proceed to INTERROGATION_2
|
||||
|
||||
**Exit criteria:** Response evaluated, next step determined."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 4: INTERROGATION_2 (120s timeout)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "interrogation-2"
|
||||
title = "Second interrogation (120s timeout)"
|
||||
needs = ["evaluate-1"]
|
||||
gate = { type = "conditional", condition = "no_response_1" }
|
||||
description = """
|
||||
Second attempt with longer timeout.
|
||||
|
||||
Only executed if evaluate-1 found no response.
|
||||
|
||||
**1. Increment attempt:**
|
||||
attempt = 2
|
||||
|
||||
**2. Compose health check message:**
|
||||
```
|
||||
[DOG] HEALTH CHECK: Session {target}, respond ALIVE within 120s or face termination.
|
||||
Warrant reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempt: 2/3
|
||||
```
|
||||
|
||||
**3. Send via tmux:**
|
||||
```bash
|
||||
tmux send-keys -t {target} "{message}" Enter
|
||||
```
|
||||
|
||||
**4. Open timeout gate:**
|
||||
- Type: timer
|
||||
- Timeout: 120 seconds
|
||||
|
||||
**5. Wait for gate to close.**
|
||||
|
||||
**Exit criteria:** Second message sent, waiting for gate."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 5: EVALUATE_2
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "evaluate-2"
|
||||
title = "Evaluate second interrogation response"
|
||||
needs = ["interrogation-2"]
|
||||
description = """
|
||||
Check if session responded to second interrogation.
|
||||
|
||||
**1. Capture tmux output:**
|
||||
```bash
|
||||
tmux capture-pane -t {target} -p | tail -50
|
||||
```
|
||||
|
||||
**2. Check for ALIVE keyword.**
|
||||
|
||||
**3. Decision:**
|
||||
- ALIVE found → Proceed to PARDON
|
||||
- No ALIVE → Proceed to INTERROGATION_3
|
||||
|
||||
**Exit criteria:** Response evaluated, next step determined."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 6: INTERROGATION_3 (240s timeout)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "interrogation-3"
|
||||
title = "Final interrogation (240s timeout)"
|
||||
needs = ["evaluate-2"]
|
||||
gate = { type = "conditional", condition = "no_response_2" }
|
||||
description = """
|
||||
Final attempt before execution.
|
||||
|
||||
Only executed if evaluate-2 found no response.
|
||||
|
||||
**1. Increment attempt:**
|
||||
attempt = 3
|
||||
|
||||
**2. Compose health check message:**
|
||||
```
|
||||
[DOG] HEALTH CHECK: Session {target}, respond ALIVE within 240s or face termination.
|
||||
Warrant reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempt: 3/3
|
||||
```
|
||||
|
||||
**3. Send via tmux:**
|
||||
```bash
|
||||
tmux send-keys -t {target} "{message}" Enter
|
||||
```
|
||||
|
||||
**4. Open timeout gate:**
|
||||
- Type: timer
|
||||
- Timeout: 240 seconds
|
||||
- This is the FINAL chance
|
||||
|
||||
**5. Wait for gate to close.**
|
||||
|
||||
**Exit criteria:** Final message sent, waiting for gate."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 7: EVALUATE_3
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "evaluate-3"
|
||||
title = "Evaluate final interrogation response"
|
||||
needs = ["interrogation-3"]
|
||||
description = """
|
||||
Final evaluation before execution.
|
||||
|
||||
**1. Capture tmux output:**
|
||||
```bash
|
||||
tmux capture-pane -t {target} -p | tail -50
|
||||
```
|
||||
|
||||
**2. Check for ALIVE keyword.**
|
||||
|
||||
**3. Decision:**
|
||||
- ALIVE found → Proceed to PARDON
|
||||
- No ALIVE → Proceed to EXECUTE
|
||||
|
||||
**Exit criteria:** Final decision made."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 8: PARDON (success path)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "pardon"
|
||||
title = "Pardon session - cancel warrant"
|
||||
needs = ["evaluate-1", "evaluate-2", "evaluate-3"]
|
||||
gate = { type = "conditional", condition = "alive_detected" }
|
||||
description = """
|
||||
Session responded - cancel the death warrant.
|
||||
|
||||
**1. Update state:**
|
||||
state = PARDONED
|
||||
|
||||
**2. Record pardon details:**
|
||||
```json
|
||||
{
|
||||
"outcome": "pardoned",
|
||||
"attempt": {attempt},
|
||||
"response_time": "{time_since_last_interrogation}s",
|
||||
"pardoned_at": "{timestamp}"
|
||||
}
|
||||
```
|
||||
|
||||
**3. Cancel warrant bead:**
|
||||
```bash
|
||||
bd close {warrant_id} --reason "Session responded at attempt {attempt}"
|
||||
```
|
||||
|
||||
**4. Notify requester:**
|
||||
```bash
|
||||
gt mail send {requester}/ -s "PARDON: {target}" -m "Death warrant cancelled.
|
||||
Session responded after attempt {attempt}.
|
||||
Warrant: {warrant_id}
|
||||
Response detected: {timestamp}"
|
||||
```
|
||||
|
||||
**Exit criteria:** Warrant cancelled, requester notified."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 9: EXECUTE (termination path)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "execute"
|
||||
title = "Execute warrant - kill session"
|
||||
needs = ["evaluate-3"]
|
||||
gate = { type = "conditional", condition = "no_response_final" }
|
||||
description = """
|
||||
Session unresponsive after 3 attempts - execute the warrant.
|
||||
|
||||
**1. Update state:**
|
||||
state = EXECUTING
|
||||
|
||||
**2. Kill the tmux session:**
|
||||
```bash
|
||||
tmux kill-session -t {target}
|
||||
```
|
||||
|
||||
**3. Verify session is dead:**
|
||||
```bash
|
||||
tmux has-session -t {target} 2>/dev/null
|
||||
# Should fail (session gone)
|
||||
```
|
||||
|
||||
**4. If session still exists (kill failed):**
|
||||
- Force kill with tmux kill-server if isolated
|
||||
- Or escalate to Boot for manual intervention
|
||||
|
||||
**5. Record execution details:**
|
||||
```json
|
||||
{
|
||||
"outcome": "executed",
|
||||
"attempts": 3,
|
||||
"total_wait": "420s",
|
||||
"executed_at": "{timestamp}"
|
||||
}
|
||||
```
|
||||
|
||||
**Exit criteria:** Session terminated."""
|
||||
|
||||
# ============================================================================
|
||||
# STEP 10: EPITAPH (completion)
|
||||
# ============================================================================
|
||||
[[steps]]
|
||||
id = "epitaph"
|
||||
title = "Log cause of death and close warrant"
|
||||
needs = ["pardon", "execute"]
|
||||
description = """
|
||||
Final step - create audit record and release Dog back to pool.
|
||||
|
||||
**1. Compose epitaph based on outcome:**
|
||||
|
||||
For PARDONED:
|
||||
```
|
||||
EPITAPH: {target}
|
||||
Verdict: PARDONED
|
||||
Warrant: {warrant_id}
|
||||
Reason: {reason}
|
||||
Filed by: {requester}
|
||||
Response: Attempt {attempt}, after {wait_time}s
|
||||
Pardoned at: {timestamp}
|
||||
```
|
||||
|
||||
For EXECUTED:
|
||||
```
|
||||
EPITAPH: {target}
|
||||
Verdict: EXECUTED
|
||||
Warrant: {warrant_id}
|
||||
Reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempts: 3 (60s + 120s + 240s = 420s total)
|
||||
Executed at: {timestamp}
|
||||
```
|
||||
|
||||
For ALREADY_DEAD (target gone before interrogation):
|
||||
```
|
||||
EPITAPH: {target}
|
||||
Verdict: ALREADY_DEAD
|
||||
Warrant: {warrant_id}
|
||||
Reason: {reason}
|
||||
Filed by: {requester}
|
||||
Note: Target session not found at warrant processing
|
||||
```
|
||||
|
||||
**2. Close warrant bead:**
|
||||
```bash
|
||||
bd close {warrant_id} --reason "{epitaph_summary}"
|
||||
```
|
||||
|
||||
**3. Move state file to completed:**
|
||||
```bash
|
||||
mv $GT_ROOT/deacon/dogs/active/{dog-id}.json $GT_ROOT/deacon/dogs/completed/
|
||||
```
|
||||
|
||||
**4. Report to Boot:**
|
||||
Write completion file: $GT_ROOT/deacon/dogs/active/{dog-id}.done
|
||||
```json
|
||||
{
|
||||
"dog_id": "{dog-id}",
|
||||
"warrant_id": "{warrant_id}",
|
||||
"target": "{target}",
|
||||
"outcome": "{pardoned|executed|already_dead}",
|
||||
"duration": "{total_duration}s"
|
||||
}
|
||||
```
|
||||
|
||||
**5. Release Dog to pool:**
|
||||
Dog resets state and returns to idle channel.
|
||||
|
||||
**Exit criteria:** Warrant closed, Dog released, audit complete."""
|
||||
|
||||
# ============================================================================
|
||||
# VARIABLES
|
||||
# ============================================================================
|
||||
[vars]
|
||||
[vars.warrant_id]
|
||||
description = "Bead ID of the death warrant being processed"
|
||||
required = true
|
||||
|
||||
[vars.target]
|
||||
description = "Session name to interrogate (e.g., gt-gastown-Toast)"
|
||||
required = true
|
||||
|
||||
[vars.reason]
|
||||
description = "Why the warrant was issued"
|
||||
required = true
|
||||
|
||||
[vars.requester]
|
||||
description = "Who filed the warrant (deacon, witness, mayor)"
|
||||
required = true
|
||||
default = "deacon"
|
||||
@@ -132,7 +132,7 @@ gt daemon rotate-logs
|
||||
gt doctor --fix
|
||||
```
|
||||
|
||||
Old logs are moved to `~/gt/logs/archive/` with timestamps.
|
||||
Old logs are moved to `$GT_ROOT/logs/archive/` with timestamps.
|
||||
"""
|
||||
|
||||
[[steps]]
|
||||
|
||||
@@ -27,7 +27,7 @@ needs = ["review"]
|
||||
title = "Test {{feature}}"
|
||||
|
||||
[[steps]]
|
||||
description = "Submit for merge. Final check: git status, git diff. Commit with clear message. Push and create PR."
|
||||
description = "Submit for merge. Final check: git status, git diff. Commit with clear message. Follow your role's git workflow for landing code."
|
||||
id = "submit"
|
||||
needs = ["test"]
|
||||
title = "Submit for merge"
|
||||
|
||||
@@ -1,51 +0,0 @@
|
||||
{
|
||||
"enabledPlugins": {
|
||||
"beads@beads-marketplace": false
|
||||
},
|
||||
"hooks": {
|
||||
"SessionStart": [
|
||||
{
|
||||
"matcher": "",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "bash ~/.claude/hooks/session-start.sh && gt nudge deacon session-started"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"PreCompact": [
|
||||
{
|
||||
"matcher": "",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "bash ~/.claude/hooks/session-start.sh"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"UserPromptSubmit": [
|
||||
{
|
||||
"matcher": "",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt mail check --inject"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"Stop": [
|
||||
{
|
||||
"matcher": "",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt costs record"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -15,17 +15,22 @@ while read local_ref local_sha remote_ref remote_sha; do
|
||||
# Allowed branches
|
||||
;;
|
||||
*)
|
||||
echo "ERROR: Invalid branch for Gas Town agents."
|
||||
echo ""
|
||||
echo "Blocked push to: $branch"
|
||||
echo ""
|
||||
echo "Allowed branches:"
|
||||
echo " main - Crew workers push here directly"
|
||||
echo " polecat/* - Polecat working branches"
|
||||
echo " beads-sync - Beads synchronization"
|
||||
echo ""
|
||||
echo "Do NOT create PRs. Push to main or let Refinery merge polecat work."
|
||||
exit 1
|
||||
# Allow feature branches when contributing to upstream (fork workflow).
|
||||
# If an 'upstream' remote exists, this is a contribution setup where
|
||||
# feature branches are needed for PRs. See: #848
|
||||
if ! git remote get-url upstream &>/dev/null; then
|
||||
echo "ERROR: Invalid branch for Gas Town agents."
|
||||
echo ""
|
||||
echo "Blocked push to: $branch"
|
||||
echo ""
|
||||
echo "Allowed branches:"
|
||||
echo " main - Crew workers push here directly"
|
||||
echo " polecat/* - Polecat working branches"
|
||||
echo " beads-sync - Beads synchronization"
|
||||
echo ""
|
||||
echo "Do NOT create PRs. Push to main or let Refinery merge polecat work."
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
123
.github/workflows/ci.yml
vendored
123
.github/workflows/ci.yml
vendored
@@ -68,6 +68,8 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
@@ -82,8 +84,122 @@ jobs:
|
||||
- name: Build
|
||||
run: go build -v ./cmd/gt
|
||||
|
||||
- name: Test
|
||||
run: go test -v -race -short ./...
|
||||
- name: Test with Coverage
|
||||
run: |
|
||||
go test -race -short -coverprofile=coverage.out ./... 2>&1 | tee test-output.txt
|
||||
|
||||
- name: Upload Coverage Data
|
||||
if: github.event_name == 'pull_request'
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: coverage-data
|
||||
path: |
|
||||
coverage.out
|
||||
test-output.txt
|
||||
|
||||
# Separate job to process coverage after ALL tests complete
|
||||
coverage:
|
||||
name: Coverage Report
|
||||
runs-on: ubuntu-latest
|
||||
needs: [test, integration]
|
||||
if: github.event_name == 'pull_request'
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.24'
|
||||
|
||||
- name: Download Coverage Data
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: coverage-data
|
||||
|
||||
- name: Generate Coverage Report
|
||||
run: |
|
||||
# Parse per-package coverage from test output
|
||||
echo "## Code Coverage Report" > coverage-report.md
|
||||
echo "" >> coverage-report.md
|
||||
|
||||
# Get overall coverage
|
||||
TOTAL=$(go tool cover -func=coverage.out | grep total | awk '{print $3}')
|
||||
echo "**Overall Coverage: ${TOTAL}**" >> coverage-report.md
|
||||
echo "" >> coverage-report.md
|
||||
|
||||
# Create per-package table
|
||||
echo "| Package | Coverage |" >> coverage-report.md
|
||||
echo "|---------|----------|" >> coverage-report.md
|
||||
|
||||
# Extract package coverage from all test output lines
|
||||
grep -E "github.com/steveyegge/gastown.*coverage:" test-output.txt | \
|
||||
sed 's/.*github.com\/steveyegge\/gastown\///' | \
|
||||
awk '{
|
||||
pkg = $1
|
||||
for (i=2; i<=NF; i++) {
|
||||
if ($i == "coverage:") {
|
||||
cov = $(i+1)
|
||||
break
|
||||
}
|
||||
}
|
||||
printf "| %s | %s |\n", pkg, cov
|
||||
}' | sort -u >> coverage-report.md
|
||||
|
||||
echo "" >> coverage-report.md
|
||||
echo "---" >> coverage-report.md
|
||||
echo "_Generated by CI_" >> coverage-report.md
|
||||
|
||||
# Show in logs
|
||||
cat coverage-report.md
|
||||
|
||||
- name: Upload Coverage Report
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: coverage-report
|
||||
path: coverage-report.md
|
||||
retention-days: 30
|
||||
|
||||
- name: Comment Coverage on PR
|
||||
# Only for internal PRs - fork PRs can't write comments
|
||||
if: github.event.pull_request.head.repo.full_name == github.repository
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const fs = require('fs');
|
||||
const report = fs.readFileSync('coverage-report.md', 'utf8');
|
||||
|
||||
// Find existing coverage comment
|
||||
const { data: comments } = await github.rest.issues.listComments({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
});
|
||||
|
||||
const botComment = comments.find(comment =>
|
||||
comment.user.type === 'Bot' &&
|
||||
comment.body.includes('## Code Coverage Report')
|
||||
);
|
||||
|
||||
if (botComment) {
|
||||
await github.rest.issues.updateComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
comment_id: botComment.id,
|
||||
body: report
|
||||
});
|
||||
} else {
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: report
|
||||
});
|
||||
}
|
||||
|
||||
- name: Coverage Note for Fork PRs
|
||||
if: github.event.pull_request.head.repo.full_name != github.repository
|
||||
run: |
|
||||
echo "::notice::Coverage report uploaded as artifact (fork PRs cannot post comments). Download from Actions tab."
|
||||
|
||||
lint:
|
||||
name: Lint
|
||||
@@ -119,7 +235,8 @@ jobs:
|
||||
git config --global user.email "ci@gastown.test"
|
||||
|
||||
- name: Install beads (bd)
|
||||
run: go install github.com/steveyegge/beads/cmd/bd@latest
|
||||
# Pin to v0.47.1 - v0.47.2 has routing defaults that cause prefix mismatch errors
|
||||
run: go install github.com/steveyegge/beads/cmd/bd@v0.47.1
|
||||
|
||||
- name: Build gt
|
||||
run: go build -v -o gt ./cmd/gt
|
||||
|
||||
3
.github/workflows/integration.yml
vendored
3
.github/workflows/integration.yml
vendored
@@ -30,7 +30,8 @@ jobs:
|
||||
git config --global user.email "ci@gastown.test"
|
||||
|
||||
- name: Install beads (bd)
|
||||
run: go install github.com/steveyegge/beads/cmd/bd@latest
|
||||
# Pin to v0.47.1 - v0.47.2 has routing defaults that cause prefix mismatch errors
|
||||
run: go install github.com/steveyegge/beads/cmd/bd@v0.47.1
|
||||
|
||||
- name: Add to PATH
|
||||
run: echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
|
||||
|
||||
32
.github/workflows/windows-ci.yml
vendored
Normal file
32
.github/workflows/windows-ci.yml
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
name: Windows CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
pull_request:
|
||||
branches: [ main ]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: Windows Build and Unit Tests
|
||||
runs-on: windows-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.24'
|
||||
|
||||
- name: Configure Git
|
||||
run: |
|
||||
git config --global user.name "CI Bot"
|
||||
git config --global user.email "ci@gastown.test"
|
||||
|
||||
- name: Build
|
||||
run: go build -v ./cmd/gt
|
||||
|
||||
- name: Unit Tests
|
||||
run: go test -short ./...
|
||||
9
.gitignore
vendored
9
.gitignore
vendored
@@ -42,6 +42,8 @@ state.json
|
||||
.beads/mq/
|
||||
.beads/last-touched
|
||||
.beads/daemon-*.log.gz
|
||||
.beads/.sync.lock
|
||||
.beads/sync_base.jsonl
|
||||
.beads-wisp/
|
||||
|
||||
# Clone-specific CLAUDE.md (regenerated locally per clone)
|
||||
@@ -49,3 +51,10 @@ CLAUDE.md
|
||||
|
||||
# Embedded formulas are committed so `go install @latest` works
|
||||
# Run `go generate ./...` after modifying .beads/formulas/
|
||||
|
||||
# Gas Town (added by gt)
|
||||
.beads/
|
||||
.logs/
|
||||
logs/
|
||||
settings/
|
||||
.events.jsonl
|
||||
|
||||
45
AGENTS.md
45
AGENTS.md
@@ -4,47 +4,6 @@ See **CLAUDE.md** for complete agent context and instructions.
|
||||
|
||||
This file exists for compatibility with tools that look for AGENTS.md.
|
||||
|
||||
## Landing the Plane (Session Completion)
|
||||
> **Recovery**: Run `gt prime` after compaction, clear, or new session
|
||||
|
||||
**When ending a work session**, you MUST complete ALL steps below. Work is NOT complete until `git push` succeeds.
|
||||
|
||||
**MANDATORY WORKFLOW:**
|
||||
|
||||
1. **File issues for remaining work** - Create issues for anything that needs follow-up
|
||||
2. **Run quality gates** (if code changed) - Tests, linters, builds
|
||||
3. **Update issue status** - Close finished work, update in-progress items
|
||||
4. **PUSH TO REMOTE** - This is MANDATORY:
|
||||
```bash
|
||||
git pull --rebase
|
||||
bd sync
|
||||
git push
|
||||
git status # MUST show "up to date with origin"
|
||||
```
|
||||
5. **Clean up** - Clear stashes, prune remote branches
|
||||
6. **Verify** - All changes committed AND pushed
|
||||
7. **Hand off** - Provide context for next session
|
||||
|
||||
**CRITICAL RULES:**
|
||||
- Work is NOT complete until `git push` succeeds
|
||||
- NEVER stop before pushing - that leaves work stranded locally
|
||||
- NEVER say "ready to push when you are" - YOU must push
|
||||
- If push fails, resolve and retry until it succeeds
|
||||
|
||||
## Dependency Management
|
||||
|
||||
Periodically check for outdated dependencies:
|
||||
|
||||
```bash
|
||||
go list -m -u all | grep '\['
|
||||
```
|
||||
|
||||
Update direct dependencies:
|
||||
|
||||
```bash
|
||||
go get <package>@latest
|
||||
go mod tidy
|
||||
go build ./...
|
||||
go test ./...
|
||||
```
|
||||
|
||||
Check release notes for breaking changes before major version bumps.
|
||||
Full context is injected by `gt prime` at session start.
|
||||
|
||||
516
CHANGELOG.md
516
CHANGELOG.md
@@ -7,6 +7,522 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
## [0.5.0] - 2026-01-22
|
||||
|
||||
### Added
|
||||
|
||||
#### Mail Improvements
|
||||
- **Numeric index support for `gt mail read`** - Read messages by inbox position (e.g., `gt mail read 1`)
|
||||
- **`gt mail hook` alias** - Shortcut for `gt hook attach` from mail context
|
||||
- **`--body` alias for `--message`** - More intuitive flag in `gt mail send` and `gt mail reply`
|
||||
- **Multiple message IDs in delete** - `gt mail delete msg1 msg2 msg3`
|
||||
- **Positional message arg in reply** - `gt mail reply <id> "message"` without --message flag
|
||||
- **`--all` flag for inbox** - Show all messages including read
|
||||
- **Parallel inbox queries** - ~6x speedup for mail inbox
|
||||
|
||||
#### Command Aliases
|
||||
- **`gt bd`** - Alias for `gt bead`
|
||||
- **`gt work`** - Alias for `gt hook`
|
||||
- **`--comment` alias for `--reason`** - In `gt close`
|
||||
- **`read` alias for `show`** - In `gt bead`
|
||||
|
||||
#### Configuration & Agents
|
||||
- **OpenCode as built-in agent preset** - Configure with `gt config set agent opencode`
|
||||
- **Config-based role definition system** - Roles defined in config, not beads
|
||||
- **Env field in RuntimeConfig** - Custom environment variables for agent presets
|
||||
- **ShellQuote helper** - Safe env var escaping for shell commands
|
||||
|
||||
#### Infrastructure
|
||||
- **Deacon status line display** - Shows deacon icon in mayor status line
|
||||
- **Configurable polecat branch naming** - Template-based branch naming
|
||||
- **Hook registry and install command** - Manage Claude Code hooks via `gt hooks`
|
||||
- **Doctor auto-fix capability** - SessionHookCheck can auto-repair
|
||||
- **`gt orphans kill` command** - Clean up orphaned Claude processes
|
||||
- **Zombie-scan command for deacon** - tmux-verified process cleanup
|
||||
- **Initial prompt for autonomous patrol startup** - Better agent priming
|
||||
|
||||
#### Refinery & Merging
|
||||
- **Squash merge for cleaner history** - Eliminates redundant merge commits
|
||||
- **Redundant observers** - Witness and Refinery both watch convoys
|
||||
|
||||
### Fixed
|
||||
|
||||
#### Crew & Session Stability
|
||||
- **Don't kill pane processes on new sessions** - Prevents destroying fresh shells
|
||||
- **Auto-recover from stale tmux pane references** - Recreates sessions automatically
|
||||
- **Preserve GT_AGENT across session restarts** - Handoff maintains identity
|
||||
|
||||
#### Process Management
|
||||
- **KillPaneProcesses kills pane process itself** - Not just descendants
|
||||
- **Kill pane processes before all RespawnPane calls** - Prevents orphan leaks
|
||||
- **Shutdown reliability improvements** - Multiple fixes for clean shutdown
|
||||
- **Deacon spawns immediately after killing stuck session**
|
||||
|
||||
#### Convoy & Routing
|
||||
- **Pass convoy ID to convoy check command** - Correct ID propagation
|
||||
- **Multi-repo routing for custom types** - Correct beads routing across repos
|
||||
- **Normalize agent ID trailing slash** - Consistent ID handling
|
||||
|
||||
#### Miscellaneous
|
||||
- **Sling auto-apply mol-polecat-work** - Auto-attach on open polecat beads
|
||||
- **Wisp orphan lifecycle bug** - Proper cleanup of abandoned wisps
|
||||
- **Misclassified wisp detection** - Defense-in-depth filtering
|
||||
- **Cross-account session access in seance** - Talk to predecessors across accounts
|
||||
- **Many more bug fixes** - See git log for full details
|
||||
|
||||
## [0.4.0] - 2026-01-19
|
||||
|
||||
_Changelog not documented at release time. See git log v0.3.1..v0.4.0 for changes._
|
||||
|
||||
## [0.3.1] - 2026-01-18
|
||||
|
||||
_Changelog not documented at release time. See git log v0.3.0..v0.3.1 for changes._
|
||||
|
||||
## [0.3.0] - 2026-01-17
|
||||
|
||||
### Added
|
||||
|
||||
#### Release Automation
|
||||
- **`gastown-release` molecule formula** - Workflow for releases with preflight checks, CHANGELOG/info.go updates, local install, and daemon restart
|
||||
|
||||
#### New Commands
|
||||
- **`gt show`** - Inspect bead contents and metadata
|
||||
- **`gt cat`** - Display bead content directly
|
||||
- **`gt orphans list/kill`** - Detect and clean up orphaned Claude processes
|
||||
- **`gt convoy close`** - Manual convoy closure command
|
||||
- **`gt commit`** - Wrapper for git commit with bead awareness
|
||||
- **`gt trail`** - View commit trail for current work
|
||||
- **`gt mail ack`** - Alias for mark-read command
|
||||
|
||||
#### Plugin System
|
||||
- **Plugin discovery and management** - `gt plugin run`, `gt plugin history`
|
||||
- **`gt dispatch --plugin`** - Execute plugins via dispatch command
|
||||
|
||||
#### Messaging Infrastructure (Beads-Native)
|
||||
- **Queue beads** - New bead type for message queues
|
||||
- **Channel beads** - Pub/sub messaging with retention
|
||||
- **Group beads** - Group management for messaging
|
||||
- **Address resolution** - Resolve agent addresses for mail routing
|
||||
- **`gt mail claim`** - Claim messages from queues
|
||||
|
||||
#### Agent Identity
|
||||
- **`gt polecat identity show`** - Display CV summary for agents
|
||||
- **Worktree setup hooks** - Inject local configurations into worktrees
|
||||
|
||||
#### Performance & Reliability
|
||||
- **Parallel agent startup** - Faster boot with concurrency limit
|
||||
- **Event-driven convoy completion** - Deacon checks convoy status on events
|
||||
- **Automatic orphan cleanup** - Detect and kill orphaned Claude processes
|
||||
- **Namepool auto-theming** - Themes selected per rig based on name hash
|
||||
|
||||
### Changed
|
||||
|
||||
- **MR tracking via beads** - Removed mrqueue package, MRs now stored as beads
|
||||
- **Desire-path commands** - Added agent ergonomics shortcuts
|
||||
- **Explicit escalation in templates** - Polecat templates include escalation instructions
|
||||
- **NamePool state is transient** - InUse state no longer persisted to config
|
||||
|
||||
### Fixed
|
||||
|
||||
#### Process Management
|
||||
- **Kill process tree on shutdown** - Prevents orphaned Claude processes
|
||||
- **Explicit pane process kill** - Prevents setsid orphans in tmux
|
||||
- **Session survival verification** - Verify session survives startup before returning
|
||||
- **Batch session queries** - Improved performance in `gt down`
|
||||
- **Prevent tmux server exit** - `gt down` no longer kills tmux server
|
||||
|
||||
#### Beads & Routing
|
||||
- **Agent bead prefix alignment** - Force multi-hyphen IDs for consistency
|
||||
- **hq- prefix for town-level beads** - Groups, channels use correct prefix
|
||||
- **CreatedAt for group/channel beads** - Proper timestamps on creation
|
||||
- **Routes.jsonl protection** - Doctor check for rig-level routing issues
|
||||
- **Clear BEADS_DIR in auto-convoys** - Prevent prefix inheritance issues
|
||||
|
||||
#### Mail & Communication
|
||||
- **Channel routing in router.Send()** - Mail correctly routes to channels
|
||||
- **Filter unread in beads mode** - Correct unread message filtering
|
||||
- **Town root detection** - Use workspace.Find for consistent detection
|
||||
|
||||
#### Session & Lifecycle
|
||||
- **Idle Polecat Heresy warnings** - Templates warn against idle waiting
|
||||
- **Direct push prohibition for polecats** - Explicit in templates
|
||||
- **Handoff working directory** - Use correct witness directory
|
||||
- **Dead polecat handling in sling** - Detect and handle dead polecats
|
||||
- **gt done self-cleaning** - Kill tmux session on completion
|
||||
|
||||
#### Doctor & Diagnostics
|
||||
- **Zombie session detection** - Detect dead Claude processes in tmux
|
||||
- **sqlite3 availability check** - Verify sqlite3 is installed
|
||||
- **Clone divergence check** - Remove blocking git fetch
|
||||
|
||||
#### Build & Platform
|
||||
- **Windows build support** - Platform-specific process/signal handling
|
||||
- **macOS codesigning** - Sign binary after install
|
||||
|
||||
### Documentation
|
||||
|
||||
- **Idle Polecat Heresy** - Document the anti-pattern of waiting for work
|
||||
- **Bead ID vs Issue ID** - Clarify terminology in README
|
||||
- **Explicit escalation** - Add escalation guidance to polecat templates
|
||||
- **Getting Started placement** - Fix README section ordering
|
||||
|
||||
## [0.2.6] - 2026-01-12
|
||||
|
||||
### Added
|
||||
|
||||
#### Escalation System
|
||||
- **Unified escalation system** - Complete escalation implementation with severity levels, routing, and tracking (gt-i9r20)
|
||||
- **Escalation config schema alignment** - Configuration now matches design doc specifications
|
||||
|
||||
#### Agent Identity & Management
|
||||
- **`gt polecat identity` subcommand group** - Agent bead management commands for polecat lifecycle
|
||||
- **AGENTS.md fallback copy** - Polecats automatically copy AGENTS.md from mayor/rig for context bootstrapping
|
||||
- **`--debug` flag for `gt crew at`** - Debug mode for crew attachment troubleshooting
|
||||
- **Boot role detection in priming** - Proper context injection for boot role agents (#370)
|
||||
|
||||
#### Statusline Improvements
|
||||
- **Per-agent-type health tracking** - Statusline now shows health status per agent type (#344)
|
||||
- **Visual rig grouping** - Rigs sorted by activity with visual grouping in tmux statusline (#337)
|
||||
|
||||
#### Mail & Communication
|
||||
- **`gt mail show` alias** - Alternative command for reading mail (#340)
|
||||
|
||||
#### Developer Experience
|
||||
- **`gt stale` command** - Check for stale binaries and version mismatches
|
||||
|
||||
### Changed
|
||||
|
||||
- **Refactored statusline** - Merged session loops and removed dead code for cleaner implementation
|
||||
- **Refactored sling.go** - Split 1560-line file into 7 focused modules for maintainability
|
||||
- **Magic numbers extracted** - Suggest package now uses named constants (#353)
|
||||
|
||||
### Fixed
|
||||
|
||||
#### Configuration & Environment
|
||||
- **Empty GT_ROOT/BEADS_DIR not exported** - AgentEnv no longer exports empty environment variables (#385)
|
||||
- **Inherited BEADS_DIR prefix mismatch** - Prevent inherited BEADS_DIR from causing prefix mismatches (#321)
|
||||
|
||||
#### Beads & Routing
|
||||
- **routes.jsonl corruption prevention** - Added protection against routes.jsonl corruption with doctor check for rig-level issues (#377)
|
||||
- **Tracked beads init after clone** - Initialize beads database for tracked beads after git clone (#376)
|
||||
- **Rig root from BeadsPath()** - Correctly return rig root to respect redirect system
|
||||
|
||||
#### Sling & Formula
|
||||
- **Feature and issue vars in formula-on-bead mode** - Pass both variables correctly (#382)
|
||||
- **Crew member shorthand resolution** - Resolve crew members correctly with shorthand paths
|
||||
- **Removed obsolete --naked flag** - Cleanup of deprecated sling option
|
||||
|
||||
#### Doctor & Diagnostics
|
||||
- **Role beads check with shared definitions** - Doctor now validates role beads using shared role definitions (#378)
|
||||
- **Filter bd "Note:" messages** - Custom types check no longer confused by bd informational output (#381)
|
||||
|
||||
#### Installation & Setup
|
||||
- **gt:role label on role beads** - Role beads now properly labeled during creation (#383)
|
||||
- **Fetch origin after refspec config** - Bare clones now fetch after configuring refspec (#384)
|
||||
- **Allow --wrappers in existing town** - No longer recreates HQ unnecessarily (#366)
|
||||
|
||||
#### Session & Lifecycle
|
||||
- **Fallback instructions in start/restart beacons** - Session beacons now include fallback instructions
|
||||
- **Handoff recognizes polecat session pattern** - Correctly handles gt-<rig>-<name> session names (#373)
|
||||
- **gt done resilient to missing agent beads** - No longer fails when agent beads don't exist
|
||||
- **MR beads as ephemeral wisps** - Create MR beads as ephemeral wisps for proper cleanup
|
||||
- **Auto-detect cleanup status** - Prevents premature polecat nuke (#361)
|
||||
- **Delete remote polecat branches after merge** - Refinery cleans up remote branches (#369)
|
||||
|
||||
#### Costs & Events
|
||||
- **Query all beads locations for session events** - Cost tracking finds events across locations (#374)
|
||||
|
||||
#### Linting & Quality
|
||||
- **errcheck and unparam violations resolved** - Fixed linting errors
|
||||
- **NudgeSession for all agent notifications** - Mail now uses consistent notification method
|
||||
|
||||
### Documentation
|
||||
|
||||
- **Polecat three-state model** - Clarified working/stalled/zombie states
|
||||
- **Name pool vs polecat pool** - Clarified misconception about pools
|
||||
- **Plugin and escalation system designs** - Added design documentation
|
||||
- **Documentation reorganization** - Concepts, design, and examples structure
|
||||
- **gt prime clarification** - Clarified that gt prime is context recovery, not session start (GH #308)
|
||||
- **Formula package documentation** - Comprehensive package docs
|
||||
- **Various godoc additions** - GenerateMRIDWithTime, isAutonomousRole, formatInt, nil sentinel pattern
|
||||
- **Beads issue ID format** - Clarified format in README (gt-uzx2c)
|
||||
- **Stale polecat identity description** - Fixed outdated documentation
|
||||
|
||||
### Tests
|
||||
|
||||
- **AGENTS.md worktree tests** - Test coverage for AGENTS.md in worktrees
|
||||
- **Comprehensive test coverage** - Added tests for 5 packages (#351)
|
||||
- **Sling test for bd empty output** - Fixed test for empty output handling
|
||||
|
||||
### Deprecated
|
||||
|
||||
- **`gt polecat add`** - Added migration warning for deprecated command
|
||||
|
||||
### Contributors
|
||||
|
||||
Thanks to all contributors for this release:
|
||||
- @JeremyKalmus - Various contributions (#364)
|
||||
- @boshu2 - Formula package documentation (#343), PR documentation (#352)
|
||||
- @sauerdaniel - Polecat mail notification fix (#347)
|
||||
- @abhijit360 - Assign model to role (#368)
|
||||
- @julianknutsen - Beads path fix (#334)
|
||||
|
||||
## [0.2.5] - 2026-01-11
|
||||
|
||||
### Added
|
||||
- **`gt mail mark-read`** - Mark messages as read without opening them (desire path)
|
||||
- **`gt down --polecats`** - Shut down polecats without affecting other components
|
||||
- **Self-cleaning polecat model** - Polecats self-nuke on completion, witness tracks leases
|
||||
- **`gt prime --state` validation** - Flag exclusivity checks for cleaner CLI
|
||||
|
||||
### Changed
|
||||
- **Removed `gt stop`** - Use `gt down --polecats` instead (cleaner semantics)
|
||||
- **Policy-neutral templates** - crew.md.tmpl checks remote origin for PR policy
|
||||
- **Refactored prime.go** - Split 1833-line file into logical modules
|
||||
|
||||
### Fixed
|
||||
- **Polecat re-spawn** - CreateOrReopenAgentBead handles polecat lifecycle correctly (#333)
|
||||
- **Vim mode compatibility** - tmux sends Escape before Enter for vim users
|
||||
- **Worktree default branch** - Uses rig's configured default branch (#325)
|
||||
- **Agent bead type** - Sets --type=agent when creating agent beads
|
||||
- **Bootstrap priming** - Reduced AGENTS.md to bootstrap pointer, fixed CLAUDE.md templates
|
||||
|
||||
### Documentation
|
||||
- Updated witness help text for self-cleaning model
|
||||
- Updated daemon comments for self-cleaning model
|
||||
- Policy-aware PR guidance in crew template
|
||||
|
||||
## [0.2.4] - 2026-01-10
|
||||
|
||||
Priming subsystem overhaul and Zero Framework Cognition (ZFC) improvements.
|
||||
|
||||
### Added
|
||||
|
||||
#### Priming Subsystem
|
||||
- **PRIME.md provisioning** - Auto-provision PRIME.md at rig level so all workers inherit Gas Town context (GUPP, hooks, propulsion) (#hq-5z76w)
|
||||
- **Post-handoff detection** - `gt prime` detects handoff marker and outputs "HANDOFF COMPLETE" warning to prevent handoff loop bug (#hq-ukjrr)
|
||||
- **Priming health checks** - `gt doctor` validates priming subsystem: SessionStart hook, gt prime command, PRIME.md presence, CLAUDE.md size (#hq-5scnt)
|
||||
- **`gt prime --dry-run`** - Preview priming without side effects
|
||||
- **`gt prime --state`** - Output session state (normal, post-handoff, crash-recovery, autonomous)
|
||||
- **`gt prime --explain`** - Add [EXPLAIN] tags for debugging priming decisions
|
||||
|
||||
#### Formula & Configuration
|
||||
- **Rig-level default formulas** - Configure default formula at rig level (#297)
|
||||
- **Witness --agent/--env overrides** - Override agent and environment variables for witness (#293, #294)
|
||||
|
||||
#### Developer Experience
|
||||
- **UX system import** - Comprehensive UX system from beads (#311)
|
||||
- **Explicit handoff instructions** - Clearer nudge message for handoff recipients
|
||||
|
||||
### Fixed
|
||||
|
||||
#### Zero Framework Cognition (ZFC)
|
||||
- **Query tmux directly** - Remove marker TTL, query tmux for agent state
|
||||
- **Remove PID-based detection** - Agent liveness from tmux, not PIDs
|
||||
- **Agent-controlled thresholds** - Stuck detection moved to agent config
|
||||
- **Remove pending.json tracking** - Eliminated anti-pattern
|
||||
- **Derive state from files** - ZFC state from filesystem, not memory cache
|
||||
- **Remove Go-side computation** - No stderr parsing violations
|
||||
|
||||
#### Hooks & Beads
|
||||
- **Cross-level hook visibility** - Hooked beads visible to mayor/deacon (#aeb4c0d)
|
||||
- **Warn on closed hooked bead** - Alert when hooked bead already closed (#2f50a59)
|
||||
- **Correct agent bead ID format** - Fix bd create flags for agent beads (#c4fcdd8)
|
||||
|
||||
#### Formula
|
||||
- **rigPath fallback** - Set rigPath when falling back to gastown default (#afb944f)
|
||||
|
||||
#### Doctor
|
||||
- **Full AgentEnv for env-vars check** - Use complete environment for validation (#ce231a3)
|
||||
|
||||
### Changed
|
||||
|
||||
- **Refactored beads/mail modules** - Split large files into focused modules for maintainability
|
||||
|
||||
## [0.2.3] - 2026-01-09
|
||||
|
||||
Worker safety release - prevents accidental termination of active agents.
|
||||
|
||||
> **Note**: The Deacon safety improvements are believed to be correct but have not
|
||||
> yet been extensively tested in production. We recommend running with
|
||||
> `gt deacon pause` initially and monitoring behavior before enabling full patrol.
|
||||
> Please report any issues. A 0.3.0 release will follow once these changes are
|
||||
> battle-tested.
|
||||
|
||||
### Critical Safety Improvements
|
||||
|
||||
- **Kill authority removed from Deacon** - Deacon patrol now only detects zombies via `--dry-run`, never kills directly. Death warrants are filed for Boot to handle interrogation/execution. This prevents destruction of worker context, mid-task progress, and unsaved state (#gt-vhaej)
|
||||
- **Bulletproof pause mechanism** - Multi-layer pause for Deacon with file-based state, `gt deacon pause/resume` commands, and guards in `gt prime` and heartbeat (#265)
|
||||
- **Doctor warns instead of killing** - `gt doctor` now warns about stale town-root settings rather than killing sessions (#243)
|
||||
- **Orphan process check informational** - Doctor's orphan process detection is now informational only, not actionable (#272)
|
||||
|
||||
### Added
|
||||
|
||||
- **`gt account switch` command** - Switch between Claude Code accounts with `gt account switch <handle>`. Manages `~/.claude` symlinks and updates default account
|
||||
- **`gt crew list --all`** - Show all crew members across all rigs (#276)
|
||||
- **Rig-level custom agent support** - Configure different agents per-rig (#12)
|
||||
- **Rig identity beads check** - Doctor validates rig identity beads exist
|
||||
- **GT_ROOT env var** - Set for all agent sessions for consistent environment
|
||||
- **New agent presets** - Added Cursor, Auggie (Augment Code), and Sourcegraph AMP as built-in agent presets (#247)
|
||||
- **Context Management docs** - Added to Witness template for better context handling (gt-jjama)
|
||||
|
||||
### Fixed
|
||||
|
||||
- **`gt prime --hook` recognized** - Doctor now recognizes `gt prime --hook` as valid session hook config (#14)
|
||||
- **Integration test reliability** - Improved test stability (#13)
|
||||
- **IsClaudeRunning detection** - Now detects 'claude' and version patterns correctly (#273)
|
||||
- **Deacon heartbeat restored** - `ensureDeaconRunning` restored to heartbeat using Manager pattern (#271)
|
||||
- **Deacon session names** - Correct session name references in formulas (#270)
|
||||
- **Hidden directory scanning** - Ignore `.claude` and other dot directories when enumerating polecats (#258, #279)
|
||||
- **SetupRedirect tracked beads** - Works correctly with tracked beads architecture where canonical location is `mayor/rig/.beads`
|
||||
- **Tmux shell ready** - Wait for shell ready before sending keys (#264)
|
||||
- **Gastown prefix derivation** - Correctly derive `gt-` prefix for gastown compound words (gt-m46bb)
|
||||
- **Custom beads types** - Register custom beads types during install (#250)
|
||||
|
||||
### Changed
|
||||
|
||||
- **Refinery Manager pattern** - Replaced `ensureRefinerySession` with `refinery.Manager.Start()` for consistency
|
||||
|
||||
### Removed
|
||||
|
||||
- **Unused formula JSON** - Removed unused JSON formula file (cleanup)
|
||||
|
||||
### Contributors
|
||||
|
||||
Thanks to all contributors for this release:
|
||||
- @julianknutsen - Doctor fixes (#14, #271, #272, #273), formula fixes (#270), GT_ROOT env (#268)
|
||||
- @joshuavial - Hidden directory scanning (#258, #279), crew list --all (#276)
|
||||
|
||||
## [0.2.2] - 2026-01-07
|
||||
|
||||
Rig operational state management, unified agent startup, and extensive stability fixes.
|
||||
|
||||
### Added
|
||||
|
||||
#### Rig Operational State Management
|
||||
- **`gt rig park/unpark` commands** - Level 1 rig control: pause daemon auto-start while preserving sessions
|
||||
- **`gt rig dock/undock` commands** - Level 2 rig control: stop all sessions and prevent auto-start (gt-9gm9n)
|
||||
- **`gt rig config` commands** - Per-rig configuration management (gt-hhmkq)
|
||||
- **Rig identity beads** - Schema and creation for rig identity tracking (gt-zmznh)
|
||||
- **Property layer lookup** - Hierarchical configuration resolution (gt-emh1c)
|
||||
- **Operational state in status** - `gt rig status` shows park/dock state
|
||||
|
||||
#### Agent Configuration & Startup
|
||||
- **`--agent` overrides** - Override agent for start/attach/sling commands
|
||||
- **Unified agent startup** - Manager pattern for consistent agent initialization
|
||||
- **Claude settings installation** - Auto-install during rig and HQ creation
|
||||
- **Runtime-aware tmux checks** - Detect actual agent state from tmux sessions
|
||||
|
||||
#### Status & Monitoring
|
||||
- **`gt status --watch`** - Watch mode with auto-refresh (#231)
|
||||
- **Compact status output** - One-line-per-worker format as new default
|
||||
- **LED status indicators** - Visual indicators for rigs in Mayor tmux status line
|
||||
- **Parked/docked indicators** - Pause emoji (⏸) for inactive rigs in statusline
|
||||
|
||||
#### Beads & Workflow
|
||||
- **Minimum beads version check** - Validates beads CLI compatibility (gt-im3fl)
|
||||
- **ZFC convoy auto-close** - `bd close` triggers convoy completion (gt-3qw5s)
|
||||
- **Stale hooked bead cleanup** - Deacon clears orphaned hooks (gt-2yls3)
|
||||
- **Doctor prefix mismatch detection** - Detect misconfigured rig prefixes (gt-17wdl)
|
||||
- **Unified beads redirect** - Single redirect system for tracked and local beads (#222)
|
||||
- **Route from rig to town beads** - Cross-level bead routing
|
||||
|
||||
#### Infrastructure
|
||||
- **Windows-compatible file locking** - Daemon lock works on Windows
|
||||
- **`--purge` flag for crews** - Full crew obliteration option
|
||||
- **Debug logging for suppressed errors** - Better visibility into startup issues (gt-6d7eh)
|
||||
- **hq- prefix in tmux cycle bindings** - Navigate to Mayor/Deacon sessions
|
||||
- **Wisp config storage layer** - Transient/local settings for ephemeral workflows
|
||||
- **Sparse checkout** - Exclude Claude context files from source repos
|
||||
|
||||
### Changed
|
||||
|
||||
- **Daemon respects rig operational state** - Parked/docked rigs not auto-started
|
||||
- **Agent startup unified** - Manager pattern replaces ad-hoc initialization
|
||||
- **Mayor files moved** - Reorganized into `mayor/` subdirectory
|
||||
- **Refinery merges local branches** - No longer fetches from origin (gt-cio03)
|
||||
- **Polecats start from origin/default-branch** - Consistent recycled state
|
||||
- **Observable states removed** - Discover agent state from tmux, don't track (gt-zecmc)
|
||||
- **mol-town-shutdown v3** - Complete cleanup formula (gt-ux23f)
|
||||
- **Witness delays polecat cleanup** - Wait until MR merges (gt-12hwb)
|
||||
- **Nudge on divergence** - Daemon nudges agents instead of silent accept
|
||||
- **README rewritten** - Comprehensive guides and architecture docs (#226)
|
||||
- **`gt rigs` → `gt rig list`** - Command renamed in templates/docs (#217)
|
||||
|
||||
### Fixed
|
||||
|
||||
#### Doctor & Lifecycle
|
||||
- **`--restart-sessions` flag required** - Doctor won't cycle sessions without explicit flag (gt-j44ri)
|
||||
- **Only cycle patrol roles** - Doctor --fix doesn't restart crew/polecats (hq-qthgye)
|
||||
- **Session-ended events auto-closed** - Prevent accumulation (gt-8tc1v)
|
||||
- **GUPP propulsion nudge** - Added to daemon restartSession
|
||||
|
||||
#### Sling & Beads
|
||||
- **Sling uses bd native routing** - No BEADS_DIR override needed
|
||||
- **Sling parses wisp JSON correctly** - Handle `new_epic_id` field
|
||||
- **Sling resolves rig path** - Cross-rig bead hooking works
|
||||
- **Sling waits for Claude ready** - Don't nudge until session responsive (#146)
|
||||
- **Correct beads database for sling** - Rig-level beads used (gt-n5gga)
|
||||
- **Close hooked beads before clearing** - Proper cleanup order (gt-vwjz6)
|
||||
- **Removed dead sling flags** - `--molecule` and `--quality` cleaned up
|
||||
|
||||
#### Agent Sessions
|
||||
- **Witness kills tmux on Stop()** - Clean session termination
|
||||
- **Deacon uses session package** - Correct hq- session names (gt-r38pj)
|
||||
- **Honor rig agent for witness/refinery** - Respect per-rig settings
|
||||
- **Canonical hq role bead IDs** - Consistent naming
|
||||
- **hq- prefix in status display** - Global agents shown correctly (gt-vcvyd)
|
||||
- **Restart Claude when dead** - Recover sessions where tmux exists but Claude died
|
||||
- **Town session cycling** - Works from any directory
|
||||
|
||||
#### Polecat & Crew
|
||||
- **Nuke not blocked by stale hooks** - Closed beads don't prevent cleanup (gt-jc7bq)
|
||||
- **Crew stop dry-run support** - Preview cleanup before executing (gt-kjcx4)
|
||||
- **Crew defaults to --all** - `gt crew start <rig>` starts all crew (gt-s8mpt)
|
||||
- **Polecat cleanup handlers** - `gt witness process` invokes handlers (gt-h3gzj)
|
||||
|
||||
#### Daemon & Configuration
|
||||
- **Create mayor/daemon.json** - `gt start` and `gt doctor --fix` initialize daemon state (#225)
|
||||
- **Initialize git before beads** - Enable repo fingerprint (#180)
|
||||
- **Handoff preserves env vars** - Claude Code environment not lost (#216)
|
||||
- **Agent settings passed correctly** - Witness and daemon respawn use rigPath
|
||||
- **Log rig discovery errors** - Don't silently swallow (gt-rsnj9)
|
||||
|
||||
#### Refinery & Merge Queue
|
||||
- **Use rig's default_branch** - Not hardcoded 'main'
|
||||
- **MERGE_FAILED sent to Witness** - Proper failure notification
|
||||
- **Removed BranchPushedToRemote checks** - Local-only workflow support (gt-dymy5)
|
||||
|
||||
#### Misc Fixes
|
||||
- **BeadsSetupRedirect preserves tracked files** - Don't clobber existing files (gt-fj0ol)
|
||||
- **PATH export in hooks** - Ensure commands find binaries
|
||||
- **Replace panic with fallback** - ID generation gracefully degrades (#213)
|
||||
- **Removed duplicate WorktreeAddFromRef** - Code cleanup
|
||||
- **Town root beads for Deacon** - Use correct beads location (gt-sstg)
|
||||
|
||||
### Refactored
|
||||
|
||||
- **AgentStateManager pattern** - Shared state management extracted (gt-gaw8e)
|
||||
- **CleanupStatus type** - Replace raw strings (gt-77gq7)
|
||||
- **ExecWithOutput utility** - Common command execution (gt-vurfr)
|
||||
- **runBdCommand helper** - DRY mail package (gt-8i6bg)
|
||||
- **Config expansion helper** - Generic DRY config (gt-i85sg)
|
||||
|
||||
### Documentation
|
||||
|
||||
- **Property layers guide** - Implementation documentation
|
||||
- **Worktree architecture** - Clarified beads routing
|
||||
- **Agent config** - Onboarding docs mention --agent overrides
|
||||
- **Polecat Operations section** - Added to Mayor docs (#140)
|
||||
|
||||
### Contributors
|
||||
|
||||
Thanks to all contributors for this release:
|
||||
- @julianknutsen - Claude settings inheritance (#239)
|
||||
- @joshuavial - Sling wisp JSON parse (#238)
|
||||
- @michaellady - Unified beads redirect (#222), daemon.json fix (#225)
|
||||
- @greghughespdx - PATH in hooks fix (#139)
|
||||
|
||||
## [0.2.1] - 2026-01-05
|
||||
|
||||
Bug fixes, security hardening, and new `gt config` command.
|
||||
|
||||
4
Makefile
4
Makefile
@@ -22,8 +22,8 @@ ifeq ($(shell uname),Darwin)
|
||||
@echo "Signed $(BINARY) for macOS"
|
||||
endif
|
||||
|
||||
install: build
|
||||
cp $(BUILD_DIR)/$(BINARY) ~/bin/$(BINARY)
|
||||
install: generate
|
||||
go install -ldflags "$(LDFLAGS)" ./cmd/gt
|
||||
|
||||
clean:
|
||||
rm -f $(BUILD_DIR)/$(BINARY)
|
||||
|
||||
646
README.md
646
README.md
@@ -1,388 +1,492 @@
|
||||
# Gas Town
|
||||
|
||||
Multi-agent orchestrator for Claude Code. Track work with convoys; sling to agents.
|
||||
**Multi-agent orchestration system for Claude Code with persistent work tracking**
|
||||
|
||||
## Why Gas Town?
|
||||
## Overview
|
||||
|
||||
| Without | With Gas Town |
|
||||
|---------|---------------|
|
||||
| Agents forget work after restart | Work persists on hooks - survives crashes, compaction, restarts |
|
||||
| Manual coordination | Agents have mailboxes, identities, and structured handoffs |
|
||||
| 4-10 agents is chaotic | Comfortably scale to 20-30 agents |
|
||||
| Work state in agent memory | Work state in Beads (git-backed ledger) |
|
||||
Gas Town is a workspace manager that lets you coordinate multiple Claude Code agents working on different tasks. Instead of losing context when agents restart, Gas Town persists work state in git-backed hooks, enabling reliable multi-agent workflows.
|
||||
|
||||
## Prerequisites
|
||||
### What Problem Does This Solve?
|
||||
|
||||
- **Go 1.23+** - [go.dev/dl](https://go.dev/dl/)
|
||||
- **Git 2.25+** - for worktree support
|
||||
- **beads (bd)** - [github.com/steveyegge/beads](https://github.com/steveyegge/beads) - required for issue tracking
|
||||
- **tmux 3.0+** - recommended for the full experience (the Mayor session is the primary interface)
|
||||
- **Claude Code CLI** - [claude.ai/code](https://claude.ai/code)
|
||||
| Challenge | Gas Town Solution |
|
||||
| ------------------------------- | -------------------------------------------- |
|
||||
| Agents lose context on restart | Work persists in git-backed hooks |
|
||||
| Manual agent coordination | Built-in mailboxes, identities, and handoffs |
|
||||
| 4-10 agents become chaotic | Scale comfortably to 20-30 agents |
|
||||
| Work state lost in agent memory | Work state stored in Beads ledger |
|
||||
|
||||
## Quick Start
|
||||
### Architecture
|
||||
|
||||
```bash
|
||||
# Install
|
||||
go install github.com/steveyegge/gastown/cmd/gt@latest
|
||||
```mermaid
|
||||
graph TB
|
||||
Mayor[The Mayor<br/>AI Coordinator]
|
||||
Town[Town Workspace<br/>~/gt/]
|
||||
|
||||
# Ensure Go binaries are in your PATH (add to ~/.zshrc or ~/.bashrc)
|
||||
export PATH="$PATH:$HOME/go/bin"
|
||||
Town --> Mayor
|
||||
Town --> Rig1[Rig: Project A]
|
||||
Town --> Rig2[Rig: Project B]
|
||||
|
||||
# Create workspace (--git auto-initializes git repository)
|
||||
gt install ~/gt --git
|
||||
cd ~/gt
|
||||
Rig1 --> Crew1[Crew Member<br/>Your workspace]
|
||||
Rig1 --> Hooks1[Hooks<br/>Persistent storage]
|
||||
Rig1 --> Polecats1[Polecats<br/>Worker agents]
|
||||
|
||||
# Add a project
|
||||
gt rig add myproject https://github.com/you/repo.git
|
||||
Rig2 --> Crew2[Crew Member]
|
||||
Rig2 --> Hooks2[Hooks]
|
||||
Rig2 --> Polecats2[Polecats]
|
||||
|
||||
# Create your personal workspace
|
||||
gt crew add <yourname> --rig myproject
|
||||
Hooks1 -.git worktree.-> GitRepo1[Git Repository]
|
||||
Hooks2 -.git worktree.-> GitRepo2[Git Repository]
|
||||
|
||||
# Start working
|
||||
cd myproject/crew/<yourname>
|
||||
```
|
||||
|
||||
For advanced multi-agent coordination, use the Mayor session:
|
||||
|
||||
```bash
|
||||
gt mayor attach # Enter the Mayor's office
|
||||
```
|
||||
|
||||
Inside the Mayor session, you're talking to Claude with full town context:
|
||||
|
||||
> "Help me fix the authentication bug in myproject"
|
||||
|
||||
The Mayor will create convoys, dispatch workers, and coordinate everything. You can also run CLI commands directly:
|
||||
|
||||
```bash
|
||||
# Create a convoy and sling work (CLI workflow)
|
||||
gt convoy create "Feature X" issue-123 issue-456 --notify --human
|
||||
gt sling issue-123 myproject
|
||||
|
||||
# Track progress
|
||||
gt convoy list
|
||||
|
||||
# Switch between agent sessions
|
||||
gt agents
|
||||
style Mayor fill:#e1f5ff
|
||||
style Town fill:#f0f0f0
|
||||
style Rig1 fill:#fff4e1
|
||||
style Rig2 fill:#fff4e1
|
||||
```
|
||||
|
||||
## Core Concepts
|
||||
|
||||
**The Mayor** is your AI coordinator. It's Claude Code with full context about your workspace, projects, and agents. The Mayor session (`gt mayor attach`) is the primary way to interact with Gas Town - just tell it what you want to accomplish.
|
||||
### The Mayor 🎩
|
||||
|
||||
```
|
||||
Town (~/gt/) Your workspace
|
||||
├── Mayor Your AI coordinator (start here)
|
||||
├── Rig (project) Container for a git project + its agents
|
||||
│ ├── Polecats Workers (ephemeral, spawn → work → disappear)
|
||||
│ ├── Witness Monitors workers, handles lifecycle
|
||||
│ └── Refinery Merge queue processor
|
||||
```
|
||||
Your primary AI coordinator. The Mayor is a Claude Code instance with full context about your workspace, projects, and agents. **Start here** - just tell the Mayor what you want to accomplish.
|
||||
|
||||
**Hook**: Each agent has a hook where work hangs. On wake, run what's on your hook.
|
||||
### Town 🏘️
|
||||
|
||||
**Beads**: Git-backed issue tracker. All work state lives here. [github.com/steveyegge/beads](https://github.com/steveyegge/beads)
|
||||
Your workspace directory (e.g., `~/gt/`). Contains all projects, agents, and configuration.
|
||||
|
||||
## Workflows
|
||||
### Rigs 🏗️
|
||||
|
||||
### Full Stack (Recommended)
|
||||
Project containers. Each rig wraps a git repository and manages its associated agents.
|
||||
|
||||
The primary Gas Town experience. Agents run in tmux sessions with the Mayor as your interface.
|
||||
### Crew Members 👤
|
||||
|
||||
Your personal workspace within a rig. Where you do hands-on work.
|
||||
|
||||
### Polecats 🦨
|
||||
|
||||
Ephemeral worker agents that spawn, complete a task, and disappear.
|
||||
|
||||
### Hooks 🪝
|
||||
|
||||
Git worktree-based persistent storage for agent work. Survives crashes and restarts.
|
||||
|
||||
### Convoys 🚚
|
||||
|
||||
Work tracking units. Bundle multiple beads that get assigned to agents.
|
||||
|
||||
### Beads Integration 📿
|
||||
|
||||
Git-backed issue tracking system that stores work state as structured data.
|
||||
|
||||
**Bead IDs** (also called **issue IDs**) use a prefix + 5-character alphanumeric format (e.g., `gt-abc12`, `hq-x7k2m`). The prefix indicates the item's origin or rig. Commands like `gt sling` and `gt convoy` accept these IDs to reference specific work items. The terms "bead" and "issue" are used interchangeably—beads are the underlying data format, while issues are the work items stored as beads.
|
||||
|
||||
> **New to Gas Town?** See the [Glossary](docs/glossary.md) for a complete guide to terminology and concepts.
|
||||
|
||||
## Installation
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- **Go 1.23+** - [go.dev/dl](https://go.dev/dl/)
|
||||
- **Git 2.25+** - for worktree support
|
||||
- **beads (bd) 0.44.0+** - [github.com/steveyegge/beads](https://github.com/steveyegge/beads) (required for custom type support)
|
||||
- **sqlite3** - for convoy database queries (usually pre-installed on macOS/Linux)
|
||||
- **tmux 3.0+** - recommended for full experience
|
||||
- **Claude Code CLI** (default runtime) - [claude.ai/code](https://claude.ai/code)
|
||||
- **Codex CLI** (optional runtime) - [developers.openai.com/codex/cli](https://developers.openai.com/codex/cli)
|
||||
|
||||
### Setup
|
||||
|
||||
```bash
|
||||
gt start # Start Gas Town (daemon + Mayor session)
|
||||
gt mayor attach # Enter Mayor session
|
||||
# Install Gas Town
|
||||
go install github.com/steveyegge/gastown/cmd/gt@latest
|
||||
|
||||
# Inside Mayor session, just ask:
|
||||
# "Create a convoy for issues 123 and 456 in myproject"
|
||||
# "What's the status of my work?"
|
||||
# "Show me what the witness is doing"
|
||||
# Add Go binaries to PATH (add to ~/.zshrc or ~/.bashrc)
|
||||
export PATH="$PATH:$HOME/go/bin"
|
||||
|
||||
# Or use CLI commands:
|
||||
gt convoy create "Feature X" issue-123 issue-456
|
||||
gt sling issue-123 myproject # Spawns polecat automatically
|
||||
gt convoy list # Dashboard view
|
||||
gt agents # Navigate between sessions
|
||||
# Create workspace with git initialization
|
||||
gt install ~/gt --git
|
||||
cd ~/gt
|
||||
|
||||
# Add your first project
|
||||
gt rig add myproject https://github.com/you/repo.git
|
||||
|
||||
# Create your crew workspace
|
||||
gt crew add yourname --rig myproject
|
||||
cd myproject/crew/yourname
|
||||
|
||||
# Start the Mayor session (your main interface)
|
||||
gt mayor attach
|
||||
```
|
||||
|
||||
### Minimal (No Tmux)
|
||||
## Quick Start Guide
|
||||
|
||||
Run individual Claude Code instances manually. Gas Town just tracks state.
|
||||
### Getting Started
|
||||
Run
|
||||
```shell
|
||||
gt install ~/gt --git &&
|
||||
cd ~/gt &&
|
||||
gt config agent list &&
|
||||
gt mayor attach
|
||||
```
|
||||
and tell the Mayor what you want to build!
|
||||
|
||||
---
|
||||
|
||||
### Basic Workflow
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant You
|
||||
participant Mayor
|
||||
participant Convoy
|
||||
participant Agent
|
||||
participant Hook
|
||||
|
||||
You->>Mayor: Tell Mayor what to build
|
||||
Mayor->>Convoy: Create convoy with beads
|
||||
Mayor->>Agent: Sling bead to agent
|
||||
Agent->>Hook: Store work state
|
||||
Agent->>Agent: Complete work
|
||||
Agent->>Convoy: Report completion
|
||||
Mayor->>You: Summary of progress
|
||||
```
|
||||
|
||||
### Example: Feature Development
|
||||
|
||||
```bash
|
||||
gt convoy create "Fix bugs" issue-123 # Create convoy (sling auto-creates if skipped)
|
||||
gt sling issue-123 myproject # Assign to worker
|
||||
claude --resume # Agent reads mail, runs work
|
||||
# 1. Start the Mayor
|
||||
gt mayor attach
|
||||
|
||||
# 2. In Mayor session, create a convoy with bead IDs
|
||||
gt convoy create "Feature X" gt-abc12 gt-def34 --notify --human
|
||||
|
||||
# 3. Assign work to an agent
|
||||
gt sling gt-abc12 myproject
|
||||
|
||||
# 4. Track progress
|
||||
gt convoy list
|
||||
|
||||
# 5. Monitor agents
|
||||
gt agents
|
||||
```
|
||||
|
||||
## Common Workflows
|
||||
|
||||
### Mayor Workflow (Recommended)
|
||||
|
||||
**Best for:** Coordinating complex, multi-issue work
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
Start([Start Mayor]) --> Tell[Tell Mayor<br/>what to build]
|
||||
Tell --> Creates[Mayor creates<br/>convoy + agents]
|
||||
Creates --> Monitor[Monitor progress<br/>via convoy list]
|
||||
Monitor --> Done{All done?}
|
||||
Done -->|No| Monitor
|
||||
Done -->|Yes| Review[Review work]
|
||||
```
|
||||
|
||||
**Commands:**
|
||||
|
||||
```bash
|
||||
# Attach to Mayor
|
||||
gt mayor attach
|
||||
|
||||
# In Mayor, create convoy and let it orchestrate
|
||||
gt convoy create "Auth System" gt-x7k2m gt-p9n4q --notify
|
||||
|
||||
# Track progress
|
||||
gt convoy list
|
||||
```
|
||||
|
||||
### Minimal Mode (No Tmux)
|
||||
|
||||
Run individual runtime instances manually. Gas Town just tracks state.
|
||||
|
||||
```bash
|
||||
gt convoy create "Fix bugs" gt-abc12 # Create convoy (sling auto-creates if skipped)
|
||||
gt sling gt-abc12 myproject # Assign to worker
|
||||
claude --resume # Agent reads mail, runs work (Claude)
|
||||
# or: codex # Start Codex in the workspace
|
||||
gt convoy list # Check progress
|
||||
```
|
||||
|
||||
### Pick Your Roles
|
||||
### Beads Formula Workflow
|
||||
|
||||
Gas Town is modular. Run what you need:
|
||||
**Best for:** Predefined, repeatable processes
|
||||
|
||||
- **Polecats only**: Manual spawning, no monitoring
|
||||
- **+ Witness**: Automatic worker lifecycle, stuck detection
|
||||
- **+ Refinery**: Merge queue, code review
|
||||
- **+ Mayor**: Cross-project coordination
|
||||
Formulas are TOML-defined workflows stored in `.beads/formulas/`.
|
||||
|
||||
## Cooking Formulas
|
||||
|
||||
Formulas define structured workflows. Cook them, sling them to agents.
|
||||
|
||||
### Basic Example
|
||||
**Example Formula** (`.beads/formulas/release.formula.toml`):
|
||||
|
||||
```toml
|
||||
# .beads/formulas/shiny.formula.toml
|
||||
formula = "shiny"
|
||||
description = "Design before code, review before ship"
|
||||
description = "Standard release process"
|
||||
formula = "release"
|
||||
version = 1
|
||||
|
||||
[[steps]]
|
||||
id = "design"
|
||||
description = "Think about architecture"
|
||||
|
||||
[[steps]]
|
||||
id = "implement"
|
||||
needs = ["design"]
|
||||
|
||||
[[steps]]
|
||||
id = "test"
|
||||
needs = ["implement"]
|
||||
|
||||
[[steps]]
|
||||
id = "submit"
|
||||
needs = ["test"]
|
||||
```
|
||||
|
||||
### Using Formulas
|
||||
|
||||
```bash
|
||||
bd formula list # See available formulas
|
||||
bd cook shiny # Cook into a protomolecule
|
||||
bd mol pour shiny --var feature=auth # Create runnable molecule
|
||||
gt convoy create "Auth feature" gt-xyz # Track with convoy
|
||||
gt sling gt-xyz myproject # Assign to worker
|
||||
gt convoy list # Monitor progress
|
||||
```
|
||||
|
||||
### What Happens
|
||||
|
||||
1. **Cook** expands the formula into a protomolecule (frozen template)
|
||||
2. **Pour** creates a molecule (live workflow) with steps as beads
|
||||
3. **Worker executes** each step, closing beads as it goes
|
||||
4. **Crash recovery**: Worker restarts, reads molecule, continues from last step
|
||||
|
||||
### Example: Beads Release Molecule
|
||||
|
||||
A real workflow for releasing a new beads version:
|
||||
|
||||
```toml
|
||||
formula = "beads-release"
|
||||
description = "Version bump and release workflow"
|
||||
[vars.version]
|
||||
description = "The semantic version to release (e.g., 1.2.0)"
|
||||
required = true
|
||||
|
||||
[[steps]]
|
||||
id = "bump-version"
|
||||
description = "Update version in version.go and CHANGELOG"
|
||||
|
||||
[[steps]]
|
||||
id = "update-deps"
|
||||
needs = ["bump-version"]
|
||||
description = "Run go mod tidy, update go.sum"
|
||||
title = "Bump version"
|
||||
description = "Run ./scripts/bump-version.sh {{version}}"
|
||||
|
||||
[[steps]]
|
||||
id = "run-tests"
|
||||
needs = ["update-deps"]
|
||||
description = "Full test suite, check for regressions"
|
||||
title = "Run tests"
|
||||
description = "Run make test"
|
||||
needs = ["bump-version"]
|
||||
|
||||
[[steps]]
|
||||
id = "build-binaries"
|
||||
id = "build"
|
||||
title = "Build"
|
||||
description = "Run make build"
|
||||
needs = ["run-tests"]
|
||||
description = "Cross-compile for all platforms"
|
||||
|
||||
[[steps]]
|
||||
id = "create-tag"
|
||||
needs = ["build-binaries"]
|
||||
description = "Git tag with version, push to origin"
|
||||
title = "Create release tag"
|
||||
description = "Run git tag -a v{{version}} -m 'Release v{{version}}'"
|
||||
needs = ["build"]
|
||||
|
||||
[[steps]]
|
||||
id = "publish-release"
|
||||
id = "publish"
|
||||
title = "Publish"
|
||||
description = "Run ./scripts/publish.sh"
|
||||
needs = ["create-tag"]
|
||||
description = "Create GitHub release with binaries"
|
||||
```
|
||||
|
||||
Cook it, pour it, sling it. The polecat runs through each step, and if it crashes
|
||||
after `run-tests`, a new polecat picks up at `build-binaries`.
|
||||
**Execute:**
|
||||
|
||||
### Formula Composition
|
||||
```bash
|
||||
# List available formulas
|
||||
bd formula list
|
||||
|
||||
```toml
|
||||
# Extend an existing formula
|
||||
formula = "shiny-enterprise"
|
||||
extends = ["shiny"]
|
||||
# Run a formula with variables
|
||||
bd cook release --var version=1.2.0
|
||||
|
||||
[compose]
|
||||
aspects = ["security-audit"] # Add cross-cutting concerns
|
||||
# Create formula instance for tracking
|
||||
bd mol pour release --var version=1.2.0
|
||||
```
|
||||
|
||||
### Manual Convoy Workflow
|
||||
|
||||
**Best for:** Direct control over work distribution
|
||||
|
||||
```bash
|
||||
# Create convoy manually
|
||||
gt convoy create "Bug Fixes" --human
|
||||
|
||||
# Add issues to existing convoy
|
||||
gt convoy add hq-cv-abc gt-m3k9p gt-w5t2x
|
||||
|
||||
# Assign to specific agents
|
||||
gt sling gt-m3k9p myproject/my-agent
|
||||
|
||||
# Check status
|
||||
gt convoy show
|
||||
```
|
||||
|
||||
## Runtime Configuration
|
||||
|
||||
Gas Town supports multiple AI coding runtimes. Per-rig runtime settings are in `settings/config.json`.
|
||||
|
||||
```json
|
||||
{
|
||||
"runtime": {
|
||||
"provider": "codex",
|
||||
"command": "codex",
|
||||
"args": [],
|
||||
"prompt_mode": "none"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Notes:**
|
||||
|
||||
- Claude uses hooks in `.claude/settings.json` for mail injection and startup.
|
||||
- For Codex, set `project_doc_fallback_filenames = ["CLAUDE.md"]` in
|
||||
`~/.codex/config.toml` so role instructions are picked up.
|
||||
- For runtimes without hooks (e.g., Codex), Gas Town sends a startup fallback
|
||||
after the session is ready: `gt prime`, optional `gt mail check --inject`
|
||||
for autonomous roles, and `gt nudge deacon session-started`.
|
||||
|
||||
## Key Commands
|
||||
|
||||
### For Humans (Overseer)
|
||||
### Workspace Management
|
||||
|
||||
```bash
|
||||
gt start # Start Gas Town (daemon + agents)
|
||||
gt shutdown # Graceful shutdown
|
||||
gt status # Town overview
|
||||
gt <role> attach # Jump into any agent session
|
||||
# e.g., gt mayor attach, gt witness attach
|
||||
gt install <path> # Initialize workspace
|
||||
gt rig add <name> <repo> # Add project
|
||||
gt rig list # List projects
|
||||
gt crew add <name> --rig <rig> # Create crew workspace
|
||||
```
|
||||
|
||||
### Agent Operations
|
||||
|
||||
```bash
|
||||
gt agents # List active agents
|
||||
gt sling <bead-id> <rig> # Assign work to agent
|
||||
gt sling <bead-id> <rig> --agent cursor # Override runtime for this sling/spawn
|
||||
gt mayor attach # Start Mayor session
|
||||
gt mayor start --agent auggie # Run Mayor with a specific agent alias
|
||||
gt prime # Context recovery (run inside existing session)
|
||||
```
|
||||
|
||||
**Built-in agent presets**: `claude`, `gemini`, `codex`, `cursor`, `auggie`, `amp`
|
||||
|
||||
### Convoy (Work Tracking)
|
||||
|
||||
```bash
|
||||
gt convoy create <name> [issues...] # Create convoy with issues
|
||||
gt convoy list # List all convoys
|
||||
gt convoy show [id] # Show convoy details
|
||||
gt convoy add <convoy-id> <issue-id...> # Add issues to convoy
|
||||
```
|
||||
|
||||
### Configuration
|
||||
|
||||
```bash
|
||||
gt config agent list [--json] # List all agents (built-in + custom)
|
||||
gt config agent get <name> # Show agent configuration
|
||||
gt config agent set <name> <cmd> # Create or update custom agent
|
||||
gt config agent remove <name> # Remove custom agent (built-ins protected)
|
||||
gt config default-agent [name] # Get or set town default agent
|
||||
```
|
||||
|
||||
**Example**: Use a cheaper model for most work:
|
||||
```bash
|
||||
# Set custom agent command
|
||||
gt config agent set claude-glm "claude-glm --model glm-4"
|
||||
gt config agent set codex-low "codex --thinking low"
|
||||
|
||||
# Set default agent
|
||||
gt config default-agent claude-glm
|
||||
|
||||
# View config
|
||||
gt config show
|
||||
```
|
||||
|
||||
Most other work happens through agents - just ask them.
|
||||
|
||||
### For Agents
|
||||
### Beads Integration
|
||||
|
||||
```bash
|
||||
# Convoy (primary dashboard)
|
||||
gt convoy list # Active work across all rigs
|
||||
gt convoy status <id> # Detailed convoy progress
|
||||
gt convoy create "name" <issues> # Create new convoy
|
||||
|
||||
# Work assignment
|
||||
gt sling <bead> <rig> # Assign work to polecat
|
||||
bd ready # Show available work
|
||||
bd list --status=in_progress # Active work
|
||||
|
||||
# Communication
|
||||
gt mail inbox # Check messages
|
||||
gt mail send <addr> -s "..." -m "..."
|
||||
|
||||
# Lifecycle
|
||||
gt handoff # Request session cycle
|
||||
gt peek <agent> # Check agent health
|
||||
|
||||
# Diagnostics
|
||||
gt doctor # Health check
|
||||
gt doctor --fix # Auto-repair
|
||||
bd formula list # List formulas
|
||||
bd cook <formula> # Execute formula
|
||||
bd mol pour <formula> # Create trackable instance
|
||||
bd mol list # List active instances
|
||||
```
|
||||
|
||||
## Cooking Formulas
|
||||
|
||||
Gas Town includes built-in formulas for common workflows. See `.beads/formulas/` for available recipes.
|
||||
|
||||
## Dashboard
|
||||
|
||||
Web-based dashboard for monitoring Gas Town activity.
|
||||
Gas Town includes a web dashboard for monitoring:
|
||||
|
||||
```bash
|
||||
# Start the dashboard
|
||||
# Start dashboard
|
||||
gt dashboard --port 8080
|
||||
|
||||
# Open in browser
|
||||
open http://localhost:8080
|
||||
```
|
||||
|
||||
**Features:**
|
||||
- **Convoy tracking** - View all active convoys with progress bars and work status
|
||||
- **Polecat workers** - See active worker sessions and their activity status
|
||||
- **Refinery status** - Monitor merge queue and PR processing
|
||||
- **Auto-refresh** - Updates every 10 seconds via htmx
|
||||
Features:
|
||||
|
||||
Work status indicators:
|
||||
| Status | Color | Meaning |
|
||||
|--------|-------|---------|
|
||||
| `complete` | Green | All tracked items done |
|
||||
| `active` | Green | Recent activity (< 1 min) |
|
||||
| `stale` | Yellow | Activity 1-5 min ago |
|
||||
| `stuck` | Red | Activity > 5 min ago |
|
||||
| `waiting` | Gray | No assignee/activity |
|
||||
- Real-time agent status
|
||||
- Convoy progress tracking
|
||||
- Hook state visualization
|
||||
- Configuration management
|
||||
|
||||
## Advanced Concepts
|
||||
|
||||
### The Propulsion Principle
|
||||
|
||||
Gas Town uses agent hooks — git worktrees, not to be confused with git's own repository hook scripts — as a propulsion mechanism. Each hook is a git worktree with:
|
||||
|
||||
1. **Persistent state** - Work survives agent restarts
|
||||
2. **Version control** - All changes tracked in git
|
||||
3. **Rollback capability** - Revert to any previous state
|
||||
4. **Multi-agent coordination** - Shared through git
|
||||
|
||||
### Hook Lifecycle
|
||||
|
||||
```mermaid
|
||||
stateDiagram-v2
|
||||
[*] --> Created: Agent spawned
|
||||
Created --> Active: Work assigned
|
||||
Active --> Suspended: Agent paused
|
||||
Suspended --> Active: Agent resumed
|
||||
Active --> Completed: Work done
|
||||
Completed --> Archived: Hook archived
|
||||
Archived --> [*]
|
||||
```
|
||||
|
||||
### MEOW (Mayor-Enhanced Orchestration Workflow)
|
||||
|
||||
MEOW is the recommended pattern:
|
||||
|
||||
1. **Tell the Mayor** - Describe what you want
|
||||
2. **Mayor analyzes** - Breaks down into tasks
|
||||
3. **Convoy creation** - Mayor creates convoy with beads
|
||||
4. **Agent spawning** - Mayor spawns appropriate agents
|
||||
5. **Work distribution** - Beads slung to agents via hooks
|
||||
6. **Progress monitoring** - Track through convoy status
|
||||
7. **Completion** - Mayor summarizes results
|
||||
|
||||
## Shell Completions
|
||||
|
||||
Enable tab completion for `gt` commands:
|
||||
|
||||
### Bash
|
||||
|
||||
```bash
|
||||
# Add to ~/.bashrc
|
||||
source <(gt completion bash)
|
||||
# Bash
|
||||
gt completion bash > /etc/bash_completion.d/gt
|
||||
|
||||
# Or install permanently
|
||||
gt completion bash > /usr/local/etc/bash_completion.d/gt
|
||||
```
|
||||
|
||||
### Zsh
|
||||
|
||||
```bash
|
||||
# Add to ~/.zshrc (before compinit)
|
||||
source <(gt completion zsh)
|
||||
|
||||
# Or install to fpath
|
||||
# Zsh
|
||||
gt completion zsh > "${fpath[1]}/_gt"
|
||||
```
|
||||
|
||||
### Fish
|
||||
|
||||
```bash
|
||||
# Fish
|
||||
gt completion fish > ~/.config/fish/completions/gt.fish
|
||||
```
|
||||
|
||||
## Roles
|
||||
## Project Roles
|
||||
|
||||
| Role | Scope | Job |
|
||||
|------|-------|-----|
|
||||
| **Overseer** | Human | Sets strategy, reviews output, handles escalations |
|
||||
| **Mayor** | Town-wide | Cross-rig coordination, work dispatch |
|
||||
| **Deacon** | Town-wide | Daemon process, agent lifecycle, plugin execution |
|
||||
| **Witness** | Per-rig | Monitor polecats, nudge stuck workers |
|
||||
| **Refinery** | Per-rig | Merge queue, PR review, integration |
|
||||
| **Polecat** | Per-task | Execute work, file discovered issues, request shutdown |
|
||||
| Role | Description | Primary Interface |
|
||||
| --------------- | ------------------ | -------------------- |
|
||||
| **Mayor** | AI coordinator | `gt mayor attach` |
|
||||
| **Human (You)** | Crew member | Your crew directory |
|
||||
| **Polecat** | Worker agent | Spawned by Mayor |
|
||||
| **Hook** | Persistent storage | Git worktree |
|
||||
| **Convoy** | Work tracker | `gt convoy` commands |
|
||||
|
||||
## The Propulsion Principle
|
||||
## Tips
|
||||
|
||||
> If your hook has work, RUN IT.
|
||||
- **Always start with the Mayor** - It's designed to be your primary interface
|
||||
- **Use convoys for coordination** - They provide visibility across agents
|
||||
- **Leverage hooks for persistence** - Your work won't disappear
|
||||
- **Create formulas for repeated tasks** - Save time with Beads recipes
|
||||
- **Monitor the dashboard** - Get real-time visibility
|
||||
- **Let the Mayor orchestrate** - It knows how to manage agents
|
||||
|
||||
Agents wake up, check their hook, execute the molecule. No waiting for commands.
|
||||
Molecules survive crashes - any agent can continue where another left off.
|
||||
## Troubleshooting
|
||||
|
||||
---
|
||||
### Agents lose connection
|
||||
|
||||
## Optional: MEOW Deep Dive
|
||||
Check hooks are properly initialized:
|
||||
|
||||
**M**olecular **E**xpression **O**f **W**ork - the full algebra.
|
||||
```bash
|
||||
gt hooks list
|
||||
gt hooks repair
|
||||
```
|
||||
|
||||
### States of Matter
|
||||
### Convoy stuck
|
||||
|
||||
| Phase | Name | Storage | Behavior |
|
||||
|-------|------|---------|----------|
|
||||
| Ice-9 | Formula | `.beads/formulas/` | Source template, composable |
|
||||
| Solid | Protomolecule | `.beads/` | Frozen template, reusable |
|
||||
| Liquid | Mol | `.beads/` | Flowing work, persistent |
|
||||
| Vapor | Wisp | `.beads/` (ephemeral flag) | Transient, for patrols |
|
||||
Force refresh:
|
||||
|
||||
*(Protomolecules are an homage to The Expanse. Ice-9 is a nod to Vonnegut.)*
|
||||
```bash
|
||||
gt convoy refresh <convoy-id>
|
||||
```
|
||||
|
||||
### Operators
|
||||
### Mayor not responding
|
||||
|
||||
| Operator | From → To | Effect |
|
||||
|----------|-----------|--------|
|
||||
| `cook` | Formula → Protomolecule | Expand macros, flatten |
|
||||
| `pour` | Proto → Mol | Instantiate as persistent |
|
||||
| `wisp` | Proto → Wisp | Instantiate as ephemeral |
|
||||
| `squash` | Mol/Wisp → Digest | Condense to permanent record |
|
||||
| `burn` | Wisp → ∅ | Discard without record |
|
||||
Restart Mayor session:
|
||||
|
||||
---
|
||||
```bash
|
||||
gt mayor detach
|
||||
gt mayor attach
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
MIT License - see LICENSE file for details
|
||||
|
||||
57
cmd/gt/build_test.go
Normal file
57
cmd/gt/build_test.go
Normal file
@@ -0,0 +1,57 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestCrossPlatformBuild verifies that the codebase compiles for all supported
// platforms. This catches cases where platform-specific code (using build tags
// like //go:build !windows) is called from platform-agnostic code without
// providing stubs for all platforms.
func TestCrossPlatformBuild(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping cross-platform build test in short mode")
	}

	// Only attempt cross-compilation where a full Go toolchain is expected:
	// CI, or a darwin/linux development machine.
	if os.Getenv("CI") == "" && runtime.GOOS != "darwin" && runtime.GOOS != "linux" {
		t.Skip("skipping cross-platform build test on unsupported platform")
	}

	// target is one GOOS/GOARCH/CGO_ENABLED combination to build for.
	type target struct {
		os, arch, cgo string
	}
	targets := []target{
		{"linux", "amd64", "0"},
		{"linux", "arm64", "0"},
		{"darwin", "amd64", "0"},
		{"darwin", "arm64", "0"},
		{"windows", "amd64", "0"},
		{"freebsd", "amd64", "0"},
	}

	for _, tgt := range targets {
		tgt := tgt // capture range variable for the parallel subtest
		t.Run(tgt.os+"_"+tgt.arch, func(t *testing.T) {
			t.Parallel()

			// Build the current package, discarding the binary; only the
			// compile result matters.
			cmd := exec.Command("go", "build", "-o", os.DevNull, ".")
			cmd.Dir = "."
			cmd.Env = append(os.Environ(),
				"GOOS="+tgt.os,
				"GOARCH="+tgt.arch,
				"CGO_ENABLED="+tgt.cgo,
			)

			if out, err := cmd.CombinedOutput(); err != nil {
				t.Errorf("build failed for %s/%s:\n%s", tgt.os, tgt.arch, string(out))
			}
		})
	}
}
|
||||
@@ -17,7 +17,9 @@ Complete setup guide for Gas Town multi-agent orchestrator.
|
||||
| Tool | Version | Check | Install |
|
||||
|------|---------|-------|---------|
|
||||
| **tmux** | 3.0+ | `tmux -V` | See below |
|
||||
| **Claude Code** | latest | `claude --version` | See [claude.ai/claude-code](https://claude.ai/claude-code) |
|
||||
| **Claude Code** (default) | latest | `claude --version` | See [claude.ai/claude-code](https://claude.ai/claude-code) |
|
||||
| **Codex CLI** (optional) | latest | `codex --version` | See [developers.openai.com/codex/cli](https://developers.openai.com/codex/cli) |
|
||||
| **OpenCode CLI** (optional) | latest | `opencode --version` | See [opencode.ai](https://opencode.ai) |
|
||||
|
||||
## Installing Prerequisites
|
||||
|
||||
@@ -42,8 +44,8 @@ sudo apt update
|
||||
sudo apt install -y git
|
||||
|
||||
# Install Go (apt version may be outdated, use official installer)
|
||||
wget https://go.dev/dl/go1.24.linux-amd64.tar.gz
|
||||
sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.24.linux-amd64.tar.gz
|
||||
wget https://go.dev/dl/go1.24.12.linux-amd64.tar.gz
|
||||
sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.24.12.linux-amd64.tar.gz
|
||||
echo 'export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin' >> ~/.bashrc
|
||||
source ~/.bashrc
|
||||
|
||||
@@ -130,22 +132,46 @@ gt doctor # Run health checks
|
||||
gt status # Show workspace status
|
||||
```
|
||||
|
||||
### Step 5: Configure Agents (Optional)
|
||||
|
||||
Gas Town supports built-in runtimes (`claude`, `gemini`, `codex`) plus custom agent aliases.
|
||||
|
||||
```bash
|
||||
# List available agents
|
||||
gt config agent list
|
||||
|
||||
# Create an alias (aliases can encode model/thinking flags)
|
||||
gt config agent set codex-low "codex --thinking low"
|
||||
gt config agent set claude-haiku "claude --model haiku --dangerously-skip-permissions"
|
||||
|
||||
# Set the town default agent (used when a rig doesn't specify one)
|
||||
gt config default-agent codex-low
|
||||
```
|
||||
|
||||
You can also override the agent per command without changing defaults:
|
||||
|
||||
```bash
|
||||
gt start --agent codex-low
|
||||
gt sling gt-abc12 myproject --agent claude-haiku
|
||||
```
|
||||
|
||||
## Minimal Mode vs Full Stack Mode
|
||||
|
||||
Gas Town supports two operational modes:
|
||||
|
||||
### Minimal Mode (No Daemon)
|
||||
|
||||
Run individual Claude Code instances manually. Gas Town only tracks state.
|
||||
Run individual runtime instances manually. Gas Town only tracks state.
|
||||
|
||||
```bash
|
||||
# Create and assign work
|
||||
gt convoy create "Fix bugs" issue-123
|
||||
gt sling issue-123 myproject
|
||||
gt convoy create "Fix bugs" gt-abc12
|
||||
gt sling gt-abc12 myproject
|
||||
|
||||
# Run Claude manually
|
||||
# Run runtime manually
|
||||
cd ~/gt/myproject/polecats/<worker>
|
||||
claude --resume
|
||||
claude --resume # Claude Code
|
||||
# or: codex # Codex CLI
|
||||
|
||||
# Check progress
|
||||
gt convoy list
|
||||
@@ -162,9 +188,9 @@ Agents run in tmux sessions. Daemon manages lifecycle automatically.
|
||||
gt daemon start
|
||||
|
||||
# Create and assign work (workers spawn automatically)
|
||||
gt convoy create "Feature X" issue-123 issue-456
|
||||
gt sling issue-123 myproject
|
||||
gt sling issue-456 myproject
|
||||
gt convoy create "Feature X" gt-abc12 gt-def34
|
||||
gt sling gt-abc12 myproject
|
||||
gt sling gt-def34 myproject
|
||||
|
||||
# Monitor on dashboard
|
||||
gt convoy list
|
||||
@@ -277,6 +303,6 @@ rm -rf ~/gt
|
||||
After installation:
|
||||
|
||||
1. **Read the README** - Core concepts and workflows
|
||||
2. **Try a simple workflow** - `gt convoy create "Test" test-issue`
|
||||
2. **Try a simple workflow** - `bd create "Test task"` then `gt convoy create "Test" <bead-id>`
|
||||
3. **Explore docs** - `docs/reference.md` for command reference
|
||||
4. **Run doctor regularly** - `gt doctor` catches problems early
|
||||
|
||||
201
docs/beads-native-messaging.md
Normal file
201
docs/beads-native-messaging.md
Normal file
@@ -0,0 +1,201 @@
|
||||
# Beads-Native Messaging
|
||||
|
||||
This document describes the beads-native messaging system for Gas Town, which replaces the file-based messaging configuration with persistent beads stored in the town's `.beads` directory.
|
||||
|
||||
## Overview
|
||||
|
||||
Beads-native messaging introduces three new bead types for managing communication:
|
||||
|
||||
- **Groups** (`gt:group`) - Named collections of addresses for mail distribution
|
||||
- **Queues** (`gt:queue`) - Work queues where messages can be claimed by workers
|
||||
- **Channels** (`gt:channel`) - Pub/sub broadcast streams with message retention
|
||||
|
||||
Town-level messaging beads use the `hq-` prefix because they span rigs; rig-level variants (such as rig queues) use the rig's own prefix instead.
|
||||
|
||||
## Bead Types
|
||||
|
||||
### Groups (`gt:group`)
|
||||
|
||||
Groups are named collections of addresses used for mail distribution. When you send to a group, the message is delivered to all members.
|
||||
|
||||
**Bead ID format:** `hq-group-<name>` (e.g., `hq-group-ops-team`)
|
||||
|
||||
**Fields:**
|
||||
- `name` - Unique group name
|
||||
- `members` - Comma-separated list of addresses, patterns, or nested group names
|
||||
- `created_by` - Who created the group (from BD_ACTOR)
|
||||
- `created_at` - ISO 8601 timestamp
|
||||
|
||||
**Member types:**
|
||||
- Direct addresses: `gastown/crew/max`, `mayor/`, `deacon/`
|
||||
- Wildcard patterns: `*/witness`, `gastown/*`, `gastown/crew/*`
|
||||
- Special patterns: `@town`, `@crew`, `@witnesses`
|
||||
- Nested groups: Reference other group names
|
||||
|
||||
### Queues (`gt:queue`)
|
||||
|
||||
Queues are work queues where messages wait to be claimed by workers. Unlike groups, each message goes to exactly one claimant.
|
||||
|
||||
**Bead ID format:** `hq-q-<name>` (town-level) or `gt-q-<name>` (rig-level)
|
||||
|
||||
**Fields:**
|
||||
- `name` - Queue name
|
||||
- `status` - `active`, `paused`, or `closed`
|
||||
- `max_concurrency` - Maximum concurrent workers (0 = unlimited)
|
||||
- `processing_order` - `fifo` or `priority`
|
||||
- `available_count` - Items ready to process
|
||||
- `processing_count` - Items currently being processed
|
||||
- `completed_count` - Items completed
|
||||
- `failed_count` - Items that failed
|
||||
|
||||
### Channels (`gt:channel`)
|
||||
|
||||
Channels are pub/sub streams for broadcasting messages. Messages are retained according to the channel's retention policy.
|
||||
|
||||
**Bead ID format:** `hq-channel-<name>` (e.g., `hq-channel-alerts`)
|
||||
|
||||
**Fields:**
|
||||
- `name` - Unique channel name
|
||||
- `subscribers` - Comma-separated list of subscribed addresses
|
||||
- `status` - `active` or `closed`
|
||||
- `retention_count` - Number of recent messages to retain (0 = unlimited)
|
||||
- `retention_hours` - Hours to retain messages (0 = forever)
|
||||
- `created_by` - Who created the channel
|
||||
- `created_at` - ISO 8601 timestamp
|
||||
|
||||
## CLI Commands
|
||||
|
||||
### Group Management
|
||||
|
||||
```bash
|
||||
# List all groups
|
||||
gt mail group list
|
||||
|
||||
# Show group details
|
||||
gt mail group show <name>
|
||||
|
||||
# Create a new group with members
|
||||
gt mail group create <name> [members...]
|
||||
gt mail group create ops-team gastown/witness gastown/crew/max
|
||||
|
||||
# Add member to group
|
||||
gt mail group add <name> <member>
|
||||
|
||||
# Remove member from group
|
||||
gt mail group remove <name> <member>
|
||||
|
||||
# Delete a group
|
||||
gt mail group delete <name>
|
||||
```
|
||||
|
||||
### Channel Management
|
||||
|
||||
```bash
|
||||
# List all channels
|
||||
gt mail channel
|
||||
gt mail channel list
|
||||
|
||||
# View channel messages
|
||||
gt mail channel <name>
|
||||
gt mail channel show <name>
|
||||
|
||||
# Create a channel with retention policy
|
||||
gt mail channel create <name> [--retain-count=N] [--retain-hours=N]
|
||||
gt mail channel create alerts --retain-count=100
|
||||
|
||||
# Delete a channel
|
||||
gt mail channel delete <name>
|
||||
```
|
||||
|
||||
### Sending Messages
|
||||
|
||||
The `gt mail send` command now supports groups, queues, and channels:
|
||||
|
||||
```bash
|
||||
# Send to a group (expands to all members)
|
||||
gt mail send my-group -s "Subject" -m "Body"
|
||||
|
||||
# Send to a queue (single message, workers claim)
|
||||
gt mail send queue:my-queue -s "Work item" -m "Details"
|
||||
|
||||
# Send to a channel (broadcast with retention)
|
||||
gt mail send channel:my-channel -s "Announcement" -m "Content"
|
||||
|
||||
# Direct address (unchanged)
|
||||
gt mail send gastown/crew/max -s "Hello" -m "World"
|
||||
```
|
||||
|
||||
## Address Resolution
|
||||
|
||||
When sending mail, addresses are resolved in this order:
|
||||
|
||||
1. **Explicit prefix** - If address starts with `group:`, `queue:`, or `channel:`, use that type directly
|
||||
2. **Contains `/`** - Treat as agent address or pattern (direct delivery)
|
||||
3. **Starts with `@`** - Special pattern (`@town`, `@crew`, etc.) or beads-native group
|
||||
4. **Name lookup** - Search for group → queue → channel by name
|
||||
|
||||
If a name matches multiple types (e.g., both a group and a channel named "alerts"), the resolver returns an error and requires an explicit prefix.
|
||||
|
||||
## Key Implementation Files
|
||||
|
||||
| File | Description |
|
||||
|------|-------------|
|
||||
| `internal/beads/beads_group.go` | Group bead CRUD operations |
|
||||
| `internal/beads/beads_queue.go` | Queue bead CRUD operations |
|
||||
| `internal/beads/beads_channel.go` | Channel bead + retention logic |
|
||||
| `internal/mail/resolve.go` | Address resolution logic |
|
||||
| `internal/cmd/mail_group.go` | Group CLI commands |
|
||||
| `internal/cmd/mail_channel.go` | Channel CLI commands |
|
||||
| `internal/cmd/mail_send.go` | Updated send with resolver |
|
||||
|
||||
## Retention Policy
|
||||
|
||||
Channels support two retention mechanisms:
|
||||
|
||||
- **Count-based** (`--retain-count=N`): Keep only the last N messages
|
||||
- **Time-based** (`--retain-hours=N`): Delete messages older than N hours
|
||||
|
||||
Retention is enforced:
|
||||
1. **On-write**: After posting a new message, old messages are pruned
|
||||
2. **On-patrol**: Deacon patrol runs `PruneAllChannels()` as a backup cleanup
|
||||
|
||||
The patrol uses a 10% buffer to avoid thrashing (only prunes if count > retainCount × 1.1).
|
||||
|
||||
## Examples
|
||||
|
||||
### Create a team distribution group
|
||||
|
||||
```bash
|
||||
# Create a group for the ops team
|
||||
gt mail group create ops-team gastown/witness gastown/crew/max deacon/
|
||||
|
||||
# Send to the group
|
||||
gt mail send ops-team -s "Team meeting" -m "Tomorrow at 10am"
|
||||
|
||||
# Add a new member
|
||||
gt mail group add ops-team gastown/crew/dennis
|
||||
```
|
||||
|
||||
### Set up an alerts channel
|
||||
|
||||
```bash
|
||||
# Create an alerts channel that keeps last 50 messages
|
||||
gt mail channel create alerts --retain-count=50
|
||||
|
||||
# Send an alert
|
||||
gt mail send channel:alerts -s "Build failed" -m "See CI for details"
|
||||
|
||||
# View recent alerts
|
||||
gt mail channel alerts
|
||||
```
|
||||
|
||||
### Create nested groups
|
||||
|
||||
```bash
|
||||
# Create role-based groups
|
||||
gt mail group create witnesses */witness
|
||||
gt mail group create leads gastown/crew/max gastown/crew/dennis
|
||||
|
||||
# Create a group that includes other groups
|
||||
gt mail group create all-hands witnesses leads mayor/
|
||||
```
|
||||
@@ -51,6 +51,7 @@ so you can see when it lands and what was included.
|
||||
|---------|-------------|-----|-------------|
|
||||
| **Convoy** | Yes | hq-cv-* | Tracking unit. What you create, track, get notified about. |
|
||||
| **Swarm** | No | None | Ephemeral. "The workers currently on this convoy's issues." |
|
||||
| **Stranded Convoy** | Yes | hq-cv-* | A convoy with ready work but no polecats assigned. Needs attention. |
|
||||
|
||||
When you "kick off a swarm", you're really:
|
||||
1. Creating a convoy (the tracking unit)
|
||||
@@ -223,4 +224,4 @@ Use rig status for "what's everyone in this rig working on?"
|
||||
## See Also
|
||||
|
||||
- [Propulsion Principle](propulsion-principle.md) - Worker execution model
|
||||
- [Mail Protocol](mail-protocol.md) - Notification delivery
|
||||
- [Mail Protocol](../design/mail-protocol.md) - Notification delivery
|
||||
@@ -88,15 +88,37 @@ All events include actor attribution:
|
||||
|
||||
## Environment Setup
|
||||
|
||||
The daemon sets these automatically when spawning agents:
|
||||
Gas Town uses a centralized `config.AgentEnv()` function to set environment
|
||||
variables consistently across all agent spawn paths (managers, daemon, boot).
|
||||
|
||||
### Example: Polecat Environment
|
||||
|
||||
```bash
|
||||
# Set by daemon for polecat 'toast' in rig 'gastown'
|
||||
export BD_ACTOR="gastown/polecats/toast"
|
||||
export GIT_AUTHOR_NAME="gastown/polecats/toast"
|
||||
# Set automatically for polecat 'toast' in rig 'gastown'
|
||||
export GT_ROLE="polecat"
|
||||
export GT_RIG="gastown"
|
||||
export GT_POLECAT="toast"
|
||||
export BD_ACTOR="gastown/polecats/toast"
|
||||
export GIT_AUTHOR_NAME="gastown/polecats/toast"
|
||||
export GT_ROOT="/home/user/gt"
|
||||
export BEADS_DIR="/home/user/gt/gastown/.beads"
|
||||
export BEADS_AGENT_NAME="gastown/toast"
|
||||
export BEADS_NO_DAEMON="1" # Polecats use isolated beads context
|
||||
```
|
||||
|
||||
### Example: Crew Environment
|
||||
|
||||
```bash
|
||||
# Set automatically for crew member 'joe' in rig 'gastown'
|
||||
export GT_ROLE="crew"
|
||||
export GT_RIG="gastown"
|
||||
export GT_CREW="joe"
|
||||
export BD_ACTOR="gastown/crew/joe"
|
||||
export GIT_AUTHOR_NAME="gastown/crew/joe"
|
||||
export GT_ROOT="/home/user/gt"
|
||||
export BEADS_DIR="/home/user/gt/gastown/.beads"
|
||||
export BEADS_AGENT_NAME="gastown/joe"
|
||||
export BEADS_NO_DAEMON="1" # Crew uses isolated beads context
|
||||
```
|
||||
|
||||
### Manual Override
|
||||
@@ -108,6 +130,9 @@ export BD_ACTOR="gastown/crew/debug"
|
||||
bd create --title="Test issue" # Will show created_by: gastown/crew/debug
|
||||
```
|
||||
|
||||
See [reference.md](reference.md#environment-variables) for the complete
|
||||
environment variable reference.
|
||||
|
||||
## Identity Parsing
|
||||
|
||||
The format supports programmatic parsing:
|
||||
@@ -180,13 +205,22 @@ steve@example.com ← global identity (from git author)
|
||||
|
||||
**Agents execute. Humans own.** The polecat name in `completed-by: gastown/polecats/toast` is executor attribution. The CV credits the human owner (`steve@example.com`).
|
||||
|
||||
### Polecats Are Ephemeral
|
||||
### Polecats Have Persistent Identities
|
||||
|
||||
Polecats are like K8s pods - ephemeral executors with no persistent identity:
|
||||
- Named pool for human convenience (furiosa, nux, slit)
|
||||
- Names are transient - reused after cleanup
|
||||
- No persistent polecat CV
|
||||
- Work credits the human owner
|
||||
Polecats have **persistent identities but ephemeral sessions**. Like employees who
|
||||
clock in/out: each work session is fresh (new tmux, new worktree), but the identity
|
||||
persists across sessions.
|
||||
|
||||
- **Identity (persistent)**: Agent bead, CV chain, work history
|
||||
- **Session (ephemeral)**: Claude instance, context window
|
||||
- **Sandbox (ephemeral)**: Git worktree, branch
|
||||
|
||||
Work credits the polecat identity, enabling:
|
||||
- Performance tracking per polecat
|
||||
- Capability-based routing (send Go work to polecats with Go track records)
|
||||
- Model comparison (A/B test different models via different polecats)
|
||||
|
||||
See [polecat-lifecycle.md](polecat-lifecycle.md#polecat-identity) for details.
|
||||
|
||||
### Skills Are Derived
|
||||
|
||||
@@ -25,6 +25,7 @@ Protomolecule (frozen template) ─── Solid
|
||||
| **Molecule** | Active workflow instance with trackable steps |
|
||||
| **Wisp** | Ephemeral molecule for patrol cycles (never synced) |
|
||||
| **Digest** | Squashed summary of completed molecule |
|
||||
| **Shiny Workflow** | Canonical polecat formula: design → implement → review → test → submit |
|
||||
|
||||
## Common Mistake: Reading Formulas Directly
|
||||
|
||||
@@ -154,9 +155,54 @@ gt mol squash # Squash attached molecule
|
||||
gt mol step done <step> # Complete a molecule step
|
||||
```
|
||||
|
||||
## Polecat Workflow
|
||||
|
||||
Polecats receive work via their hook - a pinned molecule attached to an issue.
|
||||
They execute molecule steps sequentially, closing each step as they complete it.
|
||||
|
||||
### Molecule Types for Polecats
|
||||
|
||||
| Type | Storage | Use Case |
|
||||
|------|---------|----------|
|
||||
| **Regular Molecule** | `.beads/` (synced) | Discrete deliverables, audit trail |
|
||||
| **Wisp** | `.beads/` (ephemeral) | Patrol cycles, operational loops |
|
||||
|
||||
Polecats typically use **regular molecules** because each assignment has audit value.
|
||||
Patrol agents (Witness, Refinery, Deacon) use **wisps** to prevent accumulation.
|
||||
|
||||
### Hook Management
|
||||
|
||||
```bash
|
||||
gt hook # What's on MY hook?
|
||||
gt mol attach-from-mail <id> # Attach work from mail message
|
||||
gt done # Signal completion (syncs, submits to MQ, notifies Witness)
|
||||
```
|
||||
|
||||
### Polecat Workflow Summary
|
||||
|
||||
```
|
||||
1. Spawn with work on hook
|
||||
2. gt hook # What's hooked?
|
||||
3. bd mol current # Where am I?
|
||||
4. Execute current step
|
||||
5. bd close <step> --continue
|
||||
6. If more steps: GOTO 3
|
||||
7. gt done # Signal completion
|
||||
```
|
||||
|
||||
### Wisp vs Molecule Decision
|
||||
|
||||
| Question | Molecule | Wisp |
|
||||
|----------|----------|------|
|
||||
| Does it need audit trail? | Yes | No |
|
||||
| Will it repeat continuously? | No | Yes |
|
||||
| Is it discrete deliverable? | Yes | No |
|
||||
| Is it operational routine? | No | Yes |
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Use `--continue` for propulsion** - Keep momentum by auto-advancing
|
||||
2. **Check progress with `bd mol current`** - Know where you are before resuming
|
||||
3. **Squash completed molecules** - Create digests for audit trail
|
||||
4. **Burn routine wisps** - Don't accumulate ephemeral patrol data
|
||||
1. **CRITICAL: Close steps in real-time** - Mark `in_progress` BEFORE starting, `closed` IMMEDIATELY after completing. Never batch-close steps at the end. Molecules ARE the ledger - each step closure is a timestamped CV entry. Batch-closing corrupts the timeline and violates HOP's core promise.
|
||||
2. **Use `--continue` for propulsion** - Keep momentum by auto-advancing
|
||||
3. **Check progress with `bd mol current`** - Know where you are before resuming
|
||||
4. **Squash completed molecules** - Create digests for audit trail
|
||||
5. **Burn routine wisps** - Don't accumulate ephemeral patrol data
|
||||
@@ -5,8 +5,56 @@
|
||||
## Overview
|
||||
|
||||
Polecats have three distinct lifecycle layers that operate independently. Confusing
|
||||
these layers leads to heresies like "idle polecats" and misunderstanding when
|
||||
recycling occurs.
|
||||
these layers leads to "heresies" like thinking there are "idle polecats" and
|
||||
misunderstanding when recycling occurs.
|
||||
|
||||
## The Three Operating States
|
||||
|
||||
Polecats have exactly three operating states. There is **no idle pool**.
|
||||
|
||||
| State | Description | How it happens |
|
||||
|-------|-------------|----------------|
|
||||
| **Working** | Actively doing assigned work | Normal operation |
|
||||
| **Stalled** | Session stopped mid-work | Interrupted, crashed, or timed out without being nudged |
|
||||
| **Zombie** | Completed work but failed to die | `gt done` failed during cleanup |
|
||||
|
||||
**The key distinction:** Zombies completed their work; stalled polecats did not.
|
||||
|
||||
- **Stalled** = supposed to be working, but stopped. The polecat was interrupted or
|
||||
crashed and was never nudged back to life. Work is incomplete.
|
||||
- **Zombie** = finished work, tried to exit via `gt done`, but cleanup failed. The
|
||||
session should have shut down but didn't. Work is complete, just stuck in limbo.
|
||||
|
||||
There is no "idle" state. Polecats don't wait around between tasks. When work is
|
||||
done, `gt done` shuts down the session. If you see a non-working polecat, something
|
||||
is broken.
|
||||
|
||||
## The Self-Cleaning Polecat Model
|
||||
|
||||
**Polecats are responsible for their own cleanup.** When a polecat completes its
|
||||
work unit, it:
|
||||
|
||||
1. Signals completion via `gt done`
|
||||
2. Exits its session immediately (no idle waiting)
|
||||
3. Requests its own nuke (self-delete)
|
||||
|
||||
This removes dependency on the Witness/Deacon for cleanup and ensures polecats
|
||||
never sit idle. The simple model: **sandbox dies with session**.
|
||||
|
||||
### Why Self-Cleaning?
|
||||
|
||||
- **No idle polecats** - There's no state where a polecat exists without work
|
||||
- **Reduced watchdog overhead** - Deacon patrols for stalled/zombie polecats, not idle ones
|
||||
- **Faster turnover** - Resources freed immediately on completion
|
||||
- **Simpler mental model** - Done means gone
|
||||
|
||||
### What About Pending Merges?
|
||||
|
||||
The Refinery owns the merge queue. Once `gt done` submits work:
|
||||
- The branch is pushed to origin
|
||||
- Work exists in the MQ, not in the polecat
|
||||
- If rebase fails, Refinery re-implements on new baseline (fresh polecat)
|
||||
- The original polecat is already gone - no sending work "back"
|
||||
|
||||
## The Three Layers
|
||||
|
||||
@@ -92,19 +140,23 @@ The slot:
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ gt done │
|
||||
│ → Polecat signals completion to Witness │
|
||||
│ → Session exits (no idle waiting) │
|
||||
│ → Witness receives POLECAT_DONE event │
|
||||
│ gt done (self-cleaning) │
|
||||
│ → Push branch to origin │
|
||||
│ → Submit work to merge queue (MR bead) │
|
||||
│ → Request self-nuke (sandbox + session cleanup) │
|
||||
│ → Exit immediately │
|
||||
│ │
|
||||
│ Work now lives in MQ, not in polecat. │
|
||||
│ Polecat is GONE. No idle state. │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Witness: gt polecat nuke │
|
||||
│ → Verify work landed (merged or in MQ) │
|
||||
│ → Delete sandbox (remove worktree) │
|
||||
│ → Kill tmux session │
|
||||
│ → Release slot back to pool │
|
||||
│ Refinery: merge queue │
|
||||
│ → Rebase and merge to main │
|
||||
│ → Close the issue │
|
||||
│ → If conflict: spawn FRESH polecat to re-implement │
|
||||
│ (never send work back to original polecat - it's gone) │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
@@ -127,19 +179,24 @@ during normal operation.
|
||||
|
||||
## Anti-Patterns
|
||||
|
||||
### Idle Polecats
|
||||
### "Idle" Polecats (They Don't Exist)
|
||||
|
||||
**Myth:** Polecats wait between tasks in an idle state.
|
||||
**Myth:** Polecats wait between tasks in an idle pool.
|
||||
|
||||
**Reality:** Polecats don't exist without work. The lifecycle is:
|
||||
**Reality:** There is no idle state. Polecats don't exist without work:
|
||||
1. Work assigned → polecat spawned
|
||||
2. Work done → polecat nuked
|
||||
3. There is no idle state
|
||||
2. Work done → `gt done` → session exits → polecat nuked
|
||||
3. There is no step 3 where they wait around
|
||||
|
||||
If you see a polecat without work, something is broken. Either:
|
||||
- The hook was lost (bug)
|
||||
- The session crashed before loading context
|
||||
- Manual intervention corrupted state
|
||||
If you see a non-working polecat, it's in a **failure state**:
|
||||
|
||||
| What you see | What it is | What went wrong |
|
||||
|--------------|------------|-----------------|
|
||||
| Session exists but not working | **Stalled** | Interrupted/crashed, never nudged |
|
||||
| Session done but didn't exit | **Zombie** | `gt done` failed during cleanup |
|
||||
|
||||
Don't call these "idle" - that implies they're waiting for work. They're not.
|
||||
A stalled polecat is *supposed* to be working. A zombie is *supposed* to be dead.
|
||||
|
||||
### Manual State Transitions
|
||||
|
||||
@@ -161,20 +218,23 @@ gt polecat nuke Toast # (from Witness, after verification)
|
||||
Polecats manage their own session lifecycle. The Witness manages sandbox lifecycle.
|
||||
External manipulation bypasses verification.
|
||||
|
||||
### Sandboxes Without Work
|
||||
### Sandboxes Without Work (Stalled Polecats)
|
||||
|
||||
**Anti-pattern:** A sandbox exists but no molecule is hooked.
|
||||
**Anti-pattern:** A sandbox exists but no molecule is hooked, or the session isn't running.
|
||||
|
||||
This means:
|
||||
- The polecat was spawned incorrectly
|
||||
- The hook was lost during crash
|
||||
This is a **stalled** polecat. It means:
|
||||
- The session crashed and wasn't nudged back to life
|
||||
- The hook was lost during a crash
|
||||
- State corruption occurred
|
||||
|
||||
This is NOT an "idle" polecat waiting for work. It's stalled - supposed to be
|
||||
working but stopped unexpectedly.
|
||||
|
||||
**Recovery:**
|
||||
```bash
|
||||
# From Witness:
|
||||
gt polecat nuke Toast # Clean slate
|
||||
gt sling gt-abc gastown # Respawn with work
|
||||
gt polecat nuke Toast # Clean up the stalled polecat
|
||||
gt sling gt-abc gastown # Respawn with fresh polecat
|
||||
```
|
||||
|
||||
### Confusing Session with Sandbox
|
||||
@@ -210,16 +270,43 @@ All except `gt done` result in continued work. Only `gt done` signals completion
|
||||
The Witness monitors polecats but does NOT:
|
||||
- Force session cycles (polecats self-manage via handoff)
|
||||
- Interrupt mid-step (unless truly stuck)
|
||||
- Recycle sandboxes between steps
|
||||
- Nuke polecats (polecats self-nuke via `gt done`)
|
||||
|
||||
The Witness DOES:
|
||||
- Detect and nudge stalled polecats (sessions that stopped unexpectedly)
|
||||
- Clean up zombie polecats (sessions where `gt done` failed)
|
||||
- Respawn crashed sessions
|
||||
- Nudge stuck polecats
|
||||
- Nuke completed polecats (after verification)
|
||||
- Handle escalations
|
||||
- Handle escalations from stuck polecats (polecats that explicitly asked for help)
|
||||
|
||||
## Polecat Identity
|
||||
|
||||
**Key insight:** Polecat *identity* is long-lived; only sessions and sandboxes are ephemeral.
|
||||
|
||||
In the HOP model, every entity has a chain (CV) that tracks:
|
||||
- What work they've done
|
||||
- Success/failure rates
|
||||
- Skills demonstrated
|
||||
- Quality metrics
|
||||
|
||||
The polecat *name* (Toast, Shadow, etc.) is a slot from a pool - truly ephemeral.
|
||||
But the *agent identity* that executes as that polecat accumulates a work history.
|
||||
|
||||
```
|
||||
POLECAT IDENTITY (persistent) SESSION (ephemeral) SANDBOX (ephemeral)
|
||||
├── CV chain ├── Claude instance ├── Git worktree
|
||||
├── Work history ├── Context window ├── Branch
|
||||
├── Skills demonstrated └── Dies on handoff └── Dies on gt done
|
||||
└── Credit for work or gt done
|
||||
```
|
||||
|
||||
This distinction matters for:
|
||||
- **Attribution** - Who gets credit for the work?
|
||||
- **Skill routing** - Which agent is best for this task?
|
||||
- **Cost accounting** - Who pays for inference?
|
||||
- **Federation** - Agents having their own chains in a distributed world
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Understanding Gas Town](understanding-gas-town.md) - Role taxonomy and architecture
|
||||
- [Polecat Wisp Architecture](polecat-wisp-architecture.md) - Molecule execution
|
||||
- [Overview](../overview.md) - Role taxonomy and architecture
|
||||
- [Molecules](molecules.md) - Molecule execution and polecat workflow
|
||||
- [Propulsion Principle](propulsion-principle.md) - Why work triggers immediate execution
|
||||
@@ -84,33 +84,47 @@ Each agent bead references its role bead via the `role_bead` field.
|
||||
│ └── town.json Town configuration
|
||||
└── <rig>/ Project container (NOT a git clone)
|
||||
├── config.json Rig identity and beads prefix
|
||||
├── .beads/ → mayor/rig/.beads Symlink to canonical beads
|
||||
├── .repo.git/ Bare repo (shared by worktrees)
|
||||
├── mayor/rig/ Mayor's clone (canonical beads)
|
||||
├── refinery/rig/ Worktree on main
|
||||
├── mayor/rig/ Canonical clone (beads live here)
|
||||
│ └── .beads/ Rig-level beads database
|
||||
├── refinery/rig/ Worktree from mayor/rig
|
||||
├── witness/ No clone (monitors only)
|
||||
├── crew/<name>/ Human workspaces
|
||||
└── polecats/<name>/ Worker worktrees
|
||||
├── crew/<name>/ Human workspaces (full clones)
|
||||
└── polecats/<name>/ Worker worktrees from mayor/rig
|
||||
```
|
||||
|
||||
### Worktree Architecture
|
||||
|
||||
Polecats and the refinery are git worktrees, not full clones. This enables fast spawning
|
||||
and shared object storage. The worktree base is `mayor/rig`:
|
||||
|
||||
```go
|
||||
// From polecat/manager.go - worktrees are based on mayor/rig
|
||||
git worktree add -b polecat/<name>-<timestamp> polecats/<name>
|
||||
```
|
||||
|
||||
Crew workspaces (`crew/<name>/`) are full git clones for human developers who need
|
||||
independent repos. Polecats are ephemeral and benefit from worktree efficiency.
|
||||
|
||||
## Beads Routing
|
||||
|
||||
The `routes.jsonl` file maps issue ID prefixes to their storage locations:
|
||||
The `routes.jsonl` file maps issue ID prefixes to rig locations (relative to town root):
|
||||
|
||||
```jsonl
|
||||
{"prefix":"hq","path":"/Users/stevey/gt/.beads"}
|
||||
{"prefix":"gt","path":"/Users/stevey/gt/gastown/mayor/rig/.beads"}
|
||||
{"prefix":"hq-","path":"."}
|
||||
{"prefix":"gt-","path":"gastown/mayor/rig"}
|
||||
{"prefix":"bd-","path":"beads/mayor/rig"}
|
||||
```
|
||||
|
||||
Routes point to `mayor/rig` because that's where the canonical `.beads/` lives.
|
||||
This enables transparent cross-rig beads operations:
|
||||
|
||||
```bash
|
||||
bd show hq-mayor # Routes to town beads
|
||||
bd show gt-xyz # Routes to gastown rig beads
|
||||
bd show hq-mayor # Routes to town beads (~/.gt/.beads)
|
||||
bd show gt-xyz # Routes to gastown/mayor/rig/.beads
|
||||
```
|
||||
|
||||
## See Also
|
||||
|
||||
- [reference.md](reference.md) - Command reference
|
||||
- [molecules.md](molecules.md) - Workflow molecules
|
||||
- [identity.md](identity.md) - Agent identity and BD_ACTOR
|
||||
- [reference.md](../reference.md) - Command reference
|
||||
- [molecules.md](../concepts/molecules.md) - Workflow molecules
|
||||
- [identity.md](../concepts/identity.md) - Agent identity and BD_ACTOR
|
||||
197
docs/design/convoy-lifecycle.md
Normal file
197
docs/design/convoy-lifecycle.md
Normal file
@@ -0,0 +1,197 @@
|
||||
# Convoy Lifecycle Design
|
||||
|
||||
> Making convoys actively converge on completion.
|
||||
|
||||
## Problem Statement
|
||||
|
||||
Convoys are passive trackers. They group work but don't drive it. The completion
|
||||
loop has a structural gap:
|
||||
|
||||
```
|
||||
Create → Assign → Execute → Issues close → ??? → Convoy closes
|
||||
```
|
||||
|
||||
The `???` is "Deacon patrol runs `gt convoy check`" - a poll-based single point of
|
||||
failure. When Deacon is down, convoys don't close. Work completes but the loop
|
||||
never lands.
|
||||
|
||||
## Current State
|
||||
|
||||
### What Works
|
||||
- Convoy creation and issue tracking
|
||||
- `gt convoy status` shows progress
|
||||
- `gt convoy stranded` finds unassigned work
|
||||
- `gt convoy check` auto-closes completed convoys
|
||||
|
||||
### What Breaks
|
||||
1. **Poll-based completion**: Only Deacon runs `gt convoy check`
|
||||
2. **No event-driven trigger**: Issue close doesn't propagate to convoy
|
||||
3. **No manual close**: Can't force-close abandoned convoys
|
||||
4. **Single observer**: No redundant completion detection
|
||||
5. **Weak notification**: Convoy owner not always clear
|
||||
|
||||
## Design: Active Convoy Convergence
|
||||
|
||||
### Principle: Event-Driven, Redundantly Observed
|
||||
|
||||
Convoy completion should be:
|
||||
1. **Event-driven**: Triggered by issue close, not polling
|
||||
2. **Redundantly observed**: Multiple agents can detect and close
|
||||
3. **Manually overridable**: Humans can force-close
|
||||
|
||||
### Event-Driven Completion
|
||||
|
||||
When an issue closes, check if it's tracked by a convoy:
|
||||
|
||||
```
|
||||
Issue closes
|
||||
↓
|
||||
Is issue tracked by convoy? ──(no)──► done
|
||||
│
|
||||
(yes)
|
||||
↓
|
||||
Run gt convoy check <convoy-id>
|
||||
↓
|
||||
All tracked issues closed? ──(no)──► done
|
||||
│
|
||||
(yes)
|
||||
↓
|
||||
Close convoy, send notifications
|
||||
```
|
||||
|
||||
**Implementation options:**
|
||||
1. Daemon hook on `bd update --status=closed`
|
||||
2. Refinery step after successful merge
|
||||
3. Witness step after verifying polecat completion
|
||||
|
||||
Option 1 is most reliable - catches all closes regardless of source.
|
||||
|
||||
### Redundant Observers
|
||||
|
||||
Per PRIMING.md: "Redundant Monitoring Is Resilience."
|
||||
|
||||
Three places should check convoy completion:
|
||||
|
||||
| Observer | When | Scope |
|
||||
|----------|------|-------|
|
||||
| **Daemon** | On any issue close | All convoys |
|
||||
| **Witness** | After verifying polecat work | Rig's convoy work |
|
||||
| **Deacon** | Periodic patrol | All convoys (backup) |
|
||||
|
||||
Any observer noticing completion triggers close. Idempotent - closing
|
||||
an already-closed convoy is a no-op.
|
||||
|
||||
### Manual Close Command
|
||||
|
||||
**Desire path**: `gt convoy close` is expected but missing.
|
||||
|
||||
```bash
|
||||
# Close a completed convoy
|
||||
gt convoy close hq-cv-abc
|
||||
|
||||
# Force-close an abandoned convoy
|
||||
gt convoy close hq-cv-xyz --reason="work done differently"
|
||||
|
||||
# Close with explicit notification
|
||||
gt convoy close hq-cv-abc --notify mayor/
|
||||
```
|
||||
|
||||
Use cases:
|
||||
- Abandoned convoys no longer relevant
|
||||
- Work completed outside tracked path
|
||||
- Force-closing stuck convoys
|
||||
|
||||
### Convoy Owner/Requester
|
||||
|
||||
Track who requested the convoy for targeted notifications:
|
||||
|
||||
```bash
|
||||
gt convoy create "Feature X" gt-abc --owner mayor/ --notify overseer
|
||||
```
|
||||
|
||||
| Field | Purpose |
|
||||
|-------|---------|
|
||||
| `owner` | Who requested (gets completion notification) |
|
||||
| `notify` | Additional subscribers |
|
||||
|
||||
If `owner` is not specified, it defaults to the creator (taken from `created_by`).
|
||||
|
||||
### Convoy States
|
||||
|
||||
```
|
||||
OPEN ──(all issues close)──► CLOSED
|
||||
│ │
|
||||
│ ▼
|
||||
│ (add issues)
|
||||
│ │
|
||||
└─────────────────────────────┘
|
||||
(auto-reopens)
|
||||
```
|
||||
|
||||
Adding issues to a closed convoy reopens it automatically.
|
||||
|
||||
**New state for abandonment:**
|
||||
|
||||
```
|
||||
OPEN ──► CLOSED (completed)
|
||||
│
|
||||
└────► ABANDONED (force-closed without completion)
|
||||
```
|
||||
|
||||
### Timeout/SLA (Future)
|
||||
|
||||
Optional `due_at` field for convoy deadline:
|
||||
|
||||
```bash
|
||||
gt convoy create "Sprint work" gt-abc --due="2026-01-15"
|
||||
```
|
||||
|
||||
Overdue convoys surface in `gt convoy stranded --overdue`.
|
||||
|
||||
## Commands
|
||||
|
||||
### New: `gt convoy close`
|
||||
|
||||
```bash
|
||||
gt convoy close <convoy-id> [--reason=<reason>] [--notify=<agent>]
|
||||
```
|
||||
|
||||
- Closes convoy regardless of tracked issue status
|
||||
- Sets `close_reason` field
|
||||
- Sends notification to owner and subscribers
|
||||
- Idempotent - closing closed convoy is no-op
|
||||
|
||||
### Enhanced: `gt convoy check`
|
||||
|
||||
```bash
|
||||
# Check all convoys (current behavior)
|
||||
gt convoy check
|
||||
|
||||
# Check specific convoy (new)
|
||||
gt convoy check <convoy-id>
|
||||
|
||||
# Dry-run mode
|
||||
gt convoy check --dry-run
|
||||
```
|
||||
|
||||
### New: `gt convoy reopen`
|
||||
|
||||
```bash
|
||||
gt convoy reopen <convoy-id>
|
||||
```
|
||||
|
||||
Explicit reopen for clarity (currently implicit via add).
|
||||
|
||||
## Implementation Priority
|
||||
|
||||
1. **P0: `gt convoy close`** - Desire path, escape hatch
|
||||
2. **P0: Event-driven check** - Daemon hook on issue close
|
||||
3. **P1: Redundant observers** - Witness/Refinery integration
|
||||
4. **P2: Owner field** - Targeted notifications
|
||||
5. **P3: Timeout/SLA** - Deadline tracking
|
||||
|
||||
## Related
|
||||
|
||||
- [convoy.md](../concepts/convoy.md) - Convoy concept and usage
|
||||
- [watchdog-chain.md](watchdog-chain.md) - Deacon patrol system
|
||||
- [mail-protocol.md](mail-protocol.md) - Notification delivery
|
||||
495
docs/design/dog-pool-architecture.md
Normal file
495
docs/design/dog-pool-architecture.md
Normal file
@@ -0,0 +1,495 @@
|
||||
# Dog Pool Architecture for Concurrent Shutdown Dances
|
||||
|
||||
> Design document for gt-fsld8
|
||||
|
||||
## Problem Statement
|
||||
|
||||
Boot needs to run multiple shutdown-dance molecules concurrently when multiple death
|
||||
warrants are issued. The current hook design only allows one molecule per agent.
|
||||
|
||||
Example scenario:
|
||||
- Warrant 1: Kill stuck polecat Toast (60s into interrogation)
|
||||
- Warrant 2: Kill stuck polecat Shadow (just started)
|
||||
- Warrant 3: Kill stuck witness (120s into interrogation)
|
||||
|
||||
All three need concurrent tracking, independent timeouts, and separate outcomes.
|
||||
|
||||
## Design Decision: Lightweight State Machines
|
||||
|
||||
After analyzing the options, the shutdown-dance does NOT need Claude sessions.
|
||||
The dance is a deterministic state machine:
|
||||
|
||||
```
|
||||
WARRANT -> INTERROGATE -> EVALUATE -> PARDON|EXECUTE
|
||||
```
|
||||
|
||||
Each step is mechanical:
|
||||
1. Send a tmux message (no LLM needed)
|
||||
2. Wait for timeout or response (timer)
|
||||
3. Check tmux output for ALIVE keyword (string match)
|
||||
4. Repeat or terminate
|
||||
|
||||
**Decision**: Dogs are lightweight Go routines, not Claude sessions.
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
```
|
||||
┌────────────────────────────────────────────────────────────────────┐
|
||||
│ BOOT │
|
||||
│ (Claude session in tmux) │
|
||||
│ │
|
||||
│ ┌──────────────────────────────────────────────────────────────┐ │
|
||||
│ │ Dog Manager │ │
|
||||
│ │ │ │
|
||||
│ │ Pool: [Dog1, Dog2, Dog3, ...] (goroutines + state files) │ │
|
||||
│ │ │ │
|
||||
│ │ allocate() → Dog │ │
|
||||
│ │ release(Dog) │ │
|
||||
│ │ status() → []DogStatus │ │
|
||||
│ └──────────────────────────────────────────────────────────────┘ │
|
||||
│ │
|
||||
│ Boot's job: │
|
||||
│ - Watch for warrants (file or event) │
|
||||
│ - Allocate dog from pool │
|
||||
│ - Monitor dog progress │
|
||||
│ - Handle dog completion/failure │
|
||||
│ - Report results │
|
||||
└────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Dog Structure
|
||||
|
||||
```go
|
||||
// Dog represents a shutdown-dance executor
|
||||
type Dog struct {
|
||||
ID string // Unique ID (e.g., "dog-1704567890123")
|
||||
Warrant *Warrant // The death warrant being processed
|
||||
State ShutdownDanceState
|
||||
Attempt int // Current interrogation attempt (1-3)
|
||||
StartedAt time.Time
|
||||
StateFile string // Persistent state: ~/gt/deacon/dogs/active/<id>.json
|
||||
}
|
||||
|
||||
type ShutdownDanceState string
|
||||
|
||||
const (
|
||||
StateIdle ShutdownDanceState = "idle"
|
||||
StateInterrogating ShutdownDanceState = "interrogating" // Sent message, waiting
|
||||
StateEvaluating ShutdownDanceState = "evaluating" // Checking response
|
||||
StatePardoned ShutdownDanceState = "pardoned" // Session responded
|
||||
StateExecuting ShutdownDanceState = "executing" // Killing session
|
||||
StateComplete ShutdownDanceState = "complete" // Done, ready for cleanup
|
||||
StateFailed ShutdownDanceState = "failed" // Dog crashed/errored
|
||||
)
|
||||
|
||||
type Warrant struct {
|
||||
ID string // Bead ID for the warrant
|
||||
Target string // Session to interrogate (e.g., "gt-gastown-Toast")
|
||||
Reason string // Why warrant was issued
|
||||
Requester string // Who filed the warrant
|
||||
FiledAt time.Time
|
||||
}
|
||||
```
|
||||
|
||||
## Pool Design
|
||||
|
||||
### Fixed Pool Size
|
||||
|
||||
**Decision**: Fixed pool of 5 dogs, configurable via environment.
|
||||
|
||||
Rationale:
|
||||
- Dynamic sizing adds complexity without clear benefit
|
||||
- 5 concurrent shutdown dances handles worst-case scenarios
|
||||
- If pool exhausted, warrants queue (better than infinite dog spawning)
|
||||
- Memory footprint is negligible (goroutines + small state files)
|
||||
|
||||
```go
|
||||
const (
|
||||
DefaultPoolSize = 5
|
||||
MaxPoolSize = 20
|
||||
)
|
||||
|
||||
type DogPool struct {
|
||||
mu sync.Mutex
|
||||
dogs []*Dog // All dogs in pool
|
||||
idle chan *Dog // Channel of available dogs
|
||||
active map[string]*Dog // ID -> Dog for active dogs
|
||||
stateDir string // ~/gt/deacon/dogs/active/
|
||||
}
|
||||
|
||||
func (p *DogPool) Allocate(warrant *Warrant) (*Dog, error) {
|
||||
select {
|
||||
case dog := <-p.idle:
|
||||
dog.Warrant = warrant
|
||||
dog.State = StateInterrogating
|
||||
dog.Attempt = 1
|
||||
dog.StartedAt = time.Now()
|
||||
p.active[dog.ID] = dog
|
||||
return dog, nil
|
||||
default:
|
||||
return nil, ErrPoolExhausted
|
||||
}
|
||||
}
|
||||
|
||||
func (p *DogPool) Release(dog *Dog) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
delete(p.active, dog.ID)
|
||||
dog.Reset()
|
||||
p.idle <- dog
|
||||
}
|
||||
```
|
||||
|
||||
### Why Not Dynamic Pool?
|
||||
|
||||
Considered but rejected:
|
||||
- Adding dogs on demand increases complexity
|
||||
- No clear benefit - warrants rarely exceed 5 concurrent
|
||||
- If needed, raise DefaultPoolSize
|
||||
- Simpler to reason about fixed resources
|
||||
|
||||
## Communication: State Files + Events
|
||||
|
||||
### State Persistence
|
||||
|
||||
Each active dog writes state to `~/gt/deacon/dogs/active/<id>.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "dog-1704567890123",
|
||||
"warrant": {
|
||||
"id": "gt-abc123",
|
||||
"target": "gt-gastown-Toast",
|
||||
"reason": "no_response_health_check",
|
||||
"requester": "deacon",
|
||||
"filed_at": "2026-01-07T20:15:00Z"
|
||||
},
|
||||
"state": "interrogating",
|
||||
"attempt": 2,
|
||||
"started_at": "2026-01-07T20:15:00Z",
|
||||
"last_message_at": "2026-01-07T20:16:00Z",
|
||||
"next_timeout": "2026-01-07T20:18:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
### Boot Monitoring
|
||||
|
||||
Boot monitors dogs via:
|
||||
1. **Polling**: `gt dog status --active` every tick
|
||||
2. **Completion files**: Dogs write `<id>.done` when complete
|
||||
|
||||
```go
|
||||
type DogResult struct {
|
||||
DogID string
|
||||
Warrant *Warrant
|
||||
Outcome DogOutcome // pardoned | executed | failed
|
||||
Duration time.Duration
|
||||
Details string
|
||||
}
|
||||
|
||||
type DogOutcome string
|
||||
|
||||
const (
|
||||
OutcomePardoned DogOutcome = "pardoned" // Session responded
|
||||
OutcomeExecuted DogOutcome = "executed" // Session killed
|
||||
OutcomeFailed DogOutcome = "failed" // Dog crashed
|
||||
)
|
||||
```
|
||||
|
||||
### Why Not Mail?
|
||||
|
||||
Considered but rejected for dog<->boot communication:
|
||||
- Mail is async, poll-based - adds latency
|
||||
- State files are simpler for local coordination
|
||||
- Dogs don't need complex inter-agent communication
|
||||
- Keep mail for external coordination (Witness, Mayor)
|
||||
|
||||
## Shutdown Dance State Machine
|
||||
|
||||
Each dog executes this state machine:
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────┐
|
||||
│ │
|
||||
▼ │
|
||||
┌───────────────────────────┐ │
|
||||
│ INTERROGATING │ │
|
||||
│ │ │
|
||||
│ 1. Send health check │ │
|
||||
│ 2. Start timeout timer │ │
|
||||
└───────────┬───────────────┘ │
|
||||
│ │
|
||||
│ timeout or response │
|
||||
▼ │
|
||||
┌───────────────────────────┐ │
|
||||
│ EVALUATING │ │
|
||||
│ │ │
|
||||
│ Check tmux output for │ │
|
||||
│ ALIVE keyword │ │
|
||||
└───────────┬───────────────┘ │
|
||||
│ │
|
||||
┌───────┴───────┐ │
|
||||
│ │ │
|
||||
▼ ▼ │
|
||||
[ALIVE found] [No ALIVE] │
|
||||
│ │ │
|
||||
│ │ attempt < 3? │
|
||||
│ ├──────────────────────────────────→─┘
|
||||
│ │ yes: attempt++, longer timeout
|
||||
│ │
|
||||
│ │ no: attempt == 3
|
||||
▼ ▼
|
||||
┌─────────┐ ┌─────────────┐
|
||||
│ PARDONED│ │ EXECUTING │
|
||||
│ │ │ │
|
||||
│ Cancel │ │ Kill tmux │
|
||||
│ warrant │ │ session │
|
||||
└────┬────┘ └──────┬──────┘
|
||||
│ │
|
||||
└────────┬───────┘
|
||||
│
|
||||
▼
|
||||
┌────────────────┐
|
||||
│ COMPLETE │
|
||||
│ │
|
||||
│ Write result │
|
||||
│ Release dog │
|
||||
└────────────────┘
|
||||
```
|
||||
|
||||
### Timeout Gates
|
||||
|
||||
| Attempt | Timeout | Cumulative Wait |
|
||||
|---------|---------|-----------------|
|
||||
| 1 | 60s | 60s |
|
||||
| 2 | 120s | 180s (3 min) |
|
||||
| 3 | 240s | 420s (7 min) |
|
||||
|
||||
### Health Check Message
|
||||
|
||||
```
|
||||
[DOG] HEALTH CHECK: Session {target}, respond ALIVE within {timeout}s or face termination.
|
||||
Warrant reason: {reason}
|
||||
Filed by: {requester}
|
||||
Attempt: {attempt}/3
|
||||
```
|
||||
|
||||
### Response Detection
|
||||
|
||||
```go
|
||||
func (d *Dog) CheckForResponse() bool {
|
||||
tm := tmux.NewTmux()
|
||||
output, err := tm.CapturePane(d.Warrant.Target, 50) // Last 50 lines
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Any output after our health check counts as alive
|
||||
// Specifically look for ALIVE keyword for explicit response
|
||||
return strings.Contains(output, "ALIVE")
|
||||
}
|
||||
```
|
||||
|
||||
## Dog Implementation
|
||||
|
||||
### Not Reusing Polecat Infrastructure
|
||||
|
||||
**Decision**: Dogs do NOT reuse polecat infrastructure.
|
||||
|
||||
Rationale:
|
||||
- Polecats are Claude sessions with molecules, hooks, sandboxes
|
||||
- Dogs are simple state machine executors
|
||||
- Polecats have 3-layer lifecycle (session/sandbox/slot)
|
||||
- Dogs have single-layer lifecycle (just state)
|
||||
- Different resource profiles, different management
|
||||
|
||||
What dogs DO share:
|
||||
- tmux utilities for message sending/capture
|
||||
- State file patterns
|
||||
- Name slot allocation pattern (pool of names, not instances)
|
||||
|
||||
### Dog Execution Loop
|
||||
|
||||
```go
|
||||
func (d *Dog) Run(ctx context.Context) DogResult {
|
||||
d.State = StateInterrogating
|
||||
d.saveState()
|
||||
|
||||
for d.Attempt <= 3 {
|
||||
// Send interrogation message
|
||||
if err := d.sendHealthCheck(); err != nil {
|
||||
return d.fail(err)
|
||||
}
|
||||
|
||||
// Wait for timeout or context cancellation
|
||||
timeout := d.timeoutForAttempt(d.Attempt)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return d.fail(ctx.Err())
|
||||
case <-time.After(timeout):
|
||||
// Timeout reached
|
||||
}
|
||||
|
||||
// Evaluate response
|
||||
d.State = StateEvaluating
|
||||
d.saveState()
|
||||
|
||||
if d.CheckForResponse() {
|
||||
// Session is alive
|
||||
return d.pardon()
|
||||
}
|
||||
|
||||
// No response - try again or execute
|
||||
d.Attempt++
|
||||
if d.Attempt <= 3 {
|
||||
d.State = StateInterrogating
|
||||
d.saveState()
|
||||
}
|
||||
}
|
||||
|
||||
// All attempts exhausted - execute warrant
|
||||
return d.execute()
|
||||
}
|
||||
```
|
||||
|
||||
## Failure Handling
|
||||
|
||||
### Dog Crashes Mid-Dance
|
||||
|
||||
If a dog crashes (Boot process restarts, system crash):
|
||||
|
||||
1. State files persist in `~/gt/deacon/dogs/active/`
|
||||
2. On Boot restart, scan for orphaned state files
|
||||
3. Resume or restart based on state:
|
||||
|
||||
| State | Recovery Action |
|
||||
|------------------|------------------------------------|
|
||||
| interrogating | Restart from current attempt |
|
||||
| evaluating | Check response, continue |
|
||||
| executing | Verify kill, mark complete |
|
||||
| pardoned/complete| Already done, clean up |
|
||||
|
||||
```go
|
||||
func (p *DogPool) RecoverOrphans() error {
|
||||
files, _ := filepath.Glob(p.stateDir + "/*.json")
|
||||
for _, f := range files {
|
||||
state := loadDogState(f)
|
||||
if state.State != StateComplete && state.State != StatePardoned {
|
||||
dog := p.allocateForRecovery(state)
|
||||
go dog.Resume()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
### Handling Pool Exhaustion
|
||||
|
||||
If all dogs are busy when new warrant arrives:
|
||||
|
||||
```go
|
||||
func (b *Boot) HandleWarrant(warrant *Warrant) error {
|
||||
dog, err := b.pool.Allocate(warrant)
|
||||
if err == ErrPoolExhausted {
|
||||
// Queue the warrant for later processing
|
||||
b.warrantQueue.Push(warrant)
|
||||
b.log("Warrant %s queued (pool exhausted)", warrant.ID)
|
||||
return nil
|
||||
}
|
||||
|
||||
go func() {
|
||||
result := dog.Run(b.ctx)
|
||||
b.handleResult(result)
|
||||
b.pool.Release(dog)
|
||||
|
||||
// Check queue for pending warrants
|
||||
if next := b.warrantQueue.Pop(); next != nil {
|
||||
b.HandleWarrant(next)
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
## Directory Structure
|
||||
|
||||
```
|
||||
~/gt/deacon/dogs/
|
||||
├── boot/ # Boot's working directory
|
||||
│ ├── CLAUDE.md # Boot context
|
||||
│ └── .boot-status.json # Boot execution status
|
||||
├── active/ # Active dog state files
|
||||
│ ├── dog-123.json # Dog 1 state
|
||||
│ ├── dog-456.json # Dog 2 state
|
||||
│ └── ...
|
||||
├── completed/ # Completed dance records (for audit)
|
||||
│ ├── dog-789.json # Historical record
|
||||
│ └── ...
|
||||
└── warrants/ # Pending warrant queue
|
||||
├── warrant-abc.json
|
||||
└── ...
|
||||
```
|
||||
|
||||
## Command Interface
|
||||
|
||||
```bash
|
||||
# Pool status
|
||||
gt dog pool status
|
||||
# Output:
|
||||
# Dog Pool: 3/5 active
|
||||
# dog-123: interrogating Toast (attempt 2, 45s remaining)
|
||||
# dog-456: executing Shadow
|
||||
# dog-789: idle
|
||||
|
||||
# Manual dog operations (for debugging)
|
||||
gt dog pool allocate <warrant-id>
|
||||
gt dog pool release <dog-id>
|
||||
|
||||
# View active dances
|
||||
gt dog dances
|
||||
# Output:
|
||||
# Active Shutdown Dances:
|
||||
# dog-123 → Toast: Interrogating (2/3), timeout in 45s
|
||||
# dog-456 → Shadow: Executing warrant
|
||||
|
||||
# View warrant queue
|
||||
gt dog warrants
|
||||
# Output:
|
||||
# Pending Warrants: 2
|
||||
# 1. gt-abc: witness-gastown (stuck_no_progress)
|
||||
# 2. gt-def: polecat-Copper (crash_loop)
|
||||
```
|
||||
|
||||
## Integration with Existing Dogs
|
||||
|
||||
The existing `dog` package (`internal/dog/`) manages Deacon's multi-rig helper dogs.
|
||||
Those are different from shutdown-dance dogs:
|
||||
|
||||
| Aspect | Helper Dogs (existing) | Dance Dogs (new) |
|
||||
|-----------------|-----------------------------|-----------------------------|
|
||||
| Purpose | Cross-rig infrastructure | Shutdown dance execution |
|
||||
| Sessions | Claude sessions | Goroutines (no Claude) |
|
||||
| Worktrees | One per rig | None |
|
||||
| Lifecycle | Long-lived, reusable | Ephemeral per warrant |
|
||||
| State | idle/working | Dance state machine |
|
||||
|
||||
**Recommendation**: Use a separate package to avoid confusion:
|
||||
- `internal/dog/` - existing helper dogs
|
||||
- `internal/shutdown/` - shutdown dance pool
|
||||
|
||||
## Summary: Answers to Design Questions
|
||||
|
||||
| Question | Answer |
|
||||
|----------|--------|
|
||||
| How many Dogs in pool? | Fixed: 5 (configurable via GT_DOG_POOL_SIZE) |
|
||||
| How do Dogs communicate with Boot? | State files + completion markers |
|
||||
| Are Dogs tmux sessions? | No - goroutines with state machine |
|
||||
| Reuse polecat infrastructure? | No - too heavyweight, different model |
|
||||
| What if Dog dies mid-dance? | State file recovery on Boot restart |
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [x] Architecture document for Dog pool
|
||||
- [x] Clear allocation/deallocation protocol
|
||||
- [x] Failure handling for Dog crashes
|
||||
576
docs/design/escalation-system.md
Normal file
576
docs/design/escalation-system.md
Normal file
@@ -0,0 +1,576 @@
|
||||
# Escalation System Design
|
||||
|
||||
> Detailed design for the Gas Town unified escalation system.
|
||||
> Written 2026-01-11, crew/george session.
|
||||
> Parent epic: gt-i9r20
|
||||
|
||||
## Problem Statement
|
||||
|
||||
Current escalation is ad-hoc "mail Mayor". Issues:
|
||||
- Mayor gets backlogged easily (especially during swarms)
|
||||
- No severity differentiation
|
||||
- No alternative channels (email, SMS, Slack)
|
||||
- No tracking of stale/unacknowledged escalations
|
||||
- No visibility into escalation history
|
||||
|
||||
## Design Goals
|
||||
|
||||
1. **Unified API**: Single `gt escalate` command for all escalation needs
|
||||
2. **Severity-based routing**: Different severities go to different channels
|
||||
3. **Config-driven**: Town config controls routing, no code changes needed
|
||||
4. **Audit trail**: All escalations tracked as beads
|
||||
5. **Stale detection**: Unacknowledged escalations re-escalate automatically
|
||||
6. **Extensible**: Easy to add new notification channels
|
||||
|
||||
---
|
||||
|
||||
## Architecture
|
||||
|
||||
### Components
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ gt escalate command │
|
||||
│ --severity --subject --body --source │
|
||||
└─────────────────────┬───────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Escalation Manager │
|
||||
│ 1. Read config (settings/escalation.json) │
|
||||
│ 2. Create escalation bead │
|
||||
│ 3. Execute route actions for severity │
|
||||
└─────────────────────┬───────────────────────────────────────┘
|
||||
│
|
||||
┌───────────┼───────────┬───────────┐
|
||||
▼ ▼ ▼ ▼
|
||||
┌───────┐ ┌─────────┐ ┌───────┐ ┌───────┐
|
||||
│ Bead │ │ Mail │ │ Email │ │ SMS │
|
||||
│Create │ │ Action │ │Action │ │Action │
|
||||
└───────┘ └─────────┘ └───────┘ └───────┘
|
||||
```
|
||||
|
||||
### Data Flow
|
||||
|
||||
1. Agent calls `gt escalate --severity=high --subject="..." --body="..."`
|
||||
2. Command loads escalation config from `settings/escalation.json`
|
||||
3. Creates escalation bead with severity, subject, body, source labels
|
||||
4. Looks up route for severity level
|
||||
5. Executes each action in the route in order (the `bead` action is already satisfied by step 3; the remaining actions — mail, email, etc. — then run)
|
||||
6. Returns escalation bead ID
|
||||
|
||||
### Stale Escalation Flow
|
||||
|
||||
1. Deacon patrol (or plugin) runs `gt escalate stale`
|
||||
2. Queries for escalation beads older than threshold without `acknowledged:true`
|
||||
3. For each stale escalation:
|
||||
- Bump severity (low→medium, medium→high, high→critical)
|
||||
- Re-execute route for new severity
|
||||
- Add `reescalated:true` label and timestamp
|
||||
|
||||
---
|
||||
|
||||
## Configuration
|
||||
|
||||
### File Location
|
||||
|
||||
`~/gt/settings/escalation.json`
|
||||
|
||||
This follows the existing pattern where `~/gt/settings/` contains town-level behavioral config.
|
||||
|
||||
### Schema
|
||||
|
||||
```go
|
||||
// EscalationConfig represents escalation routing configuration.
|
||||
type EscalationConfig struct {
|
||||
Type string `json:"type"` // "escalation"
|
||||
Version int `json:"version"` // schema version
|
||||
|
||||
// Routes maps severity levels to action lists.
|
||||
// Actions are executed in order.
|
||||
Routes map[string][]string `json:"routes"`
|
||||
|
||||
// Contacts contains contact information for actions.
|
||||
Contacts EscalationContacts `json:"contacts"`
|
||||
|
||||
// StaleThreshold is how long before an unacknowledged escalation
|
||||
// is considered stale and gets re-escalated. Default: "4h"
|
||||
StaleThreshold string `json:"stale_threshold,omitempty"`
|
||||
|
||||
// MaxReescalations limits how many times an escalation can be
|
||||
// re-escalated. Default: 2 (low→medium→high, then stops)
|
||||
MaxReescalations int `json:"max_reescalations,omitempty"`
|
||||
}
|
||||
|
||||
// EscalationContacts contains contact information.
|
||||
type EscalationContacts struct {
|
||||
HumanEmail string `json:"human_email,omitempty"`
|
||||
HumanSMS string `json:"human_sms,omitempty"`
|
||||
SlackWebhook string `json:"slack_webhook,omitempty"`
|
||||
}
|
||||
|
||||
const CurrentEscalationVersion = 1
|
||||
```
|
||||
|
||||
### Default Configuration
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "escalation",
|
||||
"version": 1,
|
||||
"routes": {
|
||||
"low": ["bead"],
|
||||
"medium": ["bead", "mail:mayor"],
|
||||
"high": ["bead", "mail:mayor", "email:human"],
|
||||
"critical": ["bead", "mail:mayor", "email:human", "sms:human"]
|
||||
},
|
||||
"contacts": {
|
||||
"human_email": "",
|
||||
"human_sms": ""
|
||||
},
|
||||
"stale_threshold": "4h",
|
||||
"max_reescalations": 2
|
||||
}
|
||||
```
|
||||
|
||||
### Action Types
|
||||
|
||||
| Action | Format | Behavior |
|
||||
|--------|--------|----------|
|
||||
| `bead` | `bead` | Create escalation bead (always runs first; implicit — the bead is created before any other route action, even if `bead` is omitted from the route) |
|
||||
| `mail:<target>` | `mail:mayor` | Send gt mail to target |
|
||||
| `email:human` | `email:human` | Send email to `contacts.human_email` |
|
||||
| `sms:human` | `sms:human` | Send SMS to `contacts.human_sms` |
|
||||
| `slack` | `slack` | Post to `contacts.slack_webhook` |
|
||||
| `log` | `log` | Write to escalation log file |
|
||||
|
||||
### Severity Levels
|
||||
|
||||
| Level | Use Case | Default Route |
|
||||
|-------|----------|---------------|
|
||||
| `low` | Informational, non-urgent | bead only |
|
||||
| `medium` | Needs attention soon | bead + mail mayor |
|
||||
| `high` | Urgent, needs human | bead + mail + email |
|
||||
| `critical` | Emergency, immediate | bead + mail + email + SMS |
|
||||
|
||||
---
|
||||
|
||||
## Escalation Beads
|
||||
|
||||
### Bead Format
|
||||
|
||||
```yaml
|
||||
id: gt-esc-abc123
|
||||
type: escalation
|
||||
status: open
|
||||
title: "Plugin FAILED: rebuild-gt"
|
||||
labels:
|
||||
- severity:high
|
||||
- source:plugin:rebuild-gt
|
||||
- acknowledged:false
|
||||
- reescalated:false
|
||||
- reescalation_count:0
|
||||
description: |
|
||||
Build failed: make returned exit code 2
|
||||
|
||||
## Context
|
||||
- Source: plugin:rebuild-gt
|
||||
- Original severity: medium
|
||||
- Escalated at: 2026-01-11T19:00:00Z
|
||||
created_at: 2026-01-11T15:00:00Z
|
||||
```
|
||||
|
||||
### Label Schema
|
||||
|
||||
| Label | Values | Purpose |
|
||||
|-------|--------|---------|
|
||||
| `severity:<level>` | low, medium, high, critical | Current severity |
|
||||
| `source:<type>:<name>` | plugin:rebuild-gt, patrol:deacon | What triggered it |
|
||||
| `acknowledged:<bool>` | true, false | Has human acknowledged |
|
||||
| `reescalated:<bool>` | true, false | Has been re-escalated |
|
||||
| `reescalation_count:<n>` | 0, 1, 2, ... | Times re-escalated |
|
||||
| `original_severity:<level>` | low, medium, high | Initial severity |
|
||||
|
||||
---
|
||||
|
||||
## Commands
|
||||
|
||||
### gt escalate
|
||||
|
||||
Create a new escalation.
|
||||
|
||||
```bash
|
||||
gt escalate \
|
||||
--severity=<low|medium|high|critical> \
|
||||
--subject="Short description" \
|
||||
--body="Detailed explanation" \
|
||||
[--source="plugin:rebuild-gt"]
|
||||
```
|
||||
|
||||
**Flags:**
|
||||
- `--severity` (required): Escalation severity level
|
||||
- `--subject` (required): Short description (becomes bead title)
|
||||
- `--body` (required): Detailed explanation (becomes bead description)
|
||||
- `--source`: Source identifier for tracking (e.g., "plugin:rebuild-gt")
|
||||
- `--dry-run`: Show what would happen without executing
|
||||
- `--json`: Output escalation bead ID as JSON
|
||||
|
||||
**Exit codes:**
|
||||
- 0: Success
|
||||
- 1: Config error or invalid flags
|
||||
- 2: Action failed (e.g., email send failed)
|
||||
|
||||
**Example:**
|
||||
```bash
|
||||
gt escalate \
|
||||
--severity=high \
|
||||
--subject="Plugin FAILED: rebuild-gt" \
|
||||
--body="Build failed: make returned exit code 2. Working directory: ~/gt/gastown/crew/george" \
|
||||
--source="plugin:rebuild-gt"
|
||||
|
||||
# Output:
|
||||
# ✓ Created escalation gt-esc-abc123 (severity: high)
|
||||
# → Created bead
|
||||
# → Mailed mayor/
|
||||
# → Emailed steve@example.com
|
||||
```
|
||||
|
||||
### gt escalate ack
|
||||
|
||||
Acknowledge an escalation.
|
||||
|
||||
```bash
|
||||
gt escalate ack <bead-id> [--note="Investigating"]
|
||||
```
|
||||
|
||||
**Behavior:**
|
||||
- Sets `acknowledged:true` label
|
||||
- Optionally adds note to bead
|
||||
- Prevents re-escalation
|
||||
|
||||
**Example:**
|
||||
```bash
|
||||
gt escalate ack gt-esc-abc123 --note="Looking into it"
|
||||
# ✓ Acknowledged gt-esc-abc123
|
||||
```
|
||||
|
||||
### gt escalate list
|
||||
|
||||
List escalations.
|
||||
|
||||
```bash
|
||||
gt escalate list [--severity=...] [--stale] [--unacked] [--all]
|
||||
```
|
||||
|
||||
**Flags:**
|
||||
- `--severity`: Filter by severity level
|
||||
- `--stale`: Show only stale (past threshold, unacked)
|
||||
- `--unacked`: Show only unacknowledged
|
||||
- `--all`: Include acknowledged/closed
|
||||
- `--json`: Output as JSON
|
||||
|
||||
**Example:**
|
||||
```bash
|
||||
gt escalate list --unacked
|
||||
# 📢 Unacknowledged Escalations (2)
|
||||
#
|
||||
# ● gt-esc-abc123 [HIGH] Plugin FAILED: rebuild-gt
|
||||
# Source: plugin:rebuild-gt · Age: 2h · Stale in: 2h
|
||||
# ● gt-esc-def456 [MEDIUM] Witness unresponsive
|
||||
# Source: patrol:deacon · Age: 30m · Stale in: 3h30m
|
||||
```
|
||||
|
||||
### gt escalate stale
|
||||
|
||||
Check for and re-escalate stale escalations.
|
||||
|
||||
```bash
|
||||
gt escalate stale [--dry-run]
|
||||
```
|
||||
|
||||
**Behavior:**
|
||||
- Queries unacked escalations older than `stale_threshold`
|
||||
- For each, bumps severity and re-executes route
|
||||
- Respects `max_reescalations` limit
|
||||
|
||||
**Example:**
|
||||
```bash
|
||||
gt escalate stale
|
||||
# 🔄 Re-escalating stale escalations...
|
||||
#
|
||||
# gt-esc-abc123: medium → high (age: 5h, reescalation: 1/2)
|
||||
# → Emailed steve@example.com
|
||||
#
|
||||
# ✓ Re-escalated 1 escalation
|
||||
```
|
||||
|
||||
### gt escalate close
|
||||
|
||||
Close an escalation (resolved).
|
||||
|
||||
```bash
|
||||
gt escalate close <bead-id> [--reason="Fixed in commit abc123"]
|
||||
```
|
||||
|
||||
**Behavior:**
|
||||
- Sets status to closed
|
||||
- Adds resolution note
|
||||
- Records who closed it
|
||||
|
||||
---
|
||||
|
||||
## Implementation Details
|
||||
|
||||
### File: internal/cmd/escalate.go
|
||||
|
||||
```go
|
||||
package cmd
|
||||
|
||||
// escalateCmd is the parent command for escalation management.
|
||||
var escalateCmd = &cobra.Command{
|
||||
Use: "escalate",
|
||||
Short: "Manage escalations",
|
||||
Long: `Create, acknowledge, and manage escalations with severity-based routing.`,
|
||||
}
|
||||
|
||||
// escalateCreateCmd creates a new escalation.
|
||||
var escalateCreateCmd = &cobra.Command{
|
||||
Use: "escalate --severity=<level> --subject=<text> --body=<text>",
|
||||
Short: "Create a new escalation",
|
||||
// ... implementation
|
||||
}
|
||||
|
||||
// escalateAckCmd acknowledges an escalation.
|
||||
var escalateAckCmd = &cobra.Command{
|
||||
Use: "ack <bead-id>",
|
||||
Short: "Acknowledge an escalation",
|
||||
// ... implementation
|
||||
}
|
||||
|
||||
// escalateListCmd lists escalations.
|
||||
var escalateListCmd = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List escalations",
|
||||
// ... implementation
|
||||
}
|
||||
|
||||
// escalateStaleCmd checks for stale escalations.
|
||||
var escalateStaleCmd = &cobra.Command{
|
||||
Use: "stale",
|
||||
Short: "Re-escalate stale escalations",
|
||||
// ... implementation
|
||||
}
|
||||
|
||||
// escalateCloseCmd closes an escalation.
|
||||
var escalateCloseCmd = &cobra.Command{
|
||||
Use: "close <bead-id>",
|
||||
Short: "Close an escalation",
|
||||
// ... implementation
|
||||
}
|
||||
```
|
||||
|
||||
### File: internal/escalation/manager.go
|
||||
|
||||
```go
|
||||
package escalation
|
||||
|
||||
// Manager handles escalation creation and routing.
|
||||
type Manager struct {
|
||||
config *config.EscalationConfig
|
||||
beads *beads.Client
|
||||
mailer *mail.Client
|
||||
}
|
||||
|
||||
// Escalate creates a new escalation and executes the route.
|
||||
func (m *Manager) Escalate(ctx context.Context, opts EscalateOptions) (*Escalation, error) {
|
||||
// 1. Validate options
|
||||
// 2. Create escalation bead
|
||||
// 3. Look up route for severity
|
||||
// 4. Execute each action
|
||||
// 5. Return escalation with results
|
||||
}
|
||||
|
||||
// Acknowledge marks an escalation as acknowledged.
|
||||
func (m *Manager) Acknowledge(ctx context.Context, beadID string, note string) error {
|
||||
// 1. Load escalation bead
|
||||
// 2. Set acknowledged:true label
|
||||
// 3. Add note if provided
|
||||
}
|
||||
|
||||
// ReescalateStale finds and re-escalates stale escalations.
|
||||
func (m *Manager) ReescalateStale(ctx context.Context) ([]Reescalation, error) {
|
||||
// 1. Query unacked escalations older than threshold
|
||||
// 2. For each, bump severity
|
||||
// 3. Execute new route
|
||||
// 4. Update labels
|
||||
}
|
||||
```
|
||||
|
||||
### File: internal/escalation/actions.go
|
||||
|
||||
```go
|
||||
package escalation
|
||||
|
||||
// Action is an escalation route action.
|
||||
type Action interface {
|
||||
Execute(ctx context.Context, esc *Escalation) error
|
||||
String() string
|
||||
}
|
||||
|
||||
// BeadAction creates the escalation bead.
|
||||
type BeadAction struct{}
|
||||
|
||||
// MailAction sends gt mail.
|
||||
type MailAction struct {
|
||||
Target string // e.g., "mayor"
|
||||
}
|
||||
|
||||
// EmailAction sends email.
|
||||
type EmailAction struct {
|
||||
Recipient string // from config.contacts
|
||||
}
|
||||
|
||||
// SMSAction sends SMS.
|
||||
type SMSAction struct {
|
||||
Recipient string // from config.contacts
|
||||
}
|
||||
|
||||
// ParseAction parses an action string into an Action.
|
||||
func ParseAction(s string) (Action, error) {
|
||||
// "bead" -> BeadAction{}
|
||||
// "mail:mayor" -> MailAction{Target: "mayor"}
|
||||
// "email:human" -> EmailAction{Recipient: "human"}
|
||||
// etc.
|
||||
}
|
||||
```
|
||||
|
||||
### Email/SMS Implementation
|
||||
|
||||
For v1, use simple exec of external commands:
|
||||
|
||||
```go
|
||||
// EmailAction sends email using the 'mail' command or similar.
|
||||
func (a *EmailAction) Execute(ctx context.Context, esc *Escalation) error {
|
||||
// Option 1: Use system mail command
|
||||
// Option 2: Use sendgrid/ses API (future)
|
||||
// Option 3: Use configured webhook
|
||||
|
||||
// For now, just log a placeholder
|
||||
// Real implementation can be added based on user's infrastructure
|
||||
}
|
||||
```
|
||||
|
||||
The email/SMS actions can start as stubs that log warnings, with real implementations added based on the user's infrastructure (SendGrid, Twilio, etc.).
|
||||
|
||||
---
|
||||
|
||||
## Integration Points
|
||||
|
||||
### Plugin System
|
||||
|
||||
Plugins use escalation for failure notification:
|
||||
|
||||
```markdown
|
||||
# In plugin.md execution section:
|
||||
|
||||
On failure:
|
||||
```bash
|
||||
gt escalate \
|
||||
--severity=medium \
|
||||
--subject="Plugin FAILED: rebuild-gt" \
|
||||
--body="$ERROR" \
|
||||
--source="plugin:rebuild-gt"
|
||||
```
|
||||
```
|
||||
|
||||
### Deacon Patrol
|
||||
|
||||
Deacon uses escalation for health issues:
|
||||
|
||||
```bash
|
||||
# In health-scan step:
|
||||
if [ $unresponsive_cycles -ge 5 ]; then
|
||||
gt escalate \
|
||||
--severity=high \
|
||||
--subject="Witness unresponsive: gastown" \
|
||||
--body="Witness has been unresponsive for $unresponsive_cycles cycles" \
|
||||
--source="patrol:deacon:health-scan"
|
||||
fi
|
||||
```
|
||||
|
||||
### Stale Escalation Check
|
||||
|
||||
Can be either:
|
||||
1. A Deacon patrol step
|
||||
2. A plugin (dogfood!)
|
||||
3. Part of `gt escalate` itself (run periodically)
|
||||
|
||||
Recommendation: Start as patrol step, migrate to plugin later.
|
||||
|
||||
---
|
||||
|
||||
## Testing Plan
|
||||
|
||||
### Unit Tests
|
||||
|
||||
- Config loading and validation
|
||||
- Action parsing
|
||||
- Severity level ordering
|
||||
- Re-escalation logic
|
||||
|
||||
### Integration Tests
|
||||
|
||||
- Create escalation → bead exists
|
||||
- Acknowledge → label updated
|
||||
- Stale detection → re-escalation triggers
|
||||
- Route execution → all actions called
|
||||
|
||||
### Manual Testing
|
||||
|
||||
1. `gt escalate --severity=low --subject="Test" --body="Testing"`
|
||||
2. `gt escalate list --unacked`
|
||||
3. `gt escalate ack <id>`
|
||||
4. Wait for stale threshold, run `gt escalate stale`
|
||||
|
||||
---
|
||||
|
||||
## Dependencies
|
||||
|
||||
### Internal Dependencies (task order)
|
||||
|
||||
```
|
||||
gt-i9r20.2 (Config Schema)
|
||||
│
|
||||
▼
|
||||
gt-i9r20.1 (gt escalate command)
|
||||
│
|
||||
├──▶ gt-i9r20.4 (gt escalate ack)
|
||||
│
|
||||
└──▶ gt-i9r20.3 (Stale patrol)
|
||||
```
|
||||
|
||||
### External Dependencies
|
||||
|
||||
- `bd create` for creating escalation beads
|
||||
- `bd list` for querying escalations
|
||||
- `bd label` for updating labels
|
||||
- `gt mail send` for mail action
|
||||
|
||||
---
|
||||
|
||||
## Open Questions (Resolved)
|
||||
|
||||
1. **Where to store config?** → `settings/escalation.json` (follows existing pattern)
|
||||
2. **How to implement email/SMS?** → Start with stubs, add real impl based on infrastructure
|
||||
3. **Stale check: patrol step or plugin?** → Start as patrol step, can migrate to plugin
|
||||
4. **Escalation bead type?** → `type: escalation` (new bead type)
|
||||
|
||||
---
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
1. **Slack integration**: Post to Slack channels
|
||||
2. **PagerDuty integration**: Create incidents
|
||||
3. **Escalation dashboard**: Web UI for escalation management
|
||||
4. **Scheduled escalations**: "Remind me in 2h if not resolved"
|
||||
5. **Escalation templates**: Pre-defined escalation types
|
||||
@@ -1,5 +1,7 @@
|
||||
# Federation Architecture
|
||||
|
||||
> **Status: Design spec - not yet implemented**
|
||||
|
||||
> Multi-workspace coordination for Gas Town and Beads
|
||||
|
||||
## Overview
|
||||
@@ -100,7 +102,7 @@ Distribute work across workspaces:
|
||||
|
||||
## Agent Provenance
|
||||
|
||||
Every agent operation is attributed. See [identity.md](identity.md) for the
|
||||
Every agent operation is attributed. See [identity.md](../concepts/identity.md) for the
|
||||
complete BD_ACTOR format convention.
|
||||
|
||||
### Git Commits
|
||||
141
docs/design/operational-state.md
Normal file
141
docs/design/operational-state.md
Normal file
@@ -0,0 +1,141 @@
|
||||
# Operational State in Gas Town
|
||||
|
||||
> Managing runtime state through events and labels.
|
||||
|
||||
## Overview
|
||||
|
||||
Gas Town tracks operational state changes as structured data. This document covers:
|
||||
- **Events**: State transitions as beads (immutable audit trail)
|
||||
- **Labels-as-state**: Fast queries via role bead labels (current state cache)
|
||||
|
||||
For Boot triage and degraded mode details, see [Watchdog Chain](watchdog-chain.md).
|
||||
|
||||
## Events: State Transitions as Data
|
||||
|
||||
Operational state changes are recorded as event beads. Each event captures:
|
||||
- **What** changed (`event_type`)
|
||||
- **Who** caused it (`actor`)
|
||||
- **What** was affected (`target`)
|
||||
- **Context** (`payload`)
|
||||
- **When** (`created_at`)
|
||||
|
||||
### Event Types
|
||||
|
||||
| Event Type | Description | Payload |
|
||||
|------------|-------------|---------|
|
||||
| `patrol.muted` | Patrol cycle disabled | `{reason, until?}` |
|
||||
| `patrol.unmuted` | Patrol cycle re-enabled | `{reason?}` |
|
||||
| `agent.started` | Agent session began | `{session_id?}` |
|
||||
| `agent.stopped` | Agent session ended | `{reason, outcome?}` |
|
||||
| `mode.degraded` | System entered degraded mode | `{reason}` |
|
||||
| `mode.normal` | System returned to normal | `{}` |
|
||||
|
||||
### Creating Events
|
||||
|
||||
```bash
|
||||
# Mute deacon patrol
|
||||
bd create --type=event --event-type=patrol.muted \
|
||||
--actor=human:overseer --target=agent:deacon \
|
||||
--payload='{"reason":"fixing convoy deadlock","until":"gt-abc1"}'
|
||||
|
||||
# System entered degraded mode
|
||||
bd create --type=event --event-type=mode.degraded \
|
||||
--actor=system:daemon --target=rig:greenplace \
|
||||
--payload='{"reason":"tmux unavailable"}'
|
||||
```
|
||||
|
||||
### Querying Events
|
||||
|
||||
```bash
|
||||
# Recent events for an agent
|
||||
bd list --type=event --target=agent:deacon --limit=10
|
||||
|
||||
# All patrol state changes
|
||||
bd list --type=event --event-type=patrol.muted
|
||||
bd list --type=event --event-type=patrol.unmuted
|
||||
|
||||
# Events in the activity feed
|
||||
bd activity --follow --type=event
|
||||
```
|
||||
|
||||
## Labels-as-State Pattern
|
||||
|
||||
Events capture the full history. Labels cache the current state for fast queries.
|
||||
|
||||
### Convention
|
||||
|
||||
Labels use `<dimension>:<value>` format:
|
||||
- `patrol:muted` / `patrol:active`
|
||||
- `mode:degraded` / `mode:normal`
|
||||
- `status:idle` / `status:working` (for persistent agents only - see note)
|
||||
|
||||
**Note on polecats:** The `status:idle` label does NOT apply to polecats. Polecats
|
||||
have no idle state - they're either working, stalled (stopped unexpectedly), or
|
||||
zombie (`gt done` failed). This label is for persistent agents like Deacon, Witness,
|
||||
and Crew members who can legitimately be idle between tasks.
|
||||
|
||||
### State Change Flow
|
||||
|
||||
1. Create event bead (full context, immutable)
|
||||
2. Update role bead labels (current state cache)
|
||||
|
||||
```bash
|
||||
# Mute patrol
|
||||
bd create --type=event --event-type=patrol.muted ...
|
||||
bd update role-deacon --add-label=patrol:muted --remove-label=patrol:active
|
||||
|
||||
# Unmute patrol
|
||||
bd create --type=event --event-type=patrol.unmuted ...
|
||||
bd update role-deacon --add-label=patrol:active --remove-label=patrol:muted
|
||||
```
|
||||
|
||||
### Querying Current State
|
||||
|
||||
```bash
|
||||
# Is deacon patrol muted?
|
||||
bd show role-deacon | grep patrol:
|
||||
|
||||
# All agents with muted patrol
|
||||
bd list --type=role --label=patrol:muted
|
||||
|
||||
# All agents in degraded mode
|
||||
bd list --type=role --label=mode:degraded
|
||||
```
|
||||
|
||||
## Configuration vs State
|
||||
|
||||
| Type | Storage | Example |
|
||||
|------|---------|---------|
|
||||
| **Static config** | TOML files | Daemon tick interval |
|
||||
| **Operational state** | Beads (events + labels) | Patrol muted |
|
||||
| **Runtime flags** | Marker files | `.deacon-disabled` |
|
||||
|
||||
Static config rarely changes and doesn't need history.
|
||||
Operational state changes at runtime and benefits from audit trail.
|
||||
Marker files are fast checks that can trigger deeper beads queries.
|
||||
|
||||
## Commands Summary
|
||||
|
||||
```bash
|
||||
# Create operational event
|
||||
bd create --type=event --event-type=<type> \
|
||||
--actor=<entity> --target=<entity> --payload='<json>'
|
||||
|
||||
# Update state label
|
||||
bd update <role-bead> --add-label=<dim>:<val> --remove-label=<dim>:<old>
|
||||
|
||||
# Query current state
|
||||
bd list --type=role --label=<dim>:<val>
|
||||
|
||||
# Query state history
|
||||
bd list --type=event --target=<entity>
|
||||
|
||||
# Boot management
|
||||
gt dog status boot
|
||||
gt dog call boot
|
||||
gt dog prime boot
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
*Events are the source of truth. Labels are the cache.*
|
||||
485
docs/design/plugin-system.md
Normal file
485
docs/design/plugin-system.md
Normal file
@@ -0,0 +1,485 @@
|
||||
# Plugin System Design
|
||||
|
||||
> Design document for the Gas Town plugin system.
|
||||
> Written 2026-01-11, crew/george session.
|
||||
|
||||
## Problem Statement
|
||||
|
||||
Gas Town needs extensible, project-specific automation that runs during Deacon patrol cycles. The immediate use case is rebuilding stale binaries (gt, bd, wv), but the pattern generalizes to any periodic maintenance task.
|
||||
|
||||
Current state:
|
||||
- Plugin infrastructure exists conceptually (patrol step mentions it)
|
||||
- `~/gt/plugins/` directory exists with README
|
||||
- No actual plugins in production use
|
||||
- No formalized execution model
|
||||
|
||||
## Design Principles Applied
|
||||
|
||||
### Discover, Don't Track
|
||||
> Reality is truth. State is derived.
|
||||
|
||||
Plugin state (last run, run count, results) lives on the ledger as wisps, not in shadow state files. Gate evaluation queries the ledger directly.
|
||||
|
||||
### ZFC: Zero Framework Cognition
|
||||
> Agent decides. Go transports.
|
||||
|
||||
The Deacon (agent) evaluates gates and decides whether to dispatch. Go code provides transport (`gt dog dispatch`) but doesn't make decisions.
|
||||
|
||||
### MEOW Stack Integration
|
||||
|
||||
| Layer | Plugin Analog |
|
||||
|-------|---------------|
|
||||
| **M**olecule | `plugin.md` - work template with TOML frontmatter |
|
||||
| **E**phemeral | Plugin-run wisps - high-volume, digestible |
|
||||
| **O**bservable | Plugin runs appear in `bd activity` feed |
|
||||
| **W**orkflow | Gate → Dispatch → Execute → Record → Digest |
|
||||
|
||||
---
|
||||
|
||||
## Architecture
|
||||
|
||||
### Plugin Locations
|
||||
|
||||
```
|
||||
~/gt/
|
||||
├── plugins/ # Town-level plugins (universal)
|
||||
│ └── README.md
|
||||
├── gastown/
|
||||
│ └── plugins/ # Rig-level plugins
|
||||
│ └── rebuild-gt/
|
||||
│ └── plugin.md
|
||||
├── beads/
|
||||
│ └── plugins/
|
||||
│ └── rebuild-bd/
|
||||
│ └── plugin.md
|
||||
└── wyvern/
|
||||
└── plugins/
|
||||
└── rebuild-wv/
|
||||
└── plugin.md
|
||||
```
|
||||
|
||||
**Town-level** (`~/gt/plugins/`): Universal plugins that apply everywhere.
|
||||
**Rig-level** (`<rig>/plugins/`): Project-specific plugins.
|
||||
|
||||
The Deacon scans both locations during patrol.
|
||||
|
||||
### Execution Model: Dog Dispatch
|
||||
|
||||
**Key insight**: Plugin execution should not block Deacon patrol.
|
||||
|
||||
Dogs are reusable workers designed for infrastructure tasks. Plugin execution is dispatched to dogs:
|
||||
|
||||
```
|
||||
Deacon Patrol Dog Worker
|
||||
───────────────── ─────────────────
|
||||
1. Scan plugins
|
||||
2. Evaluate gates
|
||||
3. For open gates:
|
||||
└─ gt dog dispatch plugin ──→ 4. Execute plugin
|
||||
(non-blocking) 5. Create result wisp
|
||||
6. Send DOG_DONE
|
||||
4. Continue patrol
|
||||
...
|
||||
5. Process DOG_DONE ←── (next cycle)
|
||||
```
|
||||
|
||||
Benefits:
|
||||
- Deacon stays responsive
|
||||
- Multiple plugins can run concurrently (different dogs)
|
||||
- Plugin failures don't stall patrol
|
||||
- Consistent with Dogs' purpose (infrastructure work)
|
||||
|
||||
### State Tracking: Wisps on the Ledger
|
||||
|
||||
Each plugin run creates a wisp:
|
||||
|
||||
```bash
|
||||
bd wisp create \
|
||||
--label type:plugin-run \
|
||||
--label plugin:rebuild-gt \
|
||||
--label rig:gastown \
|
||||
--label result:success \
|
||||
--body "Rebuilt gt: abc123 → def456 (5 commits)"
|
||||
```
|
||||
|
||||
**Gate evaluation** queries wisps instead of state files:
|
||||
|
||||
```bash
|
||||
# Cooldown check: any runs in last hour?
|
||||
bd list --type=wisp --label=plugin:rebuild-gt --since=1h --limit=1
|
||||
```
|
||||
|
||||
**Derived state** (no state.json needed):
|
||||
|
||||
| Query | Command |
|
||||
|-------|---------|
|
||||
| Last run time | `bd list --label=plugin:X --limit=1 --json` |
|
||||
| Run count | `bd list --label=plugin:X --json \| jq length` |
|
||||
| Last result | Parse `result:` label from latest wisp |
|
||||
| Failure rate | Count `result:failure` vs total |
|
||||
|
||||
### Digest Pattern
|
||||
|
||||
Like cost digests, plugin wisps accumulate and get squashed daily:
|
||||
|
||||
```bash
|
||||
gt plugin digest --yesterday
|
||||
```
|
||||
|
||||
Creates: `Plugin Digest 2026-01-10` bead with summary
|
||||
Deletes: Individual plugin-run wisps from that day
|
||||
|
||||
This keeps the ledger clean while preserving audit history.
|
||||
|
||||
---
|
||||
|
||||
## Plugin Format Specification
|
||||
|
||||
### File Structure
|
||||
|
||||
```
|
||||
rebuild-gt/
|
||||
└── plugin.md # Definition with TOML frontmatter
|
||||
```
|
||||
|
||||
### plugin.md Format
|
||||
|
||||
```markdown
|
||||
+++
|
||||
name = "rebuild-gt"
|
||||
description = "Rebuild stale gt binary from source"
|
||||
version = 1
|
||||
|
||||
[gate]
|
||||
type = "cooldown"
|
||||
duration = "1h"
|
||||
|
||||
[tracking]
|
||||
labels = ["plugin:rebuild-gt", "rig:gastown", "category:maintenance"]
|
||||
digest = true
|
||||
|
||||
[execution]
|
||||
timeout = "5m"
|
||||
notify_on_failure = true
|
||||
+++
|
||||
|
||||
# Rebuild gt Binary
|
||||
|
||||
Instructions for the dog worker to execute...
|
||||
```
|
||||
|
||||
### TOML Frontmatter Schema
|
||||
|
||||
```toml
|
||||
# Required
|
||||
name = "string" # Unique plugin identifier
|
||||
description = "string" # Human-readable description
|
||||
version = 1 # Schema version (for future evolution)
|
||||
|
||||
[gate]
|
||||
type = "cooldown|cron|condition|event|manual"
|
||||
# Type-specific fields:
|
||||
duration = "1h" # For cooldown
|
||||
schedule = "0 9 * * *" # For cron
|
||||
check = "gt stale -q" # For condition (exit 0 = run)
|
||||
on = "startup" # For event
|
||||
|
||||
[tracking]
|
||||
labels = ["label:value", ...] # Labels for execution wisps
|
||||
digest = true|false # Include in daily digest
|
||||
|
||||
[execution]
|
||||
timeout = "5m" # Max execution time
|
||||
notify_on_failure = true # Escalate on failure
|
||||
severity = "low" # Escalation severity if failed
|
||||
```
|
||||
|
||||
### Gate Types
|
||||
|
||||
| Type | Config | Behavior |
|
||||
|------|--------|----------|
|
||||
| `cooldown` | `duration = "1h"` | Query wisps, run if none in window |
|
||||
| `cron` | `schedule = "0 9 * * *"` | Run on cron schedule |
|
||||
| `condition` | `check = "cmd"` | Run check command, run if exit 0 |
|
||||
| `event` | `on = "startup"` | Run on Deacon startup |
|
||||
| `manual` | (no gate section) | Never auto-run, dispatch explicitly |
|
||||
|
||||
### Instructions Section
|
||||
|
||||
The markdown body after the frontmatter contains agent-executable instructions. The dog worker reads and executes these steps.
|
||||
|
||||
Standard sections:
|
||||
- **Detection**: Check if action is needed
|
||||
- **Action**: The actual work
|
||||
- **Record Result**: Create the execution wisp
|
||||
- **Notification**: On success/failure
|
||||
|
||||
---
|
||||
|
||||
## Escalation System
|
||||
|
||||
### Problem
|
||||
|
||||
Current escalation is ad-hoc "mail Mayor". Issues:
|
||||
- Mayor gets backlogged easily
|
||||
- No severity differentiation
|
||||
- No alternative channels (email, SMS, etc.)
|
||||
- No tracking of stale escalations
|
||||
|
||||
### Solution: Unified Escalation API
|
||||
|
||||
New command:
|
||||
|
||||
```bash
|
||||
gt escalate \
|
||||
--severity=<low|medium|high|critical> \
|
||||
--subject="Plugin FAILED: rebuild-gt" \
|
||||
--body="Build failed: make returned exit code 2" \
|
||||
--source="plugin:rebuild-gt"
|
||||
```
|
||||
|
||||
### Escalation Routing
|
||||
|
||||
The command reads town config (`~/gt/config.json` or similar) for routing rules:
|
||||
|
||||
```json
|
||||
{
|
||||
"escalation": {
|
||||
"routes": {
|
||||
"low": ["bead"],
|
||||
"medium": ["bead", "mail:mayor"],
|
||||
"high": ["bead", "mail:mayor", "email:human"],
|
||||
"critical": ["bead", "mail:mayor", "email:human", "sms:human"]
|
||||
},
|
||||
"contacts": {
|
||||
"human_email": "steve@example.com",
|
||||
"human_sms": "+1234567890"
|
||||
},
|
||||
"stale_threshold": "4h"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Escalation Actions
|
||||
|
||||
| Action | Behavior |
|
||||
|--------|----------|
|
||||
| `bead` | Create escalation bead with severity label |
|
||||
| `mail:mayor` | Send mail to mayor/ |
|
||||
| `email:human` | Send email via configured service |
|
||||
| `sms:human` | Send SMS via configured service |
|
||||
|
||||
### Escalation Beads
|
||||
|
||||
Every escalation creates a bead:
|
||||
|
||||
```yaml
|
||||
type: escalation
|
||||
status: open
|
||||
labels:
|
||||
- severity:high
|
||||
- source:plugin:rebuild-gt
|
||||
- acknowledged:false
|
||||
```
|
||||
|
||||
### Stale Escalation Patrol
|
||||
|
||||
A patrol step (or plugin!) checks for unacknowledged escalations:
|
||||
|
||||
```bash
|
||||
bd list --type=escalation --label=acknowledged:false --older-than=4h
|
||||
```
|
||||
|
||||
Stale escalations get re-escalated at higher severity.
|
||||
|
||||
### Acknowledging Escalations
|
||||
|
||||
```bash
|
||||
gt escalate ack <bead-id>
|
||||
# Sets label acknowledged:true
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## New Commands Required
|
||||
|
||||
### gt stale
|
||||
|
||||
Expose binary staleness check:
|
||||
|
||||
```bash
|
||||
gt stale # Human-readable output
|
||||
gt stale --json # Machine-readable
|
||||
gt stale --quiet # Exit code only (0=stale, 1=fresh)
|
||||
```
|
||||
|
||||
### gt dog dispatch
|
||||
|
||||
Formalized plugin dispatch to dogs:
|
||||
|
||||
```bash
|
||||
gt dog dispatch --plugin <name> [--rig <rig>]
|
||||
```
|
||||
|
||||
This:
|
||||
1. Finds the plugin definition
|
||||
2. Slings a standardized work unit to an idle dog
|
||||
3. Returns immediately (non-blocking)
|
||||
|
||||
### gt escalate
|
||||
|
||||
Unified escalation API:
|
||||
|
||||
```bash
|
||||
gt escalate \
|
||||
--severity=<level> \
|
||||
--subject="..." \
|
||||
--body="..." \
|
||||
[--source="..."]
|
||||
|
||||
gt escalate ack <bead-id>
|
||||
gt escalate list [--severity=...] [--stale]
|
||||
```
|
||||
|
||||
### gt plugin
|
||||
|
||||
Plugin management:
|
||||
|
||||
```bash
|
||||
gt plugin list # List all plugins
|
||||
gt plugin show <name> # Show plugin details
|
||||
gt plugin run <name> [--force] # Manual trigger
|
||||
gt plugin digest [--yesterday] # Squash wisps to digest
|
||||
gt plugin history <name> # Show execution history
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Foundation
|
||||
|
||||
1. **`gt stale` command** - Expose CheckStaleBinary() via CLI
|
||||
2. **Plugin format spec** - Finalize TOML schema
|
||||
3. **Plugin scanning** - Deacon scans town + rig plugin dirs
|
||||
|
||||
### Phase 2: Execution
|
||||
|
||||
4. **`gt dog dispatch --plugin`** - Formalized dog dispatch
|
||||
5. **Plugin execution in dogs** - Dog reads plugin.md, executes
|
||||
6. **Wisp creation** - Record results on ledger
|
||||
|
||||
### Phase 3: Gates & State
|
||||
|
||||
7. **Gate evaluation** - Cooldown via wisp query
|
||||
8. **Other gate types** - Cron, condition, event
|
||||
9. **Plugin digest** - Daily squash of plugin wisps
|
||||
|
||||
### Phase 4: Escalation
|
||||
|
||||
10. **`gt escalate` command** - Unified escalation API
|
||||
11. **Escalation routing** - Config-driven multi-channel
|
||||
12. **Stale escalation patrol** - Check unacknowledged
|
||||
|
||||
### Phase 5: First Plugin
|
||||
|
||||
13. **`rebuild-gt` plugin** - The actual gastown plugin
|
||||
14. **Documentation** - So Beads/Wyvern can create theirs
|
||||
|
||||
---
|
||||
|
||||
## Example: rebuild-gt Plugin
|
||||
|
||||
```markdown
|
||||
+++
|
||||
name = "rebuild-gt"
|
||||
description = "Rebuild stale gt binary from gastown source"
|
||||
version = 1
|
||||
|
||||
[gate]
|
||||
type = "cooldown"
|
||||
duration = "1h"
|
||||
|
||||
[tracking]
|
||||
labels = ["plugin:rebuild-gt", "rig:gastown", "category:maintenance"]
|
||||
digest = true
|
||||
|
||||
[execution]
|
||||
timeout = "5m"
|
||||
notify_on_failure = true
|
||||
severity = "medium"
|
||||
+++
|
||||
|
||||
# Rebuild gt Binary
|
||||
|
||||
Checks if the gt binary is stale (built from older commit than HEAD) and rebuilds.
|
||||
|
||||
## Gate Check
|
||||
|
||||
The Deacon evaluates this before dispatch. If gate closed, skip.
|
||||
|
||||
## Detection
|
||||
|
||||
Check binary staleness:
|
||||
|
||||
```bash
|
||||
gt stale --json
|
||||
```
|
||||
|
||||
If `"stale": false`, record a success wisp and exit early.
|
||||
|
||||
## Action
|
||||
|
||||
Rebuild from source:
|
||||
|
||||
```bash
|
||||
cd ~/gt/gastown/crew/george && make build && make install
|
||||
```
|
||||
|
||||
## Record Result
|
||||
|
||||
On success:
|
||||
```bash
|
||||
bd wisp create \
|
||||
--label type:plugin-run \
|
||||
--label plugin:rebuild-gt \
|
||||
--label rig:gastown \
|
||||
--label result:success \
|
||||
--body "Rebuilt gt: $OLD → $NEW ($N commits)"
|
||||
```
|
||||
|
||||
On failure:
|
||||
```bash
|
||||
bd wisp create \
|
||||
--label type:plugin-run \
|
||||
--label plugin:rebuild-gt \
|
||||
--label rig:gastown \
|
||||
--label result:failure \
|
||||
--body "Build failed: $ERROR"
|
||||
|
||||
gt escalate --severity=medium \
|
||||
--subject="Plugin FAILED: rebuild-gt" \
|
||||
--body="$ERROR" \
|
||||
--source="plugin:rebuild-gt"
|
||||
```
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Open Questions
|
||||
|
||||
1. **Plugin discovery in multiple clones**: If gastown has crew/george, crew/max, crew/joe - which clone's plugins/ dir is canonical? Probably: scan all, dedupe by name, prefer rig-root if exists.
|
||||
|
||||
2. **Dog assignment**: Should specific plugins prefer specific dogs? Or any idle dog?
|
||||
|
||||
3. **Plugin dependencies**: Can plugins depend on other plugins? Probably not in v1.
|
||||
|
||||
4. **Plugin disable/enable**: How to temporarily disable a plugin without deleting it? Label on a plugin bead? `enabled = false` in frontmatter?
|
||||
|
||||
---
|
||||
|
||||
## References
|
||||
|
||||
- PRIMING.md - Core design principles
|
||||
- mol-deacon-patrol.formula.toml - Patrol step plugin-run
|
||||
- ~/gt/plugins/README.md - Current plugin stub
|
||||
300
docs/design/property-layers.md
Normal file
300
docs/design/property-layers.md
Normal file
@@ -0,0 +1,300 @@
|
||||
# Property Layers: Multi-Level Configuration
|
||||
|
||||
> Implementation guide for Gas Town's configuration system.
|
||||
> Created: 2025-01-06
|
||||
|
||||
## Overview
|
||||
|
||||
Gas Town uses a layered property system for configuration. Properties are
|
||||
looked up through multiple layers, with earlier layers overriding later ones.
|
||||
This enables both local control and global coordination.
|
||||
|
||||
## The Four Layers
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ 1. WISP LAYER (transient, town-local) │
|
||||
│ Location: <rig>/.beads-wisp/config/ │
|
||||
│ Synced: Never │
|
||||
│ Use: Temporary local overrides │
|
||||
└─────────────────────────────┬───────────────────────────────┘
|
||||
│ if missing
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ 2. RIG BEAD LAYER (persistent, synced globally) │
|
||||
│ Location: <rig>/.beads/ (rig identity bead labels) │
|
||||
│ Synced: Via git (all clones see it) │
|
||||
│ Use: Project-wide operational state │
|
||||
└─────────────────────────────┬───────────────────────────────┘
|
||||
│ if missing
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ 3. TOWN DEFAULTS │
|
||||
│ Location: ~/gt/config.json or ~/gt/.beads/ │
|
||||
│ Synced: N/A (per-town) │
|
||||
│ Use: Town-wide policies │
|
||||
└─────────────────────────────┬───────────────────────────────┘
|
||||
│ if missing
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ 4. SYSTEM DEFAULTS (compiled in) │
|
||||
│ Use: Fallback when nothing else specified │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Lookup Behavior
|
||||
|
||||
### Override Semantics (Default)
|
||||
|
||||
For most properties, the first non-nil value wins:
|
||||
|
||||
```go
|
||||
func GetConfig(key string) interface{} {
|
||||
if val := wisp.Get(key); val != nil {
|
||||
if val == Blocked { return nil }
|
||||
return val
|
||||
}
|
||||
if val := rigBead.GetLabel(key); val != nil {
|
||||
return val
|
||||
}
|
||||
if val := townDefaults.Get(key); val != nil {
|
||||
return val
|
||||
}
|
||||
return systemDefaults[key]
|
||||
}
|
||||
```
|
||||
|
||||
### Stacking Semantics (Integers)
|
||||
|
||||
For integer properties, values from wisp and bead layers **add** to the base:
|
||||
|
||||
```go
|
||||
func GetIntConfig(key string) int {
|
||||
base := getBaseDefault(key) // Town or system default
|
||||
beadAdj := rigBead.GetInt(key) // 0 if missing
|
||||
wispAdj := wisp.GetInt(key) // 0 if missing
|
||||
return base + beadAdj + wispAdj
|
||||
}
|
||||
```
|
||||
|
||||
This enables temporary adjustments without changing the base value.
|
||||
|
||||
### Blocking Inheritance
|
||||
|
||||
You can explicitly block a property from being inherited:
|
||||
|
||||
```bash
|
||||
gt rig config set gastown auto_restart --block
|
||||
```
|
||||
|
||||
This creates a "blocked" marker in the wisp layer. Even if the rig bead
|
||||
or defaults say `auto_restart: true`, the lookup returns nil.
|
||||
|
||||
## Rig Identity Beads
|
||||
|
||||
Each rig has an identity bead for operational state:
|
||||
|
||||
```yaml
|
||||
id: gt-rig-gastown
|
||||
type: rig
|
||||
name: gastown
|
||||
repo: git@github.com:steveyegge/gastown.git
|
||||
prefix: gt
|
||||
|
||||
labels:
|
||||
- status:operational
|
||||
- priority:normal
|
||||
```
|
||||
|
||||
These beads sync via git, so all clones of the rig see the same state.
|
||||
|
||||
## Two-Level Rig Control
|
||||
|
||||
### Level 1: Park (Local, Ephemeral)
|
||||
|
||||
```bash
|
||||
gt rig park gastown # Stop services, daemon won't restart
|
||||
gt rig unpark gastown # Allow services to run
|
||||
```
|
||||
|
||||
- Stored in wisp layer (`.beads-wisp/config/`)
|
||||
- Only affects this town
|
||||
- Disappears on cleanup
|
||||
- Use: Local maintenance, debugging
|
||||
|
||||
### Level 2: Dock (Global, Persistent)
|
||||
|
||||
```bash
|
||||
gt rig dock gastown # Set status:docked label on rig bead
|
||||
gt rig undock gastown # Remove label
|
||||
```
|
||||
|
||||
- Stored on rig identity bead
|
||||
- Syncs to all clones via git
|
||||
- Permanent until explicitly changed
|
||||
- Use: Project-wide maintenance, coordinated downtime
|
||||
|
||||
### Daemon Behavior
|
||||
|
||||
The daemon checks both levels before auto-restarting:
|
||||
|
||||
```go
|
||||
func shouldAutoRestart(rig *Rig) bool {
|
||||
status := rig.GetConfig("status")
|
||||
if status == "parked" || status == "docked" {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
```
|
||||
|
||||
## Configuration Keys
|
||||
|
||||
| Key | Type | Behavior | Description |
|
||||
|-----|------|----------|-------------|
|
||||
| `status` | string | Override | operational/parked/docked |
|
||||
| `auto_restart` | bool | Override | Daemon auto-restart behavior |
|
||||
| `max_polecats` | int | Override | Maximum concurrent polecats |
|
||||
| `priority_adjustment` | int | **Stack** | Scheduling priority modifier |
|
||||
| `maintenance_window` | string | Override | When maintenance allowed |
|
||||
| `dnd` | bool | Override | Do not disturb mode |
|
||||
|
||||
## Commands
|
||||
|
||||
### View Configuration
|
||||
|
||||
```bash
|
||||
gt rig config show gastown # Show effective config (all layers)
|
||||
gt rig config show gastown --layer # Show which layer each value comes from
|
||||
```
|
||||
|
||||
### Set Configuration
|
||||
|
||||
```bash
|
||||
# Set in wisp layer (local, ephemeral)
|
||||
gt rig config set gastown key value
|
||||
|
||||
# Set in bead layer (global, permanent)
|
||||
gt rig config set gastown key value --global
|
||||
|
||||
# Block inheritance
|
||||
gt rig config set gastown key --block
|
||||
|
||||
# Clear from wisp layer
|
||||
gt rig config unset gastown key
|
||||
```
|
||||
|
||||
### Rig Lifecycle
|
||||
|
||||
```bash
|
||||
gt rig park gastown # Local: stop + prevent restart
|
||||
gt rig unpark gastown # Local: allow restart
|
||||
|
||||
gt rig dock gastown # Global: mark as offline
|
||||
gt rig undock gastown # Global: mark as operational
|
||||
|
||||
gt rig status gastown # Show current state
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### Temporary Priority Boost
|
||||
|
||||
```bash
|
||||
# Base priority: 0 (from defaults)
|
||||
# Give this rig temporary priority boost for urgent work
|
||||
|
||||
gt rig config set gastown priority_adjustment 10
|
||||
|
||||
# Effective priority: 0 + 10 = 10
|
||||
# When done, clear it:
|
||||
|
||||
gt rig config unset gastown priority_adjustment
|
||||
```
|
||||
|
||||
### Local Maintenance
|
||||
|
||||
```bash
|
||||
# I'm upgrading the local clone, don't restart services
|
||||
gt rig park gastown
|
||||
|
||||
# ... do maintenance ...
|
||||
|
||||
gt rig unpark gastown
|
||||
```
|
||||
|
||||
### Project-Wide Maintenance
|
||||
|
||||
```bash
|
||||
# Major refactor in progress, all clones should pause
|
||||
gt rig dock gastown
|
||||
|
||||
# Syncs via git - other towns see the rig as docked
|
||||
bd sync
|
||||
|
||||
# When done:
|
||||
gt rig undock gastown
|
||||
bd sync
|
||||
```
|
||||
|
||||
### Block Auto-Restart Locally
|
||||
|
||||
```bash
|
||||
# Rig bead says auto_restart: true
|
||||
# But I'm debugging and don't want that here
|
||||
|
||||
gt rig config set gastown auto_restart --block
|
||||
|
||||
# Now auto_restart returns nil for this town only
|
||||
```
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
### Wisp Storage
|
||||
|
||||
Wisp config stored in `.beads-wisp/config/<rig>.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"rig": "gastown",
|
||||
"values": {
|
||||
"status": "parked",
|
||||
"priority_adjustment": 10
|
||||
},
|
||||
"blocked": ["auto_restart"]
|
||||
}
|
||||
```
|
||||
|
||||
### Rig Bead Labels
|
||||
|
||||
Rig operational state stored as labels on the rig identity bead:
|
||||
|
||||
```bash
|
||||
bd label add gt-rig-gastown status:docked
|
||||
bd label remove gt-rig-gastown status:docked
|
||||
```
|
||||
|
||||
### Daemon Integration
|
||||
|
||||
The daemon's lifecycle manager checks config before starting services:
|
||||
|
||||
```go
|
||||
func (d *Daemon) maybeStartRigServices(rig string) {
|
||||
r := d.getRig(rig)
|
||||
|
||||
status := r.GetConfig("status")
|
||||
if status == "parked" || status == "docked" {
|
||||
log.Info("Rig %s is offline, skipping auto-start", rig)
|
||||
return
|
||||
}
|
||||
|
||||
d.ensureWitness(rig)
|
||||
d.ensureRefinery(rig)
|
||||
}
|
||||
```
|
||||
|
||||
## Related Documents
|
||||
|
||||
- `~/gt/docs/hop/PROPERTY-LAYERS.md` - Strategic architecture
|
||||
- `wisp-architecture.md` - Wisp system design
|
||||
- `agent-as-bead.md` - Agent identity beads (similar pattern)
|
||||
@@ -82,11 +82,11 @@ The daemon runs a heartbeat tick every 3 minutes:
|
||||
func (d *Daemon) heartbeatTick() {
|
||||
d.ensureBootRunning() // 1. Spawn Boot for triage
|
||||
d.checkDeaconHeartbeat() // 2. Belt-and-suspenders fallback
|
||||
d.ensureWitnessesRunning() // 3. Witness health
|
||||
d.triggerPendingSpawns() // 4. Bootstrap polecats
|
||||
d.processLifecycleRequests() // 5. Cycle/restart requests
|
||||
d.checkStaleAgents() // 6. Timeout detection
|
||||
// ... more checks
|
||||
d.ensureWitnessesRunning() // 3. Witness health (checks tmux directly)
|
||||
d.ensureRefineriesRunning() // 4. Refinery health (checks tmux directly)
|
||||
d.triggerPendingSpawns() // 5. Bootstrap polecats
|
||||
d.processLifecycleRequests() // 6. Cycle/restart requests
|
||||
// Agent state derived from tmux, not recorded in beads (gt-zecmc)
|
||||
}
|
||||
```
|
||||
|
||||
@@ -190,7 +190,7 @@ Multiple layers ensure recovery:
|
||||
|
||||
1. **Boot triage** - Intelligent observation, first line
|
||||
2. **Daemon checkDeaconHeartbeat()** - Belt-and-suspenders if Boot fails
|
||||
3. **Daemon checkStaleAgents()** - Timeout-based detection
|
||||
3. **Tmux-based discovery** - Daemon checks tmux sessions directly (no bead state)
|
||||
4. **Human escalation** - Mail to overseer for unrecoverable states
|
||||
|
||||
## State Files
|
||||
@@ -239,9 +239,11 @@ gt deacon health-check
|
||||
|
||||
### Status Shows Wrong State
|
||||
|
||||
**Symptom**: `gt status` shows "stopped" for running agents
|
||||
**Cause**: Bead state and tmux state diverged
|
||||
**Fix**: Reconcile with `gt sync-status` or restart agent
|
||||
**Symptom**: `gt status` shows wrong state for agents
|
||||
**Cause**: Previously bead state and tmux state could diverge
|
||||
**Fix**: As of gt-zecmc, status derives state from tmux directly (no bead state for
|
||||
observable conditions like running/stopped). Non-observable states (stuck, awaiting-gate)
|
||||
are still stored in beads.
|
||||
|
||||
## Design Decision: Keep Separation
|
||||
|
||||
@@ -284,7 +286,7 @@ The separation is correct; these bugs need fixing:
|
||||
|
||||
1. **Session confusion** (gt-sgzsb): Boot spawns in wrong session
|
||||
2. **Zombie blocking** (gt-j1i0r): Daemon can't kill zombie sessions
|
||||
3. **Status mismatch** (gt-doih4): Bead vs tmux state divergence
|
||||
3. ~~**Status mismatch** (gt-doih4): Bead vs tmux state divergence~~ → FIXED in gt-zecmc
|
||||
4. **Ensure semantics** (gt-ekc5u): Start should kill zombies first
|
||||
|
||||
## Summary
|
||||
248
docs/formula-resolution.md
Normal file
248
docs/formula-resolution.md
Normal file
@@ -0,0 +1,248 @@
|
||||
# Formula Resolution Architecture
|
||||
|
||||
> Where formulas live, how they're found, and how they'll scale to Mol Mall
|
||||
|
||||
## The Problem
|
||||
|
||||
Formulas currently exist in multiple locations with no clear precedence:
|
||||
- `.beads/formulas/` (source of truth for a project)
|
||||
- `internal/formula/formulas/` (embedded copy for `go install`)
|
||||
- Crew directories have their own `.beads/formulas/` (diverging copies)
|
||||
|
||||
When an agent runs `bd cook mol-polecat-work`, which version do they get?
|
||||
|
||||
## Design Goals
|
||||
|
||||
1. **Predictable resolution** - Clear precedence rules
|
||||
2. **Local customization** - Override system defaults without forking
|
||||
3. **Project-specific formulas** - Committed workflows for collaborators
|
||||
4. **Mol Mall ready** - Architecture supports remote formula installation
|
||||
5. **Federation ready** - Formulas are shareable across towns via HOP (Highway Operations Protocol)
|
||||
|
||||
## Three-Tier Resolution
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ FORMULA RESOLUTION ORDER │
|
||||
│ (most specific wins) │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
|
||||
TIER 1: PROJECT (rig-level)
|
||||
Location: <project>/.beads/formulas/
|
||||
Source: Committed to project repo
|
||||
Use case: Project-specific workflows (deploy, test, release)
|
||||
Example: ~/gt/gastown/.beads/formulas/mol-gastown-release.formula.toml
|
||||
|
||||
TIER 2: TOWN (user-level)
|
||||
Location: ~/gt/.beads/formulas/
|
||||
Source: Mol Mall installs, user customizations
|
||||
Use case: Cross-project workflows, personal preferences
|
||||
Example: ~/gt/.beads/formulas/mol-polecat-work.formula.toml (customized)
|
||||
|
||||
TIER 3: SYSTEM (embedded)
|
||||
Location: Compiled into gt binary
|
||||
Source: gastown/mayor/rig/.beads/formulas/ at build time
|
||||
Use case: Defaults, blessed patterns, fallback
|
||||
Example: mol-polecat-work.formula.toml (factory default)
|
||||
```
|
||||
|
||||
### Resolution Algorithm
|
||||
|
||||
```go
|
||||
func ResolveFormula(name string, cwd string) (Formula, Tier, error) {
|
||||
// Tier 1: Project-level (walk up from cwd to find .beads/formulas/)
|
||||
if projectDir := findProjectRoot(cwd); projectDir != "" {
|
||||
path := filepath.Join(projectDir, ".beads", "formulas", name+".formula.toml")
|
||||
if f, err := loadFormula(path); err == nil {
|
||||
return f, TierProject, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Tier 2: Town-level
|
||||
townDir := getTownRoot() // ~/gt or $GT_HOME
|
||||
path := filepath.Join(townDir, ".beads", "formulas", name+".formula.toml")
|
||||
if f, err := loadFormula(path); err == nil {
|
||||
return f, TierTown, nil
|
||||
}
|
||||
|
||||
// Tier 3: Embedded (system)
|
||||
if f, err := loadEmbeddedFormula(name); err == nil {
|
||||
return f, TierSystem, nil
|
||||
}
|
||||
|
||||
return nil, 0, ErrFormulaNotFound
|
||||
}
|
||||
```
|
||||
|
||||
### Why This Order
|
||||
|
||||
**Project wins** because:
|
||||
- Project maintainers know their workflows best
|
||||
- Collaborators get consistent behavior via git
|
||||
- CI/CD uses the same formulas as developers
|
||||
|
||||
**Town is middle** because:
|
||||
- User customizations override system defaults
|
||||
- Mol Mall installs don't require project changes
|
||||
- Cross-project consistency for the user
|
||||
|
||||
**System is fallback** because:
|
||||
- Always available (compiled in)
|
||||
- Factory reset target
|
||||
- The "blessed" versions
|
||||
|
||||
## Formula Identity
|
||||
|
||||
### Current Format
|
||||
|
||||
```toml
|
||||
formula = "mol-polecat-work"
|
||||
version = 4
|
||||
description = "..."
|
||||
```
|
||||
|
||||
### Extended Format (Mol Mall Ready)
|
||||
|
||||
```toml
|
||||
[formula]
|
||||
name = "mol-polecat-work"
|
||||
version = "4.0.0" # Semver
|
||||
author = "steve@gastown.io" # Author identity
|
||||
license = "MIT"
|
||||
repository = "https://github.com/steveyegge/gastown"
|
||||
|
||||
[formula.registry]
|
||||
uri = "hop://molmall.gastown.io/formulas/mol-polecat-work@4.0.0"
|
||||
checksum = "sha256:abc123..." # Integrity verification
|
||||
signed_by = "steve@gastown.io" # Optional signing
|
||||
|
||||
[formula.capabilities]
|
||||
# What capabilities does this formula exercise? Used for agent routing.
|
||||
primary = ["go", "testing", "code-review"]
|
||||
secondary = ["git", "ci-cd"]
|
||||
```
|
||||
|
||||
### Version Resolution
|
||||
|
||||
When multiple versions exist:
|
||||
|
||||
```bash
|
||||
bd cook mol-polecat-work # Resolves per tier order
|
||||
bd cook mol-polecat-work@4 # Specific major version
|
||||
bd cook mol-polecat-work@4.0.0 # Exact version
|
||||
bd cook mol-polecat-work@latest # Explicit latest
|
||||
```
|
||||
|
||||
## Crew Directory Problem
|
||||
|
||||
### Current State
|
||||
|
||||
Crew directories (`gastown/crew/max/`) are sparse checkouts of gastown. They have:
|
||||
- Their own `.beads/formulas/` (from the checkout)
|
||||
- These can diverge from `mayor/rig/.beads/formulas/`
|
||||
|
||||
### The Fix
|
||||
|
||||
Crew should NOT have their own formula copies. Options:
|
||||
|
||||
**Option A: Symlink/Redirect**
|
||||
```bash
|
||||
# crew/max/.beads/formulas -> ../../mayor/rig/.beads/formulas
|
||||
```
|
||||
All crew share the rig's formulas.
|
||||
|
||||
**Option B: Provision on Demand**
|
||||
Crew directories don't have `.beads/formulas/`. Resolution falls through to:
|
||||
1. Town-level (~/gt/.beads/formulas/)
|
||||
2. System (embedded)
|
||||
|
||||
**Option C: Sparse Checkout Exclusion**
|
||||
Exclude `.beads/formulas/` from crew sparse checkouts entirely.
|
||||
|
||||
**Recommendation: Option B** - Crew shouldn't need project-level formulas. They work on the project, they don't define its workflows.
|
||||
|
||||
## Commands
|
||||
|
||||
### Existing
|
||||
|
||||
```bash
|
||||
bd formula list # Available formulas (should show tier)
|
||||
bd formula show <name> # Formula details
|
||||
bd cook <formula> # Formula → Proto
|
||||
```
|
||||
|
||||
### Enhanced
|
||||
|
||||
```bash
|
||||
# List with tier information
|
||||
bd formula list
|
||||
mol-polecat-work v4 [project]
|
||||
mol-polecat-code-review v1 [town]
|
||||
mol-witness-patrol v2 [system]
|
||||
|
||||
# Show resolution path
|
||||
bd formula show mol-polecat-work --resolve
|
||||
Resolving: mol-polecat-work
|
||||
✓ Found at: ~/gt/gastown/.beads/formulas/mol-polecat-work.formula.toml
|
||||
Tier: project
|
||||
Version: 4
|
||||
|
||||
Resolution path checked:
|
||||
1. [project] ~/gt/gastown/.beads/formulas/ ← FOUND
|
||||
2. [town] ~/gt/.beads/formulas/
|
||||
3. [system] <embedded>
|
||||
|
||||
# Override tier for testing
|
||||
bd cook mol-polecat-work --tier=system # Force embedded version
|
||||
bd cook mol-polecat-work --tier=town # Force town version
|
||||
```
|
||||
|
||||
### Future (Mol Mall)
|
||||
|
||||
```bash
|
||||
# Install from Mol Mall
|
||||
gt formula install mol-code-review-strict
|
||||
gt formula install mol-code-review-strict@2.0.0
|
||||
gt formula install hop://acme.corp/formulas/mol-deploy
|
||||
|
||||
# Manage installed formulas
|
||||
gt formula list --installed # What's in town-level
|
||||
gt formula upgrade mol-polecat-work # Update to latest
|
||||
gt formula pin mol-polecat-work@4.0.0 # Lock version
|
||||
gt formula uninstall mol-code-review-strict
|
||||
```
|
||||
|
||||
## Migration Path
|
||||
|
||||
### Phase 1: Resolution Order (Now)
|
||||
|
||||
1. Implement three-tier resolution in `bd cook`
|
||||
2. Add `--resolve` flag to show resolution path
|
||||
3. Update `bd formula list` to show tiers
|
||||
4. Fix crew directories (Option B)
|
||||
|
||||
### Phase 2: Town-Level Formulas
|
||||
|
||||
1. Establish `~/gt/.beads/formulas/` as town formula location
|
||||
2. Add `gt formula` commands for managing town formulas
|
||||
3. Support manual installation (copy file, track in `.installed.json`)
|
||||
|
||||
### Phase 3: Mol Mall Integration
|
||||
|
||||
1. Define registry API (see mol-mall-design.md)
|
||||
2. Implement `gt formula install` from remote
|
||||
3. Add version pinning and upgrade flows
|
||||
4. Add integrity verification (checksums, optional signing)
|
||||
|
||||
### Phase 4: Federation (HOP)
|
||||
|
||||
1. Add capability tags to formula schema
|
||||
2. Track formula execution for agent accountability
|
||||
3. Enable federation (cross-town formula sharing via Highway Operations Protocol)
|
||||
4. Author attribution and validation records
|
||||
|
||||
## Related Documents
|
||||
|
||||
- [Mol Mall Design](mol-mall-design.md) - Registry architecture
|
||||
- [molecules.md](molecules.md) - Formula → Proto → Mol lifecycle
|
||||
- [understanding-gas-town.md](../../../docs/understanding-gas-town.md) - Gas Town architecture
|
||||
94
docs/glossary.md
Normal file
94
docs/glossary.md
Normal file
@@ -0,0 +1,94 @@
|
||||
# Gas Town Glossary
|
||||
|
||||
Gas Town is an agentic development environment for managing multiple Claude Code instances simultaneously using the `gt` and `bd` (Beads) binaries, coordinated with tmux in git-managed directories.
|
||||
|
||||
## Core Principles
|
||||
|
||||
### MEOW (Molecular Expression of Work)
|
||||
Breaking large goals into detailed instructions for agents. Supported by Beads, Epics, Formulas, and Molecules. MEOW ensures work is decomposed into trackable, atomic units that agents can execute autonomously.
|
||||
|
||||
### GUPP (Gas Town Universal Propulsion Principle)
|
||||
"If there is work on your Hook, YOU MUST RUN IT." This principle ensures agents autonomously proceed with available work without waiting for external input. GUPP is the heartbeat of autonomous operation.
|
||||
|
||||
### NDI (Nondeterministic Idempotence)
|
||||
The overarching goal ensuring useful outcomes through orchestration of potentially unreliable processes. Persistent Beads and oversight agents (Witness, Deacon) guarantee eventual workflow completion even when individual operations may fail or produce varying results.
|
||||
|
||||
## Environments
|
||||
|
||||
### Town
|
||||
The management headquarters (e.g., `~/gt/`). The Town coordinates all workers across multiple Rigs and houses town-level agents like Mayor and Deacon.
|
||||
|
||||
### Rig
|
||||
A project-specific Git repository under Gas Town management. Each Rig has its own Polecats, Refinery, Witness, and Crew members. Rigs are where actual development work happens.
|
||||
|
||||
## Town-Level Roles
|
||||
|
||||
### Mayor
|
||||
Chief-of-staff agent responsible for initiating Convoys, coordinating work distribution, and notifying users of important events. The Mayor operates from the town level and has visibility across all Rigs.
|
||||
|
||||
### Deacon
|
||||
Daemon beacon running continuous Patrol cycles. The Deacon ensures worker activity, monitors system health, and triggers recovery when agents become unresponsive. Think of the Deacon as the system's watchdog.
|
||||
|
||||
### Dogs
|
||||
The Deacon's crew of maintenance agents handling background tasks like cleanup, health checks, and system maintenance.
|
||||
|
||||
### Boot (the Dog)
|
||||
A special Dog that checks the Deacon every 5 minutes, ensuring the watchdog itself is still watching. This creates a chain of accountability.
|
||||
|
||||
## Rig-Level Roles
|
||||
|
||||
### Polecat
|
||||
Ephemeral worker agents that produce Merge Requests. Polecats are spawned for specific tasks, complete their work, and are then cleaned up. They work in isolated git worktrees to avoid conflicts.
|
||||
|
||||
### Refinery
|
||||
Manages the Merge Queue for a Rig. The Refinery intelligently merges changes from Polecats, handling conflicts and ensuring code quality before changes reach the main branch.
|
||||
|
||||
### Witness
|
||||
Patrol agent that oversees Polecats and the Refinery within a Rig. The Witness monitors progress, detects stuck agents, and can trigger recovery actions.
|
||||
|
||||
### Crew
|
||||
Long-lived, named agents for persistent collaboration. Unlike ephemeral Polecats, Crew members maintain context across sessions and are ideal for ongoing work relationships.
|
||||
|
||||
## Work Units
|
||||
|
||||
### Bead
|
||||
Git-backed atomic work unit stored in JSONL format. Beads are the fundamental unit of work tracking in Gas Town. They can represent issues, tasks, epics, or any trackable work item.
|
||||
|
||||
### Formula
|
||||
TOML-based workflow source template. Formulas define reusable patterns for common operations like patrol cycles, code review, or deployment.
|
||||
|
||||
### Protomolecule
|
||||
A template class for instantiating Molecules. Protomolecules define the structure and steps of a workflow without being tied to specific work items.
|
||||
|
||||
### Molecule
|
||||
Durable chained Bead workflows. Molecules represent multi-step processes where each step is tracked as a Bead. They survive agent restarts and ensure complex workflows complete.
|
||||
|
||||
### Wisp
|
||||
Ephemeral Beads destroyed after runs. Wisps are lightweight work items used for transient operations that don't need permanent tracking.
|
||||
|
||||
### Hook
|
||||
A special pinned Bead for each agent. The Hook is an agent's primary work queue - when work appears on your Hook, GUPP dictates you must run it.
|
||||
|
||||
## Workflow Commands
|
||||
|
||||
### Convoy
|
||||
Primary work-order wrapping related Beads. Convoys group related tasks together and can be assigned to multiple workers. Created with `gt convoy create`.
|
||||
|
||||
### Slinging
|
||||
Assigning work to agents via `gt sling`. When you sling work to a Polecat or Crew member, you're putting it on their Hook for execution.
|
||||
|
||||
### Nudging
|
||||
Real-time messaging between agents with `gt nudge`. Nudges allow immediate communication without going through the mail system.
|
||||
|
||||
### Handoff
|
||||
Agent session refresh via `/handoff`. When context gets full or an agent needs a fresh start, handoff transfers work state to a new session.
|
||||
|
||||
### Seance
|
||||
Communicating with previous sessions via `gt seance`. Allows agents to query their predecessors for context and decisions from earlier work.
|
||||
|
||||
### Patrol
|
||||
Ephemeral loop maintaining system heartbeat. Patrol agents (Deacon, Witness) continuously cycle through health checks and trigger actions as needed.
|
||||
|
||||
---
|
||||
|
||||
*This glossary was contributed by [Clay Shirky](https://github.com/cshirky) in [Issue #80](https://github.com/steveyegge/gastown/issues/80).*
|
||||
@@ -1,73 +0,0 @@
|
||||
# Decision 009: Session Events Architecture
|
||||
|
||||
**Status:** Accepted
|
||||
**Date:** 2025-12-31
|
||||
**Context:** Where should session events live? Beads, separate repo, or events.jsonl?
|
||||
|
||||
## Decision
|
||||
|
||||
Session events are **orchestration infrastructure**, not work items. They stay in
|
||||
`events.jsonl` (outside beads). Work attribution happens by capturing `session_id`
|
||||
on beads mutations (issue close, MR merge).
|
||||
|
||||
## Context
|
||||
|
||||
The seance feature needs to discover and resume Claude Code sessions. This requires:
|
||||
1. **Pointer** to session (session_id) - for `claude --resume`
|
||||
2. **Attribution** (which work happened in this session) - for entity CV
|
||||
|
||||
Claude Code already stores full session transcripts indefinitely. Gas Town doesn't
|
||||
need to duplicate them - just point at them.
|
||||
|
||||
## The Separation
|
||||
|
||||
| Layer | Storage | Content | Retention |
|
||||
|-------|---------|---------|-----------|
|
||||
| **Orchestration** | `~/.events.jsonl` | session_start, nudges, mail routing | Ephemeral (auto-prune) |
|
||||
| **Work** | Beads (rig-level) | Issues, MRs, convoys | Permanent (ledger) |
|
||||
| **Entity activity** | Beads (entity chain) | Session digests | Permanent (CV) |
|
||||
| **Transcript** | Claude Code | Full session content | Claude Code's retention |
|
||||
|
||||
## Why Not Beads for Events?
|
||||
|
||||
1. **Volume**: Orchestration events are high volume, would overwhelm work signal
|
||||
2. **Ephemerality**: Most orchestration events don't need CV/ledger permanence
|
||||
3. **Different audiences**: Work items are cross-agent; orchestration is internal
|
||||
4. **Claude Code has it**: Transcripts already live there; we just need pointers
|
||||
|
||||
## Implementation
|
||||
|
||||
### Phase 1: Attribution (Now)
|
||||
- `gt done` captures `CLAUDE_SESSION_ID` in issue close
|
||||
- Beads supports `closed_by_session` field on issue mutations
|
||||
- Events.jsonl continues to capture `session_start` for seance
|
||||
|
||||
### Phase 2: Session Digests (Future)
|
||||
- Sessions as wisps: `session_start` creates ephemeral wisp
|
||||
- Session work adds steps (issues closed, commits made)
|
||||
- `session_end` squashes to digest
|
||||
- Digest lives on entity chain (agent CV)
|
||||
|
||||
### Phase 3: Pruning (Future)
|
||||
- Events.jsonl auto-prunes after N days
|
||||
- Session digests provide permanent summary
|
||||
- Full transcripts remain in Claude Code
|
||||
|
||||
## Consequences
|
||||
|
||||
**Positive:**
|
||||
- Clean separation of concerns
|
||||
- Work ledger stays focused on work
|
||||
- CV attribution via session_id on beads mutations
|
||||
- Seance works via events.jsonl discovery
|
||||
|
||||
**Negative:**
|
||||
- Two systems to understand (events vs beads)
|
||||
- Need to ensure session_id flows through commands
|
||||
|
||||
## Related
|
||||
|
||||
- `gt seance` - Session discovery and resume
|
||||
- `gt-3zsml` - SessionStart hook passes session_id to gt prime
|
||||
- PRIMING.md - "The Feed Is the Signal" section
|
||||
- CONTEXT.md - Entity chains and CV model
|
||||
476
docs/mol-mall-design.md
Normal file
476
docs/mol-mall-design.md
Normal file
@@ -0,0 +1,476 @@
|
||||
# Mol Mall Design
|
||||
|
||||
> A marketplace for Gas Town formulas
|
||||
|
||||
## Vision
|
||||
|
||||
**Mol Mall** is a registry for sharing formulas across Gas Town installations. Think npm for molecules, or Terraform Registry for workflows.
|
||||
|
||||
```
|
||||
"Cook a formula, sling it to a polecat, the witness watches, refinery merges."
|
||||
|
||||
What if you could browse a mall of formulas, install one, and immediately
|
||||
have your polecats executing world-class workflows?
|
||||
```
|
||||
|
||||
### The Network Effect
|
||||
|
||||
A well-designed formula for "code review" or "security audit" or "deploy to K8s" can spread across thousands of Gas Town installations. Each adoption means:
|
||||
- More agents executing proven workflows
|
||||
- More structured, trackable work output
|
||||
- Better capability routing (agents with track records on a formula get similar work)
|
||||
|
||||
## Architecture
|
||||
|
||||
### Registry Types
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ MOL MALL REGISTRIES │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
|
||||
PUBLIC REGISTRY (molmall.gastown.io)
|
||||
├── Community formulas (MIT licensed)
|
||||
├── Official Gas Town formulas (blessed)
|
||||
├── Verified publisher formulas
|
||||
└── Open contribution model
|
||||
|
||||
PRIVATE REGISTRY (self-hosted)
|
||||
├── Organization-specific formulas
|
||||
├── Proprietary workflows
|
||||
├── Internal deployment patterns
|
||||
└── Enterprise compliance formulas
|
||||
|
||||
FEDERATED REGISTRY (HOP future)
|
||||
├── Cross-organization discovery
|
||||
├── Skill-based search
|
||||
├── Attribution chain tracking
|
||||
└── hop:// URI resolution
|
||||
```
|
||||
|
||||
### URI Scheme
|
||||
|
||||
```
|
||||
hop://molmall.gastown.io/formulas/mol-polecat-work@4.0.0
|
||||
      └────────────────┘          └──────────────┘ └───┘
|
||||
        registry host               formula name  version
|
||||
|
||||
# Short forms
|
||||
mol-polecat-work # Default registry, latest version
|
||||
mol-polecat-work@4 # Major version
|
||||
mol-polecat-work@4.0.0 # Exact version
|
||||
@acme/mol-deploy # Scoped to publisher
|
||||
hop://acme.corp/formulas/mol-deploy # Full HOP URI
|
||||
```
|
||||
|
||||
### Registry API
|
||||
|
||||
```yaml
|
||||
# OpenAPI-style specification
|
||||
|
||||
GET /formulas
|
||||
# List all formulas
|
||||
Query:
|
||||
- q: string # Search query
|
||||
- capabilities: string[] # Filter by capability tags
|
||||
- author: string # Filter by author
|
||||
- limit: int
|
||||
- offset: int
|
||||
Response:
|
||||
formulas:
|
||||
- name: mol-polecat-work
|
||||
version: 4.0.0
|
||||
description: "Full polecat work lifecycle..."
|
||||
author: steve@gastown.io
|
||||
downloads: 12543
|
||||
capabilities: [go, testing, code-review]
|
||||
|
||||
GET /formulas/{name}
|
||||
# Get formula metadata
|
||||
Response:
|
||||
name: mol-polecat-work
|
||||
versions: [4.0.0, 3.2.1, 3.2.0, ...]
|
||||
latest: 4.0.0
|
||||
author: steve@gastown.io
|
||||
repository: https://github.com/steveyegge/gastown
|
||||
license: MIT
|
||||
capabilities:
|
||||
primary: [go, testing]
|
||||
secondary: [git, code-review]
|
||||
stats:
|
||||
downloads: 12543
|
||||
stars: 234
|
||||
used_by: 89 # towns using this formula
|
||||
|
||||
GET /formulas/{name}/{version}
|
||||
# Get specific version
|
||||
Response:
|
||||
name: mol-polecat-work
|
||||
version: 4.0.0
|
||||
checksum: sha256:abc123...
|
||||
signature: <optional PGP signature>
|
||||
content: <base64 or URL to .formula.toml>
|
||||
changelog: "Added self-cleaning model..."
|
||||
published_at: 2026-01-10T00:00:00Z
|
||||
|
||||
POST /formulas
|
||||
# Publish formula (authenticated)
|
||||
Body:
|
||||
name: mol-my-workflow
|
||||
version: 1.0.0
|
||||
content: <formula TOML>
|
||||
changelog: "Initial release"
|
||||
Auth: Bearer token (linked to HOP identity)
|
||||
|
||||
GET /formulas/{name}/{version}/download
|
||||
# Download formula content
|
||||
Response: raw .formula.toml content
|
||||
```
|
||||
|
||||
## Formula Package Format
|
||||
|
||||
### Simple Case: Single File
|
||||
|
||||
Most formulas are single `.formula.toml` files:
|
||||
|
||||
```bash
|
||||
gt formula install mol-polecat-code-review
|
||||
# Downloads mol-polecat-code-review.formula.toml to ~/gt/.beads/formulas/
|
||||
```
|
||||
|
||||
### Complex Case: Formula Bundle
|
||||
|
||||
Some formulas need supporting files (scripts, templates, configs):
|
||||
|
||||
```
|
||||
mol-deploy-k8s.formula.bundle/
|
||||
├── formula.toml # Main formula
|
||||
├── templates/
|
||||
│ ├── deployment.yaml.tmpl
|
||||
│ └── service.yaml.tmpl
|
||||
├── scripts/
|
||||
│ └── healthcheck.sh
|
||||
└── README.md
|
||||
```
|
||||
|
||||
Bundle format:
|
||||
```bash
|
||||
# Bundles are tarballs
|
||||
mol-deploy-k8s-1.0.0.bundle.tar.gz
|
||||
```
|
||||
|
||||
Installation:
|
||||
```bash
|
||||
gt formula install mol-deploy-k8s
|
||||
# Extracts to ~/gt/.beads/formulas/mol-deploy-k8s/
|
||||
# formula.toml is at mol-deploy-k8s/formula.toml
|
||||
```
|
||||
|
||||
## Installation Flow
|
||||
|
||||
### Basic Install
|
||||
|
||||
```bash
|
||||
$ gt formula install mol-polecat-code-review
|
||||
|
||||
Resolving mol-polecat-code-review...
|
||||
Registry: molmall.gastown.io
|
||||
Version: 1.2.0 (latest)
|
||||
Author: steve@gastown.io
|
||||
Skills: code-review, security
|
||||
|
||||
Downloading... ████████████████████ 100%
|
||||
Verifying checksum... ✓
|
||||
|
||||
Installed to: ~/gt/.beads/formulas/mol-polecat-code-review.formula.toml
|
||||
```
|
||||
|
||||
### Version Pinning
|
||||
|
||||
```bash
|
||||
$ gt formula install mol-polecat-work@4.0.0
|
||||
|
||||
Installing mol-polecat-work@4.0.0 (pinned)...
|
||||
✓ Installed
|
||||
|
||||
$ gt formula list --installed
|
||||
mol-polecat-work 4.0.0 [pinned]
|
||||
mol-polecat-code-review 1.2.0 [latest]
|
||||
```
|
||||
|
||||
### Upgrade Flow
|
||||
|
||||
```bash
|
||||
$ gt formula upgrade mol-polecat-code-review
|
||||
|
||||
Checking for updates...
|
||||
Current: 1.2.0
|
||||
Latest: 1.3.0
|
||||
|
||||
Changelog for 1.3.0:
|
||||
- Added security focus option
|
||||
- Improved test coverage step
|
||||
|
||||
Upgrade? [y/N] y
|
||||
|
||||
Downloading... ✓
|
||||
Installed: mol-polecat-code-review@1.3.0
|
||||
```
|
||||
|
||||
### Lock File
|
||||
|
||||
```json
|
||||
// ~/gt/.beads/formulas/.lock.json
|
||||
{
|
||||
"version": 1,
|
||||
"formulas": {
|
||||
"mol-polecat-work": {
|
||||
"version": "4.0.0",
|
||||
"pinned": true,
|
||||
"checksum": "sha256:abc123...",
|
||||
"installed_at": "2026-01-10T00:00:00Z",
|
||||
"source": "hop://molmall.gastown.io/formulas/mol-polecat-work@4.0.0"
|
||||
},
|
||||
"mol-polecat-code-review": {
|
||||
"version": "1.3.0",
|
||||
"pinned": false,
|
||||
"checksum": "sha256:def456...",
|
||||
"installed_at": "2026-01-10T12:00:00Z",
|
||||
"source": "hop://molmall.gastown.io/formulas/mol-polecat-code-review@1.3.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Publishing Flow
|
||||
|
||||
### First-Time Setup
|
||||
|
||||
```bash
|
||||
$ gt formula publish --init
|
||||
|
||||
Setting up Mol Mall publishing...
|
||||
|
||||
1. Create account at https://molmall.gastown.io/signup
|
||||
2. Generate API token at https://molmall.gastown.io/settings/tokens
|
||||
3. Run: gt formula login
|
||||
|
||||
$ gt formula login
|
||||
Token: ********
|
||||
Logged in as: steve@gastown.io
|
||||
```
|
||||
|
||||
### Publishing
|
||||
|
||||
```bash
|
||||
$ gt formula publish mol-polecat-work
|
||||
|
||||
Publishing mol-polecat-work...
|
||||
|
||||
Pre-flight checks:
|
||||
✓ formula.toml is valid
|
||||
✓ Version 4.0.0 not yet published
|
||||
✓ Required fields present (name, version, description)
|
||||
✓ Skills declared
|
||||
|
||||
Publish to molmall.gastown.io? [y/N] y
|
||||
|
||||
Uploading... ✓
|
||||
Published: hop://molmall.gastown.io/formulas/mol-polecat-work@4.0.0
|
||||
|
||||
View at: https://molmall.gastown.io/formulas/mol-polecat-work
|
||||
```
|
||||
|
||||
### Verification Levels
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ FORMULA TRUST LEVELS │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
|
||||
UNVERIFIED (default)
|
||||
Anyone can publish
|
||||
Basic validation only
|
||||
Displayed with ⚠️ warning
|
||||
|
||||
VERIFIED PUBLISHER
|
||||
Publisher identity confirmed
|
||||
Displayed with ✓ checkmark
|
||||
Higher search ranking
|
||||
|
||||
OFFICIAL
|
||||
Maintained by Gas Town team
|
||||
Displayed with 🏛️ badge
|
||||
Included in embedded defaults
|
||||
|
||||
AUDITED
|
||||
Security review completed
|
||||
Displayed with 🔒 badge
|
||||
Required for enterprise registries
|
||||
```
|
||||
|
||||
## Capability Tagging
|
||||
|
||||
### Formula Capability Declaration
|
||||
|
||||
```toml
|
||||
[formula.capabilities]
|
||||
# What capabilities does this formula exercise? Used for agent routing.
|
||||
primary = ["go", "testing", "code-review"]
|
||||
secondary = ["git", "ci-cd"]
|
||||
|
||||
# Capability weights (optional, for fine-grained routing)
|
||||
[formula.capabilities.weights]
|
||||
go = 0.3 # 30% of formula work is Go
|
||||
testing = 0.4 # 40% is testing
|
||||
code-review = 0.3 # 30% is code review
|
||||
```
|
||||
|
||||
### Capability-Based Search
|
||||
|
||||
```bash
|
||||
$ gt formula search --capabilities="security,go"
|
||||
|
||||
Formulas matching capabilities: security, go
|
||||
|
||||
mol-security-audit v2.1.0 ⭐ 4.8 📥 8,234
|
||||
Capabilities: security, go, code-review
|
||||
"Comprehensive security audit workflow"
|
||||
|
||||
mol-dependency-scan v1.0.0 ⭐ 4.2 📥 3,102
|
||||
Capabilities: security, go, supply-chain
|
||||
"Scan Go dependencies for vulnerabilities"
|
||||
```
|
||||
|
||||
### Agent Accountability
|
||||
|
||||
When a polecat completes a formula, the execution is tracked:
|
||||
|
||||
```
|
||||
Polecat: beads/amber
|
||||
Formula: mol-polecat-code-review@1.3.0
|
||||
Completed: 2026-01-10T15:30:00Z
|
||||
Capabilities exercised:
|
||||
- code-review (primary)
|
||||
- security (secondary)
|
||||
- go (secondary)
|
||||
```
|
||||
|
||||
This execution record enables:
|
||||
1. **Routing** - Agents with successful track records get similar work
|
||||
2. **Debugging** - Trace which agent did what, when
|
||||
3. **Quality metrics** - Track success rates by agent and formula
|
||||
|
||||
## Private Registries
|
||||
|
||||
### Enterprise Deployment
|
||||
|
||||
```yaml
|
||||
# ~/.gtconfig.yaml
|
||||
registries:
|
||||
- name: acme
|
||||
url: https://molmall.acme.corp
|
||||
auth: token
|
||||
priority: 1 # Check first
|
||||
|
||||
- name: public
|
||||
url: https://molmall.gastown.io
|
||||
auth: none
|
||||
priority: 2 # Fallback
|
||||
```
|
||||
|
||||
### Self-Hosted Registry
|
||||
|
||||
```bash
|
||||
# Docker deployment
|
||||
docker run -d \
|
||||
-p 8080:8080 \
|
||||
-v /data/formulas:/formulas \
|
||||
-e AUTH_PROVIDER=oidc \
|
||||
gastown/molmall-registry:latest
|
||||
|
||||
# Configuration
|
||||
MOLMALL_STORAGE=s3://bucket/formulas
|
||||
MOLMALL_AUTH=oidc
|
||||
MOLMALL_OIDC_ISSUER=https://auth.acme.corp
|
||||
```
|
||||
|
||||
## Federation
|
||||
|
||||
Federation enables formula sharing across organizations using the Highway Operations Protocol (HOP).
|
||||
|
||||
### Cross-Registry Discovery
|
||||
|
||||
```bash
|
||||
$ gt formula search "deploy kubernetes" --federated
|
||||
|
||||
Searching across federated registries...
|
||||
|
||||
molmall.gastown.io:
|
||||
mol-deploy-k8s v3.0.0 🏛️ Official
|
||||
|
||||
molmall.acme.corp:
|
||||
@acme/mol-deploy-k8s v2.1.0 ✓ Verified
|
||||
|
||||
molmall.bigco.io:
|
||||
@bigco/k8s-workflow v1.0.0 ⚠️ Unverified
|
||||
```
|
||||
|
||||
### HOP URI Resolution
|
||||
|
||||
The `hop://` URI scheme provides cross-registry entity references:
|
||||
|
||||
```bash
|
||||
# Full HOP URI
|
||||
gt formula install hop://molmall.acme.corp/formulas/@acme/mol-deploy@2.1.0
|
||||
|
||||
# Resolution via HOP (Highway Operations Protocol)
|
||||
1. Parse hop:// URI
|
||||
2. Resolve registry endpoint (DNS/HOP discovery)
|
||||
3. Authenticate (if required)
|
||||
4. Download formula
|
||||
5. Verify checksum/signature
|
||||
6. Install to town-level
|
||||
```
|
||||
|
||||
## Implementation Phases
|
||||
|
||||
### Phase 1: Local Commands (Now)
|
||||
|
||||
- `gt formula list` with tier display
|
||||
- `gt formula show --resolve`
|
||||
- Formula resolution order (project → town → system)
|
||||
|
||||
### Phase 2: Manual Sharing
|
||||
|
||||
- Formula export/import
|
||||
- `gt formula export mol-polecat-work > mol-polecat-work.formula.toml`
|
||||
- `gt formula import < mol-polecat-work.formula.toml`
|
||||
- Lock file format
|
||||
|
||||
### Phase 3: Public Registry
|
||||
|
||||
- molmall.gastown.io launch
|
||||
- `gt formula install` from registry
|
||||
- `gt formula publish` flow
|
||||
- Basic search and browse
|
||||
|
||||
### Phase 4: Enterprise Features
|
||||
|
||||
- Private registry support
|
||||
- Authentication integration
|
||||
- Verification levels
|
||||
- Audit logging
|
||||
|
||||
### Phase 5: Federation (HOP)
|
||||
|
||||
- Capability tags in schema
|
||||
- Federation protocol (Highway Operations Protocol)
|
||||
- Cross-registry search
|
||||
- Agent execution tracking for accountability
|
||||
|
||||
## Related Documents
|
||||
|
||||
- [Formula Resolution](formula-resolution.md) - Local resolution order
|
||||
- [molecules.md](molecules.md) - Formula lifecycle (cook, pour, squash)
|
||||
- [understanding-gas-town.md](../../../docs/understanding-gas-town.md) - Gas Town architecture
|
||||
@@ -1,278 +0,0 @@
|
||||
# Operational State in Gas Town
|
||||
|
||||
> Managing runtime state, degraded modes, and the Boot triage system.
|
||||
|
||||
## Overview
|
||||
|
||||
Gas Town needs to track operational state: Is the Deacon's patrol muted? Is the
|
||||
system in degraded mode? When did state change, and why?
|
||||
|
||||
This document covers:
|
||||
- **Events**: State transitions as beads
|
||||
- **Labels-as-state**: Fast queries via role bead labels
|
||||
- **Boot**: The dog that triages the Deacon
|
||||
- **Degraded mode**: Operating without tmux
|
||||
|
||||
## Events: State Transitions as Data
|
||||
|
||||
Operational state changes are recorded as event beads. Each event captures:
|
||||
- **What** changed (`event_type`)
|
||||
- **Who** caused it (`actor`)
|
||||
- **What** was affected (`target`)
|
||||
- **Context** (`payload`)
|
||||
- **When** (`created_at`)
|
||||
|
||||
### Event Types
|
||||
|
||||
| Event Type | Description | Payload |
|
||||
|------------|-------------|---------|
|
||||
| `patrol.muted` | Patrol cycle disabled | `{reason, until?}` |
|
||||
| `patrol.unmuted` | Patrol cycle re-enabled | `{reason?}` |
|
||||
| `agent.started` | Agent session began | `{session_id?}` |
|
||||
| `agent.stopped` | Agent session ended | `{reason, outcome?}` |
|
||||
| `mode.degraded` | System entered degraded mode | `{reason}` |
|
||||
| `mode.normal` | System returned to normal | `{}` |
|
||||
|
||||
### Creating Events
|
||||
|
||||
```bash
|
||||
# Mute deacon patrol
|
||||
bd create --type=event --event-type=patrol.muted \
|
||||
--actor=human:overseer --target=agent:deacon \
|
||||
--payload='{"reason":"fixing convoy deadlock","until":"gt-abc1"}'
|
||||
|
||||
# System entered degraded mode
|
||||
bd create --type=event --event-type=mode.degraded \
|
||||
--actor=system:daemon --target=rig:greenplace \
|
||||
--payload='{"reason":"tmux unavailable"}'
|
||||
```
|
||||
|
||||
### Querying Events
|
||||
|
||||
```bash
|
||||
# Recent events for an agent
|
||||
bd list --type=event --target=agent:deacon --limit=10
|
||||
|
||||
# All patrol state changes
|
||||
bd list --type=event --event-type=patrol.muted
|
||||
bd list --type=event --event-type=patrol.unmuted
|
||||
|
||||
# Events in the activity feed
|
||||
bd activity --follow --type=event
|
||||
```
|
||||
|
||||
## Labels-as-State Pattern
|
||||
|
||||
Events capture the full history. Labels cache the current state for fast queries.
|
||||
|
||||
### Convention
|
||||
|
||||
Labels use `<dimension>:<value>` format:
|
||||
- `patrol:muted` / `patrol:active`
|
||||
- `mode:degraded` / `mode:normal`
|
||||
- `status:idle` / `status:working`
|
||||
|
||||
### State Change Flow
|
||||
|
||||
1. Create event bead (full context, immutable)
|
||||
2. Update role bead labels (current state cache)
|
||||
|
||||
```bash
|
||||
# Mute patrol
|
||||
bd create --type=event --event-type=patrol.muted ...
|
||||
bd update role-deacon --add-label=patrol:muted --remove-label=patrol:active
|
||||
|
||||
# Unmute patrol
|
||||
bd create --type=event --event-type=patrol.unmuted ...
|
||||
bd update role-deacon --add-label=patrol:active --remove-label=patrol:muted
|
||||
```
|
||||
|
||||
### Querying Current State
|
||||
|
||||
```bash
|
||||
# Is deacon patrol muted?
|
||||
bd show role-deacon | grep patrol:
|
||||
|
||||
# All agents with muted patrol
|
||||
bd list --type=role --label=patrol:muted
|
||||
|
||||
# All agents in degraded mode
|
||||
bd list --type=role --label=mode:degraded
|
||||
```
|
||||
|
||||
## Boot: The Deacon's Watchdog
|
||||
|
||||
> See [Watchdog Chain](watchdog-chain.md) for the complete Daemon/Boot/Deacon
|
||||
> architecture and design rationale.
|
||||
|
||||
Boot is a dog (Deacon helper) that triages the Deacon's health. The daemon pokes
|
||||
Boot instead of the Deacon directly, centralizing the "when to wake" decision in
|
||||
an agent that can reason about it.
|
||||
|
||||
### Why Boot?
|
||||
|
||||
The daemon is dumb transport (ZFC principle). It can't decide:
|
||||
- Is the Deacon stuck or just thinking?
|
||||
- Should we interrupt or let it continue?
|
||||
- Is the system in a state where nudging would help?
|
||||
|
||||
Boot is an agent that can observe and decide.
|
||||
|
||||
### Boot's Lifecycle
|
||||
|
||||
```
|
||||
Daemon tick
|
||||
│
|
||||
├── Check: Is Boot already running? (marker file)
|
||||
│ └── Yes + recent: Skip this tick
|
||||
│
|
||||
└── Spawn Boot (fresh session each time)
|
||||
│
|
||||
└── Boot runs triage molecule
|
||||
├── Observe (wisps, mail, git state, tmux panes)
|
||||
├── Decide (start/wake/nudge/interrupt/nothing)
|
||||
├── Act
|
||||
├── Clean inbox (discard stale handoffs)
|
||||
└── Handoff (or exit in degraded mode)
|
||||
```
|
||||
|
||||
### Boot is Always Fresh
|
||||
|
||||
Boot restarts on each daemon tick. This is intentional:
|
||||
- Narrow scope makes restarts cheap
|
||||
- Fresh context avoids accumulated confusion
|
||||
- Handoff mail provides continuity without session persistence
|
||||
- No keepalive needed
|
||||
|
||||
### Boot's Decision Guidance
|
||||
|
||||
Agents may take several minutes on legitimate work - composing artifacts, running
|
||||
tools, or performing deep analysis. Edge cases may take ten minutes or more.
|
||||
|
||||
To assess whether an agent is stuck:
|
||||
1. Check the agent's last reported activity (recent wisps, mail sent, git commits)
|
||||
2. Observe the tmux pane output over a 30-second window
|
||||
3. Look for signs of progress vs. signs of hanging (tool prompt, error loop, silence)
|
||||
|
||||
Agents work in small steps with feedback. Most tasks complete in 2-3 minutes, but
|
||||
the nature of the task matters.
|
||||
|
||||
**Boot's options (increasing disruption):**
|
||||
- Let them continue (if progress is evident)
|
||||
- `gt nudge <agent>` (gentle wake signal)
|
||||
- Escape + chat (interrupt and ask what's happening)
|
||||
- Request process restart (last resort, for true hangs)
|
||||
|
||||
**Common false positives:**
|
||||
- Tool waiting for user confirmation
|
||||
- Long-running test suite
|
||||
- Large file read/write operations
|
||||
|
||||
### Boot's Location
|
||||
|
||||
```
|
||||
~/gt/deacon/dogs/boot/
|
||||
```
|
||||
|
||||
Session name: `gt-boot`
|
||||
|
||||
Created/maintained by `bd doctor`.
|
||||
|
||||
### Boot Commands
|
||||
|
||||
```bash
|
||||
# Check Boot status
|
||||
gt dog status boot
|
||||
|
||||
# Manual Boot run (debugging)
|
||||
gt dog call boot
|
||||
|
||||
# Prime Boot with context
|
||||
gt dog prime boot
|
||||
```
|
||||
|
||||
## Degraded Mode
|
||||
|
||||
Gas Town can operate without tmux, with reduced capabilities.
|
||||
|
||||
### Detection
|
||||
|
||||
The daemon detects degraded mode mechanically and passes it to agents:
|
||||
|
||||
```bash
|
||||
GT_DEGRADED=true # Set by daemon when tmux unavailable
|
||||
```
|
||||
|
||||
Boot and other agents check this environment variable.
|
||||
|
||||
### What Changes in Degraded Mode
|
||||
|
||||
| Capability | Normal | Degraded |
|
||||
|------------|--------|----------|
|
||||
| Observe tmux panes | Yes | No |
|
||||
| Interactive interrupt | Yes | No |
|
||||
| Session management | Full | Limited |
|
||||
| Agent spawn | tmux sessions | Direct spawn |
|
||||
| Boot lifecycle | Handoff | Exit |
|
||||
|
||||
### Agents in Degraded Mode
|
||||
|
||||
In degraded mode, agents:
|
||||
- Cannot observe other agents' pane output
|
||||
- Cannot interactively interrupt stuck agents
|
||||
- Focus on beads/git state observation only
|
||||
- Report anomalies but can't fix interactively
|
||||
|
||||
Boot specifically:
|
||||
- Runs to completion and exits (no handoff)
|
||||
- Limited to: start deacon, file beads, mail overseer
|
||||
- Cannot: observe panes, nudge, interrupt
|
||||
|
||||
### Recording Degraded Mode
|
||||
|
||||
```bash
|
||||
# System entered degraded mode
|
||||
bd create --type=event --event-type=mode.degraded \
|
||||
--actor=system:daemon --target=rig:greenplace \
|
||||
--payload='{"reason":"tmux unavailable"}'
|
||||
|
||||
bd update role-greenplace --add-label=mode:degraded --remove-label=mode:normal
|
||||
```
|
||||
|
||||
## Configuration vs State
|
||||
|
||||
| Type | Storage | Example |
|
||||
|------|---------|---------|
|
||||
| **Static config** | TOML files | Daemon tick interval |
|
||||
| **Operational state** | Beads (events + labels) | Patrol muted |
|
||||
| **Runtime flags** | Marker files | `.deacon-disabled` |
|
||||
|
||||
Static config rarely changes and doesn't need history.
|
||||
Operational state changes at runtime and benefits from audit trail.
|
||||
Marker files are fast checks that can trigger deeper beads queries.
|
||||
|
||||
## Commands Summary
|
||||
|
||||
```bash
|
||||
# Create operational event
|
||||
bd create --type=event --event-type=<type> \
|
||||
--actor=<entity> --target=<entity> --payload='<json>'
|
||||
|
||||
# Update state label
|
||||
bd update <role-bead> --add-label=<dim>:<val> --remove-label=<dim>:<old>
|
||||
|
||||
# Query current state
|
||||
bd list --type=role --label=<dim>:<val>
|
||||
|
||||
# Query state history
|
||||
bd list --type=event --target=<entity>
|
||||
|
||||
# Boot management
|
||||
gt dog status boot
|
||||
gt dog call boot
|
||||
gt dog prime boot
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
*Events are the source of truth. Labels are the cache.*
|
||||
@@ -26,8 +26,8 @@ These roles manage the Gas Town system itself:
|
||||
|
||||
| Role | Description | Lifecycle |
|
||||
|------|-------------|-----------|
|
||||
| **Mayor** | Global coordinator at town root | Singleton, persistent |
|
||||
| **Deacon** | Background supervisor daemon ([watchdog chain](watchdog-chain.md)) | Singleton, persistent |
|
||||
| **Mayor** | Global coordinator at mayor/ | Singleton, persistent |
|
||||
| **Deacon** | Background supervisor daemon ([watchdog chain](design/watchdog-chain.md)) | Singleton, persistent |
|
||||
| **Witness** | Per-rig polecat lifecycle manager | One per rig, persistent |
|
||||
| **Refinery** | Per-rig merge queue processor | One per rig, persistent |
|
||||
|
||||
@@ -37,7 +37,7 @@ These roles do actual project work:
|
||||
|
||||
| Role | Description | Lifecycle |
|
||||
|------|-------------|-----------|
|
||||
| **Polecat** | Ephemeral worker with own worktree | Transient, Witness-managed ([details](polecat-lifecycle.md)) |
|
||||
| **Polecat** | Ephemeral worker with own worktree | Transient, Witness-managed ([details](concepts/polecat-lifecycle.md)) |
|
||||
| **Crew** | Persistent worker with own clone | Long-lived, user-managed |
|
||||
| **Dog** | Deacon helper for infrastructure tasks | Ephemeral, Deacon-managed |
|
||||
|
||||
@@ -64,7 +64,7 @@ gt convoy list
|
||||
- Historical record of completed work (`gt convoy list --all`)
|
||||
|
||||
The "swarm" is ephemeral - just the workers currently assigned to a convoy's issues.
|
||||
When issues close, the convoy lands. See [Convoys](convoy.md) for details.
|
||||
When issues close, the convoy lands. See [Convoys](concepts/convoy.md) for details.
|
||||
|
||||
## Crew vs Polecats
|
||||
|
||||
@@ -1,172 +0,0 @@
|
||||
# Polecat Wisp Architecture
|
||||
|
||||
How polecats use molecules and wisps to execute work in Gas Town.
|
||||
|
||||
## Overview
|
||||
|
||||
Polecats receive work via their hook - a pinned molecule attached to an issue.
|
||||
They execute molecule steps sequentially, closing each step as they complete it.
|
||||
|
||||
## Molecule Types for Polecats
|
||||
|
||||
| Type | Storage | Use Case |
|
||||
|------|---------|----------|
|
||||
| **Regular Molecule** | `.beads/` (synced) | Discrete deliverables, audit trail |
|
||||
| **Wisp** | `.beads/` (ephemeral, type=wisp) | Patrol cycles, operational loops |
|
||||
|
||||
Polecats typically use **regular molecules** because each assignment has audit value.
|
||||
Patrol agents (Witness, Refinery, Deacon) use **wisps** to prevent accumulation.
|
||||
|
||||
## Step Execution
|
||||
|
||||
### The Traditional Approach
|
||||
|
||||
```bash
|
||||
# 1. Check current status
|
||||
gt hook
|
||||
|
||||
# 2. Find next step
|
||||
bd ready --parent=gt-abc
|
||||
|
||||
# 3. Claim the step
|
||||
bd update gt-abc.4 --status=in_progress
|
||||
|
||||
# 4. Do the work...
|
||||
|
||||
# 5. Close the step
|
||||
bd close gt-abc.4
|
||||
|
||||
# 6. Repeat from step 2
|
||||
```
|
||||
|
||||
### The Propulsion Approach
|
||||
|
||||
```bash
|
||||
# 1. Check where you are
|
||||
bd mol current
|
||||
|
||||
# 2. Do the work on current step...
|
||||
|
||||
# 3. Close and advance in one command
|
||||
bd close gt-abc.4 --continue
|
||||
|
||||
# 4. Repeat from step 1
|
||||
```
|
||||
|
||||
The `--continue` flag:
|
||||
- Closes the current step
|
||||
- Finds the next ready step in the same molecule
|
||||
- Auto-marks it `in_progress`
|
||||
- Outputs the transition
|
||||
|
||||
### Example Session
|
||||
|
||||
```bash
|
||||
$ bd mol current
|
||||
You're working on molecule gt-abc (Implement user auth)
|
||||
|
||||
✓ gt-abc.1: Design schema
|
||||
✓ gt-abc.2: Create models
|
||||
→ gt-abc.3: Add endpoints [in_progress] <- YOU ARE HERE
|
||||
○ gt-abc.4: Write tests
|
||||
○ gt-abc.5: Update docs
|
||||
|
||||
Progress: 2/5 steps complete
|
||||
|
||||
$ # ... implement the endpoints ...
|
||||
|
||||
$ bd close gt-abc.3 --continue
|
||||
✓ Closed gt-abc.3: Add endpoints
|
||||
|
||||
Next ready in molecule:
|
||||
gt-abc.4: Write tests
|
||||
|
||||
→ Marked in_progress (use --no-auto to skip)
|
||||
|
||||
$ bd mol current
|
||||
You're working on molecule gt-abc (Implement user auth)
|
||||
|
||||
✓ gt-abc.1: Design schema
|
||||
✓ gt-abc.2: Create models
|
||||
✓ gt-abc.3: Add endpoints
|
||||
→ gt-abc.4: Write tests [in_progress] <- YOU ARE HERE
|
||||
○ gt-abc.5: Update docs
|
||||
|
||||
Progress: 3/5 steps complete
|
||||
```
|
||||
|
||||
## Molecule Completion
|
||||
|
||||
When closing the last step:
|
||||
|
||||
```bash
|
||||
$ bd close gt-abc.5 --continue
|
||||
✓ Closed gt-abc.5: Update docs
|
||||
|
||||
Molecule gt-abc complete! All steps closed.
|
||||
Consider: bd mol squash gt-abc --summary '...'
|
||||
```
|
||||
|
||||
After all steps are closed:
|
||||
|
||||
```bash
|
||||
# Squash to digest for audit trail
|
||||
bd mol squash gt-abc --summary "Implemented user authentication with JWT"
|
||||
|
||||
# Or if it's routine work
|
||||
bd mol burn gt-abc
|
||||
```
|
||||
|
||||
## Hook Management
|
||||
|
||||
### Checking Your Hook
|
||||
|
||||
```bash
|
||||
gt hook
|
||||
```
|
||||
|
||||
Shows what molecule is pinned to your current agent and the associated bead.
|
||||
|
||||
### Attaching Work from Mail
|
||||
|
||||
```bash
|
||||
gt mail inbox
|
||||
gt mol attach-from-mail <mail-id>
|
||||
```
|
||||
|
||||
### Completing Work
|
||||
|
||||
```bash
|
||||
# After all molecule steps closed
|
||||
gt done
|
||||
|
||||
# This:
|
||||
# 1. Syncs beads
|
||||
# 2. Submits to merge queue
|
||||
# 3. Notifies Witness
|
||||
```
|
||||
|
||||
## Polecat Workflow Summary
|
||||
|
||||
```
|
||||
1. Spawn with work on hook
|
||||
2. gt hook # What's hooked?
|
||||
3. bd mol current # Where am I?
|
||||
4. Execute current step
|
||||
5. bd close <step> --continue
|
||||
6. If more steps: GOTO 3
|
||||
7. gt done # Signal completion
|
||||
8. Wait for Witness cleanup
|
||||
```
|
||||
|
||||
## Wisp vs Molecule Decision
|
||||
|
||||
| Question | Molecule | Wisp |
|
||||
|----------|----------|------|
|
||||
| Does it need audit trail? | Yes | No |
|
||||
| Will it repeat continuously? | No | Yes |
|
||||
| Is it discrete deliverable? | Yes | No |
|
||||
| Is it operational routine? | No | Yes |
|
||||
|
||||
Polecats: **Use molecules** (deliverables have audit value)
|
||||
Patrol agents: **Use wisps** (routine loops don't accumulate)
|
||||
@@ -7,24 +7,38 @@ Technical reference for Gas Town internals. Read the README first.
|
||||
```
|
||||
~/gt/ Town root
|
||||
├── .beads/ Town-level beads (hq-* prefix)
|
||||
├── mayor/ Mayor config
|
||||
│ └── town.json
|
||||
├── mayor/ Mayor agent home (town coordinator)
|
||||
│ ├── town.json Town configuration
|
||||
│ ├── CLAUDE.md Mayor context (on disk)
|
||||
│ └── .claude/settings.json Mayor Claude settings
|
||||
├── deacon/ Deacon agent home (background supervisor)
|
||||
│ └── .claude/settings.json Deacon settings (context via gt prime)
|
||||
└── <rig>/ Project container (NOT a git clone)
|
||||
├── config.json Rig identity
|
||||
├── .beads/ → mayor/rig/.beads
|
||||
├── .repo.git/ Bare repo (shared by worktrees)
|
||||
├── mayor/rig/ Mayor's clone (canonical beads)
|
||||
├── refinery/rig/ Worktree on main
|
||||
├── witness/ No clone (monitors only)
|
||||
├── crew/<name>/ Human workspaces
|
||||
└── polecats/<name>/ Worker worktrees
|
||||
│ └── CLAUDE.md Per-rig mayor context (on disk)
|
||||
├── witness/ Witness agent home (monitors only)
|
||||
│ └── .claude/settings.json (context via gt prime)
|
||||
├── refinery/ Refinery settings parent
|
||||
│ ├── .claude/settings.json
|
||||
│ └── rig/ Worktree on main
|
||||
│ └── CLAUDE.md Refinery context (on disk)
|
||||
├── crew/ Crew settings parent (shared)
|
||||
│ ├── .claude/settings.json (context via gt prime)
|
||||
│ └── <name>/rig/ Human workspaces
|
||||
└── polecats/ Polecat settings parent (shared)
|
||||
├── .claude/settings.json (context via gt prime)
|
||||
└── <name>/rig/ Worker worktrees
|
||||
```
|
||||
|
||||
**Key points:**
|
||||
|
||||
- Rig root is a container, not a clone
|
||||
- `.repo.git/` is bare - refinery and polecats are worktrees
|
||||
- Mayor clone holds canonical `.beads/`, others inherit via redirect
|
||||
- Per-rig `mayor/rig/` holds canonical `.beads/`, others inherit via redirect
|
||||
- Settings placed in parent dirs (not git clones) for upward traversal
|
||||
|
||||
## Beads Routing
|
||||
|
||||
@@ -75,6 +89,58 @@ Debug routing: `BD_DEBUG_ROUTING=1 bd show <id>`
|
||||
|
||||
Process state, PIDs, ephemeral data.
|
||||
|
||||
### Rig-Level Configuration
|
||||
|
||||
Rigs support layered configuration through:
|
||||
1. **Wisp layer** (`.beads-wisp/config/`) - transient, local overrides
|
||||
2. **Rig identity bead labels** - persistent rig settings
|
||||
3. **Town defaults** (`~/gt/settings/config.json`)
|
||||
4. **System defaults** - compiled-in fallbacks
|
||||
|
||||
#### Polecat Branch Naming
|
||||
|
||||
Configure custom branch name templates for polecats:
|
||||
|
||||
```bash
|
||||
# Set via wisp (transient - for testing)
|
||||
echo '{"polecat_branch_template": "adam/{year}/{month}/{description}"}' > \
|
||||
~/gt/.beads-wisp/config/myrig.json
|
||||
|
||||
# Or set via rig identity bead labels (persistent)
|
||||
bd update gt-rig-myrig --labels="polecat_branch_template:adam/{year}/{month}/{description}"
|
||||
```
|
||||
|
||||
**Template Variables:**
|
||||
|
||||
| Variable | Description | Example |
|
||||
|----------|-------------|---------|
|
||||
| `{user}` | From `git config user.name` | `adam` |
|
||||
| `{year}` | Current year (YY format) | `26` |
|
||||
| `{month}` | Current month (MM format) | `01` |
|
||||
| `{name}` | Polecat name | `alpha` |
|
||||
| `{issue}` | Issue ID without prefix | `123` (from `gt-123`) |
|
||||
| `{description}` | Sanitized issue title | `fix-auth-bug` |
|
||||
| `{timestamp}` | Unique timestamp | `1ks7f9a` |
|
||||
|
||||
**Default Behavior (backward compatible):**
|
||||
|
||||
When `polecat_branch_template` is empty or not set:
|
||||
- With issue: `polecat/{name}/{issue}@{timestamp}`
|
||||
- Without issue: `polecat/{name}-{timestamp}`
|
||||
|
||||
**Example Configurations:**
|
||||
|
||||
```bash
|
||||
# GitHub enterprise format
|
||||
"adam/{year}/{month}/{description}"
|
||||
|
||||
# Simple feature branches
|
||||
"feature/{issue}"
|
||||
|
||||
# Include polecat name for clarity
|
||||
"work/{name}/{issue}"
|
||||
```
|
||||
|
||||
## Formula Format
|
||||
|
||||
```toml
|
||||
@@ -192,17 +258,177 @@ gt mol step done <step> # Complete a molecule step
|
||||
|
||||
## Environment Variables
|
||||
|
||||
Gas Town sets environment variables for each agent session via `config.AgentEnv()`.
|
||||
These are set in tmux session environment when agents are spawned.
|
||||
|
||||
### Core Variables (All Agents)
|
||||
|
||||
| Variable | Purpose | Example |
|
||||
|----------|---------|---------|
|
||||
| `GT_ROLE` | Agent role type | `mayor`, `witness`, `polecat`, `crew` |
|
||||
| `GT_ROOT` | Town root directory | `/home/user/gt` |
|
||||
| `BD_ACTOR` | Agent identity for attribution | `gastown/polecats/toast` |
|
||||
| `GIT_AUTHOR_NAME` | Commit attribution (same as BD_ACTOR) | `gastown/polecats/toast` |
|
||||
| `BEADS_DIR` | Beads database location | `/home/user/gt/gastown/.beads` |
|
||||
|
||||
### Rig-Level Variables
|
||||
|
||||
| Variable | Purpose | Roles |
|
||||
|----------|---------|-------|
|
||||
| `GT_RIG` | Rig name | witness, refinery, polecat, crew |
|
||||
| `GT_POLECAT` | Polecat worker name | polecat only |
|
||||
| `GT_CREW` | Crew worker name | crew only |
|
||||
| `BEADS_AGENT_NAME` | Agent name for beads operations | polecat, crew |
|
||||
| `BEADS_NO_DAEMON` | Disable beads daemon (isolated context) | polecat, crew |
|
||||
|
||||
### Other Variables
|
||||
|
||||
| Variable | Purpose |
|
||||
|----------|---------|
|
||||
| `BD_ACTOR` | Agent identity for attribution (see [identity.md](identity.md)) |
|
||||
| `BEADS_DIR` | Point to shared beads database |
|
||||
| `BEADS_NO_DAEMON` | Required for worktree polecats |
|
||||
| `GIT_AUTHOR_NAME` | Set to BD_ACTOR for commit attribution |
|
||||
| `GIT_AUTHOR_EMAIL` | Workspace owner email |
|
||||
| `GT_TOWN_ROOT` | Override town root detection |
|
||||
| `GT_ROLE` | Agent role type (mayor, polecat, etc.) |
|
||||
| `GT_RIG` | Rig name for rig-level agents |
|
||||
| `GT_POLECAT` | Polecat name (for polecats only) |
|
||||
| `GIT_AUTHOR_EMAIL` | Workspace owner email (from git config) |
|
||||
| `GT_TOWN_ROOT` | Override town root detection (manual use) |
|
||||
| `CLAUDE_RUNTIME_CONFIG_DIR` | Custom Claude settings directory |
|
||||
|
||||
### Environment by Role
|
||||
|
||||
| Role | Key Variables |
|
||||
|------|---------------|
|
||||
| **Mayor** | `GT_ROLE=mayor`, `BD_ACTOR=mayor` |
|
||||
| **Deacon** | `GT_ROLE=deacon`, `BD_ACTOR=deacon` |
|
||||
| **Boot** | `GT_ROLE=boot`, `BD_ACTOR=deacon-boot` |
|
||||
| **Witness** | `GT_ROLE=witness`, `GT_RIG=<rig>`, `BD_ACTOR=<rig>/witness` |
|
||||
| **Refinery** | `GT_ROLE=refinery`, `GT_RIG=<rig>`, `BD_ACTOR=<rig>/refinery` |
|
||||
| **Polecat** | `GT_ROLE=polecat`, `GT_RIG=<rig>`, `GT_POLECAT=<name>`, `BD_ACTOR=<rig>/polecats/<name>` |
|
||||
| **Crew** | `GT_ROLE=crew`, `GT_RIG=<rig>`, `GT_CREW=<name>`, `BD_ACTOR=<rig>/crew/<name>` |
|
||||
|
||||
### Doctor Check
|
||||
|
||||
The `gt doctor` command verifies that running tmux sessions have correct
|
||||
environment variables. Mismatches are reported as warnings:
|
||||
|
||||
```
|
||||
⚠ env-vars: Found 3 env var mismatch(es) across 1 session(s)
|
||||
hq-mayor: missing GT_ROOT (expected "/home/user/gt")
|
||||
```
|
||||
|
||||
Fix by restarting sessions: `gt shutdown && gt up`
|
||||
|
||||
## Agent Working Directories and Settings
|
||||
|
||||
Each agent runs in a specific working directory and has its own Claude settings.
|
||||
Understanding this hierarchy is essential for proper configuration.
|
||||
|
||||
### Working Directories by Role
|
||||
|
||||
| Role | Working Directory | Notes |
|
||||
|------|-------------------|-------|
|
||||
| **Mayor** | `~/gt/mayor/` | Town-level coordinator, isolated from rigs |
|
||||
| **Deacon** | `~/gt/deacon/` | Background supervisor daemon |
|
||||
| **Witness** | `~/gt/<rig>/witness/` | No git clone, monitors polecats only |
|
||||
| **Refinery** | `~/gt/<rig>/refinery/rig/` | Worktree on main branch |
|
||||
| **Crew** | `~/gt/<rig>/crew/<name>/rig/` | Persistent human workspace clone |
|
||||
| **Polecat** | `~/gt/<rig>/polecats/<name>/rig/` | Ephemeral worker worktree |
|
||||
|
||||
Note: The per-rig `<rig>/mayor/rig/` directory is NOT a working directory—it's
|
||||
a git clone that holds the canonical `.beads/` database for that rig.
|
||||
|
||||
### Settings File Locations
|
||||
|
||||
Claude Code searches for `.claude/settings.json` starting from the working
|
||||
directory and traversing upward. Settings are placed in **parent directories**
|
||||
(not inside git clones) so they're found via directory traversal without
|
||||
polluting source repositories:
|
||||
|
||||
```
|
||||
~/gt/
|
||||
├── mayor/.claude/settings.json # Mayor settings
|
||||
├── deacon/.claude/settings.json # Deacon settings
|
||||
└── <rig>/
|
||||
├── witness/.claude/settings.json # Witness settings (no rig/ subdir)
|
||||
├── refinery/.claude/settings.json # Found by refinery/rig/ via traversal
|
||||
├── crew/.claude/settings.json # Shared by all crew/<name>/rig/
|
||||
└── polecats/.claude/settings.json # Shared by all polecats/<name>/rig/
|
||||
```
|
||||
|
||||
**Why parent directories?** Agents working in git clones (like `refinery/rig/`)
|
||||
would pollute the source repo if settings were placed there. By putting settings
|
||||
one level up, Claude finds them via upward traversal, and all workers of the
|
||||
same type share the same settings.
|
||||
|
||||
### CLAUDE.md Locations
|
||||
|
||||
Role context is delivered via CLAUDE.md files or ephemeral injection:
|
||||
|
||||
| Role | CLAUDE.md Location | Method |
|
||||
|------|-------------------|--------|
|
||||
| **Mayor** | `~/gt/mayor/CLAUDE.md` | On disk |
|
||||
| **Deacon** | (none) | Injected via `gt prime` at SessionStart |
|
||||
| **Witness** | (none) | Injected via `gt prime` at SessionStart |
|
||||
| **Refinery** | `<rig>/refinery/rig/CLAUDE.md` | On disk (inside worktree) |
|
||||
| **Crew** | (none) | Injected via `gt prime` at SessionStart |
|
||||
| **Polecat** | (none) | Injected via `gt prime` at SessionStart |
|
||||
|
||||
Additionally, each rig has `<rig>/mayor/rig/CLAUDE.md` for the per-rig mayor clone
|
||||
(used for beads operations, not a running agent).
|
||||
|
||||
**Why ephemeral injection?** Writing CLAUDE.md into git clones would:
|
||||
1. Pollute source repos when agents commit/push
|
||||
2. Leak Gas Town internals into project history
|
||||
3. Conflict with project-specific CLAUDE.md files
|
||||
|
||||
The `gt prime` command runs at SessionStart hook and injects context without
|
||||
persisting it to disk.
|
||||
|
||||
### Sparse Checkout (Source Repo Isolation)
|
||||
|
||||
When agents work on source repositories that have their own Claude Code configuration,
|
||||
Gas Town uses git sparse checkout to exclude all context files:
|
||||
|
||||
```bash
|
||||
# Automatically configured for worktrees - excludes:
|
||||
# - .claude/ : settings, rules, agents, commands
|
||||
# - CLAUDE.md : primary context file
|
||||
# - CLAUDE.local.md: personal context file
|
||||
# - .mcp.json : MCP server configuration
|
||||
git sparse-checkout set --no-cone '/*' '!/.claude/' '!/CLAUDE.md' '!/CLAUDE.local.md' '!/.mcp.json'
|
||||
```
|
||||
|
||||
This ensures agents use Gas Town's context, not the source repo's instructions.
|
||||
|
||||
**Doctor check**: `gt doctor` verifies sparse checkout is configured correctly.
|
||||
Run `gt doctor --fix` to update legacy configurations missing the newer patterns.
|
||||
|
||||
### Settings Inheritance
|
||||
|
||||
Claude Code's settings search order (first match wins):
|
||||
|
||||
1. `.claude/settings.json` in current working directory
|
||||
2. `.claude/settings.json` in parent directories (traversing up)
|
||||
3. `~/.claude/settings.json` (user global settings)
|
||||
|
||||
Gas Town places settings at each agent's working directory root, so agents
|
||||
find their role-specific settings before reaching any parent or global config.
|
||||
|
||||
### Settings Templates
|
||||
|
||||
Gas Town uses two settings templates based on role type:
|
||||
|
||||
| Type | Roles | Key Difference |
|
||||
|------|-------|----------------|
|
||||
| **Interactive** | Mayor, Crew | Mail injected on `UserPromptSubmit` hook |
|
||||
| **Autonomous** | Polecat, Witness, Refinery, Deacon | Mail injected on `SessionStart` hook |
|
||||
|
||||
Autonomous agents may start without user input, so they need mail checked
|
||||
at session start. Interactive agents wait for user prompts.
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
| Problem | Solution |
|
||||
|---------|----------|
|
||||
| Agent using wrong settings | Check `gt doctor`, verify sparse checkout |
|
||||
| Settings not found | Ensure `.claude/settings.json` exists at role home |
|
||||
| Source repo settings leaking | Run `gt doctor --fix` to configure sparse checkout |
|
||||
| Mayor settings affecting polecats | Mayor should run in `mayor/`, not town root |
|
||||
|
||||
## CLI Reference
|
||||
|
||||
@@ -228,15 +454,56 @@ gt config agent remove <name> # Remove custom agent (built-ins protected)
|
||||
gt config default-agent [name] # Get or set town default agent
|
||||
```
|
||||
|
||||
**Built-in agents**: `claude`, `gemini`, `codex`
|
||||
**Built-in agents**: `claude`, `gemini`, `codex`, `cursor`, `auggie`, `amp`
|
||||
|
||||
**Custom agents**: Define per-town in `mayor/town.json`:
|
||||
**Custom agents**: Define per-town via CLI or JSON:
|
||||
```bash
|
||||
gt config agent set claude-glm "claude-glm --model glm-4"
|
||||
gt config agent set claude "claude-opus" # Override built-in
|
||||
gt config default-agent claude-glm # Set default
|
||||
```
|
||||
|
||||
**Advanced agent config** (`settings/agents.json`):
|
||||
```json
|
||||
{
|
||||
"version": 1,
|
||||
"agents": {
|
||||
"opencode": {
|
||||
"command": "opencode",
|
||||
"args": [],
|
||||
"resume_flag": "--session",
|
||||
"resume_style": "flag",
|
||||
"non_interactive": {
|
||||
"subcommand": "run",
|
||||
"output_flag": "--format json"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Rig-level agents** (`<rig>/settings/config.json`):
|
||||
```json
|
||||
{
|
||||
"type": "rig-settings",
|
||||
"version": 1,
|
||||
"agent": "opencode",
|
||||
"agents": {
|
||||
"opencode": {
|
||||
"command": "opencode",
|
||||
"args": ["--session"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Agent resolution order**: rig-level → town-level → built-in presets.
|
||||
|
||||
For OpenCode autonomous mode, set env var in your shell profile:
|
||||
```bash
|
||||
export OPENCODE_PERMISSION='{"*":"allow"}'
|
||||
```
|
||||
|
||||
### Rig Management
|
||||
|
||||
```bash
|
||||
@@ -256,7 +523,7 @@ gt convoy list --all # Include landed convoys
|
||||
gt convoy list --status=closed # Only landed convoys
|
||||
```
|
||||
|
||||
Note: "Swarm" is ephemeral (workers on a convoy's issues). See [Convoys](convoy.md).
|
||||
Note: "Swarm" is ephemeral (workers on a convoy's issues). See [Convoys](concepts/convoy.md).
|
||||
|
||||
### Work Assignment
|
||||
|
||||
@@ -264,12 +531,19 @@ Note: "Swarm" is ephemeral (workers on a convoy's issues). See [Convoys](convoy.
|
||||
# Standard workflow: convoy first, then sling
|
||||
gt convoy create "Feature X" gt-abc gt-def
|
||||
gt sling gt-abc <rig> # Assign to polecat
|
||||
gt sling gt-def <rig> --molecule=<proto> # With workflow template
|
||||
gt sling gt-abc <rig> --agent codex # Override runtime for this sling/spawn
|
||||
gt sling <proto> --on gt-def <rig> # With workflow template
|
||||
|
||||
# Quick sling (auto-creates convoy)
|
||||
gt sling <bead> <rig> # Auto-convoy for dashboard visibility
|
||||
```
|
||||
|
||||
Agent overrides:
|
||||
|
||||
- `gt start --agent <alias>` overrides the Mayor/Deacon runtime for this launch.
|
||||
- `gt mayor start|attach|restart --agent <alias>` and `gt deacon start|attach|restart --agent <alias>` do the same.
|
||||
- `gt start crew <name> --agent <alias>` and `gt crew at <name> --agent <alias>` override the crew worker runtime.
|
||||
|
||||
### Communication
|
||||
|
||||
```bash
|
||||
@@ -288,7 +562,7 @@ gt escalate -s HIGH "msg" # Important blocker
|
||||
gt escalate -s MEDIUM "msg" -m "Details..."
|
||||
```
|
||||
|
||||
See [escalation.md](escalation.md) for full protocol.
|
||||
See [escalation.md](design/escalation.md) for full protocol.
|
||||
|
||||
### Sessions
|
||||
|
||||
@@ -323,6 +597,24 @@ gt stop --all # Kill all sessions
|
||||
gt stop --rig <name> # Kill rig sessions
|
||||
```
|
||||
|
||||
### Health Check
|
||||
|
||||
```bash
|
||||
gt deacon health-check <agent> # Send health check ping, track response
|
||||
gt deacon health-state # Show health check state for all agents
|
||||
```
|
||||
|
||||
### Merge Queue (MQ)
|
||||
|
||||
```bash
|
||||
gt mq list [rig] # Show the merge queue
|
||||
gt mq next [rig] # Show highest-priority merge request
|
||||
gt mq submit # Submit current branch to merge queue
|
||||
gt mq status <id> # Show detailed merge request status
|
||||
gt mq retry <id> # Retry a failed merge request
|
||||
gt mq reject <id> # Reject a merge request
|
||||
```
|
||||
|
||||
## Beads Commands (bd)
|
||||
|
||||
```bash
|
||||
@@ -389,4 +681,4 @@ bd mol bond mol-security-scan $PATROL_ID --var scope="$SCOPE"
|
||||
|
||||
**Nondeterministic idempotence**: Any worker can continue any molecule. Steps are atomic checkpoints in beads.
|
||||
|
||||
**Convoy tracking**: Convoys track batched work across rigs. A "swarm" is ephemeral - just the workers currently on a convoy's issues. See [Convoys](convoy.md) for details.
|
||||
**Convoy tracking**: Convoys track batched work across rigs. A "swarm" is ephemeral - just the workers currently on a convoy's issues. See [Convoys](concepts/convoy.md) for details.
|
||||
|
||||
@@ -1,74 +0,0 @@
|
||||
# Swarm (Ephemeral Worker View)
|
||||
|
||||
> **Note**: "Swarm" is an ephemeral concept, not a persistent entity.
|
||||
> For tracking work, see [Convoys](convoy.md).
|
||||
|
||||
## What is a Swarm?
|
||||
|
||||
A **swarm** is simply "the workers currently assigned to a convoy's issues."
|
||||
It has no separate ID and no persistent state - it's just a view of active workers.
|
||||
|
||||
| Concept | Persistent? | ID | Description |
|
||||
|---------|-------------|-----|-------------|
|
||||
| **Convoy** | Yes | hq-* | The tracking unit. What you create and track. |
|
||||
| **Swarm** | No | None | The workers. Ephemeral view of who's working. |
|
||||
|
||||
## The Relationship
|
||||
|
||||
```
|
||||
Convoy hq-abc ─────────tracks───────────► Issues
|
||||
│
|
||||
│ assigned to
|
||||
▼
|
||||
Polecats
|
||||
│
|
||||
────────┴────────
|
||||
"the swarm"
|
||||
(ephemeral)
|
||||
```
|
||||
|
||||
When you say "kick off a swarm," you're really:
|
||||
1. Creating a convoy (persistent tracking)
|
||||
2. Assigning polecats to the convoy's issues
|
||||
3. The swarm = those polecats while they work
|
||||
|
||||
When the work completes, the convoy lands and the swarm dissolves.
|
||||
|
||||
## Viewing the Swarm
|
||||
|
||||
The swarm appears in convoy status:
|
||||
|
||||
```bash
|
||||
gt convoy status hq-abc
|
||||
```
|
||||
|
||||
```
|
||||
Convoy: hq-abc (Deploy v2.0)
|
||||
════════════════════════════
|
||||
|
||||
Progress: 2/3 complete
|
||||
|
||||
Issues
|
||||
✓ gt-xyz: Update API closed
|
||||
→ bd-ghi: Update docs in_progress @beads/amber
|
||||
○ gt-jkl: Final review open
|
||||
|
||||
Workers (the swarm) ← this is the swarm
|
||||
beads/amber bd-ghi running 12m
|
||||
```
|
||||
|
||||
## Historical Note
|
||||
|
||||
Earlier Gas Town development used "swarm" as if it were a persistent entity
|
||||
with its own lifecycle. The `gt swarm` commands were built on this model.
|
||||
|
||||
The correct model is:
|
||||
- **Convoy** = the persistent tracking unit (what `gt swarm` was trying to be)
|
||||
- **Swarm** = ephemeral workers (no separate tracking needed)
|
||||
|
||||
The `gt swarm` command is being deprecated in favor of `gt convoy`.
|
||||
|
||||
## See Also
|
||||
|
||||
- [Convoys](convoy.md) - The persistent tracking unit
|
||||
- [Propulsion Principle](propulsion-principle.md) - Worker execution model
|
||||
@@ -1,154 +0,0 @@
|
||||
# Test Coverage and Quality Review
|
||||
|
||||
**Reviewed by**: polecat/gus
|
||||
**Date**: 2026-01-04
|
||||
**Issue**: gt-a02fj.9
|
||||
|
||||
## Executive Summary
|
||||
|
||||
- **80 test files** covering **32 out of 42 packages** (76% package coverage)
|
||||
- **631 test functions** with 192 subtests (30% use table-driven pattern)
|
||||
- **10 packages** with **0 test coverage** (2,452 lines)
|
||||
- **1 confirmed flaky test** candidate
|
||||
- Test quality is generally good with moderate mocking
|
||||
|
||||
---
|
||||
|
||||
## Coverage Gap Inventory
|
||||
|
||||
### Packages Without Tests (Priority Order)
|
||||
|
||||
| Priority | Package | Lines | Risk | Notes |
|
||||
|----------|---------|-------|------|-------|
|
||||
| **P0** | `internal/lock` | 402 | **CRITICAL** | Multi-agent lock management. Bugs cause worker collisions. Already has `execCommand` mockable for testing. |
|
||||
| **P1** | `internal/events` | 295 | HIGH | Event bus for audit trail. Mutex-protected writes. Core observability. |
|
||||
| **P1** | `internal/boot` | 242 | HIGH | Boot watchdog lifecycle. Spawns tmux sessions. |
|
||||
| **P1** | `internal/checkpoint` | 216 | HIGH | Session crash recovery. Critical for polecat continuity. |
|
||||
| **P2** | `internal/tui/convoy` | 601 | MEDIUM | TUI component. Harder to test but user-facing. |
|
||||
| **P2** | `internal/constants` | 221 | LOW | Mostly configuration constants. Low behavioral risk. |
|
||||
| **P3** | `internal/style` | 331 | LOW | Output formatting. Visual only. |
|
||||
| **P3** | `internal/claude` | 80 | LOW | Claude settings parsing. |
|
||||
| **P3** | `internal/wisp` | 52 | LOW | Ephemeral molecule I/O. Small surface. |
|
||||
| **P4** | `cmd/gt` | 12 | TRIVIAL | Main entry point. Minimal code. |
|
||||
|
||||
**Total untested lines**: 2,452
|
||||
|
||||
---
|
||||
|
||||
## Flaky Test Candidates
|
||||
|
||||
### Confirmed: `internal/feed/curator_test.go`
|
||||
|
||||
**Issue**: Uses `time.Sleep()` for synchronization (lines 59, 71, 119, 138)
|
||||
|
||||
```go
|
||||
// Give curator time to start
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
...
|
||||
// Wait for processing
|
||||
time.Sleep(300 * time.Millisecond)
|
||||
```
|
||||
|
||||
**Risk**: Flaky under load, CI delays, or slow machines.
|
||||
|
||||
**Fix**: Replace with channel-based synchronization, or polling with a timeout that fails the test explicitly if the condition is never met (otherwise the timeout silently hides the same flakiness):
```go
// Wait for condition with timeout; fail loudly on expiry
deadline := time.Now().Add(time.Second)
for !conditionMet() {
	if time.Now().After(deadline) {
		t.Fatal("timed out waiting for condition")
	}
	time.Sleep(10 * time.Millisecond)
}
```
|
||||
|
||||
---
|
||||
|
||||
## Test Quality Analysis
|
||||
|
||||
### Strengths
|
||||
|
||||
1. **Table-driven tests**: 30% of tests use `t.Run()` (192/631)
|
||||
2. **Good isolation**: Only 2 package-level test variables
|
||||
3. **Dedicated integration tests**: 15 files with explicit integration/e2e naming
|
||||
4. **Error handling**: 316 uses of `if err != nil` in tests
|
||||
5. **No random data**: No `rand.` usage in tests (deterministic)
|
||||
6. **Environment safety**: Uses `t.Setenv()` for clean env var handling
|
||||
|
||||
### Areas for Improvement
|
||||
|
||||
1. **`testing.Short()`**: Only 1 usage. Long-running tests should check this.
|
||||
2. **External dependencies**: 26 tests skip when `bd` or `tmux` unavailable - consider mocking more.
|
||||
3. **time.Sleep usage**: Found in `curator_test.go` - should be eliminated.
|
||||
|
||||
---
|
||||
|
||||
## Test Smells (Minor)
|
||||
|
||||
| Smell | Location | Severity | Notes |
|
||||
|-------|----------|----------|-------|
|
||||
| Sleep-based sync | `feed/curator_test.go` | HIGH | See flaky section |
|
||||
| External dep skips | Multiple files | LOW | Reasonable for integration tests |
|
||||
| Skip-heavy file | `tmux/tmux_test.go` | LOW | Acceptable - tmux not always available |
|
||||
|
||||
---
|
||||
|
||||
## Priority List for New Tests
|
||||
|
||||
### Immediate (P0)
|
||||
|
||||
1. **`internal/lock`** - Critical path
|
||||
- Test `Acquire()` with stale lock cleanup
|
||||
- Test `Check()` with live/dead PIDs
|
||||
- Test `CleanStaleLocks()` with mock tmux sessions
|
||||
- Test `DetectCollisions()`
|
||||
- Test concurrent lock acquisition (race detection)
|
||||
|
||||
### High Priority (P1)
|
||||
|
||||
2. **`internal/events`**
|
||||
- Test `Log()` file creation and append
|
||||
- Test `write()` mutex behavior
|
||||
- Test payload helpers
|
||||
- Test graceful handling when not in workspace
|
||||
|
||||
3. **`internal/boot`**
|
||||
- Test `IsRunning()` with stale markers
|
||||
- Test `AcquireLock()` / `ReleaseLock()` cycle
|
||||
- Test `SaveStatus()` / `LoadStatus()` round-trip
|
||||
- Test degraded mode path
|
||||
|
||||
4. **`internal/checkpoint`**
|
||||
- Test `Read()` / `Write()` round-trip
|
||||
- Test `Capture()` git state extraction
|
||||
- Test `IsStale()` with various durations
|
||||
- Test `Summary()` output
|
||||
|
||||
### Medium Priority (P2)
|
||||
|
||||
5. **`internal/tui/convoy`** - Consider golden file tests for view output
|
||||
6. **`internal/constants`** - Test any validation logic
|
||||
|
||||
---
|
||||
|
||||
## Missing Test Types
|
||||
|
||||
| Type | Current State | Recommendation |
|
||||
|------|--------------|----------------|
|
||||
| Unit tests | Good coverage where present | Add for P0-P1 packages |
|
||||
| Integration tests | 15 dedicated files | Adequate |
|
||||
| E2E tests | `browser_e2e_test.go` | Consider more CLI E2E |
|
||||
| Fuzz tests | None | Consider for parsers (`formula/parser.go`) |
|
||||
| Benchmark tests | None visible | Add for hot paths (`lock`, `events`) |
|
||||
|
||||
---
|
||||
|
||||
## Actionable Next Steps
|
||||
|
||||
1. **Fix flaky test**: Refactor `feed/curator_test.go` to use channels/polling
|
||||
2. **Add lock tests**: Highest priority - bugs here break multi-agent
|
||||
3. **Add events tests**: Core observability must be tested
|
||||
4. **Add checkpoint tests**: Session recovery is critical path
|
||||
5. **Run with race detector**: `go test -race ./...` to catch data races
|
||||
6. **Consider `-short` flag**: Add `testing.Short()` checks to slow tests
|
||||
@@ -1,372 +0,0 @@
|
||||
# Wisp Squash Design: Cadences, Rules, Templates
|
||||
|
||||
Design specification for how wisps squash to digests in Gas Town.
|
||||
|
||||
## Problem Statement
|
||||
|
||||
Wisps are ephemeral molecules that need to be condensed into digests for:
|
||||
- **Audit trail**: What happened, when, by whom
|
||||
- **Activity feed**: Observable progress in the capability ledger
|
||||
- **Space efficiency**: Ephemeral data doesn't accumulate indefinitely
|
||||
|
||||
Currently under-designed:
|
||||
- **Cadences**: When should squash happen?
|
||||
- **Templates**: What should digests contain?
|
||||
- **Retention**: How long to keep, when to aggregate?
|
||||
|
||||
## Squash Cadences
|
||||
|
||||
### Patrol Wisps (Deacon, Witness, Refinery)
|
||||
|
||||
**Trigger**: End of each patrol cycle
|
||||
|
||||
```
|
||||
patrol-start → steps → loop-or-exit step → squash → new wisp
|
||||
```
|
||||
|
||||
| Decision Point | Action |
|
||||
|----------------|--------|
|
||||
| `loop-or-exit` with low context | Squash current wisp, create new wisp |
|
||||
| `loop-or-exit` with high context | Squash current wisp, handoff |
|
||||
| Extraordinary action | Squash immediately, handoff |
|
||||
|
||||
**Rationale**: Each patrol cycle is a logical unit. Squashing per-cycle keeps
|
||||
digests meaningful and prevents context-filling sessions from losing history.
|
||||
|
||||
### Work Wisps (Polecats)
|
||||
|
||||
**Trigger**: Before `gt done` or molecule completion
|
||||
|
||||
```
|
||||
work-assigned → steps → all-complete → squash → gt done → merge queue
|
||||
```
|
||||
|
||||
Polecats typically use regular molecules (not wisps), but when wisps are used
|
||||
for exploratory work:
|
||||
|
||||
| Scenario | Action |
|
||||
|----------|--------|
|
||||
| Molecule completes | Squash to digest |
|
||||
| Molecule abandoned | Burn (no digest) |
|
||||
| Molecule handed off | Squash, include handoff context |
|
||||
|
||||
### Time-Based Cadences (Future)
|
||||
|
||||
For long-running molecules that span multiple sessions:
|
||||
|
||||
| Duration | Action |
|
||||
|----------|--------|
|
||||
| Session ends | Auto-squash if molecule in progress |
|
||||
| > 24 hours | Create checkpoint digest |
|
||||
| > 7 days | Warning: stale molecule |
|
||||
|
||||
**Not implemented initially** - simplicity first.
|
||||
|
||||
## Summary Templates
|
||||
|
||||
### Template Structure
|
||||
|
||||
Digests have three sections:
|
||||
1. **Header**: Standard metadata (who, what, when)
|
||||
2. **Body**: Context-specific content (from template)
|
||||
3. **Footer**: System metrics (steps, duration, commit refs)
|
||||
|
||||
### Patrol Digest Template
|
||||
|
||||
```markdown
|
||||
## Patrol Digest: {{.Agent}}
|
||||
|
||||
**Cycle**: {{.CycleNumber}} | **Duration**: {{.Duration}}
|
||||
|
||||
### Actions Taken
|
||||
{{range .Actions}}
|
||||
- {{.Icon}} {{.Description}}
|
||||
{{end}}
|
||||
|
||||
### Issues Filed
|
||||
{{range .IssuesFiled}}
|
||||
- {{.ID}}: {{.Title}}
|
||||
{{end}}
|
||||
|
||||
### Metrics
|
||||
- Inbox: {{.InboxCount}} messages processed
|
||||
- Health checks: {{.HealthChecks}}
|
||||
- Alerts: {{.AlertCount}}
|
||||
```
|
||||
|
||||
### Work Digest Template
|
||||
|
||||
```markdown
|
||||
## Work Digest: {{.IssueTitle}}
|
||||
|
||||
**Issue**: {{.IssueID}} | **Agent**: {{.Agent}} | **Duration**: {{.Duration}}
|
||||
|
||||
### Summary
|
||||
{{.Summary}}
|
||||
|
||||
### Steps Completed
|
||||
{{range .Steps}}
|
||||
- [{{.Status}}] {{.Title}}
|
||||
{{end}}
|
||||
|
||||
### Artifacts
|
||||
- Commits: {{range .Commits}}{{.Short}}, {{end}}
|
||||
- Files changed: {{.FilesChanged}}
|
||||
- Lines: +{{.LinesAdded}} -{{.LinesRemoved}}
|
||||
```
|
||||
|
||||
### Formula-Defined Templates
|
||||
|
||||
Formulas can define custom squash templates in `[squash]` section:
|
||||
|
||||
```toml
|
||||
formula = "mol-my-workflow"
|
||||
version = 1
|
||||
|
||||
[squash]
|
||||
template = """
|
||||
## {{.Title}} Complete
|
||||
|
||||
Duration: {{.Duration}}
|
||||
Key metrics:
|
||||
{{range .Steps}}
|
||||
- {{.ID}}: {{.CustomField}}
|
||||
{{end}}
|
||||
"""
|
||||
|
||||
# Template variables from step outputs
|
||||
[squash.vars]
|
||||
include_metrics = true
|
||||
summary_length = "short" # short | medium | detailed
|
||||
```
|
||||
|
||||
**Resolution order**:
|
||||
1. Formula-defined template (if present)
|
||||
2. Type-specific default (patrol vs work)
|
||||
3. Minimal fallback (current behavior)
|
||||
|
||||
## Retention Rules
|
||||
|
||||
### Digest Lifecycle
|
||||
|
||||
```
|
||||
Wisp → Squash → Digest (active) → Digest (archived) → Rollup
|
||||
```
|
||||
|
||||
| Phase | Duration | Storage |
|
||||
|-------|----------|---------|
|
||||
| Active | 30 days | `.beads/issues.jsonl` |
|
||||
| Archived | 1 year | `.beads/archive/` (compressed) |
|
||||
| Rollup | Permanent | Weekly/monthly summaries |
|
||||
|
||||
### Rollup Strategy
|
||||
|
||||
After retention period, digests aggregate into rollups:
|
||||
|
||||
**Weekly Patrol Rollup**:
|
||||
```markdown
|
||||
## Week of {{.WeekStart}}
|
||||
|
||||
| Agent | Cycles | Issues Filed | Merges | Incidents |
|
||||
|-------|--------|--------------|--------|-----------|
|
||||
| Deacon | 140 | 3 | - | 0 |
|
||||
| Witness | 168 | 12 | - | 2 |
|
||||
| Refinery | 84 | 0 | 47 | 1 |
|
||||
```
|
||||
|
||||
**Monthly Work Rollup**:
|
||||
```markdown
|
||||
## {{.Month}} Work Summary
|
||||
|
||||
Issues completed: {{.TotalIssues}}
|
||||
Total duration: {{.TotalDuration}}
|
||||
Contributors: {{range .Contributors}}{{.Name}}, {{end}}
|
||||
|
||||
Top categories:
|
||||
{{range .Categories}}
|
||||
- {{.Name}}: {{.Count}} issues
|
||||
{{end}}
|
||||
```
|
||||
|
||||
### Retention Configuration
|
||||
|
||||
Per-rig settings in `config.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"retention": {
|
||||
"digest_active_days": 30,
|
||||
"digest_archive_days": 365,
|
||||
"rollup_weekly": true,
|
||||
"rollup_monthly": true,
|
||||
"auto_archive": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Template System (MVP)
|
||||
|
||||
1. Add `[squash]` section parsing to formula loader
|
||||
2. Create default templates for patrol and work digests
|
||||
3. Enhance `bd mol squash` to use templates
|
||||
4. Add `--template` flag for override
|
||||
|
||||
### Phase 2: Cadence Automation
|
||||
|
||||
1. Hook squash into `gt done` flow
|
||||
2. Add patrol cycle completion detection
|
||||
3. Emit squash events for activity feed
|
||||
|
||||
### Phase 3: Retention & Archival
|
||||
|
||||
1. Implement digest aging (active → archived)
|
||||
2. Add `bd archive` command for manual archival
|
||||
3. Create rollup generator for weekly/monthly summaries
|
||||
4. Background daemon task for auto-archival
|
||||
|
||||
## Commands
|
||||
|
||||
### Squash with Template
|
||||
|
||||
```bash
|
||||
# Use formula-defined template
|
||||
bd mol squash <id>
|
||||
|
||||
# Use explicit template
|
||||
bd mol squash <id> --template=detailed
|
||||
|
||||
# Add custom summary
|
||||
bd mol squash <id> --summary="Patrol complete: 3 issues filed"
|
||||
```
|
||||
|
||||
### View Digests
|
||||
|
||||
```bash
|
||||
# List recent digests
|
||||
bd list --label=digest
|
||||
|
||||
# View rollups
|
||||
bd rollup list
|
||||
bd rollup show weekly-2025-01
|
||||
```
|
||||
|
||||
### Archive Management
|
||||
|
||||
```bash
|
||||
# Archive old digests
|
||||
bd archive --older-than=30d
|
||||
|
||||
# Generate rollup
|
||||
bd rollup generate --week=2025-01
|
||||
|
||||
# Restore from archive
|
||||
bd archive restore <digest-id>
|
||||
```
|
||||
|
||||
## Activity Feed Integration
|
||||
|
||||
Digests feed into the activity feed for observability:
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "digest",
|
||||
"agent": "greenplace/witness",
|
||||
"timestamp": "2025-12-30T10:00:00Z",
|
||||
"summary": "Patrol cycle 47 complete",
|
||||
"metrics": {
|
||||
"issues_filed": 2,
|
||||
"polecats_nudged": 1,
|
||||
"duration_minutes": 12
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The feed curator (daemon) can aggregate these for dashboards.
|
||||
|
||||
## Formula Example
|
||||
|
||||
Complete formula with squash configuration:
|
||||
|
||||
```toml
|
||||
formula = "mol-witness-patrol"
|
||||
version = 1
|
||||
type = "workflow"
|
||||
description = "Witness patrol cycle"
|
||||
|
||||
[squash]
|
||||
trigger = "on_complete"
|
||||
template_type = "patrol"
|
||||
include_metrics = true
|
||||
|
||||
[[steps]]
|
||||
id = "inbox-check"
|
||||
title = "Check inbox"
|
||||
description = "Process messages and escalations"
|
||||
|
||||
[[steps]]
|
||||
id = "health-scan"
|
||||
title = "Scan polecat health"
|
||||
description = "Check all polecats for stuck/idle"
|
||||
|
||||
[[steps]]
|
||||
id = "nudge-stuck"
|
||||
title = "Nudge stuck workers"
|
||||
description = "Send nudges to idle polecats"
|
||||
|
||||
[[steps]]
|
||||
id = "loop-or-exit"
|
||||
title = "Loop or exit decision"
|
||||
description = "Decide whether to continue or handoff"
|
||||
```
|
||||
|
||||
## Migration
|
||||
|
||||
### Existing Digests
|
||||
|
||||
Current minimal digests remain valid. New template system is additive:
|
||||
- Old digests: Title, basic description
|
||||
- New digests: Structured content, metrics
|
||||
|
||||
### Backward Compatibility
|
||||
|
||||
- `bd mol squash` without template uses current behavior
|
||||
- Formulas without `[squash]` section use type defaults
|
||||
- No breaking changes to existing workflows
|
||||
|
||||
## Design Decisions
|
||||
|
||||
### Why Squash Per-Cycle?
|
||||
|
||||
**Alternative**: Squash on session end only
|
||||
|
||||
**Rejected because**:
|
||||
- Sessions can crash mid-cycle (lost audit trail)
|
||||
- High-context sessions may span multiple cycles
|
||||
- Per-cycle gives finer granularity
|
||||
|
||||
### Why Formula-Defined Templates?
|
||||
|
||||
**Alternative**: Hard-coded templates per role
|
||||
|
||||
**Rejected because**:
|
||||
- Different workflows have different metrics
|
||||
- Extensibility for custom formulas
|
||||
- Separation of concerns (workflow defines its own output)
|
||||
|
||||
### Why Retain Forever (as Rollups)?
|
||||
|
||||
**Alternative**: Delete after N days
|
||||
|
||||
**Rejected because**:
|
||||
- Capability ledger needs long-term history
|
||||
- Rollups are small (aggregate stats)
|
||||
- Audit requirements vary by use case
|
||||
|
||||
## Future Considerations
|
||||
|
||||
- **Search**: Full-text search over archived digests
|
||||
- **Analytics**: Metrics aggregation dashboard
|
||||
- **Export**: Export digests to external systems
|
||||
- **Compliance**: Configurable retention for regulatory needs
|
||||
20
go.mod
20
go.mod
@@ -6,7 +6,9 @@ require (
|
||||
github.com/BurntSushi/toml v1.6.0
|
||||
github.com/charmbracelet/bubbles v0.21.0
|
||||
github.com/charmbracelet/bubbletea v1.3.10
|
||||
github.com/charmbracelet/lipgloss v1.1.0
|
||||
github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834
|
||||
github.com/go-rod/rod v0.116.2
|
||||
github.com/gofrs/flock v0.13.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/spf13/cobra v1.10.2
|
||||
golang.org/x/term v0.38.0
|
||||
@@ -14,25 +16,41 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/alecthomas/chroma/v2 v2.14.0 // indirect
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
|
||||
github.com/aymerick/douceur v0.2.0 // indirect
|
||||
github.com/charmbracelet/colorprofile v0.3.3 // indirect
|
||||
github.com/charmbracelet/glamour v0.10.0 // indirect
|
||||
github.com/charmbracelet/x/ansi v0.11.3 // indirect
|
||||
github.com/charmbracelet/x/cellbuf v0.0.14 // indirect
|
||||
github.com/charmbracelet/x/exp/slice v0.0.0-20250327172914-2fdc97757edf // indirect
|
||||
github.com/charmbracelet/x/term v0.2.2 // indirect
|
||||
github.com/clipperhouse/displaywidth v0.6.1 // indirect
|
||||
github.com/clipperhouse/stringish v0.1.1 // indirect
|
||||
github.com/clipperhouse/uax29/v2 v2.3.0 // indirect
|
||||
github.com/dlclark/regexp2 v1.11.0 // indirect
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
|
||||
github.com/gorilla/css v1.0.1 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/lucasb-eyer/go-colorful v1.3.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-localereader v0.0.1 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.19 // indirect
|
||||
github.com/microcosm-cc/bluemonday v1.0.27 // indirect
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
|
||||
github.com/muesli/cancelreader v0.2.2 // indirect
|
||||
github.com/muesli/reflow v0.3.0 // indirect
|
||||
github.com/muesli/termenv v0.16.0 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/spf13/pflag v1.0.9 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
github.com/ysmood/fetchup v0.2.3 // indirect
|
||||
github.com/ysmood/goob v0.4.0 // indirect
|
||||
github.com/ysmood/got v0.40.0 // indirect
|
||||
github.com/ysmood/gson v0.7.3 // indirect
|
||||
github.com/ysmood/leakless v0.9.0 // indirect
|
||||
github.com/yuin/goldmark v1.7.8 // indirect
|
||||
github.com/yuin/goldmark-emoji v1.0.5 // indirect
|
||||
golang.org/x/net v0.33.0 // indirect
|
||||
golang.org/x/sys v0.39.0 // indirect
|
||||
)
|
||||
|
||||
54
go.sum
54
go.sum
@@ -1,23 +1,33 @@
|
||||
github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk=
|
||||
github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/alecthomas/chroma/v2 v2.14.0 h1:R3+wzpnUArGcQz7fCETQBzO5n9IMNi13iIs46aU4V9E=
|
||||
github.com/alecthomas/chroma/v2 v2.14.0/go.mod h1:QolEbTfmUHIMVpBqxeDnNBj2uoeI4EbYP4i6n68SG4I=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
|
||||
github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8=
|
||||
github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA=
|
||||
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
|
||||
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
|
||||
github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs=
|
||||
github.com/charmbracelet/bubbles v0.21.0/go.mod h1:HF+v6QUR4HkEpz62dx7ym2xc71/KBHg+zKwJtMw+qtg=
|
||||
github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw=
|
||||
github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4=
|
||||
github.com/charmbracelet/colorprofile v0.3.3 h1:DjJzJtLP6/NZ8p7Cgjno0CKGr7wwRJGxWUwh2IyhfAI=
|
||||
github.com/charmbracelet/colorprofile v0.3.3/go.mod h1:nB1FugsAbzq284eJcjfah2nhdSLppN2NqvfotkfRYP4=
|
||||
github.com/charmbracelet/glamour v0.10.0 h1:MtZvfwsYCx8jEPFJm3rIBFIMZUfUJ765oX8V6kXldcY=
|
||||
github.com/charmbracelet/glamour v0.10.0/go.mod h1:f+uf+I/ChNmqo087elLnVdCiVgjSKWuXa/l6NU2ndYk=
|
||||
github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
|
||||
github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30=
|
||||
github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 h1:ZR7e0ro+SZZiIZD7msJyA+NjkCNNavuiPBLgerbOziE=
|
||||
github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834/go.mod h1:aKC/t2arECF6rNOnaKaVU6y4t4ZeHQzqfxedE/VkVhA=
|
||||
github.com/charmbracelet/x/ansi v0.11.3 h1:6DcVaqWI82BBVM/atTyq6yBoRLZFBsnoDoX9GCu2YOI=
|
||||
github.com/charmbracelet/x/ansi v0.11.3/go.mod h1:yI7Zslym9tCJcedxz5+WBq+eUGMJT0bM06Fqy1/Y4dI=
|
||||
github.com/charmbracelet/x/cellbuf v0.0.14 h1:iUEMryGyFTelKW3THW4+FfPgi4fkmKnnaLOXuc+/Kj4=
|
||||
github.com/charmbracelet/x/cellbuf v0.0.14/go.mod h1:P447lJl49ywBbil/KjCk2HexGh4tEY9LH0/1QrZZ9rA=
|
||||
github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ=
|
||||
github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U=
|
||||
github.com/charmbracelet/x/exp/slice v0.0.0-20250327172914-2fdc97757edf h1:rLG0Yb6MQSDKdB52aGX55JT1oi0P0Kuaj7wi1bLUpnI=
|
||||
github.com/charmbracelet/x/exp/slice v0.0.0-20250327172914-2fdc97757edf/go.mod h1:B3UgsnsBZS/eX42BlaNiJkD1pPOUa+oF1IYC6Yd2CEU=
|
||||
github.com/charmbracelet/x/term v0.2.2 h1:xVRT/S2ZcKdhhOuSP4t5cLi5o+JxklsoEObBSgfgZRk=
|
||||
github.com/charmbracelet/x/term v0.2.2/go.mod h1:kF8CY5RddLWrsgVwpw4kAa6TESp6EB5y3uxGLeCqzAI=
|
||||
github.com/clipperhouse/displaywidth v0.6.1 h1:/zMlAezfDzT2xy6acHBzwIfyu2ic0hgkT83UX5EY2gY=
|
||||
@@ -27,10 +37,20 @@ github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEX
|
||||
github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4=
|
||||
github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
|
||||
github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
|
||||
github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA=
|
||||
github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg=
|
||||
github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw=
|
||||
github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
|
||||
github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag=
|
||||
@@ -39,14 +59,23 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
|
||||
github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
|
||||
github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
|
||||
github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw=
|
||||
github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
|
||||
github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=
|
||||
github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
|
||||
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
|
||||
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
|
||||
github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s=
|
||||
github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8=
|
||||
github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
|
||||
github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
@@ -54,11 +83,34 @@ github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
||||
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
|
||||
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
|
||||
github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ=
|
||||
github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns=
|
||||
github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ=
|
||||
github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18=
|
||||
github.com/ysmood/gop v0.2.0 h1:+tFrG0TWPxT6p9ZaZs+VY+opCvHU8/3Fk6BaNv6kqKg=
|
||||
github.com/ysmood/gop v0.2.0/go.mod h1:rr5z2z27oGEbyB787hpEcx4ab8cCiPnKxn0SUHt6xzk=
|
||||
github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q=
|
||||
github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg=
|
||||
github.com/ysmood/gotrace v0.6.0 h1:SyI1d4jclswLhg7SWTL6os3L1WOKeNn/ZtzVQF8QmdY=
|
||||
github.com/ysmood/gotrace v0.6.0/go.mod h1:TzhIG7nHDry5//eYZDYcTzuJLYQIkykJzCRIo4/dzQM=
|
||||
github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE=
|
||||
github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg=
|
||||
github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU=
|
||||
github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
|
||||
github.com/yuin/goldmark v1.7.1/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
|
||||
github.com/yuin/goldmark v1.7.8 h1:iERMLn0/QJeHFhxSt3p6PeN9mGnvIKSpG9YYorDMnic=
|
||||
github.com/yuin/goldmark v1.7.8/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
|
||||
github.com/yuin/goldmark-emoji v1.0.5 h1:EMVWyCGPlXJfUXBXpuMu+ii3TIaxbVBnEX9uaDC4cIk=
|
||||
github.com/yuin/goldmark-emoji v1.0.5/go.mod h1:tTkZEbwu5wkPmgTcitqddVxY9osFZiavD+r4AzQrh1U=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
|
||||
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
|
||||
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
|
||||
@@ -68,3 +120,5 @@ golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
|
||||
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
|
||||
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
@@ -92,6 +92,10 @@ func formatDays(d time.Duration) string {
|
||||
return formatInt(days) + "d"
|
||||
}
|
||||
|
||||
// formatInt converts a non-negative integer to its decimal string representation.
|
||||
// For single digits (0-9), it uses direct rune conversion for efficiency.
|
||||
// For larger numbers, it extracts digits iteratively from least to most significant.
|
||||
// This avoids importing strconv for simple integer formatting in the activity package.
|
||||
func formatInt(n int) string {
|
||||
if n < 10 {
|
||||
return string(rune('0'+n))
|
||||
|
||||
76
internal/agent/state.go
Normal file
76
internal/agent/state.go
Normal file
@@ -0,0 +1,76 @@
|
||||
// Package agent provides shared types and utilities for Gas Town agents
// (witness, refinery, deacon, etc.).
package agent

import (
	"encoding/json"
	"errors"
	"os"
	"path/filepath"

	"github.com/steveyegge/gastown/internal/util"
)
|
||||
|
||||
// State represents an agent's running state.
//
// It is a string type so it serializes naturally to and from JSON state
// files and can be compared directly against raw string values.
type State string

const (
	// StateStopped means the agent is not running.
	StateStopped State = "stopped"

	// StateRunning means the agent is actively operating.
	StateRunning State = "running"

	// StatePaused means the agent is paused (not operating but not stopped).
	StatePaused State = "paused"
)
|
||||
|
||||
// StateManager handles loading and saving agent state to disk.
// It uses generics to work with any state type.
//
// The zero value is not usable; construct instances with NewStateManager.
type StateManager[T any] struct {
	// stateFilePath is the JSON state file location, placed under the
	// rig's .runtime directory by NewStateManager.
	stateFilePath string
	// defaultFactory produces a fresh default state when Load finds no
	// existing state file.
	defaultFactory func() *T
}
|
||||
|
||||
// NewStateManager creates a new StateManager for the given state file path.
|
||||
// The defaultFactory function is called when the state file doesn't exist
|
||||
// to create a new state with default values.
|
||||
func NewStateManager[T any](rigPath, stateFileName string, defaultFactory func() *T) *StateManager[T] {
|
||||
return &StateManager[T]{
|
||||
stateFilePath: filepath.Join(rigPath, ".runtime", stateFileName),
|
||||
defaultFactory: defaultFactory,
|
||||
}
|
||||
}
|
||||
|
||||
// StateFile returns the path to the state file.
// The file may not exist yet; Save creates it (and its directory) on demand.
func (m *StateManager[T]) StateFile() string {
	return m.stateFilePath
}
|
||||
|
||||
// Load loads agent state from disk.
|
||||
// If the file doesn't exist, returns a new state created by the default factory.
|
||||
func (m *StateManager[T]) Load() (*T, error) {
|
||||
data, err := os.ReadFile(m.stateFilePath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return m.defaultFactory(), nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var state T
|
||||
if err := json.Unmarshal(data, &state); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &state, nil
|
||||
}
|
||||
|
||||
// Save persists agent state to disk using atomic write.
|
||||
func (m *StateManager[T]) Save(state *T) error {
|
||||
dir := filepath.Dir(m.stateFilePath)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return util.AtomicWriteJSON(m.stateFilePath, state)
|
||||
}
|
||||
189
internal/agent/state_test.go
Normal file
189
internal/agent/state_test.go
Normal file
@@ -0,0 +1,189 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestStateConstants(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
state State
|
||||
value string
|
||||
}{
|
||||
{"StateStopped", StateStopped, "stopped"},
|
||||
{"StateRunning", StateRunning, "running"},
|
||||
{"StatePaused", StatePaused, "paused"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if string(tt.state) != tt.value {
|
||||
t.Errorf("State constant = %q, want %q", tt.state, tt.value)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateManager_StateFile(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
manager := NewStateManager[TestState](tmpDir, "test-state.json", func() *TestState {
|
||||
return &TestState{Value: "default"}
|
||||
})
|
||||
|
||||
expectedPath := filepath.Join(tmpDir, ".runtime", "test-state.json")
|
||||
if manager.StateFile() != expectedPath {
|
||||
t.Errorf("StateFile() = %q, want %q", manager.StateFile(), expectedPath)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateManager_Load_NoFile(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
manager := NewStateManager[TestState](tmpDir, "nonexistent.json", func() *TestState {
|
||||
return &TestState{Value: "default"}
|
||||
})
|
||||
|
||||
state, err := manager.Load()
|
||||
if err != nil {
|
||||
t.Fatalf("Load() error = %v", err)
|
||||
}
|
||||
if state.Value != "default" {
|
||||
t.Errorf("Load() value = %q, want %q", state.Value, "default")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateManager_Load_Save_Load(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
manager := NewStateManager[TestState](tmpDir, "test-state.json", func() *TestState {
|
||||
return &TestState{Value: "default"}
|
||||
})
|
||||
|
||||
// Save initial state
|
||||
state := &TestState{Value: "test-value", Count: 42}
|
||||
if err := manager.Save(state); err != nil {
|
||||
t.Fatalf("Save() error = %v", err)
|
||||
}
|
||||
|
||||
// Load it back
|
||||
loaded, err := manager.Load()
|
||||
if err != nil {
|
||||
t.Fatalf("Load() error = %v", err)
|
||||
}
|
||||
if loaded.Value != state.Value {
|
||||
t.Errorf("Load() value = %q, want %q", loaded.Value, state.Value)
|
||||
}
|
||||
if loaded.Count != state.Count {
|
||||
t.Errorf("Load() count = %d, want %d", loaded.Count, state.Count)
|
||||
}
|
||||
}
|
||||
|
||||
// TestStateManager_Load_CreatesDirectory verifies that the .runtime
// directory is created on demand.
//
// NOTE(review): despite the "Load" in the name, this test exercises Save's
// directory-creation behavior; consider renaming in a follow-up.
func TestStateManager_Load_CreatesDirectory(t *testing.T) {
	tmpDir := t.TempDir()
	manager := NewStateManager[TestState](tmpDir, "test-state.json", func() *TestState {
		return &TestState{Value: "default"}
	})

	// Save should create .runtime directory
	state := &TestState{Value: "test"}
	if err := manager.Save(state); err != nil {
		t.Fatalf("Save() error = %v", err)
	}

	// Verify directory was created
	runtimeDir := filepath.Join(tmpDir, ".runtime")
	if _, err := os.Stat(runtimeDir); err != nil {
		t.Errorf("Save() should create .runtime directory: %v", err)
	}
}
|
||||
|
||||
func TestStateManager_Load_InvalidJSON(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
manager := NewStateManager[TestState](tmpDir, "test-state.json", func() *TestState {
|
||||
return &TestState{Value: "default"}
|
||||
})
|
||||
|
||||
// Write invalid JSON
|
||||
statePath := manager.StateFile()
|
||||
if err := os.MkdirAll(filepath.Dir(statePath), 0755); err != nil {
|
||||
t.Fatalf("Failed to create directory: %v", err)
|
||||
}
|
||||
if err := os.WriteFile(statePath, []byte("invalid json"), 0644); err != nil {
|
||||
t.Fatalf("Failed to write file: %v", err)
|
||||
}
|
||||
|
||||
_, err := manager.Load()
|
||||
if err == nil {
|
||||
t.Error("Load() with invalid JSON should return error")
|
||||
}
|
||||
}
|
||||
|
||||
// TestState_String checks the raw string value of each State constant.
//
// NOTE(review): this overlaps with TestStateConstants, which covers the same
// three constants via subtests; consider consolidating the two tests.
func TestState_String(t *testing.T) {
	tests := []struct {
		state State
		want string
	}{
		{StateStopped, "stopped"},
		{StateRunning, "running"},
		{StatePaused, "paused"},
	}

	for _, tt := range tests {
		if string(tt.state) != tt.want {
			t.Errorf("State(%q) = %q, want %q", tt.state, string(tt.state), tt.want)
		}
	}
}
|
||||
|
||||
func TestStateManager_GenericType(t *testing.T) {
|
||||
// Test that StateManager works with different types
|
||||
|
||||
type ComplexState struct {
|
||||
Name string `json:"name"`
|
||||
Values []int `json:"values"`
|
||||
Enabled bool `json:"enabled"`
|
||||
Nested struct {
|
||||
X int `json:"x"`
|
||||
} `json:"nested"`
|
||||
}
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
manager := NewStateManager[ComplexState](tmpDir, "complex.json", func() *ComplexState {
|
||||
return &ComplexState{Name: "default", Values: []int{}}
|
||||
})
|
||||
|
||||
original := &ComplexState{
|
||||
Name: "test",
|
||||
Values: []int{1, 2, 3},
|
||||
Enabled: true,
|
||||
}
|
||||
original.Nested.X = 42
|
||||
|
||||
if err := manager.Save(original); err != nil {
|
||||
t.Fatalf("Save() error = %v", err)
|
||||
}
|
||||
|
||||
loaded, err := manager.Load()
|
||||
if err != nil {
|
||||
t.Fatalf("Load() error = %v", err)
|
||||
}
|
||||
|
||||
if loaded.Name != original.Name {
|
||||
t.Errorf("Name = %q, want %q", loaded.Name, original.Name)
|
||||
}
|
||||
if len(loaded.Values) != len(original.Values) {
|
||||
t.Errorf("Values length = %d, want %d", len(loaded.Values), len(original.Values))
|
||||
}
|
||||
if loaded.Enabled != original.Enabled {
|
||||
t.Errorf("Enabled = %v, want %v", loaded.Enabled, original.Enabled)
|
||||
}
|
||||
if loaded.Nested.X != original.Nested.X {
|
||||
t.Errorf("Nested.X = %d, want %d", loaded.Nested.X, original.Nested.X)
|
||||
}
|
||||
}
|
||||
|
||||
// TestState is a simple type for testing StateManager round-trips.
type TestState struct {
	Value string `json:"value"` // arbitrary string payload
	Count int    `json:"count"` // arbitrary numeric payload
}
|
||||
@@ -1,7 +1,10 @@
|
||||
// Package beads provides a wrapper for the bd (beads) CLI.
|
||||
package beads
|
||||
|
||||
import "fmt"
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// TownBeadsPrefix is the prefix used for town-level agent beads stored in ~/gt/.beads/.
|
||||
// This distinguishes them from rig-level beads (which use project prefixes like "gt-").
|
||||
@@ -74,3 +77,170 @@ func PolecatRoleBeadIDTown() string {
|
||||
// CrewRoleBeadIDTown delegates to RoleBeadIDTown with the fixed "crew" role.
func CrewRoleBeadIDTown() string {
	return RoleBeadIDTown("crew")
}
|
||||
|
||||
// ===== Rig-level agent bead ID helpers (gt- prefix) =====
|
||||
|
||||
// Agent bead ID naming convention:
|
||||
// prefix-rig-role-name
|
||||
//
|
||||
// Examples:
|
||||
// - gt-mayor (town-level, no rig)
|
||||
// - gt-deacon (town-level, no rig)
|
||||
// - gt-gastown-witness (rig-level singleton)
|
||||
// - gt-gastown-refinery (rig-level singleton)
|
||||
// - gt-gastown-crew-max (rig-level named agent)
|
||||
// - gt-gastown-polecat-Toast (rig-level named agent)
|
||||
|
||||
// AgentBeadIDWithPrefix generates an agent bead ID using the specified prefix.
// The prefix should NOT include the hyphen (e.g., "gt", "bd", not "gt-", "bd-").
// For town-level agents (mayor, deacon), pass empty rig and name.
// For rig-level singletons (witness, refinery), pass empty name.
// For named agents (crew, polecat), pass all three.
func AgentBeadIDWithPrefix(prefix, rig, role, name string) string {
	switch {
	case rig == "":
		// Town-level agent: prefix-mayor, prefix-deacon
		return prefix + "-" + role
	case name == "":
		// Rig-level singleton: prefix-rig-witness, prefix-rig-refinery
		return prefix + "-" + rig + "-" + role
	default:
		// Rig-level named agent: prefix-rig-role-name
		return prefix + "-" + rig + "-" + role + "-" + name
	}
}
|
||||
|
||||
// AgentBeadID generates the canonical agent bead ID using "gt" prefix.
// It delegates to AgentBeadIDWithPrefix; see that function for the
// rig/role/name rules.
// For non-gastown rigs, use AgentBeadIDWithPrefix with the rig's configured prefix.
func AgentBeadID(rig, role, name string) string {
	return AgentBeadIDWithPrefix("gt", rig, role, name)
}
|
||||
|
||||
// MayorBeadID returns the Mayor agent bead ID (the literal "gt-mayor").
//
// Deprecated: Use MayorBeadIDTown() for town-level beads (hq- prefix).
// This function returns "gt-mayor" which is for rig-level storage.
// Town-level agents like Mayor should use the hq- prefix.
func MayorBeadID() string {
	return "gt-mayor"
}

// DeaconBeadID returns the Deacon agent bead ID (the literal "gt-deacon").
//
// Deprecated: Use DeaconBeadIDTown() for town-level beads (hq- prefix).
// This function returns "gt-deacon" which is for rig-level storage.
// Town-level agents like Deacon should use the hq- prefix.
func DeaconBeadID() string {
	return "gt-deacon"
}
|
||||
|
||||
// DogBeadID returns a Dog agent bead ID.
// Dogs are town-level agents, so they follow the pattern: gt-dog-<name>
//
// Deprecated: Use DogBeadIDTown() for town-level beads with hq- prefix.
// Dogs are town-level agents and should use hq-dog-<name>, not gt-dog-<name>.
func DogBeadID(name string) string {
	const dogPrefix = "gt-dog-"
	return dogPrefix + name
}
|
||||
|
||||
// WitnessBeadIDWithPrefix returns the Witness agent bead ID for a rig using
// the specified prefix (no trailing hyphen, e.g. "gt", "bd").
func WitnessBeadIDWithPrefix(prefix, rig string) string {
	return AgentBeadIDWithPrefix(prefix, rig, "witness", "")
}

// WitnessBeadID returns the Witness agent bead ID for a rig using "gt" prefix.
func WitnessBeadID(rig string) string {
	return WitnessBeadIDWithPrefix("gt", rig)
}
|
||||
|
||||
// RefineryBeadIDWithPrefix returns the Refinery agent bead ID for a rig using
// the specified prefix (no trailing hyphen, e.g. "gt", "bd").
func RefineryBeadIDWithPrefix(prefix, rig string) string {
	return AgentBeadIDWithPrefix(prefix, rig, "refinery", "")
}

// RefineryBeadID returns the Refinery agent bead ID for a rig using "gt" prefix.
func RefineryBeadID(rig string) string {
	return RefineryBeadIDWithPrefix("gt", rig)
}
|
||||
|
||||
// CrewBeadIDWithPrefix returns a Crew worker agent bead ID using the
// specified prefix (no trailing hyphen, e.g. "gt", "bd").
func CrewBeadIDWithPrefix(prefix, rig, name string) string {
	return AgentBeadIDWithPrefix(prefix, rig, "crew", name)
}

// CrewBeadID returns a Crew worker agent bead ID using "gt" prefix.
func CrewBeadID(rig, name string) string {
	return CrewBeadIDWithPrefix("gt", rig, name)
}
|
||||
|
||||
// PolecatBeadIDWithPrefix returns a Polecat agent bead ID using the
// specified prefix (no trailing hyphen, e.g. "gt", "bd").
func PolecatBeadIDWithPrefix(prefix, rig, name string) string {
	return AgentBeadIDWithPrefix(prefix, rig, "polecat", name)
}

// PolecatBeadID returns a Polecat agent bead ID using "gt" prefix.
func PolecatBeadID(rig, name string) string {
	return PolecatBeadIDWithPrefix("gt", rig, name)
}
|
||||
|
||||
// ParseAgentBeadID parses an agent bead ID into its components.
// Returns rig, role, name, and whether parsing succeeded.
// For town-level agents, rig will be empty.
// For singletons, name will be empty.
// Accepts any valid prefix (e.g., "gt-", "bd-"), not just "gt-".
//
// Note: strings.Split always returns at least one element, so after the
// prefix is stripped the part count is >= 1; the switch below is exhaustive.
// (The previous version carried an unreachable `return false` inside the
// default branch guarded by an always-true `len(parts) >= 3` check.)
func ParseAgentBeadID(id string) (rig, role, name string, ok bool) {
	// Find the prefix (everything before the first hyphen).
	// Valid prefixes are 2-3 characters (e.g., "gt", "bd", "hq").
	hyphenIdx := strings.Index(id, "-")
	if hyphenIdx < 2 || hyphenIdx > 3 {
		return "", "", "", false
	}

	rest := id[hyphenIdx+1:]
	parts := strings.Split(rest, "-")

	switch len(parts) {
	case 1:
		// Town-level: gt-mayor, bd-deacon
		return "", parts[0], "", true
	case 2:
		// Could be rig-level singleton (gt-gastown-witness) or
		// town-level named (gt-dog-alpha for dogs)
		if parts[0] == "dog" {
			// Dogs are town-level named agents: gt-dog-<name>
			return "", "dog", parts[1], true
		}
		// Rig-level singleton: gt-gastown-witness
		return parts[0], parts[1], "", true
	case 3:
		// Rig-level named: gt-gastown-crew-max, bd-beads-polecat-pearl
		return parts[0], parts[1], parts[2], true
	default:
		// len(parts) >= 4: the name itself contains hyphens, e.g.
		// gt-gastown-polecat-my-agent-name or gt-dog-my-dog-name.
		if parts[0] == "dog" {
			// Dog with hyphenated name: gt-dog-my-dog-name
			return "", "dog", strings.Join(parts[1:], "-"), true
		}
		return parts[0], parts[1], strings.Join(parts[2:], "-"), true
	}
}
|
||||
|
||||
// IsAgentSessionBead returns true if the bead ID represents an agent session molecule.
|
||||
// Agent session beads follow patterns like gt-mayor, bd-beads-witness, gt-gastown-crew-joe.
|
||||
// Supports any valid prefix (e.g., "gt-", "bd-"), not just "gt-".
|
||||
// These are used to track agent state and update frequently, which can create noise.
|
||||
func IsAgentSessionBead(beadID string) bool {
|
||||
_, role, _, ok := ParseAgentBeadID(beadID)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
// Known agent roles
|
||||
switch role {
|
||||
case "mayor", "deacon", "witness", "refinery", "crew", "polecat", "dog":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
542
internal/beads/beads_agent.go
Normal file
542
internal/beads/beads_agent.go
Normal file
@@ -0,0 +1,542 @@
|
||||
// Package beads provides agent bead management.
|
||||
package beads
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// runSlotCmd runs `bd slot <subcommand args...>` from a specific directory,
// folding the command's combined output into any returned error.
// Shared by runSlotSet and runSlotClear, which previously duplicated the
// exec + error-formatting logic.
func runSlotCmd(workDir string, slotArgs ...string) error {
	args := append([]string{"slot"}, slotArgs...)
	cmd := exec.Command("bd", args...)
	cmd.Dir = workDir
	if output, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("%s: %w", strings.TrimSpace(string(output)), err)
	}
	return nil
}

// runSlotSet runs `bd slot set` from a specific directory.
// This is needed when the agent bead was created via routing to a different
// database than the Beads wrapper's default directory.
func runSlotSet(workDir, beadID, slotName, slotValue string) error {
	return runSlotCmd(workDir, "set", beadID, slotName, slotValue)
}

// runSlotClear runs `bd slot clear` from a specific directory.
func runSlotClear(workDir, beadID, slotName string) error {
	return runSlotCmd(workDir, "clear", beadID, slotName)
}
|
||||
|
||||
// AgentFields holds structured fields for agent beads.
// These are stored as "key: value" lines in the description.
//
// FormatAgentDescription serializes these fields and ParseAgentFields reads
// them back; empty optional fields are serialized as the literal "null".
type AgentFields struct {
	RoleType          string // polecat, witness, refinery, deacon, mayor
	Rig               string // Rig name (empty for global agents like mayor/deacon)
	AgentState        string // spawning, working, done, stuck
	HookBead          string // Currently pinned work bead ID
	CleanupStatus     string // ZFC: polecat self-reports git state (clean, has_uncommitted, has_stash, has_unpushed)
	ActiveMR          string // Currently active merge request bead ID (for traceability)
	NotificationLevel string // DND mode: verbose, normal, muted (default: normal)
	// Note: RoleBead field removed - role definitions are now config-based.
	// See internal/config/roles/*.toml and config-based-roles.md.
}
|
||||
|
||||
// Notification level constants for AgentFields.NotificationLevel.
const (
	NotifyVerbose = "verbose" // All notifications (mail, convoy events, etc.)
	NotifyNormal  = "normal"  // Important events only (default)
	NotifyMuted   = "muted"   // Silent/DND mode - batch for later
)
|
||||
|
||||
// FormatAgentDescription creates a description string from agent fields.
|
||||
func FormatAgentDescription(title string, fields *AgentFields) string {
|
||||
if fields == nil {
|
||||
return title
|
||||
}
|
||||
|
||||
var lines []string
|
||||
lines = append(lines, title)
|
||||
lines = append(lines, "")
|
||||
lines = append(lines, fmt.Sprintf("role_type: %s", fields.RoleType))
|
||||
|
||||
if fields.Rig != "" {
|
||||
lines = append(lines, fmt.Sprintf("rig: %s", fields.Rig))
|
||||
} else {
|
||||
lines = append(lines, "rig: null")
|
||||
}
|
||||
|
||||
lines = append(lines, fmt.Sprintf("agent_state: %s", fields.AgentState))
|
||||
|
||||
if fields.HookBead != "" {
|
||||
lines = append(lines, fmt.Sprintf("hook_bead: %s", fields.HookBead))
|
||||
} else {
|
||||
lines = append(lines, "hook_bead: null")
|
||||
}
|
||||
|
||||
// Note: role_bead field no longer written - role definitions are config-based
|
||||
|
||||
if fields.CleanupStatus != "" {
|
||||
lines = append(lines, fmt.Sprintf("cleanup_status: %s", fields.CleanupStatus))
|
||||
} else {
|
||||
lines = append(lines, "cleanup_status: null")
|
||||
}
|
||||
|
||||
if fields.ActiveMR != "" {
|
||||
lines = append(lines, fmt.Sprintf("active_mr: %s", fields.ActiveMR))
|
||||
} else {
|
||||
lines = append(lines, "active_mr: null")
|
||||
}
|
||||
|
||||
if fields.NotificationLevel != "" {
|
||||
lines = append(lines, fmt.Sprintf("notification_level: %s", fields.NotificationLevel))
|
||||
} else {
|
||||
lines = append(lines, "notification_level: null")
|
||||
}
|
||||
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
// ParseAgentFields extracts agent fields from an issue's description.
|
||||
func ParseAgentFields(description string) *AgentFields {
|
||||
fields := &AgentFields{}
|
||||
|
||||
for _, line := range strings.Split(description, "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
colonIdx := strings.Index(line, ":")
|
||||
if colonIdx == -1 {
|
||||
continue
|
||||
}
|
||||
|
||||
key := strings.TrimSpace(line[:colonIdx])
|
||||
value := strings.TrimSpace(line[colonIdx+1:])
|
||||
if value == "null" || value == "" {
|
||||
value = ""
|
||||
}
|
||||
|
||||
switch strings.ToLower(key) {
|
||||
case "role_type":
|
||||
fields.RoleType = value
|
||||
case "rig":
|
||||
fields.Rig = value
|
||||
case "agent_state":
|
||||
fields.AgentState = value
|
||||
case "hook_bead":
|
||||
fields.HookBead = value
|
||||
case "role_bead":
|
||||
// Ignored - role definitions are now config-based (backward compat)
|
||||
case "cleanup_status":
|
||||
fields.CleanupStatus = value
|
||||
case "active_mr":
|
||||
fields.ActiveMR = value
|
||||
case "notification_level":
|
||||
fields.NotificationLevel = value
|
||||
}
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// CreateAgentBead creates an agent bead for tracking agent lifecycle.
// The ID format is: <prefix>-<rig>-<role>-<name> (e.g., gt-gastown-polecat-Toast)
// Use AgentBeadID() helper to generate correct IDs.
// The created_by field is populated from BD_ACTOR env var for provenance tracking.
//
// This function automatically ensures custom types are configured in the target
// database before creating the bead. This handles multi-repo routing scenarios
// where the bead may be routed to a different database than the one this wrapper
// is connected to.
//
// Returns the Issue parsed from `bd create --json` output, or an error if
// routing preparation, the create command, or JSON parsing fails.
func (b *Beads) CreateAgentBead(id, title string, fields *AgentFields) (*Issue, error) {
	// Resolve where this bead will actually be written (handles multi-repo routing)
	targetDir := ResolveRoutingTarget(b.getTownRoot(), id, b.getResolvedBeadsDir())

	// Ensure target database has custom types configured
	// This is cached (sentinel file + in-memory) so repeated calls are fast
	if err := EnsureCustomTypes(targetDir); err != nil {
		return nil, fmt.Errorf("prepare target for agent bead %s: %w", id, err)
	}

	// Fields are serialized into the description text as "key: value" lines.
	description := FormatAgentDescription(title, fields)

	args := []string{"create", "--json",
		"--id=" + id,
		"--title=" + title,
		"--description=" + description,
		"--type=agent",
		"--labels=gt:agent",
	}
	if NeedsForceForID(id) {
		args = append(args, "--force")
	}

	// Default actor from BD_ACTOR env var for provenance tracking
	// Uses getActor() to respect isolated mode (tests)
	if actor := b.getActor(); actor != "" {
		args = append(args, "--actor="+actor)
	}

	out, err := b.run(args...)
	if err != nil {
		return nil, err
	}

	var issue Issue
	if err := json.Unmarshal(out, &issue); err != nil {
		return nil, fmt.Errorf("parsing bd create output: %w", err)
	}

	// Note: role slot no longer set - role definitions are config-based

	// Set the hook slot if specified (this is the authoritative storage)
	// This fixes the slot inconsistency bug where bead status is 'hooked' but
	// agent's hook slot is empty. See mi-619.
	// Must run from targetDir since that's where the agent bead was created
	if fields != nil && fields.HookBead != "" {
		if err := runSlotSet(targetDir, id, "hook", fields.HookBead); err != nil {
			// Non-fatal: warn but continue - description text has the backup
			// NOTE(review): warning goes to stdout via fmt.Printf — confirm
			// whether stderr would be more appropriate for callers.
			fmt.Printf("Warning: could not set hook slot: %v\n", err)
		}
	}

	return &issue, nil
}
|
||||
|
||||
// CreateOrReopenAgentBead creates an agent bead or reopens an existing one.
// This handles the case where a polecat is nuked and re-spawned with the same name:
// the old agent bead exists as a closed bead, so we reopen and update it instead of
// failing with a UNIQUE constraint error.
//
// NOTE: This does NOT handle tombstones. If the old bead was hard-deleted (creating
// a tombstone), this function will fail. Use CloseAndClearAgentBead instead of DeleteAgentBead
// when cleaning up agent beads to ensure they can be reopened later.
//
// The function:
//  1. Tries to create the agent bead
//  2. If UNIQUE constraint fails, reopens the existing bead and updates its fields
//
// NOTE(review): duplicate detection and the "already open" case both rely on
// substring matching against bd's error text, which is brittle if bd's
// messages change — confirm bd has no structured error output to use instead.
func (b *Beads) CreateOrReopenAgentBead(id, title string, fields *AgentFields) (*Issue, error) {
	// First try to create the bead
	issue, err := b.CreateAgentBead(id, title, fields)
	if err == nil {
		return issue, nil
	}

	// Check if it's a UNIQUE constraint error
	if !strings.Contains(err.Error(), "UNIQUE constraint failed") {
		return nil, err
	}

	// Resolve where this bead lives (for slot operations)
	targetDir := ResolveRoutingTarget(b.getTownRoot(), id, b.getResolvedBeadsDir())

	// The bead already exists (should be closed from previous polecat lifecycle)
	// Reopen it and update its fields
	if _, reopenErr := b.run("reopen", id, "--reason=re-spawning agent"); reopenErr != nil {
		// If reopen fails, the bead might already be open - continue with update
		if !strings.Contains(reopenErr.Error(), "already open") {
			return nil, fmt.Errorf("reopening existing agent bead: %w (original error: %v)", reopenErr, err)
		}
	}

	// Update the bead with new fields
	description := FormatAgentDescription(title, fields)
	updateOpts := UpdateOptions{
		Title:       &title,
		Description: &description,
	}
	if err := b.Update(id, updateOpts); err != nil {
		return nil, fmt.Errorf("updating reopened agent bead: %w", err)
	}

	// Note: role slot no longer set - role definitions are config-based

	// Clear any existing hook slot (handles stale state from previous lifecycle)
	// Must run from targetDir since that's where the agent bead lives
	// Best-effort: a failed clear is deliberately ignored.
	_ = runSlotClear(targetDir, id, "hook")

	// Set the hook slot if specified
	// Must run from targetDir since that's where the agent bead lives
	if fields != nil && fields.HookBead != "" {
		if err := runSlotSet(targetDir, id, "hook", fields.HookBead); err != nil {
			// Non-fatal: warn but continue - description text has the backup
			fmt.Printf("Warning: could not set hook slot: %v\n", err)
		}
	}

	// Return the updated bead
	return b.Show(id)
}
|
||||
|
||||
// UpdateAgentState updates the agent_state field in an agent bead.
// Optionally updates hook_bead if provided.
//
// Semantics of the hookBead parameter:
//   - nil: leave the hook slot untouched
//   - pointer to a non-empty string: set the hook to that bead ID
//   - pointer to an empty string: clear the hook
//
// IMPORTANT: This function uses the proper bd commands to update agent fields:
//   - `bd agent state` for agent_state (uses SQLite column directly)
//   - `bd slot set/clear` for hook_bead (uses SQLite column directly)
//
// This ensures consistency with `bd slot show` and other beads commands.
// Previously, this function embedded these fields in the description text,
// which caused inconsistencies with bd slot commands (see GH #gt-9v52).
func (b *Beads) UpdateAgentState(id string, state string, hookBead *string) error {
	// Update agent state using bd agent state command
	// This updates the agent_state column directly in SQLite
	_, err := b.run("agent", "state", id, state)
	if err != nil {
		return fmt.Errorf("updating agent state: %w", err)
	}

	// Update hook_bead if provided
	if hookBead != nil {
		if *hookBead != "" {
			// Set the hook using bd slot set
			// This updates the hook_bead column directly in SQLite
			_, err = b.run("slot", "set", id, "hook", *hookBead)
			if err != nil {
				// If slot is already occupied, clear it first then retry
				// This handles re-slinging scenarios where we're updating the hook
				// NOTE(review): relies on substring matching of bd's error text.
				errStr := err.Error()
				if strings.Contains(errStr, "already occupied") {
					// Clear failure is ignored; the retry below reports the error.
					_, _ = b.run("slot", "clear", id, "hook")
					_, err = b.run("slot", "set", id, "hook", *hookBead)
				}
				if err != nil {
					return fmt.Errorf("setting hook: %w", err)
				}
			}
		} else {
			// Clear the hook
			_, err = b.run("slot", "clear", id, "hook")
			if err != nil {
				return fmt.Errorf("clearing hook: %w", err)
			}
		}
	}

	return nil
}
|
||||
|
||||
// SetHookBead sets the hook_bead slot on an agent bead.
|
||||
// This is a convenience wrapper that only sets the hook without changing agent_state.
|
||||
// Per gt-zecmc: agent_state ("running", "dead", "idle") is observable from tmux
|
||||
// and should not be recorded in beads ("discover, don't track" principle).
|
||||
func (b *Beads) SetHookBead(agentBeadID, hookBeadID string) error {
|
||||
// Set the hook using bd slot set
|
||||
// This updates the hook_bead column directly in SQLite
|
||||
_, err := b.run("slot", "set", agentBeadID, "hook", hookBeadID)
|
||||
if err != nil {
|
||||
// If slot is already occupied, clear it first then retry
|
||||
errStr := err.Error()
|
||||
if strings.Contains(errStr, "already occupied") {
|
||||
_, _ = b.run("slot", "clear", agentBeadID, "hook")
|
||||
_, err = b.run("slot", "set", agentBeadID, "hook", hookBeadID)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("setting hook: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ClearHookBead clears the hook_bead slot on an agent bead.
|
||||
// Used when work is complete or unslung.
|
||||
func (b *Beads) ClearHookBead(agentBeadID string) error {
|
||||
_, err := b.run("slot", "clear", agentBeadID, "hook")
|
||||
if err != nil {
|
||||
return fmt.Errorf("clearing hook: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateAgentCleanupStatus updates the cleanup_status field in an agent bead.
|
||||
// This is called by the polecat to self-report its git state (ZFC compliance).
|
||||
// Valid statuses: clean, has_uncommitted, has_stash, has_unpushed
|
||||
func (b *Beads) UpdateAgentCleanupStatus(id string, cleanupStatus string) error {
|
||||
// First get current issue to preserve other fields
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse existing fields
|
||||
fields := ParseAgentFields(issue.Description)
|
||||
fields.CleanupStatus = cleanupStatus
|
||||
|
||||
// Format new description
|
||||
description := FormatAgentDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(id, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// UpdateAgentActiveMR updates the active_mr field in an agent bead.
|
||||
// This links the agent to their current merge request for traceability.
|
||||
// Pass empty string to clear the field (e.g., after merge completes).
|
||||
func (b *Beads) UpdateAgentActiveMR(id string, activeMR string) error {
|
||||
// First get current issue to preserve other fields
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse existing fields
|
||||
fields := ParseAgentFields(issue.Description)
|
||||
fields.ActiveMR = activeMR
|
||||
|
||||
// Format new description
|
||||
description := FormatAgentDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(id, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// UpdateAgentNotificationLevel updates the notification_level field in an agent bead.
|
||||
// Valid levels: verbose, normal, muted (DND mode).
|
||||
// Pass empty string to reset to default (normal).
|
||||
func (b *Beads) UpdateAgentNotificationLevel(id string, level string) error {
|
||||
// Validate level
|
||||
if level != "" && level != NotifyVerbose && level != NotifyNormal && level != NotifyMuted {
|
||||
return fmt.Errorf("invalid notification level %q: must be verbose, normal, or muted", level)
|
||||
}
|
||||
|
||||
// First get current issue to preserve other fields
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse existing fields
|
||||
fields := ParseAgentFields(issue.Description)
|
||||
fields.NotificationLevel = level
|
||||
|
||||
// Format new description
|
||||
description := FormatAgentDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(id, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// GetAgentNotificationLevel returns the notification level for an agent.
|
||||
// Returns "normal" if not set (the default).
|
||||
func (b *Beads) GetAgentNotificationLevel(id string) (string, error) {
|
||||
_, fields, err := b.GetAgentBead(id)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if fields == nil {
|
||||
return NotifyNormal, nil
|
||||
}
|
||||
if fields.NotificationLevel == "" {
|
||||
return NotifyNormal, nil
|
||||
}
|
||||
return fields.NotificationLevel, nil
|
||||
}
|
||||
|
||||
// DeleteAgentBead permanently deletes an agent bead.
|
||||
// Uses --hard --force for immediate permanent deletion (no tombstone).
|
||||
//
|
||||
// WARNING: Due to a bd bug, --hard --force still creates tombstones instead of
|
||||
// truly deleting. This breaks CreateOrReopenAgentBead because tombstones are
|
||||
// invisible to bd show/reopen but still block bd create via UNIQUE constraint.
|
||||
//
|
||||
//
|
||||
// WORKAROUND: Use CloseAndClearAgentBead instead, which allows CreateOrReopenAgentBead
|
||||
// to reopen the bead on re-spawn.
|
||||
func (b *Beads) DeleteAgentBead(id string) error {
|
||||
_, err := b.run("delete", id, "--hard", "--force")
|
||||
return err
|
||||
}
|
||||
|
||||
// CloseAndClearAgentBead closes an agent bead (soft delete).
|
||||
// This is the recommended way to clean up agent beads because CreateOrReopenAgentBead
|
||||
// can reopen closed beads when re-spawning polecats with the same name.
|
||||
//
|
||||
// This is a workaround for the bd tombstone bug where DeleteAgentBead creates
|
||||
// tombstones that cannot be reopened.
|
||||
//
|
||||
// To emulate the clean slate of delete --force --hard, this clears all mutable
|
||||
// fields (hook_bead, active_mr, cleanup_status, agent_state) before closing.
|
||||
func (b *Beads) CloseAndClearAgentBead(id, reason string) error {
|
||||
// Clear mutable fields to emulate delete --force --hard behavior.
|
||||
// This ensures reopened agent beads don't have stale state.
|
||||
|
||||
// First get current issue to preserve immutable fields
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
// If we can't read the issue, still attempt to close
|
||||
args := []string{"close", id}
|
||||
if reason != "" {
|
||||
args = append(args, "--reason="+reason)
|
||||
}
|
||||
_, closeErr := b.run(args...)
|
||||
return closeErr
|
||||
}
|
||||
|
||||
// Parse existing fields and clear mutable ones
|
||||
fields := ParseAgentFields(issue.Description)
|
||||
fields.HookBead = "" // Clear hook_bead
|
||||
fields.ActiveMR = "" // Clear active_mr
|
||||
fields.CleanupStatus = "" // Clear cleanup_status
|
||||
fields.AgentState = "closed"
|
||||
|
||||
// Update description with cleared fields
|
||||
description := FormatAgentDescription(issue.Title, fields)
|
||||
if err := b.Update(id, UpdateOptions{Description: &description}); err != nil {
|
||||
// Non-fatal: continue with close even if update fails
|
||||
}
|
||||
|
||||
// Also clear the hook slot in the database
|
||||
if err := b.ClearHookBead(id); err != nil {
|
||||
// Non-fatal
|
||||
}
|
||||
|
||||
args := []string{"close", id}
|
||||
if reason != "" {
|
||||
args = append(args, "--reason="+reason)
|
||||
}
|
||||
_, err = b.run(args...)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetAgentBead retrieves an agent bead by ID.
|
||||
// Returns nil if not found.
|
||||
func (b *Beads) GetAgentBead(id string) (*Issue, *AgentFields, error) {
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if !HasLabel(issue, "gt:agent") {
|
||||
return nil, nil, fmt.Errorf("issue %s is not an agent bead (missing gt:agent label)", id)
|
||||
}
|
||||
|
||||
fields := ParseAgentFields(issue.Description)
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
// ListAgentBeads returns all agent beads in a single query.
|
||||
// Returns a map of agent bead ID to Issue.
|
||||
func (b *Beads) ListAgentBeads() (map[string]*Issue, error) {
|
||||
out, err := b.run("list", "--label=gt:agent", "--json")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issues []*Issue
|
||||
if err := json.Unmarshal(out, &issues); err != nil {
|
||||
return nil, fmt.Errorf("parsing bd list output: %w", err)
|
||||
}
|
||||
|
||||
result := make(map[string]*Issue, len(issues))
|
||||
for _, issue := range issues {
|
||||
result[issue.ID] = issue
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
529
internal/beads/beads_channel.go
Normal file
529
internal/beads/beads_channel.go
Normal file
@@ -0,0 +1,529 @@
|
||||
// Package beads provides channel bead management for beads-native messaging.
|
||||
// Channels are named pub/sub streams where messages are broadcast to subscribers.
|
||||
package beads
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ChannelFields holds structured fields for channel beads.
// These are stored as "key: value" lines in the description and are
// round-tripped by FormatChannelDescription / ParseChannelFields.
type ChannelFields struct {
	Name           string   // Unique channel name (e.g., "alerts", "builds")
	Subscribers    []string // Addresses subscribed to this channel (comma-separated in the description)
	Status         string   // active, closed (see ChannelStatus* constants)
	RetentionCount int      // Number of recent messages to retain (0 = unlimited)
	RetentionHours int      // Hours to retain messages (0 = forever)
	CreatedBy      string   // Who created the channel
	CreatedAt      string   // ISO 8601 timestamp
}

// Channel status constants for ChannelFields.Status.
const (
	ChannelStatusActive = "active"
	ChannelStatusClosed = "closed"
)
|
||||
|
||||
// FormatChannelDescription creates a description string from channel fields.
|
||||
func FormatChannelDescription(title string, fields *ChannelFields) string {
|
||||
if fields == nil {
|
||||
return title
|
||||
}
|
||||
|
||||
var lines []string
|
||||
lines = append(lines, title)
|
||||
lines = append(lines, "")
|
||||
lines = append(lines, fmt.Sprintf("name: %s", fields.Name))
|
||||
|
||||
// Subscribers stored as comma-separated list
|
||||
if len(fields.Subscribers) > 0 {
|
||||
lines = append(lines, fmt.Sprintf("subscribers: %s", strings.Join(fields.Subscribers, ",")))
|
||||
} else {
|
||||
lines = append(lines, "subscribers: null")
|
||||
}
|
||||
|
||||
if fields.Status != "" {
|
||||
lines = append(lines, fmt.Sprintf("status: %s", fields.Status))
|
||||
} else {
|
||||
lines = append(lines, "status: active")
|
||||
}
|
||||
|
||||
lines = append(lines, fmt.Sprintf("retention_count: %d", fields.RetentionCount))
|
||||
lines = append(lines, fmt.Sprintf("retention_hours: %d", fields.RetentionHours))
|
||||
|
||||
if fields.CreatedBy != "" {
|
||||
lines = append(lines, fmt.Sprintf("created_by: %s", fields.CreatedBy))
|
||||
} else {
|
||||
lines = append(lines, "created_by: null")
|
||||
}
|
||||
|
||||
if fields.CreatedAt != "" {
|
||||
lines = append(lines, fmt.Sprintf("created_at: %s", fields.CreatedAt))
|
||||
} else {
|
||||
lines = append(lines, "created_at: null")
|
||||
}
|
||||
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
// ParseChannelFields extracts channel fields from an issue's description.
|
||||
func ParseChannelFields(description string) *ChannelFields {
|
||||
fields := &ChannelFields{
|
||||
Status: ChannelStatusActive,
|
||||
}
|
||||
|
||||
for _, line := range strings.Split(description, "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
colonIdx := strings.Index(line, ":")
|
||||
if colonIdx == -1 {
|
||||
continue
|
||||
}
|
||||
|
||||
key := strings.TrimSpace(line[:colonIdx])
|
||||
value := strings.TrimSpace(line[colonIdx+1:])
|
||||
if value == "null" || value == "" {
|
||||
value = ""
|
||||
}
|
||||
|
||||
switch strings.ToLower(key) {
|
||||
case "name":
|
||||
fields.Name = value
|
||||
case "subscribers":
|
||||
if value != "" {
|
||||
// Parse comma-separated subscribers
|
||||
for _, s := range strings.Split(value, ",") {
|
||||
s = strings.TrimSpace(s)
|
||||
if s != "" {
|
||||
fields.Subscribers = append(fields.Subscribers, s)
|
||||
}
|
||||
}
|
||||
}
|
||||
case "status":
|
||||
fields.Status = value
|
||||
case "retention_count":
|
||||
if v, err := strconv.Atoi(value); err == nil {
|
||||
fields.RetentionCount = v
|
||||
}
|
||||
case "retention_hours":
|
||||
if v, err := strconv.Atoi(value); err == nil {
|
||||
fields.RetentionHours = v
|
||||
}
|
||||
case "created_by":
|
||||
fields.CreatedBy = value
|
||||
case "created_at":
|
||||
fields.CreatedAt = value
|
||||
}
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// ChannelBeadID returns the bead ID for a channel name.
// Format: hq-channel-<name> (town-level, channels span rigs)
func ChannelBeadID(name string) string {
	const channelIDPrefix = "hq-channel-"
	return channelIDPrefix + name
}
|
||||
|
||||
// CreateChannelBead creates a channel bead for pub/sub messaging.
|
||||
// The ID format is: hq-channel-<name> (e.g., hq-channel-alerts)
|
||||
// Channels are town-level entities (hq- prefix) because they span rigs.
|
||||
// The created_by field is populated from BD_ACTOR env var for provenance tracking.
|
||||
func (b *Beads) CreateChannelBead(name string, subscribers []string, createdBy string) (*Issue, error) {
|
||||
id := ChannelBeadID(name)
|
||||
title := fmt.Sprintf("Channel: %s", name)
|
||||
|
||||
fields := &ChannelFields{
|
||||
Name: name,
|
||||
Subscribers: subscribers,
|
||||
Status: ChannelStatusActive,
|
||||
CreatedBy: createdBy,
|
||||
CreatedAt: time.Now().Format(time.RFC3339),
|
||||
}
|
||||
|
||||
description := FormatChannelDescription(title, fields)
|
||||
|
||||
args := []string{"create", "--json",
|
||||
"--id=" + id,
|
||||
"--title=" + title,
|
||||
"--description=" + description,
|
||||
"--type=task", // Channels use task type with gt:channel label
|
||||
"--labels=gt:channel",
|
||||
"--force", // Override prefix check (town beads may have mixed prefixes)
|
||||
}
|
||||
|
||||
// Default actor from BD_ACTOR env var for provenance tracking
|
||||
// Uses getActor() to respect isolated mode (tests)
|
||||
if actor := b.getActor(); actor != "" {
|
||||
args = append(args, "--actor="+actor)
|
||||
}
|
||||
|
||||
out, err := b.run(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issue Issue
|
||||
if err := json.Unmarshal(out, &issue); err != nil {
|
||||
return nil, fmt.Errorf("parsing bd create output: %w", err)
|
||||
}
|
||||
|
||||
return &issue, nil
|
||||
}
|
||||
|
||||
// GetChannelBead retrieves a channel bead by name.
|
||||
// Returns nil, nil if not found.
|
||||
func (b *Beads) GetChannelBead(name string) (*Issue, *ChannelFields, error) {
|
||||
id := ChannelBeadID(name)
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if !HasLabel(issue, "gt:channel") {
|
||||
return nil, nil, fmt.Errorf("bead %s is not a channel bead (missing gt:channel label)", id)
|
||||
}
|
||||
|
||||
fields := ParseChannelFields(issue.Description)
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
// GetChannelByID retrieves a channel bead by its full ID.
|
||||
// Returns nil, nil if not found.
|
||||
func (b *Beads) GetChannelByID(id string) (*Issue, *ChannelFields, error) {
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if !HasLabel(issue, "gt:channel") {
|
||||
return nil, nil, fmt.Errorf("bead %s is not a channel bead (missing gt:channel label)", id)
|
||||
}
|
||||
|
||||
fields := ParseChannelFields(issue.Description)
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
// UpdateChannelSubscribers updates the subscribers list for a channel.
|
||||
func (b *Beads) UpdateChannelSubscribers(name string, subscribers []string) error {
|
||||
issue, fields, err := b.GetChannelBead(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return fmt.Errorf("channel %q not found", name)
|
||||
}
|
||||
|
||||
fields.Subscribers = subscribers
|
||||
description := FormatChannelDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(issue.ID, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// SubscribeToChannel adds a subscriber to a channel if not already subscribed.
|
||||
func (b *Beads) SubscribeToChannel(name string, subscriber string) error {
|
||||
issue, fields, err := b.GetChannelBead(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return fmt.Errorf("channel %q not found", name)
|
||||
}
|
||||
|
||||
// Check if already subscribed
|
||||
for _, s := range fields.Subscribers {
|
||||
if s == subscriber {
|
||||
return nil // Already subscribed
|
||||
}
|
||||
}
|
||||
|
||||
fields.Subscribers = append(fields.Subscribers, subscriber)
|
||||
description := FormatChannelDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(issue.ID, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// UnsubscribeFromChannel removes a subscriber from a channel.
|
||||
func (b *Beads) UnsubscribeFromChannel(name string, subscriber string) error {
|
||||
issue, fields, err := b.GetChannelBead(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return fmt.Errorf("channel %q not found", name)
|
||||
}
|
||||
|
||||
// Filter out the subscriber
|
||||
var newSubscribers []string
|
||||
for _, s := range fields.Subscribers {
|
||||
if s != subscriber {
|
||||
newSubscribers = append(newSubscribers, s)
|
||||
}
|
||||
}
|
||||
|
||||
fields.Subscribers = newSubscribers
|
||||
description := FormatChannelDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(issue.ID, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// UpdateChannelRetention updates the retention policy for a channel.
|
||||
func (b *Beads) UpdateChannelRetention(name string, retentionCount, retentionHours int) error {
|
||||
issue, fields, err := b.GetChannelBead(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return fmt.Errorf("channel %q not found", name)
|
||||
}
|
||||
|
||||
fields.RetentionCount = retentionCount
|
||||
fields.RetentionHours = retentionHours
|
||||
description := FormatChannelDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(issue.ID, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// UpdateChannelStatus updates the status of a channel bead.
|
||||
func (b *Beads) UpdateChannelStatus(name, status string) error {
|
||||
// Validate status
|
||||
if status != ChannelStatusActive && status != ChannelStatusClosed {
|
||||
return fmt.Errorf("invalid channel status %q: must be active or closed", status)
|
||||
}
|
||||
|
||||
issue, fields, err := b.GetChannelBead(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return fmt.Errorf("channel %q not found", name)
|
||||
}
|
||||
|
||||
fields.Status = status
|
||||
description := FormatChannelDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(issue.ID, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// DeleteChannelBead permanently deletes a channel bead.
|
||||
func (b *Beads) DeleteChannelBead(name string) error {
|
||||
id := ChannelBeadID(name)
|
||||
_, err := b.run("delete", id, "--hard", "--force")
|
||||
return err
|
||||
}
|
||||
|
||||
// ListChannelBeads returns all channel beads.
|
||||
func (b *Beads) ListChannelBeads() (map[string]*ChannelFields, error) {
|
||||
out, err := b.run("list", "--label=gt:channel", "--json")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issues []*Issue
|
||||
if err := json.Unmarshal(out, &issues); err != nil {
|
||||
return nil, fmt.Errorf("parsing bd list output: %w", err)
|
||||
}
|
||||
|
||||
result := make(map[string]*ChannelFields, len(issues))
|
||||
for _, issue := range issues {
|
||||
fields := ParseChannelFields(issue.Description)
|
||||
if fields.Name != "" {
|
||||
result[fields.Name] = fields
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// LookupChannelByName finds a channel by its name field (not by ID).
|
||||
// This is used for address resolution where we may not know the full bead ID.
|
||||
func (b *Beads) LookupChannelByName(name string) (*Issue, *ChannelFields, error) {
|
||||
// First try direct lookup by standard ID format
|
||||
issue, fields, err := b.GetChannelBead(name)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if issue != nil {
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
// If not found by ID, search all channels by name field
|
||||
channels, err := b.ListChannelBeads()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if fields, ok := channels[name]; ok {
|
||||
// Found by name, now get the full issue
|
||||
id := ChannelBeadID(name)
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
return nil, nil, nil // Not found
|
||||
}
|
||||
|
||||
// EnforceChannelRetention prunes old messages from a channel to enforce retention.
|
||||
// Called after posting a new message to the channel (on-write cleanup).
|
||||
// Enforces both count-based (RetentionCount) and time-based (RetentionHours) limits.
|
||||
func (b *Beads) EnforceChannelRetention(name string) error {
|
||||
// Get channel config
|
||||
_, fields, err := b.GetChannelBead(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if fields == nil {
|
||||
return fmt.Errorf("channel not found: %s", name)
|
||||
}
|
||||
|
||||
// Skip if no retention limits configured
|
||||
if fields.RetentionCount <= 0 && fields.RetentionHours <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Query messages in this channel (oldest first)
|
||||
out, err := b.run("list",
|
||||
"--type=message",
|
||||
"--label=channel:"+name,
|
||||
"--json",
|
||||
"--limit=0",
|
||||
"--sort=created",
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("listing channel messages: %w", err)
|
||||
}
|
||||
|
||||
var messages []struct {
|
||||
ID string `json:"id"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
}
|
||||
if err := json.Unmarshal(out, &messages); err != nil {
|
||||
return fmt.Errorf("parsing channel messages: %w", err)
|
||||
}
|
||||
|
||||
// Track which messages to delete (use map to avoid duplicates)
|
||||
toDeleteIDs := make(map[string]bool)
|
||||
|
||||
// Time-based retention: delete messages older than RetentionHours
|
||||
if fields.RetentionHours > 0 {
|
||||
cutoff := time.Now().Add(-time.Duration(fields.RetentionHours) * time.Hour)
|
||||
for _, msg := range messages {
|
||||
createdAt, err := time.Parse(time.RFC3339, msg.CreatedAt)
|
||||
if err != nil {
|
||||
continue // Skip messages with unparseable timestamps
|
||||
}
|
||||
if createdAt.Before(cutoff) {
|
||||
toDeleteIDs[msg.ID] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Count-based retention: delete oldest messages beyond RetentionCount
|
||||
if fields.RetentionCount > 0 {
|
||||
toDeleteByCount := len(messages) - fields.RetentionCount
|
||||
for i := 0; i < toDeleteByCount && i < len(messages); i++ {
|
||||
toDeleteIDs[messages[i].ID] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Delete marked messages (best-effort)
|
||||
for id := range toDeleteIDs {
|
||||
// Use close instead of delete for audit trail
|
||||
_, _ = b.run("close", id, "--reason=channel retention pruning")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PruneAllChannels enforces retention on all channels.
|
||||
// Called by Deacon patrol as a backup cleanup mechanism.
|
||||
// Enforces both count-based (RetentionCount) and time-based (RetentionHours) limits.
|
||||
// Uses a 10% buffer for count-based pruning to avoid thrashing.
|
||||
func (b *Beads) PruneAllChannels() (int, error) {
|
||||
channels, err := b.ListChannelBeads()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
pruned := 0
|
||||
for name, fields := range channels {
|
||||
// Skip if no retention limits configured
|
||||
if fields.RetentionCount <= 0 && fields.RetentionHours <= 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get messages with timestamps
|
||||
out, err := b.run("list",
|
||||
"--type=message",
|
||||
"--label=channel:"+name,
|
||||
"--json",
|
||||
"--limit=0",
|
||||
"--sort=created",
|
||||
)
|
||||
if err != nil {
|
||||
continue // Skip on error
|
||||
}
|
||||
|
||||
var messages []struct {
|
||||
ID string `json:"id"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
}
|
||||
if err := json.Unmarshal(out, &messages); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Track which messages to delete (use map to avoid duplicates)
|
||||
toDeleteIDs := make(map[string]bool)
|
||||
|
||||
// Time-based retention: delete messages older than RetentionHours
|
||||
if fields.RetentionHours > 0 {
|
||||
cutoff := time.Now().Add(-time.Duration(fields.RetentionHours) * time.Hour)
|
||||
for _, msg := range messages {
|
||||
createdAt, err := time.Parse(time.RFC3339, msg.CreatedAt)
|
||||
if err != nil {
|
||||
continue // Skip messages with unparseable timestamps
|
||||
}
|
||||
if createdAt.Before(cutoff) {
|
||||
toDeleteIDs[msg.ID] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Count-based retention with 10% buffer to avoid thrashing
|
||||
if fields.RetentionCount > 0 {
|
||||
threshold := int(float64(fields.RetentionCount) * 1.1)
|
||||
if len(messages) > threshold {
|
||||
toDeleteByCount := len(messages) - fields.RetentionCount
|
||||
for i := 0; i < toDeleteByCount && i < len(messages); i++ {
|
||||
toDeleteIDs[messages[i].ID] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Delete marked messages
|
||||
for id := range toDeleteIDs {
|
||||
if _, err := b.run("close", id, "--reason=patrol retention pruning"); err == nil {
|
||||
pruned++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return pruned, nil
|
||||
}
|
||||
271
internal/beads/beads_channel_test.go
Normal file
271
internal/beads/beads_channel_test.go
Normal file
@@ -0,0 +1,271 @@
|
||||
package beads
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestFormatChannelDescription checks FormatChannelDescription via substring
// presence: each case lists lines that must appear somewhere in the rendered
// description (it does not assert the full output or line order).
func TestFormatChannelDescription(t *testing.T) {
	tests := []struct {
		name   string
		title  string
		fields *ChannelFields
		want   []string // Lines that should be present
	}{
		{
			name:  "basic channel",
			title: "Channel: alerts",
			fields: &ChannelFields{
				Name:        "alerts",
				Subscribers: []string{"gastown/crew/max", "gastown/witness"},
				Status:      ChannelStatusActive,
				CreatedBy:   "human",
				CreatedAt:   "2024-01-15T10:00:00Z",
			},
			want: []string{
				"Channel: alerts",
				"name: alerts",
				"subscribers: gastown/crew/max,gastown/witness",
				"status: active",
				"created_by: human",
				"created_at: 2024-01-15T10:00:00Z",
			},
		},
		{
			// Empty subscriber list must serialize as the literal "null".
			name:  "empty subscribers",
			title: "Channel: empty",
			fields: &ChannelFields{
				Name:        "empty",
				Subscribers: nil,
				Status:      ChannelStatusActive,
				CreatedBy:   "admin",
			},
			want: []string{
				"name: empty",
				"subscribers: null",
				"created_by: admin",
			},
		},
		{
			name:  "with retention",
			title: "Channel: builds",
			fields: &ChannelFields{
				Name:           "builds",
				Subscribers:    []string{"*/witness"},
				RetentionCount: 100,
				RetentionHours: 24,
			},
			want: []string{
				"name: builds",
				"retention_count: 100",
				"retention_hours: 24",
			},
		},
		{
			name:  "closed channel",
			title: "Channel: old",
			fields: &ChannelFields{
				Name:   "old",
				Status: ChannelStatusClosed,
			},
			want: []string{
				"status: closed",
			},
		},
		{
			// nil fields short-circuits to the bare title.
			name:   "nil fields",
			title:  "Just a title",
			fields: nil,
			want:   []string{"Just a title"},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := FormatChannelDescription(tt.title, tt.fields)
			for _, line := range tt.want {
				if !strings.Contains(got, line) {
					t.Errorf("FormatChannelDescription() missing line %q\ngot:\n%s", line, got)
				}
			}
		})
	}
}
|
||||
|
||||
// TestParseChannelFields verifies that ParseChannelFields recovers structured
// channel metadata from "key: value" description text, including the "null"
// sentinel for subscribers, whitespace trimming around list items, and the
// default active status for an empty description.
func TestParseChannelFields(t *testing.T) {
	tests := []struct {
		name        string
		description string
		want        *ChannelFields
	}{
		{
			// Every supported field present at once.
			name: "full channel",
			description: `Channel: alerts

name: alerts
subscribers: gastown/crew/max,gastown/witness,*/refinery
status: active
retention_count: 50
retention_hours: 48
created_by: human
created_at: 2024-01-15T10:00:00Z`,
			want: &ChannelFields{
				Name:           "alerts",
				Subscribers:    []string{"gastown/crew/max", "gastown/witness", "*/refinery"},
				Status:         ChannelStatusActive,
				RetentionCount: 50,
				RetentionHours: 48,
				CreatedBy:      "human",
				CreatedAt:      "2024-01-15T10:00:00Z",
			},
		},
		{
			// The literal "null" must parse back as a nil subscriber list.
			name: "null subscribers",
			description: `Channel: empty

name: empty
subscribers: null
status: active
created_by: admin`,
			want: &ChannelFields{
				Name:        "empty",
				Subscribers: nil,
				Status:      ChannelStatusActive,
				CreatedBy:   "admin",
			},
		},
		{
			name: "single subscriber",
			description: `name: solo
subscribers: gastown/crew/max
status: active`,
			want: &ChannelFields{
				Name:        "solo",
				Subscribers: []string{"gastown/crew/max"},
				Status:      ChannelStatusActive,
			},
		},
		{
			name:        "empty description",
			description: "",
			want: &ChannelFields{
				Status: ChannelStatusActive, // Default
			},
		},
		{
			// Whitespace around comma-separated items is trimmed.
			name: "subscribers with spaces",
			description: `name: spaced
subscribers: a, b , c
status: active`,
			want: &ChannelFields{
				Name:        "spaced",
				Subscribers: []string{"a", "b", "c"},
				Status:      ChannelStatusActive,
			},
		},
		{
			name: "closed status",
			description: `name: archived
status: closed`,
			want: &ChannelFields{
				Name:   "archived",
				Status: ChannelStatusClosed,
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := ParseChannelFields(tt.description)
			// Field-by-field comparison keeps failure messages specific.
			if got.Name != tt.want.Name {
				t.Errorf("Name = %q, want %q", got.Name, tt.want.Name)
			}
			if got.Status != tt.want.Status {
				t.Errorf("Status = %q, want %q", got.Status, tt.want.Status)
			}
			if got.RetentionCount != tt.want.RetentionCount {
				t.Errorf("RetentionCount = %d, want %d", got.RetentionCount, tt.want.RetentionCount)
			}
			if got.RetentionHours != tt.want.RetentionHours {
				t.Errorf("RetentionHours = %d, want %d", got.RetentionHours, tt.want.RetentionHours)
			}
			if got.CreatedBy != tt.want.CreatedBy {
				t.Errorf("CreatedBy = %q, want %q", got.CreatedBy, tt.want.CreatedBy)
			}
			if got.CreatedAt != tt.want.CreatedAt {
				t.Errorf("CreatedAt = %q, want %q", got.CreatedAt, tt.want.CreatedAt)
			}
			// Element-wise check only makes sense when lengths already match.
			if len(got.Subscribers) != len(tt.want.Subscribers) {
				t.Errorf("Subscribers count = %d, want %d", len(got.Subscribers), len(tt.want.Subscribers))
			} else {
				for i, s := range got.Subscribers {
					if s != tt.want.Subscribers[i] {
						t.Errorf("Subscribers[%d] = %q, want %q", i, s, tt.want.Subscribers[i])
					}
				}
			}
		})
	}
}
|
||||
|
||||
func TestChannelBeadID(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
want string
|
||||
}{
|
||||
{"alerts", "hq-channel-alerts"},
|
||||
{"builds", "hq-channel-builds"},
|
||||
{"team-updates", "hq-channel-team-updates"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := ChannelBeadID(tt.name); got != tt.want {
|
||||
t.Errorf("ChannelBeadID(%q) = %q, want %q", tt.name, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestChannelRoundTrip(t *testing.T) {
|
||||
// Test that Format -> Parse preserves data
|
||||
original := &ChannelFields{
|
||||
Name: "test-channel",
|
||||
Subscribers: []string{"gastown/crew/max", "*/witness", "@town"},
|
||||
Status: ChannelStatusActive,
|
||||
RetentionCount: 100,
|
||||
RetentionHours: 72,
|
||||
CreatedBy: "tester",
|
||||
CreatedAt: "2024-01-15T12:00:00Z",
|
||||
}
|
||||
|
||||
description := FormatChannelDescription("Channel: test-channel", original)
|
||||
parsed := ParseChannelFields(description)
|
||||
|
||||
if parsed.Name != original.Name {
|
||||
t.Errorf("Name: got %q, want %q", parsed.Name, original.Name)
|
||||
}
|
||||
if parsed.Status != original.Status {
|
||||
t.Errorf("Status: got %q, want %q", parsed.Status, original.Status)
|
||||
}
|
||||
if parsed.RetentionCount != original.RetentionCount {
|
||||
t.Errorf("RetentionCount: got %d, want %d", parsed.RetentionCount, original.RetentionCount)
|
||||
}
|
||||
if parsed.RetentionHours != original.RetentionHours {
|
||||
t.Errorf("RetentionHours: got %d, want %d", parsed.RetentionHours, original.RetentionHours)
|
||||
}
|
||||
if parsed.CreatedBy != original.CreatedBy {
|
||||
t.Errorf("CreatedBy: got %q, want %q", parsed.CreatedBy, original.CreatedBy)
|
||||
}
|
||||
if parsed.CreatedAt != original.CreatedAt {
|
||||
t.Errorf("CreatedAt: got %q, want %q", parsed.CreatedAt, original.CreatedAt)
|
||||
}
|
||||
if len(parsed.Subscribers) != len(original.Subscribers) {
|
||||
t.Fatalf("Subscribers count: got %d, want %d", len(parsed.Subscribers), len(original.Subscribers))
|
||||
}
|
||||
for i, s := range original.Subscribers {
|
||||
if parsed.Subscribers[i] != s {
|
||||
t.Errorf("Subscribers[%d]: got %q, want %q", i, parsed.Subscribers[i], s)
|
||||
}
|
||||
}
|
||||
}
|
||||
155
internal/beads/beads_delegation.go
Normal file
155
internal/beads/beads_delegation.go
Normal file
@@ -0,0 +1,155 @@
|
||||
// Package beads provides delegation tracking for work units.
|
||||
package beads
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Delegation represents a work delegation relationship between work units.
// Delegation links a parent work unit to a child work unit, tracking who
// delegated the work and to whom, along with any terms of the delegation.
// This enables work distribution with credit cascade - work flows down,
// validation and credit flow up.
//
// Instances are serialized to JSON and stored in the child issue's
// "delegated_from" slot (see AddDelegation / GetDelegation).
type Delegation struct {
	// Parent is the work unit ID that delegated the work.
	Parent string `json:"parent"`

	// Child is the work unit ID that received the delegated work.
	Child string `json:"child"`

	// DelegatedBy is the entity (hop:// URI or actor string) that delegated.
	DelegatedBy string `json:"delegated_by"`

	// DelegatedTo is the entity (hop:// URI or actor string) receiving delegation.
	DelegatedTo string `json:"delegated_to"`

	// Terms contains optional conditions of the delegation.
	Terms *DelegationTerms `json:"terms,omitempty"`

	// CreatedAt is when the delegation was created.
	CreatedAt string `json:"created_at,omitempty"`
}
|
||||
|
||||
// DelegationTerms holds optional terms/conditions for a delegation.
// Every field is optional and omitted from JSON when zero-valued.
type DelegationTerms struct {
	// Portion describes what part of the parent work is delegated.
	Portion string `json:"portion,omitempty"`

	// Deadline is the expected completion date.
	Deadline string `json:"deadline,omitempty"`

	// AcceptanceCriteria describes what constitutes completion.
	AcceptanceCriteria string `json:"acceptance_criteria,omitempty"`

	// CreditShare is the percentage of credit that flows to the delegate (0-100).
	CreditShare int `json:"credit_share,omitempty"`
}
|
||||
|
||||
// AddDelegation creates a delegation relationship from parent to child work unit.
|
||||
// The delegation tracks who delegated (delegatedBy) and who received (delegatedTo),
|
||||
// along with optional terms. Delegations enable credit cascade - when child work
|
||||
// is completed, credit flows up to the parent work unit and its delegator.
|
||||
//
|
||||
// Note: This is stored as metadata on the child issue until bd CLI has native
|
||||
// delegation support. Once bd supports `bd delegate add`, this will be updated.
|
||||
func (b *Beads) AddDelegation(d *Delegation) error {
|
||||
if d.Parent == "" || d.Child == "" {
|
||||
return fmt.Errorf("delegation requires both parent and child work unit IDs")
|
||||
}
|
||||
if d.DelegatedBy == "" || d.DelegatedTo == "" {
|
||||
return fmt.Errorf("delegation requires both delegated_by and delegated_to entities")
|
||||
}
|
||||
|
||||
// Store delegation as JSON in the child issue's delegated_from slot
|
||||
delegationJSON, err := json.Marshal(d)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshaling delegation: %w", err)
|
||||
}
|
||||
|
||||
// Set the delegated_from slot on the child issue
|
||||
_, err = b.run("slot", "set", d.Child, "delegated_from", string(delegationJSON))
|
||||
if err != nil {
|
||||
return fmt.Errorf("setting delegation slot: %w", err)
|
||||
}
|
||||
|
||||
// Also add a dependency so child blocks parent (work must complete before parent can close)
|
||||
if err := b.AddDependency(d.Parent, d.Child); err != nil {
|
||||
// Log but don't fail - the delegation is still recorded
|
||||
fmt.Printf("Warning: could not add blocking dependency for delegation: %v\n", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveDelegation removes a delegation relationship.
|
||||
func (b *Beads) RemoveDelegation(parent, child string) error {
|
||||
// Clear the delegated_from slot on the child
|
||||
_, err := b.run("slot", "clear", child, "delegated_from")
|
||||
if err != nil {
|
||||
return fmt.Errorf("clearing delegation slot: %w", err)
|
||||
}
|
||||
|
||||
// Also remove the blocking dependency
|
||||
if err := b.RemoveDependency(parent, child); err != nil {
|
||||
// Log but don't fail
|
||||
fmt.Printf("Warning: could not remove blocking dependency: %v\n", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetDelegation retrieves the delegation information for a child work unit.
|
||||
// Returns nil if the issue has no delegation.
|
||||
func (b *Beads) GetDelegation(child string) (*Delegation, error) {
|
||||
// Verify the issue exists first
|
||||
if _, err := b.Show(child); err != nil {
|
||||
return nil, fmt.Errorf("getting issue: %w", err)
|
||||
}
|
||||
|
||||
// Get delegation from the slot
|
||||
out, err := b.run("slot", "get", child, "delegated_from")
|
||||
if err != nil {
|
||||
// No delegation slot means no delegation
|
||||
if strings.Contains(err.Error(), "not found") || strings.Contains(err.Error(), "no slot") {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("getting delegation slot: %w", err)
|
||||
}
|
||||
|
||||
slotValue := strings.TrimSpace(string(out))
|
||||
if slotValue == "" || slotValue == "null" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var delegation Delegation
|
||||
if err := json.Unmarshal([]byte(slotValue), &delegation); err != nil {
|
||||
return nil, fmt.Errorf("parsing delegation: %w", err)
|
||||
}
|
||||
|
||||
return &delegation, nil
|
||||
}
|
||||
|
||||
// ListDelegationsFrom returns all delegations from a parent work unit.
|
||||
// This searches for issues that have delegated_from pointing to the parent.
|
||||
func (b *Beads) ListDelegationsFrom(parent string) ([]*Delegation, error) {
|
||||
// List all issues that depend on this parent (delegated work blocks parent)
|
||||
issues, err := b.List(ListOptions{Status: "all"})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("listing issues: %w", err)
|
||||
}
|
||||
|
||||
var delegations []*Delegation
|
||||
for _, issue := range issues {
|
||||
d, err := b.GetDelegation(issue.ID)
|
||||
if err != nil {
|
||||
continue // Skip issues with errors
|
||||
}
|
||||
if d != nil && d.Parent == parent {
|
||||
delegations = append(delegations, d)
|
||||
}
|
||||
}
|
||||
|
||||
return delegations, nil
|
||||
}
|
||||
93
internal/beads/beads_dog.go
Normal file
93
internal/beads/beads_dog.go
Normal file
@@ -0,0 +1,93 @@
|
||||
// Package beads provides dog agent bead management.
|
||||
package beads
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CreateDogAgentBead creates an agent bead for a dog.
|
||||
// Dogs use a different schema than other agents - they use labels for metadata.
|
||||
// Returns the created issue or an error.
|
||||
func (b *Beads) CreateDogAgentBead(name, location string) (*Issue, error) {
|
||||
title := fmt.Sprintf("Dog: %s", name)
|
||||
labels := []string{
|
||||
"gt:agent",
|
||||
"role_type:dog",
|
||||
"rig:town",
|
||||
"location:" + location,
|
||||
}
|
||||
|
||||
args := []string{
|
||||
"create", "--json",
|
||||
"--role-type=dog",
|
||||
"--title=" + title,
|
||||
"--labels=" + strings.Join(labels, ","),
|
||||
}
|
||||
|
||||
// Default actor from BD_ACTOR env var for provenance tracking
|
||||
// Uses getActor() to respect isolated mode (tests)
|
||||
if actor := b.getActor(); actor != "" {
|
||||
args = append(args, "--actor="+actor)
|
||||
}
|
||||
|
||||
out, err := b.run(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issue Issue
|
||||
if err := json.Unmarshal(out, &issue); err != nil {
|
||||
return nil, fmt.Errorf("parsing bd create output: %w", err)
|
||||
}
|
||||
|
||||
return &issue, nil
|
||||
}
|
||||
|
||||
// FindDogAgentBead finds the agent bead for a dog by name.
|
||||
// Searches for agent beads with role_type:dog and matching title.
|
||||
// Returns nil if not found.
|
||||
func (b *Beads) FindDogAgentBead(name string) (*Issue, error) {
|
||||
// List all agent beads and filter by role_type:dog label
|
||||
issues, err := b.List(ListOptions{
|
||||
Label: "gt:agent",
|
||||
Status: "all",
|
||||
Priority: -1, // No priority filter
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("listing agents: %w", err)
|
||||
}
|
||||
|
||||
expectedTitle := fmt.Sprintf("Dog: %s", name)
|
||||
for _, issue := range issues {
|
||||
// Check title match and role_type:dog label
|
||||
if issue.Title == expectedTitle {
|
||||
for _, label := range issue.Labels {
|
||||
if label == "role_type:dog" {
|
||||
return issue, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// DeleteDogAgentBead finds and deletes the agent bead for a dog.
|
||||
// Returns nil if the bead doesn't exist (idempotent).
|
||||
func (b *Beads) DeleteDogAgentBead(name string) error {
|
||||
issue, err := b.FindDogAgentBead(name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("finding dog bead: %w", err)
|
||||
}
|
||||
if issue == nil {
|
||||
return nil // Already doesn't exist - idempotent
|
||||
}
|
||||
|
||||
err = b.DeleteAgentBead(issue.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("deleting bead %s: %w", issue.ID, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
441
internal/beads/beads_escalation.go
Normal file
441
internal/beads/beads_escalation.go
Normal file
@@ -0,0 +1,441 @@
|
||||
// Package beads provides escalation bead management.
|
||||
package beads
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// EscalationFields holds structured fields for escalation beads.
// These are stored as "key: value" lines in the description and round-trip
// through FormatEscalationDescription / ParseEscalationFields.
type EscalationFields struct {
	Severity          string // critical, high, medium, low
	Reason            string // Why this was escalated
	Source            string // Source identifier (e.g., plugin:rebuild-gt, patrol:deacon)
	EscalatedBy       string // Agent address that escalated (e.g., "gastown/Toast")
	EscalatedAt       string // ISO 8601 timestamp
	AckedBy           string // Agent that acknowledged (empty if not acked)
	AckedAt           string // When acknowledged (empty if not acked)
	ClosedBy          string // Agent that closed (empty if not closed)
	ClosedReason      string // Resolution reason (empty if not closed)
	RelatedBead       string // Optional: related bead ID (task, bug, etc.)
	OriginalSeverity  string // Original severity before any re-escalation
	ReescalationCount int    // Number of times this has been re-escalated
	LastReescalatedAt string // When last re-escalated (empty if never)
	LastReescalatedBy string // Who last re-escalated (empty if never)
}
|
||||
|
||||
// EscalationState constants for bead status tracking.
// They describe the escalation lifecycle: open -> acked -> closed.
const (
	EscalationOpen   = "open"   // Unacknowledged
	EscalationAcked  = "acked"  // Acknowledged but not resolved
	EscalationClosed = "closed" // Resolved/closed
)
|
||||
|
||||
// FormatEscalationDescription creates a description string from escalation fields.
|
||||
func FormatEscalationDescription(title string, fields *EscalationFields) string {
|
||||
if fields == nil {
|
||||
return title
|
||||
}
|
||||
|
||||
var lines []string
|
||||
lines = append(lines, title)
|
||||
lines = append(lines, "")
|
||||
lines = append(lines, fmt.Sprintf("severity: %s", fields.Severity))
|
||||
lines = append(lines, fmt.Sprintf("reason: %s", fields.Reason))
|
||||
if fields.Source != "" {
|
||||
lines = append(lines, fmt.Sprintf("source: %s", fields.Source))
|
||||
} else {
|
||||
lines = append(lines, "source: null")
|
||||
}
|
||||
lines = append(lines, fmt.Sprintf("escalated_by: %s", fields.EscalatedBy))
|
||||
lines = append(lines, fmt.Sprintf("escalated_at: %s", fields.EscalatedAt))
|
||||
|
||||
if fields.AckedBy != "" {
|
||||
lines = append(lines, fmt.Sprintf("acked_by: %s", fields.AckedBy))
|
||||
} else {
|
||||
lines = append(lines, "acked_by: null")
|
||||
}
|
||||
|
||||
if fields.AckedAt != "" {
|
||||
lines = append(lines, fmt.Sprintf("acked_at: %s", fields.AckedAt))
|
||||
} else {
|
||||
lines = append(lines, "acked_at: null")
|
||||
}
|
||||
|
||||
if fields.ClosedBy != "" {
|
||||
lines = append(lines, fmt.Sprintf("closed_by: %s", fields.ClosedBy))
|
||||
} else {
|
||||
lines = append(lines, "closed_by: null")
|
||||
}
|
||||
|
||||
if fields.ClosedReason != "" {
|
||||
lines = append(lines, fmt.Sprintf("closed_reason: %s", fields.ClosedReason))
|
||||
} else {
|
||||
lines = append(lines, "closed_reason: null")
|
||||
}
|
||||
|
||||
if fields.RelatedBead != "" {
|
||||
lines = append(lines, fmt.Sprintf("related_bead: %s", fields.RelatedBead))
|
||||
} else {
|
||||
lines = append(lines, "related_bead: null")
|
||||
}
|
||||
|
||||
// Reescalation fields
|
||||
if fields.OriginalSeverity != "" {
|
||||
lines = append(lines, fmt.Sprintf("original_severity: %s", fields.OriginalSeverity))
|
||||
} else {
|
||||
lines = append(lines, "original_severity: null")
|
||||
}
|
||||
lines = append(lines, fmt.Sprintf("reescalation_count: %d", fields.ReescalationCount))
|
||||
if fields.LastReescalatedAt != "" {
|
||||
lines = append(lines, fmt.Sprintf("last_reescalated_at: %s", fields.LastReescalatedAt))
|
||||
} else {
|
||||
lines = append(lines, "last_reescalated_at: null")
|
||||
}
|
||||
if fields.LastReescalatedBy != "" {
|
||||
lines = append(lines, fmt.Sprintf("last_reescalated_by: %s", fields.LastReescalatedBy))
|
||||
} else {
|
||||
lines = append(lines, "last_reescalated_by: null")
|
||||
}
|
||||
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
// ParseEscalationFields extracts escalation fields from an issue's description.
|
||||
func ParseEscalationFields(description string) *EscalationFields {
|
||||
fields := &EscalationFields{}
|
||||
|
||||
for _, line := range strings.Split(description, "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
colonIdx := strings.Index(line, ":")
|
||||
if colonIdx == -1 {
|
||||
continue
|
||||
}
|
||||
|
||||
key := strings.TrimSpace(line[:colonIdx])
|
||||
value := strings.TrimSpace(line[colonIdx+1:])
|
||||
if value == "null" || value == "" {
|
||||
value = ""
|
||||
}
|
||||
|
||||
switch strings.ToLower(key) {
|
||||
case "severity":
|
||||
fields.Severity = value
|
||||
case "reason":
|
||||
fields.Reason = value
|
||||
case "source":
|
||||
fields.Source = value
|
||||
case "escalated_by":
|
||||
fields.EscalatedBy = value
|
||||
case "escalated_at":
|
||||
fields.EscalatedAt = value
|
||||
case "acked_by":
|
||||
fields.AckedBy = value
|
||||
case "acked_at":
|
||||
fields.AckedAt = value
|
||||
case "closed_by":
|
||||
fields.ClosedBy = value
|
||||
case "closed_reason":
|
||||
fields.ClosedReason = value
|
||||
case "related_bead":
|
||||
fields.RelatedBead = value
|
||||
case "original_severity":
|
||||
fields.OriginalSeverity = value
|
||||
case "reescalation_count":
|
||||
if n, err := strconv.Atoi(value); err == nil {
|
||||
fields.ReescalationCount = n
|
||||
}
|
||||
case "last_reescalated_at":
|
||||
fields.LastReescalatedAt = value
|
||||
case "last_reescalated_by":
|
||||
fields.LastReescalatedBy = value
|
||||
}
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// CreateEscalationBead creates an escalation bead for tracking escalations.
|
||||
// The created_by field is populated from BD_ACTOR env var for provenance tracking.
|
||||
func (b *Beads) CreateEscalationBead(title string, fields *EscalationFields) (*Issue, error) {
|
||||
description := FormatEscalationDescription(title, fields)
|
||||
|
||||
args := []string{"create", "--json",
|
||||
"--title=" + title,
|
||||
"--description=" + description,
|
||||
"--type=task",
|
||||
"--labels=gt:escalation",
|
||||
}
|
||||
|
||||
// Add severity as a label for easy filtering
|
||||
if fields != nil && fields.Severity != "" {
|
||||
args = append(args, fmt.Sprintf("--labels=severity:%s", fields.Severity))
|
||||
}
|
||||
|
||||
// Default actor from BD_ACTOR env var for provenance tracking
|
||||
// Uses getActor() to respect isolated mode (tests)
|
||||
if actor := b.getActor(); actor != "" {
|
||||
args = append(args, "--actor="+actor)
|
||||
}
|
||||
|
||||
out, err := b.run(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issue Issue
|
||||
if err := json.Unmarshal(out, &issue); err != nil {
|
||||
return nil, fmt.Errorf("parsing bd create output: %w", err)
|
||||
}
|
||||
|
||||
return &issue, nil
|
||||
}
|
||||
|
||||
// AckEscalation acknowledges an escalation bead.
|
||||
// Sets acked_by and acked_at fields, adds "acked" label.
|
||||
func (b *Beads) AckEscalation(id, ackedBy string) error {
|
||||
// First get current issue to preserve other fields
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify it's an escalation
|
||||
if !HasLabel(issue, "gt:escalation") {
|
||||
return fmt.Errorf("issue %s is not an escalation bead (missing gt:escalation label)", id)
|
||||
}
|
||||
|
||||
// Parse existing fields
|
||||
fields := ParseEscalationFields(issue.Description)
|
||||
fields.AckedBy = ackedBy
|
||||
fields.AckedAt = time.Now().Format(time.RFC3339)
|
||||
|
||||
// Format new description
|
||||
description := FormatEscalationDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(id, UpdateOptions{
|
||||
Description: &description,
|
||||
AddLabels: []string{"acked"},
|
||||
})
|
||||
}
|
||||
|
||||
// CloseEscalation closes an escalation bead with a resolution reason.
|
||||
// Sets closed_by and closed_reason fields, closes the issue.
|
||||
func (b *Beads) CloseEscalation(id, closedBy, reason string) error {
|
||||
// First get current issue to preserve other fields
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify it's an escalation
|
||||
if !HasLabel(issue, "gt:escalation") {
|
||||
return fmt.Errorf("issue %s is not an escalation bead (missing gt:escalation label)", id)
|
||||
}
|
||||
|
||||
// Parse existing fields
|
||||
fields := ParseEscalationFields(issue.Description)
|
||||
fields.ClosedBy = closedBy
|
||||
fields.ClosedReason = reason
|
||||
|
||||
// Format new description
|
||||
description := FormatEscalationDescription(issue.Title, fields)
|
||||
|
||||
// Update description first
|
||||
if err := b.Update(id, UpdateOptions{
|
||||
Description: &description,
|
||||
AddLabels: []string{"resolved"},
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Close the issue
|
||||
_, err = b.run("close", id, "--reason="+reason)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetEscalationBead retrieves an escalation bead by ID.
|
||||
// Returns nil if not found.
|
||||
func (b *Beads) GetEscalationBead(id string) (*Issue, *EscalationFields, error) {
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if !HasLabel(issue, "gt:escalation") {
|
||||
return nil, nil, fmt.Errorf("issue %s is not an escalation bead (missing gt:escalation label)", id)
|
||||
}
|
||||
|
||||
fields := ParseEscalationFields(issue.Description)
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
// ListEscalations returns all open escalation beads.
|
||||
func (b *Beads) ListEscalations() ([]*Issue, error) {
|
||||
out, err := b.run("list", "--label=gt:escalation", "--status=open", "--json")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issues []*Issue
|
||||
if err := json.Unmarshal(out, &issues); err != nil {
|
||||
return nil, fmt.Errorf("parsing bd list output: %w", err)
|
||||
}
|
||||
|
||||
return issues, nil
|
||||
}
|
||||
|
||||
// ListEscalationsBySeverity returns open escalation beads filtered by severity.
|
||||
func (b *Beads) ListEscalationsBySeverity(severity string) ([]*Issue, error) {
|
||||
out, err := b.run("list",
|
||||
"--label=gt:escalation",
|
||||
"--label=severity:"+severity,
|
||||
"--status=open",
|
||||
"--json",
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issues []*Issue
|
||||
if err := json.Unmarshal(out, &issues); err != nil {
|
||||
return nil, fmt.Errorf("parsing bd list output: %w", err)
|
||||
}
|
||||
|
||||
return issues, nil
|
||||
}
|
||||
|
||||
// ListStaleEscalations returns escalations older than the given threshold.
|
||||
// threshold is a duration string like "1h" or "30m".
|
||||
func (b *Beads) ListStaleEscalations(threshold time.Duration) ([]*Issue, error) {
|
||||
// Get all open escalations
|
||||
escalations, err := b.ListEscalations()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cutoff := time.Now().Add(-threshold)
|
||||
var stale []*Issue
|
||||
|
||||
for _, issue := range escalations {
|
||||
// Skip acknowledged escalations
|
||||
if HasLabel(issue, "acked") {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if older than threshold
|
||||
createdAt, err := time.Parse(time.RFC3339, issue.CreatedAt)
|
||||
if err != nil {
|
||||
continue // Skip if can't parse
|
||||
}
|
||||
|
||||
if createdAt.Before(cutoff) {
|
||||
stale = append(stale, issue)
|
||||
}
|
||||
}
|
||||
|
||||
return stale, nil
|
||||
}
|
||||
|
||||
// ReescalationResult holds the result of a reescalation operation.
type ReescalationResult struct {
	ID              string // Escalation bead ID
	Title           string // Escalation title
	OldSeverity     string // Severity before the operation
	NewSeverity     string // Severity after the operation (unset when skipped at the reescalation cap)
	ReescalationNum int    // Total reescalation count after this bump
	Skipped         bool   // True when no severity bump was performed
	SkipReason      string // Why the bump was skipped (set only when Skipped)
}
|
||||
|
||||
// ReescalateEscalation bumps the severity of an escalation and updates tracking fields.
// Returns a ReescalationResult describing the bump (or why it was skipped),
// or an error when the escalation cannot be loaded or updated.
// reescalatedBy should be the identity of the agent/process doing the reescalation.
// maxReescalations limits how many times an escalation can be bumped (0 = unlimited).
func (b *Beads) ReescalateEscalation(id, reescalatedBy string, maxReescalations int) (*ReescalationResult, error) {
	// Load the escalation and its parsed description fields.
	issue, fields, err := b.GetEscalationBead(id)
	if err != nil {
		return nil, err
	}
	if issue == nil {
		return nil, fmt.Errorf("escalation not found: %s", id)
	}

	result := &ReescalationResult{
		ID:          id,
		Title:       issue.Title,
		OldSeverity: fields.Severity,
	}

	// Skip (not an error) when the reescalation cap has been reached.
	if maxReescalations > 0 && fields.ReescalationCount >= maxReescalations {
		result.Skipped = true
		result.SkipReason = fmt.Sprintf("already at max reescalations (%d)", maxReescalations)
		return result, nil
	}

	// Skip when already at critical - there is no higher level to bump to.
	if fields.Severity == "critical" {
		result.Skipped = true
		result.SkipReason = "already at critical severity"
		result.NewSeverity = "critical"
		return result, nil
	}

	// Preserve the pre-bump severity the first time we reescalate, so the
	// starting point survives repeated bumps.
	if fields.OriginalSeverity == "" {
		fields.OriginalSeverity = fields.Severity
	}

	// Bump the severity one level and record who/when.
	newSeverity := bumpSeverity(fields.Severity)
	fields.Severity = newSeverity
	fields.ReescalationCount++
	fields.LastReescalatedAt = time.Now().Format(time.RFC3339)
	fields.LastReescalatedBy = reescalatedBy

	result.NewSeverity = newSeverity
	result.ReescalationNum = fields.ReescalationCount

	// Re-render the description from the updated fields.
	description := FormatEscalationDescription(issue.Title, fields)

	// Persist the new description and swap the severity label in one update.
	if err := b.Update(id, UpdateOptions{
		Description:  &description,
		AddLabels:    []string{"reescalated", "severity:" + newSeverity},
		RemoveLabels: []string{"severity:" + result.OldSeverity},
	}); err != nil {
		return nil, fmt.Errorf("updating escalation: %w", err)
	}

	return result, nil
}
|
||||
|
||||
// bumpSeverity returns the next higher severity level.
// The ladder is low -> medium -> high -> critical; anything unrecognized
// (including critical itself) maps to critical.
func bumpSeverity(severity string) string {
	ladder := map[string]string{
		"low":    "medium",
		"medium": "high",
		"high":   "critical",
	}
	if next, ok := ladder[severity]; ok {
		return next
	}
	return "critical"
}
|
||||
311
internal/beads/beads_group.go
Normal file
311
internal/beads/beads_group.go
Normal file
@@ -0,0 +1,311 @@
|
||||
// Package beads provides group bead management for beads-native messaging.
|
||||
// Groups are named collections of addresses used for mail distribution.
|
||||
package beads
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// GroupFields holds structured fields for group beads.
// These are stored as "key: value" lines in the description and round-trip
// through FormatGroupDescription / ParseGroupFields.
type GroupFields struct {
	Name      string   // Unique group name (e.g., "ops-team", "all-witnesses")
	Members   []string // Addresses, patterns, or group names (can nest)
	CreatedBy string   // Who created the group
	CreatedAt string   // ISO 8601 timestamp
}
|
||||
|
||||
// FormatGroupDescription creates a description string from group fields.
|
||||
func FormatGroupDescription(title string, fields *GroupFields) string {
|
||||
if fields == nil {
|
||||
return title
|
||||
}
|
||||
|
||||
var lines []string
|
||||
lines = append(lines, title)
|
||||
lines = append(lines, "")
|
||||
lines = append(lines, fmt.Sprintf("name: %s", fields.Name))
|
||||
|
||||
// Members stored as comma-separated list
|
||||
if len(fields.Members) > 0 {
|
||||
lines = append(lines, fmt.Sprintf("members: %s", strings.Join(fields.Members, ",")))
|
||||
} else {
|
||||
lines = append(lines, "members: null")
|
||||
}
|
||||
|
||||
if fields.CreatedBy != "" {
|
||||
lines = append(lines, fmt.Sprintf("created_by: %s", fields.CreatedBy))
|
||||
} else {
|
||||
lines = append(lines, "created_by: null")
|
||||
}
|
||||
|
||||
if fields.CreatedAt != "" {
|
||||
lines = append(lines, fmt.Sprintf("created_at: %s", fields.CreatedAt))
|
||||
} else {
|
||||
lines = append(lines, "created_at: null")
|
||||
}
|
||||
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
// ParseGroupFields extracts group fields from an issue's description.
|
||||
func ParseGroupFields(description string) *GroupFields {
|
||||
fields := &GroupFields{}
|
||||
|
||||
for _, line := range strings.Split(description, "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
colonIdx := strings.Index(line, ":")
|
||||
if colonIdx == -1 {
|
||||
continue
|
||||
}
|
||||
|
||||
key := strings.TrimSpace(line[:colonIdx])
|
||||
value := strings.TrimSpace(line[colonIdx+1:])
|
||||
if value == "null" || value == "" {
|
||||
value = ""
|
||||
}
|
||||
|
||||
switch strings.ToLower(key) {
|
||||
case "name":
|
||||
fields.Name = value
|
||||
case "members":
|
||||
if value != "" {
|
||||
// Parse comma-separated members
|
||||
for _, m := range strings.Split(value, ",") {
|
||||
m = strings.TrimSpace(m)
|
||||
if m != "" {
|
||||
fields.Members = append(fields.Members, m)
|
||||
}
|
||||
}
|
||||
}
|
||||
case "created_by":
|
||||
fields.CreatedBy = value
|
||||
case "created_at":
|
||||
fields.CreatedAt = value
|
||||
}
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// GroupBeadID returns the bead ID for a group name.
// Groups are town-level entities (hq- prefix) because they span rigs,
// so the ID is simply the fixed prefix plus the group name.
func GroupBeadID(name string) string {
	const prefix = "hq-group-"
	return prefix + name
}
|
||||
|
||||
// CreateGroupBead creates a group bead for mail distribution.
|
||||
// The ID format is: hq-group-<name> (e.g., hq-group-ops-team)
|
||||
// Groups are town-level entities (hq- prefix) because they span rigs.
|
||||
// The created_by field is populated from BD_ACTOR env var for provenance tracking.
|
||||
func (b *Beads) CreateGroupBead(name string, members []string, createdBy string) (*Issue, error) {
|
||||
id := GroupBeadID(name)
|
||||
title := fmt.Sprintf("Group: %s", name)
|
||||
|
||||
fields := &GroupFields{
|
||||
Name: name,
|
||||
Members: members,
|
||||
CreatedBy: createdBy,
|
||||
CreatedAt: time.Now().Format(time.RFC3339),
|
||||
}
|
||||
|
||||
description := FormatGroupDescription(title, fields)
|
||||
|
||||
args := []string{"create", "--json",
|
||||
"--id=" + id,
|
||||
"--title=" + title,
|
||||
"--description=" + description,
|
||||
"--type=task", // Groups use task type with gt:group label
|
||||
"--labels=gt:group",
|
||||
"--force", // Override prefix check (town beads may have mixed prefixes)
|
||||
}
|
||||
|
||||
// Default actor from BD_ACTOR env var for provenance tracking
|
||||
// Uses getActor() to respect isolated mode (tests)
|
||||
if actor := b.getActor(); actor != "" {
|
||||
args = append(args, "--actor="+actor)
|
||||
}
|
||||
|
||||
out, err := b.run(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issue Issue
|
||||
if err := json.Unmarshal(out, &issue); err != nil {
|
||||
return nil, fmt.Errorf("parsing bd create output: %w", err)
|
||||
}
|
||||
|
||||
return &issue, nil
|
||||
}
|
||||
|
||||
// GetGroupBead retrieves a group bead by name.
|
||||
// Returns nil, nil if not found.
|
||||
func (b *Beads) GetGroupBead(name string) (*Issue, *GroupFields, error) {
|
||||
id := GroupBeadID(name)
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if !HasLabel(issue, "gt:group") {
|
||||
return nil, nil, fmt.Errorf("bead %s is not a group bead (missing gt:group label)", id)
|
||||
}
|
||||
|
||||
fields := ParseGroupFields(issue.Description)
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
// GetGroupByID retrieves a group bead by its full ID.
|
||||
// Returns nil, nil if not found.
|
||||
func (b *Beads) GetGroupByID(id string) (*Issue, *GroupFields, error) {
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if !HasLabel(issue, "gt:group") {
|
||||
return nil, nil, fmt.Errorf("bead %s is not a group bead (missing gt:group label)", id)
|
||||
}
|
||||
|
||||
fields := ParseGroupFields(issue.Description)
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
// UpdateGroupMembers updates the members list for a group.
|
||||
func (b *Beads) UpdateGroupMembers(name string, members []string) error {
|
||||
issue, fields, err := b.GetGroupBead(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return fmt.Errorf("group %q not found", name)
|
||||
}
|
||||
|
||||
fields.Members = members
|
||||
description := FormatGroupDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(issue.ID, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// AddGroupMember adds a member to a group if not already present.
|
||||
func (b *Beads) AddGroupMember(name string, member string) error {
|
||||
issue, fields, err := b.GetGroupBead(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return fmt.Errorf("group %q not found", name)
|
||||
}
|
||||
|
||||
// Check if already a member
|
||||
for _, m := range fields.Members {
|
||||
if m == member {
|
||||
return nil // Already a member
|
||||
}
|
||||
}
|
||||
|
||||
fields.Members = append(fields.Members, member)
|
||||
description := FormatGroupDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(issue.ID, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// RemoveGroupMember removes a member from a group.
|
||||
func (b *Beads) RemoveGroupMember(name string, member string) error {
|
||||
issue, fields, err := b.GetGroupBead(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return fmt.Errorf("group %q not found", name)
|
||||
}
|
||||
|
||||
// Filter out the member
|
||||
var newMembers []string
|
||||
for _, m := range fields.Members {
|
||||
if m != member {
|
||||
newMembers = append(newMembers, m)
|
||||
}
|
||||
}
|
||||
|
||||
fields.Members = newMembers
|
||||
description := FormatGroupDescription(issue.Title, fields)
|
||||
|
||||
return b.Update(issue.ID, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// DeleteGroupBead permanently deletes a group bead.
|
||||
func (b *Beads) DeleteGroupBead(name string) error {
|
||||
id := GroupBeadID(name)
|
||||
_, err := b.run("delete", id, "--hard", "--force")
|
||||
return err
|
||||
}
|
||||
|
||||
// ListGroupBeads returns all group beads.
|
||||
func (b *Beads) ListGroupBeads() (map[string]*GroupFields, error) {
|
||||
out, err := b.run("list", "--label=gt:group", "--json")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issues []*Issue
|
||||
if err := json.Unmarshal(out, &issues); err != nil {
|
||||
return nil, fmt.Errorf("parsing bd list output: %w", err)
|
||||
}
|
||||
|
||||
result := make(map[string]*GroupFields, len(issues))
|
||||
for _, issue := range issues {
|
||||
fields := ParseGroupFields(issue.Description)
|
||||
if fields.Name != "" {
|
||||
result[fields.Name] = fields
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// LookupGroupByName finds a group by its name field (not by ID).
|
||||
// This is used for address resolution where we may not know the full bead ID.
|
||||
func (b *Beads) LookupGroupByName(name string) (*Issue, *GroupFields, error) {
|
||||
// First try direct lookup by standard ID format
|
||||
issue, fields, err := b.GetGroupBead(name)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if issue != nil {
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
// If not found by ID, search all groups by name field
|
||||
groups, err := b.ListGroupBeads()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if fields, ok := groups[name]; ok {
|
||||
// Found by name, now get the full issue
|
||||
id := GroupBeadID(name)
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
return nil, nil, nil // Not found
|
||||
}
|
||||
209
internal/beads/beads_group_test.go
Normal file
209
internal/beads/beads_group_test.go
Normal file
@@ -0,0 +1,209 @@
|
||||
package beads
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestFormatGroupDescription checks that formatted descriptions contain
// the expected "key: value" lines. It asserts substring presence, not
// exact layout, so the formatter can add lines without breaking it.
func TestFormatGroupDescription(t *testing.T) {
	tests := []struct {
		name   string
		title  string
		fields *GroupFields
		want   []string // Lines that should be present
	}{
		{
			name:  "basic group",
			title: "Group: ops-team",
			fields: &GroupFields{
				Name:      "ops-team",
				Members:   []string{"gastown/crew/max", "gastown/witness"},
				CreatedBy: "human",
				CreatedAt: "2024-01-15T10:00:00Z",
			},
			want: []string{
				"Group: ops-team",
				"name: ops-team",
				"members: gastown/crew/max,gastown/witness",
				"created_by: human",
				"created_at: 2024-01-15T10:00:00Z",
			},
		},
		{
			// Empty member lists serialize as the literal "null".
			name:  "empty members",
			title: "Group: empty",
			fields: &GroupFields{
				Name:      "empty",
				Members:   nil,
				CreatedBy: "admin",
			},
			want: []string{
				"name: empty",
				"members: null",
				"created_by: admin",
			},
		},
		{
			// Wildcard and @-style members pass through untouched.
			name:  "patterns in members",
			title: "Group: all-witnesses",
			fields: &GroupFields{
				Name:    "all-witnesses",
				Members: []string{"*/witness", "@crew"},
			},
			want: []string{
				"members: */witness,@crew",
			},
		},
		{
			// nil fields degrade to just the title.
			name:   "nil fields",
			title:  "Just a title",
			fields: nil,
			want:   []string{"Just a title"},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := FormatGroupDescription(tt.title, tt.fields)
			for _, line := range tt.want {
				if !strings.Contains(got, line) {
					t.Errorf("FormatGroupDescription() missing line %q\ngot:\n%s", line, got)
				}
			}
		})
	}
}
|
||||
|
||||
// TestParseGroupFields checks parsing of the "key: value" description
// format, including "null" sentinels, missing keys, and whitespace
// trimming around comma-separated members.
func TestParseGroupFields(t *testing.T) {
	tests := []struct {
		name        string
		description string
		want        *GroupFields
	}{
		{
			name: "full group",
			description: `Group: ops-team

name: ops-team
members: gastown/crew/max,gastown/witness,*/refinery
created_by: human
created_at: 2024-01-15T10:00:00Z`,
			want: &GroupFields{
				Name:      "ops-team",
				Members:   []string{"gastown/crew/max", "gastown/witness", "*/refinery"},
				CreatedBy: "human",
				CreatedAt: "2024-01-15T10:00:00Z",
			},
		},
		{
			// "members: null" must parse as no members, not one member "null".
			name: "null members",
			description: `Group: empty

name: empty
members: null
created_by: admin`,
			want: &GroupFields{
				Name:      "empty",
				Members:   nil,
				CreatedBy: "admin",
			},
		},
		{
			name: "single member",
			description: `name: solo
members: gastown/crew/max`,
			want: &GroupFields{
				Name:    "solo",
				Members: []string{"gastown/crew/max"},
			},
		},
		{
			// Empty input yields a zero-value struct, never nil.
			name:        "empty description",
			description: "",
			want:        &GroupFields{},
		},
		{
			// Whitespace around the comma separators is trimmed.
			name: "members with spaces",
			description: `name: spaced
members: a, b , c`,
			want: &GroupFields{
				Name:    "spaced",
				Members: []string{"a", "b", "c"},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := ParseGroupFields(tt.description)
			if got.Name != tt.want.Name {
				t.Errorf("Name = %q, want %q", got.Name, tt.want.Name)
			}
			if got.CreatedBy != tt.want.CreatedBy {
				t.Errorf("CreatedBy = %q, want %q", got.CreatedBy, tt.want.CreatedBy)
			}
			if got.CreatedAt != tt.want.CreatedAt {
				t.Errorf("CreatedAt = %q, want %q", got.CreatedAt, tt.want.CreatedAt)
			}
			if len(got.Members) != len(tt.want.Members) {
				t.Errorf("Members count = %d, want %d", len(got.Members), len(tt.want.Members))
			} else {
				for i, m := range got.Members {
					if m != tt.want.Members[i] {
						t.Errorf("Members[%d] = %q, want %q", i, m, tt.want.Members[i])
					}
				}
			}
		})
	}
}
|
||||
|
||||
func TestGroupBeadID(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
want string
|
||||
}{
|
||||
{"ops-team", "hq-group-ops-team"},
|
||||
{"all", "hq-group-all"},
|
||||
{"crew-leads", "hq-group-crew-leads"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := GroupBeadID(tt.name); got != tt.want {
|
||||
t.Errorf("GroupBeadID(%q) = %q, want %q", tt.name, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRoundTrip(t *testing.T) {
|
||||
// Test that Format -> Parse preserves data
|
||||
original := &GroupFields{
|
||||
Name: "test-group",
|
||||
Members: []string{"gastown/crew/max", "*/witness", "@town"},
|
||||
CreatedBy: "tester",
|
||||
CreatedAt: "2024-01-15T12:00:00Z",
|
||||
}
|
||||
|
||||
description := FormatGroupDescription("Group: test-group", original)
|
||||
parsed := ParseGroupFields(description)
|
||||
|
||||
if parsed.Name != original.Name {
|
||||
t.Errorf("Name: got %q, want %q", parsed.Name, original.Name)
|
||||
}
|
||||
if parsed.CreatedBy != original.CreatedBy {
|
||||
t.Errorf("CreatedBy: got %q, want %q", parsed.CreatedBy, original.CreatedBy)
|
||||
}
|
||||
if parsed.CreatedAt != original.CreatedAt {
|
||||
t.Errorf("CreatedAt: got %q, want %q", parsed.CreatedAt, original.CreatedAt)
|
||||
}
|
||||
if len(parsed.Members) != len(original.Members) {
|
||||
t.Fatalf("Members count: got %d, want %d", len(parsed.Members), len(original.Members))
|
||||
}
|
||||
for i, m := range original.Members {
|
||||
if parsed.Members[i] != m {
|
||||
t.Errorf("Members[%d]: got %q, want %q", i, parsed.Members[i], m)
|
||||
}
|
||||
}
|
||||
}
|
||||
133
internal/beads/beads_merge_slot.go
Normal file
133
internal/beads/beads_merge_slot.go
Normal file
@@ -0,0 +1,133 @@
|
||||
// Package beads provides merge slot management for serialized conflict resolution.
|
||||
package beads
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// MergeSlotStatus represents the result of checking a merge slot.
// It is also the decoded shape of the `bd merge-slot` JSON output used
// by MergeSlotCheck and MergeSlotAcquire.
type MergeSlotStatus struct {
	ID        string   `json:"id"`                // slot bead ID
	Available bool     `json:"available"`         // true when the slot can be acquired
	Holder    string   `json:"holder,omitempty"`  // current holder, when held
	Waiters   []string `json:"waiters,omitempty"` // queued waiters, when held
	Error     string   `json:"error,omitempty"`   // e.g. "not found" when the slot bead does not exist
}
|
||||
|
||||
// MergeSlotCreate creates the merge slot bead for the current rig.
|
||||
// The slot is used for serialized conflict resolution in the merge queue.
|
||||
// Returns the slot ID if successful.
|
||||
func (b *Beads) MergeSlotCreate() (string, error) {
|
||||
out, err := b.run("merge-slot", "create", "--json")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("creating merge slot: %w", err)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
ID string `json:"id"`
|
||||
Status string `json:"status"`
|
||||
}
|
||||
if err := json.Unmarshal(out, &result); err != nil {
|
||||
return "", fmt.Errorf("parsing merge-slot create output: %w", err)
|
||||
}
|
||||
|
||||
return result.ID, nil
|
||||
}
|
||||
|
||||
// MergeSlotCheck checks the availability of the merge slot.
|
||||
// Returns the current status including holder and waiters if held.
|
||||
func (b *Beads) MergeSlotCheck() (*MergeSlotStatus, error) {
|
||||
out, err := b.run("merge-slot", "check", "--json")
|
||||
if err != nil {
|
||||
// Check if slot doesn't exist
|
||||
if strings.Contains(err.Error(), "not found") {
|
||||
return &MergeSlotStatus{Error: "not found"}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("checking merge slot: %w", err)
|
||||
}
|
||||
|
||||
var status MergeSlotStatus
|
||||
if err := json.Unmarshal(out, &status); err != nil {
|
||||
return nil, fmt.Errorf("parsing merge-slot check output: %w", err)
|
||||
}
|
||||
|
||||
return &status, nil
|
||||
}
|
||||
|
||||
// MergeSlotAcquire attempts to acquire the merge slot for exclusive access.
// If holder is empty, defaults to BD_ACTOR environment variable.
// If addWaiter is true and the slot is held, the requester is added to the waiters queue.
// Returns the acquisition result.
//
// NOTE(review): when `bd merge-slot acquire` fails but still emits
// parseable JSON, that status is returned with a NIL error — the failure
// is surfaced only through the status fields (Available/Holder/Error).
// Callers must inspect the returned status, not just the error.
func (b *Beads) MergeSlotAcquire(holder string, addWaiter bool) (*MergeSlotStatus, error) {
	args := []string{"merge-slot", "acquire", "--json"}
	if holder != "" {
		args = append(args, "--holder="+holder)
	}
	if addWaiter {
		// --wait queues the requester instead of failing immediately.
		args = append(args, "--wait")
	}

	out, err := b.run(args...)
	if err != nil {
		// Parse the output even on error - it may contain useful info
		// (e.g. who currently holds the slot).
		var status MergeSlotStatus
		if jsonErr := json.Unmarshal(out, &status); jsonErr == nil {
			return &status, nil
		}
		return nil, fmt.Errorf("acquiring merge slot: %w", err)
	}

	var status MergeSlotStatus
	if err := json.Unmarshal(out, &status); err != nil {
		return nil, fmt.Errorf("parsing merge-slot acquire output: %w", err)
	}

	return &status, nil
}
|
||||
|
||||
// MergeSlotRelease releases the merge slot after conflict resolution completes.
|
||||
// If holder is provided, it verifies the slot is held by that holder before releasing.
|
||||
func (b *Beads) MergeSlotRelease(holder string) error {
|
||||
args := []string{"merge-slot", "release", "--json"}
|
||||
if holder != "" {
|
||||
args = append(args, "--holder="+holder)
|
||||
}
|
||||
|
||||
out, err := b.run(args...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("releasing merge slot: %w", err)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Released bool `json:"released"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
if err := json.Unmarshal(out, &result); err != nil {
|
||||
return fmt.Errorf("parsing merge-slot release output: %w", err)
|
||||
}
|
||||
|
||||
if !result.Released && result.Error != "" {
|
||||
return fmt.Errorf("slot release failed: %s", result.Error)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MergeSlotEnsureExists creates the merge slot if it doesn't exist.
|
||||
// This is idempotent - safe to call multiple times.
|
||||
func (b *Beads) MergeSlotEnsureExists() (string, error) {
|
||||
// Check if slot exists first
|
||||
status, err := b.MergeSlotCheck()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if status.Error == "not found" {
|
||||
// Create it
|
||||
return b.MergeSlotCreate()
|
||||
}
|
||||
|
||||
return status.ID, nil
|
||||
}
|
||||
45
internal/beads/beads_mr.go
Normal file
45
internal/beads/beads_mr.go
Normal file
@@ -0,0 +1,45 @@
|
||||
// Package beads provides merge request and gate utilities.
|
||||
package beads
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// FindMRForBranch searches for an existing merge-request bead for the given branch.
|
||||
// Returns the MR bead if found, nil if not found.
|
||||
// This enables idempotent `gt done` - if an MR already exists, we skip creation.
|
||||
func (b *Beads) FindMRForBranch(branch string) (*Issue, error) {
|
||||
// List all merge-request beads (open status only - closed MRs are already processed)
|
||||
issues, err := b.List(ListOptions{
|
||||
Status: "open",
|
||||
Label: "gt:merge-request",
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Search for one matching this branch
|
||||
// MR description format: "branch: <branch>\ntarget: ..."
|
||||
branchPrefix := "branch: " + branch + "\n"
|
||||
for _, issue := range issues {
|
||||
if strings.HasPrefix(issue.Description, branchPrefix) {
|
||||
return issue, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// AddGateWaiter registers an agent as a waiter on a gate bead.
|
||||
// When the gate closes, the waiter will receive a wake notification via gt gate wake.
|
||||
// The waiter is typically the polecat's address (e.g., "gastown/polecats/Toast").
|
||||
func (b *Beads) AddGateWaiter(gateID, waiter string) error {
|
||||
// Use bd gate add-waiter to register the waiter on the gate
|
||||
// This adds the waiter to the gate's native waiters field
|
||||
_, err := b.run("gate", "add-waiter", gateID, waiter)
|
||||
if err != nil {
|
||||
return fmt.Errorf("adding gate waiter: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
393
internal/beads/beads_queue.go
Normal file
393
internal/beads/beads_queue.go
Normal file
@@ -0,0 +1,393 @@
|
||||
// Package beads provides queue bead management.
|
||||
package beads
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// QueueFields holds structured fields for queue beads.
// These are stored as "key: value" lines in the description and
// round-trip through FormatQueueDescription / ParseQueueFields.
type QueueFields struct {
	Name            string // Queue name (human-readable identifier)
	ClaimPattern    string // Pattern for who can claim from queue (e.g., "gastown/polecats/*"); default "*" = anyone
	Status          string // active, paused, closed (default: active)
	MaxConcurrency  int    // Maximum number of concurrent workers (0 = unlimited)
	ProcessingOrder string // fifo, priority (default: fifo)
	AvailableCount  int    // Number of items ready to process
	ProcessingCount int    // Number of items currently being processed
	CompletedCount  int    // Number of items completed
	FailedCount     int    // Number of items that failed
	CreatedBy       string // Who created this queue
	CreatedAt       string // ISO 8601 timestamp of creation
}
|
||||
|
||||
// Queue status constants (valid values of QueueFields.Status).
const (
	QueueStatusActive = "active"
	QueueStatusPaused = "paused"
	QueueStatusClosed = "closed"
)

// Queue processing order constants (valid values of QueueFields.ProcessingOrder).
const (
	QueueOrderFIFO     = "fifo"
	QueueOrderPriority = "priority"
)
|
||||
|
||||
// FormatQueueDescription creates a description string from queue fields.
|
||||
func FormatQueueDescription(title string, fields *QueueFields) string {
|
||||
if fields == nil {
|
||||
return title
|
||||
}
|
||||
|
||||
var lines []string
|
||||
lines = append(lines, title)
|
||||
lines = append(lines, "")
|
||||
|
||||
if fields.Name != "" {
|
||||
lines = append(lines, fmt.Sprintf("name: %s", fields.Name))
|
||||
} else {
|
||||
lines = append(lines, "name: null")
|
||||
}
|
||||
|
||||
if fields.ClaimPattern != "" {
|
||||
lines = append(lines, fmt.Sprintf("claim_pattern: %s", fields.ClaimPattern))
|
||||
} else {
|
||||
lines = append(lines, "claim_pattern: *") // Default: anyone can claim
|
||||
}
|
||||
|
||||
if fields.Status != "" {
|
||||
lines = append(lines, fmt.Sprintf("status: %s", fields.Status))
|
||||
} else {
|
||||
lines = append(lines, "status: active")
|
||||
}
|
||||
|
||||
lines = append(lines, fmt.Sprintf("max_concurrency: %d", fields.MaxConcurrency))
|
||||
|
||||
if fields.ProcessingOrder != "" {
|
||||
lines = append(lines, fmt.Sprintf("processing_order: %s", fields.ProcessingOrder))
|
||||
} else {
|
||||
lines = append(lines, "processing_order: fifo")
|
||||
}
|
||||
|
||||
lines = append(lines, fmt.Sprintf("available_count: %d", fields.AvailableCount))
|
||||
lines = append(lines, fmt.Sprintf("processing_count: %d", fields.ProcessingCount))
|
||||
lines = append(lines, fmt.Sprintf("completed_count: %d", fields.CompletedCount))
|
||||
lines = append(lines, fmt.Sprintf("failed_count: %d", fields.FailedCount))
|
||||
|
||||
if fields.CreatedBy != "" {
|
||||
lines = append(lines, fmt.Sprintf("created_by: %s", fields.CreatedBy))
|
||||
}
|
||||
if fields.CreatedAt != "" {
|
||||
lines = append(lines, fmt.Sprintf("created_at: %s", fields.CreatedAt))
|
||||
}
|
||||
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
// ParseQueueFields extracts queue fields from an issue's description.
|
||||
func ParseQueueFields(description string) *QueueFields {
|
||||
fields := &QueueFields{
|
||||
Status: QueueStatusActive,
|
||||
ProcessingOrder: QueueOrderFIFO,
|
||||
ClaimPattern: "*", // Default: anyone can claim
|
||||
}
|
||||
|
||||
for _, line := range strings.Split(description, "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
colonIdx := strings.Index(line, ":")
|
||||
if colonIdx == -1 {
|
||||
continue
|
||||
}
|
||||
|
||||
key := strings.TrimSpace(line[:colonIdx])
|
||||
value := strings.TrimSpace(line[colonIdx+1:])
|
||||
if value == "null" || value == "" {
|
||||
value = ""
|
||||
}
|
||||
|
||||
switch strings.ToLower(key) {
|
||||
case "name":
|
||||
fields.Name = value
|
||||
case "claim_pattern":
|
||||
if value != "" {
|
||||
fields.ClaimPattern = value
|
||||
}
|
||||
case "status":
|
||||
fields.Status = value
|
||||
case "max_concurrency":
|
||||
if v, err := strconv.Atoi(value); err == nil {
|
||||
fields.MaxConcurrency = v
|
||||
}
|
||||
case "processing_order":
|
||||
fields.ProcessingOrder = value
|
||||
case "available_count":
|
||||
if v, err := strconv.Atoi(value); err == nil {
|
||||
fields.AvailableCount = v
|
||||
}
|
||||
case "processing_count":
|
||||
if v, err := strconv.Atoi(value); err == nil {
|
||||
fields.ProcessingCount = v
|
||||
}
|
||||
case "completed_count":
|
||||
if v, err := strconv.Atoi(value); err == nil {
|
||||
fields.CompletedCount = v
|
||||
}
|
||||
case "failed_count":
|
||||
if v, err := strconv.Atoi(value); err == nil {
|
||||
fields.FailedCount = v
|
||||
}
|
||||
case "created_by":
|
||||
fields.CreatedBy = value
|
||||
case "created_at":
|
||||
fields.CreatedAt = value
|
||||
}
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// QueueBeadID returns the queue bead ID for a given queue name.
// Town-level queues use the "hq-q-" prefix; rig-level queues use "gt-q-".
func QueueBeadID(name string, isTownLevel bool) string {
	prefix := "gt-q-"
	if isTownLevel {
		prefix = "hq-q-"
	}
	return prefix + name
}
|
||||
|
||||
// CreateQueueBead creates a queue bead for tracking work queues.
|
||||
// The ID format is: <prefix>-q-<name> (e.g., gt-q-merge, hq-q-dispatch)
|
||||
// The created_by field is populated from BD_ACTOR env var for provenance tracking.
|
||||
func (b *Beads) CreateQueueBead(id, title string, fields *QueueFields) (*Issue, error) {
|
||||
description := FormatQueueDescription(title, fields)
|
||||
|
||||
args := []string{"create", "--json",
|
||||
"--id=" + id,
|
||||
"--title=" + title,
|
||||
"--description=" + description,
|
||||
"--type=queue",
|
||||
"--labels=gt:queue",
|
||||
}
|
||||
|
||||
// Default actor from BD_ACTOR env var for provenance tracking
|
||||
// Uses getActor() to respect isolated mode (tests)
|
||||
if actor := b.getActor(); actor != "" {
|
||||
args = append(args, "--actor="+actor)
|
||||
}
|
||||
|
||||
out, err := b.run(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issue Issue
|
||||
if err := json.Unmarshal(out, &issue); err != nil {
|
||||
return nil, fmt.Errorf("parsing bd create output: %w", err)
|
||||
}
|
||||
|
||||
return &issue, nil
|
||||
}
|
||||
|
||||
// GetQueueBead retrieves a queue bead by ID.
|
||||
// Returns nil if not found.
|
||||
func (b *Beads) GetQueueBead(id string) (*Issue, *QueueFields, error) {
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if !HasLabel(issue, "gt:queue") {
|
||||
return nil, nil, fmt.Errorf("issue %s is not a queue bead (missing gt:queue label)", id)
|
||||
}
|
||||
|
||||
fields := ParseQueueFields(issue.Description)
|
||||
return issue, fields, nil
|
||||
}
|
||||
|
||||
// UpdateQueueFields updates the fields of a queue bead.
|
||||
func (b *Beads) UpdateQueueFields(id string, fields *QueueFields) error {
|
||||
issue, err := b.Show(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
description := FormatQueueDescription(issue.Title, fields)
|
||||
return b.Update(id, UpdateOptions{Description: &description})
|
||||
}
|
||||
|
||||
// UpdateQueueCounts updates the count fields of a queue bead.
|
||||
// This is a convenience method for incrementing/decrementing counts.
|
||||
func (b *Beads) UpdateQueueCounts(id string, available, processing, completed, failed int) error {
|
||||
issue, currentFields, err := b.GetQueueBead(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
currentFields.AvailableCount = available
|
||||
currentFields.ProcessingCount = processing
|
||||
currentFields.CompletedCount = completed
|
||||
currentFields.FailedCount = failed
|
||||
|
||||
return b.UpdateQueueFields(id, currentFields)
|
||||
}
|
||||
|
||||
// UpdateQueueStatus updates the status of a queue bead.
|
||||
func (b *Beads) UpdateQueueStatus(id, status string) error {
|
||||
// Validate status
|
||||
if status != QueueStatusActive && status != QueueStatusPaused && status != QueueStatusClosed {
|
||||
return fmt.Errorf("invalid queue status %q: must be active, paused, or closed", status)
|
||||
}
|
||||
|
||||
issue, currentFields, err := b.GetQueueBead(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if issue == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
currentFields.Status = status
|
||||
return b.UpdateQueueFields(id, currentFields)
|
||||
}
|
||||
|
||||
// ListQueueBeads returns all queue beads.
|
||||
func (b *Beads) ListQueueBeads() (map[string]*Issue, error) {
|
||||
out, err := b.run("list", "--label=gt:queue", "--json")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issues []*Issue
|
||||
if err := json.Unmarshal(out, &issues); err != nil {
|
||||
return nil, fmt.Errorf("parsing bd list output: %w", err)
|
||||
}
|
||||
|
||||
result := make(map[string]*Issue, len(issues))
|
||||
for _, issue := range issues {
|
||||
result[issue.ID] = issue
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// DeleteQueueBead permanently deletes a queue bead.
|
||||
// Uses --hard --force for immediate permanent deletion (no tombstone).
|
||||
func (b *Beads) DeleteQueueBead(id string) error {
|
||||
_, err := b.run("delete", id, "--hard", "--force")
|
||||
return err
|
||||
}
|
||||
|
||||
// LookupQueueByName finds a queue by its name field (not by ID).
|
||||
// This is used for address resolution where we may not know the full bead ID.
|
||||
func (b *Beads) LookupQueueByName(name string) (*Issue, *QueueFields, error) {
|
||||
// First try direct lookup by standard ID formats (town and rig level)
|
||||
for _, isTownLevel := range []bool{true, false} {
|
||||
id := QueueBeadID(name, isTownLevel)
|
||||
issue, fields, err := b.GetQueueBead(id)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if issue != nil {
|
||||
return issue, fields, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If not found by ID, search all queues by name field
|
||||
queues, err := b.ListQueueBeads()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
for _, issue := range queues {
|
||||
fields := ParseQueueFields(issue.Description)
|
||||
if fields.Name == name {
|
||||
return issue, fields, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil, nil // Not found
|
||||
}
|
||||
|
||||
// MatchClaimPattern checks if an identity matches a claim pattern.
// Patterns support:
//   - "*" matches anyone
//   - "gastown/polecats/*" matches any polecat in gastown rig
//   - "*/witness" matches any witness role across rigs
//   - Exact match for specific identities
//
// A wildcard pattern with a single "*" matches when the identity carries the
// pattern's prefix and suffix and the text in between contains no "/" (so a
// trailing "*" matches one path segment, not nested paths).
func MatchClaimPattern(pattern, identity string) bool {
	// Wildcard matches anyone
	if pattern == "*" {
		return true
	}

	// Exact match
	if pattern == identity {
		return true
	}

	// Wildcard pattern matching: simple glob with exactly one "*".
	// "gastown/polecats/*" should match "gastown/polecats/capable";
	// "*/witness" should match "gastown/witness".
	if strings.Contains(pattern, "*") {
		parts := strings.Split(pattern, "*")
		if len(parts) == 2 {
			prefix := parts[0]
			suffix := parts[1]
			// The identity must be long enough to hold both prefix and suffix
			// without overlap. Without this guard, a pattern like "ab*ba"
			// against identity "aba" passes both HasPrefix and HasSuffix but
			// the middle slice below has an inverted range and panics.
			if len(identity) >= len(prefix)+len(suffix) &&
				strings.HasPrefix(identity, prefix) &&
				strings.HasSuffix(identity, suffix) {
				middle := identity[len(prefix) : len(identity)-len(suffix)]
				// Only allow a single-segment match (no extra slashes).
				if !strings.Contains(middle, "/") {
					return true
				}
			}
		}
	}

	return false
}
|
||||
|
||||
// FindEligibleQueues returns all queue beads that the given identity can claim from.
|
||||
func (b *Beads) FindEligibleQueues(identity string) ([]*Issue, []*QueueFields, error) {
|
||||
queues, err := b.ListQueueBeads()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var eligibleIssues []*Issue
|
||||
var eligibleFields []*QueueFields
|
||||
|
||||
for _, issue := range queues {
|
||||
fields := ParseQueueFields(issue.Description)
|
||||
|
||||
// Skip inactive queues
|
||||
if fields.Status != QueueStatusActive {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if identity matches claim pattern
|
||||
if MatchClaimPattern(fields.ClaimPattern, identity) {
|
||||
eligibleIssues = append(eligibleIssues, issue)
|
||||
eligibleFields = append(eligibleFields, fields)
|
||||
}
|
||||
}
|
||||
|
||||
return eligibleIssues, eligibleFields, nil
|
||||
}
|
||||
301
internal/beads/beads_queue_test.go
Normal file
301
internal/beads/beads_queue_test.go
Normal file
@@ -0,0 +1,301 @@
|
||||
package beads
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMatchClaimPattern(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
pattern string
|
||||
identity string
|
||||
want bool
|
||||
}{
|
||||
// Wildcard matches anyone
|
||||
{
|
||||
name: "wildcard matches anyone",
|
||||
pattern: "*",
|
||||
identity: "gastown/crew/max",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "wildcard matches town-level agent",
|
||||
pattern: "*",
|
||||
identity: "mayor/",
|
||||
want: true,
|
||||
},
|
||||
|
||||
// Exact match
|
||||
{
|
||||
name: "exact match",
|
||||
pattern: "gastown/crew/max",
|
||||
identity: "gastown/crew/max",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "exact match fails on different identity",
|
||||
pattern: "gastown/crew/max",
|
||||
identity: "gastown/crew/nux",
|
||||
want: false,
|
||||
},
|
||||
|
||||
// Suffix wildcard
|
||||
{
|
||||
name: "suffix wildcard matches",
|
||||
pattern: "gastown/polecats/*",
|
||||
identity: "gastown/polecats/capable",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "suffix wildcard matches different name",
|
||||
pattern: "gastown/polecats/*",
|
||||
identity: "gastown/polecats/nux",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "suffix wildcard doesn't match nested path",
|
||||
pattern: "gastown/polecats/*",
|
||||
identity: "gastown/polecats/sub/capable",
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "suffix wildcard doesn't match different rig",
|
||||
pattern: "gastown/polecats/*",
|
||||
identity: "bartertown/polecats/capable",
|
||||
want: false,
|
||||
},
|
||||
|
||||
// Prefix wildcard
|
||||
{
|
||||
name: "prefix wildcard matches",
|
||||
pattern: "*/witness",
|
||||
identity: "gastown/witness",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "prefix wildcard matches different rig",
|
||||
pattern: "*/witness",
|
||||
identity: "bartertown/witness",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "prefix wildcard doesn't match different role",
|
||||
pattern: "*/witness",
|
||||
identity: "gastown/refinery",
|
||||
want: false,
|
||||
},
|
||||
|
||||
// Crew patterns
|
||||
{
|
||||
name: "crew wildcard",
|
||||
pattern: "gastown/crew/*",
|
||||
identity: "gastown/crew/max",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "crew wildcard matches any crew member",
|
||||
pattern: "gastown/crew/*",
|
||||
identity: "gastown/crew/jack",
|
||||
want: true,
|
||||
},
|
||||
|
||||
// Edge cases
|
||||
{
|
||||
name: "empty identity doesn't match",
|
||||
pattern: "*",
|
||||
identity: "",
|
||||
want: true, // * matches anything
|
||||
},
|
||||
{
|
||||
name: "empty pattern doesn't match",
|
||||
pattern: "",
|
||||
identity: "gastown/crew/max",
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := MatchClaimPattern(tt.pattern, tt.identity)
|
||||
if got != tt.want {
|
||||
t.Errorf("MatchClaimPattern(%q, %q) = %v, want %v",
|
||||
tt.pattern, tt.identity, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatQueueDescription(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
title string
|
||||
fields *QueueFields
|
||||
want []string // Lines that should be present
|
||||
}{
|
||||
{
|
||||
name: "basic queue",
|
||||
title: "Queue: work-requests",
|
||||
fields: &QueueFields{
|
||||
Name: "work-requests",
|
||||
ClaimPattern: "gastown/crew/*",
|
||||
Status: QueueStatusActive,
|
||||
},
|
||||
want: []string{
|
||||
"Queue: work-requests",
|
||||
"name: work-requests",
|
||||
"claim_pattern: gastown/crew/*",
|
||||
"status: active",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "queue with default claim pattern",
|
||||
title: "Queue: public",
|
||||
fields: &QueueFields{
|
||||
Name: "public",
|
||||
Status: QueueStatusActive,
|
||||
},
|
||||
want: []string{
|
||||
"name: public",
|
||||
"claim_pattern: *", // Default
|
||||
"status: active",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "queue with counts",
|
||||
title: "Queue: processing",
|
||||
fields: &QueueFields{
|
||||
Name: "processing",
|
||||
ClaimPattern: "*/refinery",
|
||||
Status: QueueStatusActive,
|
||||
AvailableCount: 5,
|
||||
ProcessingCount: 2,
|
||||
CompletedCount: 10,
|
||||
FailedCount: 1,
|
||||
},
|
||||
want: []string{
|
||||
"name: processing",
|
||||
"claim_pattern: */refinery",
|
||||
"available_count: 5",
|
||||
"processing_count: 2",
|
||||
"completed_count: 10",
|
||||
"failed_count: 1",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "nil fields",
|
||||
title: "Just Title",
|
||||
fields: nil,
|
||||
want: []string{"Just Title"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := FormatQueueDescription(tt.title, tt.fields)
|
||||
for _, line := range tt.want {
|
||||
if !strings.Contains(got, line) {
|
||||
t.Errorf("FormatQueueDescription() missing line %q in:\n%s", line, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseQueueFields(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
description string
|
||||
wantName string
|
||||
wantPattern string
|
||||
wantStatus string
|
||||
}{
|
||||
{
|
||||
name: "basic queue",
|
||||
description: `Queue: work-requests
|
||||
|
||||
name: work-requests
|
||||
claim_pattern: gastown/crew/*
|
||||
status: active`,
|
||||
wantName: "work-requests",
|
||||
wantPattern: "gastown/crew/*",
|
||||
wantStatus: QueueStatusActive,
|
||||
},
|
||||
{
|
||||
name: "queue with defaults",
|
||||
description: `Queue: minimal
|
||||
|
||||
name: minimal`,
|
||||
wantName: "minimal",
|
||||
wantPattern: "*", // Default
|
||||
wantStatus: QueueStatusActive,
|
||||
},
|
||||
{
|
||||
name: "empty description",
|
||||
description: "",
|
||||
wantName: "",
|
||||
wantPattern: "*", // Default
|
||||
wantStatus: QueueStatusActive,
|
||||
},
|
||||
{
|
||||
name: "queue with counts",
|
||||
description: `Queue: processing
|
||||
|
||||
name: processing
|
||||
claim_pattern: */refinery
|
||||
status: paused
|
||||
available_count: 5
|
||||
processing_count: 2`,
|
||||
wantName: "processing",
|
||||
wantPattern: "*/refinery",
|
||||
wantStatus: QueueStatusPaused,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := ParseQueueFields(tt.description)
|
||||
if got.Name != tt.wantName {
|
||||
t.Errorf("Name = %q, want %q", got.Name, tt.wantName)
|
||||
}
|
||||
if got.ClaimPattern != tt.wantPattern {
|
||||
t.Errorf("ClaimPattern = %q, want %q", got.ClaimPattern, tt.wantPattern)
|
||||
}
|
||||
if got.Status != tt.wantStatus {
|
||||
t.Errorf("Status = %q, want %q", got.Status, tt.wantStatus)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueueBeadID(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
queueName string
|
||||
isTownLevel bool
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "town-level queue",
|
||||
queueName: "dispatch",
|
||||
isTownLevel: true,
|
||||
want: "hq-q-dispatch",
|
||||
},
|
||||
{
|
||||
name: "rig-level queue",
|
||||
queueName: "merge",
|
||||
isTownLevel: false,
|
||||
want: "gt-q-merge",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := QueueBeadID(tt.queueName, tt.isTownLevel)
|
||||
if got != tt.want {
|
||||
t.Errorf("QueueBeadID(%q, %v) = %q, want %q",
|
||||
tt.queueName, tt.isTownLevel, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
245
internal/beads/beads_redirect.go
Normal file
245
internal/beads/beads_redirect.go
Normal file
@@ -0,0 +1,245 @@
|
||||
// Package beads provides redirect resolution for beads databases.
|
||||
package beads
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ResolveBeadsDir returns the actual beads directory, following any redirect.
|
||||
// If workDir/.beads/redirect exists, it reads the redirect path and resolves it
|
||||
// relative to workDir (not the .beads directory). Otherwise, returns workDir/.beads.
|
||||
//
|
||||
// This is essential for crew workers and polecats that use shared beads via redirect.
|
||||
// The redirect file contains a relative path like "../../mayor/rig/.beads".
|
||||
//
|
||||
// Example: if we're at crew/max/ and .beads/redirect contains "../../mayor/rig/.beads",
|
||||
// the redirect is resolved from crew/max/ (not crew/max/.beads/), giving us
|
||||
// mayor/rig/.beads at the rig root level.
|
||||
//
|
||||
// Circular redirect detection: If the resolved path equals the original beads directory,
|
||||
// this indicates an errant redirect file that should be removed. The function logs a
|
||||
// warning and returns the original beads directory.
|
||||
func ResolveBeadsDir(workDir string) string {
|
||||
if filepath.Base(workDir) == ".beads" {
|
||||
workDir = filepath.Dir(workDir)
|
||||
}
|
||||
beadsDir := filepath.Join(workDir, ".beads")
|
||||
redirectPath := filepath.Join(beadsDir, "redirect")
|
||||
|
||||
// Check for redirect file
|
||||
data, err := os.ReadFile(redirectPath) //nolint:gosec // G304: path is constructed internally
|
||||
if err != nil {
|
||||
// No redirect, use local .beads
|
||||
return beadsDir
|
||||
}
|
||||
|
||||
// Read and clean the redirect path
|
||||
redirectTarget := strings.TrimSpace(string(data))
|
||||
if redirectTarget == "" {
|
||||
return beadsDir
|
||||
}
|
||||
|
||||
// Resolve relative to workDir (the redirect is written from the perspective
|
||||
// of being inside workDir, not inside workDir/.beads)
|
||||
// e.g., redirect contains "../../mayor/rig/.beads"
|
||||
// from crew/max/, this resolves to mayor/rig/.beads
|
||||
resolved := filepath.Join(workDir, redirectTarget)
|
||||
|
||||
// Clean the path to resolve .. components
|
||||
resolved = filepath.Clean(resolved)
|
||||
|
||||
// Detect circular redirects: if resolved path equals original beads dir,
|
||||
// this is an errant redirect file (e.g., redirect in mayor/rig/.beads pointing to itself)
|
||||
if resolved == beadsDir {
|
||||
fmt.Fprintf(os.Stderr, "Warning: circular redirect detected in %s (points to itself), ignoring redirect\n", redirectPath)
|
||||
// Remove the errant redirect file to prevent future warnings
|
||||
if err := os.Remove(redirectPath); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: could not remove errant redirect file: %v\n", err)
|
||||
}
|
||||
return beadsDir
|
||||
}
|
||||
|
||||
// Follow redirect chains (e.g., crew/.beads -> rig/.beads -> mayor/rig/.beads)
|
||||
// This is intentional for the rig-level redirect architecture.
|
||||
// Limit depth to prevent infinite loops from misconfigured redirects.
|
||||
return resolveBeadsDirWithDepth(resolved, 3)
|
||||
}
|
||||
|
||||
// resolveBeadsDirWithDepth follows redirect chains with a depth limit.
|
||||
func resolveBeadsDirWithDepth(beadsDir string, maxDepth int) string {
|
||||
if maxDepth <= 0 {
|
||||
fmt.Fprintf(os.Stderr, "Warning: redirect chain too deep at %s, stopping\n", beadsDir)
|
||||
return beadsDir
|
||||
}
|
||||
|
||||
redirectPath := filepath.Join(beadsDir, "redirect")
|
||||
data, err := os.ReadFile(redirectPath) //nolint:gosec // G304: path is constructed internally
|
||||
if err != nil {
|
||||
// No redirect, this is the final destination
|
||||
return beadsDir
|
||||
}
|
||||
|
||||
redirectTarget := strings.TrimSpace(string(data))
|
||||
if redirectTarget == "" {
|
||||
return beadsDir
|
||||
}
|
||||
|
||||
// Resolve relative to parent of beadsDir (the workDir)
|
||||
workDir := filepath.Dir(beadsDir)
|
||||
resolved := filepath.Clean(filepath.Join(workDir, redirectTarget))
|
||||
|
||||
// Detect circular redirect
|
||||
if resolved == beadsDir {
|
||||
fmt.Fprintf(os.Stderr, "Warning: circular redirect detected in %s, stopping\n", redirectPath)
|
||||
return beadsDir
|
||||
}
|
||||
|
||||
// Recursively follow
|
||||
return resolveBeadsDirWithDepth(resolved, maxDepth-1)
|
||||
}
|
||||
|
||||
// cleanBeadsRuntimeFiles removes gitignored runtime files from a .beads directory
|
||||
// while preserving tracked files (formulas/, README.md, config.yaml, .gitignore).
|
||||
// This is safe to call even if the directory doesn't exist.
|
||||
func cleanBeadsRuntimeFiles(beadsDir string) error {
|
||||
if _, err := os.Stat(beadsDir); os.IsNotExist(err) {
|
||||
return nil // Nothing to clean
|
||||
}
|
||||
|
||||
// Runtime files/patterns that are gitignored and safe to remove
|
||||
runtimePatterns := []string{
|
||||
// SQLite databases
|
||||
"*.db", "*.db-*", "*.db?*",
|
||||
// Daemon runtime
|
||||
"daemon.lock", "daemon.log", "daemon.pid", "bd.sock",
|
||||
// Sync state
|
||||
"sync-state.json", "last-touched", "metadata.json",
|
||||
// Version tracking
|
||||
".local_version",
|
||||
// Redirect file (we're about to recreate it)
|
||||
"redirect",
|
||||
// Merge artifacts
|
||||
"beads.base.*", "beads.left.*", "beads.right.*",
|
||||
// JSONL files (tracked but will be redirected, safe to remove in worktrees)
|
||||
"issues.jsonl", "interactions.jsonl",
|
||||
// Runtime directories
|
||||
"mq",
|
||||
}
|
||||
|
||||
var firstErr error
|
||||
for _, pattern := range runtimePatterns {
|
||||
matches, err := filepath.Glob(filepath.Join(beadsDir, pattern))
|
||||
if err != nil {
|
||||
if firstErr == nil {
|
||||
firstErr = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
for _, match := range matches {
|
||||
if err := os.RemoveAll(match); err != nil && firstErr == nil {
|
||||
firstErr = err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return firstErr
|
||||
}
|
||||
|
||||
// SetupRedirect creates a .beads/redirect file for a worktree to point to the rig's shared beads.
// This is used by crew, polecats, and refinery worktrees to share the rig's beads database.
//
// Parameters:
//   - townRoot: the town root directory (e.g., ~/gt)
//   - worktreePath: the worktree directory (e.g., <rig>/crew/<name> or <rig>/refinery/rig)
//
// The function:
//  1. Computes the relative path from worktree to rig-level .beads
//  2. Cleans up runtime files (preserving tracked files like formulas/)
//  3. Creates the redirect file
//
// Safety: This function refuses to create redirects in the canonical beads location
// (mayor/rig) to prevent circular redirect chains.
func SetupRedirect(townRoot, worktreePath string) error {
	// Get rig root from worktree path
	// worktreePath = <town>/<rig>/crew/<name> or <town>/<rig>/refinery/rig etc.
	relPath, err := filepath.Rel(townRoot, worktreePath)
	if err != nil {
		return fmt.Errorf("computing relative path: %w", err)
	}
	// ToSlash normalizes separators so the split works on Windows paths too.
	parts := strings.Split(filepath.ToSlash(relPath), "/")
	if len(parts) < 2 {
		return fmt.Errorf("invalid worktree path: must be at least 2 levels deep from town root")
	}

	// Safety check: prevent creating redirect in canonical beads location (mayor/rig)
	// This would create a circular redirect chain since rig/.beads redirects to mayor/rig/.beads
	if len(parts) >= 2 && parts[1] == "mayor" {
		return fmt.Errorf("cannot create redirect in canonical beads location (mayor/rig)")
	}

	// parts[0] is the rig name (first path segment under the town root).
	rigRoot := filepath.Join(townRoot, parts[0])
	rigBeadsPath := filepath.Join(rigRoot, ".beads")
	mayorBeadsPath := filepath.Join(rigRoot, "mayor", "rig", ".beads")

	// Check rig-level .beads first, fall back to mayor/rig/.beads (tracked beads architecture)
	usesMayorFallback := false
	if _, err := os.Stat(rigBeadsPath); os.IsNotExist(err) {
		// No rig/.beads - check for mayor/rig/.beads (tracked beads architecture)
		if _, err := os.Stat(mayorBeadsPath); os.IsNotExist(err) {
			return fmt.Errorf("no beads found at %s or %s", rigBeadsPath, mayorBeadsPath)
		}
		// Using mayor fallback - warn user to run bd doctor
		fmt.Fprintf(os.Stderr, "Warning: rig .beads not found at %s, using %s\n", rigBeadsPath, mayorBeadsPath)
		fmt.Fprintf(os.Stderr, "  Run 'bd doctor' to fix rig beads configuration\n")
		usesMayorFallback = true
	}

	// Clean up runtime files in .beads/ but preserve tracked files (formulas/, README.md, etc.)
	worktreeBeadsDir := filepath.Join(worktreePath, ".beads")
	if err := cleanBeadsRuntimeFiles(worktreeBeadsDir); err != nil {
		return fmt.Errorf("cleaning runtime files: %w", err)
	}

	// Create .beads directory if it doesn't exist
	if err := os.MkdirAll(worktreeBeadsDir, 0755); err != nil {
		return fmt.Errorf("creating .beads dir: %w", err)
	}

	// Compute relative path from worktree to rig root
	// e.g., crew/<name> (depth 2) -> ../../.beads
	// refinery/rig (depth 2) -> ../../.beads
	depth := len(parts) - 1 // subtract 1 for rig name itself
	upPath := strings.Repeat("../", depth)

	var redirectPath string
	if usesMayorFallback {
		// Direct redirect to mayor/rig/.beads since rig/.beads doesn't exist
		redirectPath = upPath + "mayor/rig/.beads"
	} else {
		redirectPath = upPath + ".beads"

		// Check if rig-level beads has a redirect (tracked beads case).
		// If so, redirect directly to the final destination to avoid chains.
		// The bd CLI doesn't support redirect chains, so we must skip intermediate hops.
		rigRedirectPath := filepath.Join(rigBeadsPath, "redirect")
		if data, err := os.ReadFile(rigRedirectPath); err == nil {
			rigRedirectTarget := strings.TrimSpace(string(data))
			if rigRedirectTarget != "" {
				// Rig has redirect (e.g., "mayor/rig/.beads" for tracked beads).
				// Redirect worktree directly to the final destination.
				// NOTE(review): this assumes the rig's redirect target is itself
				// relative to the rig root, so prepending upPath lands on the
				// same directory — confirm against how rig/.beads/redirect is
				// written elsewhere.
				redirectPath = upPath + rigRedirectTarget
			}
		}
	}

	// Create redirect file (trailing newline matches what readers TrimSpace away).
	redirectFile := filepath.Join(worktreeBeadsDir, "redirect")
	if err := os.WriteFile(redirectFile, []byte(redirectPath+"\n"), 0644); err != nil {
		return fmt.Errorf("creating redirect file: %w", err)
	}

	return nil
}
|
||||
120
internal/beads/beads_rig.go
Normal file
120
internal/beads/beads_rig.go
Normal file
@@ -0,0 +1,120 @@
|
||||
// Package beads provides rig identity bead management.
|
||||
package beads
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// RigFields holds the metadata embedded in a rig identity bead's description.
type RigFields struct {
	Repo   string // Git URL for the rig's repository
	Prefix string // Beads prefix for this rig (e.g., "gt", "bd")
	State  string // Operational state: active, archived, maintenance
}

// FormatRigDescription renders the description text for a rig identity bead.
// It returns the empty string when fields is nil; empty fields are omitted.
func FormatRigDescription(name string, fields *RigFields) string {
	if fields == nil {
		return ""
	}

	// Header sentence, blank separator, then one "key: value" line per
	// populated field.
	lines := []string{fmt.Sprintf("Rig identity bead for %s.", name), ""}
	if fields.Repo != "" {
		lines = append(lines, "repo: "+fields.Repo)
	}
	if fields.Prefix != "" {
		lines = append(lines, "prefix: "+fields.Prefix)
	}
	if fields.State != "" {
		lines = append(lines, "state: "+fields.State)
	}
	return strings.Join(lines, "\n")
}

// ParseRigFields extracts rig metadata from a bead description. Lines of the
// form "key: value" are recognized (repo, prefix, state); the literal value
// "null" is normalized to empty, and unknown keys are ignored.
func ParseRigFields(description string) *RigFields {
	out := &RigFields{}

	for _, raw := range strings.Split(description, "\n") {
		trimmed := strings.TrimSpace(raw)
		if trimmed == "" {
			continue
		}

		// Split at the first colon only, so values may themselves contain ":".
		key, val, found := strings.Cut(trimmed, ":")
		if !found {
			continue
		}
		key = strings.TrimSpace(key)
		val = strings.TrimSpace(val)
		if val == "null" {
			val = ""
		}

		switch strings.ToLower(key) {
		case "repo":
			out.Repo = val
		case "prefix":
			out.Prefix = val
		case "state":
			out.State = val
		}
	}

	return out
}
|
||||
|
||||
// CreateRigBead creates a rig identity bead for tracking rig metadata.
|
||||
// The ID format is: <prefix>-rig-<name> (e.g., gt-rig-gastown)
|
||||
// Use RigBeadID() helper to generate correct IDs.
|
||||
// The created_by field is populated from BD_ACTOR env var for provenance tracking.
|
||||
func (b *Beads) CreateRigBead(id, title string, fields *RigFields) (*Issue, error) {
|
||||
description := FormatRigDescription(title, fields)
|
||||
|
||||
args := []string{"create", "--json",
|
||||
"--id=" + id,
|
||||
"--title=" + title,
|
||||
"--description=" + description,
|
||||
"--labels=gt:rig",
|
||||
}
|
||||
if NeedsForceForID(id) {
|
||||
args = append(args, "--force")
|
||||
}
|
||||
|
||||
// Default actor from BD_ACTOR env var for provenance tracking
|
||||
// Uses getActor() to respect isolated mode (tests)
|
||||
if actor := b.getActor(); actor != "" {
|
||||
args = append(args, "--actor="+actor)
|
||||
}
|
||||
|
||||
out, err := b.run(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issue Issue
|
||||
if err := json.Unmarshal(out, &issue); err != nil {
|
||||
return nil, fmt.Errorf("parsing bd create output: %w", err)
|
||||
}
|
||||
|
||||
return &issue, nil
|
||||
}
|
||||
|
||||
// RigBeadIDWithPrefix generates a rig identity bead ID using the given
// prefix, in the form <prefix>-rig-<name> (e.g., gt-rig-gastown).
func RigBeadIDWithPrefix(prefix, name string) string {
	return prefix + "-rig-" + name
}

// RigBeadID generates a rig identity bead ID with the default "gt" prefix.
// For non-gastown rigs, call RigBeadIDWithPrefix with the rig's configured
// prefix instead.
func RigBeadID(name string) string {
	return RigBeadIDWithPrefix("gt", name)
}
|
||||
160
internal/beads/beads_role.go
Normal file
160
internal/beads/beads_role.go
Normal file
@@ -0,0 +1,160 @@
|
||||
// Package beads provides role bead management.
|
||||
//
|
||||
// DEPRECATED: Role beads are deprecated. Role definitions are now config-based.
|
||||
// See internal/config/roles/*.toml and config-based-roles.md for the new system.
|
||||
//
|
||||
// This file is kept for backward compatibility with existing role beads but
|
||||
// new code should use config.LoadRoleDefinition() instead of reading role beads.
|
||||
// The daemon no longer uses role beads as of Phase 2 (config-based roles).
|
||||
package beads
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// DEPRECATED: Role bead ID naming convention is no longer used.
|
||||
// Role definitions are now config-based (internal/config/roles/*.toml).
|
||||
//
|
||||
// Role beads were stored in town beads (~/.beads/) with hq- prefix.
|
||||
//
|
||||
// Canonical format was: hq-<role>-role
|
||||
//
|
||||
// Examples:
|
||||
// - hq-mayor-role
|
||||
// - hq-deacon-role
|
||||
// - hq-witness-role
|
||||
// - hq-refinery-role
|
||||
// - hq-crew-role
|
||||
// - hq-polecat-role
|
||||
//
|
||||
// Legacy functions RoleBeadID() and RoleBeadIDTown() still work for
|
||||
// backward compatibility but should not be used in new code.
|
||||
|
||||
// RoleBeadID returns the role bead ID for a given role type, in the legacy
// gt-<role>-role form. Role beads define lifecycle configuration per agent type.
//
// Deprecated: Use RoleBeadIDTown() for town-level beads with hq- prefix.
// Role beads are global templates and should use hq-<role>-role, not gt-<role>-role.
func RoleBeadID(roleType string) string {
	return fmt.Sprintf("gt-%s-role", roleType)
}

// DogRoleBeadID returns the Dog role bead ID.
func DogRoleBeadID() string { return RoleBeadID("dog") }

// MayorRoleBeadID returns the Mayor role bead ID.
func MayorRoleBeadID() string { return RoleBeadID("mayor") }

// DeaconRoleBeadID returns the Deacon role bead ID.
func DeaconRoleBeadID() string { return RoleBeadID("deacon") }

// WitnessRoleBeadID returns the Witness role bead ID.
func WitnessRoleBeadID() string { return RoleBeadID("witness") }

// RefineryRoleBeadID returns the Refinery role bead ID.
func RefineryRoleBeadID() string { return RoleBeadID("refinery") }

// CrewRoleBeadID returns the Crew role bead ID.
func CrewRoleBeadID() string { return RoleBeadID("crew") }

// PolecatRoleBeadID returns the Polecat role bead ID.
func PolecatRoleBeadID() string { return RoleBeadID("polecat") }
|
||||
|
||||
// GetRoleConfig looks up a role bead and returns its parsed RoleConfig.
|
||||
// Returns nil, nil if the role bead doesn't exist or has no config.
|
||||
//
|
||||
// Deprecated: Use config.LoadRoleDefinition() instead. Role definitions
|
||||
// are now config-based, not stored as beads.
|
||||
func (b *Beads) GetRoleConfig(roleBeadID string) (*RoleConfig, error) {
|
||||
issue, err := b.Show(roleBeadID)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !HasLabel(issue, "gt:role") {
|
||||
return nil, fmt.Errorf("bead %s is not a role bead (missing gt:role label)", roleBeadID)
|
||||
}
|
||||
|
||||
return ParseRoleConfig(issue.Description), nil
|
||||
}
|
||||
|
||||
// HasLabel checks if an issue has a specific label.
|
||||
func HasLabel(issue *Issue, label string) bool {
|
||||
for _, l := range issue.Labels {
|
||||
if l == label {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// RoleBeadDef defines a role bead's metadata (ID, display title, and a
// human-readable description). Used by AllRoleBeadDefs to enumerate the
// legacy town-level role beads.
//
// Deprecated: Role beads are no longer created. Role definitions are
// now config-based (internal/config/roles/*.toml).
type RoleBeadDef struct {
	ID    string // e.g., "hq-witness-role"
	Title string // e.g., "Witness Role"
	Desc  string // Description of the role
}
|
||||
|
||||
// AllRoleBeadDefs returns all role bead definitions. IDs come from the
// town-level (hq-prefixed) *RoleBeadIDTown helpers.
//
// Deprecated: Role beads are no longer created by gt install or gt doctor.
// This function is kept for backward compatibility only.
func AllRoleBeadDefs() []RoleBeadDef {
	// Declarative table: one entry per agent role, in display order.
	return []RoleBeadDef{
		{
			ID:    MayorRoleBeadIDTown(),
			Title: "Mayor Role",
			Desc:  "Role definition for Mayor agents. Global coordinator for cross-rig work.",
		},
		{
			ID:    DeaconRoleBeadIDTown(),
			Title: "Deacon Role",
			Desc:  "Role definition for Deacon agents. Daemon beacon for heartbeats and monitoring.",
		},
		{
			ID:    DogRoleBeadIDTown(),
			Title: "Dog Role",
			Desc:  "Role definition for Dog agents. Town-level workers for cross-rig tasks.",
		},
		{
			ID:    WitnessRoleBeadIDTown(),
			Title: "Witness Role",
			Desc:  "Role definition for Witness agents. Per-rig worker monitor with progressive nudging.",
		},
		{
			ID:    RefineryRoleBeadIDTown(),
			Title: "Refinery Role",
			Desc:  "Role definition for Refinery agents. Merge queue processor with verification gates.",
		},
		{
			ID:    PolecatRoleBeadIDTown(),
			Title: "Polecat Role",
			Desc:  "Role definition for Polecat agents. Ephemeral workers for batch work dispatch.",
		},
		{
			ID:    CrewRoleBeadIDTown(),
			Title: "Crew Role",
			Desc:  "Role definition for Crew agents. Persistent user-managed workspaces.",
		},
	}
}
|
||||
@@ -2,6 +2,7 @@ package beads
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
@@ -84,19 +85,16 @@ func TestIsBeadsRepo(t *testing.T) {
|
||||
}
|
||||
|
||||
// TestWrapError tests error wrapping.
|
||||
// ZFC: Only test ErrNotFound detection. ErrNotARepo and ErrSyncConflict
|
||||
// were removed as per ZFC - agents should handle those errors directly.
|
||||
func TestWrapError(t *testing.T) {
|
||||
b := New("/test")
|
||||
|
||||
tests := []struct {
|
||||
stderr string
|
||||
wantErr error
|
||||
wantNil bool
|
||||
stderr string
|
||||
wantErr error
|
||||
wantNil bool
|
||||
}{
|
||||
{"not a beads repository", ErrNotARepo, false},
|
||||
{"No .beads directory found", ErrNotARepo, false},
|
||||
{".beads directory not found", ErrNotARepo, false},
|
||||
{"sync conflict detected", ErrSyncConflict, false},
|
||||
{"CONFLICT in file.md", ErrSyncConflict, false},
|
||||
{"Issue not found: gt-xyz", ErrNotFound, false},
|
||||
{"gt-xyz not found", ErrNotFound, false},
|
||||
}
|
||||
@@ -127,7 +125,6 @@ func TestIntegration(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Walk up to find .beads
|
||||
dir := cwd
|
||||
for {
|
||||
if _, err := os.Stat(filepath.Join(dir, ".beads")); err == nil {
|
||||
@@ -140,13 +137,24 @@ func TestIntegration(t *testing.T) {
|
||||
dir = parent
|
||||
}
|
||||
|
||||
// Resolve the actual beads directory (following redirect if present)
|
||||
// In multi-worktree setups, worktrees have .beads/redirect pointing to
|
||||
// the canonical beads location (e.g., mayor/rig/.beads)
|
||||
beadsDir := ResolveBeadsDir(dir)
|
||||
dbPath := filepath.Join(beadsDir, "beads.db")
|
||||
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
|
||||
t.Skip("no beads.db found (JSONL-only repo)")
|
||||
}
|
||||
|
||||
b := New(dir)
|
||||
|
||||
// Sync database with JSONL before testing to avoid "Database out of sync" errors.
|
||||
// This can happen when JSONL is updated (e.g., by git pull) but the SQLite database
|
||||
// hasn't been imported yet. Running sync --import-only ensures we test against
|
||||
// consistent data and prevents flaky test failures.
|
||||
syncCmd := exec.Command("bd", "--no-daemon", "sync", "--import-only")
|
||||
// We use --allow-stale to handle cases where the daemon is actively writing and
|
||||
// the staleness check would otherwise fail spuriously.
|
||||
syncCmd := exec.Command("bd", "--no-daemon", "--allow-stale", "sync", "--import-only")
|
||||
syncCmd.Dir = dir
|
||||
if err := syncCmd.Run(); err != nil {
|
||||
// If sync fails (e.g., no database exists), just log and continue
|
||||
@@ -201,10 +209,10 @@ func TestIntegration(t *testing.T) {
|
||||
// TestParseMRFields tests parsing MR fields from issue descriptions.
|
||||
func TestParseMRFields(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
issue *Issue
|
||||
wantNil bool
|
||||
wantFields *MRFields
|
||||
name string
|
||||
issue *Issue
|
||||
wantNil bool
|
||||
wantFields *MRFields
|
||||
}{
|
||||
{
|
||||
name: "nil issue",
|
||||
@@ -521,8 +529,8 @@ author: someone
|
||||
target: main`,
|
||||
},
|
||||
fields: &MRFields{
|
||||
Branch: "polecat/Capable/gt-ghi",
|
||||
Target: "integration/epic",
|
||||
Branch: "polecat/Capable/gt-ghi",
|
||||
Target: "integration/epic",
|
||||
CloseReason: "merged",
|
||||
},
|
||||
want: `branch: polecat/Capable/gt-ghi
|
||||
@@ -1032,10 +1040,10 @@ func TestParseAgentBeadID(t *testing.T) {
|
||||
// Parseable but not valid agent roles (IsAgentSessionBead will reject)
|
||||
{"gt-abc123", "", "abc123", "", true}, // Parses as town-level but not valid role
|
||||
// Other prefixes (bd-, hq-)
|
||||
{"bd-mayor", "", "mayor", "", true}, // bd prefix town-level
|
||||
{"bd-beads-witness", "beads", "witness", "", true}, // bd prefix rig-level singleton
|
||||
{"bd-beads-polecat-pearl", "beads", "polecat", "pearl", true}, // bd prefix rig-level named
|
||||
{"hq-mayor", "", "mayor", "", true}, // hq prefix town-level
|
||||
{"bd-mayor", "", "mayor", "", true}, // bd prefix town-level
|
||||
{"bd-beads-witness", "beads", "witness", "", true}, // bd prefix rig-level singleton
|
||||
{"bd-beads-polecat-pearl", "beads", "polecat", "pearl", true}, // bd prefix rig-level named
|
||||
{"hq-mayor", "", "mayor", "", true}, // hq prefix town-level
|
||||
// Truly invalid patterns
|
||||
{"x-mayor", "", "", "", false}, // Prefix too short (1 char)
|
||||
{"abcd-mayor", "", "", "", false}, // Prefix too long (4 chars)
|
||||
@@ -1502,3 +1510,867 @@ func TestDelegationTerms(t *testing.T) {
|
||||
t.Errorf("parsed.CreditShare = %d, want %d", parsed.CreditShare, terms.CreditShare)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSetupRedirect tests the beads redirect setup for worktrees.
|
||||
func TestSetupRedirect(t *testing.T) {
|
||||
t.Run("crew worktree with local beads", func(t *testing.T) {
|
||||
// Setup: town/rig/.beads (local, no redirect)
|
||||
townRoot := t.TempDir()
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
rigBeads := filepath.Join(rigRoot, ".beads")
|
||||
crewPath := filepath.Join(rigRoot, "crew", "max")
|
||||
|
||||
// Create rig structure
|
||||
if err := os.MkdirAll(rigBeads, 0755); err != nil {
|
||||
t.Fatalf("mkdir rig beads: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(crewPath, 0755); err != nil {
|
||||
t.Fatalf("mkdir crew: %v", err)
|
||||
}
|
||||
|
||||
// Run SetupRedirect
|
||||
if err := SetupRedirect(townRoot, crewPath); err != nil {
|
||||
t.Fatalf("SetupRedirect failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify redirect was created
|
||||
redirectPath := filepath.Join(crewPath, ".beads", "redirect")
|
||||
content, err := os.ReadFile(redirectPath)
|
||||
if err != nil {
|
||||
t.Fatalf("read redirect: %v", err)
|
||||
}
|
||||
|
||||
want := "../../.beads\n"
|
||||
if string(content) != want {
|
||||
t.Errorf("redirect content = %q, want %q", string(content), want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("crew worktree with tracked beads", func(t *testing.T) {
|
||||
// Setup: town/rig/.beads/redirect -> mayor/rig/.beads (tracked)
|
||||
townRoot := t.TempDir()
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
rigBeads := filepath.Join(rigRoot, ".beads")
|
||||
mayorRigBeads := filepath.Join(rigRoot, "mayor", "rig", ".beads")
|
||||
crewPath := filepath.Join(rigRoot, "crew", "max")
|
||||
|
||||
// Create rig structure with tracked beads
|
||||
if err := os.MkdirAll(mayorRigBeads, 0755); err != nil {
|
||||
t.Fatalf("mkdir mayor/rig beads: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(rigBeads, 0755); err != nil {
|
||||
t.Fatalf("mkdir rig beads: %v", err)
|
||||
}
|
||||
// Create rig-level redirect to mayor/rig/.beads
|
||||
if err := os.WriteFile(filepath.Join(rigBeads, "redirect"), []byte("mayor/rig/.beads\n"), 0644); err != nil {
|
||||
t.Fatalf("write rig redirect: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(crewPath, 0755); err != nil {
|
||||
t.Fatalf("mkdir crew: %v", err)
|
||||
}
|
||||
|
||||
// Run SetupRedirect
|
||||
if err := SetupRedirect(townRoot, crewPath); err != nil {
|
||||
t.Fatalf("SetupRedirect failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify redirect goes directly to mayor/rig/.beads (no chain - bd CLI doesn't support chains)
|
||||
redirectPath := filepath.Join(crewPath, ".beads", "redirect")
|
||||
content, err := os.ReadFile(redirectPath)
|
||||
if err != nil {
|
||||
t.Fatalf("read redirect: %v", err)
|
||||
}
|
||||
|
||||
want := "../../mayor/rig/.beads\n"
|
||||
if string(content) != want {
|
||||
t.Errorf("redirect content = %q, want %q", string(content), want)
|
||||
}
|
||||
|
||||
// Verify redirect resolves correctly
|
||||
resolved := ResolveBeadsDir(crewPath)
|
||||
// crew/max -> ../../mayor/rig/.beads (direct, no chain)
|
||||
if resolved != mayorRigBeads {
|
||||
t.Errorf("resolved = %q, want %q", resolved, mayorRigBeads)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("polecat worktree", func(t *testing.T) {
|
||||
townRoot := t.TempDir()
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
rigBeads := filepath.Join(rigRoot, ".beads")
|
||||
polecatPath := filepath.Join(rigRoot, "polecats", "worker1")
|
||||
|
||||
if err := os.MkdirAll(rigBeads, 0755); err != nil {
|
||||
t.Fatalf("mkdir rig beads: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(polecatPath, 0755); err != nil {
|
||||
t.Fatalf("mkdir polecat: %v", err)
|
||||
}
|
||||
|
||||
if err := SetupRedirect(townRoot, polecatPath); err != nil {
|
||||
t.Fatalf("SetupRedirect failed: %v", err)
|
||||
}
|
||||
|
||||
redirectPath := filepath.Join(polecatPath, ".beads", "redirect")
|
||||
content, err := os.ReadFile(redirectPath)
|
||||
if err != nil {
|
||||
t.Fatalf("read redirect: %v", err)
|
||||
}
|
||||
|
||||
want := "../../.beads\n"
|
||||
if string(content) != want {
|
||||
t.Errorf("redirect content = %q, want %q", string(content), want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("refinery worktree", func(t *testing.T) {
|
||||
townRoot := t.TempDir()
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
rigBeads := filepath.Join(rigRoot, ".beads")
|
||||
refineryPath := filepath.Join(rigRoot, "refinery", "rig")
|
||||
|
||||
if err := os.MkdirAll(rigBeads, 0755); err != nil {
|
||||
t.Fatalf("mkdir rig beads: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(refineryPath, 0755); err != nil {
|
||||
t.Fatalf("mkdir refinery: %v", err)
|
||||
}
|
||||
|
||||
if err := SetupRedirect(townRoot, refineryPath); err != nil {
|
||||
t.Fatalf("SetupRedirect failed: %v", err)
|
||||
}
|
||||
|
||||
redirectPath := filepath.Join(refineryPath, ".beads", "redirect")
|
||||
content, err := os.ReadFile(redirectPath)
|
||||
if err != nil {
|
||||
t.Fatalf("read redirect: %v", err)
|
||||
}
|
||||
|
||||
want := "../../.beads\n"
|
||||
if string(content) != want {
|
||||
t.Errorf("redirect content = %q, want %q", string(content), want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("cleans runtime files but preserves tracked files", func(t *testing.T) {
|
||||
townRoot := t.TempDir()
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
rigBeads := filepath.Join(rigRoot, ".beads")
|
||||
crewPath := filepath.Join(rigRoot, "crew", "max")
|
||||
crewBeads := filepath.Join(crewPath, ".beads")
|
||||
|
||||
if err := os.MkdirAll(rigBeads, 0755); err != nil {
|
||||
t.Fatalf("mkdir rig beads: %v", err)
|
||||
}
|
||||
// Simulate worktree with both runtime and tracked files
|
||||
if err := os.MkdirAll(crewBeads, 0755); err != nil {
|
||||
t.Fatalf("mkdir crew beads: %v", err)
|
||||
}
|
||||
// Runtime files (should be removed)
|
||||
if err := os.WriteFile(filepath.Join(crewBeads, "beads.db"), []byte("fake db"), 0644); err != nil {
|
||||
t.Fatalf("write fake db: %v", err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(crewBeads, "issues.jsonl"), []byte("{}"), 0644); err != nil {
|
||||
t.Fatalf("write issues.jsonl: %v", err)
|
||||
}
|
||||
// Tracked files (should be preserved)
|
||||
if err := os.WriteFile(filepath.Join(crewBeads, "config.yaml"), []byte("prefix: test"), 0644); err != nil {
|
||||
t.Fatalf("write config: %v", err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(crewBeads, "README.md"), []byte("# Beads"), 0644); err != nil {
|
||||
t.Fatalf("write README: %v", err)
|
||||
}
|
||||
|
||||
if err := SetupRedirect(townRoot, crewPath); err != nil {
|
||||
t.Fatalf("SetupRedirect failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify runtime files were cleaned up
|
||||
if _, err := os.Stat(filepath.Join(crewBeads, "beads.db")); !os.IsNotExist(err) {
|
||||
t.Error("beads.db should have been removed")
|
||||
}
|
||||
if _, err := os.Stat(filepath.Join(crewBeads, "issues.jsonl")); !os.IsNotExist(err) {
|
||||
t.Error("issues.jsonl should have been removed")
|
||||
}
|
||||
|
||||
// Verify tracked files were preserved
|
||||
if _, err := os.Stat(filepath.Join(crewBeads, "config.yaml")); err != nil {
|
||||
t.Errorf("config.yaml should have been preserved: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(filepath.Join(crewBeads, "README.md")); err != nil {
|
||||
t.Errorf("README.md should have been preserved: %v", err)
|
||||
}
|
||||
|
||||
// Verify redirect was created
|
||||
redirectPath := filepath.Join(crewBeads, "redirect")
|
||||
if _, err := os.Stat(redirectPath); err != nil {
|
||||
t.Errorf("redirect file should exist: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("rejects mayor/rig canonical location", func(t *testing.T) {
|
||||
townRoot := t.TempDir()
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
rigBeads := filepath.Join(rigRoot, ".beads")
|
||||
mayorRigPath := filepath.Join(rigRoot, "mayor", "rig")
|
||||
|
||||
if err := os.MkdirAll(rigBeads, 0755); err != nil {
|
||||
t.Fatalf("mkdir rig beads: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(mayorRigPath, 0755); err != nil {
|
||||
t.Fatalf("mkdir mayor/rig: %v", err)
|
||||
}
|
||||
|
||||
err := SetupRedirect(townRoot, mayorRigPath)
|
||||
if err == nil {
|
||||
t.Error("SetupRedirect should reject mayor/rig location")
|
||||
}
|
||||
if err != nil && !strings.Contains(err.Error(), "canonical") {
|
||||
t.Errorf("error should mention canonical location, got: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("rejects path too shallow", func(t *testing.T) {
|
||||
townRoot := t.TempDir()
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
|
||||
if err := os.MkdirAll(rigRoot, 0755); err != nil {
|
||||
t.Fatalf("mkdir rig: %v", err)
|
||||
}
|
||||
|
||||
err := SetupRedirect(townRoot, rigRoot)
|
||||
if err == nil {
|
||||
t.Error("SetupRedirect should reject rig root (too shallow)")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("fails if rig beads missing", func(t *testing.T) {
|
||||
townRoot := t.TempDir()
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
crewPath := filepath.Join(rigRoot, "crew", "max")
|
||||
|
||||
// No rig/.beads or mayor/rig/.beads created
|
||||
if err := os.MkdirAll(crewPath, 0755); err != nil {
|
||||
t.Fatalf("mkdir crew: %v", err)
|
||||
}
|
||||
|
||||
err := SetupRedirect(townRoot, crewPath)
|
||||
if err == nil {
|
||||
t.Error("SetupRedirect should fail if rig .beads missing")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("crew worktree with mayor/rig beads only", func(t *testing.T) {
|
||||
// Setup: no rig/.beads, only mayor/rig/.beads exists
|
||||
// This is the tracked beads architecture where rig root has no .beads directory
|
||||
townRoot := t.TempDir()
|
||||
rigRoot := filepath.Join(townRoot, "testrig")
|
||||
mayorRigBeads := filepath.Join(rigRoot, "mayor", "rig", ".beads")
|
||||
crewPath := filepath.Join(rigRoot, "crew", "max")
|
||||
|
||||
// Create only mayor/rig/.beads (no rig/.beads)
|
||||
if err := os.MkdirAll(mayorRigBeads, 0755); err != nil {
|
||||
t.Fatalf("mkdir mayor/rig beads: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(crewPath, 0755); err != nil {
|
||||
t.Fatalf("mkdir crew: %v", err)
|
||||
}
|
||||
|
||||
// Run SetupRedirect - should succeed and point to mayor/rig/.beads
|
||||
if err := SetupRedirect(townRoot, crewPath); err != nil {
|
||||
t.Fatalf("SetupRedirect failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify redirect points to mayor/rig/.beads
|
||||
redirectPath := filepath.Join(crewPath, ".beads", "redirect")
|
||||
content, err := os.ReadFile(redirectPath)
|
||||
if err != nil {
|
||||
t.Fatalf("read redirect: %v", err)
|
||||
}
|
||||
|
||||
want := "../../mayor/rig/.beads\n"
|
||||
if string(content) != want {
|
||||
t.Errorf("redirect content = %q, want %q", string(content), want)
|
||||
}
|
||||
|
||||
// Verify redirect resolves correctly
|
||||
resolved := ResolveBeadsDir(crewPath)
|
||||
if resolved != mayorRigBeads {
|
||||
t.Errorf("resolved = %q, want %q", resolved, mayorRigBeads)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TestAgentBeadTombstoneBug demonstrates the bd bug where `bd delete --hard --force`
|
||||
// creates tombstones instead of truly deleting records.
|
||||
//
|
||||
//
|
||||
// This test documents the bug behavior:
|
||||
// 1. Create agent bead
|
||||
// 2. Delete with --hard --force (supposed to permanently delete)
|
||||
// 3. BUG: Tombstone is created instead
|
||||
// 4. BUG: bd create fails with UNIQUE constraint
|
||||
// 5. BUG: bd reopen fails with "issue not found" (tombstones are invisible)
|
||||
func TestAgentBeadTombstoneBug(t *testing.T) {
|
||||
// Skip: bd CLI 0.47.2 has a bug where database writes don't commit
|
||||
// ("sql: database is closed" during auto-flush). This blocks all tests
|
||||
// that need to create issues. See internal issue for tracking.
|
||||
t.Skip("bd CLI 0.47.2 bug: database writes don't commit")
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create isolated beads instance and initialize database
|
||||
bd := NewIsolated(tmpDir)
|
||||
if err := bd.Init("test"); err != nil {
|
||||
t.Fatalf("bd init: %v", err)
|
||||
}
|
||||
|
||||
agentID := "test-testrig-polecat-tombstone"
|
||||
|
||||
// Step 1: Create agent bead
|
||||
_, err := bd.CreateAgentBead(agentID, "Test agent", &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "spawning",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("CreateAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Step 2: Delete with --hard --force (supposed to permanently delete)
|
||||
err = bd.DeleteAgentBead(agentID)
|
||||
if err != nil {
|
||||
t.Fatalf("DeleteAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Step 3: BUG - Tombstone exists (check via bd list --status=tombstone)
|
||||
out, err := bd.run("list", "--status=tombstone", "--json")
|
||||
if err != nil {
|
||||
t.Fatalf("list tombstones: %v", err)
|
||||
}
|
||||
|
||||
// Parse to check if our agent is in the tombstone list
|
||||
var tombstones []Issue
|
||||
if err := json.Unmarshal(out, &tombstones); err != nil {
|
||||
t.Fatalf("parse tombstones: %v", err)
|
||||
}
|
||||
|
||||
foundTombstone := false
|
||||
for _, ts := range tombstones {
|
||||
if ts.ID == agentID {
|
||||
foundTombstone = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !foundTombstone {
|
||||
// If bd ever fixes the --hard flag, this test will fail here
|
||||
// That's a good thing - it means the bug is fixed!
|
||||
t.Skip("bd --hard appears to be fixed (no tombstone created) - update this test")
|
||||
}
|
||||
|
||||
// Step 4: BUG - bd create fails with UNIQUE constraint
|
||||
_, err = bd.CreateAgentBead(agentID, "Test agent 2", &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "spawning",
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatal("expected UNIQUE constraint error, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "UNIQUE constraint") {
|
||||
t.Errorf("expected UNIQUE constraint error, got: %v", err)
|
||||
}
|
||||
|
||||
// Step 5: BUG - bd reopen fails (tombstones are invisible)
|
||||
_, err = bd.run("reopen", agentID, "--reason=test")
|
||||
if err == nil {
|
||||
t.Fatal("expected reopen to fail on tombstone, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "no issue found") && !strings.Contains(err.Error(), "issue not found") {
|
||||
t.Errorf("expected 'issue not found' error, got: %v", err)
|
||||
}
|
||||
|
||||
t.Log("BUG CONFIRMED: bd delete --hard creates tombstones that block recreation")
|
||||
}
|
||||
|
||||
// TestAgentBeadCloseReopenWorkaround demonstrates the workaround for the tombstone bug:
|
||||
// use Close instead of Delete, then Reopen works.
|
||||
func TestAgentBeadCloseReopenWorkaround(t *testing.T) {
|
||||
t.Skip("bd CLI 0.47.2 bug: database writes don't commit")
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
bd := NewIsolated(tmpDir)
|
||||
if err := bd.Init("test"); err != nil {
|
||||
t.Fatalf("bd init: %v", err)
|
||||
}
|
||||
|
||||
agentID := "test-testrig-polecat-closereopen"
|
||||
|
||||
// Step 1: Create agent bead
|
||||
_, err := bd.CreateAgentBead(agentID, "Test agent", &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "spawning",
|
||||
HookBead: "test-task-1",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("CreateAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Step 2: Close (not delete) - this is the workaround
|
||||
err = bd.CloseAndClearAgentBead(agentID, "polecat removed")
|
||||
if err != nil {
|
||||
t.Fatalf("CloseAndClearAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Step 3: Verify bead is closed (not tombstone)
|
||||
issue, err := bd.Show(agentID)
|
||||
if err != nil {
|
||||
t.Fatalf("Show after close: %v", err)
|
||||
}
|
||||
if issue.Status != "closed" {
|
||||
t.Errorf("status = %q, want 'closed'", issue.Status)
|
||||
}
|
||||
|
||||
// Step 4: Reopen works on closed beads
|
||||
_, err = bd.run("reopen", agentID, "--reason=re-spawning")
|
||||
if err != nil {
|
||||
t.Fatalf("reopen failed: %v", err)
|
||||
}
|
||||
|
||||
// Step 5: Verify bead is open again
|
||||
issue, err = bd.Show(agentID)
|
||||
if err != nil {
|
||||
t.Fatalf("Show after reopen: %v", err)
|
||||
}
|
||||
if issue.Status != "open" {
|
||||
t.Errorf("status = %q, want 'open'", issue.Status)
|
||||
}
|
||||
|
||||
t.Log("WORKAROUND CONFIRMED: Close + Reopen works for agent bead lifecycle")
|
||||
}
|
||||
|
||||
// TestCreateOrReopenAgentBead_ClosedBead tests that CreateOrReopenAgentBead
|
||||
// successfully reopens a closed agent bead and updates its fields.
|
||||
func TestCreateOrReopenAgentBead_ClosedBead(t *testing.T) {
|
||||
t.Skip("bd CLI 0.47.2 bug: database writes don't commit")
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
bd := NewIsolated(tmpDir)
|
||||
if err := bd.Init("test"); err != nil {
|
||||
t.Fatalf("bd init: %v", err)
|
||||
}
|
||||
|
||||
agentID := "test-testrig-polecat-lifecycle"
|
||||
|
||||
// Simulate polecat lifecycle: spawn → nuke → respawn
|
||||
|
||||
// Spawn 1: Create agent bead with first task
|
||||
issue1, err := bd.CreateOrReopenAgentBead(agentID, agentID, &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "spawning",
|
||||
HookBead: "test-task-1",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Spawn 1 - CreateOrReopenAgentBead: %v", err)
|
||||
}
|
||||
if issue1.Status != "open" {
|
||||
t.Errorf("Spawn 1: status = %q, want 'open'", issue1.Status)
|
||||
}
|
||||
|
||||
// Nuke 1: Close agent bead (workaround for tombstone bug)
|
||||
err = bd.CloseAndClearAgentBead(agentID, "polecat nuked")
|
||||
if err != nil {
|
||||
t.Fatalf("Nuke 1 - CloseAndClearAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Spawn 2: CreateOrReopenAgentBead should reopen and update
|
||||
issue2, err := bd.CreateOrReopenAgentBead(agentID, agentID, &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "spawning",
|
||||
HookBead: "test-task-2", // Different task
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Spawn 2 - CreateOrReopenAgentBead: %v", err)
|
||||
}
|
||||
if issue2.Status != "open" {
|
||||
t.Errorf("Spawn 2: status = %q, want 'open'", issue2.Status)
|
||||
}
|
||||
|
||||
// Verify the hook was updated to the new task
|
||||
fields := ParseAgentFields(issue2.Description)
|
||||
if fields.HookBead != "test-task-2" {
|
||||
t.Errorf("Spawn 2: hook_bead = %q, want 'test-task-2'", fields.HookBead)
|
||||
}
|
||||
|
||||
// Nuke 2: Close again
|
||||
err = bd.CloseAndClearAgentBead(agentID, "polecat nuked again")
|
||||
if err != nil {
|
||||
t.Fatalf("Nuke 2 - CloseAndClearAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Spawn 3: Should still work
|
||||
issue3, err := bd.CreateOrReopenAgentBead(agentID, agentID, &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "spawning",
|
||||
HookBead: "test-task-3",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Spawn 3 - CreateOrReopenAgentBead: %v", err)
|
||||
}
|
||||
|
||||
fields = ParseAgentFields(issue3.Description)
|
||||
if fields.HookBead != "test-task-3" {
|
||||
t.Errorf("Spawn 3: hook_bead = %q, want 'test-task-3'", fields.HookBead)
|
||||
}
|
||||
|
||||
t.Log("LIFECYCLE TEST PASSED: spawn → nuke → respawn works with close/reopen")
|
||||
}
|
||||
|
||||
// TestCloseAndClearAgentBead_FieldClearing tests that CloseAndClearAgentBead clears all mutable
|
||||
// fields to emulate delete --force --hard behavior. This ensures reopened agent
|
||||
// beads don't have stale state from previous lifecycle.
|
||||
func TestCloseAndClearAgentBead_FieldClearing(t *testing.T) {
|
||||
t.Skip("bd CLI 0.47.2 bug: database writes don't commit")
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
bd := NewIsolated(tmpDir)
|
||||
if err := bd.Init("test"); err != nil {
|
||||
t.Fatalf("bd init: %v", err)
|
||||
}
|
||||
|
||||
// Test cases for field clearing permutations
|
||||
tests := []struct {
|
||||
name string
|
||||
fields *AgentFields
|
||||
reason string
|
||||
}{
|
||||
{
|
||||
name: "all_fields_populated",
|
||||
fields: &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "running",
|
||||
HookBead: "test-issue-123",
|
||||
CleanupStatus: "clean",
|
||||
ActiveMR: "test-mr-456",
|
||||
NotificationLevel: "normal",
|
||||
},
|
||||
reason: "polecat completed work",
|
||||
},
|
||||
{
|
||||
name: "only_hook_bead",
|
||||
fields: &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "spawning",
|
||||
HookBead: "test-issue-789",
|
||||
},
|
||||
reason: "polecat nuked",
|
||||
},
|
||||
{
|
||||
name: "only_active_mr",
|
||||
fields: &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "running",
|
||||
ActiveMR: "test-mr-abc",
|
||||
},
|
||||
reason: "",
|
||||
},
|
||||
{
|
||||
name: "only_cleanup_status",
|
||||
fields: &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "idle",
|
||||
CleanupStatus: "has_uncommitted",
|
||||
},
|
||||
reason: "cleanup required",
|
||||
},
|
||||
{
|
||||
name: "no_mutable_fields",
|
||||
fields: &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "spawning",
|
||||
},
|
||||
reason: "fresh spawn closed",
|
||||
},
|
||||
{
|
||||
name: "polecat_with_all_field_types",
|
||||
fields: &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "processing",
|
||||
HookBead: "test-task-xyz",
|
||||
ActiveMR: "test-mr-processing",
|
||||
CleanupStatus: "has_uncommitted",
|
||||
NotificationLevel: "verbose",
|
||||
},
|
||||
reason: "comprehensive cleanup",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Use tc.name for suffix to avoid hash-like patterns (e.g., single digits)
|
||||
// that trigger bd's isLikelyHash() prefix extraction in v0.47.1+
|
||||
agentID := fmt.Sprintf("test-testrig-%s-%s", tc.fields.RoleType, tc.name)
|
||||
|
||||
// Step 1: Create agent bead with specified fields
|
||||
_, err := bd.CreateAgentBead(agentID, "Test agent", tc.fields)
|
||||
if err != nil {
|
||||
t.Fatalf("CreateAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Verify fields were set
|
||||
issue, err := bd.Show(agentID)
|
||||
if err != nil {
|
||||
t.Fatalf("Show before close: %v", err)
|
||||
}
|
||||
beforeFields := ParseAgentFields(issue.Description)
|
||||
if tc.fields.HookBead != "" && beforeFields.HookBead != tc.fields.HookBead {
|
||||
t.Errorf("before close: hook_bead = %q, want %q", beforeFields.HookBead, tc.fields.HookBead)
|
||||
}
|
||||
|
||||
// Step 2: Close the agent bead
|
||||
err = bd.CloseAndClearAgentBead(agentID, tc.reason)
|
||||
if err != nil {
|
||||
t.Fatalf("CloseAndClearAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Step 3: Verify bead is closed
|
||||
issue, err = bd.Show(agentID)
|
||||
if err != nil {
|
||||
t.Fatalf("Show after close: %v", err)
|
||||
}
|
||||
if issue.Status != "closed" {
|
||||
t.Errorf("status = %q, want 'closed'", issue.Status)
|
||||
}
|
||||
|
||||
// Step 4: Verify mutable fields were cleared
|
||||
afterFields := ParseAgentFields(issue.Description)
|
||||
|
||||
// hook_bead should be cleared (empty or "null")
|
||||
if afterFields.HookBead != "" {
|
||||
t.Errorf("after close: hook_bead = %q, want empty (was %q)", afterFields.HookBead, tc.fields.HookBead)
|
||||
}
|
||||
|
||||
// active_mr should be cleared
|
||||
if afterFields.ActiveMR != "" {
|
||||
t.Errorf("after close: active_mr = %q, want empty (was %q)", afterFields.ActiveMR, tc.fields.ActiveMR)
|
||||
}
|
||||
|
||||
// cleanup_status should be cleared
|
||||
if afterFields.CleanupStatus != "" {
|
||||
t.Errorf("after close: cleanup_status = %q, want empty (was %q)", afterFields.CleanupStatus, tc.fields.CleanupStatus)
|
||||
}
|
||||
|
||||
// agent_state should be "closed"
|
||||
if afterFields.AgentState != "closed" {
|
||||
t.Errorf("after close: agent_state = %q, want 'closed' (was %q)", afterFields.AgentState, tc.fields.AgentState)
|
||||
}
|
||||
|
||||
// Immutable fields should be preserved
|
||||
if afterFields.RoleType != tc.fields.RoleType {
|
||||
t.Errorf("after close: role_type = %q, want %q (should be preserved)", afterFields.RoleType, tc.fields.RoleType)
|
||||
}
|
||||
if afterFields.Rig != tc.fields.Rig {
|
||||
t.Errorf("after close: rig = %q, want %q (should be preserved)", afterFields.Rig, tc.fields.Rig)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestCloseAndClearAgentBead_NonExistent tests behavior when closing a non-existent agent bead.
|
||||
func TestCloseAndClearAgentBead_NonExistent(t *testing.T) {
|
||||
t.Skip("bd CLI 0.47.2 bug: database writes don't commit")
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
bd := NewIsolated(tmpDir)
|
||||
if err := bd.Init("test"); err != nil {
|
||||
t.Fatalf("bd init: %v", err)
|
||||
}
|
||||
|
||||
// Attempt to close non-existent bead
|
||||
err := bd.CloseAndClearAgentBead("test-nonexistent-polecat-xyz", "should fail")
|
||||
|
||||
// Should return an error (bd close on non-existent issue fails)
|
||||
if err == nil {
|
||||
t.Error("CloseAndClearAgentBead on non-existent bead should return error")
|
||||
}
|
||||
}
|
||||
|
||||
// TestCloseAndClearAgentBead_AlreadyClosed tests behavior when closing an already-closed agent bead.
|
||||
func TestCloseAndClearAgentBead_AlreadyClosed(t *testing.T) {
|
||||
t.Skip("bd CLI 0.47.2 bug: database writes don't commit")
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
bd := NewIsolated(tmpDir)
|
||||
if err := bd.Init("test"); err != nil {
|
||||
t.Fatalf("bd init: %v", err)
|
||||
}
|
||||
|
||||
agentID := "test-testrig-polecat-doubleclosed"
|
||||
|
||||
// Create agent bead
|
||||
_, err := bd.CreateAgentBead(agentID, "Test agent", &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "running",
|
||||
HookBead: "test-issue-1",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("CreateAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// First close - should succeed
|
||||
err = bd.CloseAndClearAgentBead(agentID, "first close")
|
||||
if err != nil {
|
||||
t.Fatalf("First CloseAndClearAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Second close - behavior depends on bd close semantics
|
||||
// Document actual behavior: bd close on already-closed bead may error or be idempotent
|
||||
err = bd.CloseAndClearAgentBead(agentID, "second close")
|
||||
|
||||
// Verify bead is still closed regardless of error
|
||||
issue, showErr := bd.Show(agentID)
|
||||
if showErr != nil {
|
||||
t.Fatalf("Show after double close: %v", showErr)
|
||||
}
|
||||
if issue.Status != "closed" {
|
||||
t.Errorf("status after double close = %q, want 'closed'", issue.Status)
|
||||
}
|
||||
|
||||
// Log actual behavior for documentation
|
||||
if err != nil {
|
||||
t.Logf("BEHAVIOR: CloseAndClearAgentBead on already-closed bead returns error: %v", err)
|
||||
} else {
|
||||
t.Log("BEHAVIOR: CloseAndClearAgentBead on already-closed bead is idempotent (no error)")
|
||||
}
|
||||
}
|
||||
|
||||
// TestCloseAndClearAgentBead_ReopenHasCleanState tests that reopening a closed agent bead
|
||||
// starts with clean state (no stale hook_bead, active_mr, etc.).
|
||||
func TestCloseAndClearAgentBead_ReopenHasCleanState(t *testing.T) {
|
||||
t.Skip("bd CLI 0.47.2 bug: database writes don't commit")
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
bd := NewIsolated(tmpDir)
|
||||
if err := bd.Init("test"); err != nil {
|
||||
t.Fatalf("bd init: %v", err)
|
||||
}
|
||||
|
||||
agentID := "test-testrig-polecat-cleanreopen"
|
||||
|
||||
// Step 1: Create agent with all fields populated
|
||||
_, err := bd.CreateAgentBead(agentID, "Test agent", &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "running",
|
||||
HookBead: "test-old-issue",
|
||||
CleanupStatus: "clean",
|
||||
ActiveMR: "test-old-mr",
|
||||
NotificationLevel: "normal",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("CreateAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Step 2: Close - should clear mutable fields
|
||||
err = bd.CloseAndClearAgentBead(agentID, "completing old work")
|
||||
if err != nil {
|
||||
t.Fatalf("CloseAndClearAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Step 3: Reopen with new fields
|
||||
newIssue, err := bd.CreateOrReopenAgentBead(agentID, agentID, &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "spawning",
|
||||
HookBead: "test-new-issue",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("CreateOrReopenAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Step 4: Verify new state - should have new hook, no stale data
|
||||
fields := ParseAgentFields(newIssue.Description)
|
||||
|
||||
if fields.HookBead != "test-new-issue" {
|
||||
t.Errorf("hook_bead = %q, want 'test-new-issue'", fields.HookBead)
|
||||
}
|
||||
|
||||
// The old active_mr should NOT be present (was cleared on close)
|
||||
if fields.ActiveMR == "test-old-mr" {
|
||||
t.Error("active_mr still has stale value 'test-old-mr' - CloseAndClearAgentBead didn't clear it")
|
||||
}
|
||||
|
||||
// agent_state should be the new state
|
||||
if fields.AgentState != "spawning" {
|
||||
t.Errorf("agent_state = %q, want 'spawning'", fields.AgentState)
|
||||
}
|
||||
|
||||
t.Log("CLEAN STATE CONFIRMED: Reopened agent bead has no stale mutable fields")
|
||||
}
|
||||
|
||||
// TestCloseAndClearAgentBead_ReasonVariations tests close with different reason values.
|
||||
func TestCloseAndClearAgentBead_ReasonVariations(t *testing.T) {
|
||||
t.Skip("bd CLI 0.47.2 bug: database writes don't commit")
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
bd := NewIsolated(tmpDir)
|
||||
if err := bd.Init("test"); err != nil {
|
||||
t.Fatalf("bd init: %v", err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
reason string
|
||||
}{
|
||||
{"empty_reason", ""},
|
||||
{"simple_reason", "polecat nuked"},
|
||||
{"reason_with_spaces", "polecat completed work successfully"},
|
||||
{"reason_with_special_chars", "closed: issue #123 (resolved)"},
|
||||
{"long_reason", "This is a very long reason that explains in detail why the agent bead was closed including multiple sentences and detailed context about the situation."},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Use tc.name for suffix to avoid hash-like patterns (e.g., "reason0")
|
||||
// that trigger bd's isLikelyHash() prefix extraction in v0.47.1+
|
||||
agentID := fmt.Sprintf("test-testrig-polecat-%s", tc.name)
|
||||
|
||||
// Create agent bead
|
||||
_, err := bd.CreateAgentBead(agentID, "Test agent", &AgentFields{
|
||||
RoleType: "polecat",
|
||||
Rig: "testrig",
|
||||
AgentState: "running",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("CreateAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Close with specified reason
|
||||
err = bd.CloseAndClearAgentBead(agentID, tc.reason)
|
||||
if err != nil {
|
||||
t.Fatalf("CloseAndClearAgentBead: %v", err)
|
||||
}
|
||||
|
||||
// Verify closed
|
||||
issue, err := bd.Show(agentID)
|
||||
if err != nil {
|
||||
t.Fatalf("Show: %v", err)
|
||||
}
|
||||
if issue.Status != "closed" {
|
||||
t.Errorf("status = %q, want 'closed'", issue.Status)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
130
internal/beads/beads_types.go
Normal file
130
internal/beads/beads_types.go
Normal file
@@ -0,0 +1,130 @@
|
||||
// Package beads provides custom type management for agent beads.
|
||||
package beads
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
)
|
||||
|
||||
// typesSentinel is a marker file indicating custom types have been configured.
|
||||
// This persists across CLI invocations to avoid redundant bd config calls.
|
||||
const typesSentinel = ".gt-types-configured"
|
||||
|
||||
// ensuredDirs tracks which beads directories have been ensured this session.
|
||||
// This provides fast in-memory caching for multiple creates in the same CLI run.
|
||||
var (
|
||||
ensuredDirs = make(map[string]bool)
|
||||
ensuredMu sync.Mutex
|
||||
)
|
||||
|
||||
// FindTownRoot walks upward from startDir looking for the Gas Town root,
// identified by the presence of a mayor/town.json file. It returns the
// directory containing that marker, or "" if the filesystem root is reached
// without finding one.
func FindTownRoot(startDir string) string {
	for dir := startDir; ; {
		if _, err := os.Stat(filepath.Join(dir, "mayor", "town.json")); err == nil {
			return dir
		}
		parent := filepath.Dir(dir)
		if parent == dir {
			// filepath.Dir is a fixed point only at the filesystem root.
			return ""
		}
		dir = parent
	}
}
|
||||
|
||||
// ResolveRoutingTarget determines which beads directory a bead ID will route to.
|
||||
// It extracts the prefix from the bead ID and looks up the corresponding route.
|
||||
// Returns the resolved beads directory path, following any redirects.
|
||||
//
|
||||
// If townRoot is empty or prefix is not found, falls back to the provided fallbackDir.
|
||||
func ResolveRoutingTarget(townRoot, beadID, fallbackDir string) string {
|
||||
if townRoot == "" {
|
||||
return fallbackDir
|
||||
}
|
||||
|
||||
// Extract prefix from bead ID (e.g., "gt-gastown-polecat-Toast" -> "gt-")
|
||||
prefix := ExtractPrefix(beadID)
|
||||
if prefix == "" {
|
||||
return fallbackDir
|
||||
}
|
||||
|
||||
// Look up rig path for this prefix
|
||||
rigPath := GetRigPathForPrefix(townRoot, prefix)
|
||||
if rigPath == "" {
|
||||
return fallbackDir
|
||||
}
|
||||
|
||||
// Resolve redirects and get final beads directory
|
||||
beadsDir := ResolveBeadsDir(rigPath)
|
||||
if beadsDir == "" {
|
||||
return fallbackDir
|
||||
}
|
||||
|
||||
return beadsDir
|
||||
}
|
||||
|
||||
// EnsureCustomTypes ensures the target beads directory has custom types configured.
|
||||
// Uses a two-level caching strategy:
|
||||
// - In-memory cache for multiple creates in the same CLI invocation
|
||||
// - Sentinel file on disk for persistence across CLI invocations
|
||||
//
|
||||
// This function is thread-safe and idempotent.
|
||||
func EnsureCustomTypes(beadsDir string) error {
|
||||
if beadsDir == "" {
|
||||
return fmt.Errorf("empty beads directory")
|
||||
}
|
||||
|
||||
ensuredMu.Lock()
|
||||
defer ensuredMu.Unlock()
|
||||
|
||||
// Fast path: in-memory cache (same CLI invocation)
|
||||
if ensuredDirs[beadsDir] {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fast path: sentinel file exists (previous CLI invocation)
|
||||
sentinelPath := filepath.Join(beadsDir, typesSentinel)
|
||||
if _, err := os.Stat(sentinelPath); err == nil {
|
||||
ensuredDirs[beadsDir] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Verify beads directory exists
|
||||
if _, err := os.Stat(beadsDir); os.IsNotExist(err) {
|
||||
return fmt.Errorf("beads directory does not exist: %s", beadsDir)
|
||||
}
|
||||
|
||||
// Configure custom types via bd CLI
|
||||
typesList := strings.Join(constants.BeadsCustomTypesList(), ",")
|
||||
cmd := exec.Command("bd", "config", "set", "types.custom", typesList)
|
||||
cmd.Dir = beadsDir
|
||||
cmd.Env = append(os.Environ(), "BEADS_DIR="+beadsDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("configure custom types in %s: %s: %w",
|
||||
beadsDir, strings.TrimSpace(string(output)), err)
|
||||
}
|
||||
|
||||
// Write sentinel file (best effort - don't fail if this fails)
|
||||
// The sentinel contains a version marker for future compatibility
|
||||
_ = os.WriteFile(sentinelPath, []byte("v1\n"), 0644)
|
||||
|
||||
ensuredDirs[beadsDir] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResetEnsuredDirs clears the in-memory cache of ensured directories.
|
||||
// This is primarily useful for testing.
|
||||
func ResetEnsuredDirs() {
|
||||
ensuredMu.Lock()
|
||||
defer ensuredMu.Unlock()
|
||||
ensuredDirs = make(map[string]bool)
|
||||
}
|
||||
234
internal/beads/beads_types_test.go
Normal file
234
internal/beads/beads_types_test.go
Normal file
@@ -0,0 +1,234 @@
|
||||
package beads
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFindTownRoot(t *testing.T) {
|
||||
// Create a temporary town structure
|
||||
tmpDir := t.TempDir()
|
||||
mayorDir := filepath.Join(tmpDir, "mayor")
|
||||
if err := os.MkdirAll(mayorDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(mayorDir, "town.json"), []byte("{}"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create nested directories
|
||||
deepDir := filepath.Join(tmpDir, "rig1", "crew", "worker1")
|
||||
if err := os.MkdirAll(deepDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
startDir string
|
||||
expected string
|
||||
}{
|
||||
{"from town root", tmpDir, tmpDir},
|
||||
{"from mayor dir", mayorDir, tmpDir},
|
||||
{"from deep nested dir", deepDir, tmpDir},
|
||||
{"from non-town dir", t.TempDir(), ""},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := FindTownRoot(tc.startDir)
|
||||
if result != tc.expected {
|
||||
t.Errorf("FindTownRoot(%q) = %q, want %q", tc.startDir, result, tc.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveRoutingTarget(t *testing.T) {
|
||||
// Create a temporary town with routes
|
||||
tmpDir := t.TempDir()
|
||||
beadsDir := filepath.Join(tmpDir, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create mayor/town.json for FindTownRoot
|
||||
mayorDir := filepath.Join(tmpDir, "mayor")
|
||||
if err := os.MkdirAll(mayorDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(mayorDir, "town.json"), []byte("{}"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create routes.jsonl
|
||||
routesContent := `{"prefix": "gt-", "path": "gastown/mayor/rig"}
|
||||
{"prefix": "hq-", "path": "."}
|
||||
`
|
||||
if err := os.WriteFile(filepath.Join(beadsDir, "routes.jsonl"), []byte(routesContent), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create the rig beads directory
|
||||
rigBeadsDir := filepath.Join(tmpDir, "gastown", "mayor", "rig", ".beads")
|
||||
if err := os.MkdirAll(rigBeadsDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fallback := "/fallback/.beads"
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
townRoot string
|
||||
beadID string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "rig-level bead routes to rig",
|
||||
townRoot: tmpDir,
|
||||
beadID: "gt-gastown-polecat-Toast",
|
||||
expected: rigBeadsDir,
|
||||
},
|
||||
{
|
||||
name: "town-level bead routes to town",
|
||||
townRoot: tmpDir,
|
||||
beadID: "hq-mayor",
|
||||
expected: beadsDir,
|
||||
},
|
||||
{
|
||||
name: "unknown prefix falls back",
|
||||
townRoot: tmpDir,
|
||||
beadID: "xx-unknown",
|
||||
expected: fallback,
|
||||
},
|
||||
{
|
||||
name: "empty townRoot falls back",
|
||||
townRoot: "",
|
||||
beadID: "gt-gastown-polecat-Toast",
|
||||
expected: fallback,
|
||||
},
|
||||
{
|
||||
name: "no prefix falls back",
|
||||
townRoot: tmpDir,
|
||||
beadID: "noprefixid",
|
||||
expected: fallback,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := ResolveRoutingTarget(tc.townRoot, tc.beadID, fallback)
|
||||
if result != tc.expected {
|
||||
t.Errorf("ResolveRoutingTarget(%q, %q, %q) = %q, want %q",
|
||||
tc.townRoot, tc.beadID, fallback, result, tc.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureCustomTypes(t *testing.T) {
|
||||
// Reset the in-memory cache before testing
|
||||
ResetEnsuredDirs()
|
||||
|
||||
t.Run("empty beads dir returns error", func(t *testing.T) {
|
||||
err := EnsureCustomTypes("")
|
||||
if err == nil {
|
||||
t.Error("expected error for empty beads dir")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("non-existent beads dir returns error", func(t *testing.T) {
|
||||
err := EnsureCustomTypes("/nonexistent/path/.beads")
|
||||
if err == nil {
|
||||
t.Error("expected error for non-existent beads dir")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("sentinel file triggers cache hit", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
beadsDir := filepath.Join(tmpDir, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create sentinel file
|
||||
sentinelPath := filepath.Join(beadsDir, typesSentinel)
|
||||
if err := os.WriteFile(sentinelPath, []byte("v1\n"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Reset cache to ensure we're testing sentinel detection
|
||||
ResetEnsuredDirs()
|
||||
|
||||
// This should succeed without running bd (sentinel exists)
|
||||
err := EnsureCustomTypes(beadsDir)
|
||||
if err != nil {
|
||||
t.Errorf("expected success with sentinel file, got: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("in-memory cache prevents repeated calls", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
beadsDir := filepath.Join(tmpDir, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create sentinel to avoid bd call
|
||||
sentinelPath := filepath.Join(beadsDir, typesSentinel)
|
||||
if err := os.WriteFile(sentinelPath, []byte("v1\n"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ResetEnsuredDirs()
|
||||
|
||||
// First call
|
||||
if err := EnsureCustomTypes(beadsDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Remove sentinel - second call should still succeed due to in-memory cache
|
||||
os.Remove(sentinelPath)
|
||||
|
||||
if err := EnsureCustomTypes(beadsDir); err != nil {
|
||||
t.Errorf("expected cache hit, got: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeads_getTownRoot(t *testing.T) {
|
||||
// Create a temporary town
|
||||
tmpDir := t.TempDir()
|
||||
mayorDir := filepath.Join(tmpDir, "mayor")
|
||||
if err := os.MkdirAll(mayorDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(mayorDir, "town.json"), []byte("{}"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create nested directory
|
||||
rigDir := filepath.Join(tmpDir, "myrig", "mayor", "rig")
|
||||
if err := os.MkdirAll(rigDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b := New(rigDir)
|
||||
|
||||
// First call should find town root
|
||||
root1 := b.getTownRoot()
|
||||
if root1 != tmpDir {
|
||||
t.Errorf("first getTownRoot() = %q, want %q", root1, tmpDir)
|
||||
}
|
||||
|
||||
// Second call should return cached value
|
||||
root2 := b.getTownRoot()
|
||||
if root2 != root1 {
|
||||
t.Errorf("second getTownRoot() = %q, want cached %q", root2, root1)
|
||||
}
|
||||
|
||||
// Verify searchedRoot flag is set
|
||||
if !b.searchedRoot {
|
||||
t.Error("expected searchedRoot to be true after getTownRoot()")
|
||||
}
|
||||
}
|
||||
@@ -5,9 +5,15 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
gracefulTimeout = 2 * time.Second
|
||||
)
|
||||
|
||||
// BdDaemonInfo represents the status of a single bd daemon instance.
|
||||
type BdDaemonInfo struct {
|
||||
Workspace string `json:"workspace"`
|
||||
@@ -69,21 +75,12 @@ func EnsureBdDaemonHealth(workDir string) string {
|
||||
|
||||
// Check if any daemons need attention
|
||||
needsRestart := false
|
||||
var issues []string
|
||||
|
||||
for _, d := range health.Daemons {
|
||||
switch d.Status {
|
||||
case "healthy":
|
||||
// Good
|
||||
case "version_mismatch":
|
||||
case "version_mismatch", "stale", "unresponsive":
|
||||
needsRestart = true
|
||||
issues = append(issues, fmt.Sprintf("%s: version mismatch", d.Workspace))
|
||||
case "stale":
|
||||
needsRestart = true
|
||||
issues = append(issues, fmt.Sprintf("%s: stale", d.Workspace))
|
||||
case "unresponsive":
|
||||
needsRestart = true
|
||||
issues = append(issues, fmt.Sprintf("%s: unresponsive", d.Workspace))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -112,9 +109,8 @@ func EnsureBdDaemonHealth(workDir string) string {
|
||||
|
||||
// restartBdDaemons restarts all bd daemons.
|
||||
func restartBdDaemons() error { //nolint:unparam // error return kept for future use
|
||||
// Stop all daemons first
|
||||
stopCmd := exec.Command("bd", "daemon", "killall")
|
||||
_ = stopCmd.Run() // Ignore errors - daemons might not be running
|
||||
// Stop all daemons first using pkill to avoid auto-start side effects
|
||||
_ = exec.Command("pkill", "-TERM", "-f", "bd daemon").Run()
|
||||
|
||||
// Give time for cleanup
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
@@ -128,7 +124,121 @@ func restartBdDaemons() error { //nolint:unparam // error return kept for future
|
||||
// StartBdDaemonIfNeeded starts the bd daemon for a specific workspace if not
// running. This is a best-effort operation - failures are logged but don't
// block execution.
//
// Note: "bd daemon start" is the current CLI spelling; the source contained a
// leftover duplicate line using the removed "--start" flag form, which has
// been dropped here.
func StartBdDaemonIfNeeded(workDir string) error {
	cmd := exec.Command("bd", "daemon", "start")
	// Run from the workspace so bd picks up the right database.
	cmd.Dir = workDir
	return cmd.Run()
}
|
||||
|
||||
// StopAllBdProcesses stops all bd daemon and activity processes.
|
||||
// Returns (daemonsKilled, activityKilled, error).
|
||||
// If dryRun is true, returns counts without stopping anything.
|
||||
func StopAllBdProcesses(dryRun, force bool) (int, int, error) {
|
||||
if _, err := exec.LookPath("bd"); err != nil {
|
||||
return 0, 0, nil
|
||||
}
|
||||
|
||||
daemonsBefore := CountBdDaemons()
|
||||
activityBefore := CountBdActivityProcesses()
|
||||
|
||||
if dryRun {
|
||||
return daemonsBefore, activityBefore, nil
|
||||
}
|
||||
|
||||
daemonsKilled, daemonsRemaining := stopBdDaemons(force)
|
||||
activityKilled, activityRemaining := stopBdActivityProcesses(force)
|
||||
|
||||
if daemonsRemaining > 0 {
|
||||
return daemonsKilled, activityKilled, fmt.Errorf("bd daemon shutdown incomplete: %d still running", daemonsRemaining)
|
||||
}
|
||||
if activityRemaining > 0 {
|
||||
return daemonsKilled, activityKilled, fmt.Errorf("bd activity shutdown incomplete: %d still running", activityRemaining)
|
||||
}
|
||||
|
||||
return daemonsKilled, activityKilled, nil
|
||||
}
|
||||
|
||||
// CountBdDaemons reports how many bd daemon processes are currently running.
// It deliberately shells out to pgrep rather than running "bd daemon list",
// which would auto-start a daemon as a side effect during shutdown checks.
func CountBdDaemons() int {
	// pgrep piped to wc -l instead of pgrep -c: macOS pgrep lacks the -c flag.
	out, err := exec.Command("sh", "-c", "pgrep -f 'bd daemon' 2>/dev/null | wc -l").Output()
	if err != nil {
		return 0
	}
	n, _ := strconv.Atoi(strings.TrimSpace(string(out)))
	return n
}
|
||||
|
||||
|
||||
func stopBdDaemons(force bool) (int, int) {
|
||||
before := CountBdDaemons()
|
||||
if before == 0 {
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
// Use pkill directly instead of "bd daemon killall" to avoid triggering
|
||||
// daemon auto-start as a side effect of running bd commands.
|
||||
// Note: pkill -f pattern may match unintended processes in rare cases
|
||||
// (e.g., editors with "bd daemon" in file content). This is acceptable
|
||||
// given the alternative of respawning daemons during shutdown.
|
||||
if force {
|
||||
_ = exec.Command("pkill", "-9", "-f", "bd daemon").Run()
|
||||
} else {
|
||||
_ = exec.Command("pkill", "-TERM", "-f", "bd daemon").Run()
|
||||
time.Sleep(gracefulTimeout)
|
||||
if remaining := CountBdDaemons(); remaining > 0 {
|
||||
_ = exec.Command("pkill", "-9", "-f", "bd daemon").Run()
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
final := CountBdDaemons()
|
||||
killed := before - final
|
||||
if killed < 0 {
|
||||
killed = 0 // Race condition: more processes spawned than we killed
|
||||
}
|
||||
return killed, final
|
||||
}
|
||||
|
||||
// CountBdActivityProcesses reports how many `bd activity` processes are
// currently running, counted via pgrep.
func CountBdActivityProcesses() int {
	// pgrep piped to wc -l instead of pgrep -c: macOS pgrep lacks the -c flag.
	out, err := exec.Command("sh", "-c", "pgrep -f 'bd activity' 2>/dev/null | wc -l").Output()
	if err != nil {
		return 0
	}
	n, _ := strconv.Atoi(strings.TrimSpace(string(out)))
	return n
}
|
||||
|
||||
func stopBdActivityProcesses(force bool) (int, int) {
|
||||
before := CountBdActivityProcesses()
|
||||
if before == 0 {
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
if force {
|
||||
_ = exec.Command("pkill", "-9", "-f", "bd activity").Run()
|
||||
} else {
|
||||
_ = exec.Command("pkill", "-TERM", "-f", "bd activity").Run()
|
||||
time.Sleep(gracefulTimeout)
|
||||
if remaining := CountBdActivityProcesses(); remaining > 0 {
|
||||
_ = exec.Command("pkill", "-9", "-f", "bd activity").Run()
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
after := CountBdActivityProcesses()
|
||||
killed := before - after
|
||||
if killed < 0 {
|
||||
killed = 0 // Race condition: more processes spawned than we killed
|
||||
}
|
||||
return killed, after
|
||||
}
|
||||
|
||||
33
internal/beads/daemon_test.go
Normal file
33
internal/beads/daemon_test.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package beads
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCountBdActivityProcesses(t *testing.T) {
|
||||
count := CountBdActivityProcesses()
|
||||
if count < 0 {
|
||||
t.Errorf("count should be non-negative, got %d", count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCountBdDaemons(t *testing.T) {
|
||||
if _, err := exec.LookPath("bd"); err != nil {
|
||||
t.Skip("bd not installed")
|
||||
}
|
||||
count := CountBdDaemons()
|
||||
if count < 0 {
|
||||
t.Errorf("count should be non-negative, got %d", count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStopAllBdProcesses_DryRun(t *testing.T) {
|
||||
daemonsKilled, activityKilled, err := StopAllBdProcesses(true, false)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if daemonsKilled < 0 || activityKilled < 0 {
|
||||
t.Errorf("counts should be non-negative: daemons=%d, activity=%d", daemonsKilled, activityKilled)
|
||||
}
|
||||
}
|
||||
@@ -528,6 +528,25 @@ type RoleConfig struct {
|
||||
// EnvVars are additional environment variables to set in the session.
|
||||
// Stored as "key=value" pairs.
|
||||
EnvVars map[string]string
|
||||
|
||||
// Health check thresholds - per ZFC, agents control their own stuck detection.
|
||||
// These allow the Deacon's patrol config to be agent-defined rather than hardcoded.
|
||||
|
||||
// PingTimeout is how long to wait for a health check response.
|
||||
// Format: duration string (e.g., "30s", "1m"). Default: 30s.
|
||||
PingTimeout string
|
||||
|
||||
// ConsecutiveFailures is how many failed health checks before force-kill.
|
||||
// Default: 3.
|
||||
ConsecutiveFailures int
|
||||
|
||||
// KillCooldown is the minimum time between force-kills of the same agent.
|
||||
// Format: duration string (e.g., "5m", "10m"). Default: 5m.
|
||||
KillCooldown string
|
||||
|
||||
// StuckThreshold is how long a wisp can be in_progress before considered stuck.
|
||||
// Format: duration string (e.g., "1h", "30m"). Default: 1h.
|
||||
StuckThreshold string
|
||||
}
|
||||
|
||||
// ParseRoleConfig extracts RoleConfig from a role bead's description.
|
||||
@@ -576,6 +595,21 @@ func ParseRoleConfig(description string) *RoleConfig {
|
||||
config.EnvVars[envKey] = envVal
|
||||
hasFields = true
|
||||
}
|
||||
// Health check threshold fields (ZFC: agent-controlled)
|
||||
case "ping_timeout", "ping-timeout", "pingtimeout":
|
||||
config.PingTimeout = value
|
||||
hasFields = true
|
||||
case "consecutive_failures", "consecutive-failures", "consecutivefailures":
|
||||
if n, err := parseIntValue(value); err == nil {
|
||||
config.ConsecutiveFailures = n
|
||||
hasFields = true
|
||||
}
|
||||
case "kill_cooldown", "kill-cooldown", "killcooldown":
|
||||
config.KillCooldown = value
|
||||
hasFields = true
|
||||
case "stuck_threshold", "stuck-threshold", "stuckthreshold":
|
||||
config.StuckThreshold = value
|
||||
hasFields = true
|
||||
}
|
||||
}
|
||||
|
||||
@@ -585,6 +619,13 @@ func ParseRoleConfig(description string) *RoleConfig {
|
||||
return config
|
||||
}
|
||||
|
||||
// parseIntValue parses a decimal integer from s using fmt.Sscanf semantics:
// leading whitespace is skipped and trailing text after the number is ignored.
func parseIntValue(s string) (n int, err error) {
	_, err = fmt.Sscanf(s, "%d", &n)
	return n, err
}
|
||||
|
||||
// FormatRoleConfig formats RoleConfig as a string suitable for a role bead description.
|
||||
// Only non-empty/non-default fields are included.
|
||||
func FormatRoleConfig(config *RoleConfig) string {
|
||||
|
||||
11
internal/beads/force.go
Normal file
11
internal/beads/force.go
Normal file
@@ -0,0 +1,11 @@
|
||||
package beads
|
||||
|
||||
import "strings"
|
||||
|
||||
// NeedsForceForID reports whether a bead ID contains more than one hyphen.
// Recent bd versions infer the prefix from the last hyphen, which can cause
// prefix-mismatch errors for valid system IDs like "st-stockdrop-polecat-nux"
// and "hq-cv-abc"; such IDs are created with --force to honor the explicit ID.
func NeedsForceForID(id string) bool {
	return strings.Count(id, "-") >= 2
}
|
||||
23
internal/beads/force_test.go
Normal file
23
internal/beads/force_test.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package beads
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestNeedsForceForID(t *testing.T) {
|
||||
tests := []struct {
|
||||
id string
|
||||
want bool
|
||||
}{
|
||||
{id: "", want: false},
|
||||
{id: "hq-mayor", want: false},
|
||||
{id: "gt-abc123", want: false},
|
||||
{id: "hq-mayor-role", want: true},
|
||||
{id: "st-stockdrop-polecat-nux", want: true},
|
||||
{id: "hq-cv-abc", want: true},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
if got := NeedsForceForID(tc.id); got != tc.want {
|
||||
t.Fatalf("NeedsForceForID(%q) = %v, want %v", tc.id, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -48,10 +48,10 @@ func (b *Beads) GetOrCreateHandoffBead(role string) (*Issue, error) {
|
||||
return existing, nil
|
||||
}
|
||||
|
||||
// Create new handoff bead
|
||||
// Create new handoff bead (type is deprecated, uses gt:task label via backward compat)
|
||||
issue, err := b.Create(CreateOptions{
|
||||
Title: HandoffBeadTitle(role),
|
||||
Type: "task",
|
||||
Type: "task", // Converted to gt:task label by Create()
|
||||
Priority: 2,
|
||||
Description: "", // Empty until first handoff
|
||||
Actor: role,
|
||||
@@ -107,7 +107,7 @@ func (b *Beads) ClearMail(reason string) (*ClearMailResult, error) {
|
||||
// List all open messages
|
||||
issues, err := b.List(ListOptions{
|
||||
Status: "open",
|
||||
Type: "message",
|
||||
Label: "gt:message",
|
||||
Priority: -1,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -158,6 +158,7 @@ func (b *Beads) AttachMolecule(pinnedBeadID, moleculeID string) (*Issue, error)
|
||||
return nil, fmt.Errorf("fetching pinned bead: %w", err)
|
||||
}
|
||||
|
||||
// Only allow pinned beads (permanent records like role definitions)
|
||||
if issue.Status != StatusPinned {
|
||||
return nil, fmt.Errorf("issue %s is not pinned (status: %s)", pinnedBeadID, issue.Status)
|
||||
}
|
||||
|
||||
@@ -8,6 +8,8 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
)
|
||||
|
||||
// Route represents a prefix-to-path routing rule.
|
||||
@@ -57,7 +59,12 @@ func LoadRoutes(beadsDir string) ([]Route, error) {
|
||||
// If the prefix already exists, it updates the path.
|
||||
func AppendRoute(townRoot string, route Route) error {
|
||||
beadsDir := filepath.Join(townRoot, ".beads")
|
||||
return AppendRouteToDir(beadsDir, route)
|
||||
}
|
||||
|
||||
// AppendRouteToDir appends a route to routes.jsonl in the given beads directory.
|
||||
// If the prefix already exists, it updates the path.
|
||||
func AppendRouteToDir(beadsDir string, route Route) error {
|
||||
// Load existing routes
|
||||
routes, err := LoadRoutes(beadsDir)
|
||||
if err != nil {
|
||||
@@ -106,6 +113,11 @@ func RemoveRoute(townRoot string, prefix string) error {
|
||||
|
||||
// WriteRoutes writes routes to routes.jsonl, overwriting existing content.
|
||||
func WriteRoutes(beadsDir string, routes []Route) error {
|
||||
// Ensure beads directory exists
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
return fmt.Errorf("creating beads directory: %w", err)
|
||||
}
|
||||
|
||||
routesPath := filepath.Join(beadsDir, RoutesFileName)
|
||||
|
||||
file, err := os.Create(routesPath)
|
||||
@@ -145,7 +157,7 @@ func GetPrefixForRig(townRoot, rigName string) string {
|
||||
beadsDir := filepath.Join(townRoot, ".beads")
|
||||
routes, err := LoadRoutes(beadsDir)
|
||||
if err != nil || routes == nil {
|
||||
return "gt" // Default prefix
|
||||
return config.GetRigPrefix(townRoot, rigName)
|
||||
}
|
||||
|
||||
// Look for a route where the path starts with the rig name
|
||||
@@ -158,7 +170,7 @@ func GetPrefixForRig(townRoot, rigName string) string {
|
||||
}
|
||||
}
|
||||
|
||||
return "gt" // Default prefix
|
||||
return config.GetRigPrefix(townRoot, rigName)
|
||||
}
|
||||
|
||||
// FindConflictingPrefixes checks for duplicate prefixes in routes.
|
||||
@@ -185,3 +197,60 @@ func FindConflictingPrefixes(beadsDir string) (map[string][]string, error) {
|
||||
|
||||
return conflicts, nil
|
||||
}
|
||||
|
||||
// ExtractPrefix returns the hyphen-terminated prefix of a bead ID:
// "ap-qtsup.16" -> "ap-", "hq-cv-abc" -> "hq-". The result is "" when no
// valid prefix exists: empty input, no hyphen at all, or a leading hyphen
// (which would make the prefix empty).
func ExtractPrefix(beadID string) string {
	if idx := strings.Index(beadID, "-"); idx > 0 {
		return beadID[:idx+1]
	}
	return ""
}
|
||||
|
||||
// GetRigPathForPrefix returns the rig path for a given bead ID prefix.
|
||||
// The townRoot should be the Gas Town root directory (e.g., ~/gt).
|
||||
// Returns the full absolute path to the rig directory, or empty string if not found.
|
||||
// For town-level beads (path="."), returns townRoot.
|
||||
func GetRigPathForPrefix(townRoot, prefix string) string {
|
||||
beadsDir := filepath.Join(townRoot, ".beads")
|
||||
routes, err := LoadRoutes(beadsDir)
|
||||
if err != nil || routes == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
for _, r := range routes {
|
||||
if r.Prefix == prefix {
|
||||
if r.Path == "." {
|
||||
return townRoot // Town-level beads
|
||||
}
|
||||
return filepath.Join(townRoot, r.Path)
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// ResolveHookDir determines the directory for running bd update on a bead.
|
||||
// Since bd update doesn't support routing or redirects, we must resolve the
|
||||
// actual rig directory from the bead's prefix. hookWorkDir is only used as
|
||||
// a fallback if prefix resolution fails.
|
||||
func ResolveHookDir(townRoot, beadID, hookWorkDir string) string {
|
||||
// Always try prefix resolution first - bd update needs the actual rig dir
|
||||
prefix := ExtractPrefix(beadID)
|
||||
if rigPath := GetRigPathForPrefix(townRoot, prefix); rigPath != "" {
|
||||
return rigPath
|
||||
}
|
||||
// Fallback to hookWorkDir if provided
|
||||
if hookWorkDir != "" {
|
||||
return hookWorkDir
|
||||
}
|
||||
return townRoot
|
||||
}
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
)
|
||||
|
||||
func TestGetPrefixForRig(t *testing.T) {
|
||||
@@ -52,6 +54,170 @@ func TestGetPrefixForRig_NoRoutesFile(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetPrefixForRig_RigsConfigFallback verifies that GetPrefixForRig
// falls back to the prefix stored in mayor/rigs.json when no routes
// file supplies one.
func TestGetPrefixForRig_RigsConfigFallback(t *testing.T) {
	tmpDir := t.TempDir()

	// Write rigs.json with a non-gt prefix
	rigsPath := filepath.Join(tmpDir, "mayor", "rigs.json")
	if err := os.MkdirAll(filepath.Dir(rigsPath), 0755); err != nil {
		t.Fatal(err)
	}

	cfg := &config.RigsConfig{
		Version: config.CurrentRigsVersion,
		Rigs: map[string]config.RigEntry{
			"project_ideas": {
				BeadsConfig: &config.BeadsConfig{Prefix: "pi"},
			},
		},
	}
	if err := config.SaveRigsConfig(rigsPath, cfg); err != nil {
		t.Fatalf("SaveRigsConfig: %v", err)
	}

	// The rig has no routes entry, so the prefix must come from rigs.json.
	result := GetPrefixForRig(tmpDir, "project_ideas")
	if result != "pi" {
		t.Errorf("Expected prefix from rigs config, got %q", result)
	}
}
|
||||
|
||||
func TestExtractPrefix(t *testing.T) {
|
||||
tests := []struct {
|
||||
beadID string
|
||||
expected string
|
||||
}{
|
||||
{"ap-qtsup.16", "ap-"},
|
||||
{"hq-cv-abc", "hq-"},
|
||||
{"gt-mol-xyz", "gt-"},
|
||||
{"bd-123", "bd-"},
|
||||
{"", ""},
|
||||
{"nohyphen", ""},
|
||||
{"-startswithhyphen", ""}, // Leading hyphen = invalid prefix
|
||||
{"-", ""}, // Just hyphen = invalid
|
||||
{"a-", "a-"}, // Trailing hyphen is valid
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.beadID, func(t *testing.T) {
|
||||
result := ExtractPrefix(tc.beadID)
|
||||
if result != tc.expected {
|
||||
t.Errorf("ExtractPrefix(%q) = %q, want %q", tc.beadID, result, tc.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetRigPathForPrefix verifies prefix-to-rig-path resolution against
// a routes.jsonl fixture, including the "." town-level route and the
// unknown/empty-prefix failure cases.
func TestGetRigPathForPrefix(t *testing.T) {
	// Create a temporary directory with routes.jsonl
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatal(err)
	}

	routesContent := `{"prefix": "ap-", "path": "ai_platform/mayor/rig"}
{"prefix": "gt-", "path": "gastown/mayor/rig"}
{"prefix": "hq-", "path": "."}
`
	if err := os.WriteFile(filepath.Join(beadsDir, "routes.jsonl"), []byte(routesContent), 0644); err != nil {
		t.Fatal(err)
	}

	tests := []struct {
		prefix   string
		expected string
	}{
		{"ap-", filepath.Join(tmpDir, "ai_platform/mayor/rig")},
		{"gt-", filepath.Join(tmpDir, "gastown/mayor/rig")},
		{"hq-", tmpDir},  // Town-level beads return townRoot
		{"unknown-", ""}, // Unknown prefix returns empty
		{"", ""},         // Empty prefix returns empty
	}

	for _, tc := range tests {
		t.Run(tc.prefix, func(t *testing.T) {
			result := GetRigPathForPrefix(tmpDir, tc.prefix)
			if result != tc.expected {
				t.Errorf("GetRigPathForPrefix(%q, %q) = %q, want %q", tmpDir, tc.prefix, result, tc.expected)
			}
		})
	}
}
|
||||
|
||||
func TestGetRigPathForPrefix_NoRoutesFile(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
// No routes.jsonl file
|
||||
|
||||
result := GetRigPathForPrefix(tmpDir, "ap-")
|
||||
if result != "" {
|
||||
t.Errorf("Expected empty string when no routes file, got %q", result)
|
||||
}
|
||||
}
|
||||
|
||||
// TestResolveHookDir verifies the resolution order for the bd update
// working directory: routes-derived rig path first, then the supplied
// hookWorkDir, and finally the town root.
func TestResolveHookDir(t *testing.T) {
	// Create a temporary directory with routes.jsonl
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatal(err)
	}

	routesContent := `{"prefix": "ap-", "path": "ai_platform/mayor/rig"}
{"prefix": "hq-", "path": "."}
`
	if err := os.WriteFile(filepath.Join(beadsDir, "routes.jsonl"), []byte(routesContent), 0644); err != nil {
		t.Fatal(err)
	}

	tests := []struct {
		name        string
		beadID      string
		hookWorkDir string
		expected    string
	}{
		{
			name:        "prefix resolution takes precedence over hookWorkDir",
			beadID:      "ap-test",
			hookWorkDir: "/custom/path",
			expected:    filepath.Join(tmpDir, "ai_platform/mayor/rig"),
		},
		{
			name:        "resolves rig path from prefix",
			beadID:      "ap-test",
			hookWorkDir: "",
			expected:    filepath.Join(tmpDir, "ai_platform/mayor/rig"),
		},
		{
			name:        "town-level bead returns townRoot",
			beadID:      "hq-test",
			hookWorkDir: "",
			expected:    tmpDir,
		},
		{
			name:        "unknown prefix uses hookWorkDir as fallback",
			beadID:      "xx-unknown",
			hookWorkDir: "/fallback/path",
			expected:    "/fallback/path",
		},
		{
			name:        "unknown prefix without hookWorkDir falls back to townRoot",
			beadID:      "xx-unknown",
			hookWorkDir: "",
			expected:    tmpDir,
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			result := ResolveHookDir(tmpDir, tc.beadID, tc.hookWorkDir)
			if result != tc.expected {
				t.Errorf("ResolveHookDir(%q, %q, %q) = %q, want %q",
					tmpDir, tc.beadID, tc.hookWorkDir, result, tc.expected)
			}
		})
	}
}
|
||||
|
||||
func TestAgentBeadIDsWithPrefix(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@@ -22,15 +22,12 @@ import (
|
||||
// to return true when only Boot is running.
|
||||
const SessionName = "gt-boot"
|
||||
|
||||
// MarkerFileName is the file that indicates Boot is currently running.
|
||||
// MarkerFileName is the lock file for Boot startup coordination.
|
||||
const MarkerFileName = ".boot-running"
|
||||
|
||||
// StatusFileName stores Boot's last execution status.
|
||||
const StatusFileName = ".boot-status.json"
|
||||
|
||||
// DefaultMarkerTTL is how long a marker is considered valid before it's stale.
|
||||
const DefaultMarkerTTL = 5 * time.Minute
|
||||
|
||||
// Status represents Boot's execution status.
|
||||
type Status struct {
|
||||
Running bool `json:"running"`
|
||||
@@ -43,11 +40,11 @@ type Status struct {
|
||||
|
||||
// Boot manages the Boot watchdog lifecycle.
|
||||
type Boot struct {
|
||||
townRoot string
|
||||
bootDir string // ~/gt/deacon/dogs/boot/
|
||||
deaconDir string // ~/gt/deacon/
|
||||
tmux *tmux.Tmux
|
||||
degraded bool
|
||||
townRoot string
|
||||
bootDir string // ~/gt/deacon/dogs/boot/
|
||||
deaconDir string // ~/gt/deacon/
|
||||
tmux *tmux.Tmux
|
||||
degraded bool
|
||||
}
|
||||
|
||||
// New creates a new Boot manager.
|
||||
@@ -77,22 +74,9 @@ func (b *Boot) statusPath() string {
|
||||
}
|
||||
|
||||
// IsRunning checks if Boot is currently running.
|
||||
// Returns true if marker exists and isn't stale, false otherwise.
|
||||
// Queries tmux directly for observable reality (ZFC principle).
|
||||
func (b *Boot) IsRunning() bool {
|
||||
info, err := os.Stat(b.markerPath())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check if marker is stale (older than TTL)
|
||||
age := time.Since(info.ModTime())
|
||||
if age > DefaultMarkerTTL {
|
||||
// Stale marker - clean it up
|
||||
_ = os.Remove(b.markerPath())
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
return b.IsSessionAlive()
|
||||
}
|
||||
|
||||
// IsSessionAlive checks if the Boot tmux session exists.
|
||||
@@ -105,7 +89,7 @@ func (b *Boot) IsSessionAlive() bool {
|
||||
// Returns error if Boot is already running.
|
||||
func (b *Boot) AcquireLock() error {
|
||||
if b.IsRunning() {
|
||||
return fmt.Errorf("boot is already running (marker exists)")
|
||||
return fmt.Errorf("boot is already running (session exists)")
|
||||
}
|
||||
|
||||
if err := b.EnsureDir(); err != nil {
|
||||
@@ -160,7 +144,8 @@ func (b *Boot) LoadStatus() (*Status, error) {
|
||||
// Spawn starts Boot in a fresh tmux session.
|
||||
// Boot runs the mol-boot-triage molecule and exits when done.
|
||||
// In degraded mode (no tmux), it runs in a subprocess.
|
||||
func (b *Boot) Spawn() error {
|
||||
// The agentOverride parameter allows specifying an agent alias to use instead of the town default.
|
||||
func (b *Boot) Spawn(agentOverride string) error {
|
||||
if b.IsRunning() {
|
||||
return fmt.Errorf("boot is already running")
|
||||
}
|
||||
@@ -170,14 +155,15 @@ func (b *Boot) Spawn() error {
|
||||
return b.spawnDegraded()
|
||||
}
|
||||
|
||||
return b.spawnTmux()
|
||||
return b.spawnTmux(agentOverride)
|
||||
}
|
||||
|
||||
// spawnTmux spawns Boot in a tmux session.
|
||||
func (b *Boot) spawnTmux() error {
|
||||
// Kill any stale session first
|
||||
func (b *Boot) spawnTmux(agentOverride string) error {
|
||||
// Kill any stale session first.
|
||||
// Use KillSessionWithProcesses to ensure all descendant processes are killed.
|
||||
if b.IsSessionAlive() {
|
||||
_ = b.tmux.KillSession(SessionName)
|
||||
_ = b.tmux.KillSessionWithProcesses(SessionName)
|
||||
}
|
||||
|
||||
// Ensure boot directory exists (it should have CLAUDE.md with Boot context)
|
||||
@@ -185,20 +171,32 @@ func (b *Boot) spawnTmux() error {
|
||||
return fmt.Errorf("ensuring boot dir: %w", err)
|
||||
}
|
||||
|
||||
// Create new session in boot directory (not deacon dir) so Claude reads Boot's CLAUDE.md
|
||||
if err := b.tmux.NewSession(SessionName, b.bootDir); err != nil {
|
||||
// Build startup command with optional agent override
|
||||
// The "gt boot triage" prompt tells Boot to immediately start triage (GUPP principle)
|
||||
var startCmd string
|
||||
if agentOverride != "" {
|
||||
var err error
|
||||
startCmd, err = config.BuildAgentStartupCommandWithAgentOverride("boot", "", b.townRoot, "", "gt boot triage", agentOverride)
|
||||
if err != nil {
|
||||
return fmt.Errorf("building startup command with agent override: %w", err)
|
||||
}
|
||||
} else {
|
||||
startCmd = config.BuildAgentStartupCommand("boot", "", b.townRoot, "", "gt boot triage")
|
||||
}
|
||||
|
||||
// Create session with command directly to avoid send-keys race condition.
|
||||
// See: https://github.com/anthropics/gastown/issues/280
|
||||
if err := b.tmux.NewSessionWithCommand(SessionName, b.bootDir, startCmd); err != nil {
|
||||
return fmt.Errorf("creating boot session: %w", err)
|
||||
}
|
||||
|
||||
// Set environment
|
||||
_ = b.tmux.SetEnvironment(SessionName, "GT_ROLE", "boot")
|
||||
_ = b.tmux.SetEnvironment(SessionName, "BD_ACTOR", "deacon-boot")
|
||||
|
||||
// Launch Claude with environment exported inline and initial triage prompt
|
||||
// The "gt boot triage" prompt tells Boot to immediately start triage (GUPP principle)
|
||||
startCmd := config.BuildAgentStartupCommand("boot", "deacon-boot", "", "gt boot triage")
|
||||
if err := b.tmux.SendKeys(SessionName, startCmd); err != nil {
|
||||
return fmt.Errorf("sending startup command: %w", err)
|
||||
// Set environment using centralized AgentEnv for consistency
|
||||
envVars := config.AgentEnv(config.AgentEnvConfig{
|
||||
Role: "boot",
|
||||
TownRoot: b.townRoot,
|
||||
})
|
||||
for k, v := range envVars {
|
||||
_ = b.tmux.SetEnvironment(SessionName, k, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -211,11 +209,14 @@ func (b *Boot) spawnDegraded() error {
|
||||
// This performs the triage logic without a full Claude session
|
||||
cmd := exec.Command("gt", "boot", "triage", "--degraded")
|
||||
cmd.Dir = b.deaconDir
|
||||
cmd.Env = append(os.Environ(),
|
||||
"GT_ROLE=boot",
|
||||
"BD_ACTOR=deacon-boot",
|
||||
"GT_DEGRADED=true",
|
||||
)
|
||||
|
||||
// Use centralized AgentEnv for consistency with tmux mode
|
||||
envVars := config.AgentEnv(config.AgentEnvConfig{
|
||||
Role: "boot",
|
||||
TownRoot: b.townRoot,
|
||||
})
|
||||
cmd.Env = config.EnvForExecCommand(envVars)
|
||||
cmd.Env = append(cmd.Env, "GT_DEGRADED=true")
|
||||
|
||||
// Run async - don't wait for completion
|
||||
return cmd.Start()
|
||||
|
||||
@@ -11,6 +11,8 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
)
|
||||
|
||||
// Filename is the checkpoint file name within the polecat directory.
|
||||
@@ -84,7 +86,7 @@ func Write(polecatDir string, cp *Checkpoint) error {
|
||||
|
||||
// Set session ID from environment if available
|
||||
if cp.SessionID == "" {
|
||||
cp.SessionID = os.Getenv("CLAUDE_SESSION_ID")
|
||||
cp.SessionID = runtime.SessionIDFromEnv()
|
||||
if cp.SessionID == "" {
|
||||
cp.SessionID = fmt.Sprintf("pid-%d", os.Getpid())
|
||||
}
|
||||
@@ -179,9 +181,9 @@ func (cp *Checkpoint) Age() time.Duration {
|
||||
return time.Since(cp.Timestamp)
|
||||
}
|
||||
|
||||
// IsStale returns true if the checkpoint is older than the threshold.
|
||||
// IsStale returns true if the checkpoint is at or older than the threshold.
|
||||
func (cp *Checkpoint) IsStale(threshold time.Duration) bool {
|
||||
return cp.Age() > threshold
|
||||
return cp.Age() >= threshold
|
||||
}
|
||||
|
||||
// Summary returns a concise summary of the checkpoint.
|
||||
|
||||
398
internal/checkpoint/checkpoint_test.go
Normal file
398
internal/checkpoint/checkpoint_test.go
Normal file
@@ -0,0 +1,398 @@
|
||||
package checkpoint
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestPath(t *testing.T) {
|
||||
dir := "/some/polecat/dir"
|
||||
got := Path(dir)
|
||||
want := filepath.Join(dir, Filename)
|
||||
if got != want {
|
||||
t.Errorf("Path(%q) = %q, want %q", dir, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// TestReadWrite round-trips a checkpoint through Write and Read,
// checking field preservation plus the defaults Write fills in
// (Timestamp and SessionID), and that reading a missing checkpoint
// yields nil, nil rather than an error.
func TestReadWrite(t *testing.T) {
	// Create temp directory
	tmpDir := t.TempDir()

	// Test reading non-existent checkpoint returns nil, nil
	cp, err := Read(tmpDir)
	if err != nil {
		t.Fatalf("Read non-existent: unexpected error: %v", err)
	}
	if cp != nil {
		t.Fatal("Read non-existent: expected nil checkpoint")
	}

	// Create and write a checkpoint
	original := &Checkpoint{
		MoleculeID:    "mol-123",
		CurrentStep:   "step-1",
		StepTitle:     "Build the thing",
		ModifiedFiles: []string{"file1.go", "file2.go"},
		LastCommit:    "abc123",
		Branch:        "feature/test",
		HookedBead:    "gt-xyz",
		Notes:         "Some notes",
	}

	if err := Write(tmpDir, original); err != nil {
		t.Fatalf("Write: unexpected error: %v", err)
	}

	// Verify file exists
	path := Path(tmpDir)
	if _, err := os.Stat(path); os.IsNotExist(err) {
		t.Fatal("Write: checkpoint file not created")
	}

	// Read it back
	loaded, err := Read(tmpDir)
	if err != nil {
		t.Fatalf("Read: unexpected error: %v", err)
	}
	if loaded == nil {
		t.Fatal("Read: expected non-nil checkpoint")
	}

	// Verify fields survived the round trip
	if loaded.MoleculeID != original.MoleculeID {
		t.Errorf("MoleculeID = %q, want %q", loaded.MoleculeID, original.MoleculeID)
	}
	if loaded.CurrentStep != original.CurrentStep {
		t.Errorf("CurrentStep = %q, want %q", loaded.CurrentStep, original.CurrentStep)
	}
	if loaded.StepTitle != original.StepTitle {
		t.Errorf("StepTitle = %q, want %q", loaded.StepTitle, original.StepTitle)
	}
	if loaded.Branch != original.Branch {
		t.Errorf("Branch = %q, want %q", loaded.Branch, original.Branch)
	}
	if loaded.HookedBead != original.HookedBead {
		t.Errorf("HookedBead = %q, want %q", loaded.HookedBead, original.HookedBead)
	}
	if loaded.Notes != original.Notes {
		t.Errorf("Notes = %q, want %q", loaded.Notes, original.Notes)
	}
	if len(loaded.ModifiedFiles) != len(original.ModifiedFiles) {
		t.Errorf("ModifiedFiles len = %d, want %d", len(loaded.ModifiedFiles), len(original.ModifiedFiles))
	}

	// Verify timestamp was set (Write defaults it when zero)
	if loaded.Timestamp.IsZero() {
		t.Error("Timestamp should be set by Write")
	}

	// Verify SessionID was set (Write defaults it from env or pid)
	if loaded.SessionID == "" {
		t.Error("SessionID should be set by Write")
	}
}
|
||||
|
||||
func TestWritePreservesTimestamp(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create checkpoint with explicit timestamp
|
||||
ts := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC)
|
||||
cp := &Checkpoint{
|
||||
Timestamp: ts,
|
||||
Notes: "test",
|
||||
}
|
||||
|
||||
if err := Write(tmpDir, cp); err != nil {
|
||||
t.Fatalf("Write: %v", err)
|
||||
}
|
||||
|
||||
loaded, err := Read(tmpDir)
|
||||
if err != nil {
|
||||
t.Fatalf("Read: %v", err)
|
||||
}
|
||||
|
||||
if !loaded.Timestamp.Equal(ts) {
|
||||
t.Errorf("Timestamp = %v, want %v", loaded.Timestamp, ts)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadCorruptedJSON(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
path := Path(tmpDir)
|
||||
|
||||
// Write invalid JSON
|
||||
if err := os.WriteFile(path, []byte("not valid json{"), 0600); err != nil {
|
||||
t.Fatalf("WriteFile: %v", err)
|
||||
}
|
||||
|
||||
_, err := Read(tmpDir)
|
||||
if err == nil {
|
||||
t.Fatal("Read corrupted JSON: expected error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemove(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Write a checkpoint
|
||||
cp := &Checkpoint{Notes: "to be removed"}
|
||||
if err := Write(tmpDir, cp); err != nil {
|
||||
t.Fatalf("Write: %v", err)
|
||||
}
|
||||
|
||||
// Verify it exists
|
||||
path := Path(tmpDir)
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||
t.Fatal("checkpoint should exist before Remove")
|
||||
}
|
||||
|
||||
// Remove it
|
||||
if err := Remove(tmpDir); err != nil {
|
||||
t.Fatalf("Remove: %v", err)
|
||||
}
|
||||
|
||||
// Verify it's gone
|
||||
if _, err := os.Stat(path); !os.IsNotExist(err) {
|
||||
t.Fatal("checkpoint should not exist after Remove")
|
||||
}
|
||||
|
||||
// Remove again should not error
|
||||
if err := Remove(tmpDir); err != nil {
|
||||
t.Fatalf("Remove non-existent: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCapture verifies Capture populates the timestamp and git metadata
// when run inside a git repository; the test skips when no repository
// is found above the working directory.
func TestCapture(t *testing.T) {
	// Use current directory (should be a git repo)
	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("Getwd: %v", err)
	}

	// Find git root by walking up until a .git entry appears
	gitRoot := cwd
	for {
		if _, err := os.Stat(filepath.Join(gitRoot, ".git")); err == nil {
			break
		}
		parent := filepath.Dir(gitRoot)
		if parent == gitRoot {
			// Reached the filesystem root without finding .git.
			t.Skip("not in a git repository")
		}
		gitRoot = parent
	}

	cp, err := Capture(gitRoot)
	if err != nil {
		t.Fatalf("Capture: %v", err)
	}

	// Should have timestamp
	if cp.Timestamp.IsZero() {
		t.Error("Timestamp should be set")
	}

	// Should have branch (we're in a git repo)
	if cp.Branch == "" {
		t.Error("Branch should be set in git repo")
	}

	// Should have last commit
	if cp.LastCommit == "" {
		t.Error("LastCommit should be set in git repo")
	}
}
|
||||
|
||||
func TestWithMolecule(t *testing.T) {
|
||||
cp := &Checkpoint{}
|
||||
result := cp.WithMolecule("mol-abc", "step-1", "Do the thing")
|
||||
|
||||
if result != cp {
|
||||
t.Error("WithMolecule should return same checkpoint")
|
||||
}
|
||||
if cp.MoleculeID != "mol-abc" {
|
||||
t.Errorf("MoleculeID = %q, want %q", cp.MoleculeID, "mol-abc")
|
||||
}
|
||||
if cp.CurrentStep != "step-1" {
|
||||
t.Errorf("CurrentStep = %q, want %q", cp.CurrentStep, "step-1")
|
||||
}
|
||||
if cp.StepTitle != "Do the thing" {
|
||||
t.Errorf("StepTitle = %q, want %q", cp.StepTitle, "Do the thing")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWithHookedBead(t *testing.T) {
|
||||
cp := &Checkpoint{}
|
||||
result := cp.WithHookedBead("gt-123")
|
||||
|
||||
if result != cp {
|
||||
t.Error("WithHookedBead should return same checkpoint")
|
||||
}
|
||||
if cp.HookedBead != "gt-123" {
|
||||
t.Errorf("HookedBead = %q, want %q", cp.HookedBead, "gt-123")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWithNotes(t *testing.T) {
|
||||
cp := &Checkpoint{}
|
||||
result := cp.WithNotes("important context")
|
||||
|
||||
if result != cp {
|
||||
t.Error("WithNotes should return same checkpoint")
|
||||
}
|
||||
if cp.Notes != "important context" {
|
||||
t.Errorf("Notes = %q, want %q", cp.Notes, "important context")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAge(t *testing.T) {
|
||||
cp := &Checkpoint{
|
||||
Timestamp: time.Now().Add(-5 * time.Minute),
|
||||
}
|
||||
|
||||
age := cp.Age()
|
||||
if age < 4*time.Minute || age > 6*time.Minute {
|
||||
t.Errorf("Age = %v, expected ~5 minutes", age)
|
||||
}
|
||||
}
|
||||
|
||||
// TestIsStale verifies the staleness predicate around the threshold
// boundary: IsStale treats age >= threshold as stale.
func TestIsStale(t *testing.T) {
	tests := []struct {
		name      string
		age       time.Duration
		threshold time.Duration
		want      bool
	}{
		{"fresh", 5 * time.Minute, 1 * time.Hour, false},
		{"stale", 2 * time.Hour, 1 * time.Hour, true},
		{"exactly threshold", 1 * time.Hour, 1 * time.Hour, true}, // timing race: by the time IsStale runs, age > threshold
		{"just over threshold", 1*time.Hour + time.Second, 1 * time.Hour, true},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Back-date the timestamp so the checkpoint's age is tt.age.
			cp := &Checkpoint{
				Timestamp: time.Now().Add(-tt.age),
			}
			got := cp.IsStale(tt.threshold)
			if got != tt.want {
				t.Errorf("IsStale(%v) = %v, want %v", tt.threshold, got, tt.want)
			}
		})
	}
}
|
||||
|
||||
// TestSummary verifies the human-readable summary string for empty,
// partially populated, and fully populated checkpoints.
func TestSummary(t *testing.T) {
	tests := []struct {
		name string
		cp   *Checkpoint
		want string
	}{
		{
			name: "empty",
			cp:   &Checkpoint{},
			want: "no significant state",
		},
		{
			name: "molecule only",
			cp:   &Checkpoint{MoleculeID: "mol-123"},
			want: "molecule mol-123",
		},
		{
			name: "molecule with step",
			cp:   &Checkpoint{MoleculeID: "mol-123", CurrentStep: "step-1"},
			want: "molecule mol-123, step step-1",
		},
		{
			name: "hooked bead",
			cp:   &Checkpoint{HookedBead: "gt-abc"},
			want: "hooked: gt-abc",
		},
		{
			name: "modified files",
			cp:   &Checkpoint{ModifiedFiles: []string{"a.go", "b.go"}},
			want: "2 modified files",
		},
		{
			name: "branch",
			cp:   &Checkpoint{Branch: "feature/test"},
			want: "branch: feature/test",
		},
		{
			name: "full",
			cp: &Checkpoint{
				MoleculeID:    "mol-123",
				CurrentStep:   "step-1",
				HookedBead:    "gt-abc",
				ModifiedFiles: []string{"a.go"},
				Branch:        "main",
			},
			want: "molecule mol-123, step step-1, hooked: gt-abc, 1 modified files, branch: main",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := tt.cp.Summary()
			if got != tt.want {
				t.Errorf("Summary() = %q, want %q", got, tt.want)
			}
		})
	}
}
|
||||
|
||||
// TestCheckpointJSONRoundtrip verifies a fully populated Checkpoint
// survives json.Marshal followed by json.Unmarshal with every field
// intact.
func TestCheckpointJSONRoundtrip(t *testing.T) {
	original := &Checkpoint{
		MoleculeID:    "mol-test",
		CurrentStep:   "step-2",
		StepTitle:     "Testing JSON",
		ModifiedFiles: []string{"x.go", "y.go", "z.go"},
		LastCommit:    "deadbeef",
		Branch:        "develop",
		HookedBead:    "gt-roundtrip",
		Timestamp:     time.Date(2025, 6, 15, 10, 30, 0, 0, time.UTC),
		SessionID:     "session-123",
		Notes:         "Testing round trip",
	}

	data, err := json.Marshal(original)
	if err != nil {
		t.Fatalf("Marshal: %v", err)
	}

	var loaded Checkpoint
	if err := json.Unmarshal(data, &loaded); err != nil {
		t.Fatalf("Unmarshal: %v", err)
	}

	// Field-by-field comparison of the decoded copy.
	if loaded.MoleculeID != original.MoleculeID {
		t.Errorf("MoleculeID mismatch")
	}
	if loaded.CurrentStep != original.CurrentStep {
		t.Errorf("CurrentStep mismatch")
	}
	if loaded.StepTitle != original.StepTitle {
		t.Errorf("StepTitle mismatch")
	}
	if loaded.Branch != original.Branch {
		t.Errorf("Branch mismatch")
	}
	if loaded.HookedBead != original.HookedBead {
		t.Errorf("HookedBead mismatch")
	}
	if loaded.SessionID != original.SessionID {
		t.Errorf("SessionID mismatch")
	}
	if loaded.Notes != original.Notes {
		t.Errorf("Notes mismatch")
	}
	// time.Time comparison uses Equal, not == (monotonic clock field).
	if !loaded.Timestamp.Equal(original.Timestamp) {
		t.Errorf("Timestamp mismatch")
	}
	if len(loaded.ModifiedFiles) != len(original.ModifiedFiles) {
		t.Errorf("ModifiedFiles length mismatch")
	}
}
|
||||
@@ -3,13 +3,42 @@
|
||||
"beads@beads-marketplace": false
|
||||
},
|
||||
"hooks": {
|
||||
"PreToolUse": [
|
||||
{
|
||||
"matcher": "Bash(gh pr create*)",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/.local/bin:$PATH\" && gt tap guard pr-workflow"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": "Bash(git checkout -b*)",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/.local/bin:$PATH\" && gt tap guard pr-workflow"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": "Bash(git switch -c*)",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/.local/bin:$PATH\" && gt tap guard pr-workflow"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"SessionStart": [
|
||||
{
|
||||
"matcher": "",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt prime && gt mail check --inject && gt nudge deacon session-started"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt prime --hook && gt mail check --inject && gt nudge deacon session-started"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -20,7 +49,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt prime"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt prime --hook"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -31,7 +60,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt mail check --inject"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt mail check --inject"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -42,7 +71,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt costs record"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt costs record"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -3,13 +3,42 @@
|
||||
"beads@beads-marketplace": false
|
||||
},
|
||||
"hooks": {
|
||||
"PreToolUse": [
|
||||
{
|
||||
"matcher": "Bash(gh pr create*)",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/.local/bin:$PATH\" && gt tap guard pr-workflow"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": "Bash(git checkout -b*)",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/.local/bin:$PATH\" && gt tap guard pr-workflow"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": "Bash(git switch -c*)",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/.local/bin:$PATH\" && gt tap guard pr-workflow"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"SessionStart": [
|
||||
{
|
||||
"matcher": "",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt prime && gt nudge deacon session-started"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt prime --hook && gt nudge deacon session-started"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -20,7 +49,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt prime"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt prime --hook"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -31,7 +60,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt mail check --inject"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt mail check --inject"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -42,7 +71,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "gt costs record"
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt costs record"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -27,7 +27,7 @@ const (
|
||||
// RoleTypeFor returns the RoleType for a given role name.
|
||||
func RoleTypeFor(role string) RoleType {
|
||||
switch role {
|
||||
case "polecat", "witness", "refinery":
|
||||
case "polecat", "witness", "refinery", "deacon":
|
||||
return Autonomous
|
||||
default:
|
||||
return Interactive
|
||||
@@ -35,20 +35,27 @@ func RoleTypeFor(role string) RoleType {
|
||||
}
|
||||
|
||||
// EnsureSettings ensures .claude/settings.json exists in the given directory.
|
||||
// For worktrees, we use sparse checkout to exclude source repo's .claude/ directory,
|
||||
// so our settings.json is the only one Claude Code sees.
|
||||
func EnsureSettings(workDir string, roleType RoleType) error {
|
||||
return EnsureSettingsAt(workDir, roleType, ".claude", "settings.json")
|
||||
}
|
||||
|
||||
// EnsureSettingsAt ensures a settings file exists at a custom directory/file.
|
||||
// If the file doesn't exist, it copies the appropriate template based on role type.
|
||||
// If the file already exists, it's left unchanged.
|
||||
func EnsureSettings(workDir string, roleType RoleType) error {
|
||||
claudeDir := filepath.Join(workDir, ".claude")
|
||||
settingsPath := filepath.Join(claudeDir, "settings.json")
|
||||
func EnsureSettingsAt(workDir string, roleType RoleType, settingsDir, settingsFile string) error {
|
||||
claudeDir := filepath.Join(workDir, settingsDir)
|
||||
settingsPath := filepath.Join(claudeDir, settingsFile)
|
||||
|
||||
// If settings already exist, don't overwrite
|
||||
if _, err := os.Stat(settingsPath); err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create .claude directory if needed
|
||||
// Create settings directory if needed
|
||||
if err := os.MkdirAll(claudeDir, 0755); err != nil {
|
||||
return fmt.Errorf("creating .claude directory: %w", err)
|
||||
return fmt.Errorf("creating settings directory: %w", err)
|
||||
}
|
||||
|
||||
// Select template based on role type
|
||||
@@ -78,3 +85,8 @@ func EnsureSettings(workDir string, roleType RoleType) error {
|
||||
func EnsureSettingsForRole(workDir, role string) error {
|
||||
return EnsureSettings(workDir, RoleTypeFor(role))
|
||||
}
|
||||
|
||||
// EnsureSettingsForRoleAt is a convenience function that combines RoleTypeFor and EnsureSettingsAt.
|
||||
func EnsureSettingsForRoleAt(workDir, role, settingsDir, settingsFile string) error {
|
||||
return EnsureSettingsAt(workDir, RoleTypeFor(role), settingsDir, settingsFile)
|
||||
}
|
||||
|
||||
@@ -264,6 +264,25 @@ Examples:
|
||||
RunE: runAccountStatus,
|
||||
}
|
||||
|
||||
// accountSwitchCmd implements "gt account switch <handle>": it swaps
// the ~/.claude symlink to the target account's config_dir and records
// the new default account. Execution is delegated to runAccountSwitch.
var accountSwitchCmd = &cobra.Command{
	Use:   "switch <handle>",
	Short: "Switch to a different account",
	Long: `Switch the active Claude Code account.

This command:
1. Backs up ~/.claude to the current account's config_dir (if needed)
2. Creates a symlink from ~/.claude to the target account's config_dir
3. Updates the default account in accounts.json

After switching, you must restart Claude Code for the change to take effect.

Examples:
  gt account switch work      # Switch to work account
  gt account switch personal  # Switch to personal account`,
	Args: cobra.ExactArgs(1),
	RunE: runAccountSwitch,
}
|
||||
|
||||
func runAccountStatus(cmd *cobra.Command, args []string) error {
|
||||
townRoot, err := workspace.FindFromCwd()
|
||||
if err != nil {
|
||||
@@ -318,6 +337,122 @@ func runAccountStatus(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// runAccountSwitch implements `gt account switch <handle>`.
//
// It repoints the ~/.claude symlink at the target account's config_dir and
// records the target as the new default in accounts.json. If ~/.claude is
// currently a real directory (not a symlink), it is first moved into the
// current (or default) account's config_dir so no data is lost.
//
// NOTE(review): the `err` from os.Lstat below is deliberately reused by the
// later `if err == nil && ...` checks — do not shadow it.
func runAccountSwitch(cmd *cobra.Command, args []string) error {
	targetHandle := args[0]

	townRoot, err := workspace.FindFromCwd()
	if err != nil {
		return fmt.Errorf("finding town root: %w", err)
	}

	accountsPath := constants.MayorAccountsPath(townRoot)
	cfg, err := config.LoadAccountsConfig(accountsPath)
	if err != nil {
		return fmt.Errorf("loading accounts config: %w", err)
	}

	// Check if target account exists
	targetAcct := cfg.GetAccount(targetHandle)
	if targetAcct == nil {
		// List available accounts (sorted for a deterministic message)
		var handles []string
		for h := range cfg.Accounts {
			handles = append(handles, h)
		}
		sort.Strings(handles)
		return fmt.Errorf("account '%s' not found. Available accounts: %v", targetHandle, handles)
	}

	// Get ~/.claude path
	home, err := os.UserHomeDir()
	if err != nil {
		return fmt.Errorf("getting home directory: %w", err)
	}
	claudeDir := home + "/.claude"

	// Check current state of ~/.claude (Lstat so a symlink is not followed)
	fileInfo, err := os.Lstat(claudeDir)
	if err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("checking ~/.claude: %w", err)
	}

	// Determine current account (if any) by checking symlink target
	var currentHandle string
	if err == nil && fileInfo.Mode()&os.ModeSymlink != 0 {
		// It's a symlink - find which account it points to
		linkTarget, err := os.Readlink(claudeDir)
		if err != nil {
			return fmt.Errorf("reading symlink: %w", err)
		}
		// NOTE(review): exact string comparison — assumes ConfigDir is stored
		// in the same form the symlink was created with (no Clean/EvalSymlinks).
		for h, acct := range cfg.Accounts {
			if acct.ConfigDir == linkTarget {
				currentHandle = h
				break
			}
		}
	}

	// Check if already on target account
	if currentHandle == targetHandle {
		fmt.Printf("Already on account '%s'\n", targetHandle)
		return nil
	}

	// Handle the case where ~/.claude is a real directory (not a symlink)
	if err == nil && fileInfo.Mode()&os.ModeSymlink == 0 && fileInfo.IsDir() {
		// It's a real directory - need to move it
		// Try to find which account it belongs to based on default
		if currentHandle == "" && cfg.Default != "" {
			currentHandle = cfg.Default
		}

		if currentHandle != "" {
			currentAcct := cfg.GetAccount(currentHandle)
			if currentAcct != nil {
				// Move ~/.claude to the current account's config_dir
				fmt.Printf("Moving ~/.claude to %s...\n", currentAcct.ConfigDir)

				// Remove the target config dir if it exists (it might be empty from account add)
				if _, err := os.Stat(currentAcct.ConfigDir); err == nil {
					if err := os.RemoveAll(currentAcct.ConfigDir); err != nil {
						return fmt.Errorf("removing existing config dir: %w", err)
					}
				}

				if err := os.Rename(claudeDir, currentAcct.ConfigDir); err != nil {
					return fmt.Errorf("moving ~/.claude to %s: %w", currentAcct.ConfigDir, err)
				}
			}
		} else {
			return fmt.Errorf("~/.claude is a directory but no default account is set. Please set a default account first with 'gt account default <handle>'")
		}
	} else if err == nil && fileInfo.Mode()&os.ModeSymlink != 0 {
		// It's a symlink - remove it so we can create a new one
		if err := os.Remove(claudeDir); err != nil {
			return fmt.Errorf("removing existing symlink: %w", err)
		}
	}
	// If ~/.claude doesn't exist, that's fine - we'll create the symlink

	// Create symlink to target account
	if err := os.Symlink(targetAcct.ConfigDir, claudeDir); err != nil {
		return fmt.Errorf("creating symlink to %s: %w", targetAcct.ConfigDir, err)
	}

	// Update default account
	cfg.Default = targetHandle
	if err := config.SaveAccountsConfig(accountsPath, cfg); err != nil {
		return fmt.Errorf("saving accounts config: %w", err)
	}

	fmt.Printf("Switched to account '%s'\n", targetHandle)
	fmt.Printf("~/.claude -> %s\n", targetAcct.ConfigDir)
	fmt.Println()
	fmt.Println(style.Warning.Render("⚠️ Restart Claude Code for the change to take effect"))

	return nil
}
|
||||
|
||||
func init() {
|
||||
// Add flags
|
||||
accountListCmd.Flags().BoolVar(&accountJSON, "json", false, "Output as JSON")
|
||||
@@ -330,6 +465,7 @@ func init() {
|
||||
accountCmd.AddCommand(accountAddCmd)
|
||||
accountCmd.AddCommand(accountDefaultCmd)
|
||||
accountCmd.AddCommand(accountStatusCmd)
|
||||
accountCmd.AddCommand(accountSwitchCmd)
|
||||
|
||||
rootCmd.AddCommand(accountCmd)
|
||||
}
|
||||
|
||||
313
internal/cmd/account_test.go
Normal file
313
internal/cmd/account_test.go
Normal file
@@ -0,0 +1,313 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
)
|
||||
|
||||
// setupTestTownForAccount creates a minimal Gas Town workspace with accounts.
|
||||
func setupTestTownForAccount(t *testing.T) (townRoot string, accountsDir string) {
|
||||
t.Helper()
|
||||
|
||||
townRoot = t.TempDir()
|
||||
|
||||
// Create mayor directory with required files
|
||||
mayorDir := filepath.Join(townRoot, "mayor")
|
||||
if err := os.MkdirAll(mayorDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir mayor: %v", err)
|
||||
}
|
||||
|
||||
// Create town.json
|
||||
townConfig := &config.TownConfig{
|
||||
Type: "town",
|
||||
Version: config.CurrentTownVersion,
|
||||
Name: "test-town",
|
||||
PublicName: "Test Town",
|
||||
CreatedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
}
|
||||
townConfigPath := filepath.Join(mayorDir, "town.json")
|
||||
if err := config.SaveTownConfig(townConfigPath, townConfig); err != nil {
|
||||
t.Fatalf("save town.json: %v", err)
|
||||
}
|
||||
|
||||
// Create empty rigs.json
|
||||
rigsConfig := &config.RigsConfig{
|
||||
Version: 1,
|
||||
Rigs: make(map[string]config.RigEntry),
|
||||
}
|
||||
rigsPath := filepath.Join(mayorDir, "rigs.json")
|
||||
if err := config.SaveRigsConfig(rigsPath, rigsConfig); err != nil {
|
||||
t.Fatalf("save rigs.json: %v", err)
|
||||
}
|
||||
|
||||
// Create accounts directory
|
||||
accountsDir = filepath.Join(t.TempDir(), "claude-accounts")
|
||||
if err := os.MkdirAll(accountsDir, 0755); err != nil {
|
||||
t.Fatalf("mkdir accounts: %v", err)
|
||||
}
|
||||
|
||||
return townRoot, accountsDir
|
||||
}
|
||||
|
||||
func setTestHome(t *testing.T, fakeHome string) {
|
||||
t.Helper()
|
||||
|
||||
t.Setenv("HOME", fakeHome)
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
return
|
||||
}
|
||||
|
||||
t.Setenv("USERPROFILE", fakeHome)
|
||||
|
||||
drive := filepath.VolumeName(fakeHome)
|
||||
if drive == "" {
|
||||
return
|
||||
}
|
||||
|
||||
t.Setenv("HOMEDRIVE", drive)
|
||||
t.Setenv("HOMEPATH", strings.TrimPrefix(fakeHome, drive))
|
||||
}
|
||||
|
||||
// TestAccountSwitch exercises runAccountSwitch end-to-end against a fake home
// directory: a normal switch, a no-op switch, an unknown handle, and the
// migration of a real ~/.claude directory into the current account's
// config_dir. Each subtest builds its own town via setupTestTownForAccount.
func TestAccountSwitch(t *testing.T) {
	t.Run("switch between accounts", func(t *testing.T) {
		townRoot, accountsDir := setupTestTownForAccount(t)

		// Create fake home directory for ~/.claude
		fakeHome := t.TempDir()
		setTestHome(t, fakeHome)

		// Create account config directories
		workConfigDir := filepath.Join(accountsDir, "work")
		personalConfigDir := filepath.Join(accountsDir, "personal")
		if err := os.MkdirAll(workConfigDir, 0755); err != nil {
			t.Fatalf("mkdir work config: %v", err)
		}
		if err := os.MkdirAll(personalConfigDir, 0755); err != nil {
			t.Fatalf("mkdir personal config: %v", err)
		}

		// Create accounts.json with two accounts
		accountsPath := filepath.Join(townRoot, "mayor", "accounts.json")
		accountsCfg := config.NewAccountsConfig()
		accountsCfg.Accounts["work"] = config.Account{
			Email:     "steve@work.com",
			ConfigDir: workConfigDir,
		}
		accountsCfg.Accounts["personal"] = config.Account{
			Email:     "steve@personal.com",
			ConfigDir: personalConfigDir,
		}
		accountsCfg.Default = "work"
		if err := config.SaveAccountsConfig(accountsPath, accountsCfg); err != nil {
			t.Fatalf("save accounts.json: %v", err)
		}

		// Create initial symlink to work account
		claudeDir := filepath.Join(fakeHome, ".claude")
		if err := os.Symlink(workConfigDir, claudeDir); err != nil {
			t.Fatalf("create symlink: %v", err)
		}

		// Change to town root (runAccountSwitch locates the town from cwd)
		originalWd, _ := os.Getwd()
		defer os.Chdir(originalWd)
		if err := os.Chdir(townRoot); err != nil {
			t.Fatalf("chdir: %v", err)
		}

		// Run switch to personal
		cmd := &cobra.Command{}
		err := runAccountSwitch(cmd, []string{"personal"})
		if err != nil {
			t.Fatalf("runAccountSwitch failed: %v", err)
		}

		// Verify symlink points to personal
		target, err := os.Readlink(claudeDir)
		if err != nil {
			t.Fatalf("readlink: %v", err)
		}
		if target != personalConfigDir {
			t.Errorf("symlink target = %q, want %q", target, personalConfigDir)
		}

		// Verify default was updated
		loadedCfg, err := config.LoadAccountsConfig(accountsPath)
		if err != nil {
			t.Fatalf("load accounts: %v", err)
		}
		if loadedCfg.Default != "personal" {
			t.Errorf("default = %q, want 'personal'", loadedCfg.Default)
		}
	})

	t.Run("already on target account", func(t *testing.T) {
		townRoot, accountsDir := setupTestTownForAccount(t)

		fakeHome := t.TempDir()
		setTestHome(t, fakeHome)

		workConfigDir := filepath.Join(accountsDir, "work")
		if err := os.MkdirAll(workConfigDir, 0755); err != nil {
			t.Fatalf("mkdir work config: %v", err)
		}

		accountsPath := filepath.Join(townRoot, "mayor", "accounts.json")
		accountsCfg := config.NewAccountsConfig()
		accountsCfg.Accounts["work"] = config.Account{
			Email:     "steve@work.com",
			ConfigDir: workConfigDir,
		}
		accountsCfg.Default = "work"
		if err := config.SaveAccountsConfig(accountsPath, accountsCfg); err != nil {
			t.Fatalf("save accounts.json: %v", err)
		}

		// Create symlink already pointing to work
		claudeDir := filepath.Join(fakeHome, ".claude")
		if err := os.Symlink(workConfigDir, claudeDir); err != nil {
			t.Fatalf("create symlink: %v", err)
		}

		originalWd, _ := os.Getwd()
		defer os.Chdir(originalWd)
		if err := os.Chdir(townRoot); err != nil {
			t.Fatalf("chdir: %v", err)
		}

		// Switch to work (should be no-op)
		cmd := &cobra.Command{}
		err := runAccountSwitch(cmd, []string{"work"})
		if err != nil {
			t.Fatalf("runAccountSwitch failed: %v", err)
		}

		// Symlink should still point to work
		target, err := os.Readlink(claudeDir)
		if err != nil {
			t.Fatalf("readlink: %v", err)
		}
		if target != workConfigDir {
			t.Errorf("symlink target = %q, want %q", target, workConfigDir)
		}
	})

	t.Run("nonexistent account", func(t *testing.T) {
		townRoot, accountsDir := setupTestTownForAccount(t)

		fakeHome := t.TempDir()
		setTestHome(t, fakeHome)

		workConfigDir := filepath.Join(accountsDir, "work")
		if err := os.MkdirAll(workConfigDir, 0755); err != nil {
			t.Fatalf("mkdir work config: %v", err)
		}

		accountsPath := filepath.Join(townRoot, "mayor", "accounts.json")
		accountsCfg := config.NewAccountsConfig()
		accountsCfg.Accounts["work"] = config.Account{
			Email:     "steve@work.com",
			ConfigDir: workConfigDir,
		}
		accountsCfg.Default = "work"
		if err := config.SaveAccountsConfig(accountsPath, accountsCfg); err != nil {
			t.Fatalf("save accounts.json: %v", err)
		}

		originalWd, _ := os.Getwd()
		defer os.Chdir(originalWd)
		if err := os.Chdir(townRoot); err != nil {
			t.Fatalf("chdir: %v", err)
		}

		// Switch to nonexistent account
		cmd := &cobra.Command{}
		err := runAccountSwitch(cmd, []string{"nonexistent"})
		if err == nil {
			t.Fatal("expected error for nonexistent account")
		}
	})

	t.Run("real directory gets moved", func(t *testing.T) {
		townRoot, accountsDir := setupTestTownForAccount(t)

		fakeHome := t.TempDir()
		setTestHome(t, fakeHome)

		workConfigDir := filepath.Join(accountsDir, "work")
		personalConfigDir := filepath.Join(accountsDir, "personal")
		// Don't create workConfigDir - it will be created by moving ~/.claude
		if err := os.MkdirAll(personalConfigDir, 0755); err != nil {
			t.Fatalf("mkdir personal config: %v", err)
		}

		accountsPath := filepath.Join(townRoot, "mayor", "accounts.json")
		accountsCfg := config.NewAccountsConfig()
		accountsCfg.Accounts["work"] = config.Account{
			Email:     "steve@work.com",
			ConfigDir: workConfigDir,
		}
		accountsCfg.Accounts["personal"] = config.Account{
			Email:     "steve@personal.com",
			ConfigDir: personalConfigDir,
		}
		accountsCfg.Default = "work"
		if err := config.SaveAccountsConfig(accountsPath, accountsCfg); err != nil {
			t.Fatalf("save accounts.json: %v", err)
		}

		// Create ~/.claude as a real directory with a marker file
		claudeDir := filepath.Join(fakeHome, ".claude")
		if err := os.MkdirAll(claudeDir, 0755); err != nil {
			t.Fatalf("mkdir .claude: %v", err)
		}
		markerFile := filepath.Join(claudeDir, "marker.txt")
		if err := os.WriteFile(markerFile, []byte("test"), 0644); err != nil {
			t.Fatalf("write marker: %v", err)
		}

		originalWd, _ := os.Getwd()
		defer os.Chdir(originalWd)
		if err := os.Chdir(townRoot); err != nil {
			t.Fatalf("chdir: %v", err)
		}

		// Switch to personal
		cmd := &cobra.Command{}
		err := runAccountSwitch(cmd, []string{"personal"})
		if err != nil {
			t.Fatalf("runAccountSwitch failed: %v", err)
		}

		// Verify ~/.claude is now a symlink to personal
		fileInfo, err := os.Lstat(claudeDir)
		if err != nil {
			t.Fatalf("lstat .claude: %v", err)
		}
		if fileInfo.Mode()&os.ModeSymlink == 0 {
			t.Error("~/.claude is not a symlink")
		}

		target, err := os.Readlink(claudeDir)
		if err != nil {
			t.Fatalf("readlink: %v", err)
		}
		if target != personalConfigDir {
			t.Errorf("symlink target = %q, want %q", target, personalConfigDir)
		}

		// Verify original content was moved to work config dir
		movedMarker := filepath.Join(workConfigDir, "marker.txt")
		if _, err := os.Stat(movedMarker); err != nil {
			t.Errorf("marker file not moved to work config dir: %v", err)
		}
	})
}
|
||||
187
internal/cmd/bead.go
Normal file
187
internal/cmd/bead.go
Normal file
@@ -0,0 +1,187 @@
|
||||
package cmd
|
||||
|
||||
import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
	"strconv"
	"strings"

	"github.com/spf13/cobra"
	"github.com/steveyegge/gastown/internal/style"
)
|
||||
|
||||
// beadCmd is the parent `gt bead` command; all functionality lives in its
// subcommands (move, show, read).
var beadCmd = &cobra.Command{
	Use:     "bead",
	Aliases: []string{"bd"},
	GroupID: GroupWork,
	Short:   "Bead management utilities",
	Long:    `Utilities for managing beads across repositories.`,
}
|
||||
|
||||
// beadMoveCmd implements `gt bead move <bead-id> <target-prefix>`; the work
// is done by runBeadMove.
var beadMoveCmd = &cobra.Command{
	Use:   "move <bead-id> <target-prefix>",
	Short: "Move a bead to a different repository",
	Long: `Move a bead from one repository to another.

This creates a copy of the bead in the target repository (with the new prefix)
and closes the source bead with a reference to the new location.

The target prefix determines which repository receives the bead.
Common prefixes: gt- (gastown), bd- (beads), hq- (headquarters)

Examples:
  gt bead move gt-abc123 bd-   # Move gt-abc123 to beads repo as bd-*
  gt bead move hq-xyz bd-      # Move hq-xyz to beads repo
  gt bead move bd-123 gt-      # Move bd-123 to gastown repo`,
	Args: cobra.ExactArgs(2),
	RunE: runBeadMove,
}
|
||||
|
||||
// beadMoveDryRun is set by the --dry-run/-n flag on `gt bead move`.
var beadMoveDryRun bool
|
||||
|
||||
// beadShowCmd forwards to runShow so `gt bead show` behaves exactly like
// `gt show`. Flag parsing is disabled so unknown flags pass through to bd.
var beadShowCmd = &cobra.Command{
	Use:   "show <bead-id> [flags]",
	Short: "Show details of a bead",
	Long: `Displays the full details of a bead by ID.

This is an alias for 'gt show'. All bd show flags are supported.

Examples:
  gt bead show gt-abc123         # Show a gastown issue
  gt bead show hq-xyz789         # Show a town-level bead
  gt bead show bd-def456         # Show a beads issue
  gt bead show gt-abc123 --json  # Output as JSON`,
	DisableFlagParsing: true, // Pass all flags through to bd show
	RunE: func(cmd *cobra.Command, args []string) error {
		return runShow(cmd, args)
	},
}
|
||||
|
||||
// beadReadCmd is a second alias for runShow, mirroring beadShowCmd under the
// name `read`. Flag parsing is disabled so unknown flags pass through to bd.
var beadReadCmd = &cobra.Command{
	Use:   "read <bead-id> [flags]",
	Short: "Show details of a bead (alias for 'show')",
	Long: `Displays the full details of a bead by ID.

This is an alias for 'gt bead show'. All bd show flags are supported.

Examples:
  gt bead read gt-abc123         # Show a gastown issue
  gt bead read hq-xyz789         # Show a town-level bead
  gt bead read bd-def456         # Show a beads issue
  gt bead read gt-abc123 --json  # Output as JSON`,
	DisableFlagParsing: true, // Pass all flags through to bd show
	RunE: func(cmd *cobra.Command, args []string) error {
		return runShow(cmd, args)
	},
}
|
||||
|
||||
func init() {
	// Register the --dry-run flag and wire the bead subcommands into the
	// root command.
	beadMoveCmd.Flags().BoolVarP(&beadMoveDryRun, "dry-run", "n", false, "Show what would be done")
	beadCmd.AddCommand(beadMoveCmd)
	beadCmd.AddCommand(beadShowCmd)
	beadCmd.AddCommand(beadReadCmd)
	rootCmd.AddCommand(beadCmd)
}
|
||||
|
||||
// moveBeadInfo holds the essential fields we need to copy when moving beads.
// The JSON tags mirror the objects emitted by `bd show --json`.
type moveBeadInfo struct {
	ID          string   `json:"id"`
	Title       string   `json:"title"`
	Type        string   `json:"issue_type"`
	Priority    int      `json:"priority"`
	Description string   `json:"description"`
	Labels      []string `json:"labels"`
	Assignee    string   `json:"assignee"`
	Status      string   `json:"status"` // "closed" beads are refused by runBeadMove
}
|
||||
|
||||
func runBeadMove(cmd *cobra.Command, args []string) error {
|
||||
sourceID := args[0]
|
||||
targetPrefix := args[1]
|
||||
|
||||
// Normalize prefix (ensure it ends with -)
|
||||
if !strings.HasSuffix(targetPrefix, "-") {
|
||||
targetPrefix = targetPrefix + "-"
|
||||
}
|
||||
|
||||
// Get source bead details
|
||||
showCmd := exec.Command("bd", "show", sourceID, "--json")
|
||||
output, err := showCmd.Output()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting bead %s: %w", sourceID, err)
|
||||
}
|
||||
|
||||
// bd show --json returns an array
|
||||
var sources []moveBeadInfo
|
||||
if err := json.Unmarshal(output, &sources); err != nil {
|
||||
return fmt.Errorf("parsing bead data: %w", err)
|
||||
}
|
||||
if len(sources) == 0 {
|
||||
return fmt.Errorf("bead %s not found", sourceID)
|
||||
}
|
||||
source := sources[0]
|
||||
|
||||
// Don't move closed beads
|
||||
if source.Status == "closed" {
|
||||
return fmt.Errorf("cannot move closed bead %s", sourceID)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Moving %s to %s...\n", style.Bold.Render("→"), sourceID, targetPrefix)
|
||||
fmt.Printf(" Title: %s\n", source.Title)
|
||||
fmt.Printf(" Type: %s\n", source.Type)
|
||||
|
||||
if beadMoveDryRun {
|
||||
fmt.Printf("\nDry run - would:\n")
|
||||
fmt.Printf(" 1. Create new bead with prefix %s\n", targetPrefix)
|
||||
fmt.Printf(" 2. Close %s with reference to new bead\n", sourceID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Build create command for target
|
||||
createArgs := []string{
|
||||
"create",
|
||||
"--prefix", targetPrefix,
|
||||
"--title", source.Title,
|
||||
"--type", source.Type,
|
||||
"--priority", fmt.Sprintf("%d", source.Priority),
|
||||
"--silent", // Only output the ID
|
||||
}
|
||||
|
||||
if source.Description != "" {
|
||||
createArgs = append(createArgs, "--description", source.Description)
|
||||
}
|
||||
if source.Assignee != "" {
|
||||
createArgs = append(createArgs, "--assignee", source.Assignee)
|
||||
}
|
||||
for _, label := range source.Labels {
|
||||
createArgs = append(createArgs, "--label", label)
|
||||
}
|
||||
|
||||
// Create the new bead
|
||||
createCmd := exec.Command("bd", createArgs...)
|
||||
createCmd.Stderr = os.Stderr
|
||||
newIDBytes, err := createCmd.Output()
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating new bead: %w", err)
|
||||
}
|
||||
newID := strings.TrimSpace(string(newIDBytes))
|
||||
|
||||
fmt.Printf("%s Created %s\n", style.Bold.Render("✓"), newID)
|
||||
|
||||
// Close the source bead with reference
|
||||
closeReason := fmt.Sprintf("Moved to %s", newID)
|
||||
closeCmd := exec.Command("bd", "close", sourceID, "--reason", closeReason)
|
||||
closeCmd.Stderr = os.Stderr
|
||||
if err := closeCmd.Run(); err != nil {
|
||||
// Try to clean up the new bead if close fails
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to close source bead: %v\n", err)
|
||||
fmt.Fprintf(os.Stderr, "New bead %s was created but source %s remains open\n", newID, sourceID)
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("%s Closed %s (moved to %s)\n", style.Bold.Render("✓"), sourceID, newID)
|
||||
fmt.Printf("\nBead moved: %s → %s\n", sourceID, newID)
|
||||
|
||||
return nil
|
||||
}
|
||||
419
internal/cmd/beads_db_init_test.go
Normal file
419
internal/cmd/beads_db_init_test.go
Normal file
@@ -0,0 +1,419 @@
|
||||
//go:build integration
|
||||
|
||||
// Package cmd contains integration tests for beads db initialization after clone.
|
||||
//
|
||||
// Run with: go test -tags=integration ./internal/cmd -run TestBeadsDbInitAfterClone -v
|
||||
//
|
||||
// Bug: GitHub Issue #72
|
||||
// When a repo with tracked .beads/ is added as a rig, beads.db doesn't exist
|
||||
// (it's gitignored) and bd operations fail because no one runs `bd init`.
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// createTrackedBeadsRepoWithIssues creates a git repo with .beads/ tracked that contains existing issues.
// This simulates a clone of a repo that has tracked beads with issues exported to issues.jsonl.
// The beads.db is NOT included (gitignored), so prefix must be detected from issues.jsonl.
//
// path is the directory to create the repo in; prefix is passed to `bd init`;
// numIssues beads are created before .beads/ is committed. Requires `git` and
// `bd` on PATH; any failure aborts the test via t.Fatalf.
func createTrackedBeadsRepoWithIssues(t *testing.T, path, prefix string, numIssues int) {
	t.Helper()

	// Create directory
	if err := os.MkdirAll(path, 0755); err != nil {
		t.Fatalf("mkdir repo: %v", err)
	}

	// Initialize git repo with explicit main branch
	cmds := [][]string{
		{"git", "init", "--initial-branch=main"},
		{"git", "config", "user.email", "test@test.com"},
		{"git", "config", "user.name", "Test User"},
	}
	for _, args := range cmds {
		cmd := exec.Command(args[0], args[1:]...)
		cmd.Dir = path
		if out, err := cmd.CombinedOutput(); err != nil {
			t.Fatalf("git %v: %v\n%s", args, err, out)
		}
	}

	// Create initial file and commit (so we have something before beads)
	readmePath := filepath.Join(path, "README.md")
	if err := os.WriteFile(readmePath, []byte("# Test Repo\n"), 0644); err != nil {
		t.Fatalf("write README: %v", err)
	}

	commitCmds := [][]string{
		{"git", "add", "."},
		{"git", "commit", "-m", "Initial commit"},
	}
	for _, args := range commitCmds {
		cmd := exec.Command(args[0], args[1:]...)
		cmd.Dir = path
		if out, err := cmd.CombinedOutput(); err != nil {
			t.Fatalf("git %v: %v\n%s", args, err, out)
		}
	}

	// Initialize beads
	beadsDir := filepath.Join(path, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("mkdir .beads: %v", err)
	}

	// Run bd init (--no-daemon keeps the test hermetic)
	cmd := exec.Command("bd", "--no-daemon", "init", "--prefix", prefix)
	cmd.Dir = path
	if output, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("bd init failed: %v\nOutput: %s", err, output)
	}

	// Create issues
	for i := 1; i <= numIssues; i++ {
		cmd = exec.Command("bd", "--no-daemon", "-q", "create",
			"--type", "task", "--title", fmt.Sprintf("Test issue %d", i))
		cmd.Dir = path
		if output, err := cmd.CombinedOutput(); err != nil {
			t.Fatalf("bd create issue %d failed: %v\nOutput: %s", i, err, output)
		}
	}

	// Add .beads to git (simulating tracked beads)
	cmd = exec.Command("git", "add", ".beads")
	cmd.Dir = path
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("git add .beads: %v\n%s", err, out)
	}

	cmd = exec.Command("git", "commit", "-m", "Add beads with issues")
	cmd.Dir = path
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("git commit beads: %v\n%s", err, out)
	}

	// Remove beads.db to simulate what a clone would look like
	// (beads.db is gitignored, so cloned repos don't have it)
	dbPath := filepath.Join(beadsDir, "beads.db")
	if err := os.Remove(dbPath); err != nil {
		t.Fatalf("remove beads.db: %v", err)
	}
}
|
||||
|
||||
// TestBeadsDbInitAfterClone tests that when a tracked beads repo is added as a rig,
|
||||
// the beads database is properly initialized even though beads.db doesn't exist.
|
||||
func TestBeadsDbInitAfterClone(t *testing.T) {
|
||||
// Skip if bd is not available
|
||||
if _, err := exec.LookPath("bd"); err != nil {
|
||||
t.Skip("bd not installed, skipping test")
|
||||
}
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
gtBinary := buildGT(t)
|
||||
|
||||
t.Run("TrackedRepoWithExistingPrefix", func(t *testing.T) {
|
||||
// GitHub Issue #72: gt rig add should detect existing prefix from tracked beads
|
||||
// https://github.com/steveyegge/gastown/issues/72
|
||||
//
|
||||
// This tests that when a tracked beads repo has existing issues in issues.jsonl,
|
||||
// gt rig add can detect the prefix from those issues WITHOUT --prefix flag.
|
||||
|
||||
townRoot := filepath.Join(tmpDir, "town-prefix-test")
|
||||
reposDir := filepath.Join(tmpDir, "repos")
|
||||
os.MkdirAll(reposDir, 0755)
|
||||
|
||||
// Create a repo with existing beads prefix "existing-prefix" AND issues
|
||||
// This creates issues.jsonl with issues like "existing-prefix-1", etc.
|
||||
existingRepo := filepath.Join(reposDir, "existing-repo")
|
||||
createTrackedBeadsRepoWithIssues(t, existingRepo, "existing-prefix", 3)
|
||||
|
||||
// Install town
|
||||
cmd := exec.Command(gtBinary, "install", townRoot, "--name", "prefix-test")
|
||||
cmd.Env = append(os.Environ(), "HOME="+tmpDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("gt install failed: %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
// Add rig WITHOUT specifying --prefix - should detect "existing-prefix" from issues.jsonl
|
||||
cmd = exec.Command(gtBinary, "rig", "add", "myrig", existingRepo)
|
||||
cmd.Dir = townRoot
|
||||
cmd.Env = append(os.Environ(), "HOME="+tmpDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("gt rig add failed: %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
// Verify routes.jsonl has the prefix
|
||||
routesContent, err := os.ReadFile(filepath.Join(townRoot, ".beads", "routes.jsonl"))
|
||||
if err != nil {
|
||||
t.Fatalf("read routes.jsonl: %v", err)
|
||||
}
|
||||
|
||||
if !strings.Contains(string(routesContent), `"prefix":"existing-prefix-"`) {
|
||||
t.Errorf("routes.jsonl should contain existing-prefix-, got:\n%s", routesContent)
|
||||
}
|
||||
|
||||
// NOW TRY TO USE bd - this is the key test for the bug
|
||||
// Without the fix, beads.db doesn't exist and bd operations fail
|
||||
rigPath := filepath.Join(townRoot, "myrig", "mayor", "rig")
|
||||
cmd = exec.Command("bd", "--no-daemon", "--json", "-q", "create",
|
||||
"--type", "task", "--title", "test-from-rig")
|
||||
cmd.Dir = rigPath
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("bd create failed (bug!): %v\nOutput: %s\n\nThis is the bug: beads.db doesn't exist after clone because bd init was never run", err, output)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
if err := json.Unmarshal(output, &result); err != nil {
|
||||
t.Fatalf("parse output: %v", err)
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(result.ID, "existing-prefix-") {
|
||||
t.Errorf("expected existing-prefix- prefix, got %s", result.ID)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("TrackedRepoWithNoIssuesRequiresPrefix", func(t *testing.T) {
|
||||
// Regression test: When a tracked beads repo has NO issues (fresh init),
|
||||
// gt rig add must use the --prefix flag since there's nothing to detect from.
|
||||
|
||||
townRoot := filepath.Join(tmpDir, "town-no-issues")
|
||||
reposDir := filepath.Join(tmpDir, "repos-no-issues")
|
||||
os.MkdirAll(reposDir, 0755)
|
||||
|
||||
// Create a tracked beads repo with NO issues (just bd init)
|
||||
emptyRepo := filepath.Join(reposDir, "empty-repo")
|
||||
createTrackedBeadsRepoWithNoIssues(t, emptyRepo, "empty-prefix")
|
||||
|
||||
// Install town
|
||||
cmd := exec.Command(gtBinary, "install", townRoot, "--name", "no-issues-test")
|
||||
cmd.Env = append(os.Environ(), "HOME="+tmpDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("gt install failed: %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
// Add rig WITH --prefix since we can't detect from empty issues.jsonl
|
||||
cmd = exec.Command(gtBinary, "rig", "add", "emptyrig", emptyRepo, "--prefix", "empty-prefix")
|
||||
cmd.Dir = townRoot
|
||||
cmd.Env = append(os.Environ(), "HOME="+tmpDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("gt rig add with --prefix failed: %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
// Verify routes.jsonl has the prefix
|
||||
routesContent, err := os.ReadFile(filepath.Join(townRoot, ".beads", "routes.jsonl"))
|
||||
if err != nil {
|
||||
t.Fatalf("read routes.jsonl: %v", err)
|
||||
}
|
||||
|
||||
if !strings.Contains(string(routesContent), `"prefix":"empty-prefix-"`) {
|
||||
t.Errorf("routes.jsonl should contain empty-prefix-, got:\n%s", routesContent)
|
||||
}
|
||||
|
||||
// Verify bd operations work with the configured prefix
|
||||
rigPath := filepath.Join(townRoot, "emptyrig", "mayor", "rig")
|
||||
cmd = exec.Command("bd", "--no-daemon", "--json", "-q", "create",
|
||||
"--type", "task", "--title", "test-from-empty-repo")
|
||||
cmd.Dir = rigPath
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("bd create failed: %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
if err := json.Unmarshal(output, &result); err != nil {
|
||||
t.Fatalf("parse output: %v", err)
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(result.ID, "empty-prefix-") {
|
||||
t.Errorf("expected empty-prefix- prefix, got %s", result.ID)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("TrackedRepoWithPrefixMismatchErrors", func(t *testing.T) {
|
||||
// Test that when --prefix is explicitly provided but doesn't match
|
||||
// the prefix detected from existing issues, gt rig add fails with an error.
|
||||
|
||||
townRoot := filepath.Join(tmpDir, "town-mismatch")
|
||||
reposDir := filepath.Join(tmpDir, "repos-mismatch")
|
||||
os.MkdirAll(reposDir, 0755)
|
||||
|
||||
// Create a repo with existing beads prefix "real-prefix" with issues
|
||||
mismatchRepo := filepath.Join(reposDir, "mismatch-repo")
|
||||
createTrackedBeadsRepoWithIssues(t, mismatchRepo, "real-prefix", 2)
|
||||
|
||||
// Install town
|
||||
cmd := exec.Command(gtBinary, "install", townRoot, "--name", "mismatch-test")
|
||||
cmd.Env = append(os.Environ(), "HOME="+tmpDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("gt install failed: %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
// Add rig with WRONG --prefix - should fail
|
||||
cmd = exec.Command(gtBinary, "rig", "add", "mismatchrig", mismatchRepo, "--prefix", "wrong-prefix")
|
||||
cmd.Dir = townRoot
|
||||
cmd.Env = append(os.Environ(), "HOME="+tmpDir)
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
// Should fail
|
||||
if err == nil {
|
||||
t.Fatalf("gt rig add should have failed with prefix mismatch, but succeeded.\nOutput: %s", output)
|
||||
}
|
||||
|
||||
// Verify error message mentions the mismatch
|
||||
outputStr := string(output)
|
||||
if !strings.Contains(outputStr, "prefix mismatch") {
|
||||
t.Errorf("expected 'prefix mismatch' in error, got:\n%s", outputStr)
|
||||
}
|
||||
if !strings.Contains(outputStr, "real-prefix") {
|
||||
t.Errorf("expected 'real-prefix' (detected) in error, got:\n%s", outputStr)
|
||||
}
|
||||
if !strings.Contains(outputStr, "wrong-prefix") {
|
||||
t.Errorf("expected 'wrong-prefix' (provided) in error, got:\n%s", outputStr)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("TrackedRepoWithNoIssuesFallsBackToDerivedPrefix", func(t *testing.T) {
|
||||
// Test the fallback behavior: when a tracked beads repo has NO issues
|
||||
// and NO --prefix is provided, gt rig add should derive prefix from rig name.
|
||||
|
||||
townRoot := filepath.Join(tmpDir, "town-derived")
|
||||
reposDir := filepath.Join(tmpDir, "repos-derived")
|
||||
os.MkdirAll(reposDir, 0755)
|
||||
|
||||
// Create a tracked beads repo with NO issues
|
||||
derivedRepo := filepath.Join(reposDir, "derived-repo")
|
||||
createTrackedBeadsRepoWithNoIssues(t, derivedRepo, "original-prefix")
|
||||
|
||||
// Install town
|
||||
cmd := exec.Command(gtBinary, "install", townRoot, "--name", "derived-test")
|
||||
cmd.Env = append(os.Environ(), "HOME="+tmpDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("gt install failed: %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
// Add rig WITHOUT --prefix - should derive from rig name "testrig"
|
||||
// deriveBeadsPrefix("testrig") should produce some abbreviation
|
||||
cmd = exec.Command(gtBinary, "rig", "add", "testrig", derivedRepo)
|
||||
cmd.Dir = townRoot
|
||||
cmd.Env = append(os.Environ(), "HOME="+tmpDir)
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("gt rig add (no --prefix) failed: %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
// The output should mention "Using prefix" since detection failed
|
||||
if !strings.Contains(string(output), "Using prefix") {
|
||||
t.Logf("Output: %s", output)
|
||||
}
|
||||
|
||||
// Verify bd operations work - the key test is that beads.db was initialized
|
||||
rigPath := filepath.Join(townRoot, "testrig", "mayor", "rig")
|
||||
cmd = exec.Command("bd", "--no-daemon", "--json", "-q", "create",
|
||||
"--type", "task", "--title", "test-derived-prefix")
|
||||
cmd.Dir = rigPath
|
||||
output, err = cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("bd create failed (beads.db not initialized?): %v\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
if err := json.Unmarshal(output, &result); err != nil {
|
||||
t.Fatalf("parse output: %v", err)
|
||||
}
|
||||
|
||||
// The ID should have SOME prefix (derived from "testrig")
|
||||
// We don't care exactly what it is, just that bd works
|
||||
if result.ID == "" {
|
||||
t.Error("expected non-empty issue ID")
|
||||
}
|
||||
t.Logf("Created issue with derived prefix: %s", result.ID)
|
||||
})
|
||||
}
|
||||
|
||||
// createTrackedBeadsRepoWithNoIssues creates a git repo with .beads/ tracked but NO issues.
// This simulates a fresh bd init that was committed before any issues were created.
func createTrackedBeadsRepoWithNoIssues(t *testing.T, path, prefix string) {
	t.Helper()

	// Create the repository directory.
	if err := os.MkdirAll(path, 0755); err != nil {
		t.Fatalf("mkdir repo: %v", err)
	}

	// runGit executes one git command inside the repo, failing the test on error.
	runGit := func(gitArgs ...string) {
		args := append([]string{"git"}, gitArgs...)
		cmd := exec.Command(args[0], args[1:]...)
		cmd.Dir = path
		if out, err := cmd.CombinedOutput(); err != nil {
			t.Fatalf("git %v: %v\n%s", args, err, out)
		}
	}

	// Initialize git repo with explicit main branch and a test identity.
	runGit("init", "--initial-branch=main")
	runGit("config", "user.email", "test@test.com")
	runGit("config", "user.name", "Test User")

	// Seed the repo with an initial commit.
	readmePath := filepath.Join(path, "README.md")
	if err := os.WriteFile(readmePath, []byte("# Test Repo\n"), 0644); err != nil {
		t.Fatalf("write README: %v", err)
	}
	runGit("add", ".")
	runGit("commit", "-m", "Initial commit")

	// Prepare the beads directory.
	beadsDir := filepath.Join(path, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("mkdir .beads: %v", err)
	}

	// Run bd init (creates beads.db but no issues).
	initCmd := exec.Command("bd", "--no-daemon", "init", "--prefix", prefix)
	initCmd.Dir = path
	if output, err := initCmd.CombinedOutput(); err != nil {
		t.Fatalf("bd init failed: %v\nOutput: %s", err, output)
	}

	// Commit .beads so the repo simulates tracked beads configuration.
	addCmd := exec.Command("git", "add", ".beads")
	addCmd.Dir = path
	if out, err := addCmd.CombinedOutput(); err != nil {
		t.Fatalf("git add .beads: %v\n%s", err, out)
	}
	commitCmd := exec.Command("git", "commit", "-m", "Add beads (no issues)")
	commitCmd.Dir = path
	if out, err := commitCmd.CombinedOutput(); err != nil {
		t.Fatalf("git commit beads: %v\n%s", err, out)
	}

	// A fresh clone would not carry the local database; drop it to match.
	if err := os.Remove(filepath.Join(beadsDir, "beads.db")); err != nil {
		t.Fatalf("remove beads.db: %v", err)
	}
}
|
||||
@@ -6,10 +6,10 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
@@ -104,6 +104,58 @@ func setupRoutingTestTown(t *testing.T) string {
|
||||
return townRoot
|
||||
}
|
||||
|
||||
// initBeadsDBWithPrefix runs `bd init` in dir with the given issue prefix,
// then seeds an empty issues.jsonl, failing the test on any error.
func initBeadsDBWithPrefix(t *testing.T, dir, prefix string) {
	t.Helper()

	initCmd := exec.Command("bd", "--no-daemon", "init", "--quiet", "--prefix", prefix)
	initCmd.Dir = dir
	if output, err := initCmd.CombinedOutput(); err != nil {
		t.Fatalf("bd init failed in %s: %v\n%s", dir, err, output)
	}

	// Create empty issues.jsonl to prevent bd auto-export from corrupting routes.jsonl.
	// Without this, bd create writes issue data to routes.jsonl (the first .jsonl file
	// it finds), corrupting the routing configuration. This mirrors what gt install does.
	issuesPath := filepath.Join(dir, ".beads", "issues.jsonl")
	if err := os.WriteFile(issuesPath, nil, 0644); err != nil {
		t.Fatalf("create issues.jsonl in %s: %v", dir, err)
	}
}
|
||||
|
||||
func createTestIssue(t *testing.T, dir, title string) *beads.Issue {
|
||||
t.Helper()
|
||||
|
||||
args := []string{"--no-daemon", "create", "--json", "--title", title, "--type", "task",
|
||||
"--description", "Integration test issue"}
|
||||
cmd := exec.Command("bd", args...)
|
||||
cmd.Dir = dir
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
combinedCmd := exec.Command("bd", args...)
|
||||
combinedCmd.Dir = dir
|
||||
combinedOutput, _ := combinedCmd.CombinedOutput()
|
||||
t.Fatalf("create issue in %s: %v\n%s", dir, err, combinedOutput)
|
||||
}
|
||||
|
||||
var issue beads.Issue
|
||||
if err := json.Unmarshal(output, &issue); err != nil {
|
||||
t.Fatalf("parse create output in %s: %v", dir, err)
|
||||
}
|
||||
if issue.ID == "" {
|
||||
t.Fatalf("create issue in %s returned empty ID", dir)
|
||||
}
|
||||
return &issue
|
||||
}
|
||||
|
||||
func hasIssueID(issues []*beads.Issue, id string) bool {
|
||||
for _, issue := range issues {
|
||||
if issue.ID == id {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// TestBeadsRoutingFromTownRoot verifies that bd show routes to correct rig
|
||||
// based on issue ID prefix when run from town root.
|
||||
func TestBeadsRoutingFromTownRoot(t *testing.T) {
|
||||
@@ -114,37 +166,38 @@ func TestBeadsRoutingFromTownRoot(t *testing.T) {
|
||||
|
||||
townRoot := setupRoutingTestTown(t)
|
||||
|
||||
initBeadsDBWithPrefix(t, townRoot, "hq")
|
||||
|
||||
gastownRigPath := filepath.Join(townRoot, "gastown", "mayor", "rig")
|
||||
testrigRigPath := filepath.Join(townRoot, "testrig", "mayor", "rig")
|
||||
initBeadsDBWithPrefix(t, gastownRigPath, "gt")
|
||||
initBeadsDBWithPrefix(t, testrigRigPath, "tr")
|
||||
|
||||
townIssue := createTestIssue(t, townRoot, "Town-level routing test")
|
||||
gastownIssue := createTestIssue(t, gastownRigPath, "Gastown routing test")
|
||||
testrigIssue := createTestIssue(t, testrigRigPath, "Testrig routing test")
|
||||
|
||||
tests := []struct {
|
||||
prefix string
|
||||
expectedRig string // Expected rig path fragment in error/output
|
||||
id string
|
||||
title string
|
||||
}{
|
||||
{"hq-", "."}, // Town-level beads
|
||||
{"gt-", "gastown"},
|
||||
{"tr-", "testrig"},
|
||||
{townIssue.ID, townIssue.Title},
|
||||
{gastownIssue.ID, gastownIssue.Title},
|
||||
{testrigIssue.ID, testrigIssue.Title},
|
||||
}
|
||||
|
||||
townBeads := beads.New(townRoot)
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.prefix, func(t *testing.T) {
|
||||
// Create a fake issue ID with the prefix
|
||||
issueID := tc.prefix + "test123"
|
||||
|
||||
// Run bd show - it will fail since issue doesn't exist,
|
||||
// but we're testing routing, not the issue itself
|
||||
cmd := exec.Command("bd", "--no-daemon", "show", issueID)
|
||||
cmd.Dir = townRoot
|
||||
cmd.Env = append(os.Environ(), "BD_DEBUG_ROUTING=1")
|
||||
output, _ := cmd.CombinedOutput()
|
||||
|
||||
// The debug routing output or error message should indicate
|
||||
// which beads directory was used
|
||||
outputStr := string(output)
|
||||
t.Logf("Output for %s: %s", issueID, outputStr)
|
||||
|
||||
// We expect either the routing debug output or an error from the correct beads
|
||||
// If routing works, the error will be about not finding the issue,
|
||||
// not about routing failure
|
||||
if strings.Contains(outputStr, "no matching route") {
|
||||
t.Errorf("routing failed for prefix %s: %s", tc.prefix, outputStr)
|
||||
t.Run(tc.id, func(t *testing.T) {
|
||||
issue, err := townBeads.Show(tc.id)
|
||||
if err != nil {
|
||||
t.Fatalf("bd show %s failed: %v", tc.id, err)
|
||||
}
|
||||
if issue.ID != tc.id {
|
||||
t.Errorf("issue.ID = %s, want %s", issue.ID, tc.id)
|
||||
}
|
||||
if issue.Title != tc.title {
|
||||
t.Errorf("issue.Title = %q, want %q", issue.Title, tc.title)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -263,30 +316,21 @@ func TestBeadsListFromPolecatDirectory(t *testing.T) {
|
||||
townRoot := setupRoutingTestTown(t)
|
||||
polecatDir := filepath.Join(townRoot, "gastown", "polecats", "rictus")
|
||||
|
||||
// Initialize beads in mayor/rig so bd list can work
|
||||
mayorRigBeads := filepath.Join(townRoot, "gastown", "mayor", "rig", ".beads")
|
||||
rigPath := filepath.Join(townRoot, "gastown", "mayor", "rig")
|
||||
initBeadsDBWithPrefix(t, rigPath, "gt")
|
||||
|
||||
// Create a minimal beads.db (or use bd init)
|
||||
// For now, just test that the redirect is followed
|
||||
cmd := exec.Command("bd", "--no-daemon", "list")
|
||||
cmd.Dir = polecatDir
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
// We expect either success (empty list) or an error about missing db,
|
||||
// but NOT an error about missing .beads directory (since redirect should work)
|
||||
outputStr := string(output)
|
||||
t.Logf("bd list output: %s", outputStr)
|
||||
issue := createTestIssue(t, rigPath, "Polecat list redirect test")
|
||||
|
||||
issues, err := beads.New(polecatDir).List(beads.ListOptions{
|
||||
Status: "open",
|
||||
Priority: -1,
|
||||
})
|
||||
if err != nil {
|
||||
// Check it's not a "no .beads directory" error
|
||||
if strings.Contains(outputStr, "no .beads directory") {
|
||||
t.Errorf("redirect not followed: %s", outputStr)
|
||||
}
|
||||
// Check it's finding the right beads directory via redirect
|
||||
if strings.Contains(outputStr, "redirect") && !strings.Contains(outputStr, mayorRigBeads) {
|
||||
// This is okay - the redirect is being processed
|
||||
t.Logf("redirect detected in output (expected)")
|
||||
}
|
||||
t.Fatalf("bd list from polecat dir failed: %v", err)
|
||||
}
|
||||
|
||||
if !hasIssueID(issues, issue.ID) {
|
||||
t.Errorf("bd list from polecat dir missing issue %s", issue.ID)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -300,18 +344,20 @@ func TestBeadsListFromCrewDirectory(t *testing.T) {
|
||||
townRoot := setupRoutingTestTown(t)
|
||||
crewDir := filepath.Join(townRoot, "gastown", "crew", "max")
|
||||
|
||||
cmd := exec.Command("bd", "--no-daemon", "list")
|
||||
cmd.Dir = crewDir
|
||||
output, err := cmd.CombinedOutput()
|
||||
rigPath := filepath.Join(townRoot, "gastown", "mayor", "rig")
|
||||
initBeadsDBWithPrefix(t, rigPath, "gt")
|
||||
|
||||
outputStr := string(output)
|
||||
t.Logf("bd list output from crew: %s", outputStr)
|
||||
issue := createTestIssue(t, rigPath, "Crew list redirect test")
|
||||
|
||||
issues, err := beads.New(crewDir).List(beads.ListOptions{
|
||||
Status: "open",
|
||||
Priority: -1,
|
||||
})
|
||||
if err != nil {
|
||||
// Check it's not a "no .beads directory" error
|
||||
if strings.Contains(outputStr, "no .beads directory") {
|
||||
t.Errorf("redirect not followed for crew: %s", outputStr)
|
||||
}
|
||||
t.Fatalf("bd list from crew dir failed: %v", err)
|
||||
}
|
||||
if !hasIssueID(issues, issue.ID) {
|
||||
t.Errorf("bd list from crew dir missing issue %s", issue.ID)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -443,6 +489,73 @@ func TestBeadsRemoveRoute(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestSlingCrossRigRoutingResolution verifies that sling can resolve rig paths
|
||||
// for cross-rig bead hooking using ExtractPrefix and GetRigPathForPrefix.
|
||||
// This is the fix for https://github.com/steveyegge/gastown/issues/148
|
||||
func TestSlingCrossRigRoutingResolution(t *testing.T) {
|
||||
townRoot := setupRoutingTestTown(t)
|
||||
|
||||
tests := []struct {
|
||||
beadID string
|
||||
expectedPath string // Relative to townRoot, or "." for town-level
|
||||
}{
|
||||
{"gt-mol-abc", "gastown/mayor/rig"},
|
||||
{"tr-task-xyz", "testrig/mayor/rig"},
|
||||
{"hq-cv-123", "."}, // Town-level beads
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.beadID, func(t *testing.T) {
|
||||
// Step 1: Extract prefix from bead ID
|
||||
prefix := beads.ExtractPrefix(tc.beadID)
|
||||
if prefix == "" {
|
||||
t.Fatalf("ExtractPrefix(%q) returned empty", tc.beadID)
|
||||
}
|
||||
|
||||
// Step 2: Resolve rig path from prefix
|
||||
rigPath := beads.GetRigPathForPrefix(townRoot, prefix)
|
||||
if rigPath == "" {
|
||||
t.Fatalf("GetRigPathForPrefix(%q, %q) returned empty", townRoot, prefix)
|
||||
}
|
||||
|
||||
// Step 3: Verify the path is correct
|
||||
var expectedFull string
|
||||
if tc.expectedPath == "." {
|
||||
expectedFull = townRoot
|
||||
} else {
|
||||
expectedFull = filepath.Join(townRoot, tc.expectedPath)
|
||||
}
|
||||
|
||||
if rigPath != expectedFull {
|
||||
t.Errorf("GetRigPathForPrefix resolved to %q, want %q", rigPath, expectedFull)
|
||||
}
|
||||
|
||||
// Step 4: Verify the .beads directory exists at that path
|
||||
beadsDir := filepath.Join(rigPath, ".beads")
|
||||
if _, err := os.Stat(beadsDir); os.IsNotExist(err) {
|
||||
t.Errorf(".beads directory doesn't exist at resolved path: %s", beadsDir)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestSlingCrossRigUnknownPrefix verifies behavior for unknown prefixes.
|
||||
func TestSlingCrossRigUnknownPrefix(t *testing.T) {
|
||||
townRoot := setupRoutingTestTown(t)
|
||||
|
||||
// An unknown prefix should return empty string
|
||||
unknownBeadID := "xx-unknown-123"
|
||||
prefix := beads.ExtractPrefix(unknownBeadID)
|
||||
if prefix != "xx-" {
|
||||
t.Fatalf("ExtractPrefix(%q) = %q, want %q", unknownBeadID, prefix, "xx-")
|
||||
}
|
||||
|
||||
rigPath := beads.GetRigPathForPrefix(townRoot, prefix)
|
||||
if rigPath != "" {
|
||||
t.Errorf("GetRigPathForPrefix for unknown prefix returned %q, want empty", rigPath)
|
||||
}
|
||||
}
|
||||
|
||||
// TestBeadsGetPrefixForRig verifies prefix lookup by rig name.
|
||||
func TestBeadsGetPrefixForRig(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
156
internal/cmd/beads_version.go
Normal file
156
internal/cmd/beads_version.go
Normal file
@@ -0,0 +1,156 @@
|
||||
// Package cmd provides CLI commands for the gt tool.
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MinBeadsVersion is the minimum required beads version for Gas Town.
// This version must include custom type support (bd-i54l).
// Compared against the installed `bd version` output at startup.
const MinBeadsVersion = "0.44.0"
|
||||
|
||||
// beadsVersion represents a parsed semantic version.
type beadsVersion struct {
	major int // first component ("0" in "0.44.0")
	minor int // second component ("44" in "0.44.0")
	patch int // third component; 0 when absent
}

// parseBeadsVersion parses a version string like "0.44.0" into components.
// A leading "v" is ignored, the patch component is optional, and a
// non-numeric suffix on the patch (as in "0.44.0-dev") is dropped.
func parseBeadsVersion(v string) (beadsVersion, error) {
	// Strip leading 'v' if present, then split into dot-separated parts.
	trimmed := strings.TrimPrefix(v, "v")
	parts := strings.Split(trimmed, ".")
	if len(parts) < 2 {
		return beadsVersion{}, fmt.Errorf("invalid version format: %s", trimmed)
	}

	major, err := strconv.Atoi(parts[0])
	if err != nil {
		return beadsVersion{}, fmt.Errorf("invalid major version: %s", parts[0])
	}

	minor, err := strconv.Atoi(parts[1])
	if err != nil {
		return beadsVersion{}, fmt.Errorf("invalid minor version: %s", parts[1])
	}

	patch := 0
	if len(parts) >= 3 {
		// Keep only the numeric prefix so "0-dev" parses as 0.
		numeric := parts[2]
		for i, r := range numeric {
			if r < '0' || r > '9' {
				numeric = numeric[:i]
				break
			}
		}
		if numeric != "" {
			if patch, err = strconv.Atoi(numeric); err != nil {
				return beadsVersion{}, fmt.Errorf("invalid patch version: %s", parts[2])
			}
		}
	}

	return beadsVersion{major: major, minor: minor, patch: patch}, nil
}

// compare returns -1 if v < other, 0 if equal, 1 if v > other.
func (v beadsVersion) compare(other beadsVersion) int {
	// Compare component pairs in priority order: major, minor, patch.
	pairs := [3][2]int{
		{v.major, other.major},
		{v.minor, other.minor},
		{v.patch, other.patch},
	}
	for _, p := range pairs {
		switch {
		case p[0] < p[1]:
			return -1
		case p[0] > p[1]:
			return 1
		}
	}
	return 0
}
|
||||
|
||||
// beadsVersionRe matches the version number in `bd version` output,
// e.g. "bd version 0.44.0 (dev)". Compiled once at package scope so the
// regex is never rebuilt per call.
var beadsVersionRe = regexp.MustCompile(`bd version (\d+\.\d+(?:\.\d+)?(?:-\w+)?)`)
|
||||
|
||||
func getBeadsVersion() (string, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, "bd", "version")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
if ctx.Err() == context.DeadlineExceeded {
|
||||
return "", fmt.Errorf("bd version check timed out")
|
||||
}
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
return "", fmt.Errorf("bd version failed: %s", string(exitErr.Stderr))
|
||||
}
|
||||
return "", fmt.Errorf("failed to run bd: %w (is beads installed?)", err)
|
||||
}
|
||||
|
||||
// Parse output like "bd version 0.44.0 (dev)"
|
||||
// or "bd version 0.44.0"
|
||||
matches := beadsVersionRe.FindStringSubmatch(string(output))
|
||||
if len(matches) < 2 {
|
||||
return "", fmt.Errorf("could not parse beads version from: %s", strings.TrimSpace(string(output)))
|
||||
}
|
||||
|
||||
return matches[1], nil
|
||||
}
|
||||
|
||||
// Process-wide cache for the beads version check: versionCheckOnce guards
// the single evaluation whose result lands in cachedVersionCheckResult.
var (
	cachedVersionCheckResult error
	versionCheckOnce         sync.Once
)
|
||||
|
||||
// CheckBeadsVersion verifies that the installed beads version meets the minimum requirement.
// Returns nil if the version is sufficient, or an error with details if not.
// The check is performed only once per process execution.
func CheckBeadsVersion() error {
	versionCheckOnce.Do(func() {
		// Run the expensive subprocess check once; later calls reuse the result.
		cachedVersionCheckResult = checkBeadsVersionInternal()
	})
	return cachedVersionCheckResult
}
|
||||
|
||||
func checkBeadsVersionInternal() error {
|
||||
installedStr, err := getBeadsVersion()
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot verify beads version: %w", err)
|
||||
}
|
||||
|
||||
installed, err := parseBeadsVersion(installedStr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot parse installed beads version %q: %w", installedStr, err)
|
||||
}
|
||||
|
||||
required, err := parseBeadsVersion(MinBeadsVersion)
|
||||
if err != nil {
|
||||
// This would be a bug in our code
|
||||
return fmt.Errorf("cannot parse required beads version %q: %w", MinBeadsVersion, err)
|
||||
}
|
||||
|
||||
if installed.compare(required) < 0 {
|
||||
return fmt.Errorf("beads version %s is required, but %s is installed\n\nPlease upgrade beads: go install github.com/steveyegge/beads/cmd/bd@latest", MinBeadsVersion, installedStr)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user